Commit 8c0e01b1 by Stephen Sanchez

Merge pull request #270 from edx/sanchez/TIM-267-Student-State

WIP: Removing the peer uuid from the assessment api
parents ff52a5c4 9605ed9c
@@ -90,7 +90,7 @@ class Command(BaseCommand):
# Retrieve the submission we want to score
# Note that we are NOT using the priority queue here, since we know
# exactly which submission we want to score.
-peer_api.create_peer_workflow_item(scorer_id, submission_uuid)
+peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
# Create the peer assessment
assessment = {
......
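Note on the hunk above: the scorer is now identified by the UUID of their own submission rather than by a student id. A minimal sketch of the updated call, assuming both submissions already exist (the argument names are taken from the hunk):

    from openassessment.assessment import peer_api

    # First argument: the UUID of the scorer's own submission (was scorer_id).
    # Second argument: the UUID of the submission being scored.
    peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)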
@@ -4,9 +4,6 @@
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility">
{% endblock %}
{% spaceless %}
<span class="system__element" id="peer_submission_uuid">
{{ peer_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
......
@@ -4,9 +4,6 @@
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility">
{% endblock %}
<span class="system__element" id="self_submission_uuid">
{{ self_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
......
@@ -161,7 +161,7 @@ class PeerAssessmentMixin(object):
if assessment:
context_dict["must_grade"] = assessment["must_grade"]
finished, count = peer_api.has_finished_required_evaluating(
-student_item,
+self.submission_uuid,
assessment["must_grade"]
)
context_dict["graded"] = count
@@ -212,7 +212,7 @@ class PeerAssessmentMixin(object):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
-student_item_dict,
+self.submission_uuid,
assessment["must_be_graded_by"],
True
)
......
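Note on the two hunks above: both PeerAssessmentMixin call sites now pass the learner's own submission UUID (self.submission_uuid) instead of a student item dict. A condensed sketch of the updated calls, assuming `assessment` is the peer step's configuration dict as in the surrounding code; the trailing True flag is carried over from the hunk and its meaning is not shown in this diff:

    # How many peers has this learner finished evaluating?
    finished, count = peer_api.has_finished_required_evaluating(
        self.submission_uuid,
        assessment["must_grade"]
    )

    # Fetch the next peer submission for this learner to assess.
    peer_submission = peer_api.get_submission_to_assess(
        self.submission_uuid,
        assessment["must_be_graded_by"],
        True
    )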
@@ -99,8 +99,6 @@ class SelfAssessmentMixin(object):
Dict with keys "success" (bool) indicating success/failure
and "msg" (unicode) containing additional information if an error occurs.
"""
-if 'submission_uuid' not in data:
-return {'success': False, 'msg': _(u"Missing submission_uuid key in request")}
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
......
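Note on the hunk above: with the submission UUID now resolved from the XBlock's own state, the self_assess handler no longer requires (or accepts) a submission_uuid key. A hypothetical minimal request body after this change, using option values borrowed from the JS specs below:

    # Hypothetical payload; the UUID is looked up server-side.
    data = {
        "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"}
    }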
@@ -15,11 +15,11 @@ describe("OpenAssessment.BaseView", function() {
grade: readFixtures("oa_grade_complete.html")
};
-this.peerAssess = function(submissionId, optionsSelected, feedback) {
+this.peerAssess = function(optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
-this.selfAssess = function(submissionId, optionsSelected) {
+this.selfAssess = function(optionsSelected) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
@@ -90,7 +90,7 @@ describe("OpenAssessment.BaseView", function() {
var testError = 'Test failure contacting server message';
loadSubviews(function() {
/* stub our selfAssess to fail */
-spyOn(server, 'selfAssess').andCallFake(function(submissionId, optionsSelected) {
+spyOn(server, 'selfAssess').andCallFake(function(optionsSelected) {
return $.Deferred(function(defer) { defer.rejectWith(server, [testError]); }).promise();
});
view.selfAssess();
......
@@ -97,7 +97,7 @@ describe("OpenAssessment.Server", function() {
var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").done(function() {
server.peerAssess(options, "Excellent job!").done(function() {
success = true;
});
@@ -106,7 +106,6 @@ describe("OpenAssessment.Server", function() {
url: '/peer_assess',
type: "POST",
data: JSON.stringify({
submission_uuid: "abc1234",
options_selected: options,
feedback: "Excellent job!"
})
@@ -306,7 +305,7 @@ describe("OpenAssessment.Server", function() {
var options = {clarity: "Very clear", precision: "Somewhat precise"};
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.peerAssess("abc1234", options, testString).fail(
server.peerAssess(options, testString).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
@@ -319,7 +318,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
@@ -331,7 +330,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
......
@@ -245,7 +245,6 @@ OpenAssessment.BaseView.prototype = {
*/
peerAssessRequest: function(successFunction) {
// Retrieve assessment info from the DOM
var submissionId = $("#peer_submission_uuid", this.element)[0].innerHTML.trim();
var optionsSelected = {};
$("#peer-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
@@ -257,7 +256,7 @@ OpenAssessment.BaseView.prototype = {
// Send the assessment to the server
var view = this;
this.toggleActionError('peer', null);
-this.server.peerAssess(submissionId, optionsSelected, feedback).done(
+this.server.peerAssess(optionsSelected, feedback).done(
successFunction
).fail(function(errMsg) {
view.toggleActionError('peer', errMsg);
@@ -269,7 +268,6 @@ OpenAssessment.BaseView.prototype = {
**/
selfAssess: function() {
// Retrieve self-assessment info from the DOM
var submissionId = $("#self_submission_uuid", this.element)[0].innerHTML.trim();
var optionsSelected = {};
$("#self-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
@@ -280,7 +278,7 @@ OpenAssessment.BaseView.prototype = {
// Send the assessment to the server
var view = this;
this.toggleActionError('self', null);
-this.server.selfAssess(submissionId, optionsSelected).done(
+this.server.selfAssess(optionsSelected).done(
function() {
view.renderPeerAssessmentStep();
view.renderSelfAssessmentStep();
......
@@ -221,7 +221,6 @@ OpenAssessment.Server.prototype = {
/**
Send a peer assessment to the XBlock.
Args:
-submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
feedback (string): Written feedback on the submission.
@@ -233,13 +232,13 @@ OpenAssessment.Server.prototype = {
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
var feedback = "Good job!";
server.peerAssess("abc123", options, feedback).done(
server.peerAssess(options, feedback).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
-peerAssess: function(submissionId, optionsSelected, feedback) {
+peerAssess: function(optionsSelected, feedback) {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
@@ -247,7 +246,6 @@ OpenAssessment.Server.prototype = {
}).promise();
}
var payload = JSON.stringify({
-submission_uuid: submissionId,
options_selected: optionsSelected,
feedback: feedback
});
@@ -271,7 +269,6 @@ OpenAssessment.Server.prototype = {
Send a self-assessment to the XBlock.
Args:
-submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
@@ -281,16 +278,15 @@ OpenAssessment.Server.prototype = {
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.selfAssess("abc123", options).done(
server.selfAssess(options).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
-selfAssess: function(submissionId, optionsSelected) {
+selfAssess: function(optionsSelected) {
var url = this.url('self_assess');
var payload = JSON.stringify({
-submission_uuid: submissionId,
options_selected: optionsSelected
});
return $.Deferred(function(defer) {
......
@@ -188,7 +188,7 @@ class TestGrade(XBlockHandlerTestCase):
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'])
-submission = peer_api.get_submission_to_assess(scorer, len(peers))
+submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
@@ -203,7 +203,7 @@ class TestGrade(XBlockHandlerTestCase):
# Have our user make assessments (so she can get a score)
for asmnt in peer_assessments:
-new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
+new_submission = peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
......
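Note on the two hunks above: the scorer is identified by the UUID of their own submission at every step of the updated flow. A condensed sketch, assuming `scorer`, `submission_text`, and `peers` from the surrounding test:

    scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
    workflow_api.create_workflow(scorer_sub['uuid'])

    # The scorer's own submission UUID, not a student item dict,
    # now drives the peer queue.
    submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))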
@@ -6,9 +6,6 @@ from collections import namedtuple
import copy
import json
import mock
-import submissions.api as sub_api
-from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api
from .base import XBlockHandlerTestCase, scenario
@@ -37,7 +34,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
-sub = peer_api.get_submission_to_assess(hal_student_item, 1)
+peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
@@ -48,7 +45,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
-sub = peer_api.get_submission_to_assess(sally_student_item, 1)
+peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
@@ -86,8 +83,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
-xblock.create_submission(another_student, self.SUBMISSION)
-peer_api.get_submission_to_assess(another_student, 3)
+another_submission = xblock.create_submission(another_student, self.SUBMISSION)
+peer_api.get_submission_to_assess(another_submission['uuid'], 3)
# Submit an assessment and expect a successful response
@@ -126,8 +123,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
-xblock.create_submission(another_student, self.SUBMISSION)
-peer_api.get_submission_to_assess(another_student, 3)
+another_sub = xblock.create_submission(another_student, self.SUBMISSION)
+peer_api.get_submission_to_assess(another_sub['uuid'], 3)
# Submit an assessment and expect a successful response
@@ -160,7 +157,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
-another_submission = xblock.create_submission(another_student, self.SUBMISSION)
+xblock.create_submission(another_student, self.SUBMISSION)
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
assessment = copy.deepcopy(self.ASSESSMENT)
@@ -211,7 +208,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
-sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
+sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
+assessment['submission_uuid'] = sally_sub['uuid']
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
@@ -222,7 +220,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
-hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
+hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
+assessment['submission_uuid'] = hal_sub['uuid']
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
@@ -243,9 +242,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
hal_response = "Hal".encode('utf-8') in peer_response.body
sally_response = "Sally".encode('utf-8') in peer_response.body
peer_api.create_assessment(
submission['uuid'],
student_item['student_id'],
......
@@ -95,13 +95,6 @@ class TestSelfAssessment(XBlockHandlerTestCase):
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
-# Missing submission_uuid
-assessment = copy.deepcopy(self.ASSESSMENT)
-del assessment['submission_uuid']
-resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
-self.assertFalse(resp['success'])
-self.assertIn('submission_uuid', resp['msg'])
# Missing options_selected
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['options_selected']
......