Commit 5f6071b8 by Joe Blaylock

Merge pull request #37 from edx/jrbl/125-show-users-scored-subs

TIM-125: Students completing scoring see results
parents 8f08dce5 941f5969
@@ -140,7 +140,7 @@ def create_evaluation(
     # Check if the submission is finished and its Author has graded enough.
     student_item = submission.student_item
-    _check_if_finished_and_create_score(
+    _score_if_finished(
         student_item,
         submission,
         required_evaluations_for_student,
@@ -159,7 +159,7 @@ def create_evaluation(
         student_item=scorer_item
     ).order_by("-attempt_number")
-    _check_if_finished_and_create_score(
+    _score_if_finished(
         scorer_item,
         scorer_submissions[0],
         required_evaluations_for_student,
@@ -177,11 +177,11 @@ def create_evaluation(
     raise PeerEvaluationInternalError(error_message)
 
-def _check_if_finished_and_create_score(student_item,
+def _score_if_finished(student_item,
                        submission,
                        required_evaluations_for_student,
                        required_evaluations_for_submission):
-    """Basic function for checking if a student is finished with peer workflow.
+    """Calculate final grade iff peer evaluation flow is satisfied.
 
     Checks if the student is finished with the peer evaluation workflow. If the
     student already has a final grade calculated, there is no need to proceed.
@@ -397,4 +397,4 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_
         for evaluation in evaluations:
             already_evaluated = already_evaluated or evaluation.scorer_id == student_id
         if not already_evaluated:
-            return submission
\ No newline at end of file
+            return submission
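For context on the rename: the docstring and the two call sites above imply roughly the following logic. This is a minimal sketch, not code from this commit; _has_final_score, _graded_enough, _received_enough, and _create_score are hypothetical stand-ins for whatever helpers the module actually uses.

def _has_final_score(student_item):
    ...  # e.g. check whether a final Score already exists for this student item

def _graded_enough(student_item, required):
    ...  # count evaluations this student has authored against `required`

def _received_enough(submission, required):
    ...  # count evaluations this submission has received against `required`

def _create_score(student_item, submission):
    ...  # persist the final score

def _score_if_finished(student_item,
                       submission,
                       required_evaluations_for_student,
                       required_evaluations_for_submission):
    """Calculate final grade iff peer evaluation flow is satisfied."""
    if _has_final_score(student_item):
        return  # final grade already calculated; no need to proceed
    if (_graded_enough(student_item, required_evaluations_for_student) and
            _received_enough(submission, required_evaluations_for_submission)):
        _create_score(student_item, submission)

The new name also reads better at the call sites: create_evaluation() now says what happens (score if finished) rather than how it is checked.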
@@ -8,9 +8,9 @@ function OpenAssessmentBlock(runtime, element) {
     /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
     function displayStatus(result) {
-        status = result[0]
-        error_msg = result[2]
-        if (status) {
+        status = result[0];
+        error_msg = result[2];
+        if (status === 'true') {
             $('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
         } else {
             $('.openassessment_response_status_block', element).html(failure_msg.concat(error_msg).concat(click_msg));
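The status comparison is the substantive fix here. The new "=== 'true'" check suggests the status reaches this callback as a string, and a non-empty string such as 'false' is truthy, so the old bare check would render the success message even on failure. The same pitfall in Python terms (that the transport stringifies the flag is an inference from this diff, not something it states):

status = "false"             # a stringified failure flag
assert bool(status) is True  # non-empty string: a bare truthiness check reads as success
assert status != "true"      # the explicit comparison classifies it correctly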
@@ -77,7 +77,8 @@ class TestOpenAssessment(TestCase):
         return request
 
     def test_submit_submission(self):
-        """XBlock accepts response, returns true on success."""
+        """XBlock accepts response, returns true on success"""
+        # This one should pass because we haven't submitted before
         resp = self.runtime.handle(
             self.assessment, 'submit',
             self.make_request(self.default_json_submission)
@@ -85,6 +86,23 @@ class TestOpenAssessment(TestCase):
         result = json.loads(resp.body)
         self.assertTrue(result[0])
 
+    def test_submission_multisubmit_failure(self):
+        """XBlock returns true on first, false on second submission"""
+        # We don't care about the return value of the first submission
+        resp = self.runtime.handle(
+            self.assessment, 'submit',
+            self.make_request(self.default_json_submission)
+        )
+        # This one should fail because we're not allowed to submit multiple times
+        resp = self.runtime.handle(
+            self.assessment, 'submit',
+            self.make_request(self.default_json_submission)
+        )
+        result = json.loads(resp.body)
+        self.assertFalse(result[0])
+        self.assertEqual(result[1], "ENOMULTI")
+        self.assertEqual(result[2], self.assessment.submit_errors["ENOMULTI"])
     @patch.object(api, 'create_submission')
     def test_submission_general_failure(self, mock_submit):
         """Internal errors return some code for submission failure."""
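The new test pins down the failure contract for repeat submissions: a three-element payload of False, the code "ENOMULTI", and the matching message from submit_errors. A guard satisfying that contract might look like the sketch below; the payload shape and error code come from the test, while the error wording and the has_submitted flag are hypothetical.

submit_errors = {
    "ENOMULTI": "Multiple submissions are not allowed for this item",  # hypothetical wording
}

def submit(data, has_submitted):
    """Return [status, error_code, error_message], as the test asserts."""
    if has_submitted:
        return [False, "ENOMULTI", submit_errors["ENOMULTI"]]
    # ...otherwise create the submission via the submissions API...
    return [True, "", ""]

assert submit({}, has_submitted=True)[1] == "ENOMULTI"
assert submit({}, has_submitted=False)[0] is True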