Commit 941f5969 by Joe Blaylock

TIM-125: Students completing scoring see results

* When a score becomes available for a student (i.e., when they have
  completed the minimum number of peer assessments and have received
  enough assessments on their own submission), it is shown to the student.
* XXX: As an interim fix, since the cases around multiple existing
  submissions have not all been thought through yet, this commit disallows
  making multiple submissions.
* Refactored the XBlock view to devolve functionality into smaller
  pieces; this is interim work until there's a better workflow layer
  (or perhaps it can be the prep for that).
* TODO: since overgrading should be baked in, we should still show
  students their assessment page.
parent 8f08dce5
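
The gating described in the bullets above amounts to a two-sided check. A minimal sketch of that intent follows; it is not the code in this commit, and the helper names (_count_evaluations_given, _count_evaluations_received) are hypothetical:

def _score_is_available(student_item, submission,
                        required_evaluations_for_student,
                        required_evaluations_for_submission):
    """Illustration only: a score is shown once the student has graded
    enough peers AND received enough evaluations on their own submission."""
    graded_enough = _count_evaluations_given(student_item) >= required_evaluations_for_student
    been_graded_enough = _count_evaluations_received(submission) >= required_evaluations_for_submission
    return graded_enough and been_graded_enough
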
...@@ -140,7 +140,7 @@ def create_evaluation( ...@@ -140,7 +140,7 @@ def create_evaluation(
# Check if the submission is finished and its Author has graded enough. # Check if the submission is finished and its Author has graded enough.
student_item = submission.student_item student_item = submission.student_item
_check_if_finished_and_create_score( _score_if_finished(
student_item, student_item,
submission, submission,
required_evaluations_for_student, required_evaluations_for_student,
...@@ -159,7 +159,7 @@ def create_evaluation( ...@@ -159,7 +159,7 @@ def create_evaluation(
student_item=scorer_item student_item=scorer_item
).order_by("-attempt_number") ).order_by("-attempt_number")
_check_if_finished_and_create_score( _score_if_finished(
scorer_item, scorer_item,
scorer_submissions[0], scorer_submissions[0],
required_evaluations_for_student, required_evaluations_for_student,
...@@ -177,11 +177,11 @@ def create_evaluation( ...@@ -177,11 +177,11 @@ def create_evaluation(
raise PeerEvaluationInternalError(error_message) raise PeerEvaluationInternalError(error_message)
def _check_if_finished_and_create_score(student_item, def _score_if_finished(student_item,
submission, submission,
required_evaluations_for_student, required_evaluations_for_student,
required_evaluations_for_submission): required_evaluations_for_submission):
"""Basic function for checking if a student is finished with peer workflow. """Calculate final grade iff peer evaluation flow is satisfied.
Checks if the student is finished with the peer evaluation workflow. If the Checks if the student is finished with the peer evaluation workflow. If the
student already has a final grade calculated, there is no need to proceed. student already has a final grade calculated, there is no need to proceed.
...@@ -397,4 +397,4 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_ ...@@ -397,4 +397,4 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_
for evaluation in evaluations: for evaluation in evaluations:
already_evaluated = already_evaluated or evaluation.scorer_id == student_id already_evaluated = already_evaluated or evaluation.scorer_id == student_id
if not already_evaluated: if not already_evaluated:
return submission return submission
\ No newline at end of file
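
Per its docstring, _score_if_finished is meant to be a no-op once a final grade exists, and to create a score only when both thresholds are met. A rough sketch of that behavior, assuming hypothetical Score/PeerEvaluation models and a _calculate_final_score helper (none of which are confirmed by this diff):

def _score_if_finished_sketch(student_item, submission,
                              required_evaluations_for_student,
                              required_evaluations_for_submission):
    # Nothing to do if a final grade has already been calculated.
    if Score.objects.filter(student_item=student_item).exists():
        return
    given = PeerEvaluation.objects.filter(scorer_id=student_item.student_id).count()
    received = PeerEvaluation.objects.filter(submission=submission).count()
    if (given >= required_evaluations_for_student
            and received >= required_evaluations_for_submission):
        _calculate_final_score(student_item, submission)
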
...@@ -7,11 +7,9 @@ from mako.template import Template ...@@ -7,11 +7,9 @@ from mako.template import Template
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fields import List, Scope, String from xblock.fields import List, Scope, String
from xblock.fragment import Fragment from xblock.fragment import Fragment
from submissions.api import SubmissionRequestError
from submissions import api from submissions import api as submissions_api
from openassessment.peer import api as peer_api from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerEvaluationWorkflowError
from scenario_parser import ScenarioParser from scenario_parser import ScenarioParser
...@@ -243,6 +241,7 @@ class OpenAssessmentBlock(XBlock): ...@@ -243,6 +241,7 @@ class OpenAssessmentBlock(XBlock):
'ENODATA': 'API returned an empty response', 'ENODATA': 'API returned an empty response',
'EBADFORM': 'API Submission Request Error', 'EBADFORM': 'API Submission Request Error',
'EUNKNOWN': 'API returned unclassified exception', 'EUNKNOWN': 'API returned unclassified exception',
'ENOMULTI': 'Multiple submissions are not allowed for this item',
} }
def _get_xblock_trace(self): def _get_xblock_trace(self):
...@@ -273,59 +272,118 @@ class OpenAssessmentBlock(XBlock): ...@@ -273,59 +272,118 @@ class OpenAssessmentBlock(XBlock):
def student_view(self, context=None): def student_view(self, context=None):
"""The main view of OpenAssessmentBlock, displayed when viewing courses.""" """The main view of OpenAssessmentBlock, displayed when viewing courses."""
def load(path): # XXX: For the moment, the initializations and if/else tree below
"""Handy helper for getting resources from our kit.""" # embody a rough-and-ready workflow, which will be replaced in the
data = pkg_resources.resource_string(__name__, path) # beatific future of a proper workflow module. Probably. HACK
return data.decode("utf8")
trace = self._get_xblock_trace() trace = self._get_xblock_trace()
student_item_dict = self._get_student_item_dict() student_item_dict = self._get_student_item_dict()
# This user's most recent previous submission
user_submission = self.__get_submission(student_item_dict)
# This score for this user's user_submission
user_score = self.__get_score(student_item_dict, user_submission)
peer_eval = self._hack_get_eval() # HACK: Replace with proper workflow.
peer_submission = self.__get_peer_submission(student_item_dict, peer_eval)
if user_score:
# We're Done!
return self.__view_helper_show_scores(user_score, trace)
elif user_submission and peer_submission:
# We've submitted, but not finished assessing. Do assessments.
return self._view_helper_make_assessment(peer_submission, trace)
elif user_submission:
# We've submitted, but there's no assessing to do yet.
return self.__view_helper_show_check_back()
else:
# We haven't submitted, so do that first.
# XXX: In future, we'll support multiple submissions and this will be wrong
return self._view_helper_make_submission(trace)
@staticmethod
def __get_submission(student_item_dict):
"""Return the most recent submission, if any, by user in student_item_dict"""
submissions = []
try: try:
previous_submissions = api.get_submissions(student_item_dict) submissions = submissions_api.get_submissions(student_item_dict)
except SubmissionRequestError: except submissions_api.SubmissionRequestError:
previous_submissions = [] # This error is actually ok.
pass
return submissions[0] if submissions else None
@staticmethod
def __get_score(student_item_dict, submission=False):
"""Return the most recent score, if any, for student item"""
scores = False
if submission:
scores = submissions_api.get_score(student_item_dict)
return scores[0] if scores else None
@staticmethod
def __get_peer_submission(student_item_dict, peer_eval):
"""Return a peer submission, if any, for user to assess"""
peer_submission = None
try: try:
# HACK: Replace with proper workflow. peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
peer_submission = False except peer_api.PeerEvaluationWorkflowError:
peer_eval = self._hack_get_peer_eval()
if peer_eval:
peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
except PeerEvaluationWorkflowError:
# Additional HACK: Without proper workflow, there may not be the # Additional HACK: Without proper workflow, there may not be the
# correct information to complete the request for a peer submission. # correct information to complete the request for a peer submission.
# This error should be handled properly once we have a workflow API. # This error should be handled properly once we have a workflow API.
pass pass
return peer_submission
def __view_helper_show_check_back(self):
"""Return HTML saying no peer work to assess, check back later."""
# This looks awful on purpose; XXX: should fix as shiny lands
return Fragment(u"<div>There are no submissions to review. Check back soon.</div>")
def __view_helper_show_scores(self, user_score, trace=None):
"""Return HTML to display users's score to them."""
# This looks awful on purpose; XXX: should fix as shiny lands
return Fragment(u"<div>You've received the following score:"
" %s/%s.</div>" % (user_score['points_earned'], user_score['points_possible']))
def _view_helper_make_assessment(self, peer_submission, trace=None):
"""Return HTML for rubric display and assessment solicitation."""
# Submits to assess handler
load = self._load
html = Template(load("static/html/oa_rubric.html"),
default_filters=mako_default_filters,
input_encoding='utf-8',
)
frag = Fragment(html.render_unicode(xblock_trace=trace,
peer_submission=peer_submission,
rubric_instructions=self.rubric_instructions,
rubric_criteria=self.rubric_criteria,
))
frag.add_css(load("static/css/openassessment.css"))
frag.add_javascript(load("static/js/src/oa_assessment.js"))
frag.initialize_js('OpenAssessmentBlock')
return frag
if previous_submissions and peer_submission: # XXX: until workflow better, move on w/ prev submit def _view_helper_make_submission(self, trace=None):
html = Template(load("static/html/oa_rubric.html"), """Return HTML for the page prompting the user and soliciting submissions."""
default_filters=mako_default_filters, # Submits to submission handler
input_encoding='utf-8', load = self._load
) html = Template(load("static/html/oa_submission.html"),
frag = Fragment(html.render_unicode(xblock_trace=trace, default_filters=mako_default_filters,
peer_submission=peer_submission, input_encoding='utf-8',
rubric_instructions=self.rubric_instructions, )
rubric_criteria=self.rubric_criteria, frag = Fragment(html.render_unicode(xblock_trace=trace, question=self.prompt))
)) frag.add_css(load("static/css/openassessment.css"))
frag.add_css(load("static/css/openassessment.css")) frag.add_javascript(load("static/js/src/oa_submission.js"))
frag.add_javascript(load("static/js/src/oa_assessment.js")) frag.initialize_js('OpenAssessmentBlock')
frag.initialize_js('OpenAssessmentBlock')
elif previous_submissions:
return Fragment(u"<div>There are no submissions to review.</div>")
else: # XXX: until workflow better, submit until submitted
html = Template(load("static/html/oa_submission.html"),
default_filters=mako_default_filters,
input_encoding='utf-8',
)
frag = Fragment(html.render_unicode(xblock_trace=trace, question=self.prompt))
frag.add_css(load("static/css/openassessment.css"))
frag.add_javascript(load("static/js/src/oa_submission.js"))
frag.initialize_js('OpenAssessmentBlock')
return frag return frag
def _hack_get_peer_eval(self): @staticmethod
def _load(path):
"""Help get resources from our package kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def _hack_get_eval(self):
# HACK: Forcing Peer Eval, we'll get the Eval config. # HACK: Forcing Peer Eval, we'll get the Eval config.
# TODO: When this is smarter, remove 'hack' from name
for next_eval in self.rubric_evals: for next_eval in self.rubric_evals:
if next_eval["type"] == "peereval": if next_eval["type"] == "peereval":
return next_eval return next_eval
...@@ -333,7 +391,7 @@ class OpenAssessmentBlock(XBlock): ...@@ -333,7 +391,7 @@ class OpenAssessmentBlock(XBlock):
@XBlock.json_handler @XBlock.json_handler
def assess(self, data, suffix=''): def assess(self, data, suffix=''):
# HACK: Replace with proper workflow. # HACK: Replace with proper workflow.
peer_eval = self._hack_get_peer_eval() peer_eval = self._hack_get_eval()
"""Place an assessment into Openassessment system""" """Place an assessment into Openassessment system"""
student_item_dict = self._get_student_item_dict() student_item_dict = self._get_student_item_dict()
...@@ -365,18 +423,25 @@ class OpenAssessmentBlock(XBlock): ...@@ -365,18 +423,25 @@ class OpenAssessmentBlock(XBlock):
status_text = None status_text = None
student_sub = data['submission'] student_sub = data['submission']
student_item_dict = self._get_student_item_dict() student_item_dict = self._get_student_item_dict()
try: prev_sub = self.__get_submission(student_item_dict)
if prev_sub:
# It is an error to submit multiple times for the same item
status_tag = 'ENOMULTI'
else:
status_tag = 'ENODATA' status_tag = 'ENODATA'
response = api.create_submission(student_item_dict, student_sub) try:
if response: response = submissions_api.create_submission(student_item_dict, student_sub)
except submissions_api.SubmissionRequestError, e:
status_tag = 'EBADFORM'
status_text = unicode(e.field_errors)
except submissions_api.SubmissionError:
status_tag = 'EUNKNOWN'
else:
status = True status = True
status_tag = response.get('student_item') status_tag = response.get('student_item')
status_text = response.get('attempt_number') status_text = response.get('attempt_number')
except api.SubmissionRequestError, e:
status_tag = 'EBADFORM'
status_text = unicode(e.field_errors)
except api.SubmissionError:
status_tag = 'EUNKNOWN'
# relies on success being orthogonal to errors # relies on success being orthogonal to errors
status_text = status_text if status_text else self.submit_errors[status_tag] status_text = status_text if status_text else self.submit_errors[status_tag]
return (status, status_tag, status_text) return (status, status_tag, status_text)
......
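
The HACK comments in student_view anticipate a proper workflow module. One hypothetical shape for it (names invented here, not part of this commit) would collapse the if/else tree into a single state lookup that the view dispatches on:

def _get_workflow_state(self, student_item_dict):
    # Hypothetical helper, not in this commit: reduce student_view's
    # branching to one state value.
    submission = self.__get_submission(student_item_dict)
    if not submission:
        return "submit"    # no submission yet; show the submission form
    if self.__get_score(student_item_dict, submission):
        return "done"      # final score available; show results
    peer_eval = self._hack_get_eval()
    if self.__get_peer_submission(student_item_dict, peer_eval):
        return "assess"    # peer work waiting; show the rubric
    return "wait"          # submitted, but nothing to assess yet
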
...@@ -8,9 +8,9 @@ function OpenAssessmentBlock(runtime, element) { ...@@ -8,9 +8,9 @@ function OpenAssessmentBlock(runtime, element) {
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */ /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function displayStatus(result) { function displayStatus(result) {
status = result[0] status = result[0];
error_msg = result[2] error_msg = result[2];
if (status) { if (status === 'true') {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg)); $('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else { } else {
$('.openassessment_response_status_block', element).html(failure_msg.concat(error_msg).concat(click_msg)); $('.openassessment_response_status_block', element).html(failure_msg.concat(error_msg).concat(click_msg));
......
...@@ -77,7 +77,8 @@ class TestOpenAssessment(TestCase): ...@@ -77,7 +77,8 @@ class TestOpenAssessment(TestCase):
return request return request
def test_submit_submission(self): def test_submit_submission(self):
"""XBlock accepts response, returns true on success.""" """XBlock accepts response, returns true on success"""
# This one should pass because we haven't submitted before
resp = self.runtime.handle( resp = self.runtime.handle(
self.assessment, 'submit', self.assessment, 'submit',
self.make_request(self.default_json_submission) self.make_request(self.default_json_submission)
...@@ -85,6 +86,23 @@ class TestOpenAssessment(TestCase): ...@@ -85,6 +86,23 @@ class TestOpenAssessment(TestCase):
result = json.loads(resp.body) result = json.loads(resp.body)
self.assertTrue(result[0]) self.assertTrue(result[0])
def test_submission_multisubmit_failure(self):
"""XBlock returns true on first, false on second submission"""
# We don't care about the return value of the first one
resp = self.runtime.handle(
self.assessment, 'submit',
self.make_request(self.default_json_submission)
)
# This one should fail because we're not allowed to submit multiple times
resp = self.runtime.handle(
self.assessment, 'submit',
self.make_request(self.default_json_submission)
)
result = json.loads(resp.body)
self.assertFalse(result[0])
self.assertEqual(result[1], "ENOMULTI")
self.assertEqual(result[2], self.assessment.submit_errors["ENOMULTI"])
@patch.object(api, 'create_submission') @patch.object(api, 'create_submission')
def test_submission_general_failure(self, mock_submit): def test_submission_general_failure(self, mock_submit):
"""Internal errors return some code for submission failure.""" """Internal errors return some code for submission failure."""
......