Commit 2b4b4e5a by Stephen Sanchez

Add in all the UI Workflow Hooks to have the correct front end behavior

Includes a number of JS changes, templates, and handlers to call the workflow api instead of complex, hard-coded logic.
parent 5e67d1dc
......@@ -3,7 +3,7 @@ Public interface for self-assessment.
"""
from django.utils.translation import ugettext as _
from submissions.api import (
get_submission_and_student, get_submissions,
get_submission_and_student, get_submission,
SubmissionNotFoundError, SubmissionRequestError
)
from openassessment.assessment.serializers import (
......@@ -97,12 +97,13 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
return serializer.data
def get_submission_and_assessment(student_item_dict):
def get_submission_and_assessment(submission_uuid):
"""
Retrieve a submission and self-assessment for a student item.
Args:
student_item_dict (dict): serialized StudentItem model
submission_uuid (str): The uuid of the submission we want
self-assessment information for.
Returns:
A tuple `(submission, assessment)` where:
......@@ -116,16 +117,14 @@ def get_submission_and_assessment(student_item_dict):
"""
# Look up the most recent submission from the student item
try:
submissions = get_submissions(student_item_dict, limit=1)
if not submissions:
submission = get_submission(submission_uuid)
if not submission:
return (None, None)
except SubmissionNotFoundError:
return (None, None)
except SubmissionRequestError as ex:
except SubmissionRequestError:
raise SelfAssessmentRequestError(_('Could not retrieve submission'))
submission_uuid = submissions[0]['uuid']
# Retrieve assessments for the submission
# We weakly enforce that number of self-assessments per submission is <= 1,
# but not at the database level. Someone could take advantage of the race condition
......@@ -139,9 +138,9 @@ def get_submission_and_assessment(student_item_dict):
# TODO -- remove once Dave's changes land
assessment_dict = full_assessment_dict(assessments[0])
assessment_dict['submission_uuid'] = submission_uuid
return (submissions[0], assessment_dict)
return submission, assessment_dict
else:
return (submissions[0], None)
return submission, None
def is_complete(submission_uuid):
......
......@@ -53,13 +53,15 @@ class TestSelfApi(TestCase):
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
self.assertEqual(get_submission_and_assessment(self.STUDENT_ITEM), (None, None))
self.assertEqual(get_submission_and_assessment("5"), (None, None))
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Now there should be a submission, but no self-assessment
received_submission, assessment = get_submission_and_assessment(self.STUDENT_ITEM)
received_submission, assessment = get_submission_and_assessment(
submission["uuid"]
)
self.assertItemsEqual(received_submission, submission)
self.assertIs(assessment, None)
self.assertFalse(is_complete(submission['uuid']))
......@@ -75,7 +77,9 @@ class TestSelfApi(TestCase):
self.assertTrue(is_complete(submission['uuid']))
# Retrieve the self-assessment
received_submission, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
received_submission, retrieved = get_submission_and_assessment(
submission["uuid"]
)
self.assertItemsEqual(received_submission, submission)
# Check that the assessment we created matches the assessment we retrieved
......@@ -171,7 +175,7 @@ class TestSelfApi(TestCase):
)
# Retrieve the self-assessment
_, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
_, retrieved = get_submission_and_assessment(submission["uuid"])
# Expect that both the created and retrieved assessments have the same
# timestamp, and it's >= our recorded time.
......@@ -196,7 +200,7 @@ class TestSelfApi(TestCase):
)
# Expect that we still have the original assessment
_, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
_, retrieved = get_submission_and_assessment(submission["uuid"])
self.assertItemsEqual(assessment, retrieved)
def test_is_complete_no_submission(self):
......
......@@ -3,7 +3,7 @@
<h2 class="openassessment__grade__title">Your Grade:</h2>
<div class="openassessment__grade__content">
<span class="grade__value">Incomplete</span>
<span class="grade__value">Not Completed</span>
<p>You did not complete the <span class="step">Peer Assessment</span> and <span class="step">Self Assessment</span> steps of this problem.</p>
</div>
</div>
\ No newline at end of file
......@@ -3,7 +3,7 @@
<h2 class="openassessment__grade__title">Your Grade:</h2>
<div class="openassessment__grade__content">
<span class="grade__value">Incomplete</span>
<span class="grade__value">Not Completed</span>
<p>You have not completed the <span class="step">Peer Assessment</span> and <span class="step">Self Assessment</span> steps of this problem.</p>
</div>
</div>
\ No newline at end of file
{% extends "openassessmentblock/peer/oa_peer_assessment.html" %}
{% block list_item %}
<li id="openassessment__peer-assessment"class="openassessment__steps__step step--peer-assessment ui-toggle-visibility">
<li id="openassessment__peer-assessment"class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......
{% extends "openassessmentblock/peer/oa_peer_assessment.html" %}
{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility">
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......
{% extends "openassessmentblock/peer/oa_peer_assessment.html" %}
{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility">
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......
......@@ -18,7 +18,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Not Completed</span>
</span>
</span>
{% endblock %}
......
{% extends "openassessmentblock/response/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility">
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......@@ -8,7 +8,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Closed</span>
</span>
</span>
{% endblock %}
......
{% extends "openassessmentblock/response/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--graded ui-toggle-visibility">
<li id="openassessment__response" class="openassessment__steps__step step--response is--graded ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......@@ -9,7 +9,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Graded</span>
</span>
</span>
{% endblock %}
......
{% extends "openassessmentblock/response/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--submitted ui-toggle-visibility">
<li id="openassessment__response" class="openassessment__steps__step step--response is--submitted ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......@@ -9,7 +9,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Submitted</span>
</span>
</span>
{% endblock %}
......
{% load i18n %}
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment is--expanded">
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{% endblock %}
<span class="system__element" id="self_submission_uuid">
{{ self_submission.uuid }}
......@@ -20,7 +20,7 @@
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<span class="copy">{{ step_status }}</span>
<span class="copy">Grading</span>
</span>
</span>
{% endblock %}
......
{% extends "openassessmentblock/self/oa_self_assessment.html" %}
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility">
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......@@ -9,7 +9,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Not Completed</span>
</span>
</span>
{% endblock %}
......
{% extends "openassessmentblock/self/oa_self_assessment.html" %}
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility">
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility is--collapsed">
{% endblock %}
{% block title %}
......@@ -9,7 +9,7 @@
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{{ step_status }}</span>
<span class="copy">Complete</span>
</span>
</span>
{% endblock %}
......
......@@ -101,19 +101,19 @@ class PeerAssessmentMixin(object):
more information on rendering XBlock sections.
"""
student_item = None
workflow = self.get_workflow_info()
problem_open, date = self.is_open(step="peer")
context_dict = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes" # TODO: Need to configure this.
}
path = 'openassessmentblock/peer/oa_peer_waiting.html'
assessment = self.get_assessment_module('peer-assessment')
if assessment:
context_dict["must_grade"] = assessment["must_grade"]
student_item = self.get_student_item_dict()
student_submission = self.get_user_submission(student_item)
finished, count = peer_api.has_finished_required_evaluating(
student_item,
......@@ -121,22 +121,26 @@ class PeerAssessmentMixin(object):
)
context_dict["graded"] = count
context_dict["review_num"] = count + 1
if finished:
path = "openassessmentblock/peer/oa_peer_complete.html"
elif student_submission:
peer_sub = self.get_peer_submission(student_item, assessment)
if peer_sub:
path = 'openassessmentblock/peer/oa_peer_assessment.html'
context_dict["peer_submission"] = peer_sub
if assessment["must_grade"] - count == 1:
context_dict["submit_button_text"] = "Submit your assessment & move onto next step."
context_dict["submit_button_text"] = (
"Submit your assessment & move onto next step."
)
else:
context_dict["submit_button_text"] = "Submit your assessment & move to response #{}".format(count + 2)
context_dict["submit_button_text"] = (
"Submit your assessment & move to response #{}"
).format(count + 2)
path = 'openassessmentblock/peer/oa_peer_waiting.html'
problem_open, date = self.is_open(step="peer")
if not problem_open and date == "due" and not finished:
path = 'openassessmentblock/peer/oa_peer_closed.html'
if date == "due" and not problem_open:
path = 'openassessmentblock/peer/oa_peer_closed.html'
elif workflow and workflow["status"] == "peer" and student_item:
peer_sub = self.get_peer_submission(student_item, assessment)
if peer_sub:
path = 'openassessmentblock/peer/oa_peer_assessment.html'
context_dict["peer_submission"] = peer_sub
elif workflow and workflow["status"] == "done":
path = "openassessmentblock/peer/oa_peer_complete.html"
return self.render_assessment(path, context_dict)
......
......@@ -22,41 +22,37 @@ class SelfAssessmentMixin(object):
@XBlock.handler
def render_self_assessment(self, data, suffix=''):
student = self.get_student_item_dict()
path = 'openassessmentblock/self/oa_self_closed.html'
context = {"step_status": "Incomplete"}
# Retrieve the self-assessment, if there is one
# If we are not logged in (as in Studio preview mode),
# we cannot interact with the self-assessment API.
if student['student_id'] is not None:
workflow = self.get_workflow_info()
if workflow:
return self._determine_assessment_state(workflow)
# Retrieve the self-assessment, if there is one
try:
submission, assessment = self_api.get_submission_and_assessment(student)
except self_api.SelfAssessmentRequestError:
logger.exception(u"Could not retrieve self assessment for {student_item}".format(student_item=student))
return self.render_error(_(u"An unexpected error occurred."))
# If we haven't submitted yet, we cannot self-assess
if submission is None:
path = 'openassessmentblock/self/oa_self_closed.html'
context = {"step_status": "Incomplete"}
# If we have already submitted, then we're complete
elif assessment is not None:
path = 'openassessmentblock/self/oa_self_complete.html'
context = {"step_status": "Complete"}
# Otherwise, we can submit a self-assessment
else:
path = 'openassessmentblock/self/oa_self_assessment.html'
context = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes", # TODO: Need to configure this.
"self_submission": submission,
"step_status": "Grading"
}
return self.render_assessment('openassessmentblock/self/oa_self_closed.html')
def _determine_assessment_state(self, workflow):
context = {}
try:
submission, assessment = self_api.get_submission_and_assessment(
workflow["submission_uuid"]
)
except self_api.SelfAssessmentRequestError:
logger.exception(
u"Could not retrieve self assessment for submission {}"
.format(workflow["submission_uuid"])
)
return self.render_error(_(u"An unexpected error occurred."))
if workflow["status"] == "self":
path = 'openassessmentblock/self/oa_self_assessment.html'
context = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes", # TODO: Need to configure this.
"self_submission": submission,
}
elif assessment:
path = 'openassessmentblock/self/oa_self_complete.html'
else:
path = 'openassessmentblock/self/oa_self_closed.html'
return self.render_assessment(path, context)
@XBlock.json_handler
......
......@@ -67,7 +67,6 @@ OpenAssessment.BaseUI.prototype = {
// Load the HTML
var sel = $('#openassessment__response', ui.element);
sel.replaceWith(html);
ui.setExpanded('response', expand);
// Install a click handler for submission
$('#step--response__submit', ui.element).click(
......@@ -102,7 +101,6 @@ OpenAssessment.BaseUI.prototype = {
// Load the HTML
var sel = $('#openassessment__peer-assessment', ui.element);
sel.replaceWith(html);
ui.setExpanded('peer-assessment', expand);
// Install a click handler for assessment
$('#peer-assessment--001__assessment__submit', ui.element).click(
......@@ -132,7 +130,6 @@ OpenAssessment.BaseUI.prototype = {
this.server.render('self_assessment').done(
function(html) {
$('#openassessment__self-assessment', ui.element).replaceWith(html);
ui.setExpanded('self-assessment', expand);
// Install a click handler for the submit button
$('#self-assessment--001__assessment__submit', ui.element).click(
......@@ -162,7 +159,6 @@ OpenAssessment.BaseUI.prototype = {
this.server.render('grade').done(
function(html) {
$('#openassessment__grade', ui.element).replaceWith(html);
ui.setExpanded('grade', expand);
}
).fail(function(errMsg) {
// TODO: display to the user
......
......@@ -53,10 +53,10 @@ class SubmissionMixin(object):
status_text = None
student_sub = data['submission']
student_item_dict = self.get_student_item_dict()
prev_sub = self.get_user_submission(student_item_dict)
workflow = self.get_workflow_info()
status_tag = 'ENOMULTI' # It is an error to submit multiple times for the same item
if not prev_sub:
if not workflow:
status_tag = 'ENODATA'
try:
submission = self.create_submission(student_item_dict, student_sub)
......@@ -125,30 +125,27 @@ class SubmissionMixin(object):
return scores[0] if scores else None
@staticmethod
def get_user_submission(student_item_dict):
"""Return the most recent submission by user in student_item_dict
def get_user_submission(submission_uuid):
"""Return the most recent submission by user in workflow
Given a student item, return the most recent submission. If no
submission is available, return None. All submissions are preserved, but
only the most recent will be returned in this function, since the active
workflow will only be concerned with the most recent submission.
Return the most recent submission. If no submission is available,
return None. All submissions are preserved, but only the most recent
will be returned in this function, since the active workflow will only
be concerned with the most recent submission.
Args:
student_item_dict (dict): The student item we want to get the
latest submission for.
submission_uuid (str): The uuid for the submission to retrieve.
Returns:
(dict): A dictionary representation of a submission to render to
the front end.
"""
submissions = []
try:
submissions = api.get_submissions(student_item_dict)
return api.get_submission(submission_uuid)
except api.SubmissionRequestError:
# This error is actually ok.
pass
return submissions[0] if submissions else None
return None
@property
def save_status(self):
......@@ -179,40 +176,40 @@ class SubmissionMixin(object):
Submitted and graded
"""
# TODO Check if Saved
student_item = self.get_student_item_dict()
# Has the student submitted?
student_submission = self.get_user_submission(student_item)
# Has it been graded yet?
student_score = self._get_submission_score(student_item)
step_status = "Graded" if student_score else "Submitted"
step_status = step_status if student_submission else "Incomplete"
assessment_ui_model = self.get_assessment_module('peer-assessment')
problem_open, date = self.is_open(step="submission")
workflow = self.get_workflow_info()
problem_open, date = self.is_open()
context = {
"student_submission": student_submission,
"student_score": student_score,
"step_status": step_status,
"saved_response": self.saved_response,
"save_status": self.save_status
}
path = "openassessmentblock/response/oa_response.html"
if student_score:
if not workflow and not problem_open:
path = 'openassessmentblock/response/oa_response_closed.html'
elif not workflow:
path = "openassessmentblock/response/oa_response.html"
elif workflow["status"] == "done":
assessment_ui_model = self.get_assessment_module('peer-assessment')
student_submission = self.get_user_submission(
workflow["submission_uuid"]
)
student_score = workflow["score"]
assessments = peer_api.get_assessments(student_submission["uuid"])
median_scores = peer_api.get_assessment_median_scores(
student_submission["uuid"],
assessment_ui_model["must_be_graded_by"]
)
context["student_submission"] = student_submission
context["peer_assessments"] = assessments
context["rubric_criteria"] = self.rubric_criteria
context["student_score"] = student_score
for criterion in context["rubric_criteria"]:
criterion["median_score"] = median_scores[criterion["name"]]
path = 'openassessmentblock/response/oa_response_graded.html'
elif student_submission:
else:
context["student_submission"] = self.get_user_submission(
workflow["submission_uuid"]
)
path = 'openassessmentblock/response/oa_response_submitted.html'
elif not problem_open and date == "due" and not student_submission:
path = 'openassessmentblock/response/oa_response_closed.html'
return self.render_assessment(path, context_dict=context)
......@@ -32,7 +32,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
# We don't care about return value of first one
self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
# This one should fail becaus we're not allowed to submit multiple times
# This one should fail because we're not allowed to submit multiple times
resp = self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
self.assertFalse(resp[0])
self.assertEqual(resp[1], "ENOMULTI")
......
......@@ -35,7 +35,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
# Expect that a self-assessment was created
_, assessment = self_api.get_submission_and_assessment(student_item)
_, assessment = self_api.get_submission_and_assessment(submission["uuid"])
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
......@@ -70,14 +70,14 @@ class TestSelfAssessment(XBlockHandlerTestCase):
@scenario('data/self_assessment_scenario.xml')
def test_render_self_assessment_preview(self, xblock):
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Incomplete", resp)
self.assertIn("Not Completed", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_complete(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Self-assess the submission
assessment = copy.deepcopy(self.ASSESSMENT)
......@@ -94,24 +94,25 @@ class TestSelfAssessment(XBlockHandlerTestCase):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Expect that the self-assessment step is open
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Grading", resp)
submission = xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.assessment.peer_api.is_complete') as mock_complete:
mock_complete.return_value = True
# Expect that the self-assessment step is open
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Grading", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_no_submission(self, xblock):
# Without creating a submission, render the self-assessment step
# Expect that the step is closed
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Incomplete", resp)
self.assertIn("Not Completed", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessessment_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = submission_api.create_submission(student_item, self.SUBMISSION)
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
......
......@@ -13,6 +13,10 @@ class WorkflowMixin(object):
def workflow_requirements(self):
assessment_ui_model = self.get_assessment_module('peer-assessment')
if not assessment_ui_model:
return {}
return {
"peer": {
"must_grade": assessment_ui_model["must_grade"],
......
......@@ -151,6 +151,7 @@ def create_submission(student_item_dict, answer, submitted_at=None,
logger.exception(error_message)
raise SubmissionInternalError(error_message)
def get_submission(submission_uuid):
"""Retrieves a single submission by uuid.
......@@ -192,6 +193,7 @@ def get_submission(submission_uuid):
return SubmissionSerializer(submission).data
def get_submission_and_student(uuid):
"""
Retrieve a submission by its unique identifier, including the associated student item.
......
......@@ -169,8 +169,8 @@ LOGGING = {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
},
},
}
# TODO: add config for XBLOCK_WORKBENCH { SCENARIO_CLASSES }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment