Commit dfc5e4cb by Will Daly

Self-assessment not yet started should be unavailable.

Display submission deadline in template, not problem deadline.
parent 26fc1126
......@@ -30,8 +30,8 @@
<div class="step__instruction">
<p>
Please provide your response below.
{% if formatted_due_date %}
You can save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">{{ formatted_due_date }}</span></span>.
{% if submission_due %}
You can save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">{{ submission_due }}</span></span>.
{% else %}
You can save your progress and return to complete your response at any time.
{% endif %}
......
......@@ -4,11 +4,11 @@
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility">
{% endblock %}
<span class="system__element" id="self_submission_uuid">
{{ self_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">Assess Yourself</span>
......
......@@ -468,18 +468,19 @@ class OpenAssessmentBlock(
"self-assessment": check whether the self-assessment section is open.
Returns:
tuple of the form (is_closed, reason, date), where
tuple of the form (is_closed, reason, start_date, due_date), where
is_closed (bool): indicates whether the step is closed.
reason (str or None): specifies the reason the step is closed ("start" or "due")
date (datetime or None): is the start/due date.
start_date (datetime): is the start date of the step/problem.
due_date (datetime): is the due date of the step/problem.
Examples:
>>> is_closed()
False, None, None
False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="submission")
True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861)
True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="self-assessment")
True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861)
True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
"""
submission_range = (self.start, self.submission_due)
......@@ -506,11 +507,11 @@ class OpenAssessmentBlock(
now = dt.datetime.now().replace(tzinfo=pytz.utc)
if now < open_range[0]:
return True, "start", open_range[0]
return True, "start", open_range[0], open_range[1]
elif now >= open_range[1]:
return True, "due", open_range[1]
return True, "due", open_range[0], open_range[1]
else:
return False, None, None
return False, None, open_range[0], open_range[1]
def is_released(self, step=None):
"""
......@@ -528,7 +529,7 @@ class OpenAssessmentBlock(
"""
# By default, assume that we're published, in case the runtime doesn't support publish date.
is_published = getattr(self, 'published_date', True) is not None
is_closed, reason, __ = self.is_closed(step=step)
is_closed, reason, __, __ = self.is_closed(step=step)
return is_published and (not is_closed or reason == 'due')
def get_assessment_module(self, mixin_name):
......
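As an illustration of the new return signature, here is a minimal sketch of how a caller can consume the four-tuple; the step name and message strings are hypothetical and not part of this commit:

    is_closed, reason, start_date, due_date = self.is_closed(step="submission")
    if is_closed and reason == "start":
        # The step has not opened yet, so show when it will open.
        message = u"Available on {}".format(self.format_datetime_string(start_date))
    elif is_closed and reason == "due":
        # The step is past its deadline, so show when it closed.
        message = u"Closed on {}".format(self.format_datetime_string(due_date))
    else:
        # The step is open; both boundary dates are still returned,
        # so no second call to is_closed() is needed.
        message = u"Open until {}".format(self.format_datetime_string(due_date))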
......@@ -136,13 +136,13 @@ class PeerAssessmentMixin(object):
"""
path = 'openassessmentblock/peer/oa_peer_unavailable.html'
finished = False
problem_closed, reason, date = self.is_closed(step="peer-assessment")
problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")
context_dict = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes" # TODO: Need to configure this.
}
submissions_closed, __, __ = self.is_closed(step="submission")
submissions_closed, __, __, __ = self.is_closed(step="submission")
workflow = self.get_workflow_info()
if workflow is None:
......@@ -178,10 +178,10 @@ class PeerAssessmentMixin(object):
).format(count + 2)
if reason == 'due' and problem_closed:
context_dict["peer_due"] = self.format_datetime_string(date)
context_dict["peer_due"] = self.format_datetime_string(due_date)
path = 'openassessmentblock/peer/oa_peer_closed.html'
elif reason == 'start' and problem_closed:
context_dict["peer_start"] = self.format_datetime_string(date)
context_dict["peer_start"] = self.format_datetime_string(start_date)
path = 'openassessmentblock/peer/oa_peer_unavailable.html'
elif workflow.get("status") == "peer":
peer_sub = self.get_peer_submission(student_item, assessment)
......
......@@ -23,47 +23,61 @@ class SelfAssessmentMixin(object):
@XBlock.handler
def render_self_assessment(self, data, suffix=''):
    context = {}
    assessment_module = self.get_assessment_module('self-assessment')

    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, date = self.is_closed(step="self-assessment")

    if problem_closed:
        if date == 'start':
            context["self_start"] = self.format_datetime_string(date)
        elif date == 'due':
            context["self_due"] = self.format_datetime_string(date)

    workflow = self.get_workflow_info()
    if not workflow:
        return self.render_assessment(path, context)

    try:
        submission = submission_api.get_submission(self.submission_uuid)
        assessment = self_api.get_assessment(
            workflow["submission_uuid"]
        )
    except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
        logger.exception(
            u"Could not retrieve self assessment for submission {}"
            .format(workflow["submission_uuid"])
        )
        return self.render_error(_(u"An unexpected error occurred."))

    if workflow["status"] == "self":
        path = 'openassessmentblock/self/oa_self_assessment.html'
        context = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes",  # TODO: Need to configure this.
            "self_submission": submission,
        }
    elif assessment is not None:
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif date == "due" and problem_closed:
        path = 'openassessmentblock/self/oa_self_closed.html'
    else:
        # No submission yet or in peer assessment
        path = 'openassessmentblock/self/oa_self_unavailable.html'
    return self.render_assessment(path, context)

@XBlock.handler
def render_self_assessment(self, data, suffix=''):
    try:
        path, context = self.self_path_and_context()
    except:
        msg = u"Could not retrieve self assessment for submission {}".format(self.submission_uuid)
        logger.exception(msg)
        return self.render_error(_(u"An unexpected error occurred."))
    else:
        return self.render_assessment(path, context)

def self_path_and_context(self):
    """
    Determine the template path and context to use when rendering the self-assessment step.

    Returns:
        tuple of `(path, context)`, where `path` (str) is the path to the template,
        and `context` (dict) is the template context.

    Raises:
        SubmissionError: Error occurred while retrieving the current submission.
        SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
    """
    context = {}
    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")

    # If we haven't submitted yet, `workflow` will be an empty dict,
    # and `workflow_status` will be None.
    workflow = self.get_workflow_info()
    workflow_status = workflow.get('status')

    if workflow_status == 'waiting' or workflow_status == 'done':
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif workflow_status == 'self' or problem_closed:
        assessment = self_api.get_assessment(workflow.get("submission_uuid"))

        if assessment is not None:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif problem_closed:
            if reason == 'start':
                context["self_start"] = self.format_datetime_string(start_date)
                path = 'openassessmentblock/self/oa_self_unavailable.html'
            elif reason == 'due':
                context["self_due"] = self.format_datetime_string(due_date)
                path = 'openassessmentblock/self/oa_self_closed.html'
        else:
            submission = submission_api.get_submission(self.submission_uuid)
            context["rubric_criteria"] = self.rubric_criteria
            context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
            context["self_submission"] = submission
            path = 'openassessmentblock/self/oa_self_assessment.html'

    return path, context
@XBlock.json_handler
def self_assess(self, data, suffix=''):
......
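Splitting the handler into `render_self_assessment` and `self_path_and_context` makes the state logic testable without rendering HTML. A minimal sketch of that pattern, assuming an `xblock` instance and the `mock` library (the stubbed status and UUID are hypothetical; the real tests appear further down):

    import mock

    # Stub the workflow API response, then inspect the chosen template
    # path and context directly, without going through the HTTP handler.
    xblock.get_workflow_info = mock.Mock(return_value={
        'status': 'waiting', 'submission_uuid': 'abcd1234'
    })
    path, context = xblock.self_path_and_context()
    assert path == 'openassessmentblock/self/oa_self_complete.html'
    assert context == {}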
......@@ -6,6 +6,7 @@ from xblock.core import XBlock
from submissions import api
from openassessment.workflow import api as workflow_api
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
......@@ -194,15 +195,20 @@ class SubmissionMixin(object):
"""
workflow = self.get_workflow_info()
problem_closed, __, date = self.is_closed('submission')
sub_due = date.strftime("%A, %B %d, %Y %X") if date is not None else None
problem_closed, __, __, due_date = self.is_closed('submission')
context = {
"saved_response": self.saved_response,
"save_status": self.save_status,
"submit_enabled": self.saved_response != '',
"submission_due": sub_due,
}
# Due dates can default to the distant future, in which case
# there's effectively no due date.
# If we don't add the date to the context, the template won't display it.
if due_date < DISTANT_FUTURE:
context["submission_due"] = due_date.strftime("%A, %B %d, %Y %X")
if not workflow and problem_closed:
path = 'openassessmentblock/response/oa_response_closed.html'
elif not workflow:
......
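The `DISTANT_FUTURE` sentinel lets the template treat "no deadline configured" as a missing key rather than a bogus far-future date. A standalone sketch of the guard, assuming the sentinel is a far-future UTC datetime as in `resolve_dates`:

    import datetime as dt
    import pytz

    # Assumed to mirror openassessment.xblock.resolve_dates.DISTANT_FUTURE.
    DISTANT_FUTURE = dt.datetime(dt.MAXYEAR, 1, 1).replace(tzinfo=pytz.utc)

    def submission_due_context(due_date):
        # Hypothetical helper: only expose the date when a real deadline
        # exists, so the template's {% if submission_due %} branch can
        # fall back to the "any time" copy.
        context = {}
        if due_date < DISTANT_FUTURE:
            context["submission_due"] = due_date.strftime("%A, %B %d, %Y %X")
        return context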
<openassessment submission_due="1999-03-05">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" due="2000-01-02"/>
<assessment name="self-assessment" due="2000-01-8"/>
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" due="2000-01-01"/>
</assessments>
</openassessment>
......@@ -40,10 +40,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
must_grade="5" must_be_graded_by="3"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00" />
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="5999-01-01"/>
</assessments>
</openassessment>
......@@ -200,28 +200,28 @@ class TestDates(XBlockHandlerTestCase):
self.assert_is_closed(
xblock,
dt.datetime(2014, 2, 28, 23, 59, 59),
None, True, "start", xblock.start,
None, True, "start", xblock.start, xblock.due,
released=False
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1),
None, False, None, None,
None, False, None, xblock.start, xblock.due,
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 4, 23, 59, 59),
None, False, None, None,
None, False, None, xblock.start, xblock.due,
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 5, 1, 1, 1),
None, True, "due", xblock.due,
None, True, "due", xblock.start, xblock.due,
released=True
)
......@@ -236,20 +236,25 @@ class TestDates(XBlockHandlerTestCase):
dt.datetime(2014, 2, 28, 23, 59, 59).replace(tzinfo=pytz.utc),
"submission", True, "start",
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2014, 4, 1).replace(tzinfo=pytz.utc),
released=False
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"submission", False, None, None,
"submission", False, None,
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2014, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"submission", False, None, None,
"submission", False, None,
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2014, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -257,6 +262,7 @@ class TestDates(XBlockHandlerTestCase):
xblock,
dt.datetime(2014, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"submission", True, "due",
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2014, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -272,20 +278,25 @@ class TestDates(XBlockHandlerTestCase):
dt.datetime(2015, 1, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", True, "start",
dt.datetime(2015, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2015, 4, 1).replace(tzinfo=pytz.utc),
released=False
)
self.assert_is_closed(
xblock,
dt.datetime(2015, 1, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", False, None, None,
"peer-assessment", False, None,
dt.datetime(2015, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2015, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2015, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", False, None, None,
"peer-assessment", False, None,
dt.datetime(2015, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2015, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -293,6 +304,7 @@ class TestDates(XBlockHandlerTestCase):
xblock,
dt.datetime(2015, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", True, "due",
dt.datetime(2015, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2015, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -308,20 +320,25 @@ class TestDates(XBlockHandlerTestCase):
dt.datetime(2016, 1, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"self-assessment", True, "start",
dt.datetime(2016, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2016, 4, 1).replace(tzinfo=pytz.utc),
released=False
)
self.assert_is_closed(
xblock,
dt.datetime(2016, 1, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"self-assessment", False, None, None,
"self-assessment", False, None,
dt.datetime(2016, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2016, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2016, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"self-assessment", False, None, None,
"self-assessment", False, None,
dt.datetime(2016, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2016, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -329,6 +346,7 @@ class TestDates(XBlockHandlerTestCase):
xblock,
dt.datetime(2016, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"self-assessment", True, "due",
dt.datetime(2016, 1, 2).replace(tzinfo=pytz.utc),
dt.datetime(2016, 4, 1).replace(tzinfo=pytz.utc),
released=True
)
......@@ -346,20 +364,25 @@ class TestDates(XBlockHandlerTestCase):
dt.datetime(2014, 2, 28, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", True, "start",
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2016, 5, 2).replace(tzinfo=pytz.utc),
released=False
)
self.assert_is_closed(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", False, None, None,
"peer-assessment", False, None,
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2016, 5, 2).replace(tzinfo=pytz.utc),
released=True
)
self.assert_is_closed(
xblock,
dt.datetime(2016, 5, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", False, None, None,
"peer-assessment", False, None,
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2016, 5, 2).replace(tzinfo=pytz.utc),
released=True
)
......@@ -367,6 +390,7 @@ class TestDates(XBlockHandlerTestCase):
xblock,
dt.datetime(2016, 5, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", True, "due",
dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
dt.datetime(2016, 5, 2).replace(tzinfo=pytz.utc),
released=True
)
......@@ -394,7 +418,7 @@ class TestDates(XBlockHandlerTestCase):
def assert_is_closed(
self, xblock, now, step, expected_is_closed, expected_reason,
expected_date=None, released=None
expected_start, expected_due, released=None
):
"""
Assert whether the XBlock step is open/closed.
......@@ -405,7 +429,8 @@ class TestDates(XBlockHandlerTestCase):
step (str): The step in the workflow (e.g. "submission", "self-assessment")
expected_is_closed (bool): Do we expect the step to be open or closed?
expected_reason (str): Either "start", "due", or None.
expected_date (datetime): Expected start/due date, or None
expected_start (datetime): Expected start date.
expected_due (datetime): Expected due date.
Kwargs:
released (bool): If set, check whether the XBlock has been released.
......@@ -421,10 +446,11 @@ class TestDates(XBlockHandlerTestCase):
self.addCleanup(datetime_patcher.stop)
mocked_datetime.datetime.now.return_value = now
is_closed, reason, date = xblock.is_closed(step=step)
is_closed, reason, start, due = xblock.is_closed(step=step)
self.assertEqual(is_closed, expected_is_closed)
self.assertEqual(reason, expected_reason)
self.assertEqual(date, expected_date)
self.assertEqual(start, expected_start)
self.assertEqual(due, expected_due)
if released is not None:
self.assertEqual(xblock.is_released(step=step), released)
......@@ -6,6 +6,7 @@ import copy
import json
import datetime
import mock
import pytz
from openassessment.assessment import self_api
from openassessment.workflow import api as workflow_api
from .base import XBlockHandlerTestCase, scenario
......@@ -92,7 +93,6 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn('workflow', resp['msg'].lower())
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
# Missing submission_uuid
......@@ -109,61 +109,6 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn('options_selected', resp['msg'])
# No user specified, to simulate the Studio preview runtime
@scenario('data/self_assessment_scenario.xml')
def test_render_self_assessment_preview(self, xblock):
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Not Available", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_complete(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Self-assess the submission
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Expect that the self assessment shows that we've completed the step
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Complete", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_open(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = xblock.create_submission(student_item, self.SUBMISSION)
with mock.patch('openassessment.assessment.peer_api.is_complete') as mock_complete:
mock_complete.return_value = True
# Expect that the self-assessment step is open
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("In Progress", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_no_submission(self, xblock):
# Without creating a submission, render the self-assessment step
# Expect that the step is closed
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Not Available", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessessment_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
mock_api.get_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("error", resp.lower())
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_api_error(self, xblock):
# Create a submission for the student
......@@ -182,27 +127,208 @@ class TestSelfAssessment(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
@scenario('data/assessment_not_started.xml', user_id='Bob')
def test_start_dates(self, xblock):
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, u"Bob's answer")
workflow_info = xblock.get_workflow_info()
self.assertEqual(workflow_info["status"], u'peer')
self_response = xblock.render_self_assessment({})
self.assertIsNotNone(self_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), self_response.body)
# Validate Self Rendering.
self.assertIn("available".encode('utf-8'), self_response.body)
@scenario('data/self_assessment_default_dates.xml', user_id='Bob')
def test_no_dates(self, xblock):
# In this scenario, the self-assessment has no dates specified,
# but the module before it specifies a start date, and the
# problem itself specifies a due date.
xblock.due = datetime.datetime(4000, 1, 1, 1)
self_response = xblock.render_self_assessment({})
self.assertIsNotNone(self_response)
self.assertIn("available".encode('utf-8'), self_response.body)
class TestSelfAssessmentRender(XBlockHandlerTestCase):
"""
Test rendering of the self-assessment step.
The basic strategy is to verify that we're providing the right
template and context for each possible state,
plus an integration test to verify that the context
is being rendered correctly.
"""
@scenario('data/self_assessment_unavailable.xml', user_id='Bob')
def test_unavailable(self, xblock):
# Start date is in the future for this scenario
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_unavailable.html',
{'self_start': datetime.datetime(5999, 1, 1).replace(tzinfo=pytz.utc)}
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_closed(self, xblock):
# Due date is in the past for this scenario
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_closed.html',
{'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc)}
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_open_no_submission(self, xblock):
# Without making a submission, this step should be unavailable
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_unavailable.html', {}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_in_peer_step(self, xblock):
# Make a submission, so we're in the peer-assessment step
xblock.create_submission(
xblock.get_student_item_dict(), u"ⱣȺꝑȺ đøn'ŧ ŧȺꝁɇ nø mɇss."
)
# Should still not be able to access self-assessment
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_unavailable.html', {}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_in_waiting_for_peer_step(self, xblock):
# Simulate the workflow status being "waiting"
# Currently, this implies that we've completed the self assessment module,
# but this may change in the future.
xblock.create_submission(
xblock.get_student_item_dict(), u"𝓟𝓪𝓼𝓼 𝓽𝓱𝓮 𝓹𝓮𝓪𝓼"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status='waiting'
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_done_status(self, xblock):
# Simulate the workflow status being "done"
xblock.create_submission(
xblock.get_student_item_dict(), u"Ⱥɨn'ŧ ɨŧ fᵾnꝁɏ"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status='done'
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_self_assessing(self, xblock):
# Simulate the workflow being in the self assessment step
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_assessment.html',
{
'rubric_criteria': xblock.rubric_criteria,
'estimated_time': '20 minutes',
'self_submission': submission
},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_open_completed_self_assessment(self, xblock):
# Simulate the workflow being in the self assessment step
# and we've created a self-assessment
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self_api.create_assessment(
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_started_and_past_due(self, xblock):
# Simulate the workflow being in the self assessment step
# Since we're past the due date, the step should appear closed.
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self._assert_path_and_context(
xblock,
'openassessmentblock/self/oa_self_closed.html',
{'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc)},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_completed_and_past_due(self, xblock):
# Simulate having completed self assessment
# Even though the problem is closed, we should still see
# that we completed the step.
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
)
self_api.create_assessment(
submission['uuid'],
xblock.get_student_item_dict()['student_id'],
{u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
{'criteria': xblock.rubric_criteria}
)
# This case probably isn't possible, because presumably when we create
# the self-assessment, the workflow status will be "waiting" or "done".
# We're checking it anyway to be overly defensive: if the user has made a self-assessment,
# we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status='self',
submission_uuid=submission['uuid']
)
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_integration(self, xblock):
# Simulate the workflow being in the self assessment step
# and we've created a self-assessment
submission = xblock.create_submission(
xblock.get_student_item_dict(), u"Test submission"
)
xblock.get_workflow_info = mock.Mock(return_value={
'status': 'self', 'submission_uuid': submission['uuid']
})
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertIn(u'in progress', resp.decode('utf-8').lower())
self.assertIn(u'Test submission', resp.decode('utf-8'))
@scenario('data/self_assessment_open.xml', user_id='Bob')
def test_retrieve_api_error(self, xblock):
# Simulate the workflow being in the self assessment step
xblock.get_workflow_info = mock.Mock(return_value={'status': 'self'})
# Simulate an error from the submission API
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_self:
mock_self.get_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertIn(u'error', resp.decode('utf-8').lower())
def _assert_path_and_context(
self, xblock, expected_path, expected_context,
workflow_status=None, submission_uuid=None
):
"""
Render the self assessment step and verify:
1) that the correct template and context were used
2) that the rendering occurred without an error
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Kwargs:
workflow_status (str): If provided, simulate this status from the workflow API.
submission_uuid (str): If provided, simulate this submission UUID for the current workflow.
"""
if workflow_status is not None:
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'submission_uuid': submission_uuid
})
path, context = xblock.self_path_and_context()
self.assertEqual(path, expected_path)
self.assertItemsEqual(context, expected_context)
# Verify that we render without error
resp = self.request(xblock, 'render_self_assessment', json.dumps({}))
self.assertGreater(len(resp), 0)