Commit fa48be6c by Stephen Sanchez

Merge pull request #215 from edx/sanchez/assessments_start_dates

Adding start date validation to assessment modules
parents 875c14f6 9dc9dba8
......@@ -11,7 +11,14 @@
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">Assess Peers</span>
{% if peer_due %}
{% if peer_start %}
<span class="step__deadline">available
<span class="date">
{{ peer_start }}
(in {{ peer_start|timeuntil }})
</span>
</span>
{% elif peer_due %}
<span class="step__deadline">due
<span class="date">
{{ peer_due }}
......
......@@ -12,7 +12,14 @@
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">Assess Yourself</span>
{% if self_due %}
{% if self_start %}
<span class="step__deadline">available
<span class="date">
{{ self_start }}
(in {{ self_start|timeuntil }})
</span>
</span>
{% elif self_due %}
<span class="step__deadline">due
<span class="date">
{{ self_due }}
......
......@@ -112,7 +112,7 @@ class PeerAssessmentMixin(object):
path = 'openassessmentblock/peer/oa_peer_unavailable.html'
finished = False
problem_open, date = self.is_open(step="peer")
problem_open, date = self.is_open(step="peer-assessment")
context_dict = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes" # TODO: Need to configure this.
......@@ -153,12 +153,16 @@ class PeerAssessmentMixin(object):
context_dict["submit_button_text"] = (
"Submit your assessment & move to response #{}"
).format(count + 2)
if assessment.get('due'):
context_dict["peer_due"] = self.format_datetime_string(assessment["due"])
if date == "due" and not problem_open:
if date == 'due' and not problem_open:
path = 'openassessmentblock/peer/oa_peer_closed.html'
elif date == 'start' and not problem_open:
if assessment.get('start'):
context_dict["peer_start"] = self.format_datetime_string(assessment["start"])
path = 'openassessmentblock/peer/oa_peer_unavailable.html'
elif workflow.get("status") == "peer":
peer_sub = self.get_peer_submission(student_item, assessment, over_grading)
if peer_sub:
......
......@@ -26,11 +26,16 @@ class SelfAssessmentMixin(object):
context = {}
assessment_module = self.get_assessment_module('self-assessment')
if assessment_module and assessment_module.get('due'):
context["self_due"] = self.format_datetime_string(assessment_module["due"])
path = 'openassessmentblock/self/oa_self_unavailable.html'
problem_open, date = self.is_open(step="self")
problem_open, date = self.is_open(step="self-assessment")
due_date = assessment_module.get('due')
if date == 'start' and not problem_open:
context["self_start"] = self.format_datetime_string(assessment_module["start"])
elif due_date:
context["self_due"] = self.format_datetime_string(assessment_module["due"])
workflow = self.get_workflow_info()
if not workflow:
return self.render_assessment(path, context)
......@@ -55,7 +60,7 @@ class SelfAssessmentMixin(object):
}
elif assessment is not None:
path = 'openassessmentblock/self/oa_self_complete.html'
elif date == "due" and not problem_open:
elif date == 'due' and not problem_open:
path = 'openassessmentblock/self/oa_self_closed.html'
return self.render_assessment(path, context)
......
<openassessment submission_due="2014-03-05">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" start="4014-03-02"/>
<assessment name="self-assessment" start="4014-03-8"/>
</assessments>
</openassessment>
......@@ -40,7 +40,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" start="2014-03-02" due="2014-03-10"/>
<assessment name="self-assessment" start="2014-03-8" due="2014-03-10"/>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" start="2014-03-02"/>
<assessment name="self-assessment" start="2014-03-8"/>
</assessments>
</openassessment>
......@@ -143,6 +143,24 @@ class TestPeerAssessment(XBlockHandlerTestCase):
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertEqual(resp['success'], False)
@scenario('data/assessment_not_started.xml', user_id='Bob')
def test_start_dates(self, xblock):
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, u"Bob's answer")
workflow_info = xblock.get_workflow_info()
self.assertEqual(workflow_info["status"], u'peer')
# Validate Submission Rendering.
request = namedtuple('Request', 'params')
request.params = {}
peer_response = xblock.render_peer_assessment(request)
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
# Validate Peer Rendering.
self.assertIn("available".encode('utf-8'), peer_response.body)
@scenario('data/over_grade_scenario.xml', user_id='Bob')
def test_turbo_grading(self, xblock):
student_item = xblock.get_student_item_dict()
......@@ -190,7 +208,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
#Validate Peer Rendering.
# Validate Peer Rendering.
self.assertIn("Sally".encode('utf-8'), peer_response.body)
peer_api.create_assessment(
sally_sub['uuid'],
......@@ -206,7 +224,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
#Validate Peer Rendering.
# Validate Peer Rendering.
self.assertIn("Hal".encode('utf-8'), peer_response.body)
peer_api.create_assessment(
hal_sub['uuid'],
......
......@@ -180,3 +180,18 @@ class TestSelfAssessment(XBlockHandlerTestCase):
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
@scenario('data/assessment_not_started.xml', user_id='Bob')
def test_start_dates(self, xblock):
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, u"Bob's answer")
workflow_info = xblock.get_workflow_info()
self.assertEqual(workflow_info["status"], u'peer')
self_response = xblock.render_self_assessment({})
self.assertIsNotNone(self_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), self_response.body)
# Validate Self Rendering.
self.assertIn("available".encode('utf-8'), self_response.body)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment