Commit a54859b2 by Will Daly

Validate assessment start/due dates

Validate that assessments are specified in the order: peer, then self.
Update XBlock date fields to use Scope.settings and rename them to "start" / "due".
Add validation to workbench scenario parsing.
Resolve unspecified dates.
Use resolved dates to determine whether problem/submission/assessments are open or closed.
parent 01ddf348
......@@ -5,7 +5,6 @@ scope of the Tim APIs.
"""
from copy import deepcopy
import dateutil.parser
from django.utils.translation import ugettext as _
from rest_framework import serializers
from openassessment.assessment.models import (
......@@ -272,60 +271,3 @@ def rubric_from_dict(rubric_dict):
rubric = rubric_serializer.save()
return rubric
def validate_assessment_dict(assessment_dict):
"""
Check that the assessment dict is semantically valid.
Args:
assessment_dict (dict): Serialized Assessment model.
Returns:
tuple of (is_valid, msg), where is_valid is a boolean indicating whether
the assessment is semantically valid, and msg is a unicode error message
(empty if valid).
"""
# Check that the assessment type is supported
if assessment_dict.get('name') not in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
# Peer assessments need to specify must_grade and must_be_graded_by
if assessment_dict.get('name') == 'peer-assessment':
if 'must_grade' not in assessment_dict:
return (False, _(u'Attribute "must_grade" is missing from peer assessment.'))
if 'must_be_graded_by' not in assessment_dict:
return (False, _(u'Attribute "must_be_graded_by" is missing from peer assessment.'))
# Number you need to grade is >= the number of people that need to grade you
must_grade = assessment_dict.get('must_grade')
must_be_graded_by = assessment_dict.get('must_be_graded_by')
if must_grade is None or must_grade < 1:
return (False, _('"must_grade" must be a positive integer'))
if must_be_graded_by is None or must_be_graded_by < 1:
return (False, _('"must_be_graded_by" must be a positive integer'))
if must_grade < must_be_graded_by:
return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
return (True, u'')
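As a usage sketch (the dict below is illustrative, not a fixture from this changeset), callers branch on the returned tuple instead of catching an exception:

# Usage sketch: the validator reports problems through its return value.
assessment = {
    "name": "peer-assessment",
    "must_grade": 5,
    "must_be_graded_by": 3,
}
is_valid, msg = validate_assessment_dict(assessment)
if not is_valid:
    raise ValueError(msg)  # or surface msg to the course author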
def validate_rubric_dict(rubric_dict):
"""
Check that the rubric is semantically valid.
Args:
rubric_dict (dict): Serialized Rubric model
Returns:
tuple of (is_valid, msg), where is_valid is a boolean indicating whether
the rubric is semantically valid, and msg is a unicode error message
(empty if valid).
"""
try:
rubric_from_dict(rubric_dict)
except InvalidRubric as ex:
return (False, ex.message)
else:
return (True, u'')
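A similar sketch for the rubric validator (the rubric dict is illustrative; the failure outcome is an assumption based on the InvalidRubric handling above):

# Usage sketch: deserialization errors are converted into the same
# (is_valid, msg) shape, so callers never catch InvalidRubric themselves.
is_valid, msg = validate_rubric_dict({"prompt": "Test prompt", "criteria": []})
# An empty criteria list is expected to fail deserialization (assumption),
# yielding is_valid == False and a human-readable msg.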
"""
Test validation of serialized models.
"""
import ddt
from django.test import TestCase
from openassessment.assessment.serializers import validate_assessment_dict, validate_rubric_dict
@ddt.ddt
class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data):
success, msg = validate_assessment_dict(data['assessment'])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data):
success, msg = validate_assessment_dict(data['assessment'])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
@ddt.ddt
class RubricValidationTest(TestCase):
@ddt.file_data('data/valid_rubrics.json')
def test_valid_rubric(self, data):
success, msg = validate_rubric_dict(data['rubric'])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_rubrics.json')
def test_invalid_rubric(self, data):
success, msg = validate_rubric_dict(data['rubric'])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
"""An XBlock where students can read a question and compose their response"""
import datetime
import datetime as dt
import pkg_resources
import pytz
import dateutil.parser
from django.template.context import Context
......@@ -21,6 +22,8 @@ from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import update_from_xml
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates
DEFAULT_PROMPT = """
......@@ -131,14 +134,20 @@ module(s) associated with the XBlock.
"""
DEFAULT_PEER_ASSESSMENT = {
"name": "peer-assessment",
"start_datetime": datetime.datetime.now().isoformat(),
"start": None,
"due": None,
"must_grade": 5,
"must_be_graded_by": 3,
}
DEFAULT_SELF_ASSESSMENT = {
"due": None,
}
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
DEFAULT_SELF_ASSESSMENT,
]
......@@ -158,16 +167,19 @@ class OpenAssessmentBlock(
WorkflowMixin):
"""Displays a question and gives an area where students can compose a response."""
start_datetime = String(
default=datetime.datetime.now().isoformat(),
scope=Scope.content,
start = String(
default=None, scope=Scope.settings,
help="ISO-8601 formatted string representing the start date of this assignment."
)
due_datetime = String(
default=None,
scope=Scope.content,
help="ISO-8601 formatted string representing the end date of this assignment."
due = String(
default=None, scope=Scope.settings,
help="ISO-8601 formatted string representing the due date of this assignment."
)
submission_due = String(
default=None, scope=Scope.settings,
help="ISO-8601 formatted string representing the submission due date."
)
title = String(
......@@ -323,7 +335,8 @@ class OpenAssessmentBlock(
"""Recursively embed xblocks for nodes we don't recognize"""
block.runtime.add_node_as_child(block, child, id_generator)
block = runtime.construct_xblock_from_class(cls, keys)
return update_from_xml(block, node)
return update_from_xml(block, node, validator=validator(block.start, block.due))
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
......@@ -347,12 +360,12 @@ class OpenAssessmentBlock(
context_dict["xblock_trace"] = self.get_xblock_trace()
if self.start_datetime:
start = dateutil.parser.parse(self.start_datetime)
if self.start:
start = dateutil.parser.parse(self.start)
context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
if self.due_datetime:
due = dateutil.parser.parse(self.due_datetime)
if self.due:
due = dateutil.parser.parse(self.due)
context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
......@@ -374,11 +387,19 @@ class OpenAssessmentBlock(
template = get_template('openassessmentblock/oa_error.html')
return Response(template.render(context), content_type='application/html', charset='UTF-8')
def is_open(self):
"""Checks if the question is open.
def is_open(self, step=None):
"""
Checks if the question is open.
Determines if the start date has occurred and the end date has not
passed.
passed. Optionally limited to a particular step in the workflow.
Kwargs:
step (str): The step in the workflow to check. Options are:
None: check whether the problem as a whole is open.
"submission": check whether the submission section is open.
"peer-assessment": check whether the peer-assessment section is open.
"self-assessment": check whether the self-assessment section is open.
Returns:
(tuple): True if the question is open, False if not. If False,
......@@ -387,19 +408,42 @@ class OpenAssessmentBlock(
Examples:
>>> is_open()
True, None
>>> is_open(step="submission")
False, "due"
>>> is_open(step="self-assessment")
False, "start"
"""
# Is the question closed?
if self.start_datetime:
start = dateutil.parser.parse(self.start_datetime)
if start > datetime.datetime.utcnow():
return False, "start"
if self.due_datetime:
due = dateutil.parser.parse(self.due_datetime)
if due < datetime.datetime.utcnow():
return False, "due"
return True, None
submission_range = (self.start, self.submission_due)
assessment_ranges = [
(asmnt.get('start'), asmnt.get('due'))
for asmnt in self.rubric_assessments
]
# Resolve unspecified dates and date strings to datetimes
start, due, date_ranges = resolve_dates(self.start, self.due, [submission_range] + assessment_ranges)
# Based on the step, choose the date range to consider
# We hard-code this to the submission -> peer -> self workflow for now;
# later, we can revisit to make this more flexible.
open_range = (start, due)
if step == "submission":
open_range = date_ranges[0]
if step == "peer-assessment":
open_range = date_ranges[1]
if step == "self-assessment":
open_range = date_ranges[2]
# Check if we are in the open date range
now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
if now < open_range[0]:
return False, "start"
elif now >= open_range[1]:
return False, "due"
else:
return True, None
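As a sketch of how the mixins are expected to call this (`block` is a hypothetical OpenAssessmentBlock instance and the template names are illustrative, not part of this changeset):

# Usage sketch with hypothetical names.
step_open, reason = block.is_open(step="submission")
if not step_open:
    # `reason` is "start" before the window opens and "due" after it closes
    template = "closed_due.html" if reason == "due" else "not_yet_open.html"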
def update_workflow_status(self, submission_uuid):
assessment_ui_model = self.get_assessment_module('peer-assessment')
......
......@@ -135,7 +135,7 @@ class PeerAssessmentMixin(object):
else:
context_dict["submit_button_text"] = "Submit your assessment & move to response #{}".format(count + 2)
problem_open, date = self.is_open()
problem_open, date = self.is_open(step="peer-assessment")
if not problem_open and date == "due" and not finished:
path = 'openassessmentblock/peer/oa_peer_closed.html'
......
"""
Resolve unspecified dates and date strings to datetimes.
"""
import datetime as dt
import pytz
from dateutil.parser import parse as parse_date
from django.utils.translation import ugettext as _
class InvalidDateFormat(Exception):
"""
The date string could not be parsed.
"""
pass
class DateValidationError(Exception):
"""
Dates are not semantically valid.
"""
pass
DISTANT_PAST = dt.datetime(dt.MINYEAR, 1, 1, tzinfo=pytz.utc)
DISTANT_FUTURE = dt.datetime(dt.MAXYEAR, 1, 1, tzinfo=pytz.utc)
def _parse_date(date_string):
"""
Parse an ISO formatted datestring into a datetime object with timezone set to UTC.
Args:
date_string (str): The ISO formatted date string.
Returns:
datetime.datetime
Raises:
InvalidDateFormat: The date string could not be parsed.
"""
try:
return parse_date(date_string).replace(tzinfo=pytz.utc)
except ValueError:
raise InvalidDateFormat(_("Could not parse date '{date}'").format(date=date_string))
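One consequence of the replace() call worth noting (an observation about this implementation, not documented behavior): any explicit offset parsed from the string is overwritten, so these two calls resolve to the same UTC instant:

# Both resolve to 2014-03-01 00:00:00+00:00, because replace() overwrites
# whatever tzinfo dateutil parsed out of the string:
#   _parse_date("2014-03-01T00:00:00")
#   _parse_date("2014-03-01T00:00:00-07:00")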
def resolve_dates(start, end, date_ranges):
"""
Resolve date strings (including "default" dates) to datetimes.
The basic rules are:
1) Unset problem start dates default to the distant past.
2) Unset problem end dates default to the distant future.
3) Unset start dates default to the start date of the previous assessment/submission.
(The first submission defaults to the problem start date.)
4) Unset end dates default to the end date of the following assessment/submission.
(The last assessment defaults to the problem end date.)
Example:
Suppose I have a problem with a submission and two assessments:
| |
| |== submission ==| |== peer-assessment ==| |== self-assessment ==| |
| |
and I set start/due dates for the submission and self-assessment, but not for peer-assessment.
Then by default, peer-assessment will "expand":
| |
| |== submission ==| |== self-assessment ==| |
| |============================ peer-assessment ==========================| |
| |
If I then remove the due date for the submission, but add a due date for peer-assessment:
| |
| |== submission =============================| |== self-assessment ==| |
| |============== peer-assessment ============| |
| |
If no dates are set, start dates default to the distant past and end dates default
to the distant future:
| |
| |================= submission ==============| |
| |============== self-assessment ============| |
| |============== peer-assessment ============| |
| |
Args:
start (str, ISO date format): When the problem opens. A value of None indicates that the problem is always open.
end (str, ISO date format): When the problem closes. A value of None indicates that the problem never closes.
date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
the start/end timestamps of each submission/assessment.
Returns:
start (datetime): The resolved start date
end (datetime): The resolved end date.
list of (start, end) tuples, where both elements are datetime objects.
Raises:
DateValidationError
InvalidDateFormat
"""
# Resolve problem start and end dates to minimum and maximum dates
start = _parse_date(start) if start is not None else DISTANT_PAST
end = _parse_date(end) if end is not None else DISTANT_FUTURE
resolved_starts = []
resolved_ends = []
# Validate the problem start/end dates
if start >= end:
msg = _(u"Problem start date '{start}' cannot be later than the problem due date '{due}'.").format(
start=start, due=end
)
raise DateValidationError(msg)
# Iterate through the list forwards and backwards simultaneously
# As we iterate forwards, resolve start dates.
# As we iterate backwards, resolve end dates.
prev_start = start
prev_end = end
for index in range(len(date_ranges)):
reverse_index = len(date_ranges) - index - 1
# Resolve "default" start dates to the previous start date.
# If I set a start date for peer-assessment, but don't set a start date for the following self-assessment,
# then the self-assessment should default to the same start date as the peer-assessment.
step_start, __ = date_ranges[index]
step_start = _parse_date(step_start) if step_start is not None else prev_start
# Resolve "default" end dates to the following end date.
# If I set a due date for self-assessment, but don't set a due date for the previous peer-assessment,
# then the peer-assessment should default to the same due date as the self-assessment.
__, step_end = date_ranges[reverse_index]
step_end = _parse_date(step_end) if step_end is not None else prev_end
if step_start < prev_start:
msg = _(u"The start date '{start}' must be after the previous start date '{prev}'.").format(
start=step_start, prev=prev_start
)
raise DateValidationError(msg)
if step_end > prev_end:
msg = _(u"The due date '{due}' must be before the following due date '{prev}'.").format(
due=step_end, prev=prev_end
)
raise DateValidationError(msg)
resolved_starts.append(step_start)
resolved_ends.insert(0, step_end)
prev_start = step_start
prev_end = step_end
# Combine the resolved dates back into a list of tuples
resolved_ranges = zip(resolved_starts, resolved_ends)
# Now that we have resolved both start and end dates, we can safely compare them
for resolved_start, resolved_end in resolved_ranges:
if resolved_start >= resolved_end:
msg = _(u"Start date '{start}' cannot be later than the due date '{due}'").format(
start=resolved_start, due=resolved_end
)
raise DateValidationError(msg)
return start, end, resolved_ranges
\ No newline at end of file
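A usage sketch with hypothetical dates, showing the "expansion" rule from the docstring in code form:

# Usage sketch (hypothetical dates, not from this changeset): the peer step
# has no explicit dates, so it inherits the submission start date and the
# self-assessment due date.
start, end, ranges = resolve_dates(
    "2014-01-01", "2014-12-31",
    [
        ("2014-02-01", "2014-03-01"),  # submission
        (None, None),                  # peer-assessment
        ("2014-04-01", "2014-05-01"),  # self-assessment
    ]
)
# ranges[1] == (2014-02-01 00:00 UTC, 2014-05-01 00:00 UTC)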
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment start="2013-02-24T13:53:50" due="2040-02-24T13:53:50">
<openassessment>
<title>
Censorship in Public Libraries
</title>
......@@ -63,5 +63,6 @@
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment start="2013-02-24T13:53:50" due="2040-02-24T13:53:50">
<openassessment>
<title>
Global Poverty
</title>
......@@ -105,11 +105,11 @@
</criterion>
</rubric>
<assessments>
<assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
<assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<assessment name="self-assessment" due="2014-12-21T22:22-7:00" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -8,12 +8,8 @@ from django.template.loader import get_template
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from xblock.fragment import Fragment
from openassessment.xblock.xml import (
serialize_content, update_from_xml_str,
UpdateFromXmlError, InvalidRubricError
)
from openassessment.assessment.serializers import validate_assessment_dict, validate_rubric_dict
from openassessment.xblock.xml import serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
from openassessment.xblock.validation import validator
logger = logging.getLogger(__name__)
......@@ -58,14 +54,10 @@ class StudioMixin(object):
"""
if 'xml' in data:
try:
update_from_xml_str(
self, data['xml'],
rubric_validator=validate_rubric_dict,
assessment_validator=validate_assessment_dict
)
except InvalidRubricError:
return {'success': False, 'msg': _('Rubric definition was not valid.')}
update_from_xml_str(self, data['xml'], validator=validator(self.start, self.due))
except ValidationError as ex:
return {'success': False, 'msg': _('Validation error: {error}').format(error=ex.message)}
except UpdateFromXmlError as ex:
return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex.message)}
......
......@@ -172,7 +172,7 @@ class SubmissionMixin(object):
step_status = "Graded" if student_score else "Submitted"
step_status = step_status if student_submission else "Incomplete"
assessment_ui_model = self.get_assessment_module('peer-assessment')
problem_open, date = self.is_open()
problem_open, date = self.is_open(step="submission")
context = {
"student_submission": student_submission,
"student_score": student_score,
......
......@@ -43,6 +43,9 @@ def scenario(scenario_path, user_id=None):
self = args[0]
if isinstance(self, XBlockHandlerTestCase):
# Print a debug message
print "Loading scenario from {path}".format(path=scenario_path)
# Configure the runtime with our user id
self.set_user(user_id)
......
{
"peer_then_self": {
"valid": true,
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
]
},
"peer_only": {
"valid": false,
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"self_only": {
"valid": false,
"assessments": [
{
"name": "self-assessment"
}
]
},
"self_before_peer": {
"valid": false,
"assessments": [
{
"name": "self-assessment"
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"peer_then_peer": {
"valid": false,
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
]
}
}
......@@ -84,10 +84,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
start="2014-12-20T19:00"
due="2014-12-21T22:22"
must_grade="5"
must_be_graded_by="3" />
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
<openassessment submission_due="2014-04-01">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is it's form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" start="2015-01-02" due="2015-04-01"/>
<assessment name="self-assessment" start="2016-01-02" due="2016-04-01"/>
</assessments>
</openassessment>
......@@ -40,10 +40,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
start="2014-12-20T19:00"
due="2014-12-21T22:22"
must_grade="2"
must_be_graded_by="2" />
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -2,8 +2,8 @@
<title>Foo</title>
<assessments>
<!-- assessment name not supported -->
<assessment name="unsupported-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" />
<assessment name="unsupported-assessment" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is it's form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
{
"xblock_start_past_xblock_due": {
"xblock_start": 11,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"xblock_start_equals_xblock_due": {
"xblock_start": 10,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"submission_start_past_submission_due": {
"xblock_start": 0,
"submission_start": 3,
"submission_due": 2,
"peer_start": 4,
"peer_due": 5,
"self_start": 6,
"self_due": 7,
"xblock_due": 10
},
"submission_start_equals_submission_due": {
"xblock_start": 0,
"submission_start": 2,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"peer_start_past_peer_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 5,
"peer_due": 4,
"self_start": 6,
"self_due": 7,
"xblock_due": 10
},
"peer_start_equals_peer_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 4,
"peer_due": 4,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"self_start_past_self_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 7,
"self_due": 6,
"xblock_due": 10
},
"self_start_equals_self_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 6,
"self_due": 6,
"xblock_due": 10
},
"xblock_start_past_submission_start": {
"xblock_start": 2,
"submission_start": 1,
"submission_due": 3,
"peer_start": 4,
"peer_due": 5,
"self_start": 6,
"self_due": 7,
"xblock_due": 10
},
"submission_start_past_peer_start": {
"xblock_start": 0,
"submission_start": 4,
"submission_due": 2,
"peer_start": 3,
"peer_due": 5,
"self_start": 6,
"self_due": 7,
"xblock_due": 8
},
"peer_start_past_self_start": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 6,
"peer_due": 4,
"self_start": 5,
"self_due": 7,
"xblock_due": 10
},
"xblock_due_before_self_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 5,
"self_due": 7,
"xblock_due": 6
},
"self_due_before_peer_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 9,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"peer_due_before_submission_due": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 7,
"peer_start": 2,
"peer_due": 6,
"self_start": 8,
"self_due": 9,
"xblock_due": 10
},
"xblock_start_set_but_submission_start_none": {
"xblock_start": 0,
"submission_start": null,
"submission_due": 7,
"peer_start": 2,
"peer_due": 6,
"self_start": 8,
"self_due": 9,
"xblock_due": 10
},
"xblock_due_set_but_self_due_none": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 7,
"peer_start": 2,
"peer_due": 6,
"self_start": 8,
"self_due": null,
"xblock_due": 10
}
}
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is it's form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<!-- start date is after due date -->
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2010-01-01" due="2003-01-01"/>
</assessments>
</openassessment>
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" />
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
......
......@@ -41,5 +41,6 @@
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
{
"all_specified": {
"start": 0,
"end": 10,
"date_ranges": [
[1, 3],
[2, 4],
[3, 5]
],
"resolved_start": 0,
"resolved_end": 10,
"resolved_ranges": [
[1, 3],
[2, 4],
[3, 5]
]
},
"start_none": {
"start": null,
"end": 10,
"date_ranges": [
[0, 10],
[0, 10],
[0, 10]
],
"resolved_start": -1,
"resolved_end": 10,
"resolved_ranges": [
[0, 10],
[0, 10],
[0, 10]
]
},
"end_none": {
"start": 0,
"end": null,
"date_ranges": [
[0, 10],
[0, 10],
[0, 10]
],
"resolved_start": 0,
"resolved_end": 99,
"resolved_ranges": [
[0, 10],
[0, 10],
[0, 10]
]
},
"start_and_due_none": {
"start": null,
"end": null,
"date_ranges": [
[0, 10],
[0, 10],
[0, 10]
],
"resolved_start": -1,
"resolved_end": 99,
"resolved_ranges": [
[0, 10],
[0, 10],
[0, 10]
]
},
"default_to_start": {
"start": 0,
"end": 10,
"date_ranges": [
[null, 3],
[2, 4],
[3, 5]
],
"resolved_start": 0,
"resolved_end": 10,
"resolved_ranges": [
[0, 3],
[2, 4],
[3, 5]
]
},
"default_to_end": {
"start": 0,
"end": null,
"date_ranges": [
[1, 3],
[2, 4],
[3, null]
],
"resolved_start": 0,
"resolved_end": 99,
"resolved_ranges": [
[1, 3],
[2, 4],
[3, 99]
]
},
"default_to_prev_start": {
"start": 0,
"end": 10,
"date_ranges": [
[1, 3],
[null, 4],
[3, 5]
],
"resolved_start": 0,
"resolved_end": 10,
"resolved_ranges": [
[1, 3],
[1, 4],
[3, 5]
]
},
"default_to_next_end": {
"start": 0,
"end": 10,
"date_ranges": [
[1, 3],
[2, null],
[3, 5]
],
"resolved_start": 0,
"resolved_end": 10,
"resolved_ranges": [
[1, 3],
[2, 5],
[3, 5]
]
},
"none_specified": {
"start": null,
"end": null,
"date_ranges": [
[null, null],
[null, null],
[null, null]
],
"resolved_start": -1,
"resolved_end": 99,
"resolved_ranges": [
[-1, 99],
[-1, 99],
[-1, 99]
]
}
}
<openassessment submission_due="2014-04-01">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is it's form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2016-01-01" due="2016-05-02"/>
</assessments>
</openassessment>
......@@ -89,5 +89,6 @@
due="2014-12-21T22:22"
must_grade="5"
must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -40,6 +40,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -2,6 +2,9 @@
"simple": {
"title": "Foo",
"prompt": "Test prompt",
"start": null,
"due": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -26,17 +29,15 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
],
"expected_xml": [
......@@ -44,7 +45,7 @@
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
......@@ -62,6 +63,9 @@
"unicode": {
"title": "ƒσσ",
"prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
"start": null,
"due": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -86,8 +90,8 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -96,7 +100,7 @@
"<openassessment>",
"<title>ƒσσ</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Ṫëṡẗ ṗṛöṁṗẗ</prompt>",
......@@ -114,6 +118,9 @@
"reverse_option_order": {
"title": "Foo",
"prompt": "Test prompt",
"start": null,
"due": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -138,8 +145,8 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-06-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -166,6 +173,9 @@
"reverse_criteria_order": {
"title": "Foo",
"prompt": "Test prompt",
"start": null,
"due": null,
"submission_due": null,
"criteria": [
{
"order_num": 2,
......@@ -203,8 +213,8 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-06-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -236,6 +246,9 @@
"default_dates": {
"title": "Foo",
"prompt": "Test prompt",
"start": null,
"due": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -260,15 +273,15 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": null,
"due_datetime": "2014-03-01T00:00:00",
"start": null,
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": null,
"start": "2014-04-01T00:00:00",
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -291,5 +304,68 @@
"</rubric>",
"</openassessment>"
]
},
"set_dates": {
"title": "Foo",
"prompt": "Test prompt",
"start": "2010-04-01T00:00:00",
"due": "2030-05-01T00:00:00",
"submission_due": "2020-04-15T00:00:00",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"expected_xml": [
"<openassessment submission_due=\"2020-04-15T00:00:00\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
}
}
......@@ -5,7 +5,7 @@
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"1\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
......@@ -20,6 +20,9 @@
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -44,17 +47,15 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start_datetime": "2014-04-01T00:00:00",
"due_datetime": "2014-06-01T00:00:00",
"must_grade": 2,
"must_be_graded_by": 1
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
]
},
......@@ -78,6 +79,9 @@
"</openassessment>"
],
"title": "िѻѻ",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_due": null,
"prompt": "ՇєรՇ קг๏๓קՇ",
"criteria": [
{
......@@ -103,8 +107,8 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -136,6 +140,9 @@
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -173,8 +180,8 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28",
"due_datetime": "2014-03-01T00:00:00",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
......@@ -201,6 +208,9 @@
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_due": null,
"criteria": [
{
"order_num": 0,
......@@ -225,12 +235,66 @@
"assessments": [
{
"name": "peer-assessment",
"start_datetime": null,
"due_datetime": null,
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
}
},
"submission_due": {
"xml": [
"<openassessment submission_due=\"2014-01-01T00:00:00\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "Foo",
"prompt": "Test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_due": "2014-01-01T00:00:00",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
}
]
}
}
......@@ -164,6 +164,25 @@
]
},
"invalid_submission_due_date": {
"xml": [
"<openassessment submission_due=\"non-date\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-03-01T00:00:00\" must_grade=\"2\" must_be_graded_by=\"5\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"5\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"missing_rubric_prompt": {
"xml": [
"<openassessment>",
......
<openassessment>
<title>Foo</title>
<assessments>
<assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" />
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
<rubric>
<prompt>Test prompt</prompt>
......
{
"overlap": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 3,
"peer_start": 2,
"peer_due": 4,
"self_start": 3,
"self_due":5,
"xblock_due": 10
},
"adjacent": {
"xblock_start": 0,
"submission_start": 0,
"submission_due": 1,
"peer_start": 1,
"peer_due": 2,
"self_start": 2,
"self_due":3,
"xblock_due": 3
},
"distinct": {
"xblock_start": 0,
"submission_start": 1,
"submission_due": 2,
"peer_start": 3,
"peer_due": 4,
"self_start": 5,
"self_due": 6,
"xblock_due": 10
},
"starts_equal": {
"xblock_start": 0,
"submission_start": 0,
"submission_due": 2,
"peer_start": 0,
"peer_due": 3,
"self_start": 0,
"self_due": 4,
"xblock_due": 5
},
"dues_equal": {
"xblock_start": 0,
"xblock_due": 10,
"submission_start": 1,
"submission_due": 10,
"peer_start": 2,
"peer_due": 10,
"self_start": 3,
"self_due": 10
},
"starts_and_dues_equal": {
"xblock_start": 0,
"xblock_due": 10,
"submission_start": 0,
"submission_due": 10,
"peer_start": 0,
"peer_due": 10,
"self_start": 0,
"self_due": 10
},
"xblock_start_none": {
"xblock_start": null,
"xblock_due": 10,
"submission_start": 0,
"submission_due": 10,
"peer_start": 0,
"peer_due": 10,
"self_start": 0,
"self_due": 10
},
"xblock_due_none": {
"xblock_start": 0,
"xblock_due": null,
"submission_start": 0,
"submission_due": 10,
"peer_start": 0,
"peer_due": 10,
"self_start": 0,
"self_due": 10
},
"xblock_start_and_due_none": {
"xblock_start": null,
"xblock_due": null,
"submission_start": 0,
"submission_due": 10,
"peer_start": 0,
"peer_due": 10,
"self_start": 0,
"self_due": 10
},
"all_start_and_due_none": {
"xblock_start": null,
"xblock_due": null,
"submission_start": null,
"submission_due": null,
"peer_start": null,
"peer_due": null,
"self_start": null,
"self_due": null
}
}
......@@ -2,10 +2,12 @@
Tests the Open Assessment XBlock functionality.
"""
import json
import datetime
import datetime as dt
import pytz
from mock import patch
from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.xblock.submission_mixin import SubmissionMixin
from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError
......@@ -87,29 +89,182 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(grade_response)
self.assertTrue(grade_response.body.find("openassessment__grade"))
class TestDates(XBlockHandlerTestCase):
@scenario('data/basic_scenario.xml')
def test_start_end_date_checks(self, xblock):
xblock.start = dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc).isoformat()
xblock.due = dt.datetime(2014, 3, 5).replace(tzinfo=pytz.utc).isoformat()
self.assert_is_open(
xblock,
dt.datetime(2014, 2, 28, 23, 59, 59),
None, False, "start"
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1),
None, True, None
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 4, 23, 59, 59),
None, True, None
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 5, 1, 1, 1),
None, False, "due"
)
@scenario('data/dates_scenario.xml')
def test_submission_dates(self, xblock):
# Scenario defines submission due at 2014-04-01
xblock.start = dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc).isoformat()
xblock.due = None
self.assert_is_open(
xblock,
dt.datetime(2014, 2, 28, 23, 59, 59).replace(tzinfo=pytz.utc),
"submission", False, "start"
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"submission", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"submission", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2014, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"submission", False, "due"
)
@scenario('data/dates_scenario.xml')
def test_peer_assessment_dates(self, xblock):
# Scenario defines peer assessment open from 2015-01-02 to 2015-04-01
xblock.start = None
xblock.due = None
self.assert_is_open(
xblock,
dt.datetime(2015, 1, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", False, "start"
)
self.assert_is_open(
xblock,
dt.datetime(2015, 1, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2015, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2015, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", False, "due"
)
@scenario('data/dates_scenario.xml')
def test_self_assessment_dates(self, xblock):
# Scenario defines self assessment open from 2016-01-02 to 2016-04-01
xblock.start = None
xblock.due = None
self.assert_is_open(
xblock,
dt.datetime(2016, 1, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"self-assessment", False, "start"
)
self.assert_is_open(
xblock,
dt.datetime(2016, 1, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"self-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2016, 3, 31, 23, 59, 59).replace(tzinfo=pytz.utc),
"self-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2016, 4, 1, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"self-assessment", False, "due"
)
@scenario('data/resolve_dates_scenario.xml')
def test_resolve_dates(self, xblock):
# Peer-assessment does not have dates specified, so it should resolve
# to the previous start (problem start time)
# and following due date (self-assessment, at 2016-05-02)
xblock.start = dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc).isoformat()
xblock.due = None
self.assert_is_open(
xblock,
dt.datetime(2014, 2, 28, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", False, "start"
)
self.assert_is_open(
xblock,
dt.datetime(2014, 3, 1, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2016, 5, 1, 23, 59, 59).replace(tzinfo=pytz.utc),
"peer-assessment", True, None
)
self.assert_is_open(
xblock,
dt.datetime(2016, 5, 2, 1, 1, 1).replace(tzinfo=pytz.utc),
"peer-assessment", False, "due"
)
def assert_is_open(self, xblock, now, step, expected_is_open, expected_reason):
"""
Check if the start and end date checks work appropriately.
Assert whether the XBlock step is open/closed.
Args:
xblock (OpenAssessmentBlock): The xblock under test.
now (datetime): Time to patch for the xblock's call to datetime.utcnow()
step (str): The step in the workflow (e.g. "submission", "self-assessment")
expected_is_open (bool): Do we expect the step to be open or closed?
expected_reason (str): Either "start", "due", or None.
Raises:
AssertionError
"""
now = datetime.datetime.utcnow()
past = now - datetime.timedelta(minutes = 10)
future = now + datetime.timedelta(minutes = 10)
way_future = now + datetime.timedelta(minutes = 20)
xblock.start_datetime = past.isoformat()
xblock.due_datetime = past.isoformat()
problem_open, reason = xblock.is_open()
self.assertFalse(problem_open)
self.assertEqual("due", reason)
xblock.start_datetime = past.isoformat()
xblock.due_datetime = future.isoformat()
problem_open, reason = xblock.is_open()
self.assertTrue(problem_open)
self.assertEqual(None, reason)
xblock.start_datetime = future.isoformat()
xblock.due_datetime = way_future.isoformat()
problem_open, reason = xblock.is_open()
self.assertFalse(problem_open)
self.assertEqual("start", reason)
# Need some non-conventional setup to patch datetime because it's a C module.
# http://nedbatchelder.com/blog/201209/mocking_datetimetoday.html
# Thanks Ned!
datetime_patcher = patch.object(openassessmentblock, 'dt', Mock(wraps=dt))
mocked_datetime = datetime_patcher.start()
self.addCleanup(datetime_patcher.stop)
mocked_datetime.datetime.utcnow.return_value = now
is_open, reason = xblock.is_open(step=step)
self.assertEqual(is_open, expected_is_open)
self.assertEqual(reason, expected_reason)
\ No newline at end of file
"""
Test resolving unspecified dates and date strings to datetimes.
"""
import datetime
import pytz
from django.test import TestCase
import ddt
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
@ddt.ddt
class ResolveDatesTest(TestCase):
def setUp(self):
# Construct a dictionary of datetimes for our test data to index
self.DATES = {
(day - 1): datetime.datetime(2014, 1, day).replace(tzinfo=pytz.UTC)
for day in range(1, 15)
}
self.DATES[-1] = DISTANT_PAST
self.DATES[99] = DISTANT_FUTURE
# Construct a dictionary of ISO-formatted date strings for our test data to index
self.DATE_STRINGS = {key: val.isoformat() for key, val in self.DATES.iteritems()}
self.DATE_STRINGS[None] = None
@ddt.file_data('data/resolve_dates.json')
def test_resolve_dates(self, data):
# Test data provides indices into our date dictionaries
resolved_start, resolved_end, resolved_ranges = resolve_dates(
self.DATE_STRINGS[data['start']],
self.DATE_STRINGS[data['end']],
[
(self.DATE_STRINGS[start], self.DATE_STRINGS[end])
for start, end in tuple(data['date_ranges'])
]
)
self.assertEqual(resolved_start, self.DATES[data['resolved_start']])
self.assertEqual(resolved_end, self.DATES[data['resolved_end']])
self.assertEqual(
resolved_ranges,
[
(self.DATES[start], self.DATES[end])
for start, end in tuple(data['resolved_ranges'])
]
)
\ No newline at end of file
......@@ -65,6 +65,23 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success'])
self.assertIn('xml', resp['msg'].lower())
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_date_format(self, xblock):
request = json.dumps({'xml': self.load_fixture_str('data/invalid_dates.xml')})
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("cannot be later than", resp['msg'].lower())
# Test that we enforce that there are exactly two assessments,
# peer ==> self
# If and when we remove this restriction, this test can be deleted.
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_assessment_combo(self, xblock):
request = json.dumps({'xml': self.load_fixture_str('data/invalid_assessment_combo.xml')})
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("must have exactly two assessments", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data):
......
"""
Test OpenAssessment XBlock validation.
"""
from datetime import datetime as dt
import pytz
import ddt
from django.test import TestCase
from openassessment.xblock.validation import validate_assessments, validate_rubric, validate_dates
@ddt.ddt
class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data):
success, msg = validate_assessments([data['assessment']])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data):
success, msg = validate_assessments([data['assessment']])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
def test_no_assessments(self):
success, msg = validate_assessments([])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
# Currently, we enforce the restriction that there must be
# exactly two assessments, in the order (a) peer, then (b) self.
# If and when we remove that restriction, this test can be deleted.
@ddt.file_data('data/assessment_combo.json')
def test_enforce_peer_then_self(self, data):
success, msg = validate_assessments(data['assessments'], enforce_peer_then_self=True)
self.assertEqual(success, data['valid'], msg=msg)
if not success:
self.assertGreater(len(msg), 0)
@ddt.ddt
class RubricValidationTest(TestCase):
@ddt.file_data('data/valid_rubrics.json')
def test_valid_rubric(self, data):
success, msg = validate_rubric(data['rubric'])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_rubrics.json')
def test_invalid_rubric(self, data):
success, msg = validate_rubric(data['rubric'])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
@ddt.ddt
class DateValidationTest(TestCase):
    def setUp(self):
        # Map indices 0..13 to ISO-formatted UTC dates (Jan 1 through Jan 14, 2014).
        # None maps to None so a fixture can leave a date unspecified.
        self.DATES = {
            (day - 1): dt(2014, 1, day).replace(tzinfo=pytz.UTC).isoformat()
            for day in range(1, 15)
        }
        self.DATES[None] = None
@ddt.file_data('data/valid_dates.json')
def test_valid_dates(self, data):
# Input data dict specifies the index for each date
date = lambda key: self.DATES[data[key]]
# This lambda is a convenience to map these dates to (start, due) tuples
date_range = lambda start_key, due_key: (date(start_key), date(due_key))
success, msg = validate_dates(
date('xblock_start'), date('xblock_due'),
[
date_range('submission_start', 'submission_due'),
date_range('peer_start', 'peer_due'),
date_range('self_start', 'self_due'),
]
)
self.assertTrue(success, msg=msg)
self.assertEqual(msg, u'')
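For reference, a sketch of one valid case: the keys are taken from the test body, and the integer values index into self.DATES (the values here are illustrative, not the actual fixture contents).

# Hypothetical fixture case: each step opens and closes inside the
# problem's overall window (0 == 2014-01-01, 13 == 2014-01-14).
data = {
    "xblock_start": 0, "xblock_due": 13,
    "submission_start": 1, "submission_due": 2,
    "peer_start": 3, "peer_due": 4,
    "self_start": 5, "self_due": 6,
}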
@ddt.file_data('data/invalid_dates.json')
def test_invalid_dates(self, data):
# Input data dict specifies the index for each date
date = lambda key: self.DATES[data[key]]
# This lambda is a convenience to map these dates to (start, due) tuples
date_range = lambda start_key, due_key: (date(start_key), date(due_key))
success, msg = validate_dates(
date('xblock_start'), date('xblock_due'),
[
date_range('submission_start', 'submission_due'),
date_range('peer_start', 'peer_due'),
date_range('self_start', 'self_due'),
]
)
self.assertFalse(success)
self.assertGreater(len(msg), 0)
def test_invalid_date_format(self):
valid = dt(2014, 1, 1).replace(tzinfo=pytz.UTC).isoformat()
success, _ = validate_dates("invalid", valid, [(valid, valid)])
self.assertFalse(success)
success, _ = validate_dates(valid, "invalid", [(valid, valid)])
self.assertFalse(success)
success, _ = validate_dates(valid, valid, [("invalid", valid)])
self.assertFalse(success)
success, _ = validate_dates(valid, valid, [(valid, "invalid")])
self.assertFalse(success)
......@@ -8,8 +8,7 @@ from django.test import TestCase
from ddt import ddt, data, file_data, unpack
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock, UI_MODELS
from openassessment.xblock.xml import (
serialize_content, update_from_xml_str,
UpdateFromXmlError, InvalidRubricError, InvalidAssessmentError
serialize_content, update_from_xml_str, ValidationError, UpdateFromXmlError
)
......@@ -37,15 +36,15 @@ class TestSerializeContent(TestCase):
BASIC_ASSESSMENTS = [
{
"name": "peer-assessment",
"start_datetime": "2014-02-27T09:46:28.873926",
"due_datetime": "2014-05-30T00:00:00.92926",
"start": "2014-02-27T09:46:28.873926",
"due": "2014-05-30T00:00:00.92926",
"must_grade": 5,
"must_be_graded_by": 3,
},
{
"name": "self-assessment",
"start_datetime": '2014-04-01T00:00:00.000000',
"due_datetime": "2014-06-01T00:00:00.92926",
"start": '2014-04-01T00:00:00.000000',
"due": "2014-06-01T00:00:00.92926",
"must_grade": 5,
"must_be_graded_by": 3,
}
......@@ -61,6 +60,9 @@ class TestSerializeContent(TestCase):
def test_serialize(self, data):
self.oa_block.title = data['title']
self.oa_block.prompt = data['prompt']
self.oa_block.start = data['start']
self.oa_block.due = data['due']
self.oa_block.submission_due = data['submission_due']
self.oa_block.rubric_criteria = data['criteria']
self.oa_block.rubric_assessments = data['assessments']
xml = serialize_content(self.oa_block)
......@@ -81,7 +83,6 @@ class TestSerializeContent(TestCase):
pretty_expected = etree.tostring(parsed_expected, pretty_print=True, encoding='utf-8')
parsed_expected = etree.fromstring(pretty_expected)
# Walk both trees, comparing elements and attributes
actual_elements = [el for el in parsed_actual.getiterator()]
expected_elements = [el for el in parsed_expected.getiterator()]
......@@ -95,16 +96,23 @@ class TestSerializeContent(TestCase):
self.assertEqual(actual.tag, expected.tag)
self.assertEqual(
actual.text, expected.text,
msg="Incorrect text for {tag}".format(tag=actual.tag)
msg=u"Incorrect text for {tag}. Expected '{expected}' but found '{actual}'".format(
tag=actual.tag, expected=expected.text, actual=actual.text
)
)
self.assertItemsEqual(
actual.items(), expected.items(),
msg="Incorrect attributes for {tag}".format(tag=actual.tag)
msg=u"Incorrect attributes for {tag}. Expected {expected} but found {actual}".format(
tag=actual.tag, expected=expected.items(), actual=actual.items()
)
)
def test_mutated_criteria_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
self.oa_block.start = None
self.oa_block.due = None
self.oa_block.submission_due = None
# We have to be really permissive with the data we'll accept.
# If the data we're retrieving is somehow corrupted,
......@@ -126,6 +134,9 @@ class TestSerializeContent(TestCase):
def test_mutated_assessments_dict(self):
self.oa_block.title = "Test title"
self.oa_block.rubric_criteria = self.BASIC_CRITERIA
self.oa_block.start = None
self.oa_block.due = None
self.oa_block.submission_due = None
for assessment_dict in self.BASIC_ASSESSMENTS:
for mutated_dict in self._dict_mutations(assessment_dict):
......@@ -138,10 +149,13 @@ class TestSerializeContent(TestCase):
msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
self.fail(msg)
@data("title", "prompt")
@data("title", "prompt", "start", "due", "submission_due")
def test_mutated_field(self, field):
self.oa_block.rubric_criteria = self.BASIC_CRITERIA
self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
self.oa_block.start = None
self.oa_block.due = None
self.oa_block.submission_due = None
for mutated_value in [0, u"\u9282", None]:
setattr(self.oa_block, field, mutated_value)
......@@ -246,6 +260,7 @@ class TestSerializeContent(TestCase):
mutated[key] = new_val
return mutated
@ddt
class TestUpdateFromXml(TestCase):
"""
......@@ -263,6 +278,10 @@ class TestUpdateFromXml(TestCase):
self.oa_block.rubric_criteria = dict()
self.oa_block.rubric_assessments = list()
self.oa_block.start = "2000-01-01T00:00:00"
self.oa_block.due = "3000-01-01T00:00:00"
self.oa_block.submission_due = "2000-01-01T00:00:00"
@file_data('data/update_from_xml.json')
def test_update_from_xml(self, data):
......@@ -275,6 +294,9 @@ class TestUpdateFromXml(TestCase):
# Check that the contents of the modified XBlock are correct
self.assertEqual(self.oa_block.title, data['title'])
self.assertEqual(self.oa_block.prompt, data['prompt'])
self.assertEqual(self.oa_block.start, data['start'])
self.assertEqual(self.oa_block.due, data['due'])
self.assertEqual(self.oa_block.submission_due, data['submission_due'])
self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
......@@ -284,21 +306,12 @@ class TestUpdateFromXml(TestCase):
update_from_xml_str(self.oa_block, "".join(data['xml']))
@file_data('data/update_from_xml.json')
def test_invalid_rubric(self, data):
def test_invalid(self, data):
        # Plug in a validator that always reports that the content is invalid.
        # We need to back this up with an integration test that checks whether
        # the XBlock provides an appropriate validator.
with self.assertRaises(InvalidRubricError):
update_from_xml_str(
self.oa_block, "".join(data['xml']),
rubric_validator=lambda _: (False, '')
)
@file_data('data/update_from_xml.json')
def test_invalid_assessment(self, data):
# Plug in an assessment validator that always reports that the assessment dict is invalid.
with self.assertRaises(InvalidAssessmentError):
with self.assertRaises(ValidationError):
update_from_xml_str(
self.oa_block, "".join(data['xml']),
assessment_validator=lambda _: (False, '')
validator=lambda *args: (False, '')
)
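Judging from the stub above and the validator() factory defined below in validation.py, the validator is a callable that takes the parsed rubric, submission, and assessment dicts and returns a (success, message) tuple. A minimal permissive stub, for illustration:

# Minimal sketch of a validator matching the signature used above
def accept_everything(rubric_dict, submission_dict, assessments):
    """Report any parsed content as valid."""
    return (True, u'')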
"""
"""
from django.utils.translation import ugettext as _
from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric
from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat
def validate_assessments(assessments, enforce_peer_then_self=False):
"""
    Check that each assessment dict in the list is semantically valid.
Args:
assessments (list of dict): list of serialized assessment models.
Kwargs:
enforce_peer_then_self (bool): If True, enforce the requirement that there
must be exactly two assessments: first, a peer-assessment, then a self-assessment.
Returns:
tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the assessments are semantically valid
and msg describes any validation errors found.
"""
if enforce_peer_then_self:
if len(assessments) != 2:
return (False, _("Problem must have exactly two assessments"))
if assessments[0].get('name') != 'peer-assessment':
return (False, _("The first assessment must be a peer-assessment"))
if assessments[1].get('name') != 'self-assessment':
return (False, _("The second assessment must be a self-assessment"))
if len(assessments) == 0:
return (False, _("Problem must include at least one assessment"))
for assessment_dict in assessments:
# Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
return (False, _("Assessment type is not supported"))
# Number you need to grade is >= the number of people that need to grade you
if assessment_dict.get('name') == 'peer-assessment':
must_grade = assessment_dict.get('must_grade')
must_be_graded_by = assessment_dict.get('must_be_graded_by')
if must_grade is None or must_grade < 1:
return (False, _('"must_grade" must be a positive integer'))
if must_be_graded_by is None or must_be_graded_by < 1:
return (False, _('"must_be_graded_by" must be a positive integer'))
if must_grade < must_be_graded_by:
return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))
return (True, u'')
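For illustration, a call that should pass under the rules above; the assessment dicts here are minimal examples, not fixtures from this repo.

# Example: exactly two assessments, peer first, with must_grade >= must_be_graded_by
assessments = [
    {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
    {"name": "self-assessment"},
]
success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
# success is True and msg is u''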
def validate_rubric(rubric_dict):
"""
Check that the rubric is semantically valid.
Args:
rubric_dict (dict): Serialized Rubric model
Returns:
tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the rubric is semantically valid
and msg describes any validation errors found.
"""
try:
rubric_from_dict(rubric_dict)
except InvalidRubric:
        return (False, _(u'Rubric definition is not valid'))
else:
return (True, u'')
def validate_dates(start, end, date_ranges):
"""
Check that start and due dates are valid.
Args:
start (str): ISO-formatted date string indicating when the problem opens.
end (str): ISO-formatted date string indicating when the problem closes.
        date_ranges (list of tuples): List of (start, end) pairs, one for the submission and one for each assessment.
Returns:
tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the dates are semantically valid
and msg describes any validation errors found.
"""
try:
resolve_dates(start, end, date_ranges)
except (DateValidationError, InvalidDateFormat) as ex:
return (False, ex.message)
else:
return (True, u'')
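A quick illustration of the contract; the dates are arbitrary ISO strings chosen for this example.

# Example: the assessment window falls inside the problem window, so this
# should return (True, u''); reversing a range's start and end should fail.
success, msg = validate_dates(
    "2014-01-01T00:00:00", "2014-02-01T00:00:00",
    [("2014-01-02T00:00:00", "2014-01-15T00:00:00")]
)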
def validator(start, due):
"""
    Return a validator function configured with the problem's start and due dates.
This will validate assessments, rubrics, and dates.
Args:
start (str): ISO-formatted date string indicating when the problem opens.
        due (str): ISO-formatted date string indicating when the problem closes.
Returns:
callable, of a form that can be passed to `update_from_xml`.
"""
    def _inner(rubric_dict, submission_dict, assessments):
        success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
        if not success:
            return (False, msg)

        success, msg = validate_rubric(rubric_dict)
        if not success:
            return (False, msg)

        # Submissions open when the problem opens, so pair the problem's
        # start date with the submission due date.
        submission_dates = [(start, submission_dict['due'])]
        assessment_dates = [(asmnt['start'], asmnt['due']) for asmnt in assessments]
        success, msg = validate_dates(start, due, submission_dates + assessment_dates)
        if not success:
            return (False, msg)

        return (True, u'')
    return _inner
\ No newline at end of file
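Putting it together, a sketch of how the factory might be wired up; rubric_dict, submission_dict, and assessments stand for the dicts produced by XML parsing, and the names here are hypothetical.

# Bind the problem's window, then validate the parsed content in one call.
check = validator("2014-01-01T00:00:00", "2014-03-01T00:00:00")
success, msg = check(rubric_dict, submission_dict, assessments)
if not success:
    print(msg)  # e.g. 'Problem must have exactly two assessments'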
......@@ -32,7 +32,6 @@ ANSWER_TWO = u"this is my other answer!"
@ddt
class TestSubmissionsApi(TestCase):
"""
Testing Submissions
"""
......