Commit 468cf9db by Stephen Sanchez

Code Review changes

parent 4f428554
@@ -223,6 +223,11 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     values, the average of those two values is returned, rounded up to the
     greatest integer value.
 
+    If OverGrading occurs, the 'must_be_graded_by' parameter is the number of
+    assessments we want to use to calculate the median values. If this limit is
+    less than the total number of assessments available, the earliest
+    assessments are used.
+
     Args:
         submission_id (str): The submission uuid to get all rubric criterion
             median scores.
@@ -241,7 +246,9 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     # found in an assessment.
     try:
         submission = Submission.objects.get(uuid=submission_id)
-        assessments = Assessment.objects.filter(submission=submission)[:must_be_graded_by]
+        assessments = Assessment.objects.filter(
+            submission=submission
+        ).order_by("scored_at")[:must_be_graded_by]
     except DatabaseError:
         error_message = (
             u"Error getting assessment median scores {}".format(submission_id)
@@ -249,10 +256,14 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
 
+    # Iterate over every part of every assessment. Each part is associated with
+    # a criterion name, which becomes a key in the score dictionary, with a list
+    # of scores. These collected lists of scores are used to find a median value
+    # per criterion.
     scores = {}
     median_scores = {}
     for assessment in assessments:
-        for part in AssessmentPart.objects.filter(assessment=assessment):
+        for part in assessment.parts.all():
             criterion_name = part.option.criterion.name
             if criterion_name not in scores:
                 scores[criterion_name] = []
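A note on the hunks above: ordering by "scored_at" before slicing makes the [:must_be_graded_by] limit deterministic, so the earliest assessments are the ones used, as the new docstring says. The median rule itself (with an even number of values, average the two middle ones and round up to the greatest integer) can be sketched roughly as follows; the _median helper and the sample data are illustrative, not part of this commit:

    import math

    def _median(values):
        # With an even count, average the two middle values and round up
        # to the greatest integer, per the docstring above.
        values = sorted(values)
        mid = len(values) // 2
        if len(values) % 2:
            return values[mid]
        return int(math.ceil((values[mid - 1] + values[mid]) / 2.0))

    # Per-criterion score lists, as collected from the assessment parts.
    scores = {"Spelling": [2, 4, 4], "Grammar": [1, 3]}
    median_scores = {name: _median(vals) for name, vals in scores.items()}
    print(median_scores)  # {'Spelling': 4, 'Grammar': 2}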
@@ -10,6 +10,7 @@ from openassessment.peer.models import (
     Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
 )
 
+
 class InvalidRubric(Exception):
     """This can be raised during the deserialization process."""
     def __init__(self, errors):
@@ -146,6 +147,40 @@ def get_assessment_review(submission):
         (list): A list of assessment reviews, combining assessments with
             rubrics and assessment parts, to allow a cohesive object for
             rendering the complete peer grading workflow.
 
+    Examples:
+        >>> get_assessment_review(submission)
+        {
+            'submission': 1,
+            'rubric': {
+                'id': 1,
+                'content_hash': u'45cc932c4da12a1c2b929018cd6f0785c1f8bc07',
+                'criteria': [{
+                    'order_num': 0,
+                    'name': u'Spelling',
+                    'prompt': u'Did the student have spelling errors?',
+                    'options': [{
+                        'order_num': 0,
+                        'points': 2,
+                        'name': u'No spelling errors',
+                        'explanation': u'No spelling errors were found in this submission.',
+                    }]
+                }]
+            },
+            'scored_at': datetime.datetime(2014, 2, 25, 19, 50, 7, 290464, tzinfo=<UTC>),
+            'scorer_id': u'Bob',
+            'score_type': u'PE',
+            'parts': [{
+                'option': {
+                    'order_num': 0,
+                    'points': 2,
+                    'name': u'No spelling errors',
+                    'explanation': u'No spelling errors were found in this submission.'}
+            }],
+            'submission_uuid': u'0a600160-be7f-429d-a853-1283d49205e7',
+            'points_earned': 9,
+            'points_possible': 20,
+        }
+
     """
     reviews = []
     assessments = Assessment.objects.filter(submission=submission)
@@ -154,7 +189,7 @@ def get_assessment_review(submission):
         rubric_dict = RubricSerializer(assessment.rubric).data
         assessment_dict["rubric"] = rubric_dict
         parts = []
-        for part in AssessmentPart.objects.filter(assessment=assessment):
+        for part in assessment.parts.all():
             part_dict = AssessmentPartSerializer(part).data
             options_dict = CriterionOptionSerializer(part.option).data
             criterion_dict = CriterionSerializer(part.option.criterion).data
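Both rewritten loops (here and in the median-score hunk) replace AssessmentPart.objects.filter(assessment=assessment) with assessment.parts.all(), which reads through the reverse foreign-key relation. That only resolves if the AssessmentPart model declares the relation with related_name="parts". A minimal sketch of that assumption (field details are hypothetical; the real definitions live in openassessment.peer.models):

    from django.db import models

    class Assessment(models.Model):
        pass  # actual fields omitted

    class AssessmentPart(models.Model):
        # related_name="parts" is what makes assessment.parts.all() work;
        # without it, the reverse accessor would be assessmentpart_set.
        assessment = models.ForeignKey(
            Assessment, related_name="parts", on_delete=models.CASCADE
        )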
@@ -20,7 +20,9 @@
 <!--header class="step__header ui-toggle-visibility__control"-->
 <h2 class="step__title">
     <span class="step__label">Your Response</span>
+    {% if formatted_due_datetime %}
     <span class="step__deadline">due <span class="date">{{ formatted_due_datetime }}</span></span>
+    {% endif %}
 </h2>
 <span class="step__status">
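The template hunk wraps the deadline markup in an {% if %} guard so nothing renders when formatted_due_datetime is absent from the context, matching the Python change below that only sets the key when self.due_datetime exists. One way to check the guard's behavior in isolation (a hypothetical harness, not part of the commit):

    import django
    from django.conf import settings

    settings.configure(TEMPLATES=[
        {"BACKEND": "django.template.backends.django.DjangoTemplates"}
    ])
    django.setup()

    from django.template import Context, Template

    snippet = Template(
        '{% if formatted_due_datetime %}'
        'due <span class="date">{{ formatted_due_datetime }}</span>'
        '{% endif %}'
    )
    # Key present: the deadline markup renders.
    print(snippet.render(Context({"formatted_due_datetime": "Friday, March 07, 2014"})))
    # Key absent: the guard suppresses the markup entirely.
    print(snippet.render(Context({})))  # prints an empty string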
@@ -155,6 +155,9 @@ DEFAULT_ASSESSMENT_MODULES = [
     DEFAULT_PEER_ASSESSMENT,
 ]
 
+# Used to parse datetime strings from the XML configuration.
+TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
 
 def load(path):
     """Handy helper for getting resources from our kit."""
@@ -378,14 +381,16 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         if not context_dict:
             context_dict = {}
 
-        start = datetime.datetime.strptime(self.start_datetime, "%Y-%m-%dT%H:%M:%S")
-        due = datetime.datetime.strptime(self.due_datetime, "%Y-%m-%dT%H:%M:%S")
         context_dict["xblock_trace"] = self.get_xblock_trace()
 
-        context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
-        context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
-        context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
-        context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
+        if self.start_datetime:
+            start = datetime.datetime.strptime(self.start_datetime, TIME_PARSE_FORMAT)
+            context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
+            context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
+        if self.due_datetime:
+            due = datetime.datetime.strptime(self.due_datetime, TIME_PARSE_FORMAT)
+            context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
+            context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
 
         template = get_template(path)
         context = Context(context_dict)
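Besides switching to the shared TIME_PARSE_FORMAT constant, the guarded version avoids calling strptime on None when no start or due date is configured. For reference, TIME_PARSE_FORMAT parses ISO-like timestamps from the XML configuration, and the strftime calls reformat them for display (%X is the locale's time representation). A small sketch with a made-up timestamp:

    import datetime

    TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"  # as defined in the hunk above

    due = datetime.datetime.strptime("2014-03-07T23:59:59", TIME_PARSE_FORMAT)
    print(due.strftime("%A, %B %d, %Y"))     # Friday, March 07, 2014
    print(due.strftime("%A, %B %d, %Y %X"))  # e.g. Friday, March 07, 2014 23:59:59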