Commit e58391d8 by Stephen Sanchez

Merge pull request #48 from edx/sanchez/TIM-171-Submission-UI

Sanchez/tim 171 submission ui
parents 80a39cd1 50d04441
@@ -6,14 +6,12 @@ the workflow for a given submission.
"""
import copy
import logging
-import math
from django.db import DatabaseError
from openassessment.peer.models import Assessment
from openassessment.peer.serializers import (
-AssessmentSerializer, RubricSerializer, rubric_from_dict
-)
+AssessmentSerializer, rubric_from_dict, get_assessment_review)
from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -69,8 +67,8 @@ class PeerAssessmentInternalError(PeerAssessmentError):
def create_assessment(
submission_uuid,
scorer_id,
-required_assessments_for_student,
-required_assessments_for_submission,
+must_grade,
+must_be_graded_by,
assessment_dict,
rubric_dict,
scored_at=None):
@@ -85,9 +83,9 @@ def create_assessment(
Submission model.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
-required_assessments_for_student (int): The number of assessments
+must_grade (int): The number of assessments
required for the student to receive a score for their submission.
-required_assessments_for_submission (int): The number of assessments
+must_be_graded_by (int): The number of assessments
required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
@@ -132,8 +130,6 @@ def create_assessment(
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission": submission.pk,
-#"points_earned": sum(assessment_dict["points_earned"]),
-#"points_possible": assessment_dict["points_possible"],
"score_type": PEER_TYPE,
"parts": [{"option": option_id} for option_id in option_ids]
}
@@ -151,8 +147,8 @@ def create_assessment(
_score_if_finished(
student_item,
submission,
-required_assessments_for_student,
-required_assessments_for_submission
+must_grade,
+must_be_graded_by
)
# Check if the grader is finished and has enough assessments
@@ -170,8 +166,8 @@ def create_assessment(
_score_if_finished(
scorer_item,
scorer_submissions[0],
-required_assessments_for_student,
-required_assessments_for_submission
+must_grade,
+must_be_graded_by
)
return peer_serializer.data
@@ -188,7 +184,7 @@ def create_assessment(
def _score_if_finished(student_item,
submission,
required_assessments_for_student,
-required_assessments_for_submission):
+must_be_graded_by):
"""Calculate final grade iff peer evaluation flow is satisfied.
Checks if the student is finished with the peer assessment workflow. If the
@@ -204,36 +200,59 @@ def _score_if_finished(student_item,
required_assessments_for_student
)
assessments = Assessment.objects.filter(submission=submission)
-submission_finished = assessments.count() >= required_assessments_for_submission
+submission_finished = assessments.count() >= must_be_graded_by
-scores = []
-for assessment in assessments:
-scores.append(assessment.points_earned)
if finished_evaluating and submission_finished:
submission_api.set_score(
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
-_calculate_final_score(scores),
+sum(get_assessment_median_scores(submission.uuid, must_be_graded_by).values()),
assessments[0].points_possible
)
-def _calculate_final_score(scores):
-"""Final grade is calculated using integer values, rounding up.
+def get_assessment_median_scores(submission_id, must_be_graded_by):
+"""Get the median score for each rubric criterion
+For a given assessment, collect the median score for each criterion on the
+rubric. This set can be used to determine the overall score, as well as each
+part of the individual rubric scores.
If there is a true median score, it is returned. If there are two median
values, the average of those two values is returned, rounded up to the
greatest integer value.
+If OverGrading occurs, the 'must_be_graded_by' parameter is the number of
+assessments we want to use to calculate the median values. If this limit is
+less than the total number of assessments available, the earliest
+assessments are used.
+Args:
+submission_id (str): The submission uuid to get all rubric criterion
+median scores.
+must_be_graded_by (int): The number of assessments to include in this
+score analysis.
+Returns:
+(dict): A dictionary of rubric criterion names, with a median score of
+the peer assessments.
+Raises:
+PeerAssessmentInternalError: If any error occurs while retrieving
+information to form the median scores, an error is raised.
"""
-total_scores = len(scores)
-scores = sorted(scores)
-median = int(math.ceil(total_scores / float(2)))
-if total_scores == 0:
-return 0
-elif total_scores % 2:
-return scores[median-1]
-else:
-return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
+# Create a key value in a dict with a list of values, for every criterion
+# found in an assessment.
+try:
+submission = Submission.objects.get(uuid=submission_id)
+scores = Assessment.scores_by_criterion(submission, must_be_graded_by)
+return Assessment.get_median_score_dict(scores)
+except DatabaseError:
+error_message = (
+u"Error getting assessment median scores {}".format(submission_id)
+)
+logger.exception(error_message)
+raise PeerAssessmentInternalError(error_message)
def has_finished_required_evaluating(student_id, required_assessments):
@@ -316,9 +335,7 @@ def get_assessments(submission_id):
"""
try:
submission = Submission.objects.get(uuid=submission_id)
-assessments = Assessment.objects.filter(submission=submission)
-serializer = AssessmentSerializer(assessments, many=True)
-return serializer.data
+return get_assessment_review(submission)
except DatabaseError:
error_message = (
u"Error getting assessments for submission {}".format(submission_id)
......
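The scoring flow above replaces the old single-list median with per-criterion medians: get_assessment_median_scores collects each criterion's scores, takes the median of each, and _score_if_finished sums those medians into the submission score. Below is a minimal, self-contained sketch of that rule; the example_scores data is invented for illustration, and the helper simply mirrors the median logic shown in the diff rather than being part of the module's API.

    import math

    def median_score(scores):
        """Median of a list of ints; with an even count, average the two
        middle values and round up (mirrors Assessment.get_median_score)."""
        scores = sorted(scores)
        count = len(scores)
        if count == 0:
            return 0
        middle = int(math.ceil(count / float(2)))
        if count % 2:
            return scores[middle - 1]
        return int(math.ceil(sum(scores[middle - 1:middle + 1]) / float(2)))

    # Hypothetical per-criterion scores collected from three peer assessments.
    example_scores = {"concise": [1, 3, 3], "clearheaded": [0, 1, 2], "form": [2, 2, 1]}
    medians = {name: median_score(values) for name, values in example_scores.items()}
    print(medians)                # per-criterion medians, e.g. {'concise': 3, 'clearheaded': 1, 'form': 2}
    print(sum(medians.values()))  # 6 -- the value passed to submission_api.set_score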
@@ -4,12 +4,14 @@ These models have to capture not only the state of assessments made for certain
submissions, but also the state of the rubrics at the time those assessments
were made.
"""
+from collections import defaultdict
from copy import deepcopy
from hashlib import sha1
import json
from django.db import models
from django.utils.timezone import now
+import math
from submissions.models import Submission
@@ -206,6 +208,104 @@ class Assessment(models.Model):
def __unicode__(self):
return u"Assessment {}".format(self.id)
@classmethod
def get_median_score_dict(cls, scores_dict):
"""Determine the median score in a dictionary of lists of scores
For a dictionary of lists, where each list contains a set of scores,
determine the median value in each list.
Args:
scores_dict (dict): A dictionary of lists of int values. These int
values are reduced to a single value that represents the median.
Returns:
(dict): A dictionary with criterion name keys and median score
values.
Examples:
>>> scores = {
>>> "foo": [1, 2, 3, 4, 5],
>>> "bar": [6, 7, 8, 9, 10]
>>> }
>>> Assessment.get_median_score_dict(scores)
{"foo": 3, "bar": 8}
"""
median_scores = {}
for criterion, criterion_scores in scores_dict.iteritems():
criterion_score = Assessment.get_median_score(criterion_scores)
median_scores[criterion] = criterion_score
return median_scores
@staticmethod
def get_median_score(scores):
"""Determine the median score in a list of scores
Determine the median value in the list.
Args:
scores (list): A list of int values. These int values
are reduced to a single value that represents the median.
Returns:
(int): The median score.
Examples:
>>> scores = [1, 2, 3, 4, 5]
>>> Assessment.get_median_score(scores)
3
"""
total_criterion_scores = len(scores)
sorted_scores = sorted(scores)
median = int(math.ceil(total_criterion_scores / float(2)))
if total_criterion_scores == 0:
median_score = 0
elif total_criterion_scores % 2:
median_score = sorted_scores[median-1]
else:
median_score = int(
math.ceil(
sum(sorted_scores[median-1:median+1])/float(2)
)
)
return median_score
@classmethod
def scores_by_criterion(cls, submission, must_be_graded_by):
"""Create a dictionary of lists for scores associated with criterion
Create a key value in a dict with a list of values, for every criterion
found in an assessment.
Iterate over every part of every assessment. Each part is associated with
a criterion name, which becomes a key in the score dictionary, with a list
of scores.
Args:
submission (Submission): Obtain assessments associated with this
submission
must_be_graded_by (int): The number of assessments to include in
this score analysis.
Examples:
>>> Assessment.scores_by_criterion(submission, 3)
{
"foo": [1, 2, 3],
"bar": [6, 7, 8]
}
"""
assessments = cls.objects.filter(
submission=submission).order_by("scored_at")[:must_be_graded_by]
scores = defaultdict(list)
for assessment in assessments:
for part in assessment.parts.all():
criterion_name = part.option.criterion.name
scores[criterion_name].append(part.option.points)
return scores
class AssessmentPart(models.Model):
"""Part of an Assessment corresponding to a particular Criterion.
......
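For the even-count case described in get_median_score (two middle values averaged, then rounded up), the test fixture [5, 6, 12, 16, 22, 53] works out as follows. This is an illustrative walk-through of the same arithmetic, not additional code from the commit.

    import math

    scores = sorted([5, 6, 12, 16, 22, 53])            # six values, already sorted
    middle = int(math.ceil(len(scores) / float(2)))    # ceil(3.0) == 3
    pair = scores[middle - 1:middle + 1]               # the two middle values: [12, 16]
    median = int(math.ceil(sum(pair) / float(2)))      # ceil(14.0) == 14
    assert median == 14                                # matches test_choose_score below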
@@ -4,14 +4,13 @@ Serializers are created to ensure models do not have to be accessed outside the
scope of the Tim APIs.
"""
from copy import deepcopy
-from hashlib import sha1
-import json
from rest_framework import serializers
from openassessment.peer.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
)
class InvalidRubric(Exception):
"""This can be raised during the deserialization process."""
def __init__(self, errors):
@@ -71,7 +70,6 @@ class CriterionSerializer(NestedModelSerializer):
model = Criterion
fields = ('order_num', 'name', 'prompt', 'options')
def validate_options(self, attrs, source):
"""Make sure we have at least one CriterionOption in a Criterion."""
options = attrs[source]
@@ -91,7 +89,6 @@ class RubricSerializer(NestedModelSerializer):
model = Rubric
fields = ('id', 'content_hash', 'criteria', 'points_possible')
def validate_criteria(self, attrs, source):
"""Make sure we have at least one Criterion in the Rubric."""
criteria = attrs[source]
@@ -134,6 +131,77 @@ class AssessmentSerializer(serializers.ModelSerializer):
'points_possible',
)
def get_assessment_review(submission):
"""Get all information pertaining to an assessment for review.
Given an assessment serializer, return a serializable formatted model of
the assessment, all assessment parts, all criterion options, and the
associated rubric.
Args:
submission (Submission): The Submission Model object to get
assessment reviews for.
Returns:
(list): A list of assessment reviews, combining assessments with
rubrics and assessment parts, to allow a cohesive object for
rendering the complete peer grading workflow.
Examples:
>>> get_assessment_review(submission)
{
'submission': 1,
'rubric': {
'id': 1,
'content_hash': u'45cc932c4da12a1c2b929018cd6f0785c1f8bc07',
'criteria': [{
'order_num': 0,
'name': u'Spelling',
'prompt': u'Did the student have spelling errors?',
'options': [{
'order_num': 0,
'points': 2,
'name': u'No spelling errors',
'explanation': u'No spelling errors were found in this submission.',
}]
}]
},
'scored_at': datetime.datetime(2014, 2, 25, 19, 50, 7, 290464, tzinfo=<UTC>),
'scorer_id': u'Bob',
'score_type': u'PE',
'parts': [{
'option': {
'order_num': 0,
'points': 2,
'name': u'No spelling errors',
'explanation': u'No spelling errors were found in this submission.'}
}],
'submission_uuid': u'0a600160-be7f-429d-a853-1283d49205e7',
'points_earned': 9,
'points_possible': 20,
}
"""
reviews = []
assessments = Assessment.objects.filter(submission=submission)
for assessment in assessments:
assessment_dict = AssessmentSerializer(assessment).data
rubric_dict = RubricSerializer(assessment.rubric).data
assessment_dict["rubric"] = rubric_dict
parts = []
for part in assessment.parts.all():
part_dict = AssessmentPartSerializer(part).data
options_dict = CriterionOptionSerializer(part.option).data
criterion_dict = CriterionSerializer(part.option.criterion).data
options_dict["criterion"] = criterion_dict
part_dict["option"] = options_dict
parts.append(part_dict)
assessment_dict["parts"] = parts
reviews.append(assessment_dict)
return reviews
def rubric_from_dict(rubric_dict):
"""Given a dict of rubric information, return the corresponding Rubric
......
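A short sketch of how a caller might walk the nested structure returned by get_assessment_review, pairing each assessment part with its criterion name and points. The review literal below is a trimmed-down version of the structure the function builds (each part's option carries its criterion, as assigned in the loop above); it is invented for illustration, not data from a real database.

    # One entry from get_assessment_review(submission), trimmed to the fields used here.
    review = {
        "scorer_id": u"Bob",
        "parts": [
            {"option": {"name": u"No spelling errors", "points": 2,
                        "criterion": {"name": u"Spelling"}}},
        ],
    }

    # Pair each criterion with the option the peer chose, the same matching the
    # graded response template performs on part.option.criterion.name.
    for part in review["parts"]:
        option = part["option"]
        print("{}: {} ({} points)".format(
            option["criterion"]["name"], option["name"], option["points"]))
        # Spelling: No spelling errors (2 points)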
@@ -77,6 +77,28 @@ ASSESSMENT_DICT = dict(
}
)
# Answers are against RUBRIC_DICT -- this is worth 0 points
ASSESSMENT_DICT_FAIL = dict(
feedback=u"fail",
options_selected={
"secret": "no",
u"ⓢⓐⓕⓔ": "no",
"giveup": "unwilling",
"singing": "yes",
}
)
# Answers are against RUBRIC_DICT -- this is worth 12 points
ASSESSMENT_DICT_PASS = dict(
feedback=u"这是中国",
options_selected={
"secret": "yes",
u"ⓢⓐⓕⓔ": "yes",
"giveup": "eager",
"singing": "no",
}
)
REQUIRED_GRADED = 5
REQUIRED_GRADED_BY = 3
@@ -175,10 +197,10 @@ class TestApi(TestCase):
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
peer_api.create_assessment(
-tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
+tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_FAIL, RUBRIC_DICT
)
peer_api.create_assessment(
-tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
+tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_PASS, RUBRIC_DICT
)
# Tim has met the criteria, and should now have a score.
@@ -211,6 +233,19 @@ class TestApi(TestCase):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_median_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
tim = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"], 3)
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessments_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
tim = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessments(tim["uuid"])
@patch.object(Submission.objects, 'get')
@raises(peer_api.PeerAssessmentInternalError)
@@ -244,15 +279,14 @@
peer_api.get_assessments(submission["uuid"])
def test_choose_score(self):
-self.assertEqual(0, peer_api._calculate_final_score([]))
-self.assertEqual(5, peer_api._calculate_final_score([5]))
+self.assertEqual(0, Assessment.get_median_score([]))
+self.assertEqual(5, Assessment.get_median_score([5]))
# average of 5 and 6, rounded up.
-self.assertEqual(6, peer_api._calculate_final_score([5, 6]))
-self.assertEqual(14, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53]))
-self.assertEqual(14, peer_api._calculate_final_score([6, 5, 12, 53, 16, 22]))
-self.assertEqual(16, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
-self.assertEqual(16, peer_api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
+self.assertEqual(6, Assessment.get_median_score([5, 6]))
+self.assertEqual(14, Assessment.get_median_score([5, 6, 12, 16, 22, 53]))
+self.assertEqual(14, Assessment.get_median_score([6, 5, 12, 53, 16, 22]))
+self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
+self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))
@staticmethod
def _create_student_and_submission(student, answer, date=None):
......
@@ -69,7 +69,7 @@
<!-- STEP: response -->
{% for assessment in rubric_assessments %}
-<li id="{{ assessment.name }}">{{ assessment.title }}</li>
+<li id="{{ assessment.class_id }}">{{ assessment.title }}</li>
{% endfor %}
</ol>
......
@@ -13,21 +13,28 @@
<!-- CASE: default/unanswered -->
+{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility">
+{% endblock %}
+{% block header %}
<!--header class="step__header ui-toggle-visibility__control"-->
<h2 class="step__title">
<span class="step__label">Your Response</span>
-<span class="step__deadline">due <span class="date">January 24, 2014</span> at <span class="time">15:00 UTC</span></span>
+{% if formatted_due_datetime %}
+<span class="step__deadline">due <span class="date">{{ formatted_due_datetime }}</span></span>
+{% endif %}
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
-<span class="step__status__value">Incomplete</span>
+<span class="step__status__value">{{ step_status }}</span>
</span>
<!--/header-->
+{% endblock %}
+{% block body %}
<div class="step__instruction">
-<p>Please provide your response to the following question. You may save your progress and return to complete your response anytime before the due date of <span class="step__deadline">due <span class="date">January 24, 2014</span></span>. <strong class="emphasis--beta">Once you submit, you may not edit your response</strong>.</p>
+<p>Please provide your response to the following question. You may save your progress and return to complete your response anytime before the due date of <span class="step__deadline">due <span class="date">{{ formatted_due_date }}</span></span>. <strong class="emphasis--beta">Once you submit, you may not edit your response</strong>.</p>
</div>
<div class="step__content">
@@ -55,4 +62,5 @@
</li>
</ul>
</div>
+{% endblock %}
</li>
{% extends "openassessmentblock/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--unavailable ui-toggle-visibility">
{% endblock %}
{% block body %}
<div class="step__instruction">
<p>You did not complete this portion of the problem before its due date.</p>
</div>
{% endblock %}
\ No newline at end of file
{% extends "openassessmentblock/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--graded is--collapsed ui-toggle-visibility">
{% endblock %}
{% block body %}
<div class="step__content">
<!-- user's response -->
<article class="submission__answer__display">
<h3 class="submission__answer__display__title">Your Submitted Response</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer }}
</div>
</article>
<!-- peer evaluations -->
<article class="submission__peer-evaluations">
<h3 class="submission__peer-evaluations__title">Peer Evaluations Of Your Response</h3>
<ol class="list submission__peer-evaluations__questions">
{% for criterion in rubric_criteria %}
{% with criterion_num=forloop.counter %}
<!-- individual question from rubric -->
<li class="question question--{{ criterion_num }} ui-toggle-visibility">
<h4 class="question__title ui-toggle-visibility__control">
<span class="title__copy">{{ criterion.name }}</span>
<span class="question__score">
<span class="label sr">Overall Question Score</span>
<span class="question__score__value">{{ criterion.median_score }}</span>
<span class="label label--divider sr">out of</span>
<span class="question__score__potential">{{ criterion.total_value }}</span>
</span>
</h4>
{% for assessment in peer_assessments %}
{% with peer_num=forloop.counter %}
{% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<ul class="question__answers ui-toggle-visibility__content">
<li class="answer peer-assessment--{{ peer_num}}"
id="question--{{ criterion_num }}__answer-{{ peer_num }}">
<h5 class="answer__title">
<span class="answer__source">
<span class="label sr">Assessor: </span>
<span class="value">Peer {{ peer_num }}</span>
</span>
<span class="answer__value">
<span class="label sr">Peer's Assessment: </span>
<span class="value">{{ part.option.name }}</span>
</span>
</h5>
<span class="answer__content">
{{ part.option.explanation }}
</span>
</li>
</ul>
{% endif %}
{% endfor %}
{% endwith %}
{% endfor %}
</li>
{% endwith %}
{% endfor %}
</ol>
</article>
<!-- peer assessment feedback -->
<form id="submission__feeedback" class="submission__feeedback ui-toggle-visibility" method="post">
<h3 class="submission__feeedback__title ui-toggle-visibility__control">Give Feedback On Peer Evaluations</h3>
<ol class="list list--fields submission__feeedback__content ui-toggle-visibility__content">
<li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here. Donec sed odio dui. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor.</label>
<textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was..."></textarea>
</li>
</ol>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="feedback__submit" class="action action--submit feedback__submit">Submit Feedback On Peer Evaluations</button>
</li>
</ul>
</form>
</div>
{% endblock %}
\ No newline at end of file
{% extends "openassessmentblock/oa_response.html" %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response is--submitted is--unavailable ui-toggle-visibility">
{% endblock %}
{% block body %}
<div class="step__content">
<!-- user's response -->
<article class="submission__answer__display">
<h3 class="submission__answer__display__title">Your Submitted Response</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer }}
</div>
</article>
</div>
{% endblock %}
\ No newline at end of file
@@ -117,21 +117,21 @@ UI_MODELS = {
"submission": {
"assessment_type": "submission",
"name": "submission",
-"class_id": "",
+"class_id": "openassessment__response",
"navigation_text": "Your response to this problem",
"title": "Your Response"
},
"peer-assessment": {
"assessment_type": "peer-assessment",
-"namne": "peer-assessment",
-"class_id": "",
+"name": "peer-assessment",
+"class_id": "openassessment__peer-assessment",
"navigation_text": "Your assessment(s) of peer responses",
"title": "Assess Peers' Responses"
},
"self-assessment": {
"assessment_type": "self-assessment",
"name": "self-assessment",
-"class_id": "",
+"class_id": "openassessment__self-assessment",
"navigation_text": "Your assessment of your response",
"title": "Assess Your Response"
}
@@ -155,6 +155,9 @@ DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
]
+# Used to parse datetime strings from the XML configuration.
+TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"
def load(path):
"""Handy helper for getting resources from our kit."""
@@ -348,7 +351,7 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
return block
def get_grade_state(self):
-# TODO: Determine if we want to build out grade state right now.
+# TODO: Placeholder for workflow state.
grade_state = {
"style_class": "is--incomplete",
@@ -379,9 +382,43 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
context_dict = {}
context_dict["xblock_trace"] = self.get_xblock_trace()
-context_dict["rubric_instructions"] = self.rubric_instructions
-context_dict["rubric_criteria"] = self.rubric_criteria
+if self.start_datetime:
+start = datetime.datetime.strptime(self.start_datetime, TIME_PARSE_FORMAT)
+context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
+context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
+if self.due_datetime:
+due = datetime.datetime.strptime(self.due_datetime, TIME_PARSE_FORMAT)
+context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
+context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
template = get_template(path)
context = Context(context_dict)
return Response(template.render(context), content_type='application/html', charset='UTF-8')
def is_open(self):
"""Checks if the question is open.
Determines if the start date has occurred and the end date has not
passed.
Returns:
(tuple): True if the question is open, False if not. If False,
specifies if the "start" date or "due" date is the closing
factor.
Examples:
>>> is_open()
False, "due"
"""
# Is the question closed?
if self.start_datetime:
start = datetime.datetime.strptime(self.start_datetime, TIME_PARSE_FORMAT)
if start > datetime.datetime.utcnow():
return False, "start"
if self.due_datetime:
due = datetime.datetime.strptime(self.due_datetime, TIME_PARSE_FORMAT)
if due < datetime.datetime.utcnow():
return False, "due"
return True, None
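Since the date handling above is spread across several hunks, here is a standalone sketch of how TIME_PARSE_FORMAT parses the XML start/due attributes and how the open/closed decision falls out. It restates is_open outside the XBlock so it can be run directly; the sample dates are invented.

    import datetime

    TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"

    def is_open(start_str, due_str, now=None):
        """Illustrative restatement of OpenAssessmentBlock.is_open()."""
        now = now or datetime.datetime.utcnow()
        if start_str:
            start = datetime.datetime.strptime(start_str, TIME_PARSE_FORMAT)
            if start > now:
                return False, "start"   # not yet open
        if due_str:
            due = datetime.datetime.strptime(due_str, TIME_PARSE_FORMAT)
            if due < now:
                return False, "due"     # already closed
        return True, None

    now = datetime.datetime(2014, 2, 25, 12, 0, 0)
    print(is_open("2013-02-24T13:53:50", "2040-02-24T13:53:50", now))  # (True, None)
    print(is_open("2014-03-01T00:00:00", "2040-02-24T13:53:50", now))  # (False, 'start')
    print(is_open("2013-02-24T13:53:50", "2014-02-01T00:00:00", now))  # (False, 'due')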
@@ -70,7 +70,11 @@ class PeerAssessmentMixin(object):
assessment = self.get_assessment_module('peer-assessment')
if assessment:
peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
-context_dict = {"peer_submission": peer_sub}
+context_dict = {
+"peer_submission": peer_sub,
+"rubric_instructions": self.rubric_instructions,
+"rubric_criteria": self.rubric_criteria
+}
return self.render_assessment('openassessmentblock/oa_peer_assessment.html', context_dict)
def get_peer_submission(self, student_item_dict, assessment):
......
@@ -50,6 +50,7 @@ class ScenarioParser(object):
crit = {
'name': criterion.attrib.get('name', ''),
'prompt': criterion.text.strip(),
+'total_value': criterion.attrib.get('total_value', None),
'options': [],
}
for option in criterion:
@@ -104,6 +105,10 @@ class ScenarioParser(object):
def parse(self):
"""Instantiate xblock object from runtime XML definition."""
+self.xblock.start_datetime = self.root.attrib.get('start', None)
+self.xblock.due_datetime = self.root.attrib.get('due', None)
for child in self.root:
if child.tag == 'prompt':
self.xblock.prompt = self.get_prompt(child)
......
@@ -11,7 +11,7 @@ function OpenAssessmentBlock(runtime, element) {
* Submission Functions
*/
function render_submissions(data) {
-$('#submission', element).replaceWith(data);
+$('#openassessment__response', element).replaceWith(data);
$('#step--response__submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
@@ -41,7 +41,7 @@ function OpenAssessmentBlock(runtime, element) {
* Peer Assessment Functions
*/
function render_peer_assessment(data) {
-$('#peer-assessment', element).replaceWith(data);
+$('#openassessment__peer-assessment', element).replaceWith(data);
function prepare_assessment_post(element) {
var selector = $("input[type=radio]:checked", element);
@@ -70,7 +70,7 @@ function OpenAssessmentBlock(runtime, element) {
type: "POST",
url: renderSelfUrl,
success: function(data) {
-$('#self-assessment', element).replaceWith(data);
+$('#openassessment__self-assessment', element).replaceWith(data);
}
});
$.ajax({
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
-<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+<openassessment start="2013-02-24T13:53:50" due="2040-02-24T13:53:50">
<title>
Censorship in Public Libraries
</title>
@@ -10,19 +10,19 @@
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
-<criterion name="concise">
+<criterion name="concise" total_value="3">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
-<criterion name="clearheaded">
+<criterion name="clearheaded" total_value="2">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
-<criterion name="form">
+<criterion name="form" total_value="2">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
-<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+<openassessment start="2013-02-24T13:53:50" due="2040-02-24T13:53:50">
<title>
Global Poverty
</title>
@@ -11,7 +11,7 @@
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
-<criterion name="concise">
+<criterion name="concise" total_value="5">
How concise is it?
<option val="0">Neal Stephenson (late)
<explain>
@@ -43,7 +43,7 @@
</explain>
</option>
</criterion>
-<criterion name="clearheaded">
+<criterion name="clearheaded" total_value="10">
How clear is the thinking?
<option val="0">Yogi Berra</option>
<option val="1">Hunter S. Thompson</option>
@@ -57,7 +57,7 @@
</explain>
</option>
</criterion>
-<criterion name="form">
+<criterion name="form" total_value="5">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">lolcats</option>
<option val="1">Facebook</option>
......
+import datetime
from xblock.core import XBlock
from submissions import api
+from openassessment.peer import api as peer_api
class SubmissionMixin(object):
@@ -68,7 +70,7 @@ class SubmissionMixin(object):
return status, status_tag, status_text
@staticmethod
-def _get_submission_score(student_item_dict, submission=False):
+def _get_submission_score(student_item_dict):
"""Return the most recent score, if any, for student item
Gets the score, if available.
@@ -82,9 +84,7 @@ class SubmissionMixin(object):
question.
"""
-scores = False
-if submission:
-scores = api.get_score(student_item_dict)
+scores = api.get_score(student_item_dict)
return scores[0] if scores else None
@staticmethod
@@ -120,5 +120,51 @@ class SubmissionMixin(object):
Generates the submission HTML for the first section of an Open
Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
more information on rendering XBlock sections.
Needs to support the following scenarios:
Unanswered and Open
Unanswered and Closed
Saved
Saved and Closed
Submitted
Submitted and Closed
Submitted, waiting assessment
Submitted and graded
""" """
return self.render_assessment('openassessmentblock/oa_response.html') # TODO Check if Saved
student_item = self.get_student_item_dict()
# Has the student submitted?
student_submission = self._get_user_submission(student_item)
# Has it been graded yet?
student_score = self._get_submission_score(student_item)
step_status = "Graded" if student_score else "Submitted"
step_status = step_status if student_submission else "Incomplete"
assessment_ui_model = self.get_assessment_module('peer-assessment')
problem_open, date = self.is_open()
context = {
"student_submission": student_submission,
"student_score": student_score,
"step_status": step_status,
}
path = "openassessmentblock/oa_response.html"
if student_score:
assessments = peer_api.get_assessments(student_submission["uuid"])
median_scores = peer_api.get_assessment_median_scores(
student_submission["uuid"],
assessment_ui_model["must_be_graded_by"]
)
context["peer_assessments"] = assessments
context["rubric_instructions"] = self.rubric_instructions
context["rubric_criteria"] = self.rubric_criteria
for criterion in context["rubric_criteria"]:
criterion["median_score"] = median_scores[criterion["name"]]
path = 'openassessmentblock/oa_response_graded.html'
elif student_submission:
path = 'openassessmentblock/oa_response_submitted.html'
elif not problem_open and date == "due" and not student_submission:
path = 'openassessmentblock/oa_response_closed.html'
return self.render_assessment(path, context_dict=context)
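The branching at the end of render_submission is easier to review as a single decision rule. The helper below is an illustrative restatement of how the template path follows from the student's state (graded beats submitted, which beats closed); it is not part of the change itself.

    def choose_response_template(has_score, has_submission, problem_open, closed_reason):
        """Illustrative restatement of the path selection in render_submission."""
        if has_score:
            return "openassessmentblock/oa_response_graded.html"
        if has_submission:
            return "openassessmentblock/oa_response_submitted.html"
        if not problem_open and closed_reason == "due":
            return "openassessmentblock/oa_response_closed.html"
        return "openassessmentblock/oa_response.html"

    # A graded submission wins over everything else.
    print(choose_response_template(True, True, True, None))
    # An unanswered problem past its due date shows the closed template.
    print(choose_response_template(False, False, False, "due"))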
@@ -2,18 +2,20 @@
Tests the Open Assessment XBlock functionality.
"""
import json
+import datetime
from django.test import TestCase
from mock import patch
from workbench.runtime import WorkbenchRuntime
import webob
+from openassessment.xblock.openassessmentblock import TIME_PARSE_FORMAT
from openassessment.xblock.submission_mixin import SubmissionMixin
from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError
RUBRIC_CONFIG = """
-<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+<openassessment start="2014-12-19T23:00:00" due="2014-12-21T23:00:00">
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
@@ -48,8 +50,8 @@ RUBRIC_CONFIG = """
</rubric>
<assessments>
<peer-assessment name="peer-assessment"
-start="2014-12-20T19:00-7:00"
-due="2014-12-21T22:22-7:00"
+start="2014-12-20T19:00"
+due="2014-12-21T22:22"
must_grade="5"
must_be_graded_by="3" />
<self-assessment/>
@@ -140,3 +142,32 @@ class TestOpenAssessment(TestCase):
xblock_fragment = self.runtime.render(self.assessment, "student_view")
self.assertTrue(xblock_fragment.body_html().find("Openassessmentblock"))
submission_response = self.assessment.render_submission({})
self.assertIsNotNone(submission_response)
self.assertTrue(submission_response.body.find("openassessment__response"))
def test_start_end_date_checks(self):
"""
Check if the start and end date checks work appropriately.
"""
now = datetime.datetime.utcnow()
past = now - datetime.timedelta(minutes = 10)
future = now + datetime.timedelta(minutes = 10)
way_future = now + datetime.timedelta(minutes = 20)
self.assessment.start_datetime = past.strftime(TIME_PARSE_FORMAT)
self.assessment.due_datetime = past.strftime(TIME_PARSE_FORMAT)
problem_open, reason = self.assessment.is_open()
self.assertFalse(problem_open)
self.assertEqual("due", reason)
self.assessment.start_datetime = past.strftime(TIME_PARSE_FORMAT)
self.assessment.due_datetime = future.strftime(TIME_PARSE_FORMAT)
problem_open, reason = self.assessment.is_open()
self.assertTrue(problem_open)
self.assertEqual(None, reason)
self.assessment.start_datetime = future.strftime(TIME_PARSE_FORMAT)
self.assessment.due_datetime = way_future.strftime(TIME_PARSE_FORMAT)
problem_open, reason = self.assessment.is_open()
self.assertFalse(problem_open)
self.assertEqual("start", reason)