Commit 7213266f by Stephen Sanchez

Merge pull request #31 from edx/sanchez/xblock_peer_eval

Sanchez/xblock peer eval
parents d10282c9 8e20f9e4
@@ -8,11 +8,13 @@ import copy
 import logging
 from django.db import DatabaseError
+import math
 from openassessment.peer.models import PeerEvaluation
 from openassessment.peer.serializers import PeerEvaluationSerializer
-from submissions.models import Submission, StudentItem
-from submissions.serializers import SubmissionSerializer
+from submissions import api as submission_api
+from submissions.models import Submission, StudentItem, Score
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer
 logger = logging.getLogger(__name__)
@@ -62,8 +64,13 @@ class PeerEvaluationInternalError(PeerEvaluationError):
     pass
-def create_evaluation(submission_uuid, scorer_id, assessment_dict,
-                      scored_at=None):
+def create_evaluation(
+        submission_uuid,
+        scorer_id,
+        required_evaluations_for_student,
+        required_evaluations_for_submission,
+        assessment_dict,
+        scored_at=None):
     """Creates an evaluation on the given submission.
     Evaluations are created based on feedback associated with a particular
@@ -75,6 +82,10 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
             Submission model.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
+        required_evaluations_for_student (int): The number of evaluations
+            required for the student to receive a score for their submission.
+        required_evaluations_for_submission (int): The number of evaluations
+            required on the submission for it to be scored.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
         scored_at (datetime): Optional argument to override the time in which
@@ -126,6 +137,35 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         if not peer_serializer.is_valid():
             raise PeerEvaluationRequestError(peer_serializer.errors)
         peer_serializer.save()
+
+        # Check if the submission is finished and its Author has graded enough.
+        student_item = submission.student_item
+        _check_if_finished_and_create_score(
+            student_item,
+            submission,
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
+        # Check if the grader is finished and has enough evaluations
+        scorer_item = StudentItem.objects.get(
+            student_id=scorer_id,
+            item_id=student_item.item_id,
+            course_id=student_item.course_id,
+            item_type=student_item.item_type
+        )
+
+        scorer_submissions = Submission.objects.filter(
+            student_item=scorer_item
+        ).order_by("-attempt_number")
+
+        _check_if_finished_and_create_score(
+            scorer_item,
+            scorer_submissions[0],
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
         return peer_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
@@ -137,6 +177,57 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         raise PeerEvaluationInternalError(error_message)
+
+
+def _check_if_finished_and_create_score(student_item,
+                                        submission,
+                                        required_evaluations_for_student,
+                                        required_evaluations_for_submission):
+    """Basic function for checking if a student is finished with peer workflow.
+
+    Checks if the student is finished with the peer evaluation workflow. If the
+    student already has a final grade calculated, there is no need to proceed.
+    If they do not have a grade, the student has a final grade calculated.
+
+    """
+    if Score.objects.filter(student_item=student_item):
+        return
+    finished_evaluating = has_finished_required_evaluating(
+        student_item.student_id,
+        required_evaluations_for_student
+    )
+    evaluations = PeerEvaluation.objects.filter(submission=submission)
+    submission_finished = evaluations.count() >= required_evaluations_for_submission
+    scores = []
+    for evaluation in evaluations:
+        scores.append(evaluation.points_earned)
+    if finished_evaluating and submission_finished:
+        submission_api.set_score(
+            StudentItemSerializer(student_item).data,
+            SubmissionSerializer(submission).data,
+            _calculate_final_score(scores),
+            evaluations[0].points_possible
+        )
+
+
+def _calculate_final_score(scores):
+    """Final grade is calculated using integer values, rounding up.
+
+    If there is a true median score, it is returned. If there are two median
+    values, the average of those two values is returned, rounded up to the
+    greatest integer value.
+
+    """
+    total_scores = len(scores)
+    scores = sorted(scores)
+    median = int(math.ceil(total_scores / float(2)))
+    if total_scores == 0:
+        return 0
+    elif total_scores % 2:
+        return scores[median-1]
+    else:
+        return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
+
+
 def has_finished_required_evaluating(student_id, required_evaluations):
     """Check if a student still needs to evaluate more submissions
@@ -162,7 +253,7 @@ def has_finished_required_evaluating(student_id, required_evaluations):
             while evaluating this workflow rule.
     Examples:
-        >>> has_finished_required_evaluating("Tim")
+        >>> has_finished_required_evaluating("Tim", 3)
         True
     """
@@ -228,7 +319,7 @@ def get_evaluations(submission_id):
         raise PeerEvaluationInternalError(error_message)
-def get_submission_to_evaluate(student_item_dict):
+def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
     """Get a submission to peer evaluate.
     Retrieves a submission for evaluation for the given student_item. This will
@@ -243,6 +334,8 @@ def get_submission_to_evaluate(student_item_dict):
             item_id, course_id, and item_type, used to identify the unique
             question for the review, while the student_id is used to explicitly
             avoid giving the student their own submission.
+        required_num_evaluations (int): The number of evaluations a submission
+            requires before it has completed the peer evaluation process.
     Returns:
         dict: A peer submission for evaluation. This contains a 'student_item',
@@ -262,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict):
        >>>     item_type="type_one",
        >>>     student_id="Bob",
        >>> )
-       >>> get_submission_to_evaluate(student_item_dict)
+       >>> get_submission_to_evaluate(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,
@@ -279,12 +372,29 @@ def get_submission_to_evaluate(student_item_dict):
         item_id=student_item_dict["item_id"],
     ).exclude(student_id=student_item_dict["student_id"])
-    # TODO: We need a priority queue.
-    submission = Submission.objects.filter(student_item__in=student_items).order_by(
-        "submitted_at",
-        "-attempt_number")[:1]
+    submission = _get_first_submission_not_evaluated(
+        student_items,
+        student_item_dict["student_id"],
+        required_num_evaluations
+    )
     if not submission:
         raise PeerEvaluationWorkflowError(
             "There are no submissions available for evaluation."
         )
-    return SubmissionSerializer(submission[0]).data
+    return SubmissionSerializer(submission).data
+
+
+def _get_first_submission_not_evaluated(student_items, student_id, required_num_evaluations):
+    # TODO: We need a priority queue.
+    submissions = Submission.objects.filter(student_item__in=student_items).order_by(
+        "submitted_at",
+        "-attempt_number"
+    )
+    for submission in submissions:
+        evaluations = PeerEvaluation.objects.filter(submission=submission)
+        if evaluations.count() < required_num_evaluations:
+            already_evaluated = False
+            for evaluation in evaluations:
+                already_evaluated = already_evaluated or evaluation.scorer_id == student_id
+            if not already_evaluated:
+                return submission
\ No newline at end of file
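The replacement for the old head-of-queue query is a linear scan: one PeerEvaluation query per candidate submission, skipping anything already fully evaluated or already graded by this student (hence the surviving TODO about a priority queue). A usage sketch for the updated signature, mirroring the docstring example above; the item and student ids are illustrative.

    from openassessment.peer import api as peer_api

    requesting_student = dict(
        item_id="item_one",
        course_id="course_1",
        item_type="type_one",
        student_id="Bob",
    )
    # Returns the oldest submission (excluding Bob's own) that still has fewer
    # than 3 evaluations and that Bob has not already evaluated himself.
    peer_submission = peer_api.get_submission_to_evaluate(requesting_student, 3)
    print(peer_submission["answer"])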
@@ -6,6 +6,7 @@
         <th>Submission UUID</th>
         <th>Points Earned</th>
         <th>Points Possible</th>
+        <th>Scored By</th>
         <th>Scored At</th>
         <th>Score Type</th>
         <th>Feedback</th>
@@ -15,6 +16,7 @@
         <td>{{ evaluation.points_earned }}</td>
         <td>{{ evaluation.points_possible }}</td>
         <td>{{ evaluation.scorer_id }}</td>
+        <td>{{ evaluation.scored_at }}</td>
         <td>{{ evaluation.score_type }}</td>
         <td>{{ evaluation.feedback }}</td>
     </tr>
...
@@ -9,7 +9,7 @@ from mock import patch
 from openassessment.peer import api
 from openassessment.peer.models import PeerEvaluation
-from submissions.api import create_submission
+from submissions import api as sub_api
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
@@ -19,6 +19,9 @@ ASSESSMENT_DICT = dict(
     feedback="Your submission was thrilling.",
 )
+REQUIRED_GRADED = 5
+REQUIRED_GRADED_BY = 3
+
 MONDAY = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
 TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
 WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
@@ -28,20 +31,24 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
 @ddt
 class TestApi(TestCase):
     def test_create_evaluation(self):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         evaluation = api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT
         )
         self._assert_evaluation(evaluation, **ASSESSMENT_DICT)
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             assessment_dict
         )
         evaluations = api.get_evaluations(submission["uuid"])
@@ -50,10 +57,12 @@ class TestApi(TestCase):
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations_with_date(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             assessment_dict,
             MONDAY
         )
@@ -62,17 +71,61 @@ class TestApi(TestCase):
         self._assert_evaluation(evaluations[0], **assessment_dict)
         self.assertEqual(evaluations[0]["scored_at"], MONDAY)
-    def test_student_finished_evaluating(self):
+    def test_peer_evaluation_workflow(self):
+        tim = self._create_student_and_submission("Tim", "Tim's answer")
         bob = self._create_student_and_submission("Bob", "Bob's answer")
         sally = self._create_student_and_submission("Sally", "Sally's answer")
         jim = self._create_student_and_submission("Jim", "Jim's answer")
+        buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
+        xander = self._create_student_and_submission("Xander", "Xander's answer")
+
+        # Tim should not have a score, because he has not evaluated enough
+        # peer submissions.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        api.create_evaluation(
+            sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+
+        # Tim should not have a score, because his submission does not have
+        # enough evaluations.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        api.create_evaluation(
+            tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        api.create_evaluation(
+            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        api.create_evaluation(
+            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+
+        # Tim has met the criteria, and should now have a score.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertTrue(scores)
+        self.assertEqual(6, scores[0]["points_earned"])
+        self.assertEqual(12, scores[0]["points_possible"])
-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
-        api.create_evaluation(bob["uuid"], "Tim", ASSESSMENT_DICT)
-        api.create_evaluation(sally["uuid"], "Tim", ASSESSMENT_DICT)
-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
-        api.create_evaluation(jim["uuid"], "Tim", ASSESSMENT_DICT)
-        self.assertTrue(api.has_finished_required_evaluating("Tim", 3))
     @raises(api.PeerEvaluationRequestError)
     def test_bad_configuration(self):
@@ -86,7 +139,7 @@ class TestApi(TestCase):
         )
         self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
-        submission = api.get_submission_to_evaluate(STUDENT_ITEM)
+        submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], u"Bob's answer")
         self.assertEqual(submission["student_item"], 2)
@@ -95,7 +148,7 @@ class TestApi(TestCase):
     @raises(api.PeerEvaluationWorkflowError)
     def test_no_submissions_to_evaluate_for_tim(self):
         self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        api.get_submission_to_evaluate(STUDENT_ITEM)
+        api.get_submission_to_evaluate(STUDENT_ITEM, 3)
     """
     Some Error Checking Tests against DB failures.
@@ -105,32 +158,47 @@ class TestApi(TestCase):
     @raises(api.PeerEvaluationInternalError)
     def test_error_on_evaluation_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             MONDAY
         )
     @patch.object(PeerEvaluation.objects, 'filter')
-    @raises(api.PeerEvaluationInternalError)
+    @raises(sub_api.SubmissionInternalError)
     def test_error_on_get_evaluation(self, mock_filter):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             MONDAY
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
         api.get_evaluations(submission["uuid"])
+    def test_choose_score(self):
+        self.assertEqual(0, api._calculate_final_score([]))
+        self.assertEqual(5, api._calculate_final_score([5]))
+        # average of 5, 6, rounded up.
+        self.assertEqual(6, api._calculate_final_score([5, 6]))
+        self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
+        self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
+        self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
+        self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
         new_student_item["student_id"] = student
-        return create_submission(new_student_item, answer, date)
+        return sub_api.create_submission(new_student_item, answer, date)
     def _assert_evaluation(self, evaluation, points_earned, points_possible,
                            feedback):
...
 <!-- START OpenAssessmentBlock HTML -->
 <div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
+    <div id="peer_submission_uuid" hidden="true">${peer_submission["uuid"]}</div>
+    <p>${peer_submission["answer"]}</p>
     <p class="openassessment_prompt"
        id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p>
     % for criterion in rubric_criteria:
     <div>
         <p class="openassessment_prompt">${criterion["instructions"]}</p>
         % for value in sorted([k for k in criterion.keys() if k != 'name' and k != 'instructions']):
-        <input type="radio" value="${value}">${criterion[value]}</input>
+        <input name="${criterion['name']}" type="radio" value="${value}">${criterion[value]}</input>
         % endfor
     </div>
     % endfor
     <input type="button"
            class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
 </div>
-<div class="openassessment_response_status_block" id=openassessment_response_status_block_${xblock_trace[0]}">
+<div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}">
     This message should be invisible; please upgrade your browser.
 </div>
 <!-- END OpenAssessmentBlock HTML -->
@@ -7,6 +7,15 @@ function OpenAssessmentBlock(runtime, element) {
     var click_msg = '<p class="clickhere">(click here to dismiss this message)</p>';
     /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
+    function prepare_assessment_post(element) {
+        selector = $("input[type=radio]:checked", element);
+        values = [];
+        for (i=0; i<selector.length; i++) {
+            values[i] = selector[i].value;
+        }
+        return {"submission_uuid":$("div#peer_submission_uuid")[0].innerText, "points_earned":values};
+    }
+
     function displayStatus(result) {
         status = result[0]
         error_msg = result[1]
@@ -26,7 +35,7 @@ function OpenAssessmentBlock(runtime, element) {
             type: "POST",
             url: handlerUrl,
             /* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
-            data: JSON.stringify({"assessment": "I'm not sure how to stringify a form"}),
+            data: JSON.stringify(prepare_assessment_post(element)),
             success: displayStatus
         });
     });
...
@@ -12,6 +12,50 @@ from workbench.runtime import WorkbenchRuntime
 from submissions import api
 from submissions.api import SubmissionRequestError, SubmissionInternalError
+RUBRIC_CONFIG = """
+    <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+        <prompt>
+            Given the state of the world today, what do you think should be done to
+            combat poverty? Please answer in a short essay of 200-300 words.
+        </prompt>
+        <rubric>
+            Read for conciseness, clarity of thought, and form.
+            <criterion name="concise">
+                How concise is it?
+                <option val="0">Neal Stephenson (late)</option>
+                <option val="1">HP Lovecraft</option>
+                <option val="3">Robert Heinlein</option>
+                <option val="4">Neal Stephenson (early)</option>
+                <option val="5">Earnest Hemingway</option>
+            </criterion>
+            <criterion name="clearheaded">
+                How clear is the thinking?
+                <option val="0">Yogi Berra</option>
+                <option val="1">Hunter S. Thompson</option>
+                <option val="2">Robert Heinlein</option>
+                <option val="3">Isaac Asimov</option>
+                <option val="10">Spock</option>
+            </criterion>
+            <criterion name="form">
+                Lastly, how is its form? Punctuation, grammar, and spelling all count.
+                <option val="0">lolcats</option>
+                <option val="1">Facebook</option>
+                <option val="2">Reddit</option>
+                <option val="3">metafilter</option>
+                <option val="4">Usenet, 1996</option>
+                <option val="5">The Elements of Style</option>
+            </criterion>
+        </rubric>
+        <evals>
+            <peereval start="2014-12-20T19:00-7:00"
+                      due="2014-12-21T22:22-7:00"
+                      must_grade="5"
+                      must_be_graded_by="3" />
+            <selfeval/>
+        </evals>
+    </openassessment>
+"""
+
 class TestOpenAssessment(TestCase):
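For reference, must_grade and must_be_graded_by in the <peereval> element line up with REQUIRED_GRADED and REQUIRED_GRADED_BY in the peer API tests. A hypothetical sketch of how an XBlock handler might pull those thresholds out of a config string like RUBRIC_CONFIG above; the real parsing lives in the XBlock, which is not part of this diff.

    import xml.etree.ElementTree as ET

    root = ET.fromstring(RUBRIC_CONFIG)              # assumes RUBRIC_CONFIG is the string above
    peereval = root.find("./evals/peereval")
    must_grade = int(peereval.get("must_grade"))                 # -> 5
    must_be_graded_by = int(peereval.get("must_be_graded_by"))   # -> 3

    # These would feed required_evaluations_for_student and
    # required_evaluations_for_submission in openassessment.peer.api.create_evaluation.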
@@ -22,11 +66,7 @@ class TestOpenAssessment(TestCase):
         self.runtime = WorkbenchRuntime()
         self.runtime.user_id = "Bob"
         assessment_id = self.runtime.parse_xml_string(
-            """<openassessment
-            prompt="This is my prompt. There are many like it, but this one is mine."
-            course_id="RopesCourse"
-            />
-            """, self.runtime.id_generator)
+            RUBRIC_CONFIG, self.runtime.id_generator)
         self.assessment = self.runtime.get_block(assessment_id)
         self.default_json_submission = json.dumps({"submission": "This is my answer to this test question!"})
...
@@ -8,8 +8,8 @@ import logging
 from django.db import DatabaseError
 from django.utils.encoding import force_unicode
-from submissions.serializers import SubmissionSerializer, StudentItemSerializer
-from submissions.models import Submission, StudentItem
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer
+from submissions.models import Submission, StudentItem, Score
 logger = logging.getLogger(__name__)
@@ -212,20 +212,129 @@ def get_submissions(student_item_dict, limit=None):
     if limit:
         submission_models = submission_models[:limit]
-    return [SubmissionSerializer(submission).data for submission in
-            submission_models]
+    return SubmissionSerializer(submission_models, many=True).data
 def get_score(student_item):
-    pass
+    """Get the score for a particular student item
+
+    Each student item should have a unique score. This function will return the
+    score if it is available. A score is only calculated for a student item if
+    it has completed the workflow for a particular assessment module.
+
+    Args:
+        student_item (dict): The dictionary representation of a student item.
+            Function returns the score related to this student item.
+
+    Returns:
+        score (dict): The score associated with this student item. None if there
+            is no score found.
+
+    Raises:
+        SubmissionInternalError: Raised if a score cannot be retrieved because
+            of an internal server error.
+
+    Examples:
+        >>> student_item = {
+        >>>     "student_id":"Tim",
+        >>>     "course_id":"TestCourse",
+        >>>     "item_id":"u_67",
+        >>>     "item_type":"openassessment"
+        >>> }
+        >>>
+        >>> get_score(student_item)
+        [{
+            'student_item': 2,
+            'submission': 2,
+            'points_earned': 8,
+            'points_possible': 20,
+            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
+        }]
+
+    """
+    student_item_model = StudentItem.objects.get(**student_item)
+    scores = Score.objects.filter(student_item=student_item_model)
+    return ScoreSerializer(scores, many=True).data
 def get_scores(course_id, student_id, types=None):
     pass
-def set_score(student_item):
-    pass
+def set_score(student_item, submission, score, points_possible):
+    """Set a score for a particular student item, submission pair.
+
+    Sets the score for a particular student item and submission pair. This score
+    is calculated externally to the API.
+
+    Args:
+        student_item (dict): The student item associated with this score. This
+            dictionary must contain a course_id, student_id, and item_id.
+        submission (dict): The submission associated with this score. This
+            dictionary must contain all submission fields to properly get a
+            unique submission item.
+        score (int): The score to associate with the given submission and
+            student item.
+        points_possible (int): The total points possible for this particular
+            student item.
+
+    Returns:
+        (dict): The dictionary representation of the saved score.
+
+    Raises:
+        SubmissionInternalError: Thrown if there was an internal error while
+            attempting to save the score.
+        SubmissionRequestError: Thrown if the given student item or submission
+            are not found.
+
+    Examples:
+        >>> student_item_dict = dict(
+        >>>     student_id="Tim",
+        >>>     item_id="item_1",
+        >>>     course_id="course_1",
+        >>>     item_type="type_one"
+        >>> )
+        >>>
+        >>> submission_dict = dict(
+        >>>     student_item=2,
+        >>>     attempt_number=1,
+        >>>     submitted_at=datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
+        >>>     created_at=datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
+        >>>     answer=u'The answer is 42.'
+        >>> )
+        >>> set_score(student_item_dict, submission_dict, 11, 12)
+        {
+            'student_item': 2,
+            'submission': 1,
+            'points_earned': 11,
+            'points_possible': 12,
+            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
+        }
+
+    """
+    try:
+        student_item_model = StudentItem.objects.get(**student_item)
+        submission_model = Submission.objects.get(**submission)
+    except DatabaseError:
+        error_msg = u"Could not retrieve student item: {} or submission {}.".format(
+            student_item, submission
+        )
+        logger.exception(error_msg)
+        raise SubmissionRequestError(error_msg)
+    score = ScoreSerializer(
+        data={
+            "student_item": student_item_model.pk,
+            "submission": submission_model.pk,
+            "points_earned": score,
+            "points_possible": points_possible,
+        }
+    )
+    if not score.is_valid():
+        logger.exception(score.errors)
+        raise SubmissionInternalError(score.errors)
+    score.save()
+    return score.data
 def _get_or_create_student_item(student_item_dict):
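One observation on the lookup in set_score above (a possible follow-up, not a change made in this commit): Model.objects.get raises the model's DoesNotExist exception when no row matches, and DoesNotExist is not a DatabaseError, so a missing student item or submission would currently propagate out of set_score rather than being reported as a SubmissionRequestError. A sketch of a broader except clause, intended as a drop-in for that try block:

    try:
        student_item_model = StudentItem.objects.get(**student_item)
        submission_model = Submission.objects.get(**submission)
    except (DatabaseError, StudentItem.DoesNotExist, Submission.DoesNotExist):
        error_msg = u"Could not retrieve student item: {} or submission {}.".format(
            student_item, submission
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(error_msg)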
@@ -262,7 +371,8 @@ def _get_or_create_student_item(student_item_dict):
     try:
         return StudentItem.objects.get(**student_item_dict)
     except StudentItem.DoesNotExist:
-        student_item_serializer = StudentItemSerializer(data=student_item_dict)
+        student_item_serializer = StudentItemSerializer(
+            data=student_item_dict)
         if not student_item_serializer.is_valid():
             raise SubmissionRequestError(student_item_serializer.errors)
         return student_item_serializer.save()
...
@@ -7,7 +7,7 @@ from nose.tools import raises
 from mock import patch
 import pytz
-from submissions.api import create_submission, get_submissions, SubmissionRequestError, SubmissionInternalError
+from submissions import api as api
 from submissions.models import Submission
 from submissions.serializers import StudentItemSerializer
@@ -31,79 +31,84 @@ ANSWER_TWO = u"this is my other answer!"
 @ddt
 class TestApi(TestCase):
+    """
+    Testing Submissions
+    """
     def test_create_submission(self):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         self._assert_submission(submission, ANSWER_ONE, 1, 1)
     def test_get_submissions(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
-        create_submission(STUDENT_ITEM, ANSWER_TWO)
-        submissions = get_submissions(STUDENT_ITEM)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
+        submissions = api.get_submissions(STUDENT_ITEM)
         self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
         self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
     def test_two_students(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
-        create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
-        submissions = get_submissions(STUDENT_ITEM)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
+        submissions = api.get_submissions(STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
         self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)
-        submissions = get_submissions(SECOND_STUDENT_ITEM)
+        submissions = api.get_submissions(SECOND_STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
         self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)
     @file_data('test_valid_student_items.json')
     def test_various_student_items(self, valid_student_item):
-        create_submission(valid_student_item, ANSWER_ONE)
-        submission = get_submissions(valid_student_item)[0]
+        api.create_submission(valid_student_item, ANSWER_ONE)
+        submission = api.get_submissions(valid_student_item)[0]
         self._assert_submission(submission, ANSWER_ONE, 1, 1)
     def test_get_latest_submission(self):
         past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
         more_recent_date = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
-        create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
-        create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
+        api.create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)
         # Test a limit on the submissions
-        submissions = get_submissions(STUDENT_ITEM, 1)
+        submissions = api.get_submissions(STUDENT_ITEM, 1)
         self.assertEqual(1, len(submissions))
         self.assertEqual(ANSWER_ONE, submissions[0]["answer"])
         self.assertEqual(more_recent_date.year,
                          submissions[0]["submitted_at"].year)
     def test_set_attempt_number(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
-        submissions = get_submissions(STUDENT_ITEM)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
+        submissions = api.get_submissions(STUDENT_ITEM)
         self._assert_submission(submissions[0], ANSWER_ONE, 1, 2)
-    @raises(SubmissionRequestError)
+    @raises(api.SubmissionRequestError)
     @file_data('test_bad_student_items.json')
     def test_error_checking(self, bad_student_item):
-        create_submission(bad_student_item, -100)
+        api.create_submission(bad_student_item, -100)
-    @raises(SubmissionRequestError)
+    @raises(api.SubmissionRequestError)
     def test_error_checking_submissions(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE, None, -1)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, -1)
     @patch.object(Submission.objects, 'filter')
-    @raises(SubmissionInternalError)
+    @raises(api.SubmissionInternalError)
     def test_error_on_submission_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
     @patch.object(StudentItemSerializer, 'save')
-    @raises(SubmissionInternalError)
+    @raises(api.SubmissionInternalError)
     def test_create_student_item_validation(self, mock_save):
         mock_save.side_effect = DatabaseError("Bad things happened")
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
     def test_unicode_enforcement(self):
-        create_submission(STUDENT_ITEM, "Testing unicode answers.")
-        submissions = get_submissions(STUDENT_ITEM, 1)
+        api.create_submission(STUDENT_ITEM, "Testing unicode answers.")
+        submissions = api.get_submissions(STUDENT_ITEM, 1)
         self.assertEqual(u"Testing unicode answers.", submissions[0]["answer"])
     def _assert_submission(self, submission, expected_answer, expected_item,
@@ -111,4 +116,29 @@ class TestApi(TestCase):
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], expected_answer)
         self.assertEqual(submission["student_item"], expected_item)
-        self.assertEqual(submission["attempt_number"], expected_attempt)
\ No newline at end of file
+        self.assertEqual(submission["attempt_number"], expected_attempt)
+
+    """
+    Testing Scores
+    """
+    def test_create_score(self):
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        self._assert_submission(submission, ANSWER_ONE, 1, 1)
+
+        score = api.set_score(STUDENT_ITEM, submission, 11, 12)
+        self._assert_score(score, 11, 12)
+
+    def test_get_score(self):
+        self.test_create_score()
+        scores = api.get_score(STUDENT_ITEM)
+        self._assert_score(scores[0], 11, 12)
+
+    def _assert_score(
+            self,
+            score,
+            expected_points_earned,
+            expected_points_possible):
+        self.assertIsNotNone(score)
+        self.assertEqual(score["points_earned"], expected_points_earned)
+        self.assertEqual(score["points_possible"], expected_points_possible)
\ No newline at end of file
@@ -10,7 +10,7 @@ if __name__ == "__main__":
     if 'test' in sys.argv or 'harvest' in sys.argv:
         os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.test")
     else:
-        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.base")
+        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
     from django.core.management import execute_from_command_line
...
@@ -8,4 +8,3 @@ django-extensions==1.3.3
 djangorestframework==2.3.5
 Mako==0.9.1
 pytz==2013.9
-django-pdb==0.3.2
@@ -3,6 +3,7 @@
 # Debug tools
 bpython==0.12
 django-debug-toolbar==0.11.0
+django-pdb==0.3.2
 sqlparse==0.1.10
 # Doc generation
...
@@ -107,7 +107,6 @@ MIDDLEWARE_CLASSES = (
     'django.contrib.messages.middleware.MessageMiddleware',
     # Uncomment the next line for simple clickjacking protection:
     # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
-    'django_pdb.middleware.PdbMiddleware',  # Needed to enable shell-on-crash behavior
 )
 ROOT_URLCONF = 'urls'
@@ -131,7 +130,6 @@ INSTALLED_APPS = (
     # Third party
     'django_extensions',
-    'django_pdb',  # Allows post-mortem debugging on exceptions
     # XBlock
     'workbench',
...
"""
Dev-specific Django settings.
"""
# Inherit from base settings
from .base import *
MIDDLEWARE_CLASSES += (
'django_pdb.middleware.PdbMiddleware', # Needed to enable shell-on-crash behavior
)
INSTALLED_APPS += (
'django_pdb', # Allows post-mortem debugging on exceptions
)
\ No newline at end of file