Commit 4086a497 by Will Daly

Merge pull request #73 from edx/will/tim-230

Provide feedback in peer assessment
parents 25a63c91 bd79e1c4
......@@ -7,6 +7,7 @@ the workflow for a given submission.
import copy
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from openassessment.peer.models import Assessment
......@@ -87,16 +88,15 @@ def create_assessment(
required for the student to receive a score for their submission.
must_be_graded_by (int): The number of assessments
required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
assessment_dict (dict): All related information for the assessment. The dictionary
must have the following keys: "options_selected" (mapping of criterion names to
selected option values) and "feedback" (written feedback for the submission).
scored_at (datetime): Optional argument to override the time in which
the assessment took place. If not specified, scored_at is set to
now.
Returns:
dict: The dictionary representing the assessment. This includes the
points earned, points possible, time scored, scorer id, score type,
and feedback.
dict: the Assessment model, serialized as a dict.
Raises:
PeerAssessmentRequestError: Raised when the submission_id is invalid, or
......@@ -107,33 +107,33 @@ def create_assessment(
Examples:
>>> assessment_dict = dict(
>>> points_earned=[1, 0, 3, 2],
>>> points_possible=12,
>>> options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_assessment("1", "Tim", assessment_dict)
{
'points_earned': 6,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'scorer_id': u"Tim",
'feedback': u'Your submission was thrilling.'
}
>>> create_assessment("1", "Tim", 3, 2, assessment_dict, rubric_dict)
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(assessment_dict["options_selected"])
# Validate that the selected options matched the rubric
# and raise an error if this is not the case
if None in option_ids:
raise PeerAssessmentRequestError(_("Selected options do not match the rubric options."))
feedback = assessment_dict.get('feedback', u'')
peer_assessment = {
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission": submission.pk,
"score_type": PEER_TYPE,
"feedback": feedback,
"parts": [{"option": option_id} for option_id in option_ids]
}
if scored_at:
if scored_at is not None:
peer_assessment["scored_at"] = scored_at
peer_serializer = AssessmentSerializer(data=peer_assessment)
......@@ -152,23 +152,33 @@ def create_assessment(
)
# Check if the grader is finished and has enough assessments
scorer_item = StudentItem.objects.get(
student_id=scorer_id,
item_id=student_item.item_id,
course_id=student_item.course_id,
item_type=student_item.item_type
)
try:
scorer_item = StudentItem.objects.get(
student_id=scorer_id,
item_id=student_item.item_id,
course_id=student_item.course_id,
item_type=student_item.item_type
)
except StudentItem.DoesNotExist:
raise PeerAssessmentWorkflowError(_("You must make a submission before assessing another student"))
scorer_submissions = Submission.objects.filter(
student_item=scorer_item
).order_by("-attempt_number")
_score_if_finished(
scorer_item,
scorer_submissions[0],
must_grade,
must_be_graded_by
)
if len(scorer_submissions) > 0:
_score_if_finished(
scorer_item,
scorer_submissions[0],
must_grade,
must_be_graded_by
)
# Currently, this condition is unreachable, since the only way to create a StudentItem is to
# create a submission for that student. We check anyway just in case this invariant changes.
else:
raise PeerAssessmentWorkflowError(_("You must make at least one submission before assessing another student"))
return peer_serializer.data
except DatabaseError:
......
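The hunks above change the contract of `peer_api.create_assessment`: callers now pass "options_selected" and "feedback" in `assessment_dict` instead of raw point values. A minimal caller sketch, assuming `submission_uuid`, `scorer_id`, and a matching `rubric_dict` are already in hand (values and grading thresholds are illustrative):

from openassessment.peer import api as peer_api
from openassessment.peer.api import (
    PeerAssessmentRequestError, PeerAssessmentWorkflowError
)

assessment_dict = {
    "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
    "feedback": u"Your submission was thrilling.",
}

try:
    assessment = peer_api.create_assessment(
        submission_uuid,   # UUID of the submission being assessed
        scorer_id,         # student_id of the peer doing the scoring
        2,                 # must_grade
        2,                 # must_be_graded_by
        assessment_dict,
        rubric_dict,       # e.g. {"criteria": [...]}, matching the options above
    )
except PeerAssessmentRequestError:
    pass  # selected options did not match the rubric
except PeerAssessmentWorkflowError:
    pass  # the scorer has not made a submission of their own yet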
......@@ -81,6 +81,9 @@ class Rubric(models.Model):
options_selected (dict): Mapping of criteria names to the names of
the option that was selected for that criterion.
Returns:
list of option ids (an entry is None if the selected option does not match the rubric)
Examples:
>>> options_selected = {"secret": "yes", "safe": "no"}
>>> rubric.options_ids(options_selected)
......@@ -97,6 +100,8 @@ class Rubric(models.Model):
return [
crit_to_all_opts[crit][opt]
if crit in crit_to_all_opts and opt in crit_to_all_opts[crit]
else None
for crit, opt in options_selected.items()
]
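A quick illustration of the `options_ids` behavior added above (hedged sketch; `rubric` stands in for any Rubric instance, e.g. one returned by `rubric_from_dict`, and the criterion/option names are hypothetical):

option_ids = rubric.options_ids({
    "clarity": "Very clear",   # assume this criterion and option exist on the rubric
    "banana": "ripe",          # not part of the rubric, so its slot comes back as None
})
assert None in option_ids
# create_assessment() above performs exactly this membership check and raises
# PeerAssessmentRequestError when any selected option fails to match the rubric.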
......@@ -186,7 +191,6 @@ class Assessment(models.Model):
scorer_id = models.CharField(max_length=40, db_index=True)
score_type = models.CharField(max_length=2)
# TODO: move this to its own model
feedback = models.TextField(max_length=10000, default="")
class Meta:
......
......@@ -123,6 +123,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
'scored_at',
'scorer_id',
'score_type',
'feedback',
# Foreign Key
'parts',
......
......@@ -122,6 +122,7 @@ class TestApi(TestCase):
)
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
@file_data('valid_assessments.json')
def test_get_assessments(self, assessment_dict):
......@@ -288,6 +289,22 @@ class TestApi(TestCase):
self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))
@raises(peer_api.PeerAssessmentWorkflowError)
def test_assess_before_submitting(self):
# Create a submission for another student
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
# Attempt to create the assessment from another student without first making a submission
peer_api.create_assessment(
submission["uuid"],
"another_student",
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
ASSESSMENT_DICT,
RUBRIC_DICT,
MONDAY
)
@staticmethod
def _create_student_and_submission(student, answer, date=None):
new_student_item = STUDENT_ITEM.copy()
......
......@@ -6,6 +6,16 @@
<span class="grade__value">{{ score.points_earned }}</span> out of <span class="grade__potential">{{ score.points_possible }}</span>
</div>
<!-- PLACEHOLDER: @talbs, you can overwrite this with the actual template -->
<div class="openassessment__feedback">
{% for asmnt in assessments %}
<div class="openassessment__peer_assessment_{{ forloop.counter }}">
<div>Peer #{{ forloop.counter }}</div>
<div>Feedback: {{ asmnt.feedback }}</div>
</div>
{% endfor %}
</div>
<div class="grade__actions">
<ul class="list list--actions">
<li class="list--actions__item">
......@@ -13,4 +23,4 @@
</li>
</ul>
</div>
</div>
\ No newline at end of file
</div>
from xblock.core import XBlock
from openassessment.peer.api import get_assessments
from submissions.api import get_score
......@@ -23,7 +24,14 @@ class GradeMixin(object):
student_item = self.get_student_item_dict()
scores = get_score(student_item)
if scores:
context = {"score": scores[0]}
context = {
"score": scores[0],
"assessments": [],
}
# Look up assessment feedback
for assessment in get_assessments(scores[0]['submission_uuid']):
context['assessments'].append(assessment)
else:
path = 'openassessmentblock/grade/oa_grade_waiting.html'
elif not problem_open and date == "due":
......@@ -33,4 +41,3 @@ class GradeMixin(object):
# TODO: How do we determine which modules are incomplete?
return self.render_assessment(path, context)
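Taken with the template change above, the grade view now hands each serialized peer assessment to the page context. Roughly, as a hedged sketch (field values are illustrative, not actual output):

context = {
    "score": {
        "points_earned": 6,
        "points_possible": 12,
        "submission_uuid": "abc1234",  # exposed via the new Score.submission_uuid property
    },
    "assessments": [
        # One serialized Assessment per peer; each now carries "feedback",
        # which the template renders as "Feedback: ..." for each peer.
        {"points_earned": 6, "points_possible": 12, "feedback": u"Good job!"},
    ],
}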
import logging
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerAssessmentWorkflowError
from openassessment.peer.api import (
PeerAssessmentWorkflowError, PeerAssessmentRequestError,
PeerAssessmentInternalError
)
logger = logging.getLogger(__name__)
class PeerAssessmentMixin(object):
......@@ -25,37 +33,59 @@ class PeerAssessmentMixin(object):
Args:
data (dict): A dictionary containing information required to create
a new peer assessment. Expecting attributes "points_earned",
"total_value", and "submission_uuid". If these attributes are
not available, a new assessment cannot be stored.
a new peer assessment. This dict should have the following attributes:
`submission_uuid` (string): The unique identifier for the submission being assessed.
`options_selected` (dict): Dictionary mapping criterion names to option values.
`feedback` (unicode): Written feedback for the submission.
Returns:
(tuple): A tuple containing the dictionary representation of the
newly created assessment, and a "Success" string.
Dict with keys "success" (bool) indicating success/failure.
and "msg" (unicode) containing additional information if an error occurs.
"""
# Validate the request
if 'feedback' not in data:
return {'success': False, 'msg': _('Must provide feedback in the assessment')}
if 'options_selected' not in data:
return {'success': False, 'msg': _('Must provide options selected in the assessment')}
if 'submission_uuid' not in data:
return {'success': False, 'msg': _('Must provide submission uuid for the assessment')}
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
'criteria': self.rubric_criteria
}
assessment_dict = {
"feedback": "Not yet implemented.",
"feedback": data['feedback'],
"options_selected": data["options_selected"],
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment_ui_model["must_grade"]),
int(assessment_ui_model["must_be_graded_by"]),
assessment_dict,
rubric_dict,
)
try:
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment_ui_model["must_grade"]),
int(assessment_ui_model["must_be_graded_by"]),
assessment_dict,
rubric_dict,
)
except PeerAssessmentRequestError as ex:
return {'success': False, 'msg': ex.message}
except PeerAssessmentInternalError as ex:
logger.exception(ex)
return {'success': False, 'msg': _("Internal error occurred while creating the assessment")}
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return {}
return {'success': True, 'msg': u''}
else:
return {'success': False, 'msg': _('Could not load peer assessment.')}
@XBlock.handler
......
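For reference, the request and response shapes the reworked `assess` handler expects, inferred from the validation and return statements above (values are illustrative):

# The client POSTs this payload, JSON-encoded, to the "assess" handler:
request_payload = {
    "submission_uuid": "abc1234",
    "options_selected": {"clarity": "Very clear", "precision": "Somewhat precise"},
    "feedback": u"Excellent job!",
}

# The handler responds with:
#   on success:           {"success": True,  "msg": u""}
#   on a missing key:     {"success": False, "msg": u"Must provide feedback in the assessment"}
#   on a rubric mismatch: {"success": False, "msg": <message from PeerAssessmentRequestError>}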
/*
/**
Tests for OA XBlock editing.
*/
**/
describe("OpenAssessment.StudioUI", function() {
......
......@@ -51,7 +51,7 @@ describe("OpenAssessment.Server", function() {
});
});
it("Sends a submission the XBlock", function() {
it("Sends a submission to the XBlock", function() {
// Status, student ID, attempt number
stubAjax(true, [true, 1, 2]);
......@@ -69,7 +69,28 @@ describe("OpenAssessment.Server", function() {
expect($.ajax).toHaveBeenCalledWith({
url: '/submit',
type: "POST",
data: {submission: "This is only a test"}
data: JSON.stringify({submission: "This is only a test"})
});
});
it("Sends an assessment to the XBlock", function() {
stubAjax(true, {success: true, msg:''});
var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.assess("abc1234", options, "Excellent job!").done(function() {
success = true;
});
expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({
url: '/assess',
type: "POST",
data: JSON.stringify({
submission_uuid: "abc1234",
options_selected: options,
feedback: "Excellent job!"
})
});
});
......@@ -183,4 +204,28 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toEqual("Test error");
});
it("informs the caller of a server error when sending an assessment", function() {
stubAjax(true, {success:false, msg:'Test error!'});
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Test error!");
});
it("informs the caller of an AJAX error when sending an assessment", function() {
stubAjax(false, null);
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.assess("abc1234", options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Could not contact server.");
});
});
......@@ -85,7 +85,7 @@ OpenAssessment.Server.prototype = {
$.ajax({
type: "POST",
url: url,
data: {submission: submission}
data: JSON.stringify({submission: submission})
}).done(function(data) {
var success = data[0];
if (success) {
......@@ -105,6 +105,51 @@ OpenAssessment.Server.prototype = {
},
/**
Send an assessment to the XBlock.
Args:
submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
feedback (string): Written feedback on the submission.
Returns:
A jQuery promise, which resolves with no args if successful
and fails with an error message otherwise.
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
var feedback = "Good job!";
server.assess("abc123", options, feedback).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
assess: function(submissionId, optionsSelected, feedback) {
var url = this.url('assess');
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected,
feedback: feedback
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
function(data) {
if (data.success) {
defer.resolve();
}
else {
defer.rejectWith(this, [data.msg]);
}
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
});
}).promise();
},
/**
Load the XBlock's XML definition from the server.
Returns:
......
......@@ -60,11 +60,6 @@
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
start="2014-12-20T19:00"
due="2014-12-21T22:22"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
......@@ -84,11 +84,10 @@
</criterion>
</rubric>
<assessments>
<peer-assessment name="peer-assessment"
<assessment name="peer-assessment"
start="2014-12-20T19:00"
due="2014-12-21T22:22"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment"/>
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment"
start="2014-12-20T19:00"
due="2014-12-21T22:22"
must_grade="2"
must_be_graded_by="2" />
</assessments>
</openassessment>
# -*- coding: utf-8 -*-
"""
Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario
class TestGrade(XBlockHandlerTestCase):
ASSESSMENTS = [
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
},
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'Good job!',
},
]
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock):
# Create a submission from the user
student_item = xblock.get_student_item_dict()
submission = submission_api.create_submission(student_item, self.SUBMISSION)
scorer_submissions = []
for scorer_name, assessment in zip(['McNulty', 'Freamon'], self.ASSESSMENTS):
# Create a submission for each scorer
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = submission_api.create_submission(scorer, self.SUBMISSION)
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
# Create an assessment of the user's submission
peer_api.create_assessment(
submission['uuid'], scorer_name, 2, 2,
assessment, {'criteria': xblock.rubric_criteria}
)
# Have our user make assessments (so she can get a score)
for submission in scorer_submissions:
peer_api.create_assessment(
submission['uuid'], 'Greggs', 2, 2,
self.ASSESSMENTS[0], {'criteria': xblock.rubric_criteria}
)
# Render the view
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
# Verify that feedback from each scorer appears in the view
self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
self.assertIn(u'Good job!', resp.decode('utf-8'))
# -*- coding: utf-8 -*-
"""
Tests for peer assessment handlers in Open Assessment XBlock.
"""
import copy
import json
from submissions import api as submission_api
from openassessment.peer import api as peer_api
from .base import XBlockHandlerTestCase, scenario
class TestPeerAssessment(XBlockHandlerTestCase):
ASSESSMENT = {
'submission_uuid': None,
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
@scenario('data/assessment_scenario.xml', user_id='Bob')
def test_assess_handler(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
submission_api.create_submission(another_student, self.SUBMISSION)
# Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent
actual = peer_api.get_assessments(submission['uuid'])
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
self.assertEqual(actual[0]['points_earned'], 5)
self.assertEqual(actual[0]['points_possible'], 6)
self.assertEqual(actual[0]['scorer_id'], 'Bob')
self.assertEqual(actual[0]['score_type'], 'PE')
self.assertEqual(len(actual[0]['parts']), 2)
parts = sorted(actual[0]['parts'])
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
self.assertEqual(actual[0]['feedback'], assessment['feedback'])
@scenario('data/assessment_scenario.xml', user_id='Bob')
def test_assess_rubric_option_mismatch(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
student_item['student_id'] = 'Sally'
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
submission_api.create_submission(another_student, self.SUBMISSION)
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
assessment['options_selected']['invalid'] = 'not a part of the rubric!'
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
# Expect an error response
self.assertFalse(resp['success'])
@scenario('data/assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock):
for missing in ['feedback', 'submission_uuid', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment[missing]
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json')
self.assertEqual(resp['success'], False)
......@@ -101,6 +101,10 @@ class Score(models.Model):
points_possible = models.PositiveIntegerField(default=0)
created_at = models.DateTimeField(editable=False, default=now, db_index=True)
@property
def submission_uuid(self):
return self.submission.uuid
def __repr__(self):
return repr(dict(
student_item=self.student_item,
......
......@@ -26,6 +26,18 @@ class SubmissionSerializer(serializers.ModelSerializer):
class ScoreSerializer(serializers.ModelSerializer):
submission_uuid = serializers.Field(source='submission_uuid')
class Meta:
model = Score
fields = ('student_item', 'submission', 'points_earned', 'points_possible', 'created_at')
fields = (
'student_item',
'submission',
'points_earned',
'points_possible',
'created_at',
# Computed
'submission_uuid',
)
......@@ -131,9 +131,11 @@ class TestApi(TestCase):
self._assert_score(score, 11, 12)
def test_get_score(self):
self.test_create_score()
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.set_score(STUDENT_ITEM, submission, 11, 12)
scores = api.get_score(STUDENT_ITEM)
self._assert_score(scores[0], 11, 12)
self.assertEqual(scores[0]['submission_uuid'], submission['uuid'])
def test_get_score_no_student_id(self):
student_item = copy.deepcopy(STUDENT_ITEM)
......