Commit c51eb79c by Stephen Sanchez

Changed some models and APIs around, made some small tweaks to the UI, and added a migration.

parent c6a73bfe
@@ -4,10 +4,9 @@ from openassessment.assessment.models import Assessment, AssessmentFeedback, Ass
 admin.site.register(Assessment)
 admin.site.register(AssessmentPart)
+admin.site.register(AssessmentFeedback)
 admin.site.register(Rubric)
 admin.site.register(Criterion)
 admin.site.register(CriterionOption)
 admin.site.register(PeerWorkflow)
 admin.site.register(PeerWorkflowItem)
-admin.site.register(AssessmentFeedback)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('submission_uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
            ('helpfulness', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('feedback', self.gf('django.db.models.fields.TextField')(default='', max_length=10000)),
        ))
        db.send_create_signal('assessment', ['AssessmentFeedback'])

        # Adding M2M table for field assessments on 'AssessmentFeedback'
        db.create_table('assessment_assessmentfeedback_assessments', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('assessmentfeedback', models.ForeignKey(orm['assessment.assessmentfeedback'], null=False)),
            ('assessment', models.ForeignKey(orm['assessment.assessment'], null=False))
        ))
        db.create_unique('assessment_assessmentfeedback_assessments', ['assessmentfeedback_id', 'assessment_id'])

    def backwards(self, orm):
        # Deleting model 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback')

        # Removing M2M table for field assessments on 'AssessmentFeedback'
        db.delete_table('assessment_assessmentfeedback_assessments')

    models = {
        'assessment.assessment': {
            'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
            'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.Submission']"})
        },
        'assessment.assessmentfeedback': {
            'Meta': {'object_name': 'AssessmentFeedback'},
            'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
            'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
            'helpfulness': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.assessmentpart': {
            'Meta': {'object_name': 'AssessmentPart'},
            'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.CriterionOption']"})
        },
        'assessment.criterion': {
            'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
        },
        'assessment.criterionoption': {
            'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
            'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'assessment.peerworkflow': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'assessment.peerworkflowitem': {
            'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
            'assessment': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scorer_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.PeerWorkflow']"}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        'assessment.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'submissions.studentitem': {
            'Meta': {'unique_together': "(('course_id', 'student_id', 'item_id'),)", 'object_name': 'StudentItem'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'submissions.submission': {
            'Meta': {'ordering': "['-submitted_at', '-id']", 'object_name': 'Submission'},
            'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'attempt_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'student_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.StudentItem']"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'blank': 'True'})
        }
    }

    complete_apps = ['assessment']
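The migration is applied through South's usual workflow (./manage.py migrate assessment). A hedged sanity check after running it, using only the table names from the create_table calls above:

# A minimal sketch, assuming the migration above has been applied.
from django.db import connection

tables = connection.introspection.table_names()
assert 'assessment_assessmentfeedback' in tables
assert 'assessment_assessmentfeedback_assessments' in tables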
@@ -373,6 +373,21 @@ class AssessmentPart(models.Model):
         return self.option.criterion.points_possible

+
+class AssessmentFeedback(models.Model):
+    """A response to a submission's feedback, judging accuracy or helpfulness."""
+    submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
+    assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback')
+    HELPFULNESS_CHOICES = (
+        (0, 'These results were not at all helpful'),
+        (1, 'These results were somewhat helpful'),
+        (2, 'These results were helpful'),
+        (3, 'These results were very helpful'),
+        (4, 'These results were extremely helpful'),
+    )
+    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
+    feedback = models.TextField(max_length=10000, default="")
+
 class PeerWorkflow(models.Model):
     """Internal Model for tracking Peer Assessment Workflow
@@ -439,18 +454,3 @@ class PeerWorkflowItem(models.Model):
     def __unicode__(self):
         return repr(self)
-
-
-class AssessmentFeedback(models.Model):
-    """A response to a submission's feedback, judging accuracy or helpfulness."""
-    peerworkflows = models.ManyToManyField(PeerWorkflowItem)
-    HELPFULNESS_CHOICES = (
-        (0, 'These results were not at all helpful'),
-        (1, 'These results were somewhat helpful'),
-        (2, 'These results were helpful'),
-        (3, 'These results were very helpful'),
-        (4, 'These results were extremely helpful'),
-    )
-    helpfulness = models.IntegerField(choices=HELPFULNESS_CHOICES, default=2)
-    feedback = models.TextField(max_length=10000, default="")
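The net change to models.py: AssessmentFeedback moves above PeerWorkflow, is keyed by submission_uuid, and links directly to the assessments it rates through a ManyToManyField, replacing the old link through PeerWorkflowItem. A minimal sketch of exercising the new model from a Django shell (the UUID is a placeholder, not a real submission):

# A hedged sketch, assuming the new model and migration are in place.
from openassessment.assessment.models import Assessment, AssessmentFeedback

feedback, created = AssessmentFeedback.objects.get_or_create(
    submission_uuid="01234567-89ab-cdef-0123-456789abcdef"  # placeholder
)
feedback.feedback = u"The rubric comments were specific and actionable."
feedback.helpfulness = 3
feedback.save()

# Attach the peer assessments this feedback refers to.
feedback.assessments.add(*Assessment.objects.filter(
    submission__uuid=feedback.submission_uuid, score_type="PE"
))

# get_FOO_display() resolves the label from HELPFULNESS_CHOICES.
print feedback.get_helpfulness_display()  # 'These results were very helpful'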
@@ -13,9 +13,9 @@ from django.db import DatabaseError
 from django.db.models import Q
 from pytz import UTC

-from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem
+from openassessment.assessment.models import Assessment, InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, rubric_from_dict, get_assessment_review)
+    AssessmentSerializer, rubric_from_dict, get_assessment_review, AssessmentFeedbackSerializer)
 from submissions.models import Submission, StudentItem
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -889,3 +889,63 @@ def _check_submission_graded(submission_uuid, must_be_graded_by):
     return PeerWorkflowItem.objects.filter(
         submission_uuid=submission_uuid
     ).exclude(assessment=-1).count() >= must_be_graded_by
+
+
+def get_assessment_feedback(submission_uuid):
+    """Retrieve a feedback object for an assessment whether it exists or not.
+
+    Gets or creates a new Assessment Feedback model for the given submission.
+
+    Args:
+        submission_uuid: The submission we want to create assessment feedback
+            for.
+
+    Returns:
+        The assessment feedback object that exists, or a newly created model.
+
+    Raises:
+        PeerAssessmentInternalError: Raised when the AssessmentFeedback cannot
+            be created or retrieved because of internal exceptions.
+
+    """
+    try:
+        feedback, feedback_created = AssessmentFeedback.objects.get_or_create(
+            submission_uuid=submission_uuid
+        )
+        return AssessmentFeedbackSerializer(feedback).data
+    except DatabaseError:
+        error_message = (
+            u"An error occurred retrieving assessment feedback for {}."
+            .format(submission_uuid)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+
+def set_assessment_feedback(must_grade, feedback_dict):
+    """Set a feedback object for an assessment to have some new values.
+
+    Sets or updates the assessment feedback with the given values in the dict.
+
+    Args:
+        must_grade (int): The required number of assessments for the associated
+            submission.
+        feedback_dict (dict): A dictionary of all the values to update or create
+            a new assessment feedback.
+
+    Returns:
+        The modified or created feedback.
+
+    """
+    feedback_model = AssessmentFeedback.objects.get(
+        submission_uuid=feedback_dict['submission_uuid']
+    )
+    submission = Submission.objects.get(uuid=feedback_dict['submission_uuid'])
+    feedback_dict['assessments'] = [
+        assessment.pk for assessment in Assessment.objects.filter(
+            submission=submission,
+            score_type="PE"
+        )[:must_grade]
+    ]
+    feedback = AssessmentFeedbackSerializer(feedback_model, data=feedback_dict)
+    if not feedback.is_valid():
+        raise PeerAssessmentRequestError(feedback.errors)
+    feedback.save()
+    return feedback.data
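Together these two functions give callers a get-or-create/update cycle keyed entirely by submission UUID. A hedged usage sketch (the UUID and must_grade value are illustrative only):

from openassessment.assessment import peer_api

# Fetch the serialized feedback record, creating an empty one if needed.
feedback_dict = peer_api.get_assessment_feedback("some-submission-uuid")

# Update it; set_assessment_feedback binds it to the first must_grade
# peer assessments for that submission.
feedback_dict['feedback'] = u"Two of the three reviews were on point."
feedback_dict['helpfulness'] = 3
updated = peer_api.set_assessment_feedback(3, feedback_dict)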
...@@ -277,9 +277,9 @@ class AssessmentFeedbackSerializer(serializers.ModelSerializer): ...@@ -277,9 +277,9 @@ class AssessmentFeedbackSerializer(serializers.ModelSerializer):
class Meta: class Meta:
model = AssessmentFeedback model = AssessmentFeedback
fields = ('id', fields = (
'peerworkflows', 'submission_uuid',
'helpfulness', 'helpfulness',
'feedback', 'feedback',
) )
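With submission_uuid replacing the dropped id and peerworkflows fields, the serializer now round-trips exactly the keys set_assessment_feedback consumes. A sketch of that round trip, assuming the same serializer API used elsewhere in this changeset:

from openassessment.assessment.models import AssessmentFeedback
from openassessment.assessment.serializers import AssessmentFeedbackSerializer

model = AssessmentFeedback.objects.get(submission_uuid="some-submission-uuid")
serializer = AssessmentFeedbackSerializer(model, data={
    'submission_uuid': model.submission_uuid,
    'helpfulness': 4,
    'feedback': u"Extremely helpful reviews.",
})
if serializer.is_valid():
    serializer.save()
else:
    print serializer.errors  # e.g. a missing or duplicate submission_uuid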
...@@ -134,7 +134,7 @@ ...@@ -134,7 +134,7 @@
<ol class="list list--fields submission__feeedback__content"> <ol class="list list--fields submission__feeedback__content">
<li class="field field--textarea feedback__remarks" id="feedback__remarks"> <li class="field field--textarea feedback__remarks" id="feedback__remarks">
<label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label> <label for="feedback__remarks__value">Please provide any thoughts or comments on the feedback you received from your peers here.</label>
<textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was..."></textarea> <textarea id="feedback__remarks__value" placeholder="I feel the feedback I received was...">{{ feedback_text }}</textarea>
</li> </li>
</ol> </ol>
@@ -4,8 +4,6 @@ from django.utils.translation import ugettext as _
 from xblock.core import XBlock

 from openassessment.assessment import peer_api
-from submissions import api as submissions_api


 class GradeMixin(object):
@@ -30,6 +28,7 @@ class GradeMixin(object):
         status = workflow.get('status')
         context = {}
         if status == "done":
+            feedback = peer_api.get_assessment_feedback(self.submission_uuid)
             max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
             path = 'openassessmentblock/grade/oa_grade_complete.html'
             assessment_ui_model = self.get_assessment_module('peer-assessment')
@@ -50,6 +49,7 @@ class GradeMixin(object):
                 student_submission["uuid"],
                 assessment_ui_model["must_be_graded_by"]
             )
+            context["feedback_text"] = feedback.get('feedback', '')
             context["student_submission"] = student_submission
             context["peer_assessments"] = peer_assessments
             context["self_assessment"] = self_assessment
@@ -79,12 +79,25 @@ class GradeMixin(object):
     @XBlock.json_handler
     def feedback_submit(self, data, suffix=''):
         """Attach the Assessment Feedback text to some submission."""
-        submission_uuid = self.submission_uuid
+        assessment_ui_model = self.get_assessment_module('peer-assessment') or {}
         assessment_feedback = data.get('feedback', '')
-        raise Exception, "jrbl everything worked up to here" # DEBUG
         if not assessment_feedback:
-            return {'success': False, 'msg': _(u"No feedback given, so none recorded")}
-        feedback_dict = submissions_api.get_assessment_feedback(submission_uuid)
-        feedback_dict['feedback'] = assessment_feedback
-        __ = submissions_api.set_assessment_feedback(feedback_dict)
-        return {'success': True, 'msg': _(u"Feedback saved!")}
+            return {
+                'success': False,
+                'msg': _(u"No feedback given, so none recorded")
+            }
+
+        peer_api.set_assessment_feedback(
+            assessment_ui_model.get('must_grade', 0),
+            {
+                'submission_uuid': self.submission_uuid,
+                'feedback': assessment_feedback,
+                'helpfulness': 0
+            }
+        )
+        return {
+            'success': True,
+            'msg': _(u"Feedback saved!")
+        }
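From the client's side, feedback_submit is a plain JSON handler: it expects a 'feedback' key and answers with a success/msg pair. The shapes below are read directly off the handler body above; note that helpfulness is currently pinned to 0, so only the free-text field round-trips from the UI.

import json

# What the JS client posts (see feedback_assess below):
payload = json.dumps({'feedback': u"The peer reviews were fair and detailed."})

# The two possible replies:
failure = {'success': False, 'msg': u"No feedback given, so none recorded"}
success = {'success': True, 'msg': u"Feedback saved!"}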
...@@ -253,7 +253,7 @@ OpenAssessment.BaseUI.prototype = { ...@@ -253,7 +253,7 @@ OpenAssessment.BaseUI.prototype = {
function(html) { function(html) {
// Load the HTML // Load the HTML
$('#openassessment__grade', ui.element).replaceWith(html); $('#openassessment__grade', ui.element).replaceWith(html);
// Install a click handler for collapse/expand // Install a click handler for collapse/expand
var sel = $('#openassessment__grade', ui.element); var sel = $('#openassessment__grade', ui.element);
ui.setUpCollapseExpand(sel); ui.setUpCollapseExpand(sel);
...@@ -322,10 +322,9 @@ OpenAssessment.BaseUI.prototype = { ...@@ -322,10 +322,9 @@ OpenAssessment.BaseUI.prototype = {
feedback_assess: function() { feedback_assess: function() {
// Send the submission to the server // Send the submission to the server
var feedback = $('#feedback__remarks__value', this.element).val(); var feedback = $('#feedback__remarks__value', this.element).val();
var ui = this;
this.server.feedback_submit(feedback).done( this.server.feedback_submit(feedback).done(
// When we have successfully sent the submission, textarea no longer editable // When we have successfully sent the submission, textarea no longer editable
console.log("Feedback to the assessments submitted, thanks!") // JRBL: FIXME: TODO: make this true console.log("Feedback to the assessments submitted, thanks!")
).fail(function(errMsg) { ).fail(function(errMsg) {
// TODO: display to the user // TODO: display to the user
console.log(errMsg); console.log(errMsg);
...@@ -162,9 +162,8 @@ OpenAssessment.Server.prototype = { ...@@ -162,9 +162,8 @@ OpenAssessment.Server.prototype = {
}, },
/** /**
Send feedback on assessments to the XBlock. * Send feedback on assessments to the XBlock.
FIXME: JRBL: write documentation */
**/
feedback_submit: function(feedback) { feedback_submit: function(feedback) {
var url = this.url('feedback_submit'); var url = this.url('feedback_submit');
var payload = JSON.stringify({ var payload = JSON.stringify({
...@@ -175,11 +174,9 @@ OpenAssessment.Server.prototype = { ...@@ -175,11 +174,9 @@ OpenAssessment.Server.prototype = {
function(data) { function(data) {
if (data.success) { if (data.success) {
defer.resolve(); defer.resolve();
alert("resolved!");
} }
else { else {
defer.rejectWith(this, [data.msg]); defer.rejectWith(this, [data.msg]);
alert("rejected!");
} }
} }
).fail(function(data) { ).fail(function(data) {
...@@ -10,8 +10,6 @@ from django.utils.encoding import force_unicode ...@@ -10,8 +10,6 @@ from django.utils.encoding import force_unicode
from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer
from submissions.models import Submission, StudentItem, Score, ScoreSummary from submissions.models import Submission, StudentItem, Score, ScoreSummary
from openassessment.assessment.serializers import AssessmentFeedbackSerializer
from openassessment.assessment.models import AssessmentFeedback
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -489,25 +487,3 @@ def _get_or_create_student_item(student_item_dict): ...@@ -489,25 +487,3 @@ def _get_or_create_student_item(student_item_dict):
student_item_dict) student_item_dict)
logger.exception(error_message) logger.exception(error_message)
raise SubmissionInternalError(error_message) raise SubmissionInternalError(error_message)
def get_assessment_feedback(submission_uuid):
"""Retrieve a feedback object for an assessment whether it exists or not."""
try:
submission = Submission.objects.get(uuid=submission_uuid)
feedback_obj, feedback_created = AssessmentFeedback.objects.get_or_create(submission=submission)
return AssessmentFeedbackSerializer(feedback_obj).data
except DatabaseError, msg:
error_message = u"An error occurred retrieving assessment feedback for {}.".format(submission_uuid)
logger.exception(error_message)
raise DatabaseError, msg
def set_assessment_feedback(feedback_dict):
"""Set a feedback object for an assessment to have some new values."""
submission = Submission.objects.get(submission_uuid=feedback_dict['submission_uuid'])
feedback_obj = AssessmentFeedback.objects.get(pk=feedback_dict['id'])
if feedback_obj.submission != submission:
raise Exception, "Can't re-associate a piece of feedback" # TODO: less generic
feedback_obj.helpfulness = feedback_dict['helpfulness']
feedback_obj.feedback = feedback_dict['feedback']
feedback_obj.save()
return AssessmentFeedbackSerializer(feedback_obj).data