Commit 30a5cf60 authored by David Ormsbee, committed by Stephen Sanchez

Make peer assessment an optional step.

This also creates the AssessmentWorkflowStep model to track all
steps for new workflows. Some problems with it now:

* a lot of broken tests
* some display oddities on the peer step during (peer, self) workflows
* have not tested to see if there are concurrency issues with step
  model creation under load.
parent 85d6c5a2
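For orientation, a minimal sketch of what this change enables, using the `create_workflow` API extended in the diffs below (the UUID is a placeholder; in practice it comes from the submissions API):

    from openassessment.workflow import api as workflow_api

    # Placeholder UUID; a real one is returned by the submissions API.
    submission_uuid = "1f026797-..."

    # Before this commit, every workflow was implicitly peer -> self.
    # Now the caller lists the steps, so a self-only problem is possible:
    workflow = workflow_api.create_workflow(submission_uuid, ["self"])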
@@ -72,6 +72,11 @@ class PeerAssessmentInternalError(PeerAssessmentError):
     pass
 
+def submitter_is_finished(submission_uuid, requirements):
+    """Temporary bridge method."""
+    return is_complete(submission_uuid, requirements)
+
 def is_complete(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
@@ -134,6 +139,9 @@ def get_score(submission_uuid, requirements):
         "points_possible": items[0].assessment.points_possible,
     }
 
+def assessment_is_finished(submission_uuid, requirements):
+    return bool(get_score(submission_uuid, requirements))
+
 def create_assessment(
     scorer_submission_uuid,
...
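These bridge functions give `peer_api` the pair of predicates that the new step model expects of every assessment API; `self_api` gains the same pair in the next file. A sketch of the implied contract (the helper name is illustrative, not part of the commit):

    def step_status(step_api, submission_uuid, requirements):
        """Any assessment API exposing the two bridge methods can be
        queried uniformly by the workflow, regardless of step type."""
        return {
            "submitter_done": step_api.submitter_is_finished(submission_uuid, requirements),
            "assessment_done": step_api.assessment_is_finished(submission_uuid, requirements),
        }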
@@ -139,8 +139,11 @@ def get_assessment(submission_uuid):
     return serialized_assessment
 
+def submitter_is_finished(submission_uuid, requirements):
+    """Temporary bridge method."""
+    return is_complete(submission_uuid, requirements)
+
-def is_complete(submission_uuid):
+def is_complete(submission_uuid, requirements):
     """
     Check whether a self-assessment has been completed for a submission.
@@ -154,6 +157,48 @@ def is_complete(submission_uuid):
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()
 
+def assessment_is_finished(submission_uuid, requirements):
+    return is_complete(submission_uuid, requirements)
+
+def get_score(submission_uuid, requirements):
+    assessment = get_assessment(submission_uuid)
+    if not assessment:
+        return None
+
+    return {
+        "points_earned": assessment["points_earned"],
+        "points_possible": assessment["points_possible"]
+    }
+
+def get_assessment_scores_by_criteria(submission_uuid):
+    """Get the median score for each rubric criterion
+
+    Args:
+        submission_uuid (str): The submission uuid is used to get the
+            assessments used to score this submission, and generate the
+            appropriate median score.
+
+    Returns:
+        (dict): A dictionary of rubric criterion names, with a median score of
+            the self assessments.
+
+    Raises:
+        SelfAssessmentInternalError: If any error occurs while retrieving
+            information to form the median scores, an error is raised.
+    """
+    try:
+        assessments = list(
+            Assessment.objects.filter(
+                score_type=SELF_TYPE, submission_uuid=submission_uuid
+            ).order_by('-scored_at')[:1]
+        )
+        scores = Assessment.scores_by_criterion(assessments)
+        return Assessment.get_median_score_dict(scores)
+    except DatabaseError:
+        error_message = _(
+            u"Error getting self assessment scores for {}"
+        ).format(submission_uuid)
+        logger.exception(error_message)
+        raise SelfAssessmentInternalError(error_message)
+
 def _log_assessment(assessment, submission):
     """
...
 from django.contrib import admin
 
-from .models import AssessmentWorkflow
+from .models import AssessmentWorkflow, AssessmentWorkflowStep
+
+class AssessmentWorkflowStepInline(admin.StackedInline):
+    model = AssessmentWorkflowStep
+    extra = 0
 
 class AssessmentWorkflowAdmin(admin.ModelAdmin):
     """Admin for the user's overall workflow through open assessment.
@@ -15,5 +20,6 @@ class AssessmentWorkflowAdmin(admin.ModelAdmin):
     )
     list_filter = ('status',)
     search_fields = ('uuid', 'submission_uuid', 'course_id', 'item_id')
+    inlines = (AssessmentWorkflowStepInline,)
 
 admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin)
@@ -9,7 +9,7 @@ from django.db import DatabaseError
 from openassessment.assessment import peer_api
 from submissions import api as sub_api
 
-from .models import AssessmentWorkflow
+from .models import AssessmentWorkflow, AssessmentWorkflowStep
 from .serializers import AssessmentWorkflowSerializer
 
 logger = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
     pass
 
-def create_workflow(submission_uuid):
+def create_workflow(submission_uuid, steps):
     """Begins a new assessment workflow.
 
     Create a new workflow that other assessments will record themselves against.
@@ -66,6 +66,8 @@ def create_workflow(submission_uuid):
     Args:
         submission_uuid (str): The UUID for the submission that all our
             assessments will be evaluating.
+        steps (list): List of steps that are part of the workflow, in the order
+            that the user must complete them. Example: `["peer", "self"]`
 
     Returns:
         dict: Assessment workflow information with the following
@@ -114,6 +116,15 @@ def create_workflow(submission_uuid):
             .format(submission_uuid, err)
         )
 
+    # Raise an error if they specify a step we don't recognize...
+    for step in steps:
+        if step not in AssessmentWorkflow.STEPS:
+            raise AssessmentWorkflowRequestError(
+                u"Step not recognized: {}; Must be one of: {}".format(
+                    step, AssessmentWorkflow.STEPS
+                )
+            )
+
     # We're not using a serializer to deserialize this because the only variable
     # we're getting from the outside is the submission_uuid, which is already
     # validated by this point.
@@ -125,6 +136,13 @@ def create_workflow(submission_uuid):
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
+        workflow_steps = [
+            AssessmentWorkflowStep(
+                workflow=workflow, name=step, order_num=i
+            )
+            for i, step in enumerate(steps)
+        ]
+        workflow.steps.add(*workflow_steps)
     except (
         DatabaseError,
         peer_api.PeerAssessmentError,
...
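A usage sketch of the extended API, assuming `submission_uuid` refers to an existing submission; an unrecognized step name now fails fast:

    from openassessment.workflow import api as workflow_api

    try:
        workflow_api.create_workflow(submission_uuid, ["peer", "staff"])
    except workflow_api.AssessmentWorkflowRequestError as err:
        # "staff" is not in AssessmentWorkflow.STEPS, so this raises:
        # Step not recognized: staff; Must be one of: ['peer', 'self']
        print(err)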
@@ -40,7 +40,6 @@ class Migration(SchemaMigration):
             'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
             'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
             'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
-            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
             'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
             'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
             'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
...
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentWorkflowStep'
        db.create_table('workflow_assessmentworkflowstep', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='steps', to=orm['workflow.AssessmentWorkflow'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('submitter_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('assessment_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
            ('order_num', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('workflow', ['AssessmentWorkflowStep'])

    def backwards(self, orm):
        # Deleting model 'AssessmentWorkflowStep'
        db.delete_table('workflow_assessmentworkflowstep')

    models = {
        'workflow.assessmentworkflow': {
            'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        'workflow.assessmentworkflowstep': {
            'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'AssessmentWorkflowStep'},
            'assessment_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'submitter_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['workflow.AssessmentWorkflow']"})
        }
    }

    complete_apps = ['workflow']
\ No newline at end of file
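Since this is a South migration, it can be applied programmatically as well as from the command line; a sketch using Django's management API (assumes the app label is `workflow`, as in `complete_apps` above):

    from django.core.management import call_command

    # Equivalent to running `migrate workflow` from manage.py under South.
    call_command("migrate", "workflow")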
@@ -16,6 +16,7 @@ import importlib
 
 from django.conf import settings
 from django.db import models
 from django_extensions.db.fields import UUIDField
+from django.utils.timezone import now
 from model_utils import Choices
 from model_utils.models import StatusModel, TimeStampedModel
@@ -46,9 +47,12 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
     an after the fact recording of the last known state of that information so
     we can search easily.
     """
-    STATUS_VALUES = [
+    STEPS = [
         "peer",  # User needs to assess peer submissions
         "self",  # User needs to assess themselves
+    ]
+
+    STATUS_VALUES = STEPS + [
         "waiting",  # User has done all necessary assessment but hasn't been
                     # graded yet -- we're waiting for assessments of their
                     # submission by others.
@@ -81,23 +85,23 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return sub_api.get_latest_score_for_submission(self.submission_uuid)
 
     def status_details(self, assessment_requirements):
-        return {
-            "peer": {
-                "complete": self._is_peer_complete(assessment_requirements),
-            },
-            "self": {
-                "complete": self._is_self_complete(),
-            },
-        }
-
-    def _is_peer_complete(self, assessment_requirements):
-        from openassessment.assessment import peer_api
-        peer_requirements = assessment_requirements["peer"]
-        return peer_api.is_complete(self.submission_uuid, peer_requirements)
-
-    def _is_self_complete(self):
-        from openassessment.assessment import self_api
-        return self_api.is_complete(self.submission_uuid)
+        from openassessment.assessment import peer_api, self_api
+
+        status_dict = {}
+
+        if "peer" in assessment_requirements:
+            status_dict["peer"] = {
+                "complete": peer_api.submitter_is_finished(
+                    self.submission_uuid,
+                    assessment_requirements["peer"]
+                )
+            }
+        if "self" in assessment_requirements:
+            status_dict["self"] = {
+                "complete": self_api.submitter_is_finished(self.submission_uuid, {})
+            }
+
+        return status_dict
 
     def update_from_assessments(self, assessment_requirements):
         """Query self and peer APIs and change our status if appropriate.
@@ -130,65 +134,140 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             specific requirements in this dict.
         """
-        from openassessment.assessment import peer_api
+        from openassessment.assessment import peer_api, self_api
 
         # If we're done, we're done -- it doesn't matter if requirements have
         # changed because we've already written a score.
         if self.status == self.STATUS.done:
             return
 
-        # Have they completed the peer and self steps?
-        peer_complete = self._is_peer_complete(assessment_requirements)
-        self_complete = self._is_self_complete()
-
-        if peer_complete and self_complete:
-            # If they've completed both, they're at least waiting, possibly done
-            new_status = self.STATUS.waiting
-        elif peer_complete:
-            # If they haven't done self assessment yet, that's their status
-            new_status = self.STATUS.self
-        else:
-            # Default starting status is peer
-            new_status = self.STATUS.peer
-
-        # If we're at least waiting, let's check if we have a peer score and
-        # can move all the way to done
-        if new_status == self.STATUS.waiting:
-            score = peer_api.get_score(
-                self.submission_uuid, assessment_requirements["peer"]
-            )
-            if score:
-                sub_api.set_score(
-                    self.submission_uuid,
-                    score["points_earned"],
-                    score["points_possible"]
-                )
-
-                # This should be replaced by using the event tracking API, but
-                # that's not quite ready yet. So we're making this temp hack.
-                emit_event({
-                    "context": {
-                        "course_id": self.course_id
-                    },
-                    "event": {
-                        "submission_uuid": self.submission_uuid,
-                        "points_earned": score["points_earned"],
-                        "points_possible": score["points_possible"],
-                    },
-                    "event_source": "server",
-                    "event_type": "openassessment.workflow.score",
-                    "time": datetime.utcnow(),
-                })
-
-                new_status = self.STATUS.done
+        # Update our AssessmentWorkflowStep models with the latest from our APIs
+        steps = self.update_steps(assessment_requirements)
+
+        # Fetch name of the first step that the submitter hasn't yet completed.
+        new_status = next(
+            (step.name for step in steps if step.submitter_completed_at is None),
+            self.STATUS.waiting  # if nothing's left to complete, we're waiting
+        )
+
+        # If the submitter has done all they need to do, let's check to see if
+        # all steps have been fully assessed (i.e. we can score it).
+        if (new_status == self.STATUS.waiting and
+                all(step.assessment_completed_at for step in steps)):
+
+            # At this point, we're trying to give a score. We currently have a
+            # very simple rule for this -- if it has a peer step, use that for
+            # scoring. If not, use the self step. Later on, we may put more
+            # interesting rules here.
+            step_names = [step.name for step in steps]
+            score = None
+            if self.STATUS.peer in step_names:
+                score = peer_api.get_score(
+                    self.submission_uuid,
+                    assessment_requirements[self.STATUS.peer]
+                )
+            elif self.STATUS.self in step_names:
+                score = self_api.get_score(self.submission_uuid, {})
+
+            if score:
+                self.set_score(score)
+                new_status = self.STATUS.done
 
         # Finally save our changes if the status has changed
         if self.status != new_status:
             self.status = new_status
             self.save()
 
+    def update_steps(self, assessment_requirements):
+        from openassessment.assessment import peer_api, self_api
+
+        steps = list(self.steps.all())
+        if not steps:
+            # If no steps exist for this AssessmentWorkflow, assume
+            # peer -> self for backwards compatibility
+            self.steps.add(
+                AssessmentWorkflowStep(name=self.STATUS.peer, order_num=0),
+                AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
+            )
+            steps = list(self.steps.all())
+
+        # Mapping of step names to the APIs that power them
+        steps_to_apis = {
+            self.STATUS.self: self_api,
+            self.STATUS.peer: peer_api
+        }
+
+        # Go through each step and update its status. Note that because we
+        # take the philosophy that once you're done, you're done, a completed
+        # step is never marked incomplete again, even if requirements change.
+        for step in steps:
+            step_changed = False
+            step_api = steps_to_apis[step.name]
+            step_reqs = assessment_requirements.get(step.name, {})
+
+            # Has the user completed their obligations for this step?
+            if (step.submitter_completed_at is None and
+                    step_api.submitter_is_finished(self.submission_uuid, step_reqs)):
+                step.submitter_completed_at = now()
+                step_changed = True
+
+            # Has the step received a score?
+            if (step.assessment_completed_at is None and
+                    step_api.assessment_is_finished(self.submission_uuid, step_reqs)):
+                step.assessment_completed_at = now()
+                step_changed = True
+
+            if step_changed:
+                step.save()
+
+        return steps
+
+    def set_score(self, score):
+        sub_api.set_score(
+            self.submission_uuid,
+            score["points_earned"],
+            score["points_possible"]
+        )
+
+        # This should be replaced by using the event tracking API, but
+        # that's not quite ready yet. So we're making this temp hack.
+        emit_event({
+            "context": {
+                "course_id": self.course_id
+            },
+            "event": {
+                "submission_uuid": self.submission_uuid,
+                "points_earned": score["points_earned"],
+                "points_possible": score["points_possible"],
+            },
+            "event_source": "server",
+            "event_type": "openassessment.workflow.score",
+            "time": datetime.utcnow(),
+        })
+
+
+class AssessmentWorkflowStep(models.Model):
+    """An individual step in the overall workflow process.
+
+    Similar caveats apply to this class as apply to `AssessmentWorkflow`. What
+    we're storing in the database is usually but not always current information.
+    In particular, if the problem definition has changed the requirements for a
+    particular step in the workflow, then what is in the database will be out of
+    sync until someone views this problem again (which will trigger a workflow
+    update to occur).
+    """
+    workflow = models.ForeignKey(AssessmentWorkflow, related_name="steps")
+    name = models.CharField(max_length=20)
+    submitter_completed_at = models.DateTimeField(default=None, null=True)
+    assessment_completed_at = models.DateTimeField(default=None, null=True)
+    order_num = models.PositiveIntegerField()
+
+    # Store the score for this step as well?
+
+    class Meta:
+        ordering = ["workflow", "order_num"]
 
 # Just here to record thoughts for later:
 #
...
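A self-contained sketch of the new status computation: iterating the ordered steps, `next()` yields the first step whose submitter work is unfinished, falling back to "waiting" when the submitter has nothing left to do. The namedtuple stands in for `AssessmentWorkflowStep`:

    from collections import namedtuple

    # Stand-in for AssessmentWorkflowStep, with only the fields used here.
    Step = namedtuple("Step", ["name", "submitter_completed_at"])

    steps = [
        Step("peer", submitter_completed_at="2014-03-10T12:00:00Z"),
        Step("self", submitter_completed_at=None),
    ]

    new_status = next(
        (step.name for step in steps if step.submitter_completed_at is None),
        "waiting"  # nothing left for the submitter to do
    )
    assert new_status == "self"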
@@ -70,12 +70,26 @@ class GradeMixin(object):
         Returns:
             tuple of context (dict), template_path (string)
         """
-        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
+        # Peer specific stuff...
+        assessment_steps = [asmnt['name'] for asmnt in self.rubric_assessments]
+        submission_uuid = workflow['submission_uuid']
+
+        if "peer-assessment" in assessment_steps:
+            feedback = peer_api.get_assessment_feedback(submission_uuid)
+            peer_assessments = peer_api.get_assessments(submission_uuid)
+            has_submitted_feedback = peer_api.get_assessment_feedback(submission_uuid) is not None
+        else:
+            feedback = None
+            peer_assessments = []
+            has_submitted_feedback = False
+
+        if "self-assessment" in assessment_steps:
+            self_assessment = self_api.get_assessment(submission_uuid)
+        else:
+            self_assessment = None
+
         feedback_text = feedback.get('feedback', '') if feedback else ''
-        student_submission = sub_api.get_submission(workflow['submission_uuid'])
-        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
-        self_assessment = self_api.get_assessment(student_submission['uuid'])
-        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
+        student_submission = sub_api.get_submission(submission_uuid)
 
         # We retrieve the score from the workflow, which in turn retrieves
         # the score for our current submission UUID.
@@ -94,14 +108,23 @@ class GradeMixin(object):
         }
 
         # Update the scores we will display to the user
-        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
-        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
-        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
+        # Note that we are updating a *copy* of the rubric criteria stored in
+        # the XBlock field
+        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
+        if "peer-assessment" in assessment_steps:
+            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
+        elif "self-assessment" in assessment_steps:
+            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
+
         if median_scores is not None and max_scores is not None:
             for criterion in context["rubric_criteria"]:
                 criterion["median_score"] = median_scores[criterion["name"]]
                 criterion["total_value"] = max_scores[criterion["name"]]
 
         return ('openassessmentblock/grade/oa_grade_complete.html', context)
 
     def render_grade_incomplete(self, workflow):
@@ -114,10 +137,16 @@ class GradeMixin(object):
         Returns:
             tuple of context (dict), template_path (string)
         """
+        def _is_incomplete(step):
+            return (
+                step in workflow["status_details"] and
+                not workflow["status_details"][step]["complete"]
+            )
+
         incomplete_steps = []
-        if not workflow["status_details"]["peer"]["complete"]:
+        if _is_incomplete("peer"):
             incomplete_steps.append("Peer Assessment")
-        if not workflow["status_details"]["self"]["complete"]:
+        if _is_incomplete("self"):
             incomplete_steps.append("Self Assessment")
 
         return (
@@ -131,7 +160,8 @@ class GradeMixin(object):
         Submit feedback on an assessment.
 
         Args:
-            data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode).
+            data (dict): Can provide keys 'feedback_text' (unicode) and
+                'feedback_options' (list of unicode).
 
         Kwargs:
             suffix (str): Unused
...
@@ -313,6 +313,10 @@ class OpenAssessmentBlock(
                 load('static/xml/poverty_rubric_example.xml')
             ),
             (
+                "OpenAssessmentBlock (Self Only) Rubric",
+                load('static/xml/poverty_self_only_example.xml')
+            ),
+            (
                 "OpenAssessmentBlock Censorship Rubric",
                 load('static/xml/censorship_rubric_example.xml')
             ),
@@ -333,6 +337,10 @@ class OpenAssessmentBlock(
         return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
 
+    @property
+    def assessment_steps(self):
+        return [asmnt['name'] for asmnt in self.rubric_assessments]
+
     def render_assessment(self, path, context_dict=None):
         """Render an Assessment Module's HTML
@@ -421,18 +429,19 @@ class OpenAssessmentBlock(
         ]
 
         # Resolve unspecified dates and date strings to datetimes
-        start, due, date_ranges = resolve_dates(self.start, self.due, [submission_range] + assessment_ranges)
+        start, due, date_ranges = resolve_dates(
+            self.start, self.due, [submission_range] + assessment_ranges
+        )
+        step_ranges = {"submission": date_ranges[0]}
+        for step_num, asmnt in enumerate(self.rubric_assessments, start=1):
+            step_ranges[asmnt["name"]] = date_ranges[step_num]
 
         # Based on the step, choose the date range to consider
         # We hard-code this to the submission -> peer -> self workflow for now;
         # later, we can revisit to make this more flexible.
         open_range = (start, due)
-        if step == "submission":
-            open_range = date_ranges[0]
-        if step == "peer-assessment":
-            open_range = date_ranges[1]
-        if step == "self-assessment":
-            open_range = date_ranges[2]
+        if step in ["submission", "peer-assessment", "self-assessment"]:
+            open_range = step_ranges[step]
 
         # Course staff always have access to the problem
         if course_staff is None:
...
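The `step_ranges` lookup above relies on `resolve_dates` returning one range per input, in order: index 0 is the submission range, and each assessment follows in `rubric_assessments` order. A standalone sketch with hypothetical dates:

    rubric_assessments = [{"name": "peer-assessment"}, {"name": "self-assessment"}]
    date_ranges = [
        ("2014-03-01", "2014-03-08"),   # submission
        ("2014-03-08", "2014-03-15"),   # peer-assessment
        ("2014-03-15", "2014-03-22"),   # self-assessment
    ]

    step_ranges = {"submission": date_ranges[0]}
    for step_num, asmnt in enumerate(rubric_assessments, start=1):
        step_ranges[asmnt["name"]] = date_ranges[step_num]

    assert step_ranges["self-assessment"] == ("2014-03-15", "2014-03-22")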
 import logging
 
 from django.utils.translation import ugettext as _
+from webob import Response
 from xblock.core import XBlock
 
 from openassessment.assessment import peer_api
 from openassessment.assessment.peer_api import (
     PeerAssessmentInternalError, PeerAssessmentRequestError,
@@ -114,6 +117,8 @@ class PeerAssessmentMixin(object):
             number of assessments.
         """
+        if "peer-assessment" not in self.assessment_steps:
+            return Response(u"")
         continue_grading = data.params.get('continue_grading', False)
         path, context_dict = self.peer_path_and_context(continue_grading)
         return self.render_assessment(path, context_dict)
...
@@ -2,6 +2,8 @@ import logging
 
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
+from webob import Response
+
 from openassessment.assessment import self_api
 from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api
@@ -24,6 +26,9 @@ class SelfAssessmentMixin(object):
     @XBlock.handler
     def render_self_assessment(self, data, suffix=''):
+        if "self-assessment" not in self.assessment_steps:
+            return Response(u"")
+
         try:
             path, context = self.self_path_and_context()
         except:
...
@@ -181,14 +181,14 @@ OpenAssessment.BaseView.prototype = {
     toggleActionError: function(type, msg) {
         var element = this.element;
         var container = null;
         if (type == 'save') {
             container = '.response__submission__actions';
         }
         else if (type == 'submit' || type == 'peer' || type == 'self') {
             container = '.step__actions';
         }
         else if (type == 'feedback_assess') {
             container = '.submission__feedback__actions';
         }
 
         // If we don't have anywhere to put the message, just log it to the console
@@ -219,10 +219,10 @@ OpenAssessment.BaseView.prototype = {
         $(container + ' .step__status__value .copy').html(gettext('Unable to Load'));
     },
 
     /**
      * Get the contents of the Step Actions error message box, for unit test validation.
      *
      * Step Actions are the UX-level parts of the student interaction flow -
      * Submission, Peer Assessment, and Self Assessment. Since steps are mutually
      * exclusive, only one error box should be rendered on screen at a time.
      *
...
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment submission_due="2015-03-11T18:20">
    <title>
        Global Poverty
    </title>
    <rubric>
        <prompt>
            Given the state of the world today, what do you think should be done to combat poverty?
            Read for conciseness, clarity of thought, and form.
        </prompt>
        <criterion>
            <name>concise</name>
            <prompt>How concise is it?</prompt>
            <option points="0">
                <name>Neal Stephenson (late)</name>
                <explanation>
                    In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
                    While hilarious, in recent years his work has been anything but 'concise'.
                </explanation>
            </option>
            <option points="1">
                <name>HP Lovecraft</name>
                <explanation>
                    If the author wrote something cyclopean that staggers the mind, score it thus.
                </explanation>
            </option>
            <option points="3">
                <name>Robert Heinlein</name>
                <explanation>
                    Tight prose that conveys a wealth of information about the world in relatively
                    few words. Example, "The door irised open and he stepped inside."
                </explanation>
            </option>
            <option points="4">
                <name>Neal Stephenson (early)</name>
                <explanation>
                    When Stephenson still had an editor, his prose was dense, with anecdotes about
                    nitrox abuse implying main characters' whole life stories.
                </explanation>
            </option>
            <option points="5">
                <name>Ernest Hemingway</name>
                <explanation>
                    Score the work this way if it makes you weep, and the removal of a single
                    word would make you sneer.
                </explanation>
            </option>
        </criterion>
        <criterion>
            <name>clear-headed</name>
            <prompt>How clear is the thinking?</prompt>
            <option points="0">
                <name>Yogi Berra</name>
                <explanation></explanation>
            </option>
            <option points="1">
                <name>Hunter S. Thompson</name>
                <explanation></explanation>
            </option>
            <option points="2">
                <name>Robert Heinlein</name>
                <explanation></explanation>
            </option>
            <option points="3">
                <name>Isaac Asimov</name>
                <explanation></explanation>
            </option>
            <option points="10">
                <name>Spock</name>
                <explanation>
                    Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
                    and unemotional examination of the facts. This is the only item explained in this category,
                    to show that explained and unexplained items can be mixed.
                </explanation>
            </option>
        </criterion>
        <criterion>
            <name>form</name>
            <prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
            <option points="0">
                <name>lolcats</name>
                <explanation></explanation>
            </option>
            <option points="1">
                <name>Facebook</name>
                <explanation></explanation>
            </option>
            <option points="2">
                <name>Reddit</name>
                <explanation></explanation>
            </option>
            <option points="3">
                <name>metafilter</name>
                <explanation></explanation>
            </option>
            <option points="4">
                <name>Usenet, 1996</name>
                <explanation></explanation>
            </option>
            <option points="5">
                <name>The Elements of Style</name>
                <explanation></explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="self-assessment" />
    </assessments>
</openassessment>
@@ -124,7 +124,7 @@ class SubmissionMixin(object):
             student_sub_dict = {'text': student_sub}
             submission = api.create_submission(student_item_dict, student_sub_dict)
-            workflow_api.create_workflow(submission["uuid"])
+            self.create_workflow(submission["uuid"])
             self.submission_uuid = submission["uuid"]
 
             # Emit analytics event...
...
@@ -23,7 +23,7 @@
         ]
     },
     "self_only": {
-        "valid": false,
+        "valid": true,
         "assessments": [
             {
                 "name": "self-assessment"
...
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>Concise</name>
            <prompt>How concise is it?</prompt>
            <option points="0">
                <name>Neal Stephenson (late)</name>
                <explanation>Neal Stephenson explanation</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="self-assessment" />
        <assessment name="peer-assessment" />
    </assessments>
</openassessment>
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>
        Given the state of the world today, what do you think should be done to
        combat poverty? Please answer in a short essay of 200-300 words.
    </prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>Concise</name>
            <prompt>How concise is it?</prompt>
            <option points="0">
                <name>Neal Stephenson (late)</name>
                <explanation>Neal Stephenson explanation</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment" />
    </assessments>
</openassessment>
 <openassessment>
-    <title>Open Assessment Test</title>
+    <title>Only Self Assessment</title>
     <prompt>
         Given the state of the world today, what do you think should be done to
         combat poverty? Please answer in a short essay of 200-300 words.
...
 {
-    "peer": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_grade": 5,
-            "must_be_graded_by": 3
-        }
-    },
-    "self": {
-        "assessment": {
-            "name": "self-assessment",
-            "must_grade": 2,
-            "must_be_graded_by": 1
-        }
-    },
-    "must_be_graded_by_equals_must_grade": {
-        "assessment": {
-            "name": "self-assessment",
-            "must_grade": 1,
-            "must_be_graded_by": 1
-        }
-    }
+    "peer_then_self": [
+        {
+            "name": "peer-assessment",
+            "must_grade": 5,
+            "must_be_graded_by": 3
+        },
+        {
+            "name": "self-assessment"
+        }
+    ],
+    "self_only": [
+        {
+            "name": "self-assessment"
+        }
+    ],
+    "must_be_graded_by_equals_must_grade": [
+        {
+            "name": "peer-assessment",
+            "must_grade": 1,
+            "must_be_graded_by": 1
+        },
+        {
+            "name": "self-assessment"
+        }
+    ]
 }
@@ -94,10 +94,17 @@ class StudioViewTest(XBlockHandlerTestCase):
     # If and when we remove this restriction, this test can be deleted.
     @scenario('data/basic_scenario.xml')
     def test_update_xml_invalid_assessment_combo(self, xblock):
-        request = json.dumps({'xml': self.load_fixture_str('data/invalid_assessment_combo.xml')})
-        resp = self.request(xblock, 'update_xml', request, response_format='json')
-        self.assertFalse(resp['success'])
-        self.assertIn("must have exactly two assessments", resp['msg'].lower())
+        invalid_workflows = [
+            'invalid_assessment_combo_order',
+            'invalid_assessment_combo_peer_only'
+        ]
+        for invalid_workflow in invalid_workflows:
+            request = json.dumps(
+                {'xml': self.load_fixture_str('data/{}.xml'.format(invalid_workflow))}
+            )
+            resp = self.request(xblock, 'update_xml', request, response_format='json')
+            self.assertFalse(resp['success'])
+            self.assertIn("supported assessment flows are", resp['msg'].lower())
 
 @data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
 @scenario('data/basic_scenario.xml')
...
@@ -14,7 +14,7 @@ class AssessmentValidationTest(TestCase):
     @ddt.file_data('data/valid_assessments.json')
     def test_valid_assessment(self, data):
-        success, msg = validate_assessments([data['assessment']])
+        success, msg = validate_assessments(data)
         self.assertTrue(success)
         self.assertEqual(msg, u'')
@@ -29,12 +29,11 @@
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)
 
-    # Currently, we enforce the restriction that there must be
-    # exactly two assessments, in the order (a) peer, then (b) self.
-    # If and when we remove that restriction, this test can be deleted.
+    # Make sure only legal assessment combinations are allowed. For now, that's
+    # (peer -> self), and (self)
     @ddt.file_data('data/assessment_combo.json')
-    def test_enforce_peer_then_self(self, data):
-        success, msg = validate_assessments(data['assessments'], enforce_peer_then_self=True)
+    def test_enforce_assessment_combo_restrictions(self, data):
+        success, msg = validate_assessments(data['assessments'])
         self.assertEqual(success, data['valid'], msg=msg)
 
         if not success:
...
@@ -43,29 +43,38 @@ def _duplicates(items):
     return set(x for x in items if counts[x] > 1)
 
-def validate_assessments(assessments, enforce_peer_then_self=False):
+def validate_assessments(assessments):
     """
     Check that the assessment dict is semantically valid.
 
+    Valid assessment steps are currently:
+    * peer, then self
+    * self only
+
     Args:
         assessments (list of dict): list of serialized assessment models.
 
-    Kwargs:
-        enforce_peer_then_self (bool): If True, enforce the requirement that there
-            must be exactly two assessments: first, a peer-assessment, then a self-assessment.
-
     Returns:
         tuple (is_valid, msg) where
             is_valid is a boolean indicating whether the assessment is semantically valid
             and msg describes any validation errors found.
     """
-    if enforce_peer_then_self:
-        if len(assessments) != 2:
-            return (False, _("This problem must have exactly two assessments."))
-        if assessments[0].get('name') != 'peer-assessment':
-            return (False, _("The first assessment must be a peer assessment."))
-        if assessments[1].get('name') != 'self-assessment':
-            return (False, _("The second assessment must be a self assessment."))
+    def _self_only(assessments):
+        return len(assessments) == 1 and assessments[0].get('name') == 'self-assessment'
+
+    def _peer_then_self(assessments):
+        return (
+            len(assessments) == 2 and
+            assessments[0].get('name') == 'peer-assessment' and
+            assessments[1].get('name') == 'self-assessment'
+        )
+
+    # Right now, there are two allowed scenarios: (peer -> self) and (self)
+    if not (_self_only(assessments) or _peer_then_self(assessments)):
+        return (
+            False,
+            _("The only supported assessment flows are (peer, self) or (self).")
+        )
 
     if len(assessments) == 0:
         return (False, _("This problem must include at least one assessment."))
@@ -188,7 +197,7 @@ def validator(oa_block, strict_post_release=True):
     """
     def _inner(rubric_dict, submission_dict, assessments):
-        success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
+        success, msg = validate_assessments(assessments)
         if not success:
             return (False, msg)
...
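A usage sketch of the relaxed validator, using the combinations from the updated fixtures (assumes `validate_assessments` is imported from the validation module shown above):

    peer_then_self = [
        {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
        {"name": "self-assessment"},
    ]
    self_only = [{"name": "self-assessment"}]
    peer_only = [{"name": "peer-assessment"}]

    assert validate_assessments(peer_then_self)[0] is True
    assert validate_assessments(self_only)[0] is True

    # Peer-only is still rejected, now with the new error message:
    is_valid, msg = validate_assessments(peer_only)
    assert is_valid is False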
@@ -8,6 +8,22 @@ class WorkflowMixin(object):
     def handle_workflow_info(self, data, suffix=''):
         return self.get_workflow_info()
 
+    def create_workflow(self, submission_uuid):
+        def _convert_rubric_assessment_name(ra_name):
+            """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
+            short_name, suffix = ra_name.split("-")
+            return short_name
+
+        # rubric_assessments stores names as "self-assessment",
+        # "peer-assessment", while the model is expecting "self", "peer".
+        # Therefore, this conversion step. We should refactor later to
+        # standardize.
+        steps = [
+            _convert_rubric_assessment_name(ra["name"])
+            for ra in self.rubric_assessments
+        ]
+        workflow_api.create_workflow(submission_uuid, steps)
+
     def workflow_requirements(self):
         """
         Retrieve the requirements from each assessment module
...
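The conversion helper above is just a suffix strip; a minimal standalone check of its behavior:

    def _convert_rubric_assessment_name(ra_name):
        """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
        short_name, suffix = ra_name.split("-")
        return short_name

    assert _convert_rubric_assessment_name("peer-assessment") == "peer"
    assert _convert_rubric_assessment_name("self-assessment") == "self"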