Commit aaf9bdb2 by Stephen Sanchez

Merge pull request #302 from edx/ormsbee/workflow_steps

Make peer assessment an optional step.
Parents: ecb8e976, 2702e77b
......@@ -72,7 +72,7 @@ class PeerAssessmentInternalError(PeerAssessmentError):
pass
def is_complete(submission_uuid, requirements):
def submitter_is_finished(submission_uuid, requirements):
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
......@@ -99,7 +99,7 @@ def get_score(submission_uuid, requirements):
dict with keys "points_earned" and "points_possible".
"""
# User hasn't completed their own submission yet
if not is_complete(submission_uuid, requirements):
if not submitter_is_finished(submission_uuid, requirements):
return None
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
......@@ -135,6 +135,10 @@ def get_score(submission_uuid, requirements):
}
def assessment_is_finished(submission_uuid, requirements):
return bool(get_score(submission_uuid, requirements))
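With the old is_complete split into submitter_is_finished and assessment_is_finished, each assessment module now exposes the same per-step interface to the workflow. A minimal sketch of that interface for the peer step, assuming the requirement values used by this commit's tests and a placeholder UUID:

from openassessment.assessment import peer_api

requirements = {"must_grade": 5, "must_be_graded_by": 3}
submission_uuid = "222bdf3d-a88e-11e3-859e-040ccee02800"   # placeholder UUID

# Has the learner graded enough of their peers?
if peer_api.submitter_is_finished(submission_uuid, requirements):
    # Have enough peers graded the learner? Only then does a score exist.
    if peer_api.assessment_is_finished(submission_uuid, requirements):
        score = peer_api.get_score(submission_uuid, requirements)
        # e.g. {"points_earned": 12, "points_possible": 14}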
def create_assessment(
scorer_submission_uuid,
scorer_id,
......
......@@ -3,6 +3,7 @@ Public interface for self-assessment.
"""
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
......@@ -21,13 +22,30 @@ SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.self_api")
class SelfAssessmentRequestError(Exception):
class SelfAssessmentError(Exception):
"""Generic Self Assessment Error
Raised when an error occurs while processing a request related to the
Self Assessment Workflow.
"""
pass
class SelfAssessmentRequestError(SelfAssessmentError):
"""
There was a problem with the request for a self-assessment.
"""
pass
class SelfAssessmentInternalError(SelfAssessmentError):
"""
There was an internal problem while accessing the self-assessment api.
"""
pass
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
"""
Create a self-assessment for a submission.
......@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, submission)
# Return the serialized assessment
return assessment_dict
......@@ -140,21 +157,104 @@ def get_assessment(submission_uuid):
return serialized_assessment
def is_complete(submission_uuid):
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
bool
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
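The self-assessment module mirrors that same interface. A hedged sketch, reusing the placeholder UUID from the docstrings above:

from openassessment.assessment import self_api

uuid = "222bdf3d-a88e-11e3-859e-040ccee02800"   # placeholder from the docstrings
if self_api.submitter_is_finished(uuid, {}):    # requirements are unused for self
    score = self_api.get_score(uuid, {})
    # e.g. {"points_earned": 5, "points_possible": 10}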
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
Args:
submission_uuid (str): The submission uuid is used to get the
assessments used to score this submission, and generate the
appropriate median score.
Returns:
(dict): A dictionary of rubric criterion names, with a median score of
the peer assessments.
Raises:
SelfAssessmentInternalError: If any error occurs while retrieving
information to form the median scores, an error is raised.
"""
try:
assessments = list(
Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1]
)
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
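get_assessment_scores_by_criteria only ever looks at the most recent self-assessment (the [:1] slice above), so the "median" per criterion is simply the points of the option the learner selected. A sketch of the return shape, with illustrative criterion names:

# Illustrative only: criterion names echo the example rubrics in this repo.
scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
# e.g. {"concise": 3, "clear-headed": 10, "form": 5}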
def _log_assessment(assessment, submission):
"""
Log the creation of a self-assessment.
......
......@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
STEPS = ['peer', 'self']
@ddt
class TestPeerApi(CacheResetTest):
......@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest):
'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY
}
self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))
def test_completeness(self):
"""
......@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest):
'must_grade': REQUIRED_GRADED,
'must_be_graded_by': REQUIRED_GRADED_BY
}
self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))
def test_find_active_assessments(self):
buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
......@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest):
new_student_item["student_id"] = student
submission = sub_api.create_submission(new_student_item, answer, date)
peer_api.create_peer_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], STEPS)
return submission, new_student_item
......@@ -9,7 +9,7 @@ import pytz
from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission
from openassessment.assessment.self_api import (
create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
)
......@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest):
# Now there should be a submission, but no self-assessment
assessment = get_assessment(submission["uuid"])
self.assertIs(assessment, None)
self.assertFalse(is_complete(submission['uuid']))
self.assertFalse(submitter_is_finished(submission['uuid'], {}))
# Create a self-assessment for the submission
assessment = create_assessment(
......@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest):
)
# Self-assessment should be complete
self.assertTrue(is_complete(submission['uuid']))
self.assertTrue(submitter_is_finished(submission['uuid'], {}))
# Retrieve the self-assessment
retrieved = get_assessment(submission["uuid"])
......@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest):
def test_is_complete_no_submission(self):
# This submission uuid does not exist
self.assertFalse(is_complete('abc1234'))
self.assertFalse(submitter_is_finished('abc1234', {}))
......@@ -9,6 +9,7 @@ from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api, self_api
STEPS = ['peer', 'self']
class Command(BaseCommand):
......@@ -131,7 +132,7 @@ class Command(BaseCommand):
"""
answer = {'text': " ".join(loremipsum.get_paragraphs(5))}
submission = sub_api.create_submission(student_item, answer)
workflow_api.create_workflow(submission['uuid'])
workflow_api.create_workflow(submission['uuid'], STEPS)
workflow_api.update_from_assessments(
submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
)
......
......@@ -43,7 +43,7 @@ class UploadDataTest(TestCase):
}
submission_text = "test submission {}".format(index)
submission = sub_api.create_submission(student_item, submission_text)
workflow_api.create_workflow(submission['uuid'])
workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])
# Create and upload the archive of CSV files
# This should generate the files even though
......
......@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest):
}
submission_text = "test submission {}".format(index)
submission = sub_api.create_submission(student_item, submission_text)
workflow_api.create_workflow(submission['uuid'])
workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])
# Generate a CSV file for the submissions
output_streams = self._output_streams(['submission'])
......
from django.contrib import admin
from .models import AssessmentWorkflow
from .models import AssessmentWorkflow, AssessmentWorkflowStep
class AssessmentWorkflowStepInline(admin.StackedInline):
model = AssessmentWorkflowStep
extra = 0
class AssessmentWorkflowAdmin(admin.ModelAdmin):
"""Admin for the user's overall workflow through open assessment.
......@@ -15,5 +20,6 @@ class AssessmentWorkflowAdmin(admin.ModelAdmin):
)
list_filter = ('status',)
search_fields = ('uuid', 'submission_uuid', 'course_id', 'item_id')
inlines = (AssessmentWorkflowStepInline,)
admin.site.register(AssessmentWorkflow, AssessmentWorkflowAdmin)
......@@ -9,7 +9,7 @@ from django.db import DatabaseError
from openassessment.assessment import peer_api
from submissions import api as sub_api
from .models import AssessmentWorkflow
from .models import AssessmentWorkflow, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer
logger = logging.getLogger(__name__)
......@@ -58,7 +58,7 @@ class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
pass
def create_workflow(submission_uuid):
def create_workflow(submission_uuid, steps):
"""Begins a new assessment workflow.
Create a new workflow that other assessments will record themselves against.
......@@ -66,6 +66,8 @@ def create_workflow(submission_uuid):
Args:
submission_uuid (str): The UUID for the submission that all our
assessments will be evaluating.
steps (list): List of steps that are part of the workflow, in the order
that the user must complete them. Example: `["peer", "self"]`
Returns:
dict: Assessment workflow information with the following
......@@ -85,7 +87,7 @@ def create_workflow(submission_uuid):
AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
not exist or is of an invalid type.
AssessmentWorkflowInternalError: Unexpected internal error, such as the
submissions app not being available or a database configuation
submissions app not being available or a database configuration
problem.
"""
......@@ -98,7 +100,7 @@ def create_workflow(submission_uuid):
try:
submission_dict = sub_api.get_submission_and_student(submission_uuid)
except sub_api.SubmissionNotFoundError as err:
except sub_api.SubmissionNotFoundError:
err_msg = sub_err_msg("submission not found")
logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg)
......@@ -107,27 +109,51 @@ def create_workflow(submission_uuid):
logger.error(err_msg)
raise AssessmentWorkflowRequestError(err_msg)
except sub_api.SubmissionInternalError as err:
err_msg = sub_err_msg(err)
logger.error(err)
raise AssessmentWorkflowInternalError(
u"retrieving submission {} failed with unknown error: {}"
.format(submission_uuid, err)
)
# Raise an error if they specify a step we don't recognize...
invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
if invalid_steps:
raise AssessmentWorkflowRequestError(
u"The following steps were not recognized: {}; Must be one of {}".format(
invalid_steps, AssessmentWorkflow.STEPS
)
)
# We're not using a serializer to deserialize this because the only variable
# we're getting from the outside is the submission_uuid, which is already
# validated by this point.
status = AssessmentWorkflow.STATUS.peer
if steps[0] == "peer":
try:
peer_api.create_peer_workflow(submission_uuid)
except peer_api.PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
elif steps[0] == "self":
status = AssessmentWorkflow.STATUS.self
try:
peer_api.create_peer_workflow(submission_uuid)
workflow = AssessmentWorkflow.objects.create(
submission_uuid=submission_uuid,
status=AssessmentWorkflow.STATUS.peer,
status=status,
course_id=submission_dict['student_item']['course_id'],
item_id=submission_dict['student_item']['item_id'],
)
workflow_steps = [
AssessmentWorkflowStep(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(steps)
]
workflow.steps.add(*workflow_steps)
except (
DatabaseError,
peer_api.PeerAssessmentError,
sub_api.SubmissionError
) as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
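A hedged sketch of the new create_workflow call for a self-only problem. The student_item values are placeholders; the behavior to note is that no PeerWorkflow row is created and the workflow starts in the "self" status rather than "peer":

from openassessment.workflow import api as workflow_api
from submissions import api as sub_api

student_item = {
    "student_id": "student_1",
    "course_id": "test/1/1",            # placeholder IDs
    "item_id": "self-only-problem",
    "item_type": "openassessment",
}
submission = sub_api.create_submission(student_item, {"text": "my answer"})

workflow = workflow_api.create_workflow(submission["uuid"], ["self"])
assert workflow["status"] == "self"

# An unrecognized step name raises AssessmentWorkflowRequestError:
# workflow_api.create_workflow(submission["uuid"], ["grammar"])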
......@@ -298,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements):
return _serialized_with_details(workflow, assessment_requirements)
def get_status_counts(course_id, item_id):
def get_status_counts(course_id, item_id, steps):
"""
Count how many workflows have each status, for a given item in a course.
Kwargs:
course_id (unicode): The ID of the course.
item_id (unicode): The ID of the item in the course.
steps (list): A list of assessment steps for this problem.
Returns:
list of dictionaries with keys "status" (str) and "count" (int)
Example usage:
>>> get_status_counts("ora2/1/1", "peer-assessment-problem")
>>> get_status_counts("ora2/1/1", "peer-assessment-problem", ["peer"])
[
{"status": "peer", "count": 5},
{"status": "self", "count": 10},
......@@ -327,7 +354,8 @@ def get_status_counts(course_id, item_id):
course_id=course_id,
item_id=item_id,
).count()
} for status in AssessmentWorkflow.STATUS_VALUES
}
for status in steps + AssessmentWorkflow.STATUSES
]
......
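Because the reported statuses are now steps + AssessmentWorkflow.STATUSES instead of a fixed list, callers pass the problem's own step list. A sketch using the IDs from this commit's tests:

counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
# e.g. [{"status": "peer", "count": 1},
#       {"status": "self", "count": 2},
#       ... plus the model's non-step statuses, such as "waiting" and "done"]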
......@@ -40,7 +40,6 @@ class Migration(SchemaMigration):
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
......
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AssessmentWorkflowStep'
db.create_table('workflow_assessmentworkflowstep', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='steps', to=orm['workflow.AssessmentWorkflow'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('submitter_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
('assessment_completed_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
('order_num', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('workflow', ['AssessmentWorkflowStep'])
def backwards(self, orm):
# Deleting model 'AssessmentWorkflowStep'
db.delete_table('workflow_assessmentworkflowstep')
models = {
'workflow.assessmentworkflow': {
'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'workflow.assessmentworkflowstep': {
'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'AssessmentWorkflowStep'},
'assessment_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'submitter_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['workflow.AssessmentWorkflow']"})
}
}
complete_apps = ['workflow']
\ No newline at end of file
{
"peer": {
"steps": ["peer"],
"requirements": {
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"both": {
"steps": ["peer", "self"],
"requirements": {
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
},
"self": {
"steps": ["self"],
"requirements": {
"self": {}
}
}
}
\ No newline at end of file
from django.db import DatabaseError
import ddt
from mock import patch
from nose.tools import raises
from openassessment.assessment.models import PeerWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.assessment import peer_api
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
......@@ -18,18 +18,14 @@ ITEM_1 = {
"item_type": "openassessment",
}
REQUIREMENTS = {
"peer": {
"must_grade": 5,
"must_be_graded_by": 3,
}
}
@ddt.ddt
class TestAssessmentWorkflowApi(CacheResetTest):
def test_create_workflow(self):
@ddt.file_data('data/assessments.json')
def test_create_workflow(self, data):
first_step = data["steps"][0] if data["steps"] else "peer"
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"])
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow_keys = set(workflow.keys())
self.assertEqual(
......@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest):
}
)
self.assertEqual(workflow["submission_uuid"], submission["uuid"])
self.assertEqual(workflow["status"], "peer")
self.assertEqual(workflow["status"], first_step)
workflow_from_get = workflow_api.get_workflow_for_submission(
submission["uuid"], REQUIREMENTS
submission["uuid"], data["requirements"]
)
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
def test_need_valid_submission_uuid(self):
@ddt.file_data('data/assessments.json')
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow("xxxxxxxxxxx")
workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])
# submission_uuid is the wrong type
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow = workflow_api.create_workflow(123)
workflow = workflow_api.create_workflow(123, data["steps"])
@patch.object(Submission.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_submissions_errors_wrapped(self, mock_get):
def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!")
workflow_api.create_workflow("zzzzzzzzzzzzzzz")
workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])
@patch.object(AssessmentWorkflow.objects, 'create')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"], data["steps"])
@patch.object(PeerWorkflow.objects, 'get_or_create')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_errors_wrapped(self, mock_create):
def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
mock_create.side_effect = DatabaseError("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.create_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ["peer", "self"])
@patch.object(AssessmentWorkflow.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_exception_wrapped(self, data, mock_create):
mock_create.side_effect = Exception("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
workflow_api.update_from_assessments(submission["uuid"], data["steps"])
def test_get_assessment_workflow_expected_errors(self):
@ddt.file_data('data/assessments.json')
def test_get_assessment_workflow_expected_errors(self, data):
with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS)
workflow_api.get_workflow_for_submission("0000000000000", data["requirements"])
with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
workflow_api.get_workflow_for_submission(123, REQUIREMENTS)
workflow_api.get_workflow_for_submission(123, data["requirements"])
@patch.object(Submission.objects, 'get')
@ddt.file_data('data/assessments.json')
@raises(workflow_api.AssessmentWorkflowInternalError)
def test_unexpected_workflow_get_errors_wrapped(self, mock_get):
def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
mock_get.side_effect = Exception("Kaboom!")
submission = sub_api.create_submission(ITEM_1, "We talk TV!")
workflow = workflow_api.create_workflow(submission["uuid"])
workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
workflow_api.get_workflow_for_submission(workflow["uuid"], {})
def test_get_status_counts(self):
# Initially, the counts should all be zero
counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, [
{"status": "peer", "count": 0},
{"status": "self", "count": 0},
......@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")
# Now the counts should be updated
counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, [
{"status": "peer", "count": 1},
{"status": "self", "count": 2},
......@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest):
# Create a workflow in a different course, same user and item
# Counts should be the same
self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts)
# Create a workflow in the same course, different item
# Counts should be the same
self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
self.assertEqual(counts, updated_counts)
def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
......@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
"item_type": "openassessment",
}, answer)
workflow = workflow_api.create_workflow(submission['uuid'])
workflow = workflow_api.create_workflow(submission['uuid'], ["peer", "self"])
workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
workflow_model.status = status
workflow_model.save()
......@@ -70,12 +70,26 @@ class GradeMixin(object):
Returns:
tuple of context (dict), template_path (string)
"""
feedback = peer_api.get_assessment_feedback(self.submission_uuid)
# Peer specific stuff...
assessment_steps = self.assessment_steps
submission_uuid = workflow['submission_uuid']
if "peer-assessment" in assessment_steps:
feedback = peer_api.get_assessment_feedback(submission_uuid)
peer_assessments = peer_api.get_assessments(submission_uuid)
has_submitted_feedback = feedback is not None
else:
feedback = None
peer_assessments = []
has_submitted_feedback = False
if "self-assessment" in assessment_steps:
self_assessment = self_api.get_assessment(submission_uuid)
else:
self_assessment = None
feedback_text = feedback.get('feedback', '') if feedback else ''
student_submission = sub_api.get_submission(workflow['submission_uuid'])
peer_assessments = peer_api.get_assessments(student_submission['uuid'])
self_assessment = self_api.get_assessment(student_submission['uuid'])
has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
student_submission = sub_api.get_submission(submission_uuid)
# We retrieve the score from the workflow, which in turn retrieves
# the score for our current submission UUID.
......@@ -94,9 +108,14 @@ class GradeMixin(object):
}
# Update the scores we will display to the user
# Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
# Note that we are updating a *copy* of the rubric criteria stored in
# the XBlock field
max_scores = peer_api.get_rubric_max_scores(submission_uuid)
if "peer-assessment" in assessment_steps:
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
if median_scores is not None and max_scores is not None:
for criterion in context["rubric_criteria"]:
criterion["median_score"] = median_scores[criterion["name"]]
......@@ -114,11 +133,17 @@ class GradeMixin(object):
Returns:
tuple of context (dict), template_path (string)
"""
def _is_incomplete(step):
return (
step in workflow["status_details"] and
not workflow["status_details"][step]["complete"]
)
incomplete_steps = []
if not workflow["status_details"]["peer"]["complete"]:
incomplete_steps.append("Peer Assessment")
if not workflow["status_details"]["self"]["complete"]:
incomplete_steps.append("Self Assessment")
if _is_incomplete("peer"):
incomplete_steps.append(_("Peer Assessment"))
if _is_incomplete("self"):
incomplete_steps.append(_("Self Assessment"))
return (
'openassessmentblock/grade/oa_grade_incomplete.html',
......@@ -131,7 +156,8 @@ class GradeMixin(object):
Submit feedback on an assessment.
Args:
data (dict): Can provide keys 'feedback_text' (unicode) and 'feedback_options' (list of unicode).
data (dict): Can provide keys 'feedback_text' (unicode) and
'feedback_options' (list of unicode).
Kwargs:
suffix (str): Unused
......
......@@ -2,7 +2,6 @@
import datetime as dt
import logging
import dateutil
import pkg_resources
import pytz
......@@ -239,7 +238,9 @@ class OpenAssessmentBlock(
# Include release/due dates for each step in the problem
context['step_dates'] = list()
for step in ['submission', 'peer-assessment', 'self-assessment']:
steps = ['submission'] + self.assessment_steps
for step in steps:
# Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
......@@ -313,6 +314,10 @@ class OpenAssessmentBlock(
load('static/xml/poverty_rubric_example.xml')
),
(
"OpenAssessmentBlock (Self Only) Rubric",
load('static/xml/poverty_self_only_example.xml')
),
(
"OpenAssessmentBlock Censorship Rubric",
load('static/xml/censorship_rubric_example.xml')
),
......@@ -333,6 +338,10 @@ class OpenAssessmentBlock(
return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
@property
def assessment_steps(self):
return [asmnt['name'] for asmnt in self.rubric_assessments]
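The new assessment_steps property is what the peer and self mixins check before rendering, so a missing step simply renders an empty response. A small self-contained sketch (the class is a stand-in, not the real XBlock):

class ExampleBlock(object):                        # stand-in for OpenAssessmentBlock
    rubric_assessments = [{"name": "self-assessment"}]

    @property
    def assessment_steps(self):
        return [asmnt["name"] for asmnt in self.rubric_assessments]

block = ExampleBlock()
"peer-assessment" in block.assessment_steps        # False -> peer step renders as empty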
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
......@@ -421,18 +430,17 @@ class OpenAssessmentBlock(
]
# Resolve unspecified dates and date strings to datetimes
start, due, date_ranges = resolve_dates(self.start, self.due, [submission_range] + assessment_ranges)
start, due, date_ranges = resolve_dates(
self.start, self.due, [submission_range] + assessment_ranges
)
# Based on the step, choose the date range to consider
# We hard-code this to the submission -> peer -> self workflow for now;
# later, we can revisit to make this more flexible.
open_range = (start, due)
if step == "submission":
open_range = (start, due)
assessment_steps = self.assessment_steps
if step == 'submission':
open_range = date_ranges[0]
if step == "peer-assessment":
open_range = date_ranges[1]
if step == "self-assessment":
open_range = date_ranges[2]
elif step in assessment_steps:
step_index = assessment_steps.index(step)
open_range = date_ranges[1 + step_index]
# Course staff always have access to the problem
if course_staff is None:
......
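is_closed now resolves a step's date range by position rather than by hard-coded step names: index 0 is the submission range, and each configured assessment follows in order. A sketch with placeholder dates:

assessment_steps = ["self-assessment"]             # a self-only problem
date_ranges = [
    ("2014-01-01", "2014-02-01"),                  # submission (placeholder dates)
    ("2014-02-01", "2014-03-01"),                  # self-assessment
]

step = "self-assessment"
if step == "submission":
    open_range = date_ranges[0]
elif step in assessment_steps:
    open_range = date_ranges[1 + assessment_steps.index(step)]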
import logging
from django.utils.translation import ugettext as _
from webob import Response
from xblock.core import XBlock
from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import (
PeerAssessmentInternalError, PeerAssessmentRequestError,
......@@ -114,6 +117,8 @@ class PeerAssessmentMixin(object):
number of assessments.
"""
if "peer-assessment" not in self.assessment_steps:
return Response(u"")
continue_grading = data.params.get('continue_grading', False)
path, context_dict = self.peer_path_and_context(continue_grading)
return self.render_assessment(path, context_dict)
......
......@@ -2,6 +2,8 @@ import logging
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from webob import Response
from openassessment.assessment import self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
......@@ -24,6 +26,9 @@ class SelfAssessmentMixin(object):
@XBlock.handler
def render_self_assessment(self, data, suffix=''):
if "self-assessment" not in self.assessment_steps:
return Response(u"")
try:
path, context = self.self_path_and_context()
except:
......
......@@ -134,7 +134,59 @@
"score": "",
"feedback_text": "",
"student_submission": "",
"peer_assessments": [],
"peer_assessments": [
{
"submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
"points_earned": 5,
"points_possible": 6,
"rubric": {
"criteria": [
{
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 1,
"points": 1,
"name": "Fair"
}
],
"points_possible": 2
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
}
]
}
}
],
"self_assessment": {},
"rubric_criteria": [],
"has_submitted_feedback": false
......@@ -146,4 +198,4 @@
"context": {},
"output": "oa_edit.html"
}
]
]
\ No newline at end of file
......@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() {
this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {};
this.renderSelfAssessmentStep = function() {};
this.scrollToTop = function() {};
this.gradeView = { load: function() {} };
this.loadAssessmentModules = function() {};
};
// Stubs
......
......@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() {
// Stub base view
var StubBaseView = function() {
this.loadAssessmentModules = function() {};
this.peerView = { load: function() {} };
this.gradeView = { load: function() {} };
this.showLoadError = function(msg) {};
......@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() {
}).promise();
});
spyOn(view, 'load');
spyOn(baseView.peerView, 'load');
spyOn(baseView, 'loadAssessmentModules');
view.response('Test response');
view.submit();
// Expect the current and next step to have been reloaded
expect(view.load).toHaveBeenCalled();
expect(baseView.peerView.load).toHaveBeenCalled();
expect(baseView.loadAssessmentModules).toHaveBeenCalled();
});
it("enables the unsaved work warning when the user changes the response text", function() {
......
......@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = {
},
/**
* Asynchronously load each sub-view into the DOM.
*/
Asynchronously load each sub-view into the DOM.
**/
load: function() {
this.responseView.load();
this.peerView.load();
this.renderSelfAssessmentStep();
this.gradeView.load();
this.loadAssessmentModules();
// Set up expand/collapse for course staff debug, if available
courseStaffDebug = $('.wrapper--staff-info');
......@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = {
},
/**
Refresh the Assessment Modules. This should be called any time an action is
performed by the user.
**/
loadAssessmentModules: function() {
this.peerView.load();
this.renderSelfAssessmentStep();
this.gradeView.load();
},
/**
Render the self-assessment step.
**/
renderSelfAssessmentStep: function() {
......@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = {
this.server.selfAssess(optionsSelected).done(
function() {
view.peerView.load();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.loadAssessmentModules();
view.scrollToTop();
}
).fail(function(errMsg) {
......@@ -181,14 +187,14 @@ OpenAssessment.BaseView.prototype = {
toggleActionError: function(type, msg) {
var element = this.element;
var container = null;
if (type == 'save') {
container = '.response__submission__actions';
if (type == 'save') {
container = '.response__submission__actions';
}
else if (type == 'submit' || type == 'peer' || type == 'self') {
container = '.step__actions';
else if (type == 'submit' || type == 'peer' || type == 'self') {
container = '.step__actions';
}
else if (type == 'feedback_assess') {
container = '.submission__feedback__actions';
else if (type == 'feedback_assess') {
container = '.submission__feedback__actions';
}
// If we don't have anywhere to put the message, just log it to the console
......@@ -219,10 +225,10 @@ OpenAssessment.BaseView.prototype = {
$(container + ' .step__status__value .copy').html(gettext('Unable to Load'));
},
/**
/**
* Get the contents of the Step Actions error message box, for unit test validation.
*
* Step Actions are the UX-level parts of the student interaction flow -
* Step Actions are the UX-level parts of the student interaction flow -
* Submission, Peer Assessment, and Self Assessment. Since steps are mutually
* exclusive, only one error box should be rendered on screen at a time.
*
......
......@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = {
var baseView = view.baseView;
this.peerAssessRequest(function() {
view.load();
baseView.renderSelfAssessmentStep();
baseView.gradeView.load();
baseView.loadAssessmentModules();
baseView.scrollToTop();
});
},
......
......@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = {
**/
moveToNextStep: function() {
this.load();
this.baseView.peerView.load();
this.baseView.gradeView.load();
this.baseView.loadAssessmentModules();
// Disable the "unsaved changes" warning if the user
// tries to navigate to another page.
......
<openassessment submission_due="2015-03-11T18:20">
<title>
Global Poverty
</title>
<rubric>
<prompt>
Given the state of the world today, what do you think should be done to combat poverty?
Read for conciseness, clarity of thought, and form.
</prompt>
<criterion>
<name>concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explanation>
</option>
</criterion>
<criterion>
<name>clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation></explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation></explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation></explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation></explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explanation>
</option>
</criterion>
<criterion>
<name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation></explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation></explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation></explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation></explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation></explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation></explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -124,7 +124,7 @@ class SubmissionMixin(object):
student_sub_dict = {'text': student_sub}
submission = api.create_submission(student_item_dict, student_sub_dict)
workflow_api.create_workflow(submission["uuid"])
self.create_workflow(submission["uuid"])
self.submission_uuid = submission["uuid"]
# Emit analytics event...
......
......@@ -10,7 +10,9 @@
{
"name": "self-assessment"
}
]
],
"current_assessments": null,
"is_released": false
},
"peer_only": {
"valid": false,
......@@ -20,15 +22,19 @@
"must_grade": 5,
"must_be_graded_by": 3
}
]
],
"current_assessments": null,
"is_released": false
},
"self_only": {
"valid": false,
"valid": true,
"assessments": [
{
"name": "self-assessment"
}
]
],
"current_assessments": null,
"is_released": false
},
"self_before_peer": {
"valid": false,
......@@ -41,7 +47,9 @@
"must_grade": 5,
"must_be_graded_by": 3
}
]
],
"current_assessments": null,
"is_released": false
},
"peer_then_peer": {
"valid": false,
......@@ -56,6 +64,8 @@
"must_grade": 5,
"must_be_graded_by": 3
}
]
],
"current_assessments": null,
"is_released": false
}
}
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
<assessment name="peer-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" />
</assessments>
</openassessment>
{
"empty_dict": {
"assessment": {}
},
"must_be_graded_by_zero": {
"assessment": {
"name": "self-assessment",
"must_grade": 1,
"must_be_graded_by": 0
}
"assessments": [{}],
"current_assessments": null,
"is_released": false
},
"unsupported_type": {
"assessment": {
"name": "unsupported-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
},
{
"name": "unsupported-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
},
"no_type": {
"assessment": {
"must_grade": 5,
"must_be_graded_by": 3
}
"assessments": [
{
"name": "self-assessment"
},
{
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
},
"unsupported_unicode_type": {
"assessment": {
"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
"must_grade": 5,
"must_be_graded_by": 3
}
"assessments": [
{
"name": "self-assessment"
},
{
"name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
},
"no_must_grade": {
"assessment": {
"name": "peer-assessment",
"must_be_graded_by": 3
}
"assessments": [
{
"name": "peer-assessment",
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"no_must_be_graded_by": {
"assessment": {
"name": "peer-assessment",
"must_grade": 5
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"must_grade_less_than_must_be_graded_by": {
"assessment": {
"name": "peer-assessment",
"must_grade": 4,
"must_be_graded_by": 5
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 4,
"must_be_graded_by": 5
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"must_grade_zero": {
"assessment": {
"name": "peer-assessment",
"must_grade": 0,
"must_be_graded_by": 0
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 0,
"must_be_graded_by": 0
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"must_be_graded_by_zero": {
"assessment": {
"name": "peer-assessment",
"must_grade": 1,
"must_be_graded_by": 0
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 1,
"must_be_graded_by": 0
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"remove_peer_mid_flight": {
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": [
{
"name": "self-assessment"
}
],
"is_released": true
},
"swap_peer_and_self_mid_flight": {
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": [
{
"name": "self-assessment"
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"is_released": true
}
}
<openassessment>
<title>Open Assessment Test</title>
<title>Only Self Assessment</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
......
{
"peer": {
"assessment": {
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
"peer_then_self": {
"assessments": [
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"self": {
"assessment": {
"name": "self-assessment",
"must_grade": 2,
"must_be_graded_by": 1
}
"self_only": {
"assessments": [
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
},
"must_be_graded_by_equals_must_grade": {
"assessment": {
"name": "self-assessment",
"must_grade": 1,
"must_be_graded_by": 1
}
"assessments": [
{
"name": "peer-assessment",
"must_grade": 1,
"must_be_graded_by": 1
},
{
"name": "self-assessment"
}
],
"current_assessments": null,
"is_released": false
}
}
......@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase):
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
STEPS = ['peer', 'self']
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock):
# Submit, assess, and render the grade view
......@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase):
scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'])
workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
......
......@@ -92,12 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase):
# Test that we enforce that there are exactly two assessments,
# peer ==> self
# If and when we remove this restriction, this test can be deleted.
@data('data/invalid_assessment_combo_order.xml', 'data/invalid_assessment_combo_peer_only.xml')
@scenario('data/basic_scenario.xml')
def test_update_xml_invalid_assessment_combo(self, xblock):
request = json.dumps({'xml': self.load_fixture_str('data/invalid_assessment_combo.xml')})
def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
request = json.dumps(
{'xml': self.load_fixture_str(invalid_workflow)}
)
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("must have exactly two assessments", resp['msg'].lower())
self.assertIn("for this assignment", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml')
......
......@@ -14,27 +14,26 @@ class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data):
success, msg = validate_assessments([data['assessment']])
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data):
success, msg = validate_assessments([data['assessment']])
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertFalse(success)
self.assertGreater(len(msg), 0)
def test_no_assessments(self):
success, msg = validate_assessments([])
success, msg = validate_assessments([], [], False)
self.assertFalse(success)
self.assertGreater(len(msg), 0)
# Currently, we enforce the restriction that there must be
# exactly two assessments, in the order (a) peer, then (b) self.
# If and when we remove that restriction, this test can be deleted.
# Make sure only legal assessment combinations are allowed. For now, that's
# (peer -> self), and (self)
@ddt.file_data('data/assessment_combo.json')
def test_enforce_peer_then_self(self, data):
success, msg = validate_assessments(data['assessments'], enforce_peer_then_self=True)
def test_enforce_assessment_combo_restrictions(self, data):
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertEqual(success, data['valid'], msg=msg)
if not success:
......
......@@ -43,33 +43,49 @@ def _duplicates(items):
return set(x for x in items if counts[x] > 1)
def validate_assessments(assessments, enforce_peer_then_self=False):
def validate_assessments(assessments, current_assessments, is_released):
"""
Check that the assessment dict is semantically valid.
Valid assessment steps are currently:
* peer, then self
* self only
If a question has been released, the type and number of assessment steps
cannot be changed.
Args:
assessments (list of dict): list of serialized assessment models.
Kwargs:
enforce_peer_then_self (bool): If True, enforce the requirement that there
must be exactly two assessments: first, a peer-assessment, then a self-assessment.
current_assessments (list of dict): list of the current serialized
assessment models. Used to determine if the assessment configuration
has changed since the question had been released.
is_released (boolean) : True if the question has been released.
Returns:
tuple (is_valid, msg) where
is_valid is a boolean indicating whether the assessment is semantically valid
and msg describes any validation errors found.
"""
if enforce_peer_then_self:
if len(assessments) != 2:
return (False, _("This problem must have exactly two assessments."))
if assessments[0].get('name') != 'peer-assessment':
return (False, _("The first assessment must be a peer assessment."))
if assessments[1].get('name') != 'self-assessment':
return (False, _("The second assessment must be a self assessment."))
def _self_only(assessments):
return len(assessments) == 1 and assessments[0].get('name') == 'self-assessment'
def _peer_then_self(assessments):
return (
len(assessments) == 2 and
assessments[0].get('name') == 'peer-assessment' and
assessments[1].get('name') == 'self-assessment'
)
if len(assessments) == 0:
return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self)
if not (_self_only(assessments) or _peer_then_self(assessments)):
return (
False,
_("For this assignment, you can set either a peer assessment followed by a self assessment or a self assessment only.")
)
for assessment_dict in assessments:
# Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
......@@ -89,6 +105,15 @@ def validate_assessments(assessments, enforce_peer_then_self=False):
if must_grade < must_be_graded_by:
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
if is_released:
if len(assessments) != len(current_assessments):
return (False, _("The number of assessments cannot be changed after the problem has been released."))
names = [assessment.get('name') for assessment in assessments]
current_names = [assessment.get('name') for assessment in current_assessments]
if names != current_names:
return (False, _("The assessment type cannot be changed after the problem has been released."))
return (True, u'')
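A hedged sketch of the post-release guard added above: once a problem is released, the proposed assessment list must keep the same length and step names as the current configuration.

current = [
    {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
    {"name": "self-assessment"},
]
proposed = [{"name": "self-assessment"}]           # tries to drop peer mid-flight

success, msg = validate_assessments(proposed, current, True)
# success is False: the number of assessments cannot change after release.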
......@@ -188,7 +213,12 @@ def validator(oa_block, strict_post_release=True):
"""
def _inner(rubric_dict, submission_dict, assessments):
success, msg = validate_assessments(assessments, enforce_peer_then_self=True)
current_assessments = oa_block.rubric_assessments
success, msg = validate_assessments(
assessments,
current_assessments,
strict_post_release and oa_block.is_released()
)
if not success:
return (False, msg)
......
......@@ -8,6 +8,25 @@ class WorkflowMixin(object):
def handle_workflow_info(self, data, suffix=''):
return self.get_workflow_info()
def create_workflow(self, submission_uuid):
steps = self._create_step_list()
workflow_api.create_workflow(submission_uuid, steps)
def _create_step_list(self):
def _convert_rubric_assessment_name(ra_name):
"""'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
short_name, suffix = ra_name.split("-")
return short_name
# rubric_assessments stores names as "self-assessment",
# "peer-assessment", while the model is expecting "self", "peer".
# Therefore, this conversion step. We should refactor later to
# standardize.
return [
_convert_rubric_assessment_name(ra["name"])
for ra in self.rubric_assessments
]
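A sketch of the conversion _create_step_list performs: the XBlock stores "peer-assessment"/"self-assessment", while the workflow models expect the short "peer"/"self" names.

rubric_assessments = [{"name": "peer-assessment"}, {"name": "self-assessment"}]
steps = [ra["name"].split("-")[0] for ra in rubric_assessments]
# steps == ["peer", "self"], which is what workflow_api.create_workflow expects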
def workflow_requirements(self):
"""
Retrieve the requirements from each assessment module
......@@ -93,6 +112,7 @@ class WorkflowMixin(object):
status_counts = workflow_api.get_status_counts(
course_id=student_item['course_id'],
item_id=student_item['item_id'],
steps=self._create_step_list(),
)
num_submissions = sum(item['count'] for item in status_counts)
return status_counts, num_submissions