Commit 2702e77b by Stephen Sanchez

Updating tests and some simple cleanup.

parent 30a5cf60
@@ -73,11 +73,6 @@ class PeerAssessmentInternalError(PeerAssessmentError):

 def submitter_is_finished(submission_uuid, requirements):
-    """Temporary bridge method."""
-    return is_complete(submission_uuid, requirements)
-
-
-def is_complete(submission_uuid, requirements):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         if workflow.completed_at is not None:
@@ -104,7 +99,7 @@ def get_score(submission_uuid, requirements):
         dict with keys "points_earned" and "points_possible".
     """
     # User hasn't completed their own submission yet
-    if not is_complete(submission_uuid, requirements):
+    if not submitter_is_finished(submission_uuid, requirements):
         return None

     workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
@@ -139,6 +134,7 @@ def get_score(submission_uuid, requirements):
         "points_possible": items[0].assessment.points_possible,
     }

 def assessment_is_finished(submission_uuid, requirements):
     return bool(get_score(submission_uuid, requirements))
...
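Note on the hunk above: the temporary `is_complete` bridge is gone and `submitter_is_finished` is now the real implementation for the peer step. A minimal usage sketch (the placeholder UUID and requirement keys mirror the tests later in this commit; this is illustrative, not part of the change):

```python
# Sketch: exercising the renamed peer-step predicates.
from openassessment.assessment import peer_api

requirements = {"must_grade": 5, "must_be_graded_by": 3}
submission_uuid = "222bdf3d-a88e-11e3-859e-040ccee02800"  # placeholder

if peer_api.submitter_is_finished(submission_uuid, requirements):
    # The student has graded enough peers; their own score may still be
    # pending until enough peers have graded them.
    score = peer_api.get_score(submission_uuid, requirements)
```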
@@ -3,6 +3,7 @@ Public interface for self-assessment.
 """
 import logging
 from django.utils.translation import ugettext as _
+from django.db import DatabaseError
 from dogapi import dog_stats_api
 from submissions.api import get_submission_and_student, SubmissionNotFoundError
@@ -21,13 +22,30 @@ SELF_TYPE = "SE"
 logger = logging.getLogger("openassessment.assessment.self_api")

-class SelfAssessmentRequestError(Exception):
+class SelfAssessmentError(Exception):
+    """Generic Self Assessment Error
+
+    Raised when an error occurs while processing a request related to the
+    Self Assessment Workflow.
+    """
+    pass
+
+
+class SelfAssessmentRequestError(SelfAssessmentError):
     """
     There was a problem with the request for a self-assessment.
     """
     pass
+
+
+class SelfAssessmentInternalError(SelfAssessmentError):
+    """
+    There was an internal problem while accessing the self-assessment api.
+    """
+    pass

 def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
     """
     Create a self-assessment for a submission.
@@ -99,7 +117,6 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
     assessment_dict = full_assessment_dict(assessment)
     _log_assessment(assessment, submission)

-    # Return the serialized assessment
     return assessment_dict
@@ -139,28 +156,65 @@ def get_assessment(submission_uuid):
     return serialized_assessment

-def submitter_is_finished(submission_uuid, requirements):
-    """Temporary bridge method."""
-    return is_complete(submission_uuid, requirements)
-
-
-def is_complete(submission_uuid, requirements):
+def submitter_is_finished(submission_uuid, requirements):
     """
     Check whether a self-assessment has been completed for a submission.

     Args:
         submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.

     Returns:
-        bool
+        True if the submitter has assessed their answer
+
+    Examples:
+        >>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
     """
     return Assessment.objects.filter(
         score_type=SELF_TYPE, submission_uuid=submission_uuid
     ).exists()

 def assessment_is_finished(submission_uuid, requirements):
-    return is_complete(submission_uuid, requirements)
+    """
+    Check whether a self-assessment has been completed. For self-assessment,
+    this function is synonymous with submitter_is_finished.
+
+    Args:
+        submission_uuid (str): The unique identifier of the submission.
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+
+    Returns:
+        True if the assessment is complete.
+
+    Examples:
+        >>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        True
+    """
+    return submitter_is_finished(submission_uuid, requirements)

 def get_score(submission_uuid, requirements):
+    """
+    Get the score for this particular assessment.
+
+    Args:
+        submission_uuid (str): The unique identifier for the submission
+        requirements (dict): Any attributes of the assessment module required
+            to determine if this assessment is complete. There are currently
+            no requirements for a self-assessment.
+
+    Returns:
+        A dict of points earned and points possible for the given submission.
+        Returns None if no score can be determined yet.
+
+    Examples:
+        >>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
+        {
+            'points_earned': 5,
+            'points_possible': 10
+        }
+    """
     assessment = get_assessment(submission_uuid)
     if not assessment:
         return None
@@ -170,6 +224,7 @@ def get_score(submission_uuid, requirements):
         "points_possible": assessment["points_possible"]
     }

 def get_assessment_scores_by_criteria(submission_uuid):
     """Get the median score for each rubric criterion
...
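With this hunk, `peer_api` and `self_api` expose the same three entry points. A sketch of that shared contract (illustrative only; the duck typing is what lets the workflow models later in this diff dispatch through `step.api()` generically):

```python
# Sketch: any step API module exposes submitter_is_finished,
# assessment_is_finished, and get_score with the same signatures.
from openassessment.assessment import peer_api, self_api

def describe_step(api_module, submission_uuid, requirements):
    """Summarize one step's state via the uniform step-API contract."""
    return {
        "submitter_done": api_module.submitter_is_finished(submission_uuid, requirements),
        "assessment_done": api_module.assessment_is_finished(submission_uuid, requirements),
        "score": api_module.get_score(submission_uuid, requirements),
    }

# Self-assessment currently takes no requirements, so {} suffices:
# describe_step(self_api, '222bdf3d-a88e-11e3-859e-040ccee02800', {})
```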
@@ -134,6 +134,7 @@ TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
 WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
 THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)

+STEPS = ['peer', 'self']

 @ddt
 class TestPeerApi(CacheResetTest):
@@ -449,7 +450,7 @@ class TestPeerApi(CacheResetTest):
             'must_grade': REQUIRED_GRADED,
             'must_be_graded_by': REQUIRED_GRADED_BY
         }
-        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))

     def test_completeness(self):
         """
@@ -788,7 +789,7 @@ class TestPeerApi(CacheResetTest):
             'must_grade': REQUIRED_GRADED,
             'must_be_graded_by': REQUIRED_GRADED_BY
         }
-        self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
+        self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))

     def test_find_active_assessments(self):
         buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
@@ -1137,5 +1138,5 @@ class TestPeerApi(CacheResetTest):
         new_student_item["student_id"] = student
         submission = sub_api.create_submission(new_student_item, answer, date)
         peer_api.create_peer_workflow(submission["uuid"])
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], STEPS)
         return submission, new_student_item
@@ -9,7 +9,7 @@ import pytz
 from openassessment.test_utils import CacheResetTest
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
-    create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
+    create_assessment, submitter_is_finished, SelfAssessmentRequestError, get_assessment
 )
@@ -60,7 +60,7 @@ class TestSelfApi(CacheResetTest):
         # Now there should be a submission, but no self-assessment
         assessment = get_assessment(submission["uuid"])
         self.assertIs(assessment, None)
-        self.assertFalse(is_complete(submission['uuid']))
+        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

         # Create a self-assessment for the submission
         assessment = create_assessment(
@@ -70,7 +70,7 @@ class TestSelfApi(CacheResetTest):
         )

         # Self-assessment should be complete
-        self.assertTrue(is_complete(submission['uuid']))
+        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

         # Retrieve the self-assessment
         retrieved = get_assessment(submission["uuid"])
@@ -198,4 +198,4 @@ class TestSelfApi(CacheResetTest):
     def test_is_complete_no_submission(self):
         # This submission uuid does not exist
-        self.assertFalse(is_complete('abc1234'))
+        self.assertFalse(submitter_is_finished('abc1234', {}))
@@ -9,6 +9,7 @@ from submissions import api as sub_api
 from openassessment.workflow import api as workflow_api
 from openassessment.assessment import peer_api, self_api

+STEPS = ['peer', 'self']

 class Command(BaseCommand):
@@ -131,7 +132,7 @@ class Command(BaseCommand):
         """
         answer = {'text': " ".join(loremipsum.get_paragraphs(5))}
         submission = sub_api.create_submission(student_item, answer)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], STEPS)
         workflow_api.update_from_assessments(
             submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
         )
...
@@ -43,7 +43,7 @@ class UploadDataTest(TestCase):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Create and upload the archive of CSV files
         # This should generate the files even though
...
@@ -143,8 +143,7 @@
             </li>
             {% endwith %}
             {% endfor %}
-            {% if peer_assessments %}
+            {% if feedback_text %}
             <li class="question question--feedback ui-toggle-visibility">
                 <h4 class="question__title ui-toggle-visibility__control">
                     <i class="ico icon-caret-right"></i>
@@ -175,7 +174,7 @@
             {% endfor %}
             </ul>
             </li>
             {% endif %}
         </ol>
     </article>
...
@@ -73,7 +73,7 @@ class CsvWriterTest(CacheResetTest):
         }
         submission_text = "test submission {}".format(index)
         submission = sub_api.create_submission(student_item, submission_text)
-        workflow_api.create_workflow(submission['uuid'])
+        workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

         # Generate a CSV file for the submissions
         output_streams = self._output_streams(['submission'])
...
@@ -87,7 +87,7 @@ def create_workflow(submission_uuid, steps):
         AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
             not exist or is of an invalid type.
         AssessmentWorkflowInternalError: Unexpected internal error, such as the
-            submissions app not being available or a database configuation
+            submissions app not being available or a database configuration
             problem.
     """
@@ -100,7 +100,7 @@ def create_workflow(submission_uuid, steps):
     try:
         submission_dict = sub_api.get_submission_and_student(submission_uuid)
-    except sub_api.SubmissionNotFoundError as err:
+    except sub_api.SubmissionNotFoundError:
         err_msg = sub_err_msg("submission not found")
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)
@@ -109,7 +109,6 @@ def create_workflow(submission_uuid, steps):
         logger.error(err_msg)
         raise AssessmentWorkflowRequestError(err_msg)
     except sub_api.SubmissionInternalError as err:
-        err_msg = sub_err_msg(err)
         logger.error(err)
         raise AssessmentWorkflowInternalError(
             u"retrieving submission {} failed with unknown error: {}"
@@ -117,22 +116,32 @@ def create_workflow(submission_uuid, steps):
     )

     # Raise an error if they specify a step we don't recognize...
-    for step in steps:
-        if step not in AssessmentWorkflow.STEPS:
-            raise AssessmentWorkflowRequestError(
-                u"Step not recognized: {}; Must be one of: {}".format(
-                    step, AssessmentWorkflow.STEPS
-                )
-            )
+    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
+    if invalid_steps:
+        raise AssessmentWorkflowRequestError(
+            u"The following steps were not recognized: {}; Must be one of {}".format(
                invalid_steps, AssessmentWorkflow.STEPS
+            )
+        )

     # We're not using a serializer to deserialize this because the only variable
     # we're getting from the outside is the submission_uuid, which is already
     # validated by this point.
+    status = AssessmentWorkflow.STATUS.peer
+    if steps[0] == "peer":
+        try:
+            peer_api.create_peer_workflow(submission_uuid)
+        except peer_api.PeerAssessmentError as err:
+            err_msg = u"Could not create assessment workflow: {}".format(err)
+            logger.exception(err_msg)
+            raise AssessmentWorkflowInternalError(err_msg)
+    elif steps[0] == "self":
+        status = AssessmentWorkflow.STATUS.self

     try:
-        peer_api.create_peer_workflow(submission_uuid)
         workflow = AssessmentWorkflow.objects.create(
             submission_uuid=submission_uuid,
-            status=AssessmentWorkflow.STATUS.peer,
+            status=status,
             course_id=submission_dict['student_item']['course_id'],
             item_id=submission_dict['student_item']['item_id'],
         )
@@ -145,7 +154,6 @@ def create_workflow(submission_uuid, steps):
         workflow.steps.add(*workflow_steps)
     except (
         DatabaseError,
-        peer_api.PeerAssessmentError,
         sub_api.SubmissionError
     ) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
@@ -316,19 +324,20 @@ def update_from_assessments(submission_uuid, assessment_requirements):
     return _serialized_with_details(workflow, assessment_requirements)

-def get_status_counts(course_id, item_id):
+def get_status_counts(course_id, item_id, steps):
     """
     Count how many workflows have each status, for a given item in a course.

     Kwargs:
         course_id (unicode): The ID of the course.
         item_id (unicode): The ID of the item in the course.
+        steps (list): A list of assessment steps for this problem.

     Returns:
         list of dictionaries with keys "status" (str) and "count" (int)

     Example usage:
-        >>> get_status_counts("ora2/1/1", "peer-assessment-problem")
+        >>> get_status_counts("ora2/1/1", "peer-assessment-problem", ["peer"])
         [
             {"status": "peer", "count": 5},
             {"status": "self", "count": 10},
@@ -345,7 +354,8 @@ def get_status_counts(course_id, item_id):
                 course_id=course_id,
                 item_id=item_id,
             ).count()
-        } for status in AssessmentWorkflow.STATUS_VALUES
+        }
+        for status in steps + AssessmentWorkflow.STATUSES
     ]
...
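The two API changes above (explicit `steps` on `create_workflow`, and a `steps` argument on `get_status_counts`) compose as below. A minimal usage sketch with hypothetical identifiers, following the calling convention shown in the tests in this commit:

```python
# Sketch: creating a self-only workflow and counting statuses for it.
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api

student_item = {
    "student_id": "example-student",      # hypothetical values
    "course_id": "example/course/run",
    "item_id": "example-item",
    "item_type": "openassessment",
}
submission = sub_api.create_submission(student_item, {"text": "my answer"})

# Steps are now explicit; a self-only problem starts in the "self" status
# and never creates a PeerWorkflow.
workflow = workflow_api.create_workflow(submission["uuid"], ["self"])

# get_status_counts now takes the problem's steps, so it only reports
# statuses that can actually occur for this problem ("self", "waiting", "done").
counts = workflow_api.get_status_counts("example/course/run", "example-item", ["self"])
```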
@@ -52,13 +52,15 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         "self",  # User needs to assess themselves
     ]

-    STATUS_VALUES = STEPS + [
+    STATUSES = [
         "waiting",  # User has done all necessary assessment but hasn't been
                     # graded yet -- we're waiting for assessments of their
                     # submission by others.
         "done",  # Complete
     ]

+    STATUS_VALUES = STEPS + STATUSES
+
     STATUS = Choices(*STATUS_VALUES)  # implicit "status" field

     submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
@@ -85,22 +87,15 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         return sub_api.get_latest_score_for_submission(self.submission_uuid)

     def status_details(self, assessment_requirements):
-        from openassessment.assessment import peer_api, self_api
         status_dict = {}
-        if "peer" in assessment_requirements:
-            status_dict["peer"] = {
-                "complete": peer_api.submitter_is_finished(
-                    self.submission_uuid,
-                    assessment_requirements["peer"]
-                )
-            }
-        if "self" in assessment_requirements:
-            status_dict["self"] = {
-                "complete": self_api.submitter_is_finished(self.submission_uuid, {})
-            }
+        steps = self._get_steps()
+        for step in steps:
+            status_dict[step.name] = {
+                "complete": step.api().submitter_is_finished(
+                    self.submission_uuid,
+                    assessment_requirements.get(step.name, {})
+                )
+            }
         return status_dict

     def update_from_assessments(self, assessment_requirements):
@@ -142,7 +137,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
             return

         # Update our AssessmentWorkflowStep models with the latest from our APIs
-        steps = self.update_steps(assessment_requirements)
+        steps = self._get_steps()
+
+        # Go through each step and update its status.
+        for step in steps:
+            step.update(self.submission_uuid, assessment_requirements)

         # Fetch name of the first step that the submitter hasn't yet completed.
         new_status = next(
@@ -178,10 +177,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
         self.status = new_status
         self.save()

-    def update_steps(self, assessment_requirements):
-        from openassessment.assessment import peer_api, self_api
-
+    def _get_steps(self):
+        """
+        Simple helper function for retrieving all the steps in the given
+        Workflow.
+        """
         steps = list(self.steps.all())
         if not steps:
             # If no steps exist for this AssessmentWorkflow, assume
@@ -191,39 +191,20 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
                 AssessmentWorkflowStep(name=self.STATUS.self, order_num=1)
             )
             steps = list(self.steps.all())
+        return steps

-        # Mapping of step names to the APIs that power them
-        steps_to_apis = {
-            self.STATUS.self: self_api,
-            self.STATUS.peer: peer_api
-        }
-
-        # Go through each step and update its status. Note that because we take
-        # the philosophy that once you're done, you're done. That means
-        for step in steps:
-            step_changed = False
-            step_api = steps_to_apis[step.name]
-            step_reqs = assessment_requirements.get(step.name, {})
-
-            # Has the user completed their obligations for this step?
-            if (step.submitter_completed_at is None and
-                    step_api.submitter_is_finished(self.submission_uuid, step_reqs)):
-                step.submitter_completed_at = now()
-                step_changed = True
-
-            # Has the step received a score?
-            if (step.assessment_completed_at is None and
-                    step_api.assessment_is_finished(self.submission_uuid, step_reqs)):
-                step.assessment_completed_at = now()
-                step_changed = True
-
-            if step_changed:
-                step.save()
-
-        return steps
-
     def set_score(self, score):
+        """
+        Set a score for the workflow.
+
+        Scores are persisted via the Submissions API, separate from the Workflow
+        Data. Score is associated with the same submission_uuid as this workflow
+
+        Args:
+            score (dict): A dict containing 'points_earned' and
+                'points_possible'.
+        """
         sub_api.set_score(
             self.submission_uuid,
             score["points_earned"],
@@ -264,10 +245,56 @@ class AssessmentWorkflowStep(models.Model):
     assessment_completed_at = models.DateTimeField(default=None, null=True)
     order_num = models.PositiveIntegerField()

-    # Store the score for this step as well?
-
     class Meta:
         ordering = ["workflow", "order_num"]

+    def is_submitter_complete(self):
+        return self.submitter_completed_at is not None
+
+    def is_assessment_complete(self):
+        return self.assessment_completed_at is not None
+
+    def api(self):
+        """
+        Returns an API associated with this workflow step. If no API is
+        associated with this workflow step, None is returned.
+        """
+        from openassessment.assessment import peer_api, self_api
+        api = None
+        if self.name == AssessmentWorkflow.STATUS.self:
+            api = self_api
+        elif self.name == AssessmentWorkflow.STATUS.peer:
+            api = peer_api
+        return api
+
+    def update(self, submission_uuid, assessment_requirements):
+        """
+        Updates the AssessmentWorkflowStep models with the requirements
+        specified from the Workflow API.
+
+        Intended for internal use by update_from_assessments(). See
+        update_from_assessments() documentation for more details.
+        """
+        # Once a step is completed, it will not be revisited based on updated
+        # requirements.
+        step_changed = False
+        step_reqs = assessment_requirements.get(self.name, {})
+
+        # Has the user completed their obligations for this step?
+        if (not self.is_submitter_complete() and
+                self.api().submitter_is_finished(submission_uuid, step_reqs)):
+            self.submitter_completed_at = now()
+            step_changed = True
+
+        # Has the step received a score?
+        if (not self.is_assessment_complete() and
+                self.api().assessment_is_finished(submission_uuid, step_reqs)):
+            self.assessment_completed_at = now()
+            step_changed = True
+
+        if step_changed:
+            self.save()

 # Just here to record thoughts for later:
 #
...
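The `STEPS`/`STATUSES` split above keeps `STATUS_VALUES` meaning what it always did (step statuses first, then terminal statuses), which is what the status-transition logic in `update_from_assessments` depends on. A self-contained sketch of that ordering idea (not the model code itself; the model's `new_status = next(...)` does something similar against its step records):

```python
# Sketch: pick the first step the submitter hasn't finished, else "waiting".
STEPS = ["peer", "self"]
STATUSES = ["waiting", "done"]
STATUS_VALUES = STEPS + STATUSES  # steps first, terminal statuses last

def next_status(step_names, submitter_done):
    """First unfinished step name, or 'waiting' once all steps are done."""
    return next(
        (name for name in step_names if not submitter_done[name]),
        "waiting",
    )

assert next_status(["peer", "self"], {"peer": True, "self": False}) == "self"
assert next_status(["self"], {"self": True}) == "waiting"
```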
{
    "peer": {
        "steps": ["peer"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            }
        }
    },
    "both": {
        "steps": ["peer", "self"],
        "requirements": {
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            },
            "self": {}
        }
    },
    "self": {
        "steps": ["self"],
        "requirements": {
            "self": {}
        }
    }
}
\ No newline at end of file
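This new data file drives the `@ddt.file_data('data/assessments.json')` decorators in the workflow tests below: each top-level key ("peer", "both", "self") becomes its own test case. An illustrative (not part of the commit) consumer:

```python
# Sketch: how ddt feeds the fixture to a test method as `data`.
import ddt
from django.test import TestCase

@ddt.ddt
class AssessmentsDataSmokeTest(TestCase):  # illustrative only
    @ddt.file_data('data/assessments.json')
    def test_shape(self, data):
        # Every scenario declares its ordered steps and per-step requirements.
        self.assertIsInstance(data["steps"], list)
        self.assertIsInstance(data["requirements"], dict)
```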
 from django.db import DatabaseError
+import ddt
 from mock import patch
 from nose.tools import raises
+from openassessment.assessment.models import PeerWorkflow
 from openassessment.test_utils import CacheResetTest
-from openassessment.assessment import peer_api
 from openassessment.workflow.models import AssessmentWorkflow
 from submissions.models import Submission
@@ -18,18 +18,14 @@ ITEM_1 = {
     "item_type": "openassessment",
 }

-REQUIREMENTS = {
-    "peer": {
-        "must_grade": 5,
-        "must_be_graded_by": 3,
-    }
-}

+@ddt.ddt
 class TestAssessmentWorkflowApi(CacheResetTest):

-    def test_create_workflow(self):
+    @ddt.file_data('data/assessments.json')
+    def test_create_workflow(self, data):
+        first_step = data["steps"][0] if data["steps"] else "peer"
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
-        workflow = workflow_api.create_workflow(submission["uuid"])
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

         workflow_keys = set(workflow.keys())
         self.assertEqual(
@@ -39,53 +35,73 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             }
         )
         self.assertEqual(workflow["submission_uuid"], submission["uuid"])
-        self.assertEqual(workflow["status"], "peer")
+        self.assertEqual(workflow["status"], first_step)

         workflow_from_get = workflow_api.get_workflow_for_submission(
-            submission["uuid"], REQUIREMENTS
+            submission["uuid"], data["requirements"]
         )
         del workflow_from_get['status_details']
         self.assertEqual(workflow, workflow_from_get)

-    def test_need_valid_submission_uuid(self):
+    @ddt.file_data('data/assessments.json')
+    def test_need_valid_submission_uuid(self, data):
         # submission doesn't exist
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow("xxxxxxxxxxx")
+            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])

         # submission_uuid is the wrong type
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow = workflow_api.create_workflow(123)
+            workflow = workflow_api.create_workflow(123, data["steps"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_submissions_errors_wrapped(self, mock_get):
+    def test_unexpected_submissions_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
-        workflow_api.create_workflow("zzzzzzzzzzzzzzz")
+        workflow_api.create_workflow("zzzzzzzzzzzzzzz", data["steps"])

     @patch.object(AssessmentWorkflow.objects, 'create')
+    @ddt.file_data('data/assessments.json')
+    @raises(workflow_api.AssessmentWorkflowInternalError)
+    def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
+        mock_create.side_effect = DatabaseError("Kaboom!")
+        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        workflow_api.create_workflow(submission["uuid"], data["steps"])

+    @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_errors_wrapped(self, mock_create):
+    def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
-        workflow_api.create_workflow(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], ["peer", "self"])

+    @patch.object(AssessmentWorkflow.objects, 'get')
+    @ddt.file_data('data/assessments.json')
+    @raises(workflow_api.AssessmentWorkflowInternalError)
+    def test_unexpected_exception_wrapped(self, data, mock_create):
+        mock_create.side_effect = Exception("Kaboom!")
+        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        workflow_api.update_from_assessments(submission["uuid"], data["steps"])

-    def test_get_assessment_workflow_expected_errors(self):
+    @ddt.file_data('data/assessments.json')
+    def test_get_assessment_workflow_expected_errors(self, data):
         with self.assertRaises(workflow_api.AssessmentWorkflowNotFoundError):
-            workflow_api.get_workflow_for_submission("0000000000000", REQUIREMENTS)
+            workflow_api.get_workflow_for_submission("0000000000000", data["requirements"])
         with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
-            workflow_api.get_workflow_for_submission(123, REQUIREMENTS)
+            workflow_api.get_workflow_for_submission(123, data["requirements"])

     @patch.object(Submission.objects, 'get')
+    @ddt.file_data('data/assessments.json')
     @raises(workflow_api.AssessmentWorkflowInternalError)
-    def test_unexpected_workflow_get_errors_wrapped(self, mock_get):
+    def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
         mock_get.side_effect = Exception("Kaboom!")
         submission = sub_api.create_submission(ITEM_1, "We talk TV!")
-        workflow = workflow_api.create_workflow(submission["uuid"])
-        workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
+        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
+        workflow_api.get_workflow_for_submission(workflow["uuid"], {})

     def test_get_status_counts(self):
         # Initially, the counts should all be zero
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 0},
             {"status": "self", "count": 0},
@@ -108,7 +124,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")

         # Now the counts should be updated
-        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, [
             {"status": "peer", "count": 1},
             {"status": "self", "count": 2},
@@ -119,13 +135,13 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         # Create a workflow in a different course, same user and item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

         # Create a workflow in the same course, different item
         # Counts should be the same
         self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
-        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem", ["peer", "self"])
         self.assertEqual(counts, updated_counts)

     def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
@@ -151,7 +167,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
             "item_type": "openassessment",
         }, answer)

-        workflow = workflow_api.create_workflow(submission['uuid'])
+        workflow = workflow_api.create_workflow(submission['uuid'], ["peer", "self"])
         workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
         workflow_model.status = status
         workflow_model.save()
@@ -71,13 +71,13 @@ class GradeMixin(object):
             tuple of context (dict), template_path (string)
         """
         # Peer specific stuff...
-        assessment_steps = [asmnt['name'] for asmnt in self.rubric_assessments]
+        assessment_steps = self.assessment_steps
         submission_uuid = workflow['submission_uuid']

         if "peer-assessment" in assessment_steps:
             feedback = peer_api.get_assessment_feedback(submission_uuid)
             peer_assessments = peer_api.get_assessments(submission_uuid)
-            has_submitted_feedback = peer_api.get_assessment_feedback(submission_uuid) is not None
+            has_submitted_feedback = feedback is not None
         else:
             feedback = None
             peer_assessments = []
@@ -121,10 +121,6 @@ class GradeMixin(object):
             criterion["median_score"] = median_scores[criterion["name"]]
             criterion["total_value"] = max_scores[criterion["name"]]

-        from pprint import pprint
-        pprint(self_assessment)
-
         return ('openassessmentblock/grade/oa_grade_complete.html', context)

     def render_grade_incomplete(self, workflow):
@@ -145,9 +141,9 @@ class GradeMixin(object):
         incomplete_steps = []
         if _is_incomplete("peer"):
-            incomplete_steps.append("Peer Assessment")
+            incomplete_steps.append(_("Peer Assessment"))
         if _is_incomplete("self"):
-            incomplete_steps.append("Self Assessment")
+            incomplete_steps.append(_("Self Assessment"))

         return (
             'openassessmentblock/grade/oa_grade_incomplete.html',
...
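The `self.assessment_steps` property this hunk starts leaning on is defined outside the diff. Presumably (an assumption, based on the inline comprehension it replaces here) it centralizes the same computation:

```python
# Hypothetical sketch of the property the GradeMixin change assumes exists
# on the XBlock; the removed line computed exactly this inline.
class OpenAssessmentBlockSketch(object):
    rubric_assessments = [{"name": "peer-assessment"}, {"name": "self-assessment"}]

    @property
    def assessment_steps(self):
        return [assessment["name"] for assessment in self.rubric_assessments]

assert OpenAssessmentBlockSketch().assessment_steps == [
    "peer-assessment", "self-assessment"
]
```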
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
import datetime as dt import datetime as dt
import logging import logging
import dateutil
import pkg_resources import pkg_resources
import pytz import pytz
...@@ -239,7 +238,9 @@ class OpenAssessmentBlock( ...@@ -239,7 +238,9 @@ class OpenAssessmentBlock(
# Include release/due dates for each step in the problem # Include release/due dates for each step in the problem
context['step_dates'] = list() context['step_dates'] = list()
for step in ['submission', 'peer-assessment', 'self-assessment']:
steps = ['submission'] + self.assessment_steps
for step in steps:
# Get the dates as a student would see them # Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False) __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
...@@ -432,16 +433,14 @@ class OpenAssessmentBlock( ...@@ -432,16 +433,14 @@ class OpenAssessmentBlock(
start, due, date_ranges = resolve_dates( start, due, date_ranges = resolve_dates(
self.start, self.due, [submission_range] + assessment_ranges self.start, self.due, [submission_range] + assessment_ranges
) )
step_ranges = {"submission": date_ranges[0]}
for step_num, asmnt in enumerate(self.rubric_assessments, start=1): open_range = (start, due)
step_ranges[asmnt["name"]] = date_ranges[step_num] assessment_steps = self.assessment_steps
if step == 'submission':
# Based on the step, choose the date range to consider open_range = date_ranges[0]
# We hard-code this to the submission -> peer -> self workflow for now; elif step in assessment_steps:
# later, we can revisit to make this more flexible. step_index = assessment_steps.index(step)
open_range = (start, due) open_range = date_ranges[1 + step_index]
if step in ["submission", "peer-assessment", "self-assessment"]:
open_range = step_ranges[step]
# Course staff always have access to the problem # Course staff always have access to the problem
if course_staff is None: if course_staff is None:
......
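A worked example of the new date-range lookup (hypothetical dates): `resolve_dates` returns one range per stage, submission first, then each assessment in rubric order, so an assessment step's range lives at index `1 + its position in assessment_steps`.

```python
# Sketch with made-up dates: index arithmetic for the open range of a step.
assessment_steps = ["peer-assessment", "self-assessment"]
date_ranges = [
    ("2015-01-01", "2015-01-08"),  # submission
    ("2015-01-08", "2015-01-15"),  # peer-assessment
    ("2015-01-15", "2015-01-22"),  # self-assessment
]

step = "self-assessment"
open_range = date_ranges[1 + assessment_steps.index(step)]
assert open_range == ("2015-01-15", "2015-01-22")
```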
@@ -134,7 +134,59 @@
     "score": "",
     "feedback_text": "",
     "student_submission": "",
-    "peer_assessments": [],
+    "peer_assessments": [
+        {
+            "submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
+            "points_earned": 5,
+            "points_possible": 6,
+            "rubric": {
+                "criteria": [
+                    {
+                        "name": "Criterion 1",
+                        "prompt": "Prompt 1",
+                        "order_num": 0,
+                        "feedback": "optional",
+                        "options": [
+                            {
+                                "order_num": 2,
+                                "points": 2,
+                                "name": "Good"
+                            }
+                        ],
+                        "points_possible": 2
+                    },
+                    {
+                        "name": "Criterion 2",
+                        "prompt": "Prompt 2",
+                        "order_num": 1,
+                        "options": [
+                            {
+                                "order_num": 1,
+                                "points": 1,
+                                "name": "Fair"
+                            }
+                        ],
+                        "points_possible": 2
+                    },
+                    {
+                        "name": "Criterion 3",
+                        "prompt": "Prompt 3",
+                        "order_num": 2,
+                        "feedback": "optional",
+                        "options": [
+                            {
+                                "order_num": 2,
+                                "points": 2,
+                                "name": "Good"
+                            }
+                        ],
+                        "points_possible": 2
+                    }
+                ]
+            }
+        }
+    ],
     "self_assessment": {},
     "rubric_criteria": [],
     "has_submitted_feedback": false
@@ -146,4 +198,4 @@
         "context": {},
         "output": "oa_edit.html"
     }
-]
+]
\ No newline at end of file
@@ -26,9 +26,8 @@ describe("OpenAssessment.PeerView", function() {
         this.showLoadError = function(msg) {};
         this.toggleActionError = function(msg, step) {};
         this.setUpCollapseExpand = function(sel) {};
-        this.renderSelfAssessmentStep = function() {};
         this.scrollToTop = function() {};
-        this.gradeView = { load: function() {} };
+        this.loadAssessmentModules = function() {};
     };

     // Stubs
...
@@ -27,6 +27,7 @@ describe("OpenAssessment.ResponseView", function() {
     // Stub base view
     var StubBaseView = function() {
+        this.loadAssessmentModules = function() {};
         this.peerView = { load: function() {} };
         this.gradeView = { load: function() {} };
         this.showLoadError = function(msg) {};
@@ -221,14 +222,14 @@ describe("OpenAssessment.ResponseView", function() {
             }).promise();
         });
         spyOn(view, 'load');
-        spyOn(baseView.peerView, 'load');
+        spyOn(baseView, 'loadAssessmentModules');

         view.response('Test response');
         view.submit();

         // Expect the current and next step to have been reloaded
         expect(view.load).toHaveBeenCalled();
-        expect(baseView.peerView.load).toHaveBeenCalled();
+        expect(baseView.loadAssessmentModules).toHaveBeenCalled();
     });

     it("enables the unsaved work warning when the user changes the response text", function() {
...
...@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = { ...@@ -58,13 +58,11 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
* Asynchronously load each sub-view into the DOM. Asynchronously load each sub-view into the DOM.
*/ **/
load: function() { load: function() {
this.responseView.load(); this.responseView.load();
this.peerView.load(); this.loadAssessmentModules();
this.renderSelfAssessmentStep();
this.gradeView.load();
// Set up expand/collapse for course staff debug, if available // Set up expand/collapse for course staff debug, if available
courseStaffDebug = $('.wrapper--staff-info'); courseStaffDebug = $('.wrapper--staff-info');
...@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = { ...@@ -74,6 +72,16 @@ OpenAssessment.BaseView.prototype = {
}, },
/** /**
Refresh the Assessment Modules. This should be called any time an action is
performed by the user.
**/
loadAssessmentModules: function() {
this.peerView.load();
this.renderSelfAssessmentStep();
this.gradeView.load();
},
/**
Render the self-assessment step. Render the self-assessment step.
**/ **/
renderSelfAssessmentStep: function() { renderSelfAssessmentStep: function() {
...@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = { ...@@ -158,9 +166,7 @@ OpenAssessment.BaseView.prototype = {
this.server.selfAssess(optionsSelected).done( this.server.selfAssess(optionsSelected).done(
function() { function() {
view.peerView.load(); view.loadAssessmentModules();
view.renderSelfAssessmentStep();
view.gradeView.load();
view.scrollToTop(); view.scrollToTop();
} }
).fail(function(errMsg) { ).fail(function(errMsg) {
......
...@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = { ...@@ -147,8 +147,7 @@ OpenAssessment.PeerView.prototype = {
var baseView = view.baseView; var baseView = view.baseView;
this.peerAssessRequest(function() { this.peerAssessRequest(function() {
view.load(); view.load();
baseView.renderSelfAssessmentStep(); baseView.loadAssessmentModules();
baseView.gradeView.load();
baseView.scrollToTop(); baseView.scrollToTop();
}); });
}, },
......
...@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = { ...@@ -291,8 +291,7 @@ OpenAssessment.ResponseView.prototype = {
**/ **/
moveToNextStep: function() { moveToNextStep: function() {
this.load(); this.load();
this.baseView.peerView.load(); this.baseView.loadAssessmentModules();
this.baseView.gradeView.load();
// Disable the "unsaved changes" warning if the user // Disable the "unsaved changes" warning if the user
// tries to navigate to another page. // tries to navigate to another page.
......
-<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
 <openassessment submission_due="2015-03-11T18:20">
     <title>
         Global Poverty
...
...@@ -10,7 +10,9 @@ ...@@ -10,7 +10,9 @@
{ {
"name": "self-assessment" "name": "self-assessment"
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"peer_only": { "peer_only": {
"valid": false, "valid": false,
...@@ -20,7 +22,9 @@ ...@@ -20,7 +22,9 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"self_only": { "self_only": {
"valid": true, "valid": true,
...@@ -28,7 +32,9 @@ ...@@ -28,7 +32,9 @@
{ {
"name": "self-assessment" "name": "self-assessment"
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"self_before_peer": { "self_before_peer": {
"valid": false, "valid": false,
...@@ -41,7 +47,9 @@ ...@@ -41,7 +47,9 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
}, },
"peer_then_peer": { "peer_then_peer": {
"valid": false, "valid": false,
...@@ -56,6 +64,8 @@ ...@@ -56,6 +64,8 @@
"must_grade": 5, "must_grade": 5,
"must_be_graded_by": 3 "must_be_graded_by": 3
} }
] ],
"current_assessments": null,
"is_released": false
} }
} }
 {
     "empty_dict": {
-        "assessment": {}
-    },
-    "must_be_graded_by_zero": {
-        "assessment": {
-            "name": "self-assessment",
-            "must_grade": 1,
-            "must_be_graded_by": 0
-        }
+        "assessments": [{}],
+        "current_assessments": null,
+        "is_released": false
     },
     "unsupported_type": {
-        "assessment": {
-            "name": "unsupported-assessment",
-            "must_grade": 5,
-            "must_be_graded_by": 3
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            },
+            {
+                "name": "self-assessment"
+            },
+            {
+                "name": "unsupported-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_type": {
-        "assessment": {
-            "must_grade": 5,
-            "must_be_graded_by": 3
-        }
+        "assessments": [
+            {
+                "name": "self-assessment"
+            },
+            {
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "unsupported_unicode_type": {
-        "assessment": {
-            "name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
-            "must_grade": 5,
-            "must_be_graded_by": 3
-        }
+        "assessments": [
+            {
+                "name": "self-assessment"
+            },
+            {
+                "name": "𝓹𝓮𝓮𝓻-𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_grade": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_be_graded_by": 3
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_be_graded_by": 3
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "no_must_be_graded_by": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_grade": 5
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_less_than_must_be_graded_by": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_grade": 4,
-            "must_be_graded_by": 5
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 4,
+                "must_be_graded_by": 5
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_grade_zero": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_grade": 0,
-            "must_be_graded_by": 0
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 0,
+                "must_be_graded_by": 0
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
     },
     "must_be_graded_by_zero": {
-        "assessment": {
-            "name": "peer-assessment",
-            "must_grade": 1,
-            "must_be_graded_by": 0
-        }
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 1,
+                "must_be_graded_by": 0
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "remove_peer_mid_flight": {
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": [
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "is_released": true
+    },
+    "swap_peer_and_self_mid_flight": {
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": [
+            {
+                "name": "self-assessment"
+            },
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            }
+        ],
+        "is_released": true
     }
 }
 {
-    "peer_then_self": [
-        {
-            "name": "peer-assessment",
-            "must_grade": 5,
-            "must_be_graded_by": 3
-        },
-        {
-            "name": "self-assessment"
-        }
-    ],
-    "self_only": [
-        {
-            "name": "self-assessment"
-        }
-    ],
-    "must_be_graded_by_equals_must_grade": [
-        {
-            "name": "peer-assessment",
-            "must_grade": 1,
-            "must_be_graded_by": 1
-        },
-        {
-            "name": "self-assessment"
-        }
-    ]
+    "peer_then_self": {
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "self_only": {
+        "assessments": [
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
+    },
+    "must_be_graded_by_equals_must_grade": {
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 1,
+                "must_be_graded_by": 1
+            },
+            {
+                "name": "self-assessment"
+            }
+        ],
+        "current_assessments": null,
+        "is_released": false
+    }
 }
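The restructured fixtures above all follow one schema: `assessments` (the proposed configuration), `current_assessments` (what is currently stored), and `is_released`. A small sketch of how a test case lines up with the new three-argument `validate_assessments` signature defined further down (the file path matches the `@ddt.file_data` usage in the tests):

```python
# Sketch: loading a combo fixture and mapping it onto validate_assessments.
import json

with open("data/assessment_combo.json") as combo_file:
    combo = json.load(combo_file)

case = combo["peer_then_self"]
args = (case["assessments"], case["current_assessments"], case["is_released"])
# validate_assessments(*args) returns a (is_valid, msg) tuple.
```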
...@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -37,6 +37,8 @@ class TestGrade(XBlockHandlerTestCase):
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก' SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
STEPS = ['peer', 'self']
@scenario('data/grade_scenario.xml', user_id='Greggs') @scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock): def test_render_grade(self, xblock):
# Submit, assess, and render the grade view # Submit, assess, and render the grade view
...@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase): ...@@ -224,7 +226,7 @@ class TestGrade(XBlockHandlerTestCase):
scorer['student_id'] = scorer_name scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text}) scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid']) workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers)) submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
......
...@@ -92,19 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -92,19 +92,15 @@ class StudioViewTest(XBlockHandlerTestCase):
# Test that we enforce that there are exactly two assessments, # Test that we enforce that there are exactly two assessments,
# peer ==> self # peer ==> self
# If and when we remove this restriction, this test can be deleted. # If and when we remove this restriction, this test can be deleted.
@data('data/invalid_assessment_combo_order.xml', 'data/invalid_assessment_combo_peer_only.xml')
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_update_xml_invalid_assessment_combo(self, xblock): def test_update_xml_invalid_assessment_combo(self, xblock, invalid_workflow):
invalid_workflows = [ request = json.dumps(
'invalid_assessment_combo_order', {'xml': self.load_fixture_str(invalid_workflow)}
'invalid_assessment_combo_peer_only' )
] resp = self.request(xblock, 'update_xml', request, response_format='json')
for invalid_workflow in invalid_workflows: self.assertFalse(resp['success'])
request = json.dumps( self.assertIn("for this assignment", resp['msg'].lower())
{'xml': self.load_fixture_str('data/{}.xml'.format(invalid_workflow))}
)
resp = self.request(xblock, 'update_xml', request, response_format='json')
self.assertFalse(resp['success'])
self.assertIn("supported assessment flows are", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment')) @data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
......
...@@ -14,18 +14,18 @@ class AssessmentValidationTest(TestCase): ...@@ -14,18 +14,18 @@ class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json') @ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data): def test_valid_assessment(self, data):
success, msg = validate_assessments(data) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertTrue(success) self.assertTrue(success)
self.assertEqual(msg, u'') self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json') @ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data): def test_invalid_assessment(self, data):
success, msg = validate_assessments([data['assessment']]) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertFalse(success) self.assertFalse(success)
self.assertGreater(len(msg), 0) self.assertGreater(len(msg), 0)
def test_no_assessments(self): def test_no_assessments(self):
success, msg = validate_assessments([]) success, msg = validate_assessments([], [], False)
self.assertFalse(success) self.assertFalse(success)
self.assertGreater(len(msg), 0) self.assertGreater(len(msg), 0)
...@@ -33,7 +33,7 @@ class AssessmentValidationTest(TestCase): ...@@ -33,7 +33,7 @@ class AssessmentValidationTest(TestCase):
# (peer -> self), and (self) # (peer -> self), and (self)
@ddt.file_data('data/assessment_combo.json') @ddt.file_data('data/assessment_combo.json')
def test_enforce_assessment_combo_restrictions(self, data): def test_enforce_assessment_combo_restrictions(self, data):
success, msg = validate_assessments(data['assessments']) success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"])
self.assertEqual(success, data['valid'], msg=msg) self.assertEqual(success, data['valid'], msg=msg)
if not success: if not success:
......
...@@ -43,7 +43,7 @@ def _duplicates(items): ...@@ -43,7 +43,7 @@ def _duplicates(items):
return set(x for x in items if counts[x] > 1) return set(x for x in items if counts[x] > 1)
def validate_assessments(assessments): def validate_assessments(assessments, current_assessments, is_released):
""" """
Check that the assessment dict is semantically valid. Check that the assessment dict is semantically valid.
...@@ -51,8 +51,15 @@ def validate_assessments(assessments): ...@@ -51,8 +51,15 @@ def validate_assessments(assessments):
* peer, then self * peer, then self
* self only * self only
If a question has been released, the type and number of assessment steps
cannot be changed.
Args: Args:
assessments (list of dict): list of serialized assessment models. assessments (list of dict): list of serialized assessment models.
current_assessments (list of dict): list of the current serialized
assessment models. Used to determine if the assessment configuration
has changed since the question had been released.
is_released (boolean) : True if the question has been released.
Returns: Returns:
tuple (is_valid, msg) where tuple (is_valid, msg) where
...@@ -69,16 +76,16 @@ def validate_assessments(assessments): ...@@ -69,16 +76,16 @@ def validate_assessments(assessments):
assessments[1].get('name') == 'self-assessment' assessments[1].get('name') == 'self-assessment'
) )
if len(assessments) == 0:
return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self) # Right now, there are two allowed scenarios: (peer -> self) and (self)
if not (_self_only(assessments) or _peer_then_self(assessments)): if not (_self_only(assessments) or _peer_then_self(assessments)):
return ( return (
False, False,
_("The only supported assessment flows are (peer, self) or (self).") _("For this assignment, you can set either a peer assessment followed by a self assessment or a self assessment only.")
) )
if len(assessments) == 0:
return (False, _("This problem must include at least one assessment."))
for assessment_dict in assessments: for assessment_dict in assessments:
# Supported assessment # Supported assessment
if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']: if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
...@@ -98,6 +105,15 @@ def validate_assessments(assessments): ...@@ -98,6 +105,15 @@ def validate_assessments(assessments):
if must_grade < must_be_graded_by: if must_grade < must_be_graded_by:
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.')) return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
if is_released:
if len(assessments) != len(current_assessments):
return (False, _("The number of assessments cannot be changed after the problem has been released."))
names = [assessment.get('name') for assessment in assessments]
current_names = [assessment.get('name') for assessment in current_assessments]
if names != current_names:
return (False, _("The assessment type cannot be changed after the problem has been released."))
return (True, u'') return (True, u'')
...@@ -197,7 +213,12 @@ def validator(oa_block, strict_post_release=True): ...@@ -197,7 +213,12 @@ def validator(oa_block, strict_post_release=True):
""" """
def _inner(rubric_dict, submission_dict, assessments): def _inner(rubric_dict, submission_dict, assessments):
success, msg = validate_assessments(assessments) current_assessments = oa_block.rubric_assessments
success, msg = validate_assessments(
assessments,
current_assessments,
strict_post_release and oa_block.is_released()
)
if not success: if not success:
return (False, msg) return (False, msg)
......
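A standalone illustration of the post-release check added above (it mirrors the new logic in `validate_assessments`; it is not the module code itself). The point of comparing only `name` fields is that tightening or loosening `must_grade`/`must_be_graded_by` remains legal after release, while adding, removing, or reordering steps does not:

```python
# Sketch: released problems may tune step parameters but not change steps.
def released_config_unchanged(assessments, current_assessments):
    if len(assessments) != len(current_assessments):
        return False
    names = [a.get("name") for a in assessments]
    current_names = [a.get("name") for a in current_assessments]
    return names == current_names

current = [
    {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3},
    {"name": "self-assessment"},
]
# Dropping the peer step mid-flight is rejected...
assert not released_config_unchanged([{"name": "self-assessment"}], current)
# ...but changing must_grade on an existing step is still allowed.
assert released_config_unchanged(
    [{"name": "peer-assessment", "must_grade": 2, "must_be_graded_by": 1},
     {"name": "self-assessment"}],
    current,
)
```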
...@@ -9,6 +9,10 @@ class WorkflowMixin(object): ...@@ -9,6 +9,10 @@ class WorkflowMixin(object):
return self.get_workflow_info() return self.get_workflow_info()
def create_workflow(self, submission_uuid): def create_workflow(self, submission_uuid):
steps = self._create_step_list()
workflow_api.create_workflow(submission_uuid, steps)
def _create_step_list(self):
def _convert_rubric_assessment_name(ra_name): def _convert_rubric_assessment_name(ra_name):
"""'self-assessment' -> 'self', 'peer-assessment' -> 'peer'""" """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
short_name, suffix = ra_name.split("-") short_name, suffix = ra_name.split("-")
...@@ -18,11 +22,10 @@ class WorkflowMixin(object): ...@@ -18,11 +22,10 @@ class WorkflowMixin(object):
# "peer-assessment", while the model is expecting "self", "peer". # "peer-assessment", while the model is expecting "self", "peer".
# Therefore, this conversion step. We should refactor later to # Therefore, this conversion step. We should refactor later to
# standardize. # standardize.
steps = [ return [
_convert_rubric_assessment_name(ra["name"]) _convert_rubric_assessment_name(ra["name"])
for ra in self.rubric_assessments for ra in self.rubric_assessments
] ]
workflow_api.create_workflow(submission_uuid, steps)
def workflow_requirements(self): def workflow_requirements(self):
""" """
...@@ -109,6 +112,7 @@ class WorkflowMixin(object): ...@@ -109,6 +112,7 @@ class WorkflowMixin(object):
status_counts = workflow_api.get_status_counts( status_counts = workflow_api.get_status_counts(
course_id=student_item['course_id'], course_id=student_item['course_id'],
item_id=student_item['item_id'], item_id=student_item['item_id'],
steps=self._create_step_list(),
) )
num_submissions = sum(item['count'] for item in status_counts) num_submissions = sum(item['count'] for item in status_counts)
return status_counts, num_submissions return status_counts, num_submissions
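The extracted `_create_step_list` helper is what bridges the two naming schemes: XML serialization uses long names ("peer-assessment"), while the workflow model expects short ones ("peer"). The conversion it performs, restated as a runnable sketch:

```python
# Restatement of the helper's conversion (same logic as the mixin code above).
def _convert_rubric_assessment_name(ra_name):
    """'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
    short_name, _suffix = ra_name.split("-")
    return short_name

rubric_assessments = [{"name": "peer-assessment"}, {"name": "self-assessment"}]
steps = [_convert_rubric_assessment_name(ra["name"]) for ra in rubric_assessments]
assert steps == ["peer", "self"]
```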