Commit a2f58dda by muzaffaryousaf

Remove Submission from grading pool via PeerWorkflow & AssessmentWorkflow cancellation.

TNL-900
parent 0bbc69fc
@@ -661,11 +661,16 @@ def get_submission_to_assess(submission_uuid, graded_by):
    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )

    if workflow.is_cancelled:
        return None

    open_item = workflow.find_active_assessments()
    peer_submission_uuid = open_item.submission_uuid if open_item else None
    # If there is an active assessment for this user, get that submission,
@@ -945,3 +950,48 @@ def _log_workflow(submission_uuid, workflow):
        tags.append(u"overgrading")

    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
def is_workflow_cancelled(submission_uuid):
    """
    Check whether the peer workflow for a submission has been cancelled.

    Args:
        submission_uuid (str): The UUID of the workflow's submission.

    Returns:
        True/False
    """
    if submission_uuid is None:
        return False
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        return workflow.is_cancelled if workflow else False
    except PeerAssessmentWorkflowError:
        return False
def on_cancel(submission_uuid):
    """
    Cancel the peer workflow for a submission.

    Sets the cancelled_at field in the peer workflow.

    Args:
        submission_uuid (str): The submission UUID associated with this workflow.

    Returns:
        None
    """
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        workflow.cancelled_at = timezone.now()
        workflow.save()
    except (PeerAssessmentWorkflowError, DatabaseError):
        error_message = (
            u"An internal error occurred while cancelling the peer "
            u"workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
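A minimal usage sketch of the two new helpers (illustrative only, not part of the commit; `submission_uuid` stands in for a real UUID from the submissions API):

# Illustration only: exercise the new peer-API helpers end to end.
from openassessment.assessment.api import peer as peer_api

peer_api.on_cancel(submission_uuid)                     # stamps cancelled_at on the PeerWorkflow
assert peer_api.is_workflow_cancelled(submission_uuid)  # now reports True
# A cancelled workflow no longer yields anything to assess.
assert peer_api.get_submission_to_assess(submission_uuid, 1) is None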
@@ -114,11 +114,22 @@ class PeerWorkflow(models.Model):
    created_at = models.DateTimeField(default=now, db_index=True)
    completed_at = models.DateTimeField(null=True, db_index=True)
    grading_completed_at = models.DateTimeField(null=True, db_index=True)
    cancelled_at = models.DateTimeField(null=True, db_index=True)

    class Meta:
        ordering = ["created_at", "id"]
        app_label = "assessment"

    @property
    def is_cancelled(self):
        """
        Check whether the workflow has been cancelled.

        Returns:
            True/False
        """
        return bool(self.cancelled_at)

    @classmethod
    def get_by_submission_uuid(cls, submission_uuid):
        """
@@ -206,7 +217,8 @@ class PeerWorkflow(models.Model):
        Before retrieving a new submission for a peer assessor, check to see if that
        assessor already has a submission out for assessment. If an unfinished
        assessment is found that has not expired or has not been cancelled,
        return the associated submission.

        TODO: If a user begins an assessment, then resubmits, this will never find
        the unfinished assessment. Is this OK?
@@ -221,13 +233,12 @@
        """
        oldest_acceptable = now() - self.TIME_LIMIT
        items = list(self.graded.all().select_related('author').order_by("-started_at", "-id"))
        valid_open_items = []
        completed_sub_uuids = []
        # First, remove all completed items.
        for item in items:
            if item.assessment is not None or item.author.is_cancelled:
                completed_sub_uuids.append(item.submission_uuid)
            else:
                valid_open_items.append(item)
@@ -266,6 +277,7 @@ class PeerWorkflow(models.Model):
            # 3) Is not something you have already scored.
            # 4) Does not have a combination of completed assessments or open
            #    assessments equal to or more than the requirement.
            # 5) Has not been cancelled.
            try:
                peer_workflows = list(PeerWorkflow.objects.raw(
                    "select pw.id, pw.submission_uuid "
@@ -274,6 +286,7 @@
                    "and pw.course_id=%s "
                    "and pw.student_id<>%s "
                    "and pw.grading_completed_at is NULL "
                    "and pw.cancelled_at is NULL "
                    "and pw.id not in ("
                    " select pwi.author_id "
                    " from assessment_peerworkflowitem pwi "
@@ -318,6 +331,7 @@ class PeerWorkflow(models.Model):
            # that:
            # 1) Does not belong to you
            # 2) Is not something you have already scored
            # 3) Has not been cancelled.
            try:
                query = list(PeerWorkflow.objects.raw(
                    "select pw.id, pw.submission_uuid "
@@ -325,6 +339,7 @@
                    "where course_id=%s "
                    "and item_id=%s "
                    "and student_id<>%s "
                    "and pw.cancelled_at is NULL "
                    "and pw.id not in ( "
                    "select pwi.author_id "
                    "from assessment_peerworkflowitem pwi "
......
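Both raw queries above gain the `and pw.cancelled_at is NULL` condition. For orientation only, a rough Django-ORM equivalent of the new filter (a sketch under assumed variable names, not code from this commit; the committed code keeps raw SQL for the NOT IN subquery and LIMIT handling):

# Sketch: the core of the grading-pool query expressed with the ORM.
candidates = PeerWorkflow.objects.filter(
    item_id=item_id,
    course_id=course_id,
    grading_completed_at__isnull=True,
    cancelled_at__isnull=True,   # the condition this commit adds
).exclude(student_id=student_id)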
@@ -145,6 +145,13 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
STEPS = ['peer', 'self']

STEP_REQUIREMENTS = {
    "peer": {
        "must_grade": 1,
        "must_be_graded_by": 1
    }
}
@ddt
class TestPeerApi(CacheResetTest):
    """
@@ -882,6 +889,82 @@ class TestPeerApi(CacheResetTest):
        item = buffy_workflow.find_active_assessments()
        self.assertEqual(xander_answer["uuid"], item.submission_uuid)
        # Cancel Xander's submission.
        xander_workflow = PeerWorkflow.get_by_submission_uuid(xander_answer['uuid'])
        workflow_api.cancel_workflow(
            submission_uuid=xander_answer["uuid"], comments='Cancellation reason', cancelled_by_id=_['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )

        # Buffy is no longer actively reviewing Xander's submission;
        # she isn't able to get the submission to assess.
        item = buffy_workflow.find_active_assessments()
        self.assertIsNone(item)
    def test_submission_cancelled_while_being_assessed(self):
        # Test that if a student pulls a submission for review and that
        # submission is then cancelled, their assessment will not be accepted.
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # Check for a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub['uuid'])
        self.assertIsNotNone(buffy_workflow)

        # Buffy is going to review Xander's submission, so create a workflow
        # item for Buffy.
        PeerWorkflow.create_item(buffy_workflow, xander_sub["uuid"])

        # Check to see if Buffy is actively reviewing Xander's submission.
        submission = peer_api.get_submission_to_assess(buffy_sub['uuid'], 1)
        self.assertEqual(xander_sub["uuid"], submission['uuid'])

        # Cancel Xander's submission.
        workflow_api.cancel_workflow(
            submission_uuid=xander_sub['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=buffy['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )

        # Buffy is no longer able to get the submission to assess.
        submission = peer_api.get_submission_to_assess(buffy_sub['uuid'], 1)
        self.assertIsNone(submission)

        # Trying to assess the cancelled submission
        # raises PeerAssessmentWorkflowError.
        with self.assertRaises(peer_api.PeerAssessmentWorkflowError):
            peer_api.create_assessment(
                buffy_sub['uuid'],
                buffy["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
    def test_cancelled_submission_peerworkflow_status(self):
        # Test that the peer workflow is cancelled.
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")

        # Check for a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub['uuid'])
        self.assertIsNotNone(buffy_workflow)

        # Cancel Buffy's submission (peer workflow and assessment workflow).
        workflow_api.cancel_workflow(
            submission_uuid=buffy_sub['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=buffy['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )

        workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub["uuid"])
        self.assertTrue(workflow.is_cancelled)
    def test_get_workflow_by_uuid(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")
@@ -893,8 +976,8 @@ class TestPeerApi(CacheResetTest):
        self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)
    def test_get_submission_for_review(self):
        buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
        self._create_student_and_submission("Willow", "Willow's answer")

        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])
@@ -903,6 +986,19 @@ class TestPeerApi(CacheResetTest):
        submission_uuid = buffy_workflow.get_submission_for_review(3)
        self.assertEqual(xander_answer["uuid"], submission_uuid)
        # Cancel Xander's submission.
        workflow_api.cancel_workflow(
            submission_uuid=xander_answer['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=buffy['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )

        # Buffy should no longer receive Xander's submission to assess.
        submission_uuid = buffy_workflow.get_submission_for_review(3)
        self.assertNotEqual(xander_answer["uuid"], submission_uuid)
    def test_get_submission_for_over_grading(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
@@ -1274,14 +1370,69 @@ class TestPeerApi(CacheResetTest):
        jane_assessments = peer_api.get_assessments(jane_sub['uuid'], scored_only=False)
        self.assertEqual(0, len(jane_assessments))

    def test_get_submission_to_assess_no_workflow(self):
        # Try to retrieve a submission to assess when the student
        # doing the assessment hasn't yet submitted.
        with self.assertRaises(peer_api.PeerAssessmentWorkflowError):
            peer_api.get_submission_to_assess("no_such_submission", "scorer ID")
    def test_get_submission_to_assess_for_cancelled_submission(self):
        # Test that a student is not able to pull a cancelled
        # submission for review.
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # Check for a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub['uuid'])
        self.assertIsNotNone(buffy_workflow)

        # Buffy is going to review Xander's submission, so create a workflow
        # item for Buffy.
        PeerWorkflow.create_item(buffy_workflow, xander_sub["uuid"])

        # Cancel Xander's submission.
        workflow_api.cancel_workflow(
            submission_uuid=xander_sub['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=buffy['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )

        # Buffy isn't able to get the submission to assess, because Xander's
        # submission has been cancelled.
        item = peer_api.get_submission_to_assess(buffy_sub['uuid'], 1)
        self.assertIsNone(item)
    def test_get_submission_to_assess_for_student_with_cancelled_submission(self):
        # Test that a student whose submission has been cancelled is not
        # able to review submissions by others.
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # Check for a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub['uuid'])
        self.assertIsNotNone(buffy_workflow)

        # Buffy is going to review Xander's submission, so create a workflow
        # item for Buffy.
        PeerWorkflow.create_item(buffy_workflow, xander_sub["uuid"])

        # Cancel Buffy's submission.
        workflow_api.cancel_workflow(
            submission_uuid=buffy_sub['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=xander['student_id'],
            assessment_requirements=STEP_REQUIREMENTS
        )
        self.assertTrue(peer_api.is_workflow_cancelled(submission_uuid=buffy_sub['uuid']))

        # Buffy isn't able to get a submission to assess, because her own
        # submission has been cancelled.
        item = peer_api.get_submission_to_assess(buffy_sub['uuid'], 1)
        self.assertIsNone(item)
    def test_too_many_assessments_counted_in_score_bug(self):
        # This bug allowed a score to be calculated using more
        # assessments than the required number in the problem definition.
......
@@ -24,7 +24,7 @@ class Command(BaseCommand):
    """
    help = 'Create dummy submissions and assessments'
    args = '<COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'

    # Number of peer assessments to create per submission
    NUM_PEER_ASSESSMENTS = 3
@@ -34,9 +34,12 @@ class Command(BaseCommand):
    NUM_OPTIONS = 5

    def __init__(self, *args, **kwargs):
        self.self_assessment_required = kwargs.get('self_assessment_required', False)
        kwargs = {}
        super(Command, self).__init__(*args, **kwargs)
        self._student_items = list()
    def handle(self, *args, **options):
        """
        Execute the command.
@@ -45,9 +48,10 @@
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
            percentage (int or float): Percentage of submissions for which to create assessments.
        """
        if len(args) < 4:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>')
        course_id = unicode(args[0])
        item_id = unicode(args[1])
@@ -57,10 +61,18 @@
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        try:
            percentage = float(args[3])
            assessments_to_create = (percentage / 100) * num_submissions
        except ValueError:
            raise CommandError('Percentage for completed submissions must be an integer or float')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        assessments_created = 0

        for sub_num in range(num_submissions):
            print "Creating submission {num}".format(num=sub_num)
@@ -80,7 +92,7 @@
            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-workflow {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)
@@ -93,22 +105,26 @@
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
                if assessments_created < assessments_to_create:
                    print "-- Creating peer-assessment {num}".format(num=num)
                    # Create the peer assessment
                    peer_api.create_assessment(
                        scorer_submission_uuid,
                        scorer_id,
                        options_selected, {}, " ".join(loremipsum.get_paragraphs(2)),
                        rubric,
                        self.NUM_PEER_ASSESSMENTS
                    )
                    assessments_created += 1

            if self.self_assessment_required:
                # Create a self-assessment
                print "-- Creating self assessment"
                self_api.create_assessment(
                    submission_uuid, student_item['student_id'],
                    options_selected, {}, " ".join(loremipsum.get_paragraphs(2)), rubric
                )

        print "%s assessments being completed for %s submissions" % (assessments_created, num_submissions)
    @property
    def student_items(self):
......
"""
Gives the time taken by
find_active_assessments
get_submission_for_review
get_submission_for_over_grading
methods for particular set of workflows.
"""
import random
import datetime
from django.core.management.base import BaseCommand
from openassessment.assessment.models import PeerWorkflow
class Command(BaseCommand):
"""
Note the time taken by queries.
"""
help = ("Test the performance for "
"find_active_assessments, "
"get_submission_for_review & "
"get_submission_for_over_grading"
"methods.")
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
"""
Execute the command.
Args:
None
"""
peer_workflow_count = PeerWorkflow.objects.filter(submission_uuid__isnull=False).count()
peer_workflow_ids = [random.randint(1, peer_workflow_count) for num in range(100)]
peer_workflows = list(PeerWorkflow.objects.filter(id__in=peer_workflow_ids))
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.find_active_assessments()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (find_active_assessments) method Is: %s " % time_taken
#### get_submission_for_review ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_review(2)
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_review) method Is: %s " % time_taken
#### get_submission_for_over_grading ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_over_grading()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_over_grading) method Is: %s " % time_taken
@@ -14,9 +14,8 @@ class CreateSubmissionsTest(TestCase):
    def test_create_submissions(self):
        # Create some submissions
        cmd = create_oa_submissions.Command(**{'self_assessment_required': True})
        cmd.handle("test_course", "test_item", "5", 100)

        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:
......
{% load i18n %}
{% spaceless %}
<li id="openassessment__grade" class="openassessment__steps__step step--grade has--error">
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="wrapper--copy">
<span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value">
<span class="grade__value__title">
{% with points_earned_string=score.points_earned|stringformat:"s" points_possible_string=score.points_possible|stringformat:"s" %}
{% blocktrans with points_earned='<span class="grade__value__earned">'|safe|add:points_earned_string|add:'</span>'|safe points_possible='<span class="grade__value__potential">'|safe|add:points_possible_string|add:'</span>'|safe %}
{{ points_earned }} out of {{ points_possible }}
{% endblocktrans %}
{% endwith %}
</span>
</span>
</span>
</h2>
</header>
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div class="step__content">
<div class="grade__value__description">
<p>{% trans "Your submission has been cancelled." %}</p>
</div>
</div>
</div>
</div>
</li>
{% endspaceless %}
{% extends "openassessmentblock/peer/oa_peer_assessment.html" %}
{% load i18n %}
{% load tz %}
{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">
{% trans "Cancelled" %}
</span>
</span>
</span>
{% endblock %}
{% block body %}
{% endblock %}
{% extends "openassessmentblock/response/oa_response.html" %}
{% load i18n %}
{% load tz %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility has--error">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{% trans "Cancelled" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div class="step__message message message--incomplete">
<h3 class="message__title">{% trans "Submission Cancelled" %}</h3>
<div class="message__content">
<p>
{% blocktrans with removed_datetime=workflow_cancellation.created_at|utc|date:"N j, Y H:i e" removed_by_username=workflow_cancellation.cancelled_by %}
Your submission has been cancelled by {{ removed_by_username }} on {{ removed_datetime }}
{% endblocktrans %}
<br>
<!-- Comments: Reason for Cancellation-->
{% blocktrans with comments=workflow_cancellation.comments %}
Comments: {{ comments }}
{% endblocktrans %}
</p>
</div>
<div class="step__content">
<article class="submission__answer__display">
<h3 class="submission__answer__display__title">{% trans "Your Response" %}</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer.text|linebreaks }}
</div>
</article>
</div>
</div>
</div>
</div>
{% endblock %}
{% extends "openassessmentblock/self/oa_self_assessment.html" %}
{% load i18n %}
{% load tz %}
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">
{% trans "Cancelled" %}
</span>
</span>
</span>
{% endblock %}
{% block body %}
{% endblock %}
@@ -11,7 +11,18 @@
<div class="step__content">
<h3 class="title">{% trans "Student Response" %}</h3>
<div class="student__answer__display__content">
    {% if workflow_cancellation %}
        {% blocktrans with removed_by_username=workflow_cancellation.cancelled_by removed_datetime=workflow_cancellation.created_at|utc|date:"N j, Y H:i e" %}
            Student submission removed by {{ removed_by_username }} on {{ removed_datetime }}
        {% endblocktrans %}
        <br>
        <!-- Comments: Reason for Cancellation -->
        {% blocktrans with comments=workflow_cancellation.comments %}
            Comments: {{ comments }}
        {% endblocktrans %}
    {% else %}
        {{ submission.answer.text|linebreaks }}
    {% endif %}
</div>

{% if submission.image_url %}
@@ -24,6 +35,60 @@
</div>
</div>
{% if not workflow_cancellation %}
<div id="openassessment__staff-info__cancel__submission"
class="openassessment__staff-info__cancel__submission wrapper--ui-staff wrapper--ui--collapse">
<div class="ui-staff ui-toggle-visibility is--collapsed">
<h2 class="staff-info__title ui-staff__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="staff-info__title__copy">{% trans "Remove submission from peer grading" %}</span>
</h2>
<div class="staff-info__cancel-submission__content ui-toggle-visibility__content">
<div class="ui-staff__content__section">
<div class="wrapper--input">
<form id="openassessment_staff_cancel_submission_form"
data-submission-uuid="{{ submission.uuid }}">
<ul class="list list--actions">
<li>
<div class="has--warnings">
<div class="warning">
{% trans "Caution: Removing a student's submission is irreversible. It should only be used in cases where the student's submission was inappropriate." %}
</div>
</div>
</li>
<li>
<label for="staff-info__cancel-submission__comments"
class="label">{% trans "Comments:" %}</label>
</li>
<li>
<textarea
id="staff-info__cancel-submission__comments"
class="cancel_submission_comments"
value=""
maxlength="10000"></textarea>
</li>
</ul>
<ul class="list list--actions">
<li class="list--actions__item">
<a data-submission-uuid="{{ submission.uuid }}" aria-role="button" href=""
id="submit_cancel_submission" class="action--submit is--disabled">
<span class="copy">{% trans "Remove submission" %}</span>
</a>
<div class="cancel-submission-error"></div>
</li>
</ul>
</form>
</div>
</div>
</div>
</div>
</div>
{% endif %}
{% if peer_assessments %}
<div class="staff-info__status ui-staff__content__section">
<h3 class="title">{% trans "Peer Assessments for This Student" %}</h3>
@@ -64,7 +129,7 @@
{% endwith %}
{% endfor %}
</div>
{% endif %}

{% if submitted_assessments %}
<div class="staff-info__status ui-staff__content__section">
......
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{% load tz %}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{% trans "Cancelled" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
{% endblock %}
@@ -10,14 +10,14 @@ from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import (
    PeerAssessmentError, StudentTrainingInternalError, AIError,
    PeerAssessmentInternalError)
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowCancellation, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer, AssessmentWorkflowCancellationSerializer
from .errors import (
    AssessmentWorkflowError, AssessmentWorkflowInternalError,
    AssessmentWorkflowRequestError, AssessmentWorkflowNotFoundError
)

logger = logging.getLogger(__name__)
@@ -372,3 +372,58 @@ def _serialized_with_details(workflow, assessment_requirements):
    data_dict["status_details"] = workflow.status_details(assessment_requirements)
    return data_dict
def cancel_workflow(submission_uuid, comments, cancelled_by_id, assessment_requirements):
    """
    Add an entry in the AssessmentWorkflowCancellation table for an AssessmentWorkflow.

    An AssessmentWorkflow that has been cancelled is no longer included in the
    peer grading pool.

    Args:
        submission_uuid (str): The UUID of the workflow's submission.
        comments (str): The reason for cancellation.
        cancelled_by_id (str): The ID of the user who cancelled the peer workflow.
        assessment_requirements (dict): Dictionary that currently looks like:
            `{"peer": {"must_grade": <int>, "must_be_graded_by": <int>}}`
            `must_grade` is the number of assessments a student must complete.
            `must_be_graded_by` is the number of assessments a submission must
            receive to be scored. `must_grade` should be greater than
            `must_be_graded_by` to ensure that everyone will get scored.
            The intention is to eventually pass in more assessment sequence
            specific requirements in this dict.
    """
    AssessmentWorkflow.cancel_workflow(submission_uuid, comments, cancelled_by_id, assessment_requirements)
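A brief usage sketch of this entry point (illustrative only; `submission_uuid` and `staff_id` are placeholders, and the helpers used for verification are defined just below):

# Sketch: cancel a workflow, then observe the effects via the module's helpers.
requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}
cancel_workflow(
    submission_uuid=submission_uuid,
    comments=u"Inappropriate language",
    cancelled_by_id=staff_id,
    assessment_requirements=requirements,
)
assert is_workflow_cancelled(submission_uuid)
cancellation = get_assessment_workflow_cancellation(submission_uuid)  # serialized dict, or None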
def get_assessment_workflow_cancellation(submission_uuid):
    """
    Get cancellation information for an assessment workflow.

    Args:
        submission_uuid (str): The UUID of the assessment workflow.
    """
    try:
        workflow_cancellation = AssessmentWorkflowCancellation.get_latest_workflow_cancellation(submission_uuid)
        return AssessmentWorkflowCancellationSerializer(workflow_cancellation).data if workflow_cancellation else None
    except DatabaseError:
        error_message = u"Error finding assessment workflow cancellation for submission UUID {}.".format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
def is_workflow_cancelled(submission_uuid):
    """
    Check whether an assessment workflow has been cancelled.

    Args:
        submission_uuid (str): The UUID of the assessment workflow.

    Returns:
        True/False
    """
    try:
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission_uuid)
        return workflow.is_cancelled if workflow else False
    except AssessmentWorkflowError:
        return False
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AssessmentWorkflowCancellation'
        db.create_table('workflow_assessmentworkflowcancellation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('workflow', self.gf('django.db.models.fields.related.ForeignKey')(related_name='cancellations', to=orm['workflow.AssessmentWorkflow'])),
            ('comments', self.gf('django.db.models.fields.TextField')(max_length=10000)),
            ('cancelled_by_id', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
        ))
        db.send_create_signal('workflow', ['AssessmentWorkflowCancellation'])

    def backwards(self, orm):
        # Deleting model 'AssessmentWorkflowCancellation'
        db.delete_table('workflow_assessmentworkflowcancellation')

    models = {
        'workflow.assessmentworkflow': {
            'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        'workflow.assessmentworkflowcancellation': {
            'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'AssessmentWorkflowCancellation'},
            'cancelled_by_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'comments': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cancellations'", 'to': "orm['workflow.AssessmentWorkflow']"})
        },
        'workflow.assessmentworkflowstep': {
            'Meta': {'ordering': "['workflow', 'order_num']", 'object_name': 'AssessmentWorkflowStep'},
            'assessment_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'submitter_completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['workflow.AssessmentWorkflow']"})
        }
    }

    complete_apps = ['workflow']
\ No newline at end of file
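For orientation, the model this migration creates would look roughly as follows in the workflow app's models module (a reconstruction from the frozen ORM state above plus the `get_latest_workflow_cancellation` call seen earlier in this commit, not text from the diff):

# Sketch of the implied model; fields mirror the frozen ORM state above.
class AssessmentWorkflowCancellation(models.Model):
    """Tracks cancellations of assessment workflows."""
    workflow = models.ForeignKey(AssessmentWorkflow, related_name='cancellations')
    comments = models.TextField(max_length=10000)
    cancelled_by_id = models.CharField(max_length=40, db_index=True)
    created_at = models.DateTimeField(default=datetime.datetime.now, db_index=True)

    class Meta:
        ordering = ["created_at", "id"]

    @classmethod
    def get_latest_workflow_cancellation(cls, submission_uuid):
        """Return the most recent cancellation for a submission's workflow, or None."""
        cancellations = cls.objects.filter(
            workflow__submission_uuid=submission_uuid
        ).order_by("-created_at")
        return cancellations[0] if cancellations.exists() else None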
@@ -3,7 +3,7 @@ Serializers are created to ensure models do not have to be accessed outside the
scope of the Tim APIs.
"""
from rest_framework import serializers
from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowCancellation


class AssessmentWorkflowSerializer(serializers.ModelSerializer):
@@ -35,3 +35,17 @@ class AssessmentWorkflowSerializer(serializers.ModelSerializer):
    #     'description',
    #     'created_at'
    # )
class AssessmentWorkflowCancellationSerializer(serializers.ModelSerializer):
    """
    Serialize an `AssessmentWorkflowCancellation` model.
    """

    class Meta:
        model = AssessmentWorkflowCancellation
        fields = (
            'comments',
            'cancelled_by_id',
            'created_at',
        )
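Usage sketch (illustrative; `cancellation` stands for any `AssessmentWorkflowCancellation` instance):

# Sketch: the serializer yields the dict consumed by the staff-debug template.
data = AssessmentWorkflowCancellationSerializer(cancellation).data
# data == {'comments': ..., 'cancelled_by_id': ..., 'created_at': ...}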
@@ -48,6 +48,13 @@ ITEM_1 = {
    "item_type": "openassessment",
}

ITEM_2 = {
    "student_id": "Optimus Prime 002",
    "item_id": "Matrix of Leadership(COPY)",
    "course_id": "Advanced Auto Mechanics 201",
    "item_type": "openassessment",
}

@ddt.ddt
class TestAssessmentWorkflowApi(CacheResetTest):
@@ -281,6 +288,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
            {"status": "self", "count": 0},
            {"status": "waiting", "count": 0},
            {"status": "done", "count": 0},
            {"status": "cancelled", "count": 0},
        ])
        self.assertFalse("ai" in [count['status'] for count in counts])
@@ -299,6 +307,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
        self._create_workflow_with_status("user 8", "test/1/1", "peer-problem", "done")
        self._create_workflow_with_status("user 9", "test/1/1", "peer-problem", "done")
        self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")
        self._create_workflow_with_status("user 11", "test/1/1", "peer-problem", "cancelled")

        # Now the counts should be updated
        counts = workflow_api.get_status_counts(
@@ -312,6 +321,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
            {"status": "self", "count": 2},
            {"status": "waiting", "count": 3},
            {"status": "done", "count": 4},
            {"status": "cancelled", "count": 1},
        ])
        self.assertFalse("ai" in [count['status'] for count in counts])
@@ -351,6 +361,113 @@ class TestAssessmentWorkflowApi(CacheResetTest):
        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow_api.update_from_assessments(submission['uuid'], {})
    def test_cancel_the_assessment_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check that the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Check that the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that the points earned are not 0.
        self.assertNotEqual(workflow['score'], 0)

        # Cancel the workflow for the submission.
        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=ITEM_2['student_id'],
            assessment_requirements=requirements
        )

        # Check that the workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # The workflow status should be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertEqual(workflow.status, 'cancelled')

        # With 0 points earned, the score should be None.
        self.assertEqual(workflow.score, None)

    def test_cancel_the_assessment_workflow_does_not_exist(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check that the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Cancelling a nonexistent workflow raises an error.
        with self.assertRaises(workflow_api.AssessmentWorkflowError):
            workflow_api.cancel_workflow(
                submission_uuid="1234567098789",
                comments="Inappropriate language",
                cancelled_by_id=ITEM_2['student_id'],
                assessment_requirements=requirements
            )

        # The existing workflow's status should not be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertNotEqual(workflow.status, 'cancelled')

    def test_get_the_cancelled_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check that the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Check that the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that the points earned are not 0.
        self.assertNotEqual(workflow['score'], 0)

        cancelled_workflow = workflow_api.get_assessment_workflow_cancellation(submission["uuid"])
        self.assertIsNone(cancelled_workflow)

        # Cancel the workflow for the submission.
        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=ITEM_2['student_id'],
            assessment_requirements=requirements
        )

        # Check that the workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        workflow = workflow_api.get_assessment_workflow_cancellation(submission["uuid"])
        self.assertIsNotNone(workflow)
    def _create_workflow_with_status(
            self, student_id, course_id, item_id,
            status, answer="answer", steps=None
......
@@ -49,7 +49,10 @@ class GradeMixin(object):
        # Render the grading section based on the status of the workflow
        try:
            if status == "cancelled":
                path = 'openassessmentblock/grade/oa_grade_cancelled.html'
                context = {'score': workflow['score']}
            elif status == "done":
                path, context = self.render_grade_complete(workflow)
            elif status == "waiting":
                path, context = self.render_grade_waiting(workflow)
......
@@ -211,7 +211,6 @@ class OpenAssessmentBlock(
        """
        item_id = self._serialize_opaque_key(self.scope_ids.usage_id)

        # This is not the real way course_ids should work, but this is a
        # temporary expediency for LMS integration
        if hasattr(self, "xmodule_runtime"):
@@ -746,3 +745,7 @@ class OpenAssessmentBlock(
            return key.to_deprecated_string()
        else:
            return unicode(key)

    def get_username(self, anonymous_user_id):
        if hasattr(self, "xmodule_runtime"):
            return self.xmodule_runtime.get_real_user(anonymous_user_id).username
@@ -7,6 +7,7 @@ from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors import (
    PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
from .data_conversion import create_rubric_dict
@@ -72,7 +73,7 @@ class PeerAssessmentMixin(object):
            )
            return {
                'success': False,
                'msg': self._('This feedback has already been submitted or the submission has been cancelled.'),
            }

        assessment_ui_model = self.get_assessment_module('peer-assessment')
@@ -187,6 +188,7 @@ class PeerAssessmentMixin(object):
            context_dict['peer_due'] = due_date

        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        peer_complete = workflow.get('status_details', {}).get('peer', {}).get('complete', False)
        continue_grading = continue_grading and peer_complete
@@ -214,9 +216,14 @@ class PeerAssessmentMixin(object):
                "Submit your assessment & move to response #{response_number}"
            ).format(response_number=(count + 2))

        if workflow_status == "cancelled":
            path = 'openassessmentblock/peer/oa_peer_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True

        # Once a student has completed a problem, it stays complete,
        # so this condition needs to be first.
        elif (workflow.get('status') == 'done' or finished) and not continue_grading:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        # Allow continued grading even if the problem due date has passed
......
@@ -66,8 +66,12 @@ class SelfAssessmentMixin(object):
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

        if workflow_status == 'cancelled':
            path = 'openassessmentblock/self/oa_self_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True
        elif self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))
......
@@ -7,6 +7,12 @@ from functools import wraps
import logging

from xblock.core import XBlock

from openassessment.assessment.errors import (
    PeerAssessmentInternalError,
)
from openassessment.workflow.errors import (
    AssessmentWorkflowError, AssessmentWorkflowInternalError
)
from openassessment.assessment.errors.ai import AIError
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import (
@@ -17,6 +23,7 @@ from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.fileupload import api as file_api
from openassessment.workflow import api as workflow_api

logger = logging.getLogger(__name__)
@@ -35,7 +42,7 @@ def require_global_admin(error_key):
        Decorated function
    """
    def _decorator(func):  # pylint: disable=C0111
        @wraps(func)
        def _wrapped(xblock, *args, **kwargs):  # pylint: disable=C0111
            permission_errors = {
@@ -50,7 +57,7 @@ def require_global_admin(error_key):
    return _decorator
def require_course_staff(error_key, with_json_handler=False):
    """
    Method decorator to restrict access to an XBlock render
    method to only course staff.
@@ -71,7 +78,10 @@ def require_course_staff(error_key):
                "STUDENT_INFO": xblock._(u"You do not have permission to access student information."),
            }

            if not xblock.is_course_staff and with_json_handler:
                return {"success": False, "msg": permission_errors[error_key]}
            elif not xblock.is_course_staff or xblock.in_studio_preview:
                return xblock.render_error(permission_errors[error_key])
            else:
                return func(xblock, *args, **kwargs)
@@ -86,7 +96,7 @@ class StaffInfoMixin(object):
    @XBlock.handler
    @require_course_staff("STAFF_INFO")
    def render_staff_info(self, data, suffix=''):  # pylint: disable=W0613
        """
        Template context dictionary for course staff debug panel.
@@ -158,7 +168,7 @@ class StaffInfoMixin(object):
    @XBlock.json_handler
    @require_global_admin("SCHEDULE_TRAINING")
    def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
        """
        Schedule a new training task for example-based grading.
        """
@@ -194,7 +204,7 @@ class StaffInfoMixin(object):
    @XBlock.handler
    @require_course_staff("STUDENT_INFO")
    def render_student_info(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders all relevant information for a specific student's workflow.
@@ -204,9 +214,13 @@ class StaffInfoMixin(object):
        Must be course staff to render this view.
        """
        try:
            student_id = data.params.get('student_id', '')
            path, context = self.get_student_info_path_and_context(student_id)
            return self.render_assessment(path, context)
        except PeerAssessmentInternalError:
            return self.render_error(self._(u"Error finding assessment workflow cancellation."))
def get_student_info_path_and_context(self, student_id): def get_student_info_path_and_context(self, student_id):
""" """
...@@ -262,8 +276,13 @@ class StaffInfoMixin(object): ...@@ -262,8 +276,13 @@ class StaffInfoMixin(object):
if "example-based-assessment" in assessment_steps: if "example-based-assessment" in assessment_steps:
example_based_assessment = ai_api.get_latest_assessment(submission_uuid) example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
if workflow_cancellation:
workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])
context = { context = {
'submission': submission, 'submission': submission,
'workflow_cancellation': workflow_cancellation,
'peer_assessments': peer_assessments, 'peer_assessments': peer_assessments,
'submitted_assessments': submitted_assessments, 'submitted_assessments': submitted_assessments,
'self_assessment': self_assessment, 'self_assessment': self_assessment,
...@@ -317,3 +336,49 @@ class StaffInfoMixin(object): ...@@ -317,3 +336,49 @@ class StaffInfoMixin(object):
'success': False, 'success': False,
'msg': self._(u"An error occurred while rescheduling tasks: {}".format(ex)) 'msg': self._(u"An error occurred while rescheduling tasks: {}".format(ex))
} }
@XBlock.json_handler
@require_course_staff("STUDENT_INFO", with_json_handler=True)
def cancel_submission(self, data, suffix=''):
"""
This will cancel the assessment + peer workflow for the particular submission.
Args:
data (dict): Data contain two attributes: submission_uuid and
comments. submission_uuid is id of submission which is to be
removed from the grading pool. Comments is the reason given
by the user.
suffix (not used)
Return:
Json serializable dict with the following elements:
'success': (bool) Indicates whether or not the workflow cancelled successfully.
'msg': The response (could be error message or success message).
"""
submission_uuid = data.get('submission_uuid')
comments = data.get('comments')
if not comments:
return {"success": False, "msg": self._(u'Please enter valid reason to remove the submission.')}
student_item_dict = self.get_student_item_dict()
try:
assessment_requirements = self.workflow_requirements()
# Cancel the related workflow.
workflow_api.cancel_workflow(
submission_uuid=submission_uuid, comments=comments,
cancelled_by_id=student_item_dict['student_id'],
assessment_requirements=assessment_requirements
)
return {"success": True, 'msg': self._(u"Student submission was removed from the peer grading pool."
u" If you'd like to allow the student to submit a new response,"
u" please also reset the student state of the problem from"
u" the Instructor Dashboard.")}
except (
AssessmentWorkflowError,
AssessmentWorkflowInternalError
) as ex:
msg = ex.message
logger.exception(msg)
return {"success": False, 'msg': msg}
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -656,6 +656,19 @@ ...@@ -656,6 +656,19 @@
"output": "oa_staff_info.html" "output": "oa_staff_info.html"
}, },
{ {
"template": "openassessmentblock/staff_debug/student_info.html",
"context": {
"submission": {
"image_url": "/test-url",
"answer":{
"text": "testing response text"
}
},
"submission_cancellation": 0
},
"output": "oa_student_info.html"
},
{
"template": "openassessmentblock/peer/oa_peer_assessment.html", "template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": { "context": {
"rubric_criteria": [ "rubric_criteria": [
......
...@@ -34,6 +34,14 @@ describe("OpenAssessment.StaffInfoView", function() { ...@@ -34,6 +34,14 @@ describe("OpenAssessment.StaffInfoView", function() {
}).promise(); }).promise();
}; };
var successPromise = $.Deferred(
function(defer) { defer.resolve(); }
).promise();
this.cancelSubmission = function(submissionUUID) {
return successPromise;
};
this.data = {}; this.data = {};
}; };
...@@ -153,4 +161,49 @@ describe("OpenAssessment.StaffInfoView", function() { ...@@ -153,4 +161,49 @@ describe("OpenAssessment.StaffInfoView", function() {
expect(server.rescheduleUnfinishedTasks).toHaveBeenCalled(); expect(server.rescheduleUnfinishedTasks).toHaveBeenCalled();
}); });
it("updates submission cancellation button when comments changes", function() {
// Prevent the server's response from resolving,
// so we can see what happens before the view gets re-rendered.
spyOn(server, 'cancelSubmission').andCallFake(function() {
return $.Deferred(function(defer) {}).promise();
});
// Load the fixture
loadFixtures('oa_student_info.html');
var el = $("#openassessment-base").get(0);
var view = new OpenAssessment.StaffInfoView(el, server, baseView);
// Comment is blank --> cancel submission button disabled
view.comment('');
view.handleCommentChanged();
expect(view.cancelSubmissionEnabled()).toBe(false);
// Comment is whitespace --> cancel submission button disabled
view.comment(' \n \n ');
view.handleCommentChanged();
expect(view.cancelSubmissionEnabled()).toBe(false);
// Comment is not blank --> cancel submission button enabled
view.comment('Cancellation reason.');
view.handleCommentChanged();
expect(view.cancelSubmissionEnabled()).toBe(true);
});
it("submits the cancel submission comments to the server", function() {
spyOn(server, 'cancelSubmission').andCallThrough();
// Load the fixture
loadFixtures('oa_student_info.html');
var el = $("#openassessment-base").get(0);
var view = new OpenAssessment.StaffInfoView(el, server, baseView);
view.comment('Cancellation reason.');
view.cancelSubmission('Bob');
expect(server.cancelSubmission).toHaveBeenCalledWith('Bob', 'Cancellation reason.');
});
}); });
...@@ -130,6 +130,26 @@ describe("OpenAssessment.Server", function() { ...@@ -130,6 +130,26 @@ describe("OpenAssessment.Server", function() {
}); });
}); });
it("sends a submission to XBlock for cancellation", function() {
stubAjax(true, {success:true, msg:'test message'});
var submissionUUID = 'Bob';
var comments = 'Cancellation reason.';
var success = false;
server.cancelSubmission(submissionUUID, comments).done(
function() {
success=true;
}
);
expect(success).toBe(true);
expect($.ajax).toHaveBeenCalledWith({
url: '/cancel_submission',
type: "POST",
data: JSON.stringify({submission_uuid: submissionUUID, comments: comments})
});
});
it("saves a response submission", function() { it("saves a response submission", function() {
stubAjax(true, {'success': true, 'msg': ''}); stubAjax(true, {'success': true, 'msg': ''});
var success = false; var success = false;
......
...@@ -48,9 +48,8 @@ OpenAssessment.BaseView.prototype = { ...@@ -48,9 +48,8 @@ OpenAssessment.BaseView.prototype = {
Args: Args:
parentSel (JQuery selector): CSS selector for the container element. parentSel (JQuery selector): CSS selector for the container element.
**/ **/
setUpCollapseExpand: function(parentSel) { setUpCollapseExpand: function (parentSel) {
parentSel.find('.ui-toggle-visibility__control').click( parentSel.on('click', '.ui-toggle-visibility__control', function (eventData) {
function(eventData) {
var sel = $(eventData.target).closest('.ui-toggle-visibility'); var sel = $(eventData.target).closest('.ui-toggle-visibility');
sel.toggleClass('is--collapsed'); sel.toggleClass('is--collapsed');
} }
......
...@@ -53,10 +53,23 @@ OpenAssessment.StaffInfoView.prototype = { ...@@ -53,10 +53,23 @@ OpenAssessment.StaffInfoView.prototype = {
function(html) { function(html) {
// Load the HTML and install event handlers // Load the HTML and install event handlers
$('#openassessment__student-info', view.element).replaceWith(html); $('#openassessment__student-info', view.element).replaceWith(html);
// Install click handler for the cancel submission button.
var selCancelSub = $('#openassessment__staff-info__cancel__submission', view.element);
selCancelSub.on('click', '#submit_cancel_submission', function (eventObject) {
eventObject.preventDefault();
view.cancelSubmission($(this).data('submission-uuid'));
}
);
// Install change handler for textarea (to enable cancel submission button)
var handleChange = function(eventData) { view.handleCommentChanged(); };
selCancelSub.find('#staff-info__cancel-submission__comments').on('change keyup drop paste', handleChange);
} }
).fail(function(errMsg) { ).fail(function(errMsg) {
view.showLoadError('student_info'); view.showLoadError('student_info');
}); });
}, },
/** /**
...@@ -64,6 +77,7 @@ OpenAssessment.StaffInfoView.prototype = { ...@@ -64,6 +77,7 @@ OpenAssessment.StaffInfoView.prototype = {
**/ **/
installHandlers: function() { installHandlers: function() {
var sel = $('#openassessment__staff-info', this.element); var sel = $('#openassessment__staff-info', this.element);
var selStudentInfo = $('#openassessment__student-info', this.element);
var view = this; var view = this;
if (sel.length <= 0) { if (sel.length <= 0) {
...@@ -71,6 +85,7 @@ OpenAssessment.StaffInfoView.prototype = { ...@@ -71,6 +85,7 @@ OpenAssessment.StaffInfoView.prototype = {
} }
this.baseView.setUpCollapseExpand(sel, function() {}); this.baseView.setUpCollapseExpand(sel, function() {});
this.baseView.setUpCollapseExpand(selStudentInfo, function() {});
// Install key handler for student id field // Install key handler for student id field
sel.find('#openassessment_student_info_form').submit( sel.find('#openassessment_student_info_form').submit(
...@@ -136,5 +151,78 @@ OpenAssessment.StaffInfoView.prototype = { ...@@ -136,5 +151,78 @@ OpenAssessment.StaffInfoView.prototype = {
).fail(function(errMsg) { ).fail(function(errMsg) {
$('#reschedule_unfinished_tasks_message', this.element).text(errMsg) $('#reschedule_unfinished_tasks_message', this.element).text(errMsg)
}); });
},
/**
Upon request, cancel the submission from the grading pool.
**/
cancelSubmission: function(submissionUUID) {
// Immediately disable the button to prevent multiple requests.
this.cancelSubmissionEnabled(false);
var view = this;
var sel = $('#openassessment__student-info', this.element);
var comments = sel.find('#staff-info__cancel-submission__comments').val();
this.server.cancelSubmission(submissionUUID, comments).done(
function(msg) {
$('.cancel-submission-error').html('');
$('#openassessment__staff-info__cancel__submission', view.element).html(msg);
}
).fail(function(errMsg) {
$('.cancel-submission-error').html(errMsg);
});
},
/**
Enable/disable the cancel submission button,
or check whether the button is currently enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.cancelSubmissionEnabled(true); // enable the button
>> view.cancelSubmissionEnabled(); // check whether the button is enabled
>> true
**/
cancelSubmissionEnabled: function(enabled) {
var sel = $('#submit_cancel_submission', this.element);
if (typeof enabled === 'undefined') {
return !sel.hasClass('is--disabled');
} else {
sel.toggleClass('is--disabled', !enabled);
}
},
/**
Get or set the comment text.
Args:
text (string): If specified, the comment text to set (the reason for
cancelling the submission).
Returns:
string: The current comment text.
**/
comment: function(text) {
var sel = $('#staff-info__cancel-submission__comments', this.element);
if (typeof text === 'undefined') {
return sel.val();
} else {
sel.val(text);
}
},
/**
Enable/disable the cancel submission button based on whether
the user has entered a comment.
**/
handleCommentChanged: function() {
// Enable the cancel submission button only for non-blank comments
var isNotBlank = ($.trim(this.comment()) !== '');
this.cancelSubmissionEnabled(isNotBlank);
} }
}; };
...@@ -549,6 +549,34 @@ if (typeof OpenAssessment.Server == "undefined" || !OpenAssessment.Server) { ...@@ -549,6 +549,34 @@ if (typeof OpenAssessment.Server == "undefined" || !OpenAssessment.Server) {
defer.rejectWith(this, [gettext('Could not retrieve download url.')]); defer.rejectWith(this, [gettext('Could not retrieve download url.')]);
}); });
}).promise(); }).promise();
},
/**
Cancel the submission from peer grading pool.
Args:
submissionUUID (string): UUID of the submission to remove from the grading pool.
comments (string): The reason for cancelling the submission.
**/
cancelSubmission: function (submissionUUID, comments) {
var url = this.url('cancel_submission');
var payload = JSON.stringify({
submission_uuid: submissionUUID,
comments: comments
});
return $.Deferred(function (defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
function(data) {
if (data.success) {
defer.resolveWith(this, [data.msg]);
}
else {
defer.rejectWith(this, [data.msg]);
}
}
).fail(function(data) {
defer.rejectWith(this, [gettext('The submission could not be removed from the grading pool.')]);
});
}).promise();
} }
}; };
} }
...@@ -134,3 +134,24 @@ ...@@ -134,3 +134,24 @@
} }
} }
// UI - cancel submission (action)
.openassessment__staff-info__cancel__submission {
.staff-info__cancel-submission__content {
.cancel_submission_comments {
width: 100%;
min-height: ($baseline-v*5);
text-align: left;
}
.list--actions {
.action--submit {
margin: ($baseline-v/2) 0;
}
}
}
}
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
.list--actions { .list--actions {
list-style: none !important; list-style: none !important;
@include padding-left(0); @include padding-left(0 !important);
text-indent: 0 !important; text-indent: 0 !important;
li { li {
...@@ -204,4 +204,8 @@ ...@@ -204,4 +204,8 @@
.ui-staff__title { .ui-staff__title {
color: $copy-staff-color !important; color: $copy-staff-color !important;
} }
.openassessment__staff-info__cancel__submission {
margin-bottom: ($baseline-v) !important;
}
} }
...@@ -5,6 +5,7 @@ import logging ...@@ -5,6 +5,7 @@ import logging
from webob import Response from webob import Response
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment.api import student_training from openassessment.assessment.api import student_training
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
...@@ -83,7 +84,9 @@ class StudentTrainingMixin(object): ...@@ -83,7 +84,9 @@ class StudentTrainingMixin(object):
# shows as complete. # shows as complete.
# We're assuming here that the training step always precedes the other assessment steps # We're assuming here that the training step always precedes the other assessment steps
# (peer/self) -- we may need to make this more flexible later. # (peer/self) -- we may need to make this more flexible later.
if workflow_status and workflow_status != "training": if workflow_status == 'cancelled':
template = 'openassessmentblock/student_training/student_training_cancelled.html'
elif workflow_status and workflow_status != "training":
template = 'openassessmentblock/student_training/student_training_complete.html' template = 'openassessmentblock/student_training/student_training_complete.html'
# If the problem is closed, then do not allow students to access the training step # If the problem is closed, then do not allow students to access the training step
......
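The branch above extends the existing template dispatch on workflow status; as an illustration, a hypothetical helper that captures the same selection logic:

def _training_template(workflow_status):
    # Hypothetical helper, for illustration only; mirrors the branch above.
    if workflow_status == 'cancelled':
        return 'openassessmentblock/student_training/student_training_cancelled.html'
    if workflow_status and workflow_status != 'training':
        return 'openassessmentblock/student_training/student_training_complete.html'
    return None  # fall through to the mixin's open/closed checks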
...@@ -5,6 +5,7 @@ from xblock.core import XBlock ...@@ -5,6 +5,7 @@ from xblock.core import XBlock
from submissions import api from submissions import api
from openassessment.fileupload import api as file_upload_api from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.api import FileUploadError from openassessment.fileupload.api import FileUploadError
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
...@@ -355,6 +356,18 @@ class SubmissionMixin(object): ...@@ -355,6 +356,18 @@ class SubmissionMixin(object):
context['save_status'] = self.save_status context['save_status'] = self.save_status
context['submit_enabled'] = self.saved_response != '' context['submit_enabled'] = self.saved_response != ''
path = "openassessmentblock/response/oa_response.html" path = "openassessmentblock/response/oa_response.html"
elif workflow["status"] == "cancelled":
workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(self.submission_uuid)
if workflow_cancellation:
workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])
context['workflow_cancellation'] = workflow_cancellation
context["student_submission"] = self.get_user_submission(
workflow["submission_uuid"]
)
path = 'openassessmentblock/response/oa_response_cancelled.html'
elif workflow["status"] == "done": elif workflow["status"] == "done":
student_submission = self.get_user_submission( student_submission = self.get_user_submission(
workflow["submission_uuid"] workflow["submission_uuid"]
......
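For reference, the shape of the cancellation record returned by get_assessment_workflow_cancellation, with field values mirroring the submission test fixture below; the mixin fills in 'cancelled_by' by resolving the anonymous id to a username:

import datetime
import pytz

# Example record only; values come from the test fixture later in this diff.
workflow_cancellation = {
    'comments': 'Inappropriate language',                          # staff-supplied reason
    'cancelled_by_id': 'Bob',                                      # anonymous id of the canceller
    'created_at': datetime.datetime(2999, 5, 6, tzinfo=pytz.utc),  # cancellation time
    'cancelled_by': 'Bob',                                         # username resolved via get_username()
}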
...@@ -103,6 +103,41 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -103,6 +103,41 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertGreater(len(resp['msg']), 0) self.assertGreater(len(resp['msg']), 0)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_for_already_cancelled_submission(self, xblock):
# Create a submission for this problem from another user
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_submission = xblock.create_submission(another_student, self.SUBMISSION)
assessment = self.ASSESSMENT
assessment['submission_uuid'] = assessment.get('submission_uuid', submission.get('uuid', None))
# Pull the submission to assess
peer_api.get_submission_to_assess(another_submission['uuid'], 3)
requirements = {
"peer": {
"must_grade": 1,
"must_be_graded_by": 1
},
}
workflow_api.cancel_workflow(
submission_uuid=submission['uuid'],
comments="Inappropriate language",
cancelled_by_id=another_student['student_id'],
assessment_requirements=requirements
)
# Submit an assessment and expect a failure
resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertEqual(resp['success'], False)
self.assertGreater(len(resp['msg']), 0)
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock): def test_missing_keys_in_request(self, xblock):
for missing in ['criterion_feedback', 'overall_feedback', 'options_selected']: for missing in ['criterion_feedback', 'overall_feedback', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
...@@ -336,6 +371,28 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase): ...@@ -336,6 +371,28 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
workflow_status='peer', workflow_status='peer',
) )
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_cancelled_workflow(self, xblock):
# Make a submission, so we get to peer assessment
xblock.create_submission(xblock.get_student_item_dict(), u"ฬє'гє รՇเɭɭ ๓єภ")
expected_context = {
'graded': 0,
'estimated_time': '20 minutes',
'rubric_criteria': xblock.rubric_criteria,
'must_grade': 5,
'review_num': 1,
'submit_button_text': 'submit your assessment & move to response #2',
'allow_latex': False,
}
self._assert_path_and_context(
xblock, 'openassessmentblock/peer/oa_peer_cancelled.html',
expected_context,
workflow_status='cancelled',
graded_enough=True,
)
@scenario('data/peer_closed_scenario.xml', user_id='Bob') @scenario('data/peer_closed_scenario.xml', user_id='Bob')
def test_peer_closed_no_assessments_available(self, xblock): def test_peer_closed_no_assessments_available(self, xblock):
# Make a submission, so we get to peer assessment # Make a submission, so we get to peer assessment
......
...@@ -239,11 +239,22 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase): ...@@ -239,11 +239,22 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
xblock.get_student_item_dict(), u"Ⱥɨn'ŧ ɨŧ fᵾnꝁɏ" xblock.get_student_item_dict(), u"Ⱥɨn'ŧ ɨŧ fᵾnꝁɏ"
) )
self._assert_path_and_context( self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex':False}, xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
workflow_status='done' workflow_status='done'
) )
@scenario('data/self_assessment_open.xml', user_id='James Brown') @scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_cancelled_status(self, xblock):
# Simulate the workflow status being "done"
xblock.create_submission(
xblock.get_student_item_dict(), u"Ⱥɨn'ŧ ɨŧ fᵾnꝁɏ"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_cancelled.html', {'allow_latex': False},
workflow_status='cancelled'
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_self_assessing(self, xblock): def test_open_self_assessing(self, xblock):
# Simulate the workflow being in the self assessment step # Simulate the workflow being in the self assessment step
submission = xblock.create_submission( submission = xblock.create_submission(
......
...@@ -198,7 +198,7 @@ class TestCourseStaff(XBlockHandlerTestCase): ...@@ -198,7 +198,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
@scenario('data/self_only_scenario.xml', user_id='Bob') @scenario('data/self_only_scenario.xml', user_id='Bob')
def test_staff_debug_student_info_self_only(self, xblock): def test_staff_debug_student_info_self_only(self, xblock):
# Simulate that we are course staff # Simulate that we are course staff
xblock.xmodule_runtime = self._create_mock_runtime( xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob" xblock.scope_ids.usage_id, True, False, "Bob"
) )
...@@ -224,10 +224,76 @@ class TestCourseStaff(XBlockHandlerTestCase): ...@@ -224,10 +224,76 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertEquals([], context['peer_assessments']) self.assertEquals([], context['peer_assessments'])
self.assertEquals("openassessmentblock/staff_debug/student_info.html", path) self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_staff_debug_student_info_with_cancelled_submission(self, xblock):
requirements = {
"peer": {
"must_grade": 1,
"must_be_graded_by": 1
},
}
# Simulate that we are course staff
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob"
)
bob_item = STUDENT_ITEM.copy()
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
workflow_api.cancel_workflow(
submission_uuid=submission["uuid"],
comments="Inappropriate language",
cancelled_by_id=bob_item['student_id'],
assessment_requirements=requirements
)
path, context = xblock.get_student_info_path_and_context("Bob")
self.assertEquals("Bob Answer", context['submission']['answer']['text'])
self.assertIsNotNone(context['workflow_cancellation'])
self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_cancelled_submission_peer_assessment_render_path(self, xblock):
# Test that the peer assessment path is oa_peer_cancelled.html for a cancelled submission.
# Simulate that we are course staff
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob"
)
bob_item = STUDENT_ITEM.copy()
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
requirements = {
"peer": {
"must_grade": 1,
"must_be_graded_by": 1
},
}
workflow_api.cancel_workflow(
submission_uuid=submission['uuid'],
comments="Inappropriate language",
cancelled_by_id=bob_item['student_id'],
assessment_requirements=requirements
)
xblock.submission_uuid = submission["uuid"]
path, context = xblock.peer_path_and_context(False)
self.assertEquals("openassessmentblock/peer/oa_peer_cancelled.html", path)
@scenario('data/self_only_scenario.xml', user_id='Bob') @scenario('data/self_only_scenario.xml', user_id='Bob')
def test_staff_debug_student_info_image_submission(self, xblock): def test_staff_debug_student_info_image_submission(self, xblock):
# Simulate that we are course staff # Simulate that we are course staff
xblock.xmodule_runtime = self._create_mock_runtime( xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob" xblock.scope_ids.usage_id, True, False, "Bob"
) )
...@@ -491,6 +557,51 @@ class TestCourseStaff(XBlockHandlerTestCase): ...@@ -491,6 +557,51 @@ class TestCourseStaff(XBlockHandlerTestCase):
__, context = xblock.get_staff_path_and_context() __, context = xblock.get_staff_path_and_context()
self.assertNotIn('classifierset', context) self.assertNotIn('classifierset', context)
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_cancel_submission_without_reason(self, xblock):
# If we're not course staff, we shouldn't be able to
# cancel the submission
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, False, False, "Bob"
)
resp = self.request(xblock, 'cancel_submission', json.dumps({}))
self.assertIn("you do not have permission", resp.decode('utf-8').lower())
# If we ARE course staff, the handler should respond, but with an
# error message because no cancellation reason was provided.
xblock.xmodule_runtime.user_is_staff = True
resp = self.request(xblock, 'cancel_submission', json.dumps({}), response_format='json')
self.assertIn("Please enter valid reason", resp['msg'])
self.assertEqual(False, resp['success'])
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_cancel_submission_full_flow(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = self._create_mock_runtime(
xblock.scope_ids.usage_id, True, False, "Bob"
)
bob_item = STUDENT_ITEM.copy()
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
incorrect_submission_uuid = 'abc'
params = {"submission_uuid": incorrect_submission_uuid, "comments": "Inappropriate language."}
# Expect a workflow-not-found error for the incorrect submission UUID.
resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
self.assertIn("Error finding workflow", resp['msg'])
self.assertEqual(False, resp['success'])
# Verify that a valid submission is cancelled successfully
params = {"submission_uuid": submission["uuid"], "comments": "Inappropriate language."}
resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
self.assertIn("Student submission was removed from the ", resp['msg'])
self.assertEqual(True, resp['success'])
def _create_mock_runtime(self, item_id, is_staff, is_admin, anonymous_user_id): def _create_mock_runtime(self, item_id, is_staff, is_admin, anonymous_user_id):
mock_runtime = Mock( mock_runtime = Mock(
course_id='test_course', course_id='test_course',
......
...@@ -6,7 +6,7 @@ import datetime ...@@ -6,7 +6,7 @@ import datetime
import ddt import ddt
import json import json
import pprint import pprint
from mock import patch from mock import Mock, patch
import pytz import pytz
from django.db import DatabaseError from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow from openassessment.assessment.models import StudentTrainingWorkflow
...@@ -295,6 +295,19 @@ class StudentTrainingRenderTest(StudentTrainingTest): ...@@ -295,6 +295,19 @@ class StudentTrainingRenderTest(StudentTrainingTest):
self.assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_cancelled_submission(self, xblock):
submission = xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
xblock.get_workflow_info = Mock(return_value={
'status': 'cancelled',
'submission_uuid': submission['uuid']
})
expected_template = "openassessmentblock/student_training/student_training_cancelled.html"
expected_context = {
'allow_latex': False,
}
self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
@patch.object(StudentTrainingWorkflow, "get_workflow") @patch.object(StudentTrainingWorkflow, "get_workflow")
def test_internal_error(self, xblock, mock_workflow): def test_internal_error(self, xblock, mock_workflow):
mock_workflow.side_effect = DatabaseError("Oh no.") mock_workflow.side_effect = DatabaseError("Oh no.")
......
...@@ -7,6 +7,7 @@ import json ...@@ -7,6 +7,7 @@ import json
import datetime as dt import datetime as dt
import pytz import pytz
from mock import patch, Mock from mock import patch, Mock
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError from submissions.api import SubmissionRequestError, SubmissionInternalError
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
...@@ -199,6 +200,45 @@ class SubmissionRenderTest(XBlockHandlerTestCase): ...@@ -199,6 +200,45 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
} }
) )
@scenario('data/submission_open.xml', user_id="Bob")
def test_cancelled_submission(self, xblock):
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(
student_item,
'A man must have a code'
)
xblock.get_workflow_info = Mock(return_value={
'status': 'cancelled',
'submission_uuid': submission['uuid']
})
xblock.get_username = Mock(return_value='Bob')
workflow_api.get_assessment_workflow_cancellation = Mock(return_value={
'comments': 'Inappropriate language',
'cancelled_by_id': 'Bob',
'created_at': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'cancelled_by': 'Bob'
})
self._assert_path_and_context(
xblock, 'openassessmentblock/response/oa_response_cancelled.html',
{
'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'student_submission': submission,
'allow_file_upload': False,
'has_peer': True,
'has_self': True,
'allow_latex': False,
'workflow_cancellation': {
'comments': 'Inappropriate language',
'cancelled_by_id': 'Bob',
'created_at': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'cancelled_by': 'Bob'
}
}
)
@scenario('data/submission_closed.xml', user_id="Bob") @scenario('data/submission_closed.xml', user_id="Bob")
def test_closed_incomplete(self, xblock): def test_closed_incomplete(self, xblock):
self._assert_path_and_context( self._assert_path_and_context(
......