Commit 8c0e01b1 by Stephen Sanchez

Merge pull request #270 from edx/sanchez/TIM-267-Student-State

WIP: Removing the peer uuid from the assessment api
parents ff52a5c4 9605ed9c
......@@ -196,11 +196,10 @@ def create_assessment(
raise PeerAssessmentRequestError(msg)
scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
feedback = assessment_dict.get('feedback', u'')
open_items = list(scorer_workflow.graded.filter(
assessment__isnull=True).order_by("-started_at", "-id")[:1])
if not open_items:
peer_workflow_item = _get_latest_open_workflow_item(scorer_workflow)
if peer_workflow_item is None:
message = _(
u"There are no open assessments associated with the scorer's "
u"submission UUID {}.".format(scorer_submission_uuid)
......@@ -208,13 +207,11 @@ def create_assessment(
logger.error(message)
raise PeerAssessmentWorkflowError(message)
item = open_items[0]
feedback = assessment_dict.get('feedback', u'')
peer_submission_uuid = peer_workflow_item.author.submission_uuid
peer_assessment = {
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission_uuid": item.submission_uuid,
"submission_uuid": peer_submission_uuid,
"score_type": PEER_TYPE,
"feedback": feedback,
}
......@@ -235,7 +232,7 @@ def create_assessment(
AssessmentPart.add_to_assessment(assessment, option_ids)
# Close the active assessment
_close_active_assessment(scorer_workflow, item.submission_uuid, assessment, num_required_grades)
_close_active_assessment(scorer_workflow, peer_submission_uuid, assessment, num_required_grades)
assessment_dict = full_assessment_dict(assessment)
_log_assessment(assessment, scorer_workflow)
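With the peer UUID dropped from the public API, create_assessment now needs only the scorer's submission UUID: the scorer's PeerWorkflow, the open workflow item, and the peer's submission UUID are all resolved internally. A minimal caller-side sketch, mirroring the tests further down; the import path and the ASSESSMENT_DICT/RUBRIC_DICT names are assumptions, not part of this commit:

# Hedged usage sketch; placeholder names (scorer_sub_uuid, ASSESSMENT_DICT,
# RUBRIC_DICT) are illustrative only.
from openassessment.assessment import peer_api

assessment = peer_api.create_assessment(
    scorer_sub_uuid,      # the scorer's own submission UUID
    "Bob",                # scorer_id
    ASSESSMENT_DICT,      # selected rubric options plus optional 'feedback'
    RUBRIC_DICT,          # rubric definition
    3,                    # number of required grades (see num_required_grades above)
)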
......@@ -275,7 +272,7 @@ def get_rubric_max_scores(submission_uuid):
assessments = list(
Assessment.objects.filter(
submission_uuid=submission_uuid
).order_by( "-scored_at", "-id").select_related("rubric")[:1]
).order_by("-scored_at", "-id").select_related("rubric")[:1]
)
if not assessments:
return None
......@@ -331,17 +328,16 @@ def get_assessment_median_scores(submission_uuid):
raise PeerAssessmentInternalError(error_message)
def has_finished_required_evaluating(student_item_dict, required_assessments):
def has_finished_required_evaluating(submission_uuid, required_assessments):
"""Check if a student still needs to evaluate more submissions
Per the contract of the peer assessment workflow, a student must evaluate a
number of peers before receiving feedback on their submission.
Args:
student_item (dict): The student id is required to determine if the
student has completed enough assessments, relative to the item id
and course id available in the student item. This argument is
required.
submission_uuid (str): The submission UUID is required to determine if
the associated student has completed enough assessments. This
argument is required.
required_assessments (int): The number of assessments a student has to
submit before receiving the feedback on their submission. This is a
required argument.
......@@ -353,23 +349,17 @@ def has_finished_required_evaluating(student_item_dict, required_assessments):
assessments completed.
Raises:
PeerAssessmentRequestError: Raised when the student_id is invalid, or
the required_assessments is not a positive integer.
PeerAssessmentRequestError: Raised when the submission UUID is invalid,
or the required_assessments is not a positive integer.
PeerAssessmentInternalError: Raised when there is an internal error
while evaluating this workflow rule.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> has_finished_required_evaluating(student_item_dict, 3)
>>> has_finished_required_evaluating("abc123", 3)
(True, 3)
"""
workflow = _get_latest_workflow(student_item_dict)
workflow = _get_workflow_by_submission_uuid(submission_uuid)
done = False
peers_graded = 0
if workflow:
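Because the lookup is now keyed on the scorer's own submission UUID, callers check their progress with just that UUID. A short usage sketch consistent with the tests below; my_submission_uuid is a placeholder name:

# Hedged sketch; returns a (bool, int) pair per the docstring above.
finished, graded_count = peer_api.has_finished_required_evaluating(my_submission_uuid, 3)
if not finished:
    remaining = 3 - graded_count  # peers still to assess before feedback is released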
......@@ -444,12 +434,12 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
def get_submission_to_assess(
student_item_dict,
submission_uuid,
graded_by,
over_grading=False):
"""Get a submission to peer evaluate.
Retrieves a submission for assessment for the given student_item. This will
Retrieves a submission for assessment for the given student. This will
not return a submission submitted by the requesting scorer. Submissions are
returned based on how many assessments are still required, and if there are
peers actively assessing a particular submission. If there are no
......@@ -458,11 +448,10 @@ def get_submission_to_assess(
grade.
Args:
student_item_dict (dict): The student item information from the student
requesting a submission for assessment. The dict contains an
item_id, course_id, and item_type, used to identify the unique
question for the review, while the student_id is used to explicitly
avoid giving the student their own submission.
submission_uuid (str): The submission UUID from the student
requesting a submission for assessment. This is used to explicitly
avoid giving the student their own submission, and determines the
associated Peer Workflow.
graded_by (int): The number of assessments a submission
requires before it has completed the peer assessment process.
over_grading (bool): Allows over grading to be performed if no submission
......@@ -485,13 +474,7 @@ def get_submission_to_assess(
to retrieve a peer submission.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> get_submission_to_assess(student_item_dict, 3)
>>> get_submission_to_assess("abc123", 3)
{
'student_item': 2,
'attempt_number': 1,
......@@ -501,30 +484,30 @@ def get_submission_to_assess(
}
"""
workflow = _get_latest_workflow(student_item_dict)
workflow = _get_workflow_by_submission_uuid(submission_uuid)
if not workflow:
raise PeerAssessmentWorkflowError(_(
u"A Peer Assessment Workflow does not exist for the specified "
u"student."))
submission_uuid = _find_active_assessments(workflow)
peer_submission_uuid = _find_active_assessments(workflow)
# If there is an active assessment for this user, get that submission,
# otherwise, get the first assessment for review, otherwise, if over grading
# is turned on, get the first submission available for over grading.
if submission_uuid is None:
submission_uuid = _get_submission_for_review(workflow, graded_by)
if submission_uuid is None and over_grading:
submission_uuid = _get_submission_for_over_grading(workflow)
if submission_uuid:
if peer_submission_uuid is None:
peer_submission_uuid = _get_submission_for_review(workflow, graded_by)
if peer_submission_uuid is None and over_grading:
peer_submission_uuid = _get_submission_for_over_grading(workflow)
if peer_submission_uuid:
try:
submission_data = sub_api.get_submission(submission_uuid)
_create_peer_workflow_item(workflow, submission_uuid)
_log_workflow(submission_uuid, student_item_dict, over_grading)
submission_data = sub_api.get_submission(peer_submission_uuid)
_create_peer_workflow_item(workflow, peer_submission_uuid)
_log_workflow(peer_submission_uuid, workflow, over_grading)
return submission_data
except sub_api.SubmissionNotFoundError:
error_message = _(
u"Could not find a submission with the uuid {} for student {} "
u"in the peer workflow."
.format(submission_uuid, student_item_dict)
.format(peer_submission_uuid, workflow.student_id)
)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
......@@ -532,9 +515,9 @@ def get_submission_to_assess(
logger.info(
u"No submission found for {} to assess ({}, {})"
.format(
student_item_dict["student_id"],
student_item_dict["course_id"],
student_item_dict["item_id"],
workflow.student_id,
workflow.course_id,
workflow.item_id,
)
)
return None
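The same submission-UUID keying applies when pulling a submission to assess; the priority order described in the docstring (active assessment, then required reviews, then over grading) is unchanged. A hedged caller sketch with placeholder values:

# Hedged sketch of the new call pattern.
peer_sub = peer_api.get_submission_to_assess(
    my_submission_uuid,   # scorer's own submission UUID (replaces student_item_dict)
    3,                    # graded_by: assessments each submission requires
    over_grading=False,
)
if peer_sub is None:
    pass  # nothing is currently available for this scorer to assess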
......@@ -581,14 +564,14 @@ def create_peer_workflow(submission_uuid):
raise PeerAssessmentInternalError(error_message)
def create_peer_workflow_item(scorer, submission_uuid):
def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
"""
Begin peer-assessing a particular submission.
Note that this does NOT pick the submission from the prioritized list of available submissions.
Mainly useful for testing.
Args:
scorer (str): The ID of the scoring student.
scorer_submission_uuid (str): The submission UUID of the scoring student.
submission_uuid (str): The unique identifier of the submission being scored
Returns:
......@@ -597,12 +580,8 @@ def create_peer_workflow_item(scorer, submission_uuid):
Raises:
PeerAssessmentWorkflowError: Could not find the workflow for the student.
PeerAssessmentInternalError: Could not create the peer workflow item.
SubmissionError: An error occurred while retrieving the submission.
"""
submission = get_submission_and_student(submission_uuid)
student_item_dict = copy.copy(submission['student_item'])
student_item_dict['student_id'] = scorer
workflow = _get_latest_workflow(student_item_dict)
workflow = _get_workflow_by_submission_uuid(scorer_submission_uuid)
_create_peer_workflow_item(workflow, submission_uuid)
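create_peer_workflow_item keeps its bypass-the-queue behavior but is now addressed entirely by submission UUIDs, which is how the management command below calls it. A hedged sketch; the UUID values are placeholders:

# Hedged sketch; explicitly pairs a scorer with a specific submission,
# skipping the prioritized selection done by get_submission_to_assess().
peer_api.create_peer_workflow_item(
    scorer_submission_uuid="1111-aaaa",   # the scorer's own submission UUID
    submission_uuid="2222-bbbb",          # the submission the scorer will assess
)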
......@@ -691,36 +670,27 @@ def set_assessment_feedback(feedback_dict):
raise PeerAssessmentInternalError(msg)
def _get_latest_workflow(student_item_dict):
"""Given a student item, return the current workflow for this student.
Given a student item, get the most recent workflow for the student.
def _get_workflow_by_submission_uuid(submission_uuid):
"""Get the Peer Workflow associated with the given submission UUID.
TODO: API doesn't take in current submission; do we pass that in, or get
the latest workflow item? Currently using "latest".
If available, returns the Peer Workflow associated with the given
submission UUID.
Args:
student_item_dict (dict): Dictionary representation of a student item.
The most recent workflow associated with this student item is
returned.
submission_uuid (str): The submission UUID associated with the Peer
Workflow to retrieve.
Returns:
workflow (PeerWorkflow): The most recent peer workflow associated with
this student item.
this submission UUID.
Raises:
PeerAssessmentWorkflowError: Thrown when no workflow can be found for
the associated student item. This should always exist before a
the associated submission UUID. This should always exist before a
student is allowed to request submissions for peer assessment.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> workflow = _get_workflow_by_submission_uuid("abc123")
{
'student_id': u'Bob',
'item_id': u'type_one',
......@@ -731,17 +701,14 @@ def _get_latest_workflow(student_item_dict):
"""
try:
workflows = PeerWorkflow.objects.filter(
student_id=student_item_dict["student_id"],
item_id=student_item_dict["item_id"],
course_id=student_item_dict["course_id"]
).order_by("-created_at", "-id")
return workflows[0] if workflows else None
return PeerWorkflow.objects.get(submission_uuid=submission_uuid)
except PeerWorkflow.DoesNotExist:
return None
except DatabaseError:
error_message = _(
u"Error finding workflow for student {}. Workflow must be created "
u"for student before beginning peer assessment."
.format(student_item_dict)
u"Error finding workflow for submission UUID {}. Workflow must be "
u"created for submission before beginning peer assessment."
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
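Note the contract of the new helper: a missing workflow simply returns None (callers raise their own PeerAssessmentWorkflowError), while only database failures raise from here. A hedged sketch of the caller-side check, matching get_submission_to_assess above:

# Hedged sketch of the None-vs-raise contract.
workflow = _get_workflow_by_submission_uuid(submission_uuid)
if workflow is None:
    raise PeerAssessmentWorkflowError(_(
        u"A Peer Assessment Workflow does not exist for the specified "
        u"student."))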
......@@ -768,7 +735,7 @@ def _create_peer_workflow_item(workflow, submission_uuid):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> workflow = _get_workflow_by_submission_uuid("abc123")
>>> _create_peer_workflow_item(workflow, "1")
"""
......@@ -814,7 +781,7 @@ def _find_active_assessments(workflow):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> workflow = _get_workflow_by_submission_uuid("abc123")
>>> _find_active_assessments(workflow)
"1"
......@@ -950,6 +917,35 @@ def _get_submission_for_over_grading(workflow):
raise PeerAssessmentInternalError(error_message)
def _get_latest_open_workflow_item(workflow):
"""Gets the latest open workflow item for a given workflow.
If there is an open workflow item for the given workflow, return this item.
Args:
workflow (PeerWorkflow): The scorer's workflow.
Returns:
A PeerWorkflowItem that is open for assessment. None if no item is
found.
Examples:
>>> workflow = _get_workflow_by_submission_uuid("abc123")
>>> _get_latest_open_workflow_item(workflow)
{
'student_id': u'Bob',
'item_id': u'type_one',
'course_id': u'course_1',
'submission_uuid': u'1',
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
}
"""
workflow_query = workflow.graded.filter(
assessment__isnull=True).order_by("-started_at", "-id")
items = list(workflow_query[:1])
return items[0] if items else None
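Within create_assessment (see the first hunk), this helper replaces the inline queryset and feeds both the assessment target and the later close step. A hedged sketch of that flow, with the intermediate details elided:

# Hedged sketch of how create_assessment consumes the open item.
item = _get_latest_open_workflow_item(scorer_workflow)
if item is None:
    raise PeerAssessmentWorkflowError(message)
peer_submission_uuid = item.author.submission_uuid
# ... build and save the Assessment for peer_submission_uuid ...
_close_active_assessment(scorer_workflow, peer_submission_uuid,
                         assessment, num_required_grades)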
def _close_active_assessment(
workflow,
submission_uuid,
......@@ -970,13 +966,7 @@ def _close_active_assessment(
requires to be considered complete.
Examples:
>>> student_item_dict = dict(
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> workflow = _get_workflow_by_submission_uuid("abc123")
>>> assessment = Assessment.objects.all()[0]
>>> _close_active_assessment(workflow, "1", assessment, 3)
......@@ -1019,7 +1009,7 @@ def _num_peers_graded(workflow):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> workflow = _get_latest_workflow(student_item_dict)
>>> workflow = _get_workflow_by_submission_uuid("abc123")
>>> _num_peers_graded(workflow, 3)
True
"""
......@@ -1032,8 +1022,8 @@ def _log_assessment(assessment, scorer_workflow):
Args:
assessment (Assessment): The assessment model that was created.
student_item (dict): The serialized student item model of the student being scored.
scorer_item (dict): The serialized student item model of the student creating the assessment.
scorer_workflow (PeerWorkflow): The Peer Workflow belonging to the
scorer of this assessment.
Returns:
None
......@@ -1092,28 +1082,29 @@ def _log_assessment(assessment, scorer_workflow):
dog_stats_api.increment('openassessment.assessment.count', tags=tags)
def _log_workflow(submission_uuid, student_item, over_grading):
def _log_workflow(submission_uuid, workflow, over_grading):
"""
Log the creation of a peer-assessment workflow.
Args:
submission_uuid (str): The UUID of the submission being assessed.
student_item (dict): The serialized student item of the student making the assessment.
workflow (PeerWorkflow): The Peer Workflow of the student making the
assessment.
over_grading (bool): Whether over-grading is enabled.
"""
logger.info(
u"Retrieved submission {} ({}, {}) to be assessed by {}"
.format(
submission_uuid,
student_item["course_id"],
student_item["item_id"],
student_item["student_id"],
workflow.course_id,
workflow.item_id,
workflow.student_id,
)
)
tags = [
u"course_id:{course_id}".format(course_id=student_item['course_id']),
u"item_id:{item_id}".format(item_id=student_item['item_id']),
u"course_id:{course_id}".format(course_id=workflow.course_id),
u"item_id:{item_id}".format(item_id=workflow.item_id),
u"type:peer"
]
......
......@@ -125,7 +125,7 @@ class TestPeerApi(CacheResetTest):
def test_create_assessment(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 1)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -140,7 +140,7 @@ class TestPeerApi(CacheResetTest):
def test_create_huge_assessment_fails(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 1)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
with self.assertRaises(peer_api.PeerAssessmentRequestError):
peer_api.create_assessment(
bob_sub["uuid"],
......@@ -154,7 +154,7 @@ class TestPeerApi(CacheResetTest):
def test_get_assessments(self, assessment_dict):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -169,7 +169,7 @@ class TestPeerApi(CacheResetTest):
def test_get_assessments_with_date(self, assessment_dict):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -189,16 +189,16 @@ class TestPeerApi(CacheResetTest):
"""
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
finished, count = peer_api.has_finished_required_evaluating(bob, 1)
finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
self.assertFalse(finished)
self.assertEqual(count, 0)
peer_api.create_assessment(
bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
1,
)
finished, count = peer_api.has_finished_required_evaluating(bob, 1)
finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
self.assertTrue(finished)
self.assertEqual(count, 1)
......@@ -224,34 +224,34 @@ class TestPeerApi(CacheResetTest):
self.assertIsNone(score)
for i in range(5):
self.assertEquals((False, i), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(tim, REQUIRED_GRADED)
self.assertEquals((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
peer_api.create_assessment(
tim_sub["uuid"], tim["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(STUDENT_ITEM, REQUIRED_GRADED))
self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
# Tim should not have a score, because his submission does not have
# enough assessments.
self.assertIsNone(sub_api.get_score(STUDENT_ITEM))
sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
bob_sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED)
sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
sally_sub["uuid"], sally["student_id"], ASSESSMENT_DICT_FAIL, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED)
sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
self.assertEqual(sub["uuid"], tim_sub["uuid"])
peer_api.create_assessment(
jim_sub["uuid"], jim["student_id"], ASSESSMENT_DICT_PASS, RUBRIC_DICT,
......@@ -308,25 +308,25 @@ class TestPeerApi(CacheResetTest):
angel_sub, angel = self._create_student_and_submission("Angel", "Angel's answer")
# 2) Angel waits for peers
sub = peer_api.get_submission_to_assess(angel, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(angel_sub['uuid'], REQUIRED_GRADED_BY)
self.assertIsNone(sub)
# 3) Bob submits
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 4) Sally submits
sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
# 5) Sally pulls Angel's Submission but never reviews it.
sub = peer_api.get_submission_to_assess(sally, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 6) Jim submits
jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
# 7) Jim also doesn't care about Angel and does not bother to review.
sub = peer_api.get_submission_to_assess(jim, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(angel_sub["uuid"], sub["uuid"])
# 8) Buffy comes along and she submits
......@@ -334,7 +334,7 @@ class TestPeerApi(CacheResetTest):
# 9) Buffy cares about Angel, but she won't get Angel's submission;
# it's held by Bob, Sally, and Jim.
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
# 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
......@@ -342,20 +342,20 @@ class TestPeerApi(CacheResetTest):
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertIsNone(sub)
# 11) Xander comes along and submits.
......@@ -363,19 +363,19 @@ class TestPeerApi(CacheResetTest):
# 12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
# lazy and doesn't grade Buffy when her submission comes along.
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -393,7 +393,7 @@ class TestPeerApi(CacheResetTest):
# 13) Buffy is waiting in the wings. She pulls Xander's submission and
# grades it.
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -404,32 +404,32 @@ class TestPeerApi(CacheResetTest):
spike_sub, spike = self._create_student_and_submission("Spike", "Spike's answer")
# 15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(bob_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(sally_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(jim_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(spike, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(xander_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
spike_sub["uuid"], spike["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -437,7 +437,7 @@ class TestPeerApi(CacheResetTest):
)
# 16) Buffy reviews Spike
sub = peer_api.get_submission_to_assess(buffy, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(spike_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
buffy_sub["uuid"], buffy["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -448,7 +448,7 @@ class TestPeerApi(CacheResetTest):
willow_sub, willow = self._create_student_and_submission("Willow", "Willow's answer")
# 18) Willow goes to grade, and should get Buffy
sub = peer_api.get_submission_to_assess(willow, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(willow_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
willow_sub["uuid"], willow["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -456,7 +456,7 @@ class TestPeerApi(CacheResetTest):
)
# 19) Xander comes back and gets Buffy's submission, and grades it.
sub = peer_api.get_submission_to_assess(xander, REQUIRED_GRADED_BY)
sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
self.assertEquals(buffy_sub["uuid"], sub["uuid"])
peer_api.create_assessment(
xander_sub["uuid"], xander["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
......@@ -475,7 +475,7 @@ class TestPeerApi(CacheResetTest):
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
# Check for a workflow for Buffy.
buffy_workflow = peer_api._get_latest_workflow(buffy)
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
self.assertIsNotNone(buffy_workflow)
# Check to see if Buffy is actively reviewing Xander's submission.
......@@ -491,13 +491,13 @@ class TestPeerApi(CacheResetTest):
submission_uuid = peer_api._find_active_assessments(buffy_workflow)
self.assertEqual(xander_answer["uuid"], submission_uuid)
def test_get_latest_workflow(self):
def test_get_workflow_by_uuid(self):
buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer")
buffy_answer_two, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
workflow = peer_api._get_latest_workflow(buffy)
workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer_two['uuid'])
self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid)
self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)
......@@ -506,7 +506,7 @@ class TestPeerApi(CacheResetTest):
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_latest_workflow(buffy)
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
# Get the next submission for review
submission_uuid = peer_api._get_submission_for_review(buffy_workflow, 3)
......@@ -517,9 +517,9 @@ class TestPeerApi(CacheResetTest):
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
willow_answer, willow = self._create_student_and_submission("Willow", "Willow's answer")
buffy_workflow = peer_api._get_latest_workflow(buffy)
xander_workflow = peer_api._get_latest_workflow(xander)
willow_workflow = peer_api._get_latest_workflow(willow)
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
xander_workflow = peer_api._get_workflow_by_submission_uuid(xander_answer['uuid'])
willow_workflow = peer_api._get_workflow_by_submission_uuid(willow_answer['uuid'])
# Get a bunch of workflow items opened up.
peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
......@@ -537,7 +537,7 @@ class TestPeerApi(CacheResetTest):
def test_create_assessment_feedback(self):
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 1)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -545,7 +545,7 @@ class TestPeerApi(CacheResetTest):
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
sub = peer_api.get_submission_to_assess(tim, 1)
sub = peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
peer_api.create_assessment(
tim_sub["uuid"],
tim["student_id"],
......@@ -587,10 +587,10 @@ class TestPeerApi(CacheResetTest):
xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")
# Create a workflow for Buffy.
buffy_workflow = peer_api._get_latest_workflow(buffy)
buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])
# Get a workflow item opened up.
submission = peer_api.get_submission_to_assess(buffy, 3)
submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)
self.assertEqual(xander_answer["uuid"], submission["uuid"])
......@@ -611,7 +611,7 @@ class TestPeerApi(CacheResetTest):
@raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter):
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
tim_workflow = peer_api._get_latest_workflow(tim)
tim_workflow = peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
mock_filter.side_effect = DatabaseError("Oh no.")
peer_api._get_submission_for_review(tim_workflow, 3)
......@@ -652,12 +652,12 @@ class TestPeerApi(CacheResetTest):
}
)
@patch.object(PeerWorkflow.objects, 'filter')
@patch.object(PeerWorkflow.objects, 'get')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api._get_latest_workflow(tim)
peer_api._get_workflow_by_submission_uuid(tim_answer['uuid'])
@patch.object(PeerWorkflow.objects, 'get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
......@@ -673,28 +673,28 @@ class TestPeerApi(CacheResetTest):
peer_api._create_peer_workflow_item(tim, tim_answer['uuid'])
def test_get_submission_to_evaluate(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
submission, __ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
self._create_student_and_submission("Bob", "Bob's answer", TUESDAY)
self._create_student_and_submission(
"Sally", "Sally's answer", WEDNESDAY
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
submission = peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
submission = peer_api.get_submission_to_assess(submission['uuid'], 3)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
submission = peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
submission, __ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
submission = peer_api.get_submission_to_assess(submission['uuid'], 3)
self.assertIsNone(submission)
def test_get_max_scores(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 1)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......@@ -763,7 +763,7 @@ class TestPeerApi(CacheResetTest):
def test_error_on_get_assessment(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
sub = peer_api.get_submission_to_assess(bob, 3)
sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
......
......@@ -90,7 +90,7 @@ class Command(BaseCommand):
# Retrieve the submission we want to score
# Note that we are NOT using the priority queue here, since we know
# exactly which submission we want to score.
peer_api.create_peer_workflow_item(scorer_id, submission_uuid)
peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)
# Create the peer assessment
assessment = {
......
......@@ -4,9 +4,6 @@
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility">
{% endblock %}
{% spaceless %}
<span class="system__element" id="peer_submission_uuid">
{{ peer_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
......
......@@ -4,9 +4,6 @@
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment ui-toggle-visibility">
{% endblock %}
<span class="system__element" id="self_submission_uuid">
{{ self_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
......
......@@ -161,7 +161,7 @@ class PeerAssessmentMixin(object):
if assessment:
context_dict["must_grade"] = assessment["must_grade"]
finished, count = peer_api.has_finished_required_evaluating(
student_item,
self.submission_uuid,
assessment["must_grade"]
)
context_dict["graded"] = count
......@@ -212,7 +212,7 @@ class PeerAssessmentMixin(object):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
student_item_dict,
self.submission_uuid,
assessment["must_be_graded_by"],
True
)
......
......@@ -99,8 +99,6 @@ class SelfAssessmentMixin(object):
Dict with keys "success" (bool) indicating success/failure
and "msg" (unicode) containing additional information if an error occurs.
"""
if 'submission_uuid' not in data:
return {'success': False, 'msg': _(u"Missing submission_uuid key in request")}
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
......
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.renderPeerAssessmentStep();this.renderSelfAssessmentStep();this.gradeView.load();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},renderPeerAssessmentStep:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel,$.proxy(view.renderContinuedPeerAssessmentStep,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderContinuedPeerAssessmentStep:function(){var view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderSelfAssessmentStep:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},peerAssess:function(){var 
view=this;this.peerAssessRequest(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()})},continuedPeerAssess:function(){var view=this;view.peerAssessRequest(function(){view.renderContinuedPeerAssessmentStep();view.gradeView.load()})},peerAssessRequest:function(successFunction){var submissionId=$("#peer_submission_uuid",this.element)[0].innerHTML.trim();var optionsSelected={};$("#peer-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var feedback=$("#assessment__rubric__question--feedback__value",this.element).val();var view=this;this.toggleActionError("peer",null);this.server.peerAssess(submissionId,optionsSelected,feedback).done(successFunction).fail(function(errMsg){view.toggleActionError("peer",errMsg)})},selfAssess:function(){var submissionId=$("#self_submission_uuid",this.element)[0].innerHTML.trim();var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;this.toggleActionError("self",null);this.server.selfAssess(submissionId,optionsSelected).done(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html("Unable to Load")},getStepActionsErrorMessage:function(){return $(".step__actions .message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg="This problem has already been released. 
Any changes will apply only to future assessments.";if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{sel.html('<span class="sr">Status of Your Response:</span>\n'+msg)}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus("This response has not been saved.")}},save:function(){this.saveStatus("Saving...");this.baseView.toggleActionError("save",null);var 
view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus("This response has been saved but not submitted.")}}).fail(function(errMsg){view.saveStatus("Error");view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var submission=$("#submission__answer__value",this.element).val();this.baseView.toggleActionError("response",null);var view=this;var baseView=this.baseView;var moveToNextStep=function(){view.load();baseView.renderPeerAssessmentStep()};this.server.submit(submission).done(moveToNextStep).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){moveToNextStep()}else{baseView.toggleActionError("submit",errMsg);view.submitEnabled(true)}})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},maxInputSize:1024*64,render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,["This section could not be loaded."])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,["This section could not be loaded."])})}).promise()},submit:function(submission){var url=this.url("submit");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["submit","This response is too long. Please shorten the response and try to submit it again."])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX","This response could not be submitted."])})}).promise()},save:function(submission){var url=this.url("save_submission");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["This response is too long. Please shorten the response and try to save it again."])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This response could not be saved."])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");if(text.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["This feedback is too long. 
Please shorten your feedback and try to submit it again."])}).promise()}var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This feedback could not be submitted."])})}).promise()},peerAssess:function(submissionId,optionsSelected,feedback){var url=this.url("peer_assess");if(feedback.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["The comments on this assessment are too long. Please shorten your comments and try to submit them again."])}).promise()}var payload=JSON.stringify({submission_uuid:submissionId,options_selected:optionsSelected,feedback:feedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This assessment could not be submitted."])})}).promise()},selfAssess:function(submissionId,optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({submission_uuid:submissionId,options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This assessment could not be submitted."])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This problem could not be loaded."])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This problem could not be saved."])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["The server could not be contacted."])})}).promise()}};
\ No newline at end of file
if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.BaseView=function(runtime,element,server){this.runtime=runtime;this.element=element;this.server=server;this.responseView=new OpenAssessment.ResponseView(this.element,this.server,this);this.gradeView=new OpenAssessment.GradeView(this.element,this.server,this)};OpenAssessment.BaseView.prototype={scrollToTop:function(){if($.scrollTo instanceof Function){$(window).scrollTo($("#openassessment__steps"),800,{offset:-50})}},setUpCollapseExpand:function(parentSel,onExpand){parentSel.find(".ui-toggle-visibility__control").click(function(eventData){var sel=$(eventData.target).closest(".ui-toggle-visibility");if(sel.hasClass("is--collapsed")&&onExpand!==undefined){onExpand()}sel.toggleClass("is--collapsed")})},load:function(){this.responseView.load();this.renderPeerAssessmentStep();this.renderSelfAssessmentStep();this.gradeView.load();courseStaffDebug=$(".wrapper--staff-info");if(courseStaffDebug.length>0){this.setUpCollapseExpand(courseStaffDebug,function(){})}},renderPeerAssessmentStep:function(){var view=this;this.server.render("peer_assessment").done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel,$.proxy(view.renderContinuedPeerAssessmentStep,view));sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.peerAssess()})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderContinuedPeerAssessmentStep:function(){var view=this;this.server.renderContinuedPeer().done(function(html){$("#openassessment__peer-assessment",view.element).replaceWith(html);var sel=$("#openassessment__peer-assessment",view.element);view.setUpCollapseExpand(sel);sel.find("#peer-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.continuedPeerAssess()});sel.find("#peer-assessment--001__assessment").change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#peer-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)})}).fail(function(errMsg){view.showLoadError("peer-assessment")})},renderSelfAssessmentStep:function(){var view=this;this.server.render("self_assessment").done(function(html){$("#openassessment__self-assessment",view.element).replaceWith(html);var sel=$("#openassessment__self-assessment",view.element);view.setUpCollapseExpand(sel);$("#self-assessment--001__assessment",view.element).change(function(){var numChecked=$("input[type=radio]:checked",this).length;var numAvailable=$(".field--radio.assessment__rubric__question",this).length;$("#self-assessment--001__assessment__submit",view.element).toggleClass("is--disabled",numChecked!=numAvailable)});sel.find("#self-assessment--001__assessment__submit").click(function(eventObject){eventObject.preventDefault();view.selfAssess()})}).fail(function(errMsg){view.showLoadError("self-assessment")})},peerAssess:function(){var 
view=this;this.peerAssessRequest(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()})},continuedPeerAssess:function(){var view=this;view.peerAssessRequest(function(){view.renderContinuedPeerAssessmentStep();view.gradeView.load()})},peerAssessRequest:function(successFunction){var optionsSelected={};$("#peer-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var feedback=$("#assessment__rubric__question--feedback__value",this.element).val();var view=this;this.toggleActionError("peer",null);this.server.peerAssess(optionsSelected,feedback).done(successFunction).fail(function(errMsg){view.toggleActionError("peer",errMsg)})},selfAssess:function(){var optionsSelected={};$("#self-assessment--001__assessment input[type=radio]:checked",this.element).each(function(index,sel){optionsSelected[sel.name]=sel.value});var view=this;this.toggleActionError("self",null);this.server.selfAssess(optionsSelected).done(function(){view.renderPeerAssessmentStep();view.renderSelfAssessmentStep();view.gradeView.load();view.scrollToTop()}).fail(function(errMsg){view.toggleActionError("self",errMsg)})},toggleActionError:function(type,msg){var element=this.element;var container=null;if(type=="save"){container=".response__submission__actions"}else if(type=="submit"||type=="peer"||type=="self"){container=".step__actions"}else if(type=="feedback_assess"){container=".submission__feedback__actions"}if(container===null){if(msg!==null){console.log(msg)}}else{var msgHtml=msg===null?"":msg;$(container+" .message__content",element).html("<p>"+msgHtml+"</p>");$(container,element).toggleClass("has--error",msg!==null)}},showLoadError:function(step){var container="#openassessment__"+step;$(container).toggleClass("has--error",true);$(container+" .step__status__value i").removeClass().addClass("ico icon-warning-sign");$(container+" .step__status__value .copy").html("Unable to Load")},getStepActionsErrorMessage:function(){return $(".step__actions .message__content").html()}};function OpenAssessmentBlock(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.BaseView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.StudioView=function(runtime,element,server){this.runtime=runtime;this.server=server;this.codeBox=CodeMirror.fromTextArea($(element).find(".openassessment-editor").first().get(0),{mode:"xml",lineNumbers:true,lineWrapping:true});var view=this;$(element).find(".openassessment-save-button").click(function(eventData){view.save()});$(element).find(".openassessment-cancel-button").click(function(eventData){view.cancel()})};OpenAssessment.StudioView.prototype={load:function(){var view=this;this.server.loadXml().done(function(xml){view.codeBox.setValue(xml)}).fail(function(msg){view.showError(msg)})},save:function(){var view=this;this.server.checkReleased().done(function(isReleased){if(isReleased){view.confirmPostReleaseUpdate($.proxy(view.updateXml,view))}else{view.updateXml()}}).fail(function(errMsg){view.showError(msg)})},confirmPostReleaseUpdate:function(onConfirm){var msg="This problem has already been released. 
Any changes will apply only to future assessments.";if(confirm(msg)){onConfirm()}},updateXml:function(){this.runtime.notify("save",{state:"start"});var xml=this.codeBox.getValue();var view=this;this.server.updateXml(xml).done(function(){view.runtime.notify("save",{state:"end"});view.load()}).fail(function(msg){view.showError(msg)})},cancel:function(){this.runtime.notify("cancel",{})},showError:function(errorMsg){this.runtime.notify("error",{msg:errorMsg})}};function OpenAssessmentEditor(runtime,element){$(function($){var server=new OpenAssessment.Server(runtime,element);var view=new OpenAssessment.StudioView(runtime,element,server);view.load()})}if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.GradeView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView};OpenAssessment.GradeView.prototype={load:function(){var view=this;var baseView=this.baseView;this.server.render("grade").done(function(html){$("#openassessment__grade",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){baseView.showLoadError("grade",errMsg)})},installHandlers:function(){var sel=$("#openassessment__grade",this.element);this.baseView.setUpCollapseExpand(sel);var view=this;sel.find("#feedback__submit").click(function(eventObject){eventObject.preventDefault();view.submitFeedbackOnAssessment()})},feedbackText:function(text){if(typeof text==="undefined"){return $("#feedback__remarks__value",this.element).val()}else{$("#feedback__remarks__value",this.element).val(text)}},feedbackOptions:function(options){var view=this;if(typeof options==="undefined"){return $.map($(".feedback__overall__value:checked",view.element),function(element,index){return $(element).val()})}else{$(".feedback__overall__value",this.element).prop("checked",false);$.each(options,function(index,opt){$("#feedback__overall__value--"+opt,view.element).prop("checked",true)})}},setHidden:function(sel,hidden){sel.toggleClass("is--hidden",hidden);sel.attr("aria-hidden",hidden?"true":"false")},isHidden:function(sel){return sel.hasClass("is--hidden")&&sel.attr("aria-hidden")=="true"},feedbackState:function(newState){var containerSel=$(".submission__feedback__content",this.element);var instructionsSel=containerSel.find(".submission__feedback__instructions");var fieldsSel=containerSel.find(".submission__feedback__fields");var actionsSel=containerSel.find(".submission__feedback__actions");var transitionSel=containerSel.find(".transition__status");var messageSel=containerSel.find(".message--complete");if(typeof newState==="undefined"){var isSubmitting=containerSel.hasClass("is--transitioning")&&containerSel.hasClass("is--submitting")&&!this.isHidden(transitionSel)&&this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var hasSubmitted=containerSel.hasClass("is--submitted")&&this.isHidden(transitionSel)&&!this.isHidden(messageSel)&&this.isHidden(instructionsSel)&&this.isHidden(fieldsSel)&&this.isHidden(actionsSel);var isOpen=!containerSel.hasClass("is--submitted")&&!containerSel.hasClass("is--transitioning")&&!containerSel.hasClass("is--submitting")&&this.isHidden(transitionSel)&&this.isHidden(messageSel)&&!this.isHidden(instructionsSel)&&!this.isHidden(fieldsSel)&&!this.isHidden(actionsSel);if(isOpen){return"open"}else if(isSubmitting){return"submitting"}else if(hasSubmitted){return"submitted"}else{throw"Invalid feedback 
state"}}else{if(newState=="open"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,false);this.setHidden(fieldsSel,false);this.setHidden(actionsSel,false);this.setHidden(transitionSel,true);this.setHidden(messageSel,true)}else if(newState=="submitting"){containerSel.toggleClass("is--transitioning",true);containerSel.toggleClass("is--submitting",true);containerSel.toggleClass("is--submitted",false);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,false);this.setHidden(messageSel,true)}else if(newState=="submitted"){containerSel.toggleClass("is--transitioning",false);containerSel.toggleClass("is--submitting",false);containerSel.toggleClass("is--submitted",true);this.setHidden(instructionsSel,true);this.setHidden(fieldsSel,true);this.setHidden(actionsSel,true);this.setHidden(transitionSel,true);this.setHidden(messageSel,false)}}},submitFeedbackOnAssessment:function(){var view=this;var baseView=this.baseView;$("#feedback__submit",this.element).toggleClass("is--disabled",true);view.feedbackState("submitting");this.server.submitFeedbackOnAssessment(this.feedbackText(),this.feedbackOptions()).done(function(){view.feedbackState("submitted")}).fail(function(errMsg){baseView.toggleActionError("feedback_assess",errMsg)})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.ResponseView=function(element,server,baseView){this.element=element;this.server=server;this.baseView=baseView;this.savedResponse=""};OpenAssessment.ResponseView.prototype={load:function(){var view=this;this.server.render("submission").done(function(html){$("#openassessment__response",view.element).replaceWith(html);view.installHandlers()}).fail(function(errMsg){view.baseView.showLoadError("response")})},installHandlers:function(){var sel=$("#openassessment__response",this.element);var view=this;this.baseView.setUpCollapseExpand(sel);this.savedResponse=this.response();var handleChange=function(eventData){view.responseChanged()};sel.find("#submission__answer__value").on("change keyup drop paste",handleChange);sel.find("#step--response__submit").click(function(eventObject){eventObject.preventDefault();view.submit()});sel.find("#submission__save").click(function(eventObject){eventObject.preventDefault();view.save()})},submitEnabled:function(enabled){var sel=$("#step--response__submit",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveEnabled:function(enabled){var sel=$("#submission__save",this.element);if(typeof enabled==="undefined"){return!sel.hasClass("is--disabled")}else{sel.toggleClass("is--disabled",!enabled)}},saveStatus:function(msg){var sel=$("#response__save_status h3",this.element);if(typeof msg==="undefined"){return sel.text()}else{sel.html('<span class="sr">Status of Your Response:</span>\n'+msg)}},response:function(text){var sel=$("#submission__answer__value",this.element);if(typeof text==="undefined"){return sel.val()}else{sel.val(text)}},responseChanged:function(){var currentResponse=$.trim(this.response());var isBlank=currentResponse!=="";this.submitEnabled(isBlank);if($.trim(this.savedResponse)!==currentResponse){this.saveEnabled(isBlank);this.saveStatus("This response has not been saved.")}},save:function(){this.saveStatus("Saving...");this.baseView.toggleActionError("save",null);var 
view=this;var savedResponse=this.response();this.server.save(savedResponse).done(function(){view.savedResponse=savedResponse;var currentResponse=view.response();view.submitEnabled(currentResponse!=="");if(currentResponse==savedResponse){view.saveEnabled(false);view.saveStatus("This response has been saved but not submitted.")}}).fail(function(errMsg){view.saveStatus("Error");view.baseView.toggleActionError("save",errMsg)})},submit:function(){this.submitEnabled(false);var submission=$("#submission__answer__value",this.element).val();this.baseView.toggleActionError("response",null);var view=this;var baseView=this.baseView;var moveToNextStep=function(){view.load();baseView.renderPeerAssessmentStep()};this.server.submit(submission).done(moveToNextStep).fail(function(errCode,errMsg){if(errCode=="ENOMULTI"){moveToNextStep()}else{baseView.toggleActionError("submit",errMsg);view.submitEnabled(true)}})}};if(typeof OpenAssessment=="undefined"||!OpenAssessment){OpenAssessment={}}OpenAssessment.Server=function(runtime,element){this.runtime=runtime;this.element=element};OpenAssessment.Server.prototype={url:function(handler){return this.runtime.handlerUrl(this.element,handler)},maxInputSize:1024*64,render:function(component){var url=this.url("render_"+component);return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html"}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,["This section could not be loaded."])})}).promise()},renderContinuedPeer:function(){var url=this.url("render_peer_assessment");return $.Deferred(function(defer){$.ajax({url:url,type:"POST",dataType:"html",data:{continue_grading:true}}).done(function(data){defer.resolveWith(this,[data])}).fail(function(data){defer.rejectWith(this,["This section could not be loaded."])})}).promise()},submit:function(submission){var url=this.url("submit");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["submit","This response is too long. Please shorten the response and try to submit it again."])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){var success=data[0];if(success){var studentId=data[1];var attemptNum=data[2];defer.resolveWith(this,[studentId,attemptNum])}else{var errorNum=data[1];var errorMsg=data[2];defer.rejectWith(this,[errorNum,errorMsg])}}).fail(function(data){defer.rejectWith(this,["AJAX","This response could not be submitted."])})}).promise()},save:function(submission){var url=this.url("save_submission");if(submission.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["This response is too long. Please shorten the response and try to save it again."])}).promise()}return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:JSON.stringify({submission:submission})}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This response could not be saved."])})}).promise()},submitFeedbackOnAssessment:function(text,options){var url=this.url("submit_feedback");if(text.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["This feedback is too long. 
Please shorten your feedback and try to submit it again."])}).promise()}var payload=JSON.stringify({feedback_text:text,feedback_options:options});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This feedback could not be submitted."])})}).promise()},peerAssess:function(optionsSelected,feedback){var url=this.url("peer_assess");if(feedback.length>this.maxInputSize){return $.Deferred(function(defer){defer.rejectWith(this,["The comments on this assessment are too long. Please shorten your comments and try to submit them again."])}).promise()}var payload=JSON.stringify({options_selected:optionsSelected,feedback:feedback});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This assessment could not be submitted."])})}).promise()},selfAssess:function(optionsSelected){var url=this.url("self_assess");var payload=JSON.stringify({options_selected:optionsSelected});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This assessment could not be submitted."])})})},loadXml:function(){var url=this.url("xml");return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:'""'}).done(function(data){if(data.success){defer.resolveWith(this,[data.xml])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This problem could not be loaded."])})}).promise()},updateXml:function(xml){var url=this.url("update_xml");var payload=JSON.stringify({xml:xml});return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolve()}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["This problem could not be saved."])})}).promise()},checkReleased:function(){var url=this.url("check_released");var payload='""';return $.Deferred(function(defer){$.ajax({type:"POST",url:url,data:payload}).done(function(data){if(data.success){defer.resolveWith(this,[data.is_released])}else{defer.rejectWith(this,[data.msg])}}).fail(function(data){defer.rejectWith(this,["The server could not be contacted."])})}).promise()}};
\ No newline at end of file
......@@ -15,11 +15,11 @@ describe("OpenAssessment.BaseView", function() {
grade: readFixtures("oa_grade_complete.html")
};
this.peerAssess = function(submissionId, optionsSelected, feedback) {
this.peerAssess = function(optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
this.selfAssess = function(submissionId, optionsSelected) {
this.selfAssess = function(optionsSelected) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
......@@ -90,7 +90,7 @@ describe("OpenAssessment.BaseView", function() {
var testError = 'Test failure contacting server message';
loadSubviews(function() {
/* stub our selfAssess to fail */
spyOn(server, 'selfAssess').andCallFake(function(submissionId, optionsSelected) {
spyOn(server, 'selfAssess').andCallFake(function(optionsSelected) {
return $.Deferred(function(defer) { defer.rejectWith(server, [testError]); }).promise();
});
view.selfAssess();
......
......@@ -97,7 +97,7 @@ describe("OpenAssessment.Server", function() {
var success = false;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").done(function() {
server.peerAssess(options, "Excellent job!").done(function() {
success = true;
});
......@@ -106,7 +106,6 @@ describe("OpenAssessment.Server", function() {
url: '/peer_assess',
type: "POST",
data: JSON.stringify({
submission_uuid: "abc1234",
options_selected: options,
feedback: "Excellent job!"
})
......@@ -306,7 +305,7 @@ describe("OpenAssessment.Server", function() {
var options = {clarity: "Very clear", precision: "Somewhat precise"};
var receivedErrorMsg = "";
var testString = getHugeTestString();
server.peerAssess("abc1234", options, testString).fail(
server.peerAssess(options, testString).fail(
function(errorMsg) {
receivedErrorMsg = errorMsg;
}
......@@ -319,7 +318,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
......@@ -331,7 +330,7 @@ describe("OpenAssessment.Server", function() {
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
server.peerAssess(options, "Excellent job!").fail(function(msg) {
receivedMsg = msg;
});
......
......@@ -245,7 +245,6 @@ OpenAssessment.BaseView.prototype = {
*/
peerAssessRequest: function(successFunction) {
// Retrieve assessment info from the DOM
var submissionId = $("#peer_submission_uuid", this.element)[0].innerHTML.trim();
var optionsSelected = {};
$("#peer-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
......@@ -257,7 +256,7 @@ OpenAssessment.BaseView.prototype = {
// Send the assessment to the server
var view = this;
this.toggleActionError('peer', null);
this.server.peerAssess(submissionId, optionsSelected, feedback).done(
this.server.peerAssess(optionsSelected, feedback).done(
successFunction
).fail(function(errMsg) {
view.toggleActionError('peer', errMsg);
......@@ -269,7 +268,6 @@ OpenAssessment.BaseView.prototype = {
**/
selfAssess: function() {
// Retrieve self-assessment info from the DOM
var submissionId = $("#self_submission_uuid", this.element)[0].innerHTML.trim();
var optionsSelected = {};
$("#self-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
......@@ -280,7 +278,7 @@ OpenAssessment.BaseView.prototype = {
// Send the assessment to the server
var view = this;
this.toggleActionError('self', null);
this.server.selfAssess(submissionId, optionsSelected).done(
this.server.selfAssess(optionsSelected).done(
function() {
view.renderPeerAssessmentStep();
view.renderSelfAssessmentStep();
......
......@@ -221,7 +221,6 @@ OpenAssessment.Server.prototype = {
/**
Send a peer assessment to the XBlock.
Args:
submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
feedback (string): Written feedback on the submission.
......@@ -233,13 +232,13 @@ OpenAssessment.Server.prototype = {
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
var feedback = "Good job!";
server.peerAssess("abc123", options, feedback).done(
server.peerAssess(options, feedback).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
peerAssess: function(submissionId, optionsSelected, feedback) {
peerAssess: function(optionsSelected, feedback) {
var url = this.url('peer_assess');
if (feedback.length > this.maxInputSize) {
return $.Deferred(function(defer) {
......@@ -247,7 +246,6 @@ OpenAssessment.Server.prototype = {
}).promise();
}
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected,
feedback: feedback
});
......@@ -271,7 +269,6 @@ OpenAssessment.Server.prototype = {
Send a self-assessment to the XBlock.
Args:
submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
......@@ -281,16 +278,15 @@ OpenAssessment.Server.prototype = {
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.selfAssess("abc123", options).done(
server.selfAssess(options).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
selfAssess: function(submissionId, optionsSelected) {
selfAssess: function(optionsSelected) {
var url = this.url('self_assess');
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected
});
return $.Deferred(function(defer) {
......
......@@ -188,7 +188,7 @@ class TestGrade(XBlockHandlerTestCase):
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'])
submission = peer_api.get_submission_to_assess(scorer, len(peers))
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
......@@ -203,7 +203,7 @@ class TestGrade(XBlockHandlerTestCase):
# Have our user make assessments (so she can get a score)
for asmnt in peer_assessments:
new_submission = peer_api.get_submission_to_assess(student_item, len(peers))
new_submission = peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'], student_id, asmnt, {'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
......
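
For context on the test changes above and below: the hunks switch peer_api calls from a student_item dict to the scorer's own submission UUID. The following is a minimal sketch of that updated call pattern, not part of this commit; scorer_student_item, assessment_dict, rubric_dict, and REQUIRED_GRADED_BY are illustrative placeholders.

    # Hypothetical illustration of the peer API flow after this change.
    from submissions import api as sub_api
    from openassessment.assessment import peer_api

    REQUIRED_GRADED_BY = 3  # illustrative value

    # The scorer must have a submission of their own before assessing peers.
    scorer_sub = sub_api.create_submission(scorer_student_item, {'text': "scorer's answer"})

    # Fetch a peer submission to grade, keyed by the scorer's submission UUID
    # rather than a student_item dict.
    peer_sub = peer_api.get_submission_to_assess(scorer_sub['uuid'], REQUIRED_GRADED_BY)

    # Record the assessment. The peer being assessed is resolved server-side from
    # the scorer's open workflow item, so no peer submission UUID is passed in.
    peer_api.create_assessment(
        scorer_sub['uuid'],
        scorer_student_item['student_id'],
        assessment_dict,   # options_selected / feedback
        rubric_dict,       # e.g. {'criteria': ...}
        REQUIRED_GRADED_BY,
    )
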
......@@ -6,9 +6,6 @@ from collections import namedtuple
import copy
import json
import mock
import submissions.api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment import peer_api
from .base import XBlockHandlerTestCase, scenario
......@@ -37,7 +34,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
sub = peer_api.get_submission_to_assess(hal_student_item, 1)
peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
......@@ -48,7 +45,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
sub = peer_api.get_submission_to_assess(sally_student_item, 1)
peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
......@@ -86,8 +83,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_student, 3)
another_submission = xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_submission['uuid'], 3)
# Submit an assessment and expect a successful response
......@@ -126,8 +123,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_student, 3)
another_sub = xblock.create_submission(another_student, self.SUBMISSION)
peer_api.get_submission_to_assess(another_sub['uuid'], 3)
# Submit an assessment and expect a successful response
......@@ -160,7 +157,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Create a submission for the scorer (required before assessing another student)
another_student = copy.deepcopy(student_item)
another_student['student_id'] = "Bob"
another_submission = xblock.create_submission(another_student, self.SUBMISSION)
xblock.create_submission(another_student, self.SUBMISSION)
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
assessment = copy.deepcopy(self.ASSESSMENT)
......@@ -211,7 +208,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Hal will assess Sally.
assessment = copy.deepcopy(self.ASSESSMENT)
sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
assessment['submission_uuid'] = sally_sub['uuid']
peer_api.create_assessment(
hal_submission['uuid'],
hal_student_item['student_id'],
......@@ -222,7 +220,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Now Sally will assess Hal.
assessment = copy.deepcopy(self.ASSESSMENT)
hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
assessment['submission_uuid'] = hal_sub['uuid']
peer_api.create_assessment(
sally_submission['uuid'],
sally_student_item['student_id'],
......@@ -243,9 +242,6 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(peer_response)
self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
hal_response = "Hal".encode('utf-8') in peer_response.body
sally_response = "Sally".encode('utf-8') in peer_response.body
peer_api.create_assessment(
submission['uuid'],
student_item['student_id'],
......
......@@ -95,13 +95,6 @@ class TestSelfAssessment(XBlockHandlerTestCase):
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
# Missing submission_uuid
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['submission_uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('submission_uuid', resp['msg'])
# Missing options_selected
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['options_selected']
......