Commit dfd2a367 authored Dec 28, 2014 by muhammad-ammar
fix3
parent e8c4e31b
Showing 4 changed files with 127 additions and 7 deletions:

  openassessment/assessment/api/peer.py            +38  -6
  openassessment/assessment/test/test_peer.py      +56  -0
  openassessment/xblock/peer_assessment_mixin.py    +6  -1
  openassessment/xblock/test/test_staff_info.py    +27  -0
openassessment/assessment/api/peer.py
@@ -426,9 +426,19 @@ def get_assessment_median_scores(submission_uuid):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
         items = workflow.graded_by.filter(scored=True)
         assessments = [item.assessment for item in items]
         scores = Assessment.scores_by_criterion(assessments)
-        return Assessment.get_median_score_dict(scores)
+        score_dict = Assessment.get_median_score_dict(scores)
+
+        # Is it OK to give a zero score to a cancelled submission? Another option is to skip
+        # the calculation above and return a fake score dict, but that may have side effects.
+        if workflow.is_cancelled:
+            for key in score_dict:
+                score_dict[key] = 0
+
+        return score_dict
     except DatabaseError:
         error_message = (
             u"Error getting assessment median scores for submission {uuid}"
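A standalone sketch (not part of the commit; the criterion names and scores are made up) of the behaviour this hunk adds: the per-criterion medians are still computed, then every entry is overwritten with zero when the workflow is cancelled, so callers still receive a dict keyed by criterion.

# Sketch of the zeroing logic above, with hypothetical data.
scores = {"clarity": [2, 3, 3], "accuracy": [1, 2, 2]}    # per-criterion peer scores
score_dict = {name: sorted(vals)[len(vals) // 2] for name, vals in scores.items()}
is_cancelled = True                                       # stands in for workflow.is_cancelled
if is_cancelled:
    for key in score_dict:
        score_dict[key] = 0
print(score_dict)   # {'clarity': 0, 'accuracy': 0} rather than {'clarity': 3, 'accuracy': 2}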
@@ -661,13 +671,16 @@ def get_submission_to_assess(submission_uuid, graded_by):
     """
     workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
-    if workflow.is_cancelled:
-        return None
     if not workflow:
         raise PeerAssessmentWorkflowError(
             u"A Peer Assessment Workflow does not exist for the student "
             u"with submission UUID {}".format(submission_uuid)
         )
+
+    if workflow.is_cancelled:
+        return None
+
     open_item = workflow.find_active_assessments()
     peer_submission_uuid = open_item.submission_uuid if open_item else None
     # If there is an active assessment for this user, get that submission,
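The reordering matters because get_by_submission_uuid can return None when no workflow exists, and reading is_cancelled on None would raise AttributeError before the intended PeerAssessmentWorkflowError. A minimal sketch of the guard order, using stand-in objects rather than the real models:

# Sketch: the existence check must run before the cancellation check.
class FakeWorkflow(object):
    is_cancelled = True

def pick_submission(workflow):
    if not workflow:
        raise LookupError("no peer workflow")   # stand-in for PeerAssessmentWorkflowError
    if workflow.is_cancelled:                   # safe: workflow is known to exist here
        return None
    return "next-submission-uuid"

print(pick_submission(FakeWorkflow()))          # None, because the workflow is cancelled
try:
    pick_submission(None)
except LookupError as exc:
    print(exc)                                  # raised instead of an AttributeError on None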
@@ -964,15 +977,15 @@ def cancel_submission_peer_workflow(submission_uuid, comments, cancelled_by_id):
     try:
         workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
-        assessment = workflow.graded_by.filter(
+        items = workflow.graded_by.filter(
             assessment__submission_uuid=submission_uuid,
             assessment__score_type=PEER_TYPE
         ).order_by('-assessment')
-        if assessment:
+        if items:
             sub_api.set_score(
                 submission_uuid,
                 0,
-                assessment.points_possible
+                items[0].assessment.points_possible
             )
         return PeerWorkflowCancellation.create(
             workflow=workflow, comments=comments, cancelled_by_id=cancelled_by_id
         )
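A hedged reading of this hunk: graded_by.filter(...) returns a queryset (assumed to hold PeerWorkflowItem rows, each wrapping an Assessment), so the old code read points_possible off the queryset itself; the new code takes the most recent item's assessment. A hypothetical call, with made-up identifiers but the keyword signature used by the tests below:

from openassessment.assessment.api import peer as peer_api   # module shown in this diff

peer_api.cancel_submission_peer_workflow(
    submission_uuid="11111111-2222-3333-4444-555555555555",  # made-up UUID
    comments="Removed for inappropriate content",
    cancelled_by_id="staff-001",                              # made-up staff id
)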
@@ -1003,3 +1016,22 @@ def get_submission_cancellation(submission_uuid):
         error_message = u"Error finding peer workflow cancellation for submission UUID {}.".format(submission_uuid)
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
+
+
+def is_peer_workflow_submission_cancelled(submission_uuid):
+    """
+    Check whether the peer workflow submission has been cancelled.
+
+    Args:
+        submission_uuid (str): The UUID of the peer workflow's submission.
+
+    """
+    # Users must submit a response before they can peer-assess.
+    if submission_uuid is None:
+        return False
+
+    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
+    if workflow:
+        return workflow.is_cancelled
+
+    return False
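A short usage sketch for the new helper (not part of the commit; assumes the package is importable). Per the None check above, a missing submission is reported as not cancelled:

from openassessment.assessment.api import peer as peer_api

assert peer_api.is_peer_workflow_submission_cancelled(None) is False
# With a real submission_uuid the helper returns workflow.is_cancelled when a
# workflow exists, and False otherwise.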
openassessment/assessment/test/test_peer.py
@@ -934,6 +934,62 @@ class TestPeerApi(CacheResetTest):
             REQUIRED_GRADED_BY,
         )
 
+    def test_cancelled_submission_peerworkflow_status(self):
+        """
+        Test the peer workflow status of a cancelled submission.
+        """
+        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
+
+        # Check for a workflow for Buffy.
+        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub['uuid'])
+        self.assertIsNotNone(buffy_workflow)
+
+        # Cancel Buffy's submission.
+        PeerWorkflowCancellation.create(
+            workflow=buffy_workflow, comments='Cancellation reason', cancelled_by_id=buffy['student_id']
+        )
+
+        workflow = PeerWorkflow.get_by_submission_uuid(buffy_sub["uuid"])
+        self.assertTrue(workflow.is_cancelled)
+
+    def test_cancelled_submission_peerworkflow_score(self):
+        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
+        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
+
+        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
+        peer_api.create_assessment(
+            tim_sub["uuid"],
+            tim["student_id"],
+            ASSESSMENT_DICT['options_selected'],
+            ASSESSMENT_DICT['criterion_feedback'],
+            ASSESSMENT_DICT['overall_feedback'],
+            RUBRIC_DICT,
+            1,
+        )
+
+        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
+        peer_api.create_assessment(
+            bob_sub["uuid"],
+            bob["student_id"],
+            ASSESSMENT_DICT['options_selected'],
+            ASSESSMENT_DICT['criterion_feedback'],
+            ASSESSMENT_DICT['overall_feedback'],
+            RUBRIC_DICT,
+            1,
+        )
+
+        requirements = {'must_grade': 1, 'must_be_graded_by': 1}
+
+        peer_api.cancel_submission_peer_workflow(
+            submission_uuid=bob_sub["uuid"],
+            comments="Inappropriate language",
+            cancelled_by_id=bob['student_id']
+        )
+        score = peer_api.get_score(bob_sub["uuid"], requirements)
+        self.assertEqual(score['points_earned'], 0)
+
     def test_get_workflow_by_uuid(self):
         buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
         self._create_student_and_submission("Xander", "Xander's answer")
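The second test exercises peer_api.get_score; a hedged follow-up assertion (not in the commit) for the median-score path touched in peer.py could be added inside the same test method, reusing the bob_sub fixture:

        # Every criterion median is forced to zero for a cancelled workflow
        # (see the get_assessment_median_scores hunk above).
        medians = peer_api.get_assessment_median_scores(bob_sub["uuid"])
        self.assertTrue(all(value == 0 for value in medians.values()))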
openassessment/xblock/peer_assessment_mixin.py
@@ -214,9 +214,14 @@ class PeerAssessmentMixin(object):
                 "Submit your assessment & move to response #{response_number}"
             ).format(response_number=(count + 2))
 
+        if peer_api.is_peer_workflow_submission_cancelled(self.submission_uuid):
+            path = 'openassessmentblock/peer/oa_peer_waiting.html'
+            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
+            self.no_peers = True
+
         # Once a student has completed a problem, it stays complete,
         # so this condition needs to be first.
-        if (workflow.get('status') == 'done' or finished) and not continue_grading:
+        elif (workflow.get('status') == 'done' or finished) and not continue_grading:
             path = "openassessmentblock/peer/oa_peer_complete.html"
 
         # Allow continued grading even if the problem due date has passed
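The switch from if to elif keeps the cancelled branch from being overwritten: with a plain if, a workflow whose status is already 'done' would fall through and replace the waiting template with the complete one. A standalone sketch of the control flow (template names from the hunk, flags made up):

# Why `elif` matters here (stand-in flags).
cancelled, finished, continue_grading = True, True, False
path = None
if cancelled:
    path = 'openassessmentblock/peer/oa_peer_waiting.html'
elif finished and not continue_grading:    # a plain `if` here would clobber the waiting path
    path = "openassessmentblock/peer/oa_peer_complete.html"
print(path)   # -> the waiting template, as the fix intends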
openassessment/xblock/test/test_staff_info.py
@@ -249,6 +249,33 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertIsNotNone(context['submission_cancellation'])
         self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
 
+    @scenario('data/basic_scenario.xml', user_id='Bob')
+    def test_cancelled_submission_peer_aseseement_render_path(self, xblock):
+        """
+        Test that the peer assessment path is oa_peer_waiting.html for a cancelled submission.
+        """
+        # Simulate that we are course staff
+        xblock.xmodule_runtime = self._create_mock_runtime(
+            xblock.scope_ids.usage_id, True, False, "Bob"
+        )
+
+        bob_item = STUDENT_ITEM.copy()
+        bob_item["item_id"] = xblock.scope_ids.usage_id
+        # Create a submission for Bob, and corresponding workflow.
+        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
+        peer_api.on_start(submission["uuid"])
+        workflow_api.create_workflow(submission["uuid"], ['self'])
+
+        peer_api.cancel_submission_peer_workflow(
+            submission_uuid=submission["uuid"],
+            comments="Inappropriate language",
+            cancelled_by_id=bob_item['student_id']
+        )
+        xblock.submission_uuid = submission["uuid"]
+        path, context = xblock.peer_path_and_context(False)
+        self.assertEquals("openassessmentblock/peer/oa_peer_waiting.html", path)
+
     @scenario('data/self_only_scenario.xml', user_id='Bob')
     def test_staff_debug_student_info_image_submission(self, xblock):
         # Simulate that we are course staff