edx / edx-ora2 · Commits

Commit 5ceccba4 authored Apr 24, 2014 by Will Daly
Continue showing peer assessment step as completed after the deadline passes
Allow continued grading after the deadline passes.
parent 994cf3db
Showing 2 changed files with 174 additions and 23 deletions:

    apps/openassessment/xblock/peer_assessment_mixin.py    +43 -20
    apps/openassessment/xblock/test/test_peer.py           +131 -3
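The heart of the change is the order in which the peer step picks a template: completion is now checked before the due-date cutoff, and an explicit `continue_grading` request overrides the "complete" screen. A simplified sketch of the precedence the diff below introduces (illustrative names, not a verbatim excerpt from the commit):

    def select_peer_template(workflow_done, finished, continue_grading,
                             problem_closed, reason, has_peer_submission):
        # Completion is sticky: show "complete" even after the deadline,
        # unless the learner explicitly asked to keep grading.
        if (workflow_done or finished) and not continue_grading:
            return 'oa_peer_complete.html'
        # Continued ("turbo mode") grading works even past the due date.
        elif continue_grading:
            return ('oa_peer_turbo_mode.html' if has_peer_submission
                    else 'oa_peer_turbo_mode_waiting.html')
        elif problem_closed and reason == 'due':
            return 'oa_peer_closed.html'
        elif problem_closed and reason == 'start':
            return 'oa_peer_unavailable.html'
        # Otherwise the step is open and gradable (simplified fallback).
        return 'oa_peer_assessment.html'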
apps/openassessment/xblock/peer_assessment_mixin.py
...
@@ -133,6 +133,21 @@ class PeerAssessmentMixin(object):
             number of assessments.
         """
+        continue_grading = data.params.get('continue_grading', False)
+        path, context_dict = self.peer_path_and_context(continue_grading)
+        return self.render_assessment(path, context_dict)
+
+    def peer_path_and_context(self, continue_grading):
+        """
+        Return the template path and context for rendering the peer assessment step.
+
+        Args:
+            continue_grading (bool): If true, the user has chosen to continue grading.
+
+        Returns:
+            tuple of (template_path, context_dict)
+        """
         path = 'openassessmentblock/peer/oa_peer_unavailable.html'
         finished = False
         problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")
...
@@ -154,10 +169,7 @@ class PeerAssessmentMixin(object):
         workflow = self.get_workflow_info()
         if workflow is None:
             return self.render_assessment(path, context_dict)
-        continue_grading = (
-            data.params.get('continue_grading', False) and
-            workflow["status_details"]["peer"]["complete"]
-        )
+        continue_grading = continue_grading and workflow["status_details"]["peer"]["complete"]

         student_item = self.get_student_item_dict()
         assessment = self.get_assessment_module('peer-assessment')
...
@@ -183,7 +195,20 @@ class PeerAssessmentMixin(object):
                     "Submit your assessment & move to response #{}"
                 ).format(count + 2)

-        if reason == 'due' and problem_closed:
+        # Once a student has completed a problem, it stays complete,
+        # so this condition needs to be first.
+        if (workflow.get('status') == 'done' or finished) and not continue_grading:
+            path = "openassessmentblock/peer/oa_peer_complete.html"
+
+        # Allow continued grading even if the problem due date has passed
+        elif continue_grading and student_item:
+            peer_sub = self.get_peer_submission(student_item, assessment)
+            if peer_sub:
+                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
+                context_dict["peer_submission"] = peer_sub
+            else:
+                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
+        elif reason == 'due' and problem_closed:
             path = 'openassessmentblock/peer/oa_peer_closed.html'
         elif reason == 'start' and problem_closed:
             context_dict["peer_start"] = start_date
...
@@ -195,23 +220,21 @@ class PeerAssessmentMixin(object):
                 context_dict["peer_submission"] = peer_sub
             else:
                 path = 'openassessmentblock/peer/oa_peer_waiting.html'
-        elif continue_grading and student_item:
-            peer_sub = self.get_peer_submission(student_item, assessment)
-            if peer_sub:
-                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
-                context_dict["peer_submission"] = peer_sub
-            else:
-                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
         elif workflow.get("status") == "done" or finished:
             path = "openassessmentblock/peer/oa_peer_complete.html"
-        return self.render_assessment(path, context_dict)
+
+        return path, context_dict

     def get_peer_submission(self, student_item_dict, assessment):
+        """
+        Retrieve a submission to peer-assess.
+
+        Args:
+            student_item_dict (dict): The student item for the student creating the submission.
+            assessment (dict): A dict describing the requirements for grading.
+
+        Returns:
+            dict: The serialized submission model.
+        """
         peer_submission = False
         try:
             peer_submission = peer_api.get_submission_to_assess(
...
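Splitting the handler this way also makes the logic directly testable: `render_peer_assessment` is now a thin wrapper, and a test can call `peer_path_and_context` and assert on the returned pair without rendering anything. A minimal sketch, assuming an `xblock` instance set up the way the tests below do it (past-due scenario, completed peer step, no other submissions to grade):

    # Drive the helper directly; no HTTP request or template rendering involved.
    path, context = xblock.peer_path_and_context(True)  # continue_grading=True
    # With nothing left to grade, we expect the turbo-mode waiting template.
    assert path == 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
    assert 'submit_button_text' in context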
apps/openassessment/xblock/test/test_peer.py
...
@@ -6,6 +6,9 @@ from collections import namedtuple
 import copy
 import json
+import mock
+import datetime as dt
+import pytz

 from openassessment.assessment import peer_api
 from .base import XBlockHandlerTestCase, scenario
...
@@ -54,8 +57,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
             1
         )

-        # If Over Grading is on, this should now return Sally or Hal's response
-        # to Bob.
+        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
         submission = xblock.create_submission(student_item, u"Bob's answer")
         workflow_info = xblock.get_workflow_info()
         self.assertEqual(workflow_info["status"], u'peer')
...
@@ -67,7 +69,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

-        #Validate Peer Rendering.
+        # Validate Peer Rendering.
         self.assertTrue("Sally".encode('utf-8') in peer_response.body or
                         "Hal".encode('utf-8') in peer_response.body)
...
@@ -296,3 +298,129 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
         self.assertIn('waiting', resp.lower())
         self.assertIn('peer', resp.lower())
+
+
+class TestPeerAssessmentRender(XBlockHandlerTestCase):
+    """
+    Test rendering of the peer assessment step.
+
+    The basic strategy is to verify that we're providing the right
+    template and context for each possible state,
+    plus an integration test to verify that the context
+    is being rendered correctly.
+    """
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Tyler')
+    def test_completed_and_past_due(self, xblock):
+        # Simulate having complete peer-assessment
+        # Even though the problem is closed, we should still see
+        # that the step is complete.
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u"𝕿𝖍𝖊 𝖋𝖎𝖗𝖘𝖙 𝖗𝖚𝖑𝖊 𝖔𝖋 𝖋𝖎𝖌𝖍𝖙 𝖈𝖑𝖚𝖇 𝖎𝖘 𝖞𝖔𝖚 𝖉𝖔 𝖓𝖔𝖙 𝖙𝖆𝖑𝖐 𝖆𝖇𝖔𝖚𝖙 𝖋𝖎𝖌𝖍𝖙 𝖈𝖑𝖚𝖇."
+        )
+
+        # Simulate a workflow status of "done" and expect to see the "completed" step
+        expected_context = {
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'submit_button_text': 'Submit your assessment & move to response #2',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+        }
+        self._assert_path_and_context(
+            xblock,
+            'openassessmentblock/peer/oa_peer_complete.html',
+            expected_context,
+            workflow_status='done',
+        )
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Marla')
+    def test_turbo_grade_past_due(self, xblock):
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u"ı ƃoʇ ʇɥıs pɹǝss ɐʇ ɐ ʇɥɹıɟʇ sʇoɹǝ ɟoɹ ouǝ poןןɐɹ."
+        )
+
+        # Try to continue grading after the due date has passed
+        # Continued grading should still be available,
+        # but since there are no other submissions, we're in the waiting state.
+        expected_context = {
+            'estimated_time': '20 minutes',
+            'graded': 0,
+            'must_grade': 5,
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'review_num': 1,
+            'rubric_criteria': xblock.rubric_criteria,
+            'submit_button_text': 'Submit your assessment & review another response',
+        }
+        self._assert_path_and_context(
+            xblock,
+            'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
+            expected_context,
+            continue_grading=True,
+            workflow_status='done',
+            workflow_status_details={'peer': {'complete': True}}
+        )
+
+        # Create a submission from another student.
+        # We should now be able to continue grading that submission
+        other_student_item = copy.deepcopy(xblock.get_student_item_dict())
+        other_student_item['student_id'] = "Tyler"
+        submission = xblock.create_submission(other_student_item, u"Other submission")
+
+        expected_context = {
+            'estimated_time': '20 minutes',
+            'graded': 0,
+            'must_grade': 5,
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'peer_submission': submission,
+            'review_num': 1,
+            'rubric_criteria': xblock.rubric_criteria,
+            'submit_button_text': 'Submit your assessment & review another response',
+        }
+        self._assert_path_and_context(
+            xblock,
+            'openassessmentblock/peer/oa_peer_turbo_mode.html',
+            expected_context,
+            continue_grading=True,
+            workflow_status='done',
+            workflow_status_details={'peer': {'complete': True}}
+        )
+
+    def _assert_path_and_context(
+        self, xblock, expected_path, expected_context,
+        continue_grading=False, workflow_status=None,
+        workflow_status_details=None,
+    ):
+        """
+        Render the peer assessment step and verify:
+            1) that the correct template and context were used
+            2) that the rendering occurred without an error
+
+        Args:
+            xblock (OpenAssessmentBlock): The XBlock under test.
+            expected_path (str): The expected template path.
+            expected_context (dict): The expected template context.
+
+        Kwargs:
+            continue_grading (bool): If true, the user has chosen to continue grading.
+            workflow_status (str): If provided, simulate this status from the workflow API.
+            workflow_status_details (dict): If provided, simulate these workflow details from the workflow API.
+        """
+        if workflow_status is not None:
+            xblock.get_workflow_info = mock.Mock(return_value={
+                'status': workflow_status,
+                'status_details': (
+                    workflow_status_details
+                    if workflow_status_details is not None
+                    else dict()
+                ),
+            })
+        path, context = xblock.peer_path_and_context(continue_grading)
+        self.assertEqual(path, expected_path)
+        self.assertItemsEqual(context, expected_context)
+
+        # Verify that we render without error
+        resp = self.request(xblock, 'render_peer_assessment', json.dumps({}))
+        self.assertGreater(len(resp), 0)