edx / edx-ora2 · Commit 93ef7906
Authored Apr 24, 2014 by Will Daly

Merge pull request #298 from edx/will/peer-rendering-test-cleanup

Will/peer rendering test cleanup

Parents: 3ae16f20, 00a6943f
Showing 5 changed files with 243 additions and 58 deletions
apps/openassessment/xblock/self_assessment_mixin.py (+0 −1)
apps/openassessment/xblock/test/data/peer_closed_scenario.xml (+1 −1)
apps/openassessment/xblock/test/data/peer_future_scenario.xml (+46 −0)
apps/openassessment/xblock/test/test_peer.py (+187 −40)
apps/openassessment/xblock/test/test_self.py (+9 −16)
apps/openassessment/xblock/self_assessment_mixin.py

@@ -92,7 +92,6 @@ class SelfAssessmentMixin(object):
         Args:
             data (dict): Must have the following keys:
-                submission_uuid (string): The unique identifier of the submission being assessed.
                 options_selected (dict): Dictionary mapping criterion names to option values.

         Returns:
apps/openassessment/xblock/test/data/peer_closed_scenario.xml

@@ -40,7 +40,7 @@
     </criterion>
   </rubric>
   <assessments>
-    <assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" due="2000-01-01T00:00:00" />
+    <assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" due="2000-01-01T00:00:00" />
     <assessment name="self-assessment" />
   </assessments>
 </openassessment>
apps/openassessment/xblock/test/data/peer_future_scenario.xml (new file, mode 100644)

<openassessment>
  <title>Open Assessment Test</title>
  <prompt>
    Given the state of the world today, what do you think should be done to
    combat poverty? Please answer in a short essay of 200-300 words.
  </prompt>
  <rubric>
    <prompt>Read for conciseness, clarity of thought, and form.</prompt>
    <criterion>
      <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
      <prompt>How concise is it?</prompt>
      <option points="3">
        <name>ﻉซƈﻉɭɭﻉกՇ</name>
        <explanation>Extremely concise</explanation>
      </option>
      <option points="2">
        <name>Ġööḋ</name>
        <explanation>Concise</explanation>
      </option>
      <option points="1">
        <name>ק๏๏г</name>
        <explanation>Wordy</explanation>
      </option>
    </criterion>
    <criterion>
      <name>Form</name>
      <prompt>How well-formed is it?</prompt>
      <option points="3">
        <name>Good</name>
        <explanation>Good</explanation>
      </option>
      <option points="2">
        <name>Fair</name>
        <explanation>Fair</explanation>
      </option>
      <option points="1">
        <name>Poor</name>
        <explanation>Poor</explanation>
      </option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" start="2999-01-01T00:00:00" />
    <assessment name="self-assessment" />
  </assessments>
</openassessment>
apps/openassessment/xblock/test/test_peer.py

@@ -9,6 +9,7 @@ import json
 import mock
 import datetime as dt
 import pytz
+import ddt
 from openassessment.assessment import peer_api
 from .base import XBlockHandlerTestCase, scenario
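For context: the scenario decorator imported above comes from the repo's test base module; it loads an XML problem fixture and hands each test an instantiated XBlock, which is how every test in this diff receives its xblock argument. Below is a rough, hypothetical sketch of that pattern; load_scenario and the user handling are illustrative assumptions, not the actual implementation in .base.

# Hypothetical sketch only -- the real decorator lives in
# apps/openassessment/xblock/test/base.py.
import functools

def scenario(scenario_path, user_id=None):
    """Load an XML scenario and pass the resulting XBlock into the test."""
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(self, *args, **kwargs):
            # `load_scenario` is an assumed XBlockHandlerTestCase helper that
            # parses the XML fixture and instantiates the OpenAssessment block.
            xblock = self.load_scenario(scenario_path)
            if user_id is not None:
                # Assumed: associate subsequent requests with this student.
                xblock.scope_ids.user_id = user_id
            return test_func(self, xblock, *args, **kwargs)
        return wrapper
    return decorator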
@@ -275,31 +276,8 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
         self.assertIn("Peer Assessments Complete", peer_response.body)
 
-    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
-    def test_peer_unavailable(self, xblock):
-        # Before creating a submission, the peer step should be unavailable
-        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
-        self.assertIn('not available', resp.lower())
-        self.assertIn('peer-assessment', resp.lower())
-
-    @scenario('data/peer_closed_scenario.xml', user_id='Bob')
-    def test_peer_closed(self, xblock):
-        # The scenario defines a peer assessment with a due date in the past
-        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
-        self.assertIn('closed', resp.lower())
-        self.assertIn('peer assessment', resp.lower())
-
-    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
-    def test_peer_waiting(self, xblock):
-        # Make a submission, but no peer assessments available
-        xblock.create_submission(xblock.get_student_item_dict(), "Test answer")
-
-        # Check the rendered peer step
-        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
-        self.assertIn('waiting', resp.lower())
-        self.assertIn('peer', resp.lower())
 
+@ddt.ddt
 class TestPeerAssessmentRender(XBlockHandlerTestCase):
     """
     Test rendering of the peer assessment step.
@@ -309,8 +287,167 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
     is being rendered correctly.
     """
 
+    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
+    def test_released_no_submission(self, xblock):
+        # No submission, so the peer step should be unavailable
+        expected_context = {
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'submit_button_text': 'Submit your assessment & move to response #2',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_unavailable.html',
+            expected_context
+        )
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Bob')
+    def test_closed_no_submission(self, xblock):
+        expected_context = {
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'submit_button_text': 'Submit your assessment & move to response #2',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_closed.html',
+            expected_context
+        )
+
+    @scenario('data/peer_future_scenario.xml', user_id='Bob')
+    def test_before_release(self, xblock):
+        expected_context = {
+            'peer_start': dt.datetime(2999, 1, 1).replace(tzinfo=pytz.utc),
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'submit_button_text': 'Submit your assessment & move to response #2',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_unavailable.html',
+            expected_context
+        )
+
+    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
+    def test_waiting_for_peers(self, xblock):
+        # Make a submission, but no peer assessments available
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u'Ǥø ȺħɇȺđ, Ȼøɍnɇłɨᵾs, ɏøᵾ ȼȺn ȼɍɏ'
+        )
+
+        # Expect to be in the waiting for peers state
+        expected_context = {
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+            'submit_button_text': 'submit your assessment & move to response #2',
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_waiting.html',
+            expected_context,
+            workflow_status='peer',
+            graded_enough=False,
+            was_graded_enough=False,
+        )
+
+    @scenario('data/peer_assessment_scenario.xml', user_id='Richard')
+    def test_peer_assessment_available(self, xblock):
+        # Make a submission, so we get to peer assessment
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u"𝒀𝒆𝒔. 𝑴𝒂𝒌𝒆 𝒕𝒉𝒆𝒔𝒆 𝒚𝒐𝒖𝒓 𝒑𝒓𝒊𝒎𝒂𝒓𝒚 𝒂𝒄𝒕𝒊𝒐𝒏 𝒊𝒕𝒆𝒎𝒔."
+        )
+
+        # Create a submission from another user so we have something to assess
+        other_student = copy.deepcopy(xblock.get_student_item_dict())
+        other_student['student_id'] = 'Tyler'
+        submission = xblock.create_submission(
+            other_student,
+            (
+                u"ησω, αη¢ιєηт ρєσρℓє ƒσυη∂ тнєιя ¢ℓσтнєѕ ﻭσт ¢ℓєαηєя"
+                u" ιƒ тнєу ωαѕнє∂ тнєм αт α ¢єятαιη ѕρσт ιη тнє яινєя."
+            )
+        )
+
+        # We should pull the other student's submission
+        expected_context = {
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+            'peer_submission': submission,
+            'submit_button_text': 'submit your assessment & move to response #2',
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_assessment.html',
+            expected_context,
+            workflow_status='peer',
+        )
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Bob')
+    def test_peer_closed_no_assessments_available(self, xblock):
+        # Make a submission, so we get to peer assessment
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u"ฬє'гє รՇเɭɭ ๓єภ"
+        )
+
+        # No assessments are available, and the step has closed
+        expected_context = {
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+            'submit_button_text': 'submit your assessment & move to response #2',
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_closed.html',
+            expected_context,
+            workflow_status='peer',
+        )
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Richard')
+    def test_peer_closed_assessments_available(self, xblock):
+        # Make a submission, so we get to peer assessment
+        xblock.create_submission(
+            xblock.get_student_item_dict(),
+            u"𝒀𝒆𝒔. 𝑴𝒂𝒌𝒆 𝒕𝒉𝒆𝒔𝒆 𝒚𝒐𝒖𝒓 𝒑𝒓𝒊𝒎𝒂𝒓𝒚 𝒂𝒄𝒕𝒊𝒐𝒏 𝒊𝒕𝒆𝒎𝒔."
+        )
+
+        # Create a submission from another user so we have something to assess
+        other_student = copy.deepcopy(xblock.get_student_item_dict())
+        other_student['student_id'] = 'Tyler'
+        xblock.create_submission(
+            other_student,
+            (
+                u"ησω, αη¢ιєηт ρєσρℓє ƒσυη∂ тнєιя ¢ℓσтнєѕ ﻭσт ¢ℓєαηєя"
+                u" ιƒ тнєу ωαѕнє∂ тнєм αт α ¢єятαιη ѕρσт ιη тнє яινєя."
+            )
+        )
+
+        # ... but the problem is closed, so we can't assess them
+        expected_context = {
+            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
+            'graded': 0,
+            'estimated_time': '20 minutes',
+            'rubric_criteria': xblock.rubric_criteria,
+            'must_grade': 5,
+            'review_num': 1,
+            'submit_button_text': 'submit your assessment & move to response #2',
+        }
+        self._assert_path_and_context(
+            xblock, 'openassessmentblock/peer/oa_peer_closed.html',
+            expected_context,
+            workflow_status='peer',
+        )
+
+    @ddt.data('self', 'waiting', 'done')
     @scenario('data/peer_closed_scenario.xml', user_id='Tyler')
-    def test_completed_and_past_due(self, xblock):
+    def test_completed_and_past_due(self, xblock, workflow_status):
         # Simulate having complete peer-assessment
         # Even though the problem is closed, we should still see
         # that the step is complete.
@@ -324,19 +461,23 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
             'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
             'graded': 0,
             'estimated_time': '20 minutes',
-            'submit_button_text': 'Submit your assessment & move to response #2',
+            'submit_button_text': 'submit your assessment & move to response #2',
             'rubric_criteria': xblock.rubric_criteria,
             'must_grade': 5,
             'review_num': 1,
         }
         self._assert_path_and_context(
             xblock, 'openassessmentblock/peer/oa_peer_complete.html',
             expected_context,
-            workflow_status='done',
+            workflow_status=workflow_status,
+            graded_enough=True,
+            was_graded_enough=True,
         )
 
+    @ddt.data('self', 'done')
     @scenario('data/peer_closed_scenario.xml', user_id='Marla')
-    def test_turbo_grade_past_due(self, xblock):
+    def test_turbo_grade_past_due(self, xblock, workflow_status):
         xblock.create_submission(
             xblock.get_student_item_dict(),
             u"ı ƃoʇ ʇɥıs pɹǝss ɐʇ ɐ ʇɥɹıɟʇ sʇoɹǝ ɟoɹ ouǝ poןןɐɹ."
@@ -358,8 +499,9 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
         self._assert_path_and_context(
             xblock, 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
             expected_context,
             continue_grading=True,
-            workflow_status='done',
-            workflow_status_details={'peer': {'complete': True}}
+            workflow_status=workflow_status,
+            graded_enough=True,
+            was_graded_enough=True,
         )
 
         # Create a submission from another student.
@@ -383,13 +525,15 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
             expected_context,
             continue_grading=True,
-            workflow_status='done',
-            workflow_status_details={'peer': {'complete': True}}
+            workflow_status=workflow_status,
+            graded_enough=True,
+            was_graded_enough=True,
         )
 
     def _assert_path_and_context(
         self, xblock, expected_path, expected_context,
-        continue_grading=False, workflow_status=None, workflow_status_details=None,
+        continue_grading=False, workflow_status=None,
+        graded_enough=False, was_graded_enough=False,
     ):
         """
         Render the peer assessment step and verify:
@@ -404,18 +548,21 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
         Kwargs:
             continue_grading (bool): If true, the user has chosen to continue grading.
             workflow_status (str): If provided, simulate this status from the workflow API.
-            workflow_status_details (dict): If provided, simulate these workflow details from the workflow API.
+            graded_enough (bool): Did the student meet the requirement by assessing enough peers?
+            was_graded_enough (bool): Did the student receive enough assessments from peers?
         """
         # Simulate the response from the workflow API
         if workflow_status is not None:
-            xblock.get_workflow_info = mock.Mock(return_value={
-                'status': workflow_status,
-                'status_details': (
-                    workflow_status_details
-                    if workflow_status_details is not None
-                    else dict()
-                ),
-            })
+            workflow_info = {
+                'status': workflow_status,
+                'status_details': {'peer': {'complete': graded_enough}}
+            }
+            xblock.get_workflow_info = mock.Mock(return_value=workflow_info)
 
+        # Simulate that we've either finished or not finished required grading
+        patched_module = 'openassessment.xblock.peer_assessment_mixin.peer_api'
+        with mock.patch(patched_module + '.has_finished_required_evaluating') as mock_finished:
+            mock_finished.return_value = (was_graded_enough, 1)
+            path, context = xblock.peer_path_and_context(continue_grading)
 
         self.assertEqual(path, expected_path)
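A note on the new decorators in this file: the extra workflow_status parameter gained by test_completed_and_past_due and test_turbo_grade_past_due is standard ddt behavior. With @ddt.ddt on the class, @ddt.data(...) on a method expands it into one test case per value, passing each value as an additional positional argument. A minimal, self-contained illustration (not from this repo):

import unittest

import ddt


@ddt.ddt
class ExampleTest(unittest.TestCase):

    @ddt.data('self', 'waiting', 'done')
    def test_workflow_status(self, workflow_status):
        # ddt expands this into one generated test per value, passing the
        # value as the positional argument after `self`.
        self.assertIn(workflow_status, ('self', 'waiting', 'done'))


if __name__ == '__main__':
    unittest.main()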
apps/openassessment/xblock/test/test_self.py

@@ -13,13 +13,15 @@ from .base import XBlockHandlerTestCase, scenario
 class TestSelfAssessment(XBlockHandlerTestCase):
+    """
+    Tests for the self-assessment XBlock handler.
+    """
 
     maxDiff = None
 
     SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
 
     ASSESSMENT = {
-        'submission_uuid': None,
         'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
     }
@@ -31,9 +33,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
         submission = xblock.create_submission(student_item, self.SUBMISSION)
 
         # Submit a self-assessment
-        assessment = copy.deepcopy(self.ASSESSMENT)
-        assessment['submission_uuid'] = submission['uuid']
-        resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
+        resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
         self.assertTrue(resp['success'])
 
         # Expect that a self-assessment was created
@@ -62,9 +62,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
         with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
 
             # Submit a self-assessment
-            assessment = copy.deepcopy(self.ASSESSMENT)
-            assessment['submission_uuid'] = submission['uuid']
-            resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
+            resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
 
             # Verify that the workflow is updated when we submit a self-assessment
             self.assertTrue(resp['success'])
@@ -77,7 +75,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
     def test_self_assess_workflow_error(self, xblock):
         # Create a submission for the student
         student_item = xblock.get_student_item_dict()
-        submission = xblock.create_submission(student_item, self.SUBMISSION)
+        xblock.create_submission(student_item, self.SUBMISSION)
 
         with mock.patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
@@ -85,9 +83,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
             mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError
 
             # Submit a self-assessment
-            assessment = copy.deepcopy(self.ASSESSMENT)
-            assessment['submission_uuid'] = submission['uuid']
-            resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
+            resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
 
             # Verify that we get an error response
             self.assertFalse(resp['success'])
@@ -106,17 +102,14 @@ class TestSelfAssessment(XBlockHandlerTestCase):
     def test_self_assess_api_error(self, xblock):
         # Create a submission for the student
         student_item = xblock.get_student_item_dict()
-        submission = xblock.create_submission(student_item, self.SUBMISSION)
-
-        # Submit a self-assessment
-        assessment = copy.deepcopy(self.ASSESSMENT)
-        assessment['submission_uuid'] = submission['uuid']
+        xblock.create_submission(student_item, self.SUBMISSION)
 
         # Simulate an error and expect a failure response
         with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
             mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
             mock_api.create_assessment.side_effect = self_api.SelfAssessmentRequestError
-            resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
+            resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
             self.assertFalse(resp['success'])