Commit c7448bb8
Authored Apr 01, 2014 by Will Daly

Merge pull request #243 from edx/will/view-level-tests

Add missing view-level tests
Parents: 26fc1126, cf4b5e3e

Showing 5 changed files, with 201 additions and 15 deletions (+201 / -15):
apps/openassessment/xblock/test/data/grade_incomplete_scenario.xml   +46  -0
apps/openassessment/xblock/test/data/peer_closed_scenario.xml        +46  -0
apps/openassessment/xblock/test/test_grade.py                        +71  -10
apps/openassessment/xblock/test/test_peer.py                         +26  -3
apps/openassessment/xblock/test/test_submission.py                   +12  -2
apps/openassessment/xblock/test/data/grade_incomplete_scenario.xml (new file, mode 100644)
<openassessment>
  <title>Open Assessment Test</title>
  <prompt>
    Given the state of the world today, what do you think should be done to
    combat poverty? Please answer in a short essay of 200-300 words.
  </prompt>
  <rubric>
    <prompt>Read for conciseness, clarity of thought, and form.</prompt>
    <criterion>
      <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
      <prompt>How concise is it?</prompt>
      <option points="3">
        <name>ﻉซƈﻉɭɭﻉกՇ</name>
        <explanation>Extremely concise</explanation>
      </option>
      <option points="2">
        <name>Ġööḋ</name>
        <explanation>Concise</explanation>
      </option>
      <option points="1">
        <name>ק๏๏г</name>
        <explanation>Wordy</explanation>
      </option>
    </criterion>
    <criterion>
      <name>Form</name>
      <prompt>How well-formed is it?</prompt>
      <option points="3">
        <name>Good</name>
        <explanation>Good</explanation>
      </option>
      <option points="2">
        <name>Fair</name>
        <explanation>Fair</explanation>
      </option>
      <option points="1">
        <name>Poor</name>
        <explanation>Poor</explanation>
      </option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
    <assessment name="self-assessment" />
  </assessments>
</openassessment>
apps/openassessment/xblock/test/data/peer_closed_scenario.xml (new file, mode 100644)
<openassessment>
  <title>Open Assessment Test</title>
  <prompt>
    Given the state of the world today, what do you think should be done to
    combat poverty? Please answer in a short essay of 200-300 words.
  </prompt>
  <rubric>
    <prompt>Read for conciseness, clarity of thought, and form.</prompt>
    <criterion>
      <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
      <prompt>How concise is it?</prompt>
      <option points="3">
        <name>ﻉซƈﻉɭɭﻉกՇ</name>
        <explanation>Extremely concise</explanation>
      </option>
      <option points="2">
        <name>Ġööḋ</name>
        <explanation>Concise</explanation>
      </option>
      <option points="1">
        <name>ק๏๏г</name>
        <explanation>Wordy</explanation>
      </option>
    </criterion>
    <criterion>
      <name>Form</name>
      <prompt>How well-formed is it?</prompt>
      <option points="3">
        <name>Good</name>
        <explanation>Good</explanation>
      </option>
      <option points="2">
        <name>Fair</name>
        <explanation>Fair</explanation>
      </option>
      <option points="1">
        <name>Poor</name>
        <explanation>Poor</explanation>
      </option>
    </criterion>
  </rubric>
  <assessments>
    <assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" due="2000-01-01T00:00:00" />
    <assessment name="self-assessment" />
  </assessments>
</openassessment>
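The only functional difference from the first scenario is the assessment configuration: the peer step requires more grading (must_grade="5", must_be_graded_by="3") and carries a due date far in the past, which is what pushes the step into its "closed" state in test_peer_closed below. A minimal sketch of the underlying check, assuming the general mechanism (the XBlock's actual date-handling code is not part of this diff):

    from datetime import datetime

    # Hypothetical illustration: a step closes once its due date has passed.
    due = datetime.strptime("2000-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S")
    is_closed = datetime.utcnow() > due  # True here, since the due date is in 2000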
apps/openassessment/xblock/test/test_grade.py
...

@@ -42,6 +42,58 @@ class TestGrade(XBlockHandlerTestCase):
         self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
         self.assertIn(u'Good job!', resp.decode('utf-8'))
+
+        # Verify that the submission and peer steps show that we're graded
+        # This isn't strictly speaking part of the grade step rendering,
+        # but we've already done all the setup to get to this point in the flow,
+        # so we might as well verify it here.
+        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
+        self.assertIn('response', resp.lower())
+        self.assertIn('complete', resp.lower())
+
+        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
+        self.assertIn('peer', resp.lower())
+        self.assertIn('complete', resp.lower())
+
+        resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
+        self.assertIn('self', resp.lower())
+        self.assertIn('complete', resp.lower())
+
+    @scenario('data/grade_scenario.xml', user_id='Omar')
+    def test_grade_waiting(self, xblock):
+        # Waiting to be assessed by a peer
+        self._create_submission_and_assessments(
+            xblock, self.SUBMISSION, self.PEERS,
+            self.ASSESSMENTS, self.ASSESSMENTS[0],
+            waiting_for_peer=True
+        )
+        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
+
+        # Verify that we're on the waiting template
+        self.assertIn(u'waiting for peer assessment', resp.decode('utf-8').lower())
+
+    @scenario('data/grade_incomplete_scenario.xml', user_id='Bunk')
+    def test_grade_incomplete_missing_self(self, xblock):
+        # Graded peers, but haven't completed self assessment
+        self._create_submission_and_assessments(
+            xblock, self.SUBMISSION, [self.PEERS[0]], [self.ASSESSMENTS[0]], None
+        )
+        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
+
+        # Verify that we're on the right template
+        self.assertIn(u'not completed', resp.decode('utf-8').lower())
+        self.assertIn(u'self assessment', resp.decode('utf-8').lower())
+
+    @scenario('data/grade_incomplete_scenario.xml', user_id='Daniels')
+    def test_grade_incomplete_missing_peer(self, xblock):
+        # Have not yet completed peer assessment
+        self._create_submission_and_assessments(
+            xblock, self.SUBMISSION, [], [], None
+        )
+        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
+
+        # Verify that we're on the right template
+        self.assertIn(u'not completed', resp.decode('utf-8').lower())
+        self.assertIn(u'peer assessment', resp.decode('utf-8').lower())

     @scenario('data/grade_scenario.xml', user_id='Greggs')
     def test_submit_feedback(self, xblock):
         # Create submissions and assessments
...

@@ -101,7 +153,10 @@ class TestGrade(XBlockHandlerTestCase):
         self.assertFalse(resp['success'])
         self.assertGreater(len(resp['msg']), 0)

-    def _create_submission_and_assessments(self, xblock, submission_text, peers, peer_assessments, self_assessment):
+    def _create_submission_and_assessments(
+        self, xblock, submission_text, peers, peer_assessments,
+        self_assessment, waiting_for_peer=False
+    ):
         """
         Create a submission and peer/self assessments, so that the user can receive a grade.
...

@@ -112,8 +167,12 @@ class TestGrade(XBlockHandlerTestCase):
             peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
             self_assessment (dict): Dict of assessment for self-assessment.
+
+        Kwargs:
+            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.
+
         Returns:
             None
         """
         # Create a submission from the user
         student_item = xblock.get_student_item_dict()
...

@@ -137,11 +196,12 @@ class TestGrade(XBlockHandlerTestCase):
             scorer_submissions.append(scorer_sub)

             # Create an assessment of the user's submission
-            peer_api.create_assessment(
-                submission['uuid'], scorer_name, assessment,
-                {'criteria': xblock.rubric_criteria},
-                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
-            )
+            if not waiting_for_peer:
+                peer_api.create_assessment(
+                    submission['uuid'], scorer_name, assessment,
+                    {'criteria': xblock.rubric_criteria},
+                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
+                )

         # Have our user make assessments (so she can get a score)
         for asmnt in peer_assessments:
...

@@ -152,7 +212,8 @@ class TestGrade(XBlockHandlerTestCase):
             )

         # Have the user submit a self-assessment (so she can get a score)
-        self_api.create_assessment(
-            submission['uuid'], student_id, self_assessment['options_selected'],
-            {'criteria': xblock.rubric_criteria}
-        )
+        if self_assessment is not None:
+            self_api.create_assessment(
+                submission['uuid'], student_id, self_assessment['options_selected'],
+                {'criteria': xblock.rubric_criteria}
+            )
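Taken together, the helper changes above let a test drive two distinct incomplete-grade states. A condensed sketch of the two call patterns, using the class fixtures (SUBMISSION, PEERS, ASSESSMENTS) exactly as they appear in the tests above:

    # Waiting state: skip peer assessments of the user's submission, so the
    # grade step renders the "waiting for peer assessment" template.
    self._create_submission_and_assessments(
        xblock, self.SUBMISSION, self.PEERS,
        self.ASSESSMENTS, self.ASSESSMENTS[0],
        waiting_for_peer=True
    )

    # Incomplete state: pass None for self_assessment to skip the self step,
    # so the grade step renders the "not completed" template.
    self._create_submission_and_assessments(
        xblock, self.SUBMISSION, [self.PEERS[0]], [self.ASSESSMENTS[0]], None
    )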
apps/openassessment/xblock/test/test_peer.py
...

@@ -164,7 +164,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Validate Peer Rendering.
         self.assertIn("available".encode('utf-8'), peer_response.body)

     @scenario('data/over_grade_scenario.xml', user_id='Bob')
     def test_turbo_grading(self, xblock):
         student_item = xblock.get_student_item_dict()
...

@@ -263,4 +263,28 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         peer_response = xblock.render_peer_assessment(request)
         self.assertIsNotNone(peer_response)
         self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
-        self.assertIn("Complete".encode('utf-8'), peer_response.body)
\ No newline at end of file
+        self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
+
+    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
+    def test_peer_unavailable(self, xblock):
+        # Before creating a submission, the peer step should be unavailable
+        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
+        self.assertIn('not available', resp.lower())
+        self.assertIn('peer-assessment', resp.lower())
+
+    @scenario('data/peer_closed_scenario.xml', user_id='Bob')
+    def test_peer_closed(self, xblock):
+        # The scenario defines a peer assessment with a due date in the past
+        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
+        self.assertIn('closed', resp.lower())
+        self.assertIn('peer assessment', resp.lower())
+
+    @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
+    def test_peer_waiting(self, xblock):
+        # Make a submission, but no peer assessments available
+        xblock.create_submission(xblock.get_student_item_dict(), "Test answer")
+
+        # Check the rendered peer step
+        resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
+        self.assertIn('waiting', resp.lower())
+        self.assertIn('peer', resp.lower())
apps/openassessment/xblock/test/test_submission.py
# -*- coding: utf-8 -*-
"""
Test submission to the OpenAssessment XBlock.
"""
...

@@ -49,7 +50,7 @@ class SubmissionTest(XBlockHandlerTestCase):
     # In Studio preview mode, the runtime sets the user ID to None
     @scenario('data/basic_scenario.xml', user_id=None)
-    def test_cannot_submit_in_preview_mode(self, xblock,):
+    def test_cannot_submit_in_preview_mode(self, xblock):
         # The Studio runtime apparently provides an anonymous student ID,
         # even though we're running in Preview mode. We should check the scope id
...

@@ -66,6 +67,15 @@ class SubmissionTest(XBlockHandlerTestCase):
     # In Studio preview mode, the runtime sets the user ID to None
     @scenario('data/over_grade_scenario.xml', user_id='Alice')
-    def test_closed_submissions(self, xblock,):
+    def test_closed_submissions(self, xblock):
         resp = self.request(xblock, 'render_submission', json.dumps(dict()))
         self.assertIn("Incomplete", resp)
+
+    @scenario('data/basic_scenario.xml', user_id='Omar Little')
+    def test_response_submitted(self, xblock):
+        # Create a submission for the user
+        xblock.create_submission(xblock.get_student_item_dict(), u'Ⱥ mȺn mᵾsŧ ħȺvɇ Ⱥ ȼøđɇ.')
+
+        # Expect that the response step is "submitted"
+        resp = self.request(xblock, 'render_submission', json.dumps(dict()))
+        self.assertIn('your response has been submitted', resp.lower())