Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
E
edx-ora2
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
edx
edx-ora2
Commits
01fa7f2a
Commit
01fa7f2a
authored
Feb 24, 2014
by
David Ormsbee
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Fix broken Peer API tests
parent
a6249bc1
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
75 additions
and
109 deletions
+75
-109
apps/openassessment/peer/test/rubric_data/empty_options.json
+2
-0
apps/openassessment/peer/test/rubric_data/missing_options.json
+2
-0
apps/openassessment/peer/test/test_api.py
+71
-84
apps/openassessment/peer/test/test_valid_evaluations.json
+0
-25
No files found.
apps/openassessment/peer/test/rubric_data/empty_options.json
View file @
01fa7f2a
...
...
@@ -3,6 +3,7 @@
"criteria"
:
[
{
"order_num"
:
0
,
"name"
:
"realistic"
,
"prompt"
:
"Is the deadline realistic?"
,
"options"
:
[
{
...
...
@@ -27,6 +28,7 @@
},
{
"order_num"
:
1
,
"name"
:
"architecture"
,
"prompt"
:
"Describe the architecture."
,
"options"
:
[
]
...
...
apps/openassessment/peer/test/rubric_data/missing_options.json
View file @
01fa7f2a
...
...
@@ -3,10 +3,12 @@
"criteria"
:
[
{
"order_num"
:
0
,
"name"
:
"realistic"
,
"prompt"
:
"Is the deadline realistic?"
},
{
"order_num"
:
1
,
"name"
:
"architecture"
,
"prompt"
:
"Describe the architecture."
,
"options"
:
[
{
...
...
apps/openassessment/peer/test/test_api.py
View file @
01fa7f2a
...
...
@@ -15,10 +15,60 @@ from submissions import api as sub_api
from
submissions.models
import
Submission
from
submissions.tests.test_api
import
STUDENT_ITEM
,
ANSWER_ONE
# Possible points: 14
RUBRIC_DICT
=
{
"criteria"
:
[
{
"name"
:
"secret"
,
"prompt"
:
"Did the writer keep it secret?"
,
"options"
:
[
{
"name"
:
"no"
,
"points"
:
"0"
,
"explanation"
:
""
},
{
"name"
:
"yes"
,
"points"
:
"1"
,
"explanation"
:
""
},
]
},
{
"name"
:
u"ⓢⓐⓕⓔ"
,
"prompt"
:
"Did the writer keep it safe?"
,
"options"
:
[
{
"name"
:
"no"
,
"points"
:
"0"
,
"explanation"
:
""
},
{
"name"
:
"yes"
,
"points"
:
"1"
,
"explanation"
:
""
},
]
},
{
"name"
:
"giveup"
,
"prompt"
:
"How willing is the writer to give up the ring?"
,
"options"
:
[
{
"name"
:
"unwilling"
,
"points"
:
"0"
,
"explanation"
:
"Likely to use force to keep it."
},
{
"name"
:
"reluctant"
,
"points"
:
"3"
,
"explanation"
:
"May argue, but will give it up voluntarily."
},
{
"name"
:
"eager"
,
"points"
:
"10"
,
"explanation"
:
"Happy to give it up."
}
]
},
{
"name"
:
"singing"
,
"prompt"
:
"Did the writer break into tedious elvish lyrics?"
,
"options"
:
[
{
"name"
:
"no"
,
"points"
:
"2"
,
"explanation"
:
""
},
{
"name"
:
"yes"
,
"points"
:
"0"
,
"explanation"
:
""
}
]
},
]
}
# Answers are against RUBRIC_DICT -- this is worth 6 points
ASSESSMENT_DICT
=
dict
(
points_earned
=
[
1
,
0
,
3
,
2
],
points_possible
=
14
,
feedback
=
"Your submission was thrilling."
,
feedback
=
u"这是中国"
,
options_selected
=
{
"secret"
:
"yes"
,
u"ⓢⓐⓕⓔ"
:
"no"
,
...
...
@@ -27,56 +77,6 @@ ASSESSMENT_DICT = dict(
}
)
RUBRIC_DICT
=
dict
(
criteria
=
[
dict
(
name
=
"secret"
,
prompt
=
"Did the writer keep it secret?"
,
options
=
[
dict
(
name
=
"no"
,
points
=
"0"
,
explanation
=
""
),
dict
(
name
=
"yes"
,
points
=
"1"
,
explanation
=
""
)
]
),
dict
(
name
=
u"ⓢⓐⓕⓔ"
,
prompt
=
"Did the writer keep it safe?"
,
options
=
[
dict
(
name
=
"no"
,
points
=
"0"
,
explanation
=
""
),
dict
(
name
=
"yes"
,
points
=
"1"
,
explanation
=
""
)
]
),
dict
(
name
=
"giveup"
,
prompt
=
"How willing is the writer to give up the ring?"
,
options
=
[
dict
(
name
=
"unwilling"
,
points
=
"0"
,
explanation
=
"Likely to use force to keep it."
),
dict
(
name
=
"reluctant"
,
points
=
"3"
,
explanation
=
"May argue, but will give it up voluntarily."
),
dict
(
name
=
"eager"
,
points
=
"10"
,
explanation
=
"Happy to give it up."
)
]
),
dict
(
name
=
"singing"
,
prompt
=
"Did the writer break into tedious elvish lyrics?"
,
options
=
[
dict
(
name
=
"no"
,
points
=
"2"
,
explanation
=
""
),
dict
(
name
=
"yes"
,
points
=
"0"
,
explanation
=
""
)
]
),
]
)
REQUIRED_GRADED
=
5
REQUIRED_GRADED_BY
=
3
...
...
@@ -88,9 +88,9 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
@ddt
class
TestApi
(
TestCase
):
def
test_create_
evaluation
(
self
):
def
test_create_
assessment
(
self
):
submission
=
sub_api
.
create_submission
(
STUDENT_ITEM
,
ANSWER_ONE
)
evaluation
=
peer_api
.
create_assessment
(
assessment
=
peer_api
.
create_assessment
(
submission
[
"uuid"
],
STUDENT_ITEM
[
"student_id"
],
REQUIRED_GRADED
,
...
...
@@ -98,10 +98,11 @@ class TestApi(TestCase):
ASSESSMENT_DICT
,
RUBRIC_DICT
,
)
self
.
_assert_evaluation
(
evaluation
,
**
ASSESSMENT_DICT
)
self
.
assertEqual
(
assessment
[
"points_earned"
],
6
)
self
.
assertEqual
(
assessment
[
"points_possible"
],
14
)
@file_data
(
'
test_valid_evaluation
s.json'
)
def
test_get_
evaluation
s
(
self
,
assessment_dict
):
@file_data
(
'
valid_assessment
s.json'
)
def
test_get_
assessment
s
(
self
,
assessment_dict
):
submission
=
sub_api
.
create_submission
(
STUDENT_ITEM
,
ANSWER_ONE
)
peer_api
.
create_assessment
(
submission
[
"uuid"
],
...
...
@@ -111,12 +112,11 @@ class TestApi(TestCase):
assessment_dict
,
RUBRIC_DICT
,
)
evaluations
=
peer_api
.
get_assessments
(
submission
[
"uuid"
])
self
.
assertEqual
(
1
,
len
(
evaluations
))
self
.
_assert_evaluation
(
evaluations
[
0
],
**
assessment_dict
)
assessments
=
peer_api
.
get_assessments
(
submission
[
"uuid"
])
self
.
assertEqual
(
1
,
len
(
assessments
))
@file_data
(
'
test_valid_evaluation
s.json'
)
def
test_get_
evaluation
s_with_date
(
self
,
assessment_dict
):
@file_data
(
'
valid_assessment
s.json'
)
def
test_get_
assessment
s_with_date
(
self
,
assessment_dict
):
submission
=
sub_api
.
create_submission
(
STUDENT_ITEM
,
ANSWER_ONE
)
peer_api
.
create_assessment
(
submission
[
"uuid"
],
...
...
@@ -127,12 +127,11 @@ class TestApi(TestCase):
RUBRIC_DICT
,
MONDAY
)
evaluations
=
peer_api
.
get_assessments
(
submission
[
"uuid"
])
self
.
assertEqual
(
1
,
len
(
evaluations
))
self
.
_assert_evaluation
(
evaluations
[
0
],
**
assessment_dict
)
self
.
assertEqual
(
evaluations
[
0
][
"scored_at"
],
MONDAY
)
assessments
=
peer_api
.
get_assessments
(
submission
[
"uuid"
])
self
.
assertEqual
(
1
,
len
(
assessments
))
self
.
assertEqual
(
assessments
[
0
][
"scored_at"
],
MONDAY
)
def
test_peer_
evaluation
_workflow
(
self
):
def
test_peer_
assessment
_workflow
(
self
):
tim
=
self
.
_create_student_and_submission
(
"Tim"
,
"Tim's answer"
)
bob
=
self
.
_create_student_and_submission
(
"Bob"
,
"Bob's answer"
)
sally
=
self
.
_create_student_and_submission
(
"Sally"
,
"Sally's answer"
)
...
...
@@ -168,7 +167,7 @@ class TestApi(TestCase):
self
.
assertTrue
(
peer_api
.
has_finished_required_evaluating
(
"Tim"
,
REQUIRED_GRADED
))
# Tim should not have a score, because his submission does not have
# enough
evaluation
s.
# enough
assessment
s.
scores
=
sub_api
.
get_score
(
STUDENT_ITEM
)
self
.
assertFalse
(
scores
)
...
...
@@ -186,7 +185,7 @@ class TestApi(TestCase):
scores
=
sub_api
.
get_score
(
STUDENT_ITEM
)
self
.
assertTrue
(
scores
)
self
.
assertEqual
(
6
,
scores
[
0
][
"points_earned"
])
self
.
assertEqual
(
1
2
,
scores
[
0
][
"points_possible"
])
self
.
assertEqual
(
1
4
,
scores
[
0
][
"points_possible"
])
@raises
(
peer_api
.
PeerAssessmentRequestError
)
...
...
@@ -212,13 +211,10 @@ class TestApi(TestCase):
self
.
_create_student_and_submission
(
"Tim"
,
"Tim's answer"
,
MONDAY
)
peer_api
.
get_submission_to_assess
(
STUDENT_ITEM
,
3
)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object
(
Submission
.
objects
,
'get'
)
@raises
(
peer_api
.
PeerAssessmentInternalError
)
def
test_error_on_
evaluation
_creation
(
self
,
mock_filter
):
def
test_error_on_
assessment
_creation
(
self
,
mock_filter
):
mock_filter
.
side_effect
=
DatabaseError
(
"Bad things happened"
)
submission
=
sub_api
.
create_submission
(
STUDENT_ITEM
,
ANSWER_ONE
)
peer_api
.
create_assessment
(
...
...
@@ -233,7 +229,7 @@ class TestApi(TestCase):
@patch.object
(
Assessment
.
objects
,
'filter'
)
@raises
(
sub_api
.
SubmissionInternalError
)
def
test_error_on_get_
evaluation
(
self
,
mock_filter
):
def
test_error_on_get_
assessment
(
self
,
mock_filter
):
submission
=
sub_api
.
create_submission
(
STUDENT_ITEM
,
ANSWER_ONE
)
peer_api
.
create_assessment
(
submission
[
"uuid"
],
...
...
@@ -263,12 +259,3 @@ class TestApi(TestCase):
new_student_item
=
STUDENT_ITEM
.
copy
()
new_student_item
[
"student_id"
]
=
student
return
sub_api
.
create_submission
(
new_student_item
,
answer
,
date
)
def
_assert_evaluation
(
self
,
evaluation
,
points_earned
,
points_possible
,
feedback
,
options_selected
):
print
evaluation
self
.
assertIsNotNone
(
evaluation
)
self
.
assertEqual
(
evaluation
[
"points_earned"
],
sum
(
points_earned
))
self
.
assertEqual
(
evaluation
[
"points_possible"
],
points_possible
)
# self.assertEqual(evaluation["feedback"], feedback)
apps/openassessment/peer/test/test_valid_evaluations.json
deleted
100644 → 0
View file @
a6249bc1
{
"unicode_evaluation"
:
{
"points_earned"
:
[
10
,
0
,
24
,
36
],
"points_possible"
:
12
,
"feedback"
:
"这是中国"
,
"options_selected"
:
{
"secret"
:
"yes"
,
"ⓢⓐⓕⓔ"
:
"no"
,
"giveup"
:
"reluctant"
,
"singing"
:
"no"
}
},
"basic_evaluation"
:
{
"points_earned"
:
[
1
,
0
,
3
,
2
],
"points_possible"
:
12
,
"feedback"
:
"Your submission was thrilling."
,
"options_selected"
:
{
"secret"
:
"yes"
,
"ⓢⓐⓕⓔ"
:
"no"
,
"giveup"
:
"reluctant"
,
"singing"
:
"no"
}
}
}
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment