Commit dddf9194, authored Feb 20, 2014 by David Ormsbee

Break out Assessment into parts, Rubrics into their various pieces, with serializers.
Parent: c83d5948
Showing 12 changed files with 388 additions and 115 deletions:
apps/openassessment/peer/admin.py                                  +2   -2
apps/openassessment/peer/api.py                                    +12  -15
apps/openassessment/peer/models.py                                 +78  -41
apps/openassessment/peer/serializers.py                            +94  -38
apps/openassessment/peer/test/rubric_data/empty_criteria.json      +5   -0
apps/openassessment/peer/test/rubric_data/empty_options.json       +35  -0
apps/openassessment/peer/test/rubric_data/missing_criteria.json    +3   -0
apps/openassessment/peer/test/rubric_data/missing_options.json     +33  -0
apps/openassessment/peer/test/rubric_data/no_points.json           +53  -0
apps/openassessment/peer/test/test_api.py                          +3   -4
apps/openassessment/peer/test/test_serializers.py                  +67  -15
doc/source/api.rst                                                 +3   -0
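
Taken together, the diffs below replace the single PeerEvaluation model with a small tree of objects. As a rough orientation (field lists abbreviated; the authoritative definitions are in the models.py and serializers.py diffs that follow):

    Rubric                   # content_hash, points_possible
        Criterion            # order_num, prompt, points_possible
            CriterionOption  # order_num, points, name, explanation

    Assessment               # submission, rubric, scorer_id, score_type, scored_at
        AssessmentPart       # links to the chosen CriterionOption; supplies points_earned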

apps/openassessment/peer/admin.py

 from django.contrib import admin
-from openassessment.peer.models import PeerEvaluation
+from openassessment.peer.models import Assessment

-admin.site.register(PeerEvaluation)
+admin.site.register(Assessment)

apps/openassessment/peer/api.py

@@ -6,15 +6,12 @@ the workflow for a given submission.
 """
 import copy
 import logging
-import math
 from django.db import DatabaseError
+import math

-from openassessment.peer.models import PeerEvaluation
-from openassessment.peer.serializers import PeerAssessmentSerializer
-from openassessment.peer.serializers import (
-    PeerEvaluationSerializer, content_hash_for_rubric_dict)
+from openassessment.peer.models import Assessment
+from openassessment.peer.serializers import AssessmentSerializer

 from submissions import api as submission_api
 from submissions.models import Submission, StudentItem, Score
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer

@@ -73,7 +70,7 @@ def create_assessment(
         required_assessments_for_student,
         required_assessments_for_submission,
         assessment_dict,
-        rubric_dict,
+        # rubric_dict,
         scored_at=None):
     """Creates an assessment on the given submission.

@@ -137,7 +134,8 @@ def create_assessment(
         if scored_at:
             peer_assessment["scored_at"] = scored_at
-        peer_serializer = PeerAssessmentSerializer(data=peer_assessment)
+        peer_serializer = AssessmentSerializer(data=peer_evaluation)
         if not peer_serializer.is_valid():
             raise PeerAssessmentRequestError(peer_serializer.errors)
         peer_serializer.save()

@@ -199,7 +197,7 @@ def _check_if_finished_and_create_score(student_item,
         student_item.student_id,
         required_assessments_for_student
     )
-    assessments = PeerEvaluation.objects.filter(submission=submission)
+    assessments = Assessment.objects.filter(submission=submission)
     submission_finished = assessments.count() >= required_assessments_for_submission
     scores = []
     for assessment in assessments:

@@ -264,7 +262,7 @@ def has_finished_required_evaluating(student_id, required_assessments):
     if required_assessments < 0:
         raise PeerAssessmentRequestError(
             "Required Assessment count must be a positive integer.")
-    return PeerEvaluation.objects.filter(
+    return Assessment.objects.filter(
         scorer_id=student_id
     ).count() >= required_assessments

@@ -312,8 +310,8 @@ def get_assessments(submission_id):
     """
     try:
         submission = Submission.objects.get(uuid=submission_id)
-        assessments = PeerEvaluation.objects.filter(submission=submission)
-        serializer = PeerAssessmentSerializer(assessments, many=True)
+        assessments = Assessment.objects.filter(submission=submission)
+        serializer = AssessmentSerializer(assessments, many=True)
         return serializer.data
     except DatabaseError:
         error_message = (

@@ -395,10 +393,10 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_
         "-attempt_number"
     )
     for submission in submissions:
-        assessments = PeerEvaluation.objects.filter(submission=submission)
+        assessments = Assessment.objects.filter(submission=submission)
         if assessments.count() < required_num_assessments:
             already_evaluated = False
             for assessment in assessments:
                 already_evaluated = already_evaluated or assessment.scorer_id == student_id
             if not already_evaluated:
                 return submission
\ No newline at end of file
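
For readers skimming past the rename noise above: the create_assessment hunk keeps Django REST Framework's usual validate-then-save pattern and only swaps in the new serializer. A minimal sketch of that pattern using the names from the diff (the construction of the assessment dict happens earlier in create_assessment and is not visible in the hunk):

    # Sketch only: mirrors the is_valid()/errors/save() flow shown above.
    peer_serializer = AssessmentSerializer(data=peer_assessment)
    if not peer_serializer.is_valid():
        # Invalid input surfaces as the serializer's per-field error dict.
        raise PeerAssessmentRequestError(peer_serializer.errors)
    peer_serializer.save()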

apps/openassessment/peer/models.py

+# coding=utf-8
 """
-These Models have to capture not only the state of evaluations made for certain
-submissions, but also the state of the specific rubrics at the time those
-evaluations were made. This means we have a number of little models, and that
-much of this data is immutable once created, so that we don't lose historical
-information. This also means that if you change the Rubric in a problem and
-this system is seeing that new Rubric for the first time, we're going to be
-writing a whole little tree of objects into the database. Fortunately, we only
-need to write this when we see a changed problem (rare). Performance concerns
-when reading this data is mitigated by the fact that it's easy to cache the
-entire tree of objects (since everything is immutable).
+These models have to capture not only the state of assessments made for certain
+submissions, but also the state of the rubrics at the time those assessments
+were made.
 """
+from copy import deepcopy
 from hashlib import sha1
 import json

@@ -19,43 +14,49 @@ from django.utils.timezone import now
 from submissions.models import Submission


-class PeerEvaluation(models.Model):
-    submission = models.ForeignKey(Submission)
-    points_earned = models.PositiveIntegerField(default=0)
-    points_possible = models.PositiveIntegerField(default=0)
-    scored_at = models.DateTimeField(default=now, db_index=True)
-    scorer_id = models.CharField(max_length=255, db_index=True)
-    score_type = models.CharField(max_length=2)
-    feedback = models.TextField(max_length=10000, default="")
-
-    def __repr__(self):
-        return repr(dict(
-            submission=self.submission,
-            points_earned=self.points_earned,
-            points_possible=self.points_possible,
-            scored_at=self.scored_at,
-            scorer_id=self.scorer_id,
-            score_type=self.score_type,
-            feedback=self.feedback,
-        ))
-
-    class Meta:
-        ordering = ["-scored_at"]
-
-
 class Rubric(models.Model):
-    """
-    A Rubric
-    """
+    """A Rubric contains the guidelines on how to assess a submission.
+
+    Rubrics are composed of :class:`Criterion` objects which are in turn
+    composed of :class:`CriterionOption` objects.
+
+    This model is a bit unusual in that it is the representation of the rubric
+    that an assessment was made with *at the time of assessment*. The source
+    rubric data lives in the problem definition, which is in the
+    :class:`OpenAssessmentBlock`. When an assessment is made, the XBlock passes
+    that rubric information along as well. When this Django app records the
+    :class:`Assessment`, we do a lookup to see if the Rubric model object
+    already exists (using hashing). If the Rubric is not found, we create a new
+    one with the information OpenAssessmentBlock has passed in.
+
+    .. warning::
+       Never change Rubric model data after it's written!
+
+    The little tree of objects that compose a Rubric is meant to be immutable --
+    once created, they're never updated. When the problem changes, we end up
+    creating a new Rubric instead. This makes it easy to cache and do hash-based
+    lookups.
+    """
     # SHA1 hash
     content_hash = models.CharField(max_length=40)

-    # This is actually the prompt for the whole question, which may be a
-    # complex, nested XML structure.
-    prompt = models.TextField(max_length=10000)
-
+    @property
     def points_possible(self):
-        return sum(crit.points_possible() for crit in self.criteria.all())
+        return sum(crit.points_possible for crit in self.criteria.all())
+
+    @staticmethod
+    def content_hash_for_rubric_dict(rubric_dict):
+        """
+        """
+        rubric_dict = deepcopy(rubric_dict)
+
+        # Neither "id" nor "content_hash" would count towards calculating the
+        # content_hash.
+        rubric_dict.pop("id", None)
+        rubric_dict.pop("content_hash", None)
+
+        canonical_form = json.dumps(rubric_dict, sort_keys=True)
+        return sha1(canonical_form).hexdigest()


 class Criterion(models.Model):

@@ -72,6 +73,7 @@ class Criterion(models.Model):
         ordering = ["rubric", "order_num"]

+    @property
     def points_possible(self):
         return max(option.points for option in self.options.all())

@@ -109,3 +111,38 @@ class CriterionOption(models.Model):
     def __unicode__(self):
         return repr(self)
+
+
+class Assessment(models.Model):
+    submission = models.ForeignKey(Submission)
+    rubric = models.ForeignKey(Rubric)
+    scored_at = models.DateTimeField(default=now, db_index=True)
+    scorer_id = models.CharField(max_length=40, db_index=True)
+    score_type = models.CharField(max_length=2)
+
+    # TODO: move this to its own model
+    feedback = models.TextField(max_length=10000, default="")
+
+    class Meta:
+        ordering = ["-scored_at"]
+
+    @property
+    def points_earned(self):
+        return sum(part.points_earned for part in self.parts.all())
+
+    @property
+    def points_possible(self):
+        return self.rubric.points_possible
+
+
+class AssessmentPart(models.Model):
+    assessment = models.ForeignKey(Assessment, related_name='parts')
+    option = models.ForeignKey(CriterionOption)  # TODO: no reverse
+
+    @property
+    def points_earned(self):
+        return self.option.points
+
+    @property
+    def points_possible(self):
+        return self.option.criterion.points_possible
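
The new points_* properties replace the stored points_earned/points_possible columns of the old PeerEvaluation model: scores are now derived from the option each AssessmentPart points at. A self-contained mirror of that arithmetic in plain Python, using the option values from empty_options.json purely as sample data (nothing here is stored by the app):

    # Each criterion is reduced to its options' point values.
    criteria = [
        [0, 2, 4],   # "Is the deadline realistic?" -- No / Maybe / Yes
    ]

    # Criterion.points_possible: the highest-valued option.
    points_possible_per_criterion = [max(options) for options in criteria]

    # Rubric.points_possible: sum of the criterion maxima.
    rubric_points_possible = sum(points_possible_per_criterion)

    # Assessment.points_earned: sum of the chosen option's points per part.
    chosen_option_points = [2]                 # the scorer picked "Maybe"
    assessment_points_earned = sum(chosen_option_points)

    assert rubric_points_possible == 4
    assert assessment_points_earned == 2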

apps/openassessment/peer/serializers.py

@@ -8,61 +8,118 @@ import json
 from rest_framework import serializers

 from openassessment.peer.models import (
-    Criterion, CriterionOption, PeerEvaluation, Rubric
+    Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
 )


-class PeerAssessmentSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = PeerEvaluation
-        fields = (
-            'submission',
-            'points_earned',
-            'points_possible',
-            'scored_at',
-            'scorer_id',
-            'score_type',
-            'feedback',
-        )
+class InvalidRubric(Exception):
+    def __init__(self, errors):
+        Exception.__init__(self, repr(errors))
+        self.errors = deepcopy(errors)


-class CriterionOptionSerializer(serializers.ModelSerializer):
+class NestedModelSerializer(serializers.ModelSerializer):
+    """Model Serializer that supports arbitrary nesting.
+
+    The Django REST Framework does not currently support deserialization more
+    than one level deep (so a parent and children). We want to be able to
+    create a Rubric -> Criterion -> CriterionOption hierarchy.
+
+    Much of the base logic already "just works" and serialization of arbritrary
+    depth is supported. So we just override the save_object method to
+    recursively link foreign key relations instead of doing it one level deep.
+
+    We don't touch many-to-many relationships because we don't need to for our
+    purposes.
+    """
+    def recursively_link_related(self, obj, **kwargs):
+        if getattr(obj, '_related_data', None):
+            for accessor_name, related in obj._related_data.items():
+                setattr(obj, accessor_name, related)
+                for related_obj in related:
+                    self.recursively_link_related(related_obj, **kwargs)
+            del(obj._related_data)
+
+    def save_object(self, obj, **kwargs):
+        obj.save(**kwargs)
+
+        if getattr(obj, '_m2m_data', None):
+            for accessor_name, object_list in obj._m2m_data.items():
+                setattr(obj, accessor_name, object_list)
+            del(obj._m2m_data)
+
+        self.recursively_link_related(obj, **kwargs)
+
+
+class CriterionOptionSerializer(NestedModelSerializer):
     class Meta:
         model = CriterionOption
         fields = ('order_num', 'points', 'name', 'explanation')


-class CriterionSerializer(serializers.ModelSerializer):
-    options = CriterionOptionSerializer(many=True)
+class CriterionSerializer(NestedModelSerializer):
+    options = CriterionOptionSerializer(required=True, many=True)

     class Meta:
         model = Criterion
         fields = ('order_num', 'prompt', 'options')

+    def validate_options(self, attrs, source):
+        options = attrs[source]
+        if not options:
+            raise serializers.ValidationError("Criterion must have at least one option.")
+        return attrs
+

-class RubricSerializer(serializers.ModelSerializer):
-    criteria = CriterionSerializer(many=True)
+class RubricSerializer(NestedModelSerializer):
+    criteria = CriterionSerializer(required=True, many=True)

     class Meta:
         model = Rubric
-        fields = ('id', 'content_hash', 'prompt', 'criteria')
+        fields = ('id', 'content_hash', 'criteria')

+    def validate_criteria(self, attrs, source):
+        criteria = attrs[source]
+        if not criteria:
+            raise serializers.ValidationError("Must have at least one criterion")
+        return attrs

-def content_hash_for_rubric_dict(rubric_dict):
-    """
-    It's passing in the results from a RubricSerializer, so we just have to get
-    rid of the content_hash.
-    """
-    rubric_dict = deepcopy(rubric_dict)
-
-    # Neither "id" nor "content_hash" would count towards calculating the
-    # content_hash.
-    rubric_dict.pop("id", None)
-    rubric_dict.pop("content_hash", None)
-
-    canonical_form = json.dumps(rubric_dict, sort_keys=True)
-    return sha1(canonical_form).hexdigest()
+    #def validate(self, attrs):
+        #total_possible = sum(
+        #    max(option.get("points", 0) for option in criterion["options"])
+        #    for criterion in attrs["criteria"]
+        #)
+        # total_possible = sum(crit.points_possible() for crit in attrs['criteria'])
+        # if total_possible <= 0:
+        #     raise serializers.ValidationError(
+        #         "Rubric must have > 0 possible points."
+        #     )
+
+
+class AssessmentPartSerializer(serializers.ModelSerializer):
+    option = CriterionOptionSerializer()
+
+    class Meta:
+        model = AssessmentPart
+        fields = ('option',)
+
+
+class AssessmentSerializer(serializers.ModelSerializer):
+    parts = AssessmentPartSerializer(required=True, many=True)
+
+    class Meta:
+        model = Assessment
+        fields = ('submission', 'rubric', 'scored_at', 'scorer_id', 'score_type')


-def rubric_id_for(rubric_dict):
+def rubric_from_dict(rubric_dict):
     """Given a rubric_dict, return the rubric ID we're going to submit against.

     This will create the Rubric and its children if it does not exist already.

@@ -70,7 +127,7 @@ def rubric_id_for(rubric_dict):
     rubric_dict = deepcopy(rubric_dict)

     # Calculate the hash based on the rubric content...
-    content_hash = content_hash_for_rubric_dict(rubric_dict)
+    content_hash = Rubric.content_hash_for_rubric_dict(rubric_dict)

     try:
         rubric = Rubric.objects.get(content_hash=content_hash)

@@ -78,8 +135,7 @@ def rubric_id_for(rubric_dict):
         rubric_dict["content_hash"] = content_hash
         rubric_serializer = RubricSerializer(data=rubric_dict)
         if not rubric_serializer.is_valid():
-            raise ValueError("Some better Exception here")
+            raise InvalidRubric(rubric_serializer.errors)
         rubric = rubric_serializer.save()

-    return rubric.id
+    return rubric
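
rubric_from_dict (above) is now the intended way Rubric rows get created: it hashes the incoming dict, reuses an existing Rubric when the hash matches, and otherwise round-trips the dict through RubricSerializer. The branch that handles the cache miss is collapsed out of the hunk, so the following is a sketch of the overall flow rather than the exact code; the Rubric.DoesNotExist guard is an assumption on my part:

    def rubric_from_dict_sketch(rubric_dict):
        # The hash ignores any client-supplied "id" / "content_hash" keys.
        content_hash = Rubric.content_hash_for_rubric_dict(rubric_dict)
        try:
            return Rubric.objects.get(content_hash=content_hash)
        except Rubric.DoesNotExist:          # assumed; not visible in the hunk
            rubric_dict["content_hash"] = content_hash
            rubric_serializer = RubricSerializer(data=rubric_dict)
            if not rubric_serializer.is_valid():
                raise InvalidRubric(rubric_serializer.errors)
            return rubric_serializer.save()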

apps/openassessment/peer/test/rubric_data/empty_criteria.json  (new file, mode 100644)

{
    "prompt": "Create a plan to deliver edx-tim!",
    "criteria": []
}

apps/openassessment/peer/test/rubric_data/empty_options.json  (new file, mode 100644)

{
    "prompt": "Create a plan to deliver edx-tim!",
    "criteria": [
        {
            "order_num": 0,
            "prompt": "Is the deadline realistic?",
            "options": [
                {"order_num": 0, "points": 0, "name": "No", "explanation": ""},
                {"order_num": 1, "points": 2, "name": "Maybe", "explanation": ""},
                {"order_num": 2, "points": 4, "name": "Yes", "explanation": ""}
            ]
        },
        {
            "order_num": 1,
            "prompt": "Describe the architecture.",
            "options": []
        }
    ]
}

apps/openassessment/peer/test/rubric_data/missing_criteria.json  (new file, mode 100644)

{
    "prompt": "Create a plan to deliver edx-tim!"
}

apps/openassessment/peer/test/rubric_data/missing_options.json  (new file, mode 100644)

{
    "prompt": "Create a plan to deliver edx-tim!",
    "criteria": [
        {
            "order_num": 0,
            "prompt": "Is the deadline realistic?"
        },
        {
            "order_num": 1,
            "prompt": "Describe the architecture.",
            "options": [
                {"order_num": 0, "points": 0, "name": "Crazy", "explanation": ""},
                {"order_num": 1, "points": 1, "name": "Plausible", "explanation": ""},
                {"order_num": 2, "points": 2, "name": "Solid", "explanation": ""}
            ]
        }
    ]
}

apps/openassessment/peer/test/rubric_data/no_points.json  (new file, mode 100644)

{
    "prompt": "Create a plan to deliver edx-tim!",
    "criteria": [
        {
            "order_num": 0,
            "prompt": "Is the deadline realistic?",
            "options": [
                {"order_num": 0, "points": 0, "name": "No", "explanation": ""},
                {"order_num": 1, "points": 0, "name": "Maybe", "explanation": ""},
                {"order_num": 2, "points": 0, "name": "Yes", "explanation": ""}
            ]
        },
        {
            "order_num": 1,
            "prompt": "Describe the architecture.",
            "options": [
                {"order_num": 0, "points": 0, "name": "Crazy", "explanation": ""},
                {"order_num": 1, "points": 0, "name": "Plausible", "explanation": ""},
                {"order_num": 2, "points": 0, "name": "Solid", "explanation": ""}
            ]
        }
    ]
}
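
Each fixture above is crafted to trip one specific validator in the new serializers, and the serializer tests further below assert on the error dict that InvalidRubric carries. For example, feeding empty_criteria.json through rubric_from_dict is expected (per those test assertions, not anything shown in serializers.py itself) to raise InvalidRubric with:

    {'criteria': [u'Must have at least one criterion']}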

apps/openassessment/peer/test/test_api.py

@@ -8,7 +8,7 @@ from nose.tools import raises
 from mock import patch

 from openassessment.peer import api
-from openassessment.peer.models import PeerEvaluation
+from openassessment.peer.models import Assessment
 from submissions import api as sub_api
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE

@@ -168,7 +168,7 @@ class TestApi(TestCase):
             MONDAY
         )

-    @patch.object(PeerEvaluation.objects, 'filter')
+    @patch.object(Assessment.objects, 'filter')
     @raises(sub_api.SubmissionInternalError)
     def test_error_on_get_evaluation(self, mock_filter):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)

@@ -205,4 +205,4 @@ class TestApi(TestCase):
         self.assertIsNotNone(evaluation)
         self.assertEqual(evaluation["points_earned"], sum(points_earned))
         self.assertEqual(evaluation["points_possible"], points_possible)
         self.assertEqual(evaluation["feedback"], feedback)
\ No newline at end of file

apps/openassessment/peer/test/test_serializers.py

@@ -5,32 +5,85 @@ from ddt import ddt, file_data
 from django.test import TestCase

 from openassessment.peer.models import Criterion, CriterionOption, Rubric
-from openassessment.peer.serializers import rubric_id_for
+from openassessment.peer.serializers import (
+    InvalidRubric, RubricSerializer, rubric_from_dict
+)


 def json_data(filename):
     curr_dir = os.path.dirname(__file__)
     with open(os.path.join(curr_dir, filename), "rb") as json_file:
         return json.load(json_file)


-class TestPeerSerializers(TestCase):
-
-    def test_repeat_data(self):
-        rubric_data = json_data('rubric_data/project_plan_rubric.json')
-
-        rubric_id1 = rubric_id_for(rubric_data)
-        rubric_id2 = rubric_id_for(rubric_data)
-        self.assertEqual(rubric_id1, rubric_id2)
-
-        Rubric.objects.get(id=rubric_id1).delete()
-
-    def test_db_access(self):
-        rubric_data = json_data('rubric_data/project_plan_rubric.json')
-
-        with self.assertNumQueries(4):
-            rubric_id1 = rubric_id_for(rubric_data)
-
-        with self.assertNumQueries(1):
-            rubric_id2 = rubric_id_for(rubric_data)
-
-        Rubric.objects.get(id=rubric_id1).delete()
\ No newline at end of file
+class TestRubricDeserialization(TestCase):
+
+    def test_rubric_only_created_once(self):
+        # Make sure sending the same Rubric data twice only creates one Rubric,
+        # and returns a reference to it the next time.
+        rubric_data = json_data('rubric_data/project_plan_rubric.json')
+
+        r1 = rubric_from_dict(rubric_data)
+
+        with self.assertNumQueries(1):
+            # Just the select -- shouldn't need the create queries
+            r2 = rubric_from_dict(rubric_data)
+
+        self.assertEqual(r1.id, r2.id)
+        r1.delete()
+
+    def test_rubric_requires_positive_score(self):
+        with self.assertRaises(InvalidRubric):
+            rubric_from_dict(json_data('rubric_data/no_points.json'))
+
+    def test_dont_accept_content_hash(self):
+        # we should always calculate this ourselves and not trust the user.
+        pass
+
+
+class TestCriterionDeserialization(TestCase):
+
+    def test_empty_criteria(self):
+        with self.assertRaises(InvalidRubric) as cm:
+            rubric_from_dict(json_data('rubric_data/empty_criteria.json'))
+        self.assertEqual(
+            cm.exception.errors,
+            {'criteria': [u'Must have at least one criterion']}
+        )
+
+    def test_missing_criteria(self):
+        with self.assertRaises(InvalidRubric) as cm:
+            rubric_from_dict(json_data('rubric_data/missing_criteria.json'))
+        self.assertEqual(
+            cm.exception.errors,
+            {'criteria': [u'This field is required.']}
+        )
+
+
+class TestCriterionOptionDeserialization(TestCase):
+
+    def test_empty_options(self):
+        with self.assertRaises(InvalidRubric) as cm:
+            rubric_from_dict(json_data('rubric_data/empty_options.json'))
+        self.assertEqual(
+            cm.exception.errors,
+            {
+                'criteria': [
+                    {},  # There are no errors in the first criterion
+                    {'options': [u'Criterion must have at least one option.']}
+                ]
+            }
+        )
+
+    def test_missing_options(self):
+        with self.assertRaises(InvalidRubric) as cm:
+            rubric_from_dict(json_data('rubric_data/missing_options.json'))
+        self.assertEqual(
+            cm.exception.errors,
+            {
+                'criteria': [
+                    {'options': [u'This field is required.']},
+                    {}  # No errors in second criterion
+                ]
+            }
+        )

doc/source/api.rst

@@ -48,3 +48,6 @@ Models
 ++++++

 .. automodule:: submissions.models
    :members:
+
+.. automodule:: openassessment.peer.models
+   :members: