edx / edx-ora2
Commit 7213266f, authored Feb 10, 2014 by Stephen Sanchez

Merge pull request #31 from edx/sanchez/xblock_peer_eval

Sanchez/xblock peer eval

Parents: d10282c9, 8e20f9e4

Showing 14 changed files with 608 additions and 175 deletions.
apps/openassessment/peer/api.py                            +123  -12
apps/openassessment/peer/templates/evaluations.html          +2   -0
apps/openassessment/peer/test/test_api.py                   +85  -17
apps/openassessment/xblock/openassessmentblock.py          +143  -95
apps/openassessment/xblock/static/html/oa_rubric.html        +7   -5
apps/openassessment/xblock/static/js/src/oa_assessment.js   +10   -1
apps/openassessment/xblock/test/test_openassessment.py      +45   -5
apps/submissions/api.py                                    +118   -8
apps/submissions/tests/test_api.py                          +58  -28
manage.py                                                    +1   -1
requirements/base.txt                                        +0   -1
requirements/dev.txt                                         +1   -0
settings/base.py                                             +0   -2
settings/dev.py                                             +15   -0
apps/openassessment/peer/api.py

@@ -8,11 +8,13 @@ import copy
 import logging

 from django.db import DatabaseError
+import math

 from openassessment.peer.models import PeerEvaluation
 from openassessment.peer.serializers import PeerEvaluationSerializer
-from submissions.models import Submission, StudentItem
-from submissions.serializers import SubmissionSerializer
+from submissions import api as submission_api
+from submissions.models import Submission, StudentItem, Score
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer

 logger = logging.getLogger(__name__)

@@ -62,8 +64,13 @@ class PeerEvaluationInternalError(PeerEvaluationError):
     pass


-def create_evaluation(submission_uuid, scorer_id, assessment_dict,
-                      scored_at=None):
+def create_evaluation(
+        submission_uuid,
+        scorer_id,
+        required_evaluations_for_student,
+        required_evaluations_for_submission,
+        assessment_dict,
+        scored_at=None):
     """Creates an evaluation on the given submission.

     Evaluations are created based on feedback associated with a particular

@@ -75,6 +82,10 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
             Submission model.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
+        required_evaluations_for_student (int): The number of evaluations
+            required for the student to receive a score for their submission.
+        required_evaluations_for_submission (int): The number of evaluations
+            required on the submission for it to be scored.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
         scored_at (datetime): Optional argument to override the time in which
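For orientation, a minimal sketch of a call against the widened signature (values are illustrative and mirror the REQUIRED_GRADED / REQUIRED_GRADED_BY constants introduced in the test changes below; the submission is assumed to come from submissions.api.create_submission):

    from openassessment.peer import api as peer_api

    # Hypothetical assessment payload; per the docstring it carries
    # points_earned, points_possible, and feedback.
    assessment = {
        "points_earned": 6,
        "points_possible": 12,
        "feedback": "Your submission was thrilling.",
    }

    evaluation = peer_api.create_evaluation(
        submission["uuid"],   # submission being assessed (from submissions.api)
        "Tim",                # scorer_id
        5,                    # required_evaluations_for_student (REQUIRED_GRADED)
        3,                    # required_evaluations_for_submission (REQUIRED_GRADED_BY)
        assessment,
    )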
@@ -126,6 +137,35 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         if not peer_serializer.is_valid():
             raise PeerEvaluationRequestError(peer_serializer.errors)
         peer_serializer.save()
+
+        # Check if the submission is finished and its Author has graded enough.
+        student_item = submission.student_item
+        _check_if_finished_and_create_score(
+            student_item,
+            submission,
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
+        # Check if the grader is finished and has enough evaluations
+        scorer_item = StudentItem.objects.get(
+            student_id=scorer_id,
+            item_id=student_item.item_id,
+            course_id=student_item.course_id,
+            item_type=student_item.item_type
+        )
+
+        scorer_submissions = Submission.objects.filter(
+            student_item=scorer_item
+        ).order_by("-attempt_number")
+
+        _check_if_finished_and_create_score(
+            scorer_item,
+            scorer_submissions[0],
+            required_evaluations_for_student,
+            required_evaluations_for_submission
+        )
+
         return peer_serializer.data
     except DatabaseError:
         error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(

@@ -137,6 +177,57 @@ def create_evaluation(submission_uuid, scorer_id, assessment_dict,
         raise PeerEvaluationInternalError(error_message)


+def _check_if_finished_and_create_score(
+        student_item,
+        submission,
+        required_evaluations_for_student,
+        required_evaluations_for_submission):
+    """Basic function for checking if a student is finished with peer workflow.
+
+    Checks if the student is finished with the peer evaluation workflow. If the
+    student already has a final grade calculated, there is no need to proceed.
+    If they do not have a grade, the student has a final grade calculated.
+
+    """
+    if Score.objects.filter(student_item=student_item):
+        return
+    finished_evaluating = has_finished_required_evaluating(
+        student_item.student_id,
+        required_evaluations_for_student
+    )
+    evaluations = PeerEvaluation.objects.filter(submission=submission)
+    submission_finished = evaluations.count() >= required_evaluations_for_submission
+    scores = []
+    for evaluation in evaluations:
+        scores.append(evaluation.points_earned)
+    if finished_evaluating and submission_finished:
+        submission_api.set_score(
+            StudentItemSerializer(student_item).data,
+            SubmissionSerializer(submission).data,
+            _calculate_final_score(scores),
+            evaluations[0].points_possible
+        )
+
+
+def _calculate_final_score(scores):
+    """Final grade is calculated using integer values, rounding up.
+
+    If there is a true median score, it is returned. If there are two median
+    values, the average of those two values is returned, rounded up to the
+    greatest integer value.
+
+    """
+    total_scores = len(scores)
+    scores = sorted(scores)
+    median = int(math.ceil(total_scores / float(2)))
+    if total_scores == 0:
+        return 0
+    elif total_scores % 2:
+        return scores[median - 1]
+    else:
+        return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))
+
+
 def has_finished_required_evaluating(student_id, required_evaluations):
     """Check if a student still needs to evaluate more submissions
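A worked example of the rounding rule described in the _calculate_final_score docstring, using the same score lists as test_choose_score later in this commit:

    _calculate_final_score([])                            # -> 0  (no evaluations yet)
    _calculate_final_score([5])                           # -> 5  (single score is the median)
    _calculate_final_score([5, 6])                        # -> 6  (ceil((5 + 6) / 2))
    _calculate_final_score([5, 6, 12, 16, 22, 53])        # -> 14 (ceil((12 + 16) / 2))
    _calculate_final_score([5, 6, 12, 16, 22, 53, 102])   # -> 16 (true median of 7 values)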
@@ -162,7 +253,7 @@ def has_finished_required_evaluating(student_id, required_evaluations):
             while evaluating this workflow rule.

     Examples:
-        >>> has_finished_required_evaluating("Tim")
+        >>> has_finished_required_evaluating("Tim", 3)
        True

    """

@@ -228,7 +319,7 @@ def get_evaluations(submission_id):
         raise PeerEvaluationInternalError(error_message)


-def get_submission_to_evaluate(student_item_dict):
+def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
     """Get a submission to peer evaluate.

     Retrieves a submission for evaluation for the given student_item. This will

@@ -243,6 +334,8 @@ def get_submission_to_evaluate(student_item_dict):
             item_id, course_id, and item_type, used to identify the unique
             question for the review, while the student_id is used to explicitly
             avoid giving the student their own submission.
+        required_num_evaluations (int): The number of evaluations a submission
+            requires before it has completed the peer evaluation process.

     Returns:
         dict: A peer submission for evaluation. This contains a 'student_item',

@@ -262,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict):
        >>>     item_type="type_one",
        >>>     student_id="Bob",
        >>> )
-       >>> get_submission_to_evaluate(student_item_dict)
+       >>> get_submission_to_evaluate(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,

@@ -279,12 +372,29 @@ def get_submission_to_evaluate(student_item_dict):
         item_id=student_item_dict["item_id"],
     ).exclude(student_id=student_item_dict["student_id"])
-    # TODO: We need a priority queue.
-    submission = Submission.objects.filter(
-        student_item__in=student_items
-    ).order_by("submitted_at", "-attempt_number")[:1]
+    submission = _get_first_submission_not_evaluated(
+        student_items,
+        student_item_dict["student_id"],
+        required_num_evaluations
+    )
     if not submission:
         raise PeerEvaluationWorkflowError(
             "There are no submissions available for evaluation."
         )
-    return SubmissionSerializer(submission[0]).data
+    return SubmissionSerializer(submission).data
+
+
+def _get_first_submission_not_evaluated(student_items, student_id, required_num_evaluations):
+    # TODO: We need a priority queue.
+    submissions = Submission.objects.filter(student_item__in=student_items).order_by(
+        "submitted_at",
+        "-attempt_number"
+    )
+    for submission in submissions:
+        evaluations = PeerEvaluation.objects.filter(submission=submission)
+        if evaluations.count() < required_num_evaluations:
+            already_evaluated = False
+            for evaluation in evaluations:
+                already_evaluated = already_evaluated or evaluation.scorer_id == student_id
+            if not already_evaluated:
+                return submission
\ No newline at end of file
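Taken together, the reworked peer API supports a flow along these lines (a sketch with placeholder identifiers; the required-evaluation counts echo the must_grade / must_be_graded_by settings used elsewhere in this commit):

    from openassessment.peer import api as peer_api

    # Identifies the grader and the question being graded
    # (see submissions.api for the expected keys).
    student_item_dict = {
        "student_id": "Bob",
        "course_id": "TestCourse",
        "item_id": "u_3",
        "item_type": "openassessment",
    }

    # Pull a submission that still needs evaluations, is not Bob's own work,
    # and has not already been evaluated by Bob.
    peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, 3)

    # Record Bob's assessment. Once both the grader and the submission have
    # met their required counts, a final Score is created automatically.
    peer_api.create_evaluation(
        peer_submission["uuid"],
        student_item_dict["student_id"],
        5,   # required_evaluations_for_student
        3,   # required_evaluations_for_submission
        {"points_earned": 6, "points_possible": 12, "feedback": "Nice work."},
    )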
apps/openassessment/peer/templates/evaluations.html

@@ -6,6 +6,7 @@
         <th>Submission UUID</th>
         <th>Points Earned</th>
         <th>Points Possible</th>
+        <th>Scored By</th>
         <th>Scored At</th>
         <th>Score Type</th>
         <th>Feedback</th>

@@ -15,6 +16,7 @@
         <td>{{ evaluation.points_earned }}</td>
         <td>{{ evaluation.points_possible }}</td>
+        <td>{{ evaluation.scorer_id }}</td>
        <td>{{ evaluation.scored_at }}</td>
        <td>{{ evaluation.score_type }}</td>
        <td>{{ evaluation.feedback }}</td>
    </tr>
apps/openassessment/peer/test/test_api.py

@@ -9,7 +9,7 @@ from mock import patch
 from openassessment.peer import api
 from openassessment.peer.models import PeerEvaluation
-from submissions.api import create_submission
+from submissions import api as sub_api
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE

@@ -19,6 +19,9 @@ ASSESSMENT_DICT = dict(
     feedback="Your submission was thrilling.",
 )

+REQUIRED_GRADED = 5
+REQUIRED_GRADED_BY = 3
+
 MONDAY = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
 TUESDAY = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
 WEDNESDAY = datetime.datetime(2007, 9, 15, 0, 0, 0, 0, pytz.UTC)
@@ -28,20 +31,24 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
 @ddt
 class TestApi(TestCase):

     def test_create_evaluation(self):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         evaluation = api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT
         )
         self._assert_evaluation(evaluation, **ASSESSMENT_DICT)

     @file_data('test_valid_evaluations.json')
     def test_get_evaluations(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             assessment_dict
         )
         evaluations = api.get_evaluations(submission["uuid"])

@@ -50,10 +57,12 @@ class TestApi(TestCase):
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations_with_date(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             assessment_dict,
             MONDAY
         )
@@ -62,17 +71,61 @@ class TestApi(TestCase):
         self._assert_evaluation(evaluations[0], **assessment_dict)
         self.assertEqual(evaluations[0]["scored_at"], MONDAY)

-    def test_student_finished_evaluating(self):
+    def test_peer_evaluation_workflow(self):
+        tim = self._create_student_and_submission("Tim", "Tim's answer")
         bob = self._create_student_and_submission("Bob", "Bob's answer")
         sally = self._create_student_and_submission("Sally", "Sally's answer")
         jim = self._create_student_and_submission("Jim", "Jim's answer")
+        buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
+        xander = self._create_student_and_submission("Xander", "Xander's answer")
+
+        # Tim should not have a score, because he has not evaluated enough
+        # peer submissions.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        api.create_evaluation(sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+
+        # Tim should not have a score, because his submission does not have
+        # enough evaluations.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        api.create_evaluation(tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        api.create_evaluation(tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        api.create_evaluation(tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+
+        # Tim has met the critera, and should now have a score.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertTrue(scores)
+        self.assertEqual(6, scores[0]["points_earned"])
+        self.assertEqual(12, scores[0]["points_possible"])
-
-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
-        api.create_evaluation(bob["uuid"], "Tim", ASSESSMENT_DICT)
-        api.create_evaluation(sally["uuid"], "Tim", ASSESSMENT_DICT)
-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
-        api.create_evaluation(jim["uuid"], "Tim", ASSESSMENT_DICT)
-        self.assertTrue(api.has_finished_required_evaluating("Tim", 3))

     @raises(api.PeerEvaluationRequestError)
     def test_bad_configuration(self):
@@ -86,7 +139,7 @@ class TestApi(TestCase):
         )
         self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)

-        submission = api.get_submission_to_evaluate(STUDENT_ITEM)
+        submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], u"Bob's answer")
         self.assertEqual(submission["student_item"], 2)

@@ -95,7 +148,7 @@ class TestApi(TestCase):
     @raises(api.PeerEvaluationWorkflowError)
     def test_no_submissions_to_evaluate_for_tim(self):
         self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        api.get_submission_to_evaluate(STUDENT_ITEM)
+        api.get_submission_to_evaluate(STUDENT_ITEM, 3)

     """
     Some Error Checking Tests against DB failures.

@@ -105,32 +158,47 @@ class TestApi(TestCase):
     @raises(api.PeerEvaluationInternalError)
     def test_error_on_evaluation_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             MONDAY
         )

     @patch.object(PeerEvaluation.objects, 'filter')
-    @raises(api.PeerEvaluationInternalError)
+    @raises(sub_api.SubmissionInternalError)
     def test_error_on_get_evaluation(self, mock_filter):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
+            REQUIRED_GRADED,
+            REQUIRED_GRADED_BY,
             ASSESSMENT_DICT,
             MONDAY
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
         api.get_evaluations(submission["uuid"])

+    def test_choose_score(self):
+        self.assertEqual(0, api._calculate_final_score([]))
+        self.assertEqual(5, api._calculate_final_score([5]))
+        # average of 5, 6, rounded down.
+        self.assertEqual(6, api._calculate_final_score([5, 6]))
+        self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
+        self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
+        self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
+        self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
+
     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
         new_student_item["student_id"] = student
-        return create_submission(new_student_item, answer, date)
+        return sub_api.create_submission(new_student_item, answer, date)

     def _assert_evaluation(self, evaluation, points_earned, points_possible, feedback):
apps/openassessment/xblock/openassessmentblock.py

@@ -3,8 +3,10 @@
 import pkg_resources

 from mako.template import Template

+from openassessment.peer.api import PeerEvaluationWorkflowError
 from submissions import api
+from openassessment.peer import api as peer_api

 from xblock.core import XBlock
 from xblock.fields import List, Scope, String

@@ -13,6 +15,99 @@ from xblock.fragment import Fragment
 mako_default_filters = ['unicode', 'h', 'trim']

+EXAMPLE_POVERTY_RUBRIC = (
+    "OpenAssessmentBlock Poverty Rubric",
+    """
+    <vertical_demo>
+        <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+            <prompt>
+                Given the state of the world today, what do you think should be done to
+                combat poverty? Please answer in a short essay of 200-300 words.
+            </prompt>
+            <rubric>
+                Read for conciseness, clarity of thought, and form.
+                <criterion name="concise">
+                    How concise is it?
+                    <option val="0">Neal Stephenson (late)</option>
+                    <option val="1">HP Lovecraft</option>
+                    <option val="3">Robert Heinlein</option>
+                    <option val="4">Neal Stephenson (early)</option>
+                    <option val="5">Earnest Hemingway</option>
+                </criterion>
+                <criterion name="clearheaded">
+                    How clear is the thinking?
+                    <option val="0">Yogi Berra</option>
+                    <option val="1">Hunter S. Thompson</option>
+                    <option val="2">Robert Heinlein</option>
+                    <option val="3">Isaac Asimov</option>
+                    <option val="10">Spock</option>
+                </criterion>
+                <criterion name="form">
+                    Lastly, how is it's form? Punctuation, grammar, and spelling all count.
+                    <option val="0">lolcats</option>
+                    <option val="1">Facebook</option>
+                    <option val="2">Reddit</option>
+                    <option val="3">metafilter</option>
+                    <option val="4">Usenet, 1996</option>
+                    <option val="5">The Elements of Style</option>
+                </criterion>
+            </rubric>
+            <evals>
+                <peereval start="2014-12-20T19:00-7:00"
+                          due="2014-12-21T22:22-7:00"
+                          must_grade="5"
+                          must_be_graded_by="3" />
+                <selfeval/>
+            </evals>
+        </openassessment>
+    </vertical_demo>
+    """
+)
+
+EXAMPLE_CENSORSHIP_RUBRIC = (
+    "OpenAssessmentBlock Censorship Rubric",
+    """
+    <vertical_demo>
+        <openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+            <prompt>
+                What do you think about censorship in libraries? I think it's pretty great.
+            </prompt>
+            <rubric>
+                Read for conciseness, clarity of thought, and form.
+                <criterion name="concise">
+                    How concise is it?
+                    <option val="0">The Bible</option>
+                    <option val="1">Earnest Hemingway</option>
+                    <option val="3">Matsuo Basho</option>
+                </criterion>
+                <criterion name="clearheaded">
+                    How clear is the thinking?
+                    <option val="0">Eric</option>
+                    <option val="1">John</option>
+                    <option val="2">Ian</option>
+                </criterion>
+                <criterion name="form">
+                    Lastly, how is it's form? Punctuation, grammar, and spelling all count.
+                    <option val="0">IRC</option>
+                    <option val="1">Real Email</option>
+                    <option val="2">Old-timey letters</option>
+                </criterion>
+            </rubric>
+            <evals>
+                <selfeval/>
+                <peereval start="2014-12-20T19:00-7:00"
+                          due="2014-12-21T22:22-7:00"
+                          must_grade="5"
+                          must_be_graded_by="3" />
+            </evals>
+        </openassessment>
+    </vertical_demo>
+    """
+)
+
 class OpenAssessmentBlock(XBlock):
     """Displays a question and gives an area where students can compose a response."""

@@ -47,7 +142,7 @@ class OpenAssessmentBlock(XBlock):
     def _get_student_item_dict(self):
         """Create a student_item_dict from our surrounding context.

         See also: submissions.api for details.
         """
         item_id, student_id = self._get_xblock_trace()

@@ -69,18 +164,28 @@ class OpenAssessmentBlock(XBlock):
         trace = self._get_xblock_trace()
         student_item_dict = self._get_student_item_dict()
         previous_submissions = api.get_submissions(student_item_dict)
-        if previous_submissions:  # XXX: until workflow better, move on w/ prev submit
+        try:
+            # HACK: Replace with proper workflow.
+            peer_eval = self._hack_get_peer_eval()
+            peer_submission = peer_api.get_submission_to_evaluate(
+                student_item_dict, peer_eval["must_be_graded_by"])
+        except PeerEvaluationWorkflowError:
+            peer_submission = False
+
+        if previous_submissions and peer_submission:
+            # XXX: until workflow better, move on w/ prev submit
             html = Template(load("static/html/oa_rubric.html"),
                             default_filters=mako_default_filters,
                             input_encoding='utf-8',
                            )
-            frag = Fragment(html.render_unicode(xblock_trace=trace,
+            frag = Fragment(html.render_unicode(xblock_trace=trace,
+                                                peer_submission=peer_submission,
                                                 rubric_instructions=self.rubric_instructions,
                                                 rubric_criteria=self.rubric_criteria,
                                                 ))
             frag.add_css(load("static/css/openassessment.css"))
             frag.add_javascript(load("static/js/src/oa_assessment.js"))
             frag.initialize_js('OpenAssessmentBlock')
+        elif previous_submissions:
+            return Fragment(u"<div>There are no submissions to review.</div>")
         else:
             # XXX: until workflow better, submit until submitted
             html = Template(load("static/html/oa_submission.html"),
                             default_filters=mako_default_filters,
@@ -92,10 +197,42 @@ class OpenAssessmentBlock(XBlock):
             frag.initialize_js('OpenAssessmentBlock')
         return frag

+    def _hack_get_peer_eval(self):
+        # HACK: Forcing Peer Eval, we'll get the Eval config.
+        for next_eval in self.rubric_evals:
+            if next_eval["type"] == "peereval":
+                return next_eval
+
     @XBlock.json_handler
     def assess(self, data, suffix=''):
+        # HACK: Replace with proper workflow.
+        peer_eval = self._hack_get_peer_eval()
         """Place an assessment into Openassessment system"""
-        return (False, "Assessment handler is not implemented yet.")
+        # TODO: We're not doing points possible in a good way, need to refactor
+        # the rubric criteria type, Joe has thoughts on this.
+        student_item_dict = self._get_student_item_dict()
+        points_possible = sum(
+            max(int(val) for val in criteria if val.isdigit())
+            for criteria in self.rubric_criteria
+        )
+        assessment_dict = {
+            "points_earned": map(int, data["points_earned"]),
+            "points_possible": points_possible,
+            "feedback": "Not yet implemented.",
+        }
+
+        evaluation = peer_api.create_evaluation(
+            data["submission_uuid"],
+            student_item_dict["student_id"],
+            int(peer_eval["must_grade"]),
+            int(peer_eval["must_be_graded_by"]),
+            assessment_dict
+        )
+
+        # Temp kludge until we fix JSON serialization for datetime
+        evaluation["scored_at"] = str(evaluation["scored_at"])
+        return evaluation, "Success"

     @XBlock.json_handler
     def submit(self, data, suffix=''):
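To make the points_possible expression above concrete: each entry in rubric_criteria is a dict whose digit-string keys are the option values (see oa_rubric.html below), so the handler takes the highest option per criterion and sums them. A sketch with simplified stand-in dicts, not the XBlock's actual parsing output:

    rubric_criteria = [
        {"name": "concise", "instructions": "How concise is it?",
         "0": "Neal Stephenson (late)", "1": "HP Lovecraft", "3": "Robert Heinlein",
         "4": "Neal Stephenson (early)", "5": "Earnest Hemingway"},
        {"name": "clearheaded", "instructions": "How clear is the thinking?",
         "0": "Yogi Berra", "1": "Hunter S. Thompson", "2": "Robert Heinlein",
         "3": "Isaac Asimov", "10": "Spock"},
        {"name": "form", "instructions": "How is its form?",
         "0": "lolcats", "1": "Facebook", "2": "Reddit", "3": "metafilter",
         "4": "Usenet, 1996", "5": "The Elements of Style"},
    ]

    points_possible = sum(
        max(int(val) for val in criteria if val.isdigit())   # iterates the dict keys
        for criteria in rubric_criteria
    )
    # per-criterion maxima are 5, 10, and 5, so points_possible == 20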
@@ -156,98 +293,9 @@ class OpenAssessmentBlock(XBlock):
             block.runtime.add_node_as_child(block, child, id_generator)
         return block

     # Arbitrary attributes can be defined on the
     @staticmethod
     def workbench_scenarios():
         """A canned scenario for display in the workbench."""
-        return [
-            ("OpenAssessmentBlock Poverty Rubric",
-             """
-             <vertical_demo>
-             <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
-             <prompt>
-                 Given the state of the world today, what do you think should be done to
-                 combat poverty? Please answer in a short essay of 200-300 words.
-             </prompt>
-             <rubric>
-                 Read for conciseness, clarity of thought, and form.
-                 <criterion name="concise">
-                     How concise is it?
-                     <option val="0">Neal Stephenson (late)</option>
-                     <option val="1">HP Lovecraft</option>
-                     <option val="3">Robert Heinlein</option>
-                     <option val="4">Neal Stephenson (early)</option>
-                     <option val="5">Earnest Hemingway</option>
-                 </criterion>
-                 <criterion name="clearheaded">
-                     How clear is the thinking?
-                     <option val="0">The Unabomber</option>
-                     <option val="1">Hunter S. Thompson</option>
-                     <option val="2">Robert Heinlein</option>
-                     <option val="3">Isaac Asimov</option>
-                     <option val="55">Spock</option>
-                 </criterion>
-                 <criterion name="form">
-                     Lastly, how is it's form? Punctuation, grammar, and spelling all count.
-                     <option val="0">lolcats</option>
-                     <option val="1">Facebook</option>
-                     <option val="2">Reddit</option>
-                     <option val="3">metafilter</option>
-                     <option val="4">Usenet, 1996</option>
-                     <option val="99">The Elements of Style</option>
-                 </criterion>
-             </rubric>
-             <evals>
-                 <peereval start="2014-12-20T19:00-7:00"
-                           due="2014-12-21T22:22-7:00"
-                           must_grade="5"
-                           must_be_graded_by="3" />
-                 <selfeval/>
-             </evals>
-             </openassessment>
-             </vertical_demo>
-             """),
-            ("OpenAssessmentBlock Censorship Rubric",
-             """
-             <vertical_demo>
-             <openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
-             <prompt>
-                 What do you think about censorship in libraries? I think it's pretty great.
-             </prompt>
-             <rubric>
-                 Read for conciseness, clarity of thought, and form.
-                 <criterion name="concise">
-                     How concise is it?
-                     <option val="0">The Bible</option>
-                     <option val="1">Earnest Hemingway</option>
-                     <option val="3">Matsuo Basho</option>
-                 </criterion>
-                 <criterion name="clearheaded">
-                     How clear is the thinking?
-                     <option val="0">Eric</option>
-                     <option val="1">John</option>
-                     <option val="2">Ian</option>
-                 </criterion>
-                 <criterion name="form">
-                     Lastly, how is it's form? Punctuation, grammar, and spelling all count.
-                     <option val="0">IRC</option>
-                     <option val="1">Real Email</option>
-                     <option val="2">Old-timey letters</option>
-                 </criterion>
-             </rubric>
-             <evals>
-                 <selfeval/>
-                 <peereval start="2014-12-20T19:00-7:00"
-                           due="2014-12-21T22:22-7:00"
-                           must_grade="5"
-                           must_be_graded_by="3" />
-             </evals>
-             </openassessment>
-             </vertical_demo>
-             """),
-        ]
+        return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC,]
apps/openassessment/xblock/static/html/oa_rubric.html

 <!-- START OpenAssessmentBlock HTML -->
 <div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
+    <div id="peer_submission_uuid" hidden="true">${peer_submission["uuid"]}</div>
+    <p>${peer_submission["answer"]}</p>
     <p class="openassessment_prompt" id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p>
     % for criterion in rubric_criteria:
     <div>
         <p class="openassessment_prompt">${criterion["instructions"]}</p>
         % for value in sorted([k for k in criterion.keys() if k != 'name' and k != 'instructions']):
-            <input type="radio" value="${value}">${criterion[value]}</input>
+            <input name="${criterion['name']}" type="radio" value="${value}">${criterion[value]}</input>
         % endfor
     </div>
     % endfor
-    <input type="button" class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit"/>
-    % endfor
+    <input type="button" class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit"/>
 </div>
-<div class="openassessment_response_status_block" id=openassessment_response_status_block_${xblock_trace[0]}">
+<div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}">
     This message should be invisible; please upgrade your browser.
 </div>
 <!-- END OpenAssessmentBlock HTML -->
apps/openassessment/xblock/static/js/src/oa_assessment.js

@@ -7,6 +7,15 @@ function OpenAssessmentBlock(runtime, element) {
     var click_msg = '<p class="clickhere">(click here to dismiss this message)</p>';
     /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */

+    function prepare_assessment_post(element) {
+        selector = $("input[type=radio]:checked", element);
+        values = [];
+        for (i = 0; i < selector.length; i++) {
+            values[i] = selector[i].value;
+        }
+        return {"submission_uuid": $("div#peer_submission_uuid")[0].innerText, "points_earned": values};
+    }
+
     function displayStatus(result) {
         status = result[0]
         error_msg = result[1]

@@ -26,7 +35,7 @@ function OpenAssessmentBlock(runtime, element) {
             type: "POST",
             url: handlerUrl,
             /* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
-            data: JSON.stringify({"assessment": "I'm not sure how to stringify a form"}),
+            data: JSON.stringify(prepare_assessment_post(element)),
             success: displayStatus
         });
    });
apps/openassessment/xblock/test/test_openassessment.py

@@ -12,6 +12,50 @@ from workbench.runtime import WorkbenchRuntime
 from submissions import api
 from submissions.api import SubmissionRequestError, SubmissionInternalError

+RUBRIC_CONFIG = """
+    <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+        <prompt>
+            Given the state of the world today, what do you think should be done to
+            combat poverty? Please answer in a short essay of 200-300 words.
+        </prompt>
+        <rubric>
+            Read for conciseness, clarity of thought, and form.
+            <criterion name="concise">
+                How concise is it?
+                <option val="0">Neal Stephenson (late)</option>
+                <option val="1">HP Lovecraft</option>
+                <option val="3">Robert Heinlein</option>
+                <option val="4">Neal Stephenson (early)</option>
+                <option val="5">Earnest Hemingway</option>
+            </criterion>
+            <criterion name="clearheaded">
+                How clear is the thinking?
+                <option val="0">Yogi Berra</option>
+                <option val="1">Hunter S. Thompson</option>
+                <option val="2">Robert Heinlein</option>
+                <option val="3">Isaac Asimov</option>
+                <option val="10">Spock</option>
+            </criterion>
+            <criterion name="form">
+                Lastly, how is it's form? Punctuation, grammar, and spelling all count.
+                <option val="0">lolcats</option>
+                <option val="1">Facebook</option>
+                <option val="2">Reddit</option>
+                <option val="3">metafilter</option>
+                <option val="4">Usenet, 1996</option>
+                <option val="5">The Elements of Style</option>
+            </criterion>
+        </rubric>
+        <evals>
+            <peereval start="2014-12-20T19:00-7:00"
+                      due="2014-12-21T22:22-7:00"
+                      must_grade="5"
+                      must_be_graded_by="3" />
+            <selfeval/>
+        </evals>
+    </openassessment>
+"""
+
 class TestOpenAssessment(TestCase):

@@ -22,11 +66,7 @@ class TestOpenAssessment(TestCase):
         self.runtime = WorkbenchRuntime()
         self.runtime.user_id = "Bob"
         assessment_id = self.runtime.parse_xml_string(
-            """<openassessment
-            prompt="This is my prompt. There are many like it, but this one is mine."
-            course_id="RopesCourse"
-            />
-            """, self.runtime.id_generator)
+            RUBRIC_CONFIG, self.runtime.id_generator)
         self.assessment = self.runtime.get_block(assessment_id)
         self.default_json_submission = json.dumps(
             {"submission": "This is my answer to this test question!"})
apps/submissions/api.py

@@ -8,8 +8,8 @@ import logging
 from django.db import DatabaseError
 from django.utils.encoding import force_unicode

-from submissions.serializers import SubmissionSerializer, StudentItemSerializer
-from submissions.models import Submission, StudentItem
+from submissions.serializers import SubmissionSerializer, StudentItemSerializer, ScoreSerializer
+from submissions.models import Submission, StudentItem, Score

 logger = logging.getLogger(__name__)

@@ -212,20 +212,129 @@ def get_submissions(student_item_dict, limit=None):
     if limit:
         submission_models = submission_models[:limit]

-    return [SubmissionSerializer(submission).data for submission in submission_models]
+    return SubmissionSerializer(submission_models, many=True).data


 def get_score(student_item):
-    pass
+    """Get the score for a particular student item
+
+    Each student item should have a unique score. This function will return the
+    score if it is available. A score is only calculated for a student item if
+    it has completed the workflow for a particular assessment module.
+
+    Args:
+        student_item (dict): The dictionary representation of a student item.
+            Function returns the score related to this student item.
+
+    Returns:
+        score (dict): The score associated with this student item. None if there
+            is no score found.
+
+    Raises:
+        SubmissionInternalError: Raised if a score cannot be retrieved because
+            of an internal server error.
+
+    Examples:
+        >>> student_item = {
+        >>>     "student_id":"Tim",
+        >>>     "course_id":"TestCourse",
+        >>>     "item_id":"u_67",
+        >>>     "item_type":"openassessment"
+        >>> }
+        >>>
+        >>> get_score(student_item)
+        [{
+            'student_item': 2,
+            'submission': 2,
+            'points_earned': 8,
+            'points_possible': 20,
+            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
+        }]
+
+    """
+    student_item_model = StudentItem.objects.get(**student_item)
+    scores = Score.objects.filter(student_item=student_item_model)
+    return ScoreSerializer(scores, many=True).data


 def get_scores(course_id, student_id, types=None):
     pass


-def set_score(student_item):
-    pass
+def set_score(student_item, submission, score, points_possible):
+    """Set a score for a particular student item, submission pair.
+
+    Sets the score for a particular student item and submission pair. This score
+    is calculated externally to the API.
+
+    Args:
+        student_item (dict): The student item associated with this score. This
+            dictionary must contain a course_id, student_id, and item_id.
+        submission (dict): The submission associated with this score. This
+            dictionary must contain all submission fields to properly get a
+            unique submission item.
+        score (int): The score to associate with the given submission and
+            student item.
+        points_possible (int): The total points possible for this particular
+            student item.
+
+    Returns:
+        (dict): The dictionary representation of the saved score.
+
+    Raises:
+        SubmissionInternalError: Thrown if there was an internal error while
+            attempting to save the score.
+        SubmissionRequestError: Thrown if the given student item or submission
+            are not found.
+
+    Examples:
+        >>> student_item_dict = dict(
+        >>>     student_id="Tim",
+        >>>     item_id="item_1",
+        >>>     course_id="course_1",
+        >>>     item_type="type_one"
+        >>> )
+        >>>
+        >>> submission_dict = dict(
+        >>>     student_item=2,
+        >>>     attempt_number=1,
+        >>>     submitted_at=datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
+        >>>     created_at=datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
+        >>>     answer=u'The answer is 42.'
+        >>> )
+        >>> set_score(student_item_dict, submission_dict, 11, 12)
+        {
+            'student_item': 2,
+            'submission': 1,
+            'points_earned': 11,
+            'points_possible': 12,
+            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
+        }
+
+    """
+    try:
+        student_item_model = StudentItem.objects.get(**student_item)
+        submission_model = Submission.objects.get(**submission)
+    except DatabaseError:
+        error_msg = u"Could not retrieve student item: {} or submission {}.".format(
+            student_item, submission
+        )
+        logger.exception(error_msg)
+        raise SubmissionRequestError(error_msg)
+
+    score = ScoreSerializer(
+        data={
+            "student_item": student_item_model.pk,
+            "submission": submission_model.pk,
+            "points_earned": score,
+            "points_possible": points_possible,
+        }
+    )
+    if not score.is_valid():
+        logger.exception(score.errors)
+        raise SubmissionInternalError(score.errors)
+    score.save()
+    return score.data


 def _get_or_create_student_item(student_item_dict):

@@ -262,7 +371,8 @@ def _get_or_create_student_item(student_item_dict):
     try:
         return StudentItem.objects.get(**student_item_dict)
     except StudentItem.DoesNotExist:
-        student_item_serializer = StudentItemSerializer(data=student_item_dict)
+        student_item_serializer = StudentItemSerializer(
+            data=student_item_dict)
         if not student_item_serializer.is_valid():
             raise SubmissionRequestError(student_item_serializer.errors)
         return student_item_serializer.save()
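A round-trip sketch of the two new score calls, mirroring the test_create_score / test_get_score tests added later in this commit (the student item keys follow the docstring example above):

    from submissions import api as sub_api

    student_item = {
        "student_id": "Tim",
        "course_id": "TestCourse",
        "item_id": "u_67",
        "item_type": "openassessment",
    }

    submission = sub_api.create_submission(student_item, u"The answer is 42.")

    # Record an externally calculated score for this student item / submission pair.
    sub_api.set_score(student_item, submission, 11, 12)

    # get_score returns the serialized Score rows for the student item.
    scores = sub_api.get_score(student_item)
    assert scores[0]["points_earned"] == 11
    assert scores[0]["points_possible"] == 12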
apps/submissions/tests/test_api.py

@@ -7,7 +7,7 @@ from nose.tools import raises
 from mock import patch
 import pytz

-from submissions.api import create_submission, get_submissions, SubmissionRequestError, SubmissionInternalError
+from submissions import api as api
 from submissions.models import Submission
 from submissions.serializers import StudentItemSerializer

@@ -31,79 +31,84 @@ ANSWER_TWO = u"this is my other answer!"
 @ddt
 class TestApi(TestCase):
     """
     Testing Submissions
     """

     def test_create_submission(self):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         self._assert_submission(submission, ANSWER_ONE, 1, 1)

     def test_get_submissions(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
-        create_submission(STUDENT_ITEM, ANSWER_TWO)
-        submissions = get_submissions(STUDENT_ITEM)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
+        submissions = api.get_submissions(STUDENT_ITEM)

         self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
         self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)

     def test_two_students(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
-        create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)

-        submissions = get_submissions(STUDENT_ITEM)
+        submissions = api.get_submissions(STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
         self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)

-        submissions = get_submissions(SECOND_STUDENT_ITEM)
+        submissions = api.get_submissions(SECOND_STUDENT_ITEM)
         self.assertEqual(1, len(submissions))
         self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)

     @file_data('test_valid_student_items.json')
     def test_various_student_items(self, valid_student_item):
-        create_submission(valid_student_item, ANSWER_ONE)
-        submission = get_submissions(valid_student_item)[0]
+        api.create_submission(valid_student_item, ANSWER_ONE)
+        submission = api.get_submissions(valid_student_item)[0]
         self._assert_submission(submission, ANSWER_ONE, 1, 1)

     def test_get_latest_submission(self):
         past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
         more_recent_date = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
-        create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
-        create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
+        api.create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)

         # Test a limit on the submissions
-        submissions = get_submissions(STUDENT_ITEM, 1)
+        submissions = api.get_submissions(STUDENT_ITEM, 1)
         self.assertEqual(1, len(submissions))
         self.assertEqual(ANSWER_ONE, submissions[0]["answer"])
         self.assertEqual(more_recent_date.year, submissions[0]["submitted_at"].year)

     def test_set_attempt_number(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
-        submissions = get_submissions(STUDENT_ITEM)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
+        submissions = api.get_submissions(STUDENT_ITEM)
         self._assert_submission(submissions[0], ANSWER_ONE, 1, 2)

-    @raises(SubmissionRequestError)
+    @raises(api.SubmissionRequestError)
     @file_data('test_bad_student_items.json')
     def test_error_checking(self, bad_student_item):
-        create_submission(bad_student_item, -100)
+        api.create_submission(bad_student_item, -100)

-    @raises(SubmissionRequestError)
+    @raises(api.SubmissionRequestError)
     def test_error_checking_submissions(self):
-        create_submission(STUDENT_ITEM, ANSWER_ONE, None, -1)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, -1)

     @patch.object(Submission.objects, 'filter')
-    @raises(SubmissionInternalError)
+    @raises(api.SubmissionInternalError)
     def test_error_on_submission_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)

     @patch.object(StudentItemSerializer, 'save')
-    @raises(SubmissionInternalError)
+    @raises(api.SubmissionInternalError)
     def test_create_student_item_validation(self, mock_save):
         mock_save.side_effect = DatabaseError("Bad things happened")
-        create_submission(STUDENT_ITEM, ANSWER_ONE)
+        api.create_submission(STUDENT_ITEM, ANSWER_ONE)

     def test_unicode_enforcement(self):
-        create_submission(STUDENT_ITEM, "Testing unicode answers.")
-        submissions = get_submissions(STUDENT_ITEM, 1)
+        api.create_submission(STUDENT_ITEM, "Testing unicode answers.")
+        submissions = api.get_submissions(STUDENT_ITEM, 1)
         self.assertEqual(u"Testing unicode answers.", submissions[0]["answer"])

     def _assert_submission(self, submission, expected_answer, expected_item,
@@ -111,4 +116,29 @@ class TestApi(TestCase):
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], expected_answer)
         self.assertEqual(submission["student_item"], expected_item)
-        self.assertEqual(submission["attempt_number"], expected_attempt)
\ No newline at end of file
+        self.assertEqual(submission["attempt_number"], expected_attempt)
+
+    """
+    Testing Scores
+    """
+
+    def test_create_score(self):
+        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
+        self._assert_submission(submission, ANSWER_ONE, 1, 1)
+
+        score = api.set_score(STUDENT_ITEM, submission, 11, 12)
+        self._assert_score(score, 11, 12)
+
+    def test_get_score(self):
+        self.test_create_score()
+        scores = api.get_score(STUDENT_ITEM)
+        self._assert_score(scores[0], 11, 12)
+
+    def _assert_score(
+            self,
+            score,
+            expected_points_earned,
+            expected_points_possible):
+        self.assertIsNotNone(score)
+        self.assertEqual(score["points_earned"], expected_points_earned)
+        self.assertEqual(score["points_possible"], expected_points_possible)
\ No newline at end of file
manage.py

@@ -10,7 +10,7 @@ if __name__ == "__main__":
     if 'test' in sys.argv or 'harvest' in sys.argv:
         os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.test")
     else:
-        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.base")
+        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")

     from django.core.management import execute_from_command_line
requirements/base.txt

@@ -8,4 +8,3 @@ django-extensions==1.3.3
 djangorestframework==2.3.5
 Mako==0.9.1
 pytz==2013.9
-django-pdb==0.3.2
requirements/dev.txt

@@ -3,6 +3,7 @@
 # Debug tools
 bpython==0.12
 django-debug-toolbar==0.11.0
+django-pdb==0.3.2
 sqlparse==0.1.10

 # Doc generation
settings/base.py

@@ -107,7 +107,6 @@ MIDDLEWARE_CLASSES = (
     'django.contrib.messages.middleware.MessageMiddleware',
     # Uncomment the next line for simple clickjacking protection:
     # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
-    'django_pdb.middleware.PdbMiddleware',  # Needed to enable shell-on-crash behavior
 )

 ROOT_URLCONF = 'urls'

@@ -131,7 +130,6 @@ INSTALLED_APPS = (
     # Third party
     'django_extensions',
-    'django_pdb',  # Allows post-mortem debugging on exceptions

     # XBlock
     'workbench',
settings/dev.py (new file, mode 100644)

+"""
+Dev-specific Django settings.
+"""
+
+# Inherit from base settings
+from .base import *
+
+MIDDLEWARE_CLASSES += (
+    'django_pdb.middleware.PdbMiddleware',  # Needed to enable shell-on-crash behavior
+)
+
+INSTALLED_APPS += (
+    'django_pdb',  # Allows post-mortem debugging on exceptions
+)
\ No newline at end of file