edx / edx-ora2 · Commits

Commit 807710f2, authored Feb 10, 2014 by Stephen Sanchez
Parent: 413ce1de

    Some changes to the API based on review comments

Showing 4 changed files with 78 additions and 35 deletions:

    apps/openassessment/peer/api.py             +7   -4
    apps/openassessment/peer/test/test_api.py   +56  -29
    settings/base.py                            +0   -2
    settings/dev.py                             +15  -0
apps/openassessment/peer/api.py

@@ -8,6 +8,7 @@ import copy
 import logging

 from django.db import DatabaseError
+import math

 from openassessment.peer.models import PeerEvaluation
 from openassessment.peer.serializers import PeerEvaluationSerializer
...
@@ -194,7 +195,7 @@ def _check_if_finished_and_create_score(student_item,
         student_item.student_id,
         required_evaluations_for_student
     )
-    evaluations = PeerEvaluation.objects.filter(submission=submission).order_by("-points_earned")
+    evaluations = PeerEvaluation.objects.filter(submission=submission)
     submission_finished = evaluations.count() >= required_evaluations_for_submission
     scores = []
     for evaluation in evaluations:
...
@@ -217,12 +218,14 @@ def _calculate_final_score(scores):
     """
     total_scores = len(scores)
+    scores = sorted(scores)
+    median = int(math.ceil(total_scores / float(2)))
     if total_scores == 0:
         return 0
     elif total_scores % 2:
-        return scores[total_scores / 2]
+        return scores[median - 1]
     else:
-        return (scores[total_scores / 2 - 1] + scores[total_scores / 2]) / 2
+        return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))


 def has_finished_required_evaluating(student_id, required_evaluations):
...
@@ -352,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
         >>> item_type="type_one",
         >>> student_id="Bob",
         >>> )
-        >>> get_submission_to_evaluate(student_item_dict)
+        >>> get_submission_to_evaluate(student_item_dict, 3)
         {
             'student_item': 2,
             'attempt_number': 1,
...
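Note on the median change above: the old implementation leaned on the queryset's
"-points_earned" ordering and plain integer indexing, while the new one sorts the
score list itself and rounds the midpoint up. A standalone sketch of that rounding
behaviour (this mirrors the diff but is not the module itself; the expected values
are taken from the updated assertions in test_api.py below):

    import math

    def median_score(scores):
        """Median of the peer scores, rounding an even split upward."""
        total = len(scores)
        scores = sorted(scores)
        median = int(math.ceil(total / float(2)))
        if total == 0:
            return 0
        elif total % 2:
            # Odd number of scores: take the middle one.
            return scores[median - 1]
        else:
            # Even number of scores: average the two middle ones, rounding up.
            return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))

    assert median_score([]) == 0
    assert median_score([5]) == 5
    assert median_score([5, 6]) == 6                       # previously 5
    assert median_score([6, 5, 12, 53, 16, 22]) == 14      # input order no longer matters
    assert median_score([16, 6, 12, 102, 22, 53, 5]) == 16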
apps/openassessment/peer/test/test_api.py
@@ -9,7 +9,7 @@ from mock import patch
 from openassessment.peer import api
 from openassessment.peer.models import PeerEvaluation
-from submissions.api import create_submission, SubmissionInternalError
+from submissions import api as sub_api
 from submissions.models import Submission
 from submissions.tests.test_api import STUDENT_ITEM, ANSWER_ONE
...
@@ -31,7 +31,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
 @ddt
 class TestApi(TestCase):

     def test_create_evaluation(self):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         evaluation = api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
...
@@ -43,7 +43,7 @@ class TestApi(TestCase):
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
...
@@ -57,7 +57,7 @@ class TestApi(TestCase):
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations_with_date(self, assessment_dict):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
...
@@ -71,36 +71,61 @@ class TestApi(TestCase):
         self._assert_evaluation(evaluations[0], **assessment_dict)
         self.assertEqual(evaluations[0]["scored_at"], MONDAY)

-    def test_student_finished_evaluating(self):
-        bob = self._create_student_and_submission("Tim", "Tim's answer")
+    def test_peer_evaluation_workflow(self):
+        tim = self._create_student_and_submission("Tim", "Tim's answer")
         bob = self._create_student_and_submission("Bob", "Bob's answer")
         sally = self._create_student_and_submission("Sally", "Sally's answer")
         jim = self._create_student_and_submission("Jim", "Jim's answer")
+        buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
+        xander = self._create_student_and_submission("Xander", "Xander's answer")

-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
+        # Tim should not have a score, because he has not evaluated enough
+        # peer submissions.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_evaluation(
             bob["uuid"],
             "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
         api.create_evaluation(
             sally["uuid"],
             "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
-        self.assertFalse(api.has_finished_required_evaluating("Tim", 3))
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
         api.create_evaluation(
             jim["uuid"],
-            "Tim",
-            REQUIRED_GRADED,
-            REQUIRED_GRADED_BY,
-            ASSESSMENT_DICT
-        )
-        self.assertTrue(api.has_finished_required_evaluating("Tim", 3))
+            "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            buffy["uuid"],
+            "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        api.create_evaluation(
+            xander["uuid"],
+            "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
+        )
+        self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+
+        # Tim should not have a score, because his submission does not have
+        # enough evaluations.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertFalse(scores)
+
+        api.create_evaluation(tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        api.create_evaluation(tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+        api.create_evaluation(tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT)
+
+        # Tim has met the critera, and should now have a score.
+        scores = sub_api.get_score(STUDENT_ITEM)
+        self.assertTrue(scores)
+        self.assertEqual(6, scores[0]["points_earned"])
+        self.assertEqual(12, scores[0]["points_possible"])

     @raises(api.PeerEvaluationRequestError)
     def test_bad_configuration(self):
...
@@ -133,7 +158,7 @@ class TestApi(TestCase):
     @raises(api.PeerEvaluationInternalError)
     def test_error_on_evaluation_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
...
@@ -144,9 +169,9 @@ class TestApi(TestCase):
         )

     @patch.object(PeerEvaluation.objects, 'filter')
-    @raises(SubmissionInternalError)
+    @raises(sub_api.SubmissionInternalError)
     def test_error_on_get_evaluation(self, mock_filter):
-        submission = create_submission(STUDENT_ITEM, ANSWER_ONE)
+        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         api.create_evaluation(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
...
@@ -162,16 +187,18 @@ class TestApi(TestCase):
         self.assertEqual(0, api._calculate_final_score([]))
         self.assertEqual(5, api._calculate_final_score([5]))
         # average of 5, 6, rounded down.
-        self.assertEqual(5, api._calculate_final_score([5, 6]))
+        self.assertEqual(6, api._calculate_final_score([5, 6]))
         self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
+        self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
         self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
+        self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))

     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
         new_student_item = STUDENT_ITEM.copy()
         new_student_item["student_id"] = student
-        return create_submission(new_student_item, answer, date)
+        return sub_api.create_submission(new_student_item, answer, date)

     def _assert_evaluation(self, evaluation, points_earned, points_possible,
                            feedback):
...
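Note on the renamed workflow test above: the old test only checked
has_finished_required_evaluating, while test_peer_evaluation_workflow also asserts
that a score is created only once two conditions hold: the student has graded
enough peer submissions, and the student's own submission has received enough peer
evaluations. A toy model of that gate (not project code; the concrete values 5 and
3 for REQUIRED_GRADED / REQUIRED_GRADED_BY are only inferred from the assertion
pattern in the test):

    def should_create_score(num_graded, num_received, required_graded, required_graded_by):
        """A score is created only when both workflow requirements are met."""
        return num_graded >= required_graded and num_received >= required_graded_by

    # Tim has graded five peers but nobody has evaluated his submission yet: no score.
    assert not should_create_score(5, 0, required_graded=5, required_graded_by=3)
    # Once Bob, Sally and Jim evaluate Tim's submission, the score materialises.
    assert should_create_score(5, 3, required_graded=5, required_graded_by=3)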
settings/base.py

@@ -107,7 +107,6 @@ MIDDLEWARE_CLASSES = (
     'django.contrib.messages.middleware.MessageMiddleware',
     # Uncomment the next line for simple clickjacking protection:
     # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
-    'django_pdb.middleware.PdbMiddleware',  # Needed to enable shell-on-crash behavior
 )

 ROOT_URLCONF = 'urls'
...
@@ -131,7 +130,6 @@ INSTALLED_APPS = (
     # Third party
     'django_extensions',
-    'django_pdb',  # Allows post-mortem debugging on exceptions

     # XBlock
     'workbench',
...
settings/dev.py  (new file, 0 → 100644)

+"""
+Dev-specific Django settings.
+"""
+
+# Inherit from base settings
+from .base import *
+
+MIDDLEWARE_CLASSES += (
+    'django_pdb.middleware.PdbMiddleware',  # Needed to enable shell-on-crash behavior
+)
+
+INSTALLED_APPS += (
+    'django_pdb',  # Allows post-mortem debugging on exceptions
+)
\ No newline at end of file
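With this commit the django_pdb middleware and app live only in the dev-specific
settings module, keeping the base settings free of debugging tools. A minimal
sketch of selecting the dev settings (the actual entry point this repo uses, e.g.
its manage.py, is an assumption here, so treat the module path as illustrative):

    import os

    # Point Django at settings/dev.py instead of settings/base.py.
    os.environ["DJANGO_SETTINGS_MODULE"] = "settings.dev"

    from django.conf import settings

    # django_pdb is available only when the dev settings are active.
    print("django_pdb" in settings.INSTALLED_APPS)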