edx / edx-ora2 · Commits

Commit 883f6ba3
Authored Feb 25, 2014 by Stephen Sanchez
Parent: 45399b9b

    Cleanup and tests

Showing 5 changed files with 118 additions and 100 deletions (+118, -100):
    apps/openassessment/peer/api.py                          +67  -48
    apps/openassessment/peer/serializers.py                    +0  -35
    apps/openassessment/peer/test/test_api.py                 +37  -13
    apps/openassessment/xblock/submission_mixin.py             +5   -1
    apps/openassessment/xblock/test/test_openassessment.py     +9   -3
apps/openassessment/peer/api.py

@@ -10,10 +10,9 @@ import math
 from django.db import DatabaseError
-from openassessment.peer.models import Assessment, Rubric, AssessmentPart
-from openassessment.peer.serializers import (
-    AssessmentSerializer, RubricSerializer, rubric_from_dict,
-    AssessmentPartSerializer, CriterionOptionSerializer,
-    get_assessment_review, get_assessment_median_scores)
+from openassessment.peer.models import Assessment, AssessmentPart
+from openassessment.peer.serializers import (
+    AssessmentSerializer, rubric_from_dict, get_assessment_review)
 from submissions import api as submission_api
 from submissions.models import Submission, StudentItem, Score
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -69,8 +68,8 @@ class PeerAssessmentInternalError(PeerAssessmentError):
 def create_assessment(
         submission_uuid,
         scorer_id,
-        required_assessments_for_student,
-        required_assessments_for_submission,
+        must_grade,
+        must_be_graded_by,
         assessment_dict,
         rubric_dict,
         scored_at=None):
@@ -85,9 +84,9 @@ def create_assessment(
             Submission model.
         scorer_id (str): The user ID for the user giving this assessment. This
             is required to create an assessment on a submission.
-        required_assessments_for_student (int): The number of assessments
+        must_grade (int): The number of assessments
             required for the student to receive a score for their submission.
-        required_assessments_for_submission (int): The number of assessments
+        must_be_graded_by (int): The number of assessments
             required on the submission for it to be scored.
         assessment_dict (dict): All related information for the assessment. An
             assessment contains points_earned, points_possible, and feedback.
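The renamed parameters line up with the must_grade / must_be_graded_by attributes on the peer-assessment block definition (see the test_openassessment.py hunk below). A minimal sketch of a call under the new signature -- the submission uuid and variable names here are illustrative, not taken from this commit; ASSESSMENT_DICT and RUBRIC_DICT are the fixtures from test_api.py:

    # Hypothetical caller, shown only to illustrate the renamed signature.
    assessment = create_assessment(
        submission_uuid,        # uuid of the submission being assessed
        "Sally",                # scorer_id: the student giving the assessment
        must_grade=5,           # assessments the scorer must complete to get a score
        must_be_graded_by=3,    # assessments a submission needs before it is scored
        assessment_dict=ASSESSMENT_DICT,
        rubric_dict=RUBRIC_DICT,
    )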
@@ -149,8 +148,8 @@ def create_assessment(
         _score_if_finished(
             student_item,
             submission,
-            required_assessments_for_student,
-            required_assessments_for_submission
+            must_grade,
+            must_be_graded_by
         )

     # Check if the grader is finished and has enough assessments
@@ -168,8 +167,8 @@ def create_assessment(
         _score_if_finished(
             scorer_item,
             scorer_submissions[0],
-            required_assessments_for_student,
-            required_assessments_for_submission
+            must_grade,
+            must_be_graded_by
         )

         return peer_serializer.data
@@ -186,7 +185,7 @@ def create_assessment(
 def _score_if_finished(student_item,
                        submission,
                        required_assessments_for_student,
-                       required_assessments_for_submission):
+                       must_be_graded_by):
     """Calculate final grade iff peer evaluation flow is satisfied.

     Checks if the student is finished with the peer assessment workflow. If the
@@ -202,27 +201,77 @@ def _score_if_finished(student_item,
         required_assessments_for_student
     )
     assessments = Assessment.objects.filter(submission=submission)
-    submission_finished = assessments.count() >= required_assessments_for_submission
+    submission_finished = assessments.count() >= must_be_graded_by
     if finished_evaluating and submission_finished:
         submission_api.set_score(
             StudentItemSerializer(student_item).data,
             SubmissionSerializer(submission).data,
-            _calculate_final_score(assessments),
+            sum(get_assessment_median_scores(
+                submission.uuid, must_be_graded_by).values()),
             assessments[0].points_possible
         )


-def _calculate_final_score(assessments):
-    """Final grade is calculated using integer values, rounding up.
-
-    """
-    median_scores = get_assessment_median_scores(assessments)
-    return sum(median_scores)
+def get_assessment_median_scores(submission_id, must_be_graded_by):
+    """Get the median score for each rubric criterion
+
+    For a given assessment, collect the median score for each criterion on the
+    rubric. This set can be used to determine the overall score, as well as each
+    part of the individual rubric scores.
+
+    If there is a true median score, it is returned. If there are two median
+    values, the average of those two values is returned, rounded up to the
+    greatest integer value.
+
+    Args:
+        submission_id (str): The submission uuid to get all rubric criterion
+            median scores.
+        must_be_graded_by (int): The number of assessments to include in this
+            score analysis.
+
+    Returns:
+        (dict): A dictionary of rubric criterion names, with a median score of
+            the peer assessments.
+
+    Raises:
+        PeerAssessmentInternalError: If any error occurs while retrieving
+            information to form the median scores, an error is raised.
+
+    """
+    # Create a key value in a dict with a list of values, for every criterion
+    # found in an assessment.
+    try:
+        submission = Submission.objects.get(uuid=submission_id)
+        assessments = Assessment.objects.filter(submission=submission)[:must_be_graded_by]
+    except DatabaseError:
+        error_message = (
+            u"Error getting assessment median scores {}".format(submission_id)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+    scores = {}
+    median_scores = {}
+    for assessment in assessments:
+        for part in AssessmentPart.objects.filter(assessment=assessment):
+            criterion_name = part.option.criterion.name
+            if not scores.has_key(criterion_name):
+                scores[criterion_name] = []
+            scores[criterion_name].append(part.option.points)

+    # Once we have lists of values for each criterion, sort each value and set
+    # to the median value for each.
+    for criterion in scores.keys():
+        total_criterion_scores = len(scores[criterion])
+        criterion_scores = sorted(scores[criterion])
+        median = int(math.ceil(total_criterion_scores / float(2)))
+        if total_criterion_scores == 0:
+            criterion_score = 0
+        elif total_criterion_scores % 2:
+            criterion_score = criterion_scores[median - 1]
+        else:
+            criterion_score = int(math.ceil(sum(criterion_scores[median - 1:median + 1]) / float(2)))
+        median_scores[criterion] = criterion_score
+
+    return median_scores


 def has_finished_required_evaluating(student_id, required_assessments):
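The median rule buried in that criterion loop is easier to check in isolation: an odd number of scores yields the true median, an even number yields the average of the two middle values, rounded up to the next integer. A standalone sketch of just that rule (not part of the commit), with worked values:

    import math

    def median_rounded_up(points):
        # Same arithmetic as the criterion loop in get_assessment_median_scores,
        # applied to a plain list of option points.
        points = sorted(points)
        total = len(points)
        if total == 0:
            return 0
        median = int(math.ceil(total / float(2)))
        if total % 2:
            # Odd count: take the true median.
            return points[median - 1]
        # Even count: average the two middle values, rounding up.
        return int(math.ceil(sum(points[median - 1:median + 1]) / float(2)))

    assert median_rounded_up([5, 6, 12]) == 6       # odd count: middle value
    assert median_rounded_up([5, 6]) == 6           # even count: ceil(5.5)
    assert median_rounded_up([5, 6, 12, 16]) == 9   # ceil((6 + 12) / 2)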
@@ -314,36 +363,6 @@ def get_assessments(submission_id):
         raise PeerAssessmentInternalError(error_message)


-def get_median_scores_for_assessments(submission_id):
-    """Returns a dictionary of scores per rubric criterion
-
-    Retrieve all the median scores for a particular submission, for each
-    criterion in the rubric.
-
-    Args:
-        submission_id (str): The submission uuid to get all rubric criterion
-            median scores.
-
-    Returns:
-        (dict): A dictionary of rubric criterion names, with a median score of
-            the peer assessments.
-
-    Raises:
-        PeerAssessmentInternalError: If any error occurs while retrieving
-            information to form the median scores, an error is raised.
-
-    """
-    try:
-        submission = Submission.objects.get(uuid=submission_id)
-        assessments = Assessment.objects.filter(submission=submission)
-        return get_assessment_median_scores(assessments)
-    except DatabaseError:
-        error_message = (
-            u"Error getting assessment median scores {}".format(submission_id)
-        )
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
-
-
 def get_submission_to_assess(student_item_dict, required_num_assessments):
     """Get a submission to peer evaluate.
apps/openassessment/peer/serializers.py

@@ -159,41 +159,6 @@ def get_assessment_review(submission):
     return reviews


-def get_assessment_median_scores(assessments):
-    """Get the median score for each rubric criterion
-
-    For a given assessment, collect the median score for each criterion on the
-    rubric. This set can be used to determine the overall score, as well as each
-    part of the individual rubric scores.
-
-    """
-    # Create a key value in a dict with a list of values, for every criterion
-    # found in an assessment.
-    scores = {}
-    median_scores = {}
-    for assessment in assessments:
-        for part in AssessmentPart.objects.filter(assessment=assessment):
-            criterion_name = part.option.criterion.name
-            if not scores.has_key(criterion_name):
-                scores[criterion_name] = []
-            scores[criterion_name].append(part.option.points)
-
-    # Once we have lists of values for each criterion, sort each value and set
-    # to the median value for each.
-    for criterion in scores.keys():
-        total_criterion_scores = len(scores[criterion])
-        criterion_scores = sorted(scores[criterion])
-        median = int(math.ceil(total_criterion_scores / float(2)))
-        if total_criterion_scores == 0:
-            criterion_score = 0
-        elif total_criterion_scores % 2:
-            criterion_score = criterion_scores[median - 1]
-        else:
-            criterion_score = int(math.ceil(sum(criterion_scores[median - 1:median + 1]) / float(2)))
-        median_scores[criterion] = criterion_score
-
-    return median_scores
-
-
 def rubric_from_dict(rubric_dict):
     """Given a dict of rubric information, return the corresponding Rubric
apps/openassessment/peer/test/test_api.py

@@ -77,6 +77,28 @@ ASSESSMENT_DICT = dict(
     }
 )

+# Answers are against RUBRIC_DICT -- this is worth 0 points
+ASSESSMENT_DICT_FAIL = dict(
+    feedback=u"fail",
+    options_selected={
+        "secret": "no",
+        u"ⓢⓐⓕⓔ": "no",
+        "giveup": "unwilling",
+        "singing": "yes",
+    }
+)
+
+# Answers are against RUBRIC_DICT -- this is worth 12 points
+ASSESSMENT_DICT_PASS = dict(
+    feedback=u"这是中国",
+    options_selected={
+        "secret": "yes",
+        u"ⓢⓐⓕⓔ": "yes",
+        "giveup": "eager",
+        "singing": "no",
+    }
+)
+
 REQUIRED_GRADED = 5
 REQUIRED_GRADED_BY = 3
@@ -175,10 +197,10 @@ class TestApi(TestCase):
             tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
         )
         peer_api.create_assessment(
-            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
+            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_FAIL, RUBRIC_DICT
         )
         peer_api.create_assessment(
-            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
+            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT_PASS, RUBRIC_DICT
         )

         # Tim has met the criteria, and should now have a score.
@@ -211,6 +233,19 @@ class TestApi(TestCase):
         self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
         peer_api.get_submission_to_assess(STUDENT_ITEM, 3)

+    @patch.object(Assessment.objects, 'filter')
+    @raises(peer_api.PeerAssessmentInternalError)
+    def test_median_score_db_error(self, mock_filter):
+        mock_filter.side_effect = DatabaseError("Bad things happened")
+        tim = self._create_student_and_submission("Tim", "Tim's answer")
+        peer_api.get_assessment_median_scores(tim["uuid"], 3)
+
+    @patch.object(Assessment.objects, 'filter')
+    @raises(peer_api.PeerAssessmentInternalError)
+    def test_median_score_db_error(self, mock_filter):
+        mock_filter.side_effect = DatabaseError("Bad things happened")
+        tim = self._create_student_and_submission("Tim", "Tim's answer")
+        peer_api.get_assessments(tim["uuid"])

     @patch.object(Submission.objects, 'get')
     @raises(peer_api.PeerAssessmentInternalError)
@@ -243,17 +278,6 @@ class TestApi(TestCase):
         mock_filter.side_effect = DatabaseError("Bad things happened")
         peer_api.get_assessments(submission["uuid"])

-    def test_choose_score(self):
-        self.assertEqual(0, peer_api._calculate_final_score([]))
-        self.assertEqual(5, peer_api._calculate_final_score([5]))
-        # average of 5, 6, rounded down.
-        self.assertEqual(6, peer_api._calculate_final_score([5, 6]))
-        self.assertEqual(14, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53]))
-        self.assertEqual(14, peer_api._calculate_final_score([6, 5, 12, 53, 16, 22]))
-        self.assertEqual(16, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
-        self.assertEqual(16, peer_api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
-
     @staticmethod
     def _create_student_and_submission(student, answer, date=None):
apps/openassessment/xblock/submission_mixin.py

@@ -142,6 +142,7 @@ class SubmissionMixin(object):
         student_score = self._get_submission_score(student_item)
         step_status = "Graded" if student_score else "Submitted"
         step_status = step_status if student_submission else "Incomplete"
+        assessment_ui_model = self.get_assessment_module('peer-assessment')
         context = {
             "student_submission": student_submission,
@@ -152,7 +153,10 @@ class SubmissionMixin(object):
         path = "oa_response.html"
         if student_score:
             assessments = peer_api.get_assessments(student_submission["uuid"])
-            median_scores = peer_api.get_median_scores_for_assessments(student_submission["uuid"])
+            median_scores = peer_api.get_assessment_median_scores(
+                student_submission["uuid"],
+                assessment_ui_model["must_be_graded_by"]
+            )
             context["peer_assessments"] = assessments
             context["rubric_instructions"] = self.rubric_instructions
             context["rubric_criteria"] = self.rubric_criteria
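The mixin now reads the bound from the block's own peer-assessment configuration rather than fetching medians over every assessment. A sketch of the resulting data flow, assuming the must_be_graded_by="3" setting from the XBlock XML in test_openassessment.py below (the inline value is illustrative):

    # get_assessment_module('peer-assessment') returns the peer step's settings,
    # so with must_be_graded_by="3" configured on the block:
    assessment_ui_model = self.get_assessment_module('peer-assessment')
    median_scores = peer_api.get_assessment_median_scores(
        student_submission["uuid"],
        assessment_ui_model["must_be_graded_by"]   # -> 3 in the test config
    )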
apps/openassessment/xblock/test/test_openassessment.py

@@ -13,7 +13,7 @@ from submissions import api as sub_api
 from submissions.api import SubmissionRequestError, SubmissionInternalError

 RUBRIC_CONFIG = """
-<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
+<openassessment start="2014-12-19T23:00:00" due="2014-12-21T23:00:00">
     <prompt>
         Given the state of the world today, what do you think should be done to
         combat poverty? Please answer in a short essay of 200-300 words.
@@ -48,8 +48,8 @@ RUBRIC_CONFIG = """
     </rubric>
     <assessments>
         <peer-assessment name="peer-assessment"
-                         start="2014-12-20T19:00-7:00"
-                         due="2014-12-21T22:22-7:00"
+                         start="2014-12-20T19:00"
+                         due="2014-12-21T22:22"
                          must_grade="5"
                          must_be_graded_by="3" />
         <self-assessment/>
@@ -140,3 +140,9 @@ class TestOpenAssessment(TestCase):
         xblock_fragment = self.runtime.render(self.assessment, "student_view")
         self.assertTrue(xblock_fragment.body_html().find("Openassessmentblock"))
+
+        submission_response = self.assessment.render_submission({})
+        self.assertIsNotNone(submission_response)
+        self.assertTrue(
+            submission_response.body.find("openassessment__response")
+        )