edx / edx-ora2

Commit b4394e28, authored Feb 25, 2014 by Stephen Sanchez
Parent: 6cba22a4

    Changing score logic (incomplete)

Showing 5 changed files with 129 additions and 40 deletions:
    apps/openassessment/peer/api.py                         +39  -20
    apps/openassessment/peer/serializers.py                 +65   -4
    apps/openassessment/peer/test/test_api.py                +1   -0
    apps/openassessment/templates/oa_response_graded.html    +9   -5
    apps/openassessment/xblock/submission_mixin.py          +15  -11
apps/openassessment/peer/api.py
@@ -10,10 +10,10 @@ import math
 from django.db import DatabaseError

-from openassessment.peer.models import Assessment
+from openassessment.peer.models import Assessment, Rubric, AssessmentPart
 from openassessment.peer.serializers import (
-    AssessmentSerializer, RubricSerializer, rubric_from_dict)
+    AssessmentSerializer, RubricSerializer, rubric_from_dict,
+    AssessmentPartSerializer, CriterionOptionSerializer, get_assessment_review,
+    get_assessment_median_scores)
 from submissions import api as submission_api
 from submissions.models import Submission, StudentItem, Score
 from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -205,19 +205,17 @@ def _score_if_finished(student_item,
     )
     assessments = Assessment.objects.filter(submission=submission)
     submission_finished = assessments.count() >= required_assessments_for_submission
-    scores = []
-    for assessment in assessments:
-        scores.append(assessment.points_earned)
     if finished_evaluating and submission_finished:
         submission_api.set_score(
             StudentItemSerializer(student_item).data,
             SubmissionSerializer(submission).data,
-            _calculate_final_score(scores),
+            _calculate_final_score(assessments),
             assessments[0].points_possible
         )


-def _calculate_final_score(scores):
+def _calculate_final_score(assessments):
     """Final grade is calculated using integer values, rounding up.

     If there is a true median score, it is returned. If there are two median
@@ -225,15 +223,8 @@ def _calculate_final_score(scores):
     greatest integer value.

     """
-    total_scores = len(scores)
-    scores = sorted(scores)
-    median = int(math.ceil(total_scores / float(2)))
-    if total_scores == 0:
-        return 0
-    elif total_scores % 2:
-        return scores[median - 1]
-    else:
-        return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))
+    median_scores = get_assessment_median_scores(assessments)
+    return sum(median_scores)


 def has_finished_required_evaluating(student_id, required_assessments):
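The hunk above replaces "median of the total points" with "sum of the per-criterion medians". A minimal, self-contained sketch of that arithmetic, using hypothetical criterion names and point values rather than the ORM-backed lookup in this commit:

import math

def median_of(values):
    # Median with the same rounding rule as the removed code: for an even
    # number of scores, average the two middle values and round up.
    values = sorted(values)
    n = len(values)
    if n == 0:
        return 0
    mid = int(math.ceil(n / float(2)))
    if n % 2:
        return values[mid - 1]
    return int(math.ceil(sum(values[mid - 1:mid + 1]) / float(2)))

# Three hypothetical peer assessments scoring two rubric criteria:
scores_by_criterion = {
    "clarity": [2, 3, 5],
    "accuracy": [4, 4, 5],
}
median_scores = dict(
    (name, median_of(points)) for name, points in scores_by_criterion.items()
)
print(median_scores)                # {'clarity': 3, 'accuracy': 4}
print(sum(median_scores.values()))  # 7 -- the submission's final score

Note that the sketch sums the median values explicitly; the committed _calculate_final_score passes the dict itself to sum(), which is presumably one of the loose ends behind the "(incomplete)" in the commit message.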
@@ -316,9 +307,7 @@ def get_assessments(submission_id):
     """
     try:
         submission = Submission.objects.get(uuid=submission_id)
-        assessments = Assessment.objects.filter(submission=submission)
-        serializer = AssessmentSerializer(assessments, many=True)
-        return serializer.data
+        return get_assessment_review(submission)
     except DatabaseError:
         error_message = (
             u"Error getting assessments for submission {}".format(submission_id)
@@ -327,6 +316,36 @@ def get_assessments(submission_id):
         raise PeerAssessmentInternalError(error_message)


+def get_median_scores_for_assessments(submission_id):
+    """Returns a dictionary of scores per rubric criterion
+
+    Retrieve all the median scores for a particular submission, for each
+    criterion in the rubric.
+
+    Args:
+        submission_id (str): The submission uuid to get all rubric criterion
+            median scores.
+
+    Returns:
+        (dict): A dictionary of rubric criterion names, with a median score of
+            the peer assessments.
+
+    Raises:
+        PeerAssessmentInternalError: If any error occurs while retrieving
+            information to form the median scores, an error is raised.
+
+    """
+    try:
+        submission = Submission.objects.get(uuid=submission_id)
+        assessments = Assessment.objects.filter(submission=submission)
+        return get_assessment_median_scores(assessments)
+    except DatabaseError:
+        error_message = (
+            u"Error getting assessment median scores {}".format(submission_id)
+        )
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)
+
+
 def get_submission_to_assess(student_item_dict, required_num_assessments):
     """Get a submission to peer evaluate.
apps/openassessment/peer/serializers.py
@@ -4,8 +4,7 @@ Serializers are created to ensure models do not have to be accessed outside the
 scope of the Tim APIs.

 """
-from copy import deepcopy
 from hashlib import sha1
 import json
+import math

 from rest_framework import serializers
 from openassessment.peer.models import (
@@ -71,7 +70,6 @@ class CriterionSerializer(NestedModelSerializer):
         model = Criterion
         fields = ('order_num', 'name', 'prompt', 'options')

     def validate_options(self, attrs, source):
         """Make sure we have at least one CriterionOption in a Criterion."""
         options = attrs[source]
@@ -91,7 +89,6 @@ class RubricSerializer(NestedModelSerializer):
         model = Rubric
         fields = ('id', 'content_hash', 'criteria', 'points_possible')

     def validate_criteria(self, attrs, source):
         """Make sure we have at least one Criterion in the Rubric."""
         criteria = attrs[source]
@@ -134,6 +131,70 @@ class AssessmentSerializer(serializers.ModelSerializer):
             'points_possible',
         )


+def get_assessment_review(submission):
+    """Get all information pertaining to an assessment for review.
+
+    Given an assessment serializer, return a serializable formatted model of
+    the assessment, all assessment parts, all criterion options, and the
+    associated rubric.
+
+    """
+    reviews = []
+    assessments = Assessment.objects.filter(submission=submission)
+    for assessment in assessments:
+        assessment_dict = AssessmentSerializer(assessment).data
+        rubric_dict = RubricSerializer(assessment.rubric).data
+        assessment_dict["rubric"] = rubric_dict
+        parts = []
+        for part in AssessmentPart.objects.filter(assessment=assessment):
+            part_dict = AssessmentPartSerializer(part).data
+            options_dict = CriterionOptionSerializer(part.option).data
+            criterion_dict = CriterionSerializer(part.option.criterion).data
+            options_dict["criterion"] = criterion_dict
+            part_dict["option"] = options_dict
+            parts.append(part_dict)
+        assessment_dict["parts"] = parts
+        reviews.append(assessment_dict)
+    return reviews
+
+
+def get_assessment_median_scores(assessments):
+    """Get the median score for each rubric criterion
+
+    For a given assessment, collect the median score for each criterion on the
+    rubric. This set can be used to determine the overall score, as well as each
+    part of the individual rubric scores.
+
+    """
+    # Create a key value in a dict with a list of values, for every criterion
+    # found in an assessment.
+    scores = {}
+    median_scores = {}
+    for assessment in assessments:
+        for part in AssessmentPart.objects.filter(assessment=assessment):
+            criterion_name = part.option.criterion.name
+            if not scores.has_key(criterion_name):
+                scores[criterion_name] = []
+            scores[criterion_name].append(part.option.points)
+
+    # Once we have lists of values for each criterion, sort each value and set
+    # to the median value for each.
+    for criterion in scores.keys():
+        total_criterion_scores = len(scores[criterion])
+        criterion_scores = sorted(scores)
+        median = int(math.ceil(total_criterion_scores / float(2)))
+        if total_criterion_scores == 0:
+            criterion_score = 0
+        elif total_criterion_scores % 2:
+            criterion_score = criterion_scores[median - 1]
+        else:
+            criterion_score = int(math.ceil(sum(criterion_scores[median - 1:median + 1]) / float(2)))
+        median_scores[criterion] = criterion_score
+
+    return median_scores
+
+
 def rubric_from_dict(rubric_dict):
     """Given a dict of rubric information, return the corresponding Rubric
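For orientation, here is a rough sketch of one entry in the list that get_assessment_review returns. All values are invented, and only keys visible in this diff are shown; which top-level assessment fields actually appear depends on AssessmentSerializer.Meta, most of which is outside this hunk.

# Hypothetical shape of one element of get_assessment_review(submission)
# (values invented; keys follow the serializer fields shown in this diff).
review_entry = {
    "points_possible": 10,
    "rubric": {
        "id": 1,
        "content_hash": "ab12cd",
        "points_possible": 10,
        "criteria": [
            {"order_num": 0, "name": "clarity", "prompt": "How clear is it?", "options": []},
        ],
    },
    "parts": [
        {
            "option": {
                "points": 3,
                "criterion": {"order_num": 0, "name": "clarity", "prompt": "How clear is it?", "options": []},
            }
        }
    ],
}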
apps/openassessment/peer/test/test_api.py
@@ -244,6 +244,7 @@ class TestApi(TestCase):
             peer_api.get_assessments(submission["uuid"])

     def test_choose_score(self):
         self.assertEqual(0, peer_api._calculate_final_score([]))
         self.assertEqual(5, peer_api._calculate_final_score([5]))
         # average of 5, 6, rounded down.
apps/openassessment/templates/oa_response_graded.html
@@ -22,15 +22,17 @@
             <!-- individual question from rubric -->
             <li class="question question--001 ui-toggle-visibility">
               <h4 class="question__title ui-toggle-visibility__control">
-                <span class="title__copy">{{ criterion.instructions }}</span>
+                <span class="title__copy">{{ criterion.name }}</span>
                 <span class="question__score">
                   <span class="label sr">Overall Question Score</span>
-                  <span class="question__score__value">10</span>
+                  <span class="question__score__value">{% criterion.name in median_scores %}</span>
                   <span class="label label--divider sr">out of</span>
-                  <span class="question__score__potential">10</span>
+                  <span class="question__score__potential">{{ student_score.points_possible }}</span>
                 </span>
               </h4>

               {% for assessment in peer_assessments %}
+              {% for part in assessment.parts %}
+              {% if part.option.criterion.name == criterion.name %}
               <ul class="question__answers ui-toggle-visibility__content">
                 <li class="answer peer-assessment--001" id="question--001__answer-001">
                   <h5 class="answer__title">
@@ -40,15 +42,17 @@
                       </span>
                       <span class="answer__value">
                         <span class="label sr">Peer's Assessment:</span>
-                        <span class="value">{{ assessment }}</span>
+                        <span class="value">{{ part.option.name }}</span>
                       </span>
                     </h5>

                     <span class="answer__content">
-                      {{ assessment }}
+                      {{ part.option.explanation }}
                     </span>
                   </li>
                 </ul>
+              {% endif %}
+              {% endfor %}
               {% endfor %}
             </li>
           {% endfor %}
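The graded template now reads several new context variables: median_scores, student_score, and peer_assessments with nested parts. A hypothetical context dict that would satisfy it, with invented values and an assumed name ("rubric_criteria") for the iterable that binds criterion outside this hunk:

# Hypothetical template context for oa_response_graded.html (values invented).
context = {
    "rubric_criteria": [{"name": "clarity"}],
    "median_scores": {"clarity": 3},
    "student_score": {"points_possible": 10},
    "peer_assessments": [
        {
            "parts": [
                {
                    "option": {
                        "name": "Good",
                        "points": 3,
                        "explanation": "Clear and well organized.",
                        "criterion": {"name": "clarity"},
                    }
                }
            ]
        }
    ],
}

The {% criterion.name in median_scores %} tag in the first hunk is not valid Django template syntax as committed; a lookup of that criterion's median score is presumably intended, consistent with the "(incomplete)" note in the commit message.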
apps/openassessment/xblock/submission_mixin.py
 import datetime

 from xblock.core import XBlock

 from submissions import api
+from openassessment.peer import api as peer_api


 class SubmissionMixin(object):
@@ -69,7 +70,7 @@ class SubmissionMixin(object):
         return status, status_tag, status_text

     @staticmethod
-    def _get_submission_score(student_item_dict, submission=False):
+    def _get_submission_score(student_item_dict):
         """Return the most recent score, if any, for student item

         Gets the score, if available.
@@ -83,9 +84,7 @@ class SubmissionMixin(object):
             question.

         """
-        scores = False
-        if submission:
-            scores = api.get_score(student_item_dict)
+        scores = api.get_score(student_item_dict)
         return scores[0] if scores else None

     @staticmethod
@@ -141,8 +140,8 @@ class SubmissionMixin(object):
         due = datetime.datetime.strptime(self.due_datetime, "%Y-%m-%dT%H:%M:%S")

         # Has it been graded yet?
         student_score = self._get_submission_score(student_item)
-        step_status = "Submitted" if student_submission else "Incomplete"
+        step_status = "Graded" if student_score else "Submitted"
+        step_status = step_status if student_submission else "Incomplete"

         context = {
             "student_submission": student_submission,
@@ -150,11 +149,16 @@ class SubmissionMixin(object):
             "step_status": step_status,
         }

-        path = 'oa_response.html'
-        if due < datetime.datetime.now() and not student_submission:
-            path = 'oa_response_closed.html'
-        if student_submission:
-            path = "oa_response.html"
+        if student_score:
+            assessments = peer_api.get_assessments(student_submission["uuid"])
+            context["peer_assessments"] = assessments
+            median_scores = peer_api.get_median_scores_for_assessments(student_submission["uuid"])
+            context["median_scores"] = median_scores
+            path = 'oa_response_graded.html'
+        elif student_submission:
+            path = 'oa_response_submitted.html'
+        elif due < datetime.datetime.now() and not student_submission:
+            path = 'oa_response_closed.html'

         return self.render_assessment(path, context_dict=context)
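A condensed, framework-free sketch of the template-selection logic added in the last hunk. The function name is hypothetical, and the final fallback is an assumption, since the hunk itself sets no default path:

import datetime

def pick_response_template(student_submission, student_score, due):
    # Mirrors the if/elif chain added above; the trailing fallback is assumed.
    if student_score:
        return 'oa_response_graded.html'
    elif student_submission:
        return 'oa_response_submitted.html'
    elif due < datetime.datetime.now():
        return 'oa_response_closed.html'
    return 'oa_response.html'

# A learner who has submitted but has no score yet, past the due date:
print(pick_response_template({"uuid": "123"}, None, datetime.datetime(2014, 3, 1)))
# -> 'oa_response_submitted.html'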