edx / edx-ora2 · Commit 468cf9db

Code Review changes

Authored 11 years ago by Stephen Sanchez
Parent: 4f428554
Showing 4 changed files with 63 additions and 10 deletions:

  apps/openassessment/peer/api.py                                     +13 -2
  apps/openassessment/peer/serializers.py                             +36 -1
  apps/openassessment/templates/openassessmentblock/oa_response.html   +2 -0
  apps/openassessment/xblock/openassessmentblock.py                   +12 -7
apps/openassessment/peer/api.py
@@ -223,6 +223,11 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     values, the average of those two values is returned, rounded up to the
     greatest integer value.
 
+    If OverGrading occurs, the 'must_be_graded_by' parameter is the number of
+    assessments we want to use to calculate the median values. If this limit is
+    less than the total number of assessments available, the earliest
+    assessments are used.
+
     Args:
         submission_id (str): The submission uuid to get all rubric criterion
             median scores.
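
The "rounded up" median rule in this docstring is easy to misread, so here is a minimal stdlib-only sketch of it. The helper name median_score is hypothetical; this commit does not add such a helper, and the sketch assumes a non-empty score list.

    import math

    def median_score(scores):
        """Median of integer scores; an even-length list averages the two
        middle values, rounded up to the greatest integer value."""
        ordered = sorted(scores)
        n = len(ordered)
        if n % 2 == 1:
            return ordered[n // 2]
        return int(math.ceil((ordered[n // 2 - 1] + ordered[n // 2]) / 2.0))

    assert median_score([1, 2, 3]) == 2
    assert median_score([1, 2]) == 2   # (1 + 2) / 2 = 1.5, rounded up to 2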
@@ -241,7 +246,9 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
     # found in an assessment.
     try:
         submission = Submission.objects.get(uuid=submission_id)
-        assessments = Assessment.objects.filter(submission=submission)[:must_be_graded_by]
+        assessments = Assessment.objects.filter(
+            submission=submission
+        ).order_by("scored_at")[:must_be_graded_by]
     except DatabaseError:
         error_message = (
             u"Error getting assessment median scores {}".format(submission_id)
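
What the new order_by fixes: slicing a queryset that has no explicit ordering issues a LIMIT with no ORDER BY, so which rows land in the slice is database-dependent. A standalone sketch under minimal assumed Django settings (the stand-in model carries an explicit app_label so it can live outside an app; only the queryset pattern is taken from this diff):

    import django
    from django.conf import settings
    from django.db import models

    settings.configure(
        DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3",
                               "NAME": ":memory:"}},
        INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
    )
    django.setup()

    class Assessment(models.Model):
        scored_at = models.DateTimeField()
        class Meta:
            app_label = "demo"

    # Before: LIMIT with no ORDER BY -- an arbitrary subset of rows.
    arbitrary = Assessment.objects.all()[:3]
    # After: ORDER BY scored_at, then LIMIT -- deterministically the earliest,
    # matching the new docstring wording.
    earliest = Assessment.objects.order_by("scored_at")[:3]
    print(earliest.query)  # ... ORDER BY "demo_assessment"."scored_at" ASC LIMIT 3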
@@ -249,10 +256,14 @@ def get_assessment_median_scores(submission_id, must_be_graded_by):
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message)
 
+    # Iterate over every part of every assessment. Each part is associated with
+    # a criterion name, which becomes a key in the score dictionary, with a list
+    # of scores. These collected lists of scores are used to find a median value
+    # per criterion.
     scores = {}
     median_scores = {}
     for assessment in assessments:
-        for part in AssessmentPart.objects.filter(assessment=assessment):
+        for part in assessment.parts.all():
             criterion_name = part.option.criterion.name
             if criterion_name not in scores:
                 scores[criterion_name] = []
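
The inner-loop change is the same query expressed through the reverse relation. Extending the sketch above: a ForeignKey whose related_name is "parts" (an assumption consistent with this diff) gives every Assessment a parts manager.

    class AssessmentPart(models.Model):
        assessment = models.ForeignKey(Assessment, related_name="parts",
                                       on_delete=models.CASCADE)
        class Meta:
            app_label = "demo"

    # Equivalent querysets; the second follows the relation Django already
    # knows about instead of restating it as a filter:
    #   AssessmentPart.objects.filter(assessment=some_assessment)
    #   some_assessment.parts.all()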
apps/openassessment/peer/serializers.py
@@ -10,6 +10,7 @@ from openassessment.peer.models import (
     Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
 )
 
 
 class InvalidRubric(Exception):
     """This can be raised during the deserialization process."""
     def __init__(self, errors):
@@ -146,6 +147,40 @@ def get_assessment_review(submission):
         (list): A list of assessment reviews, combining assessments with
             rubrics and assessment parts, to allow a cohesive object for
             rendering the complete peer grading workflow.
 
+    Examples:
+        >>> get_assessment_review(submission)
+        {
+            'submission': 1,
+            'rubric': {
+                'id': 1,
+                'content_hash': u'45cc932c4da12a1c2b929018cd6f0785c1f8bc07',
+                'criteria': [{
+                    'order_num': 0,
+                    'name': u'Spelling',
+                    'prompt': u'Did the student have spelling errors?',
+                    'options': [{
+                        'order_num': 0,
+                        'points': 2,
+                        'name': u'No spelling errors',
+                        'explanation': u'No spelling errors were found in this submission.',
+                    }]
+                }]
+            },
+            'scored_at': datetime.datetime(2014, 2, 25, 19, 50, 7, 290464, tzinfo=<UTC>),
+            'scorer_id': u'Bob',
+            'score_type': u'PE',
+            'parts': [{
+                'option': {
+                    'order_num': 0,
+                    'points': 2,
+                    'name': u'No spelling errors',
+                    'explanation': u'No spelling errors were found in this submission.'}
+            }],
+            'submission_uuid': u'0a600160-be7f-429d-a853-1283d49205e7',
+            'points_earned': 9,
+            'points_possible': 20,
+        }
     """
     reviews = []
     assessments = Assessment.objects.filter(submission=submission)
@@ -154,7 +189,7 @@ def get_assessment_review(submission):
         rubric_dict = RubricSerializer(assessment.rubric).data
         assessment_dict["rubric"] = rubric_dict
         parts = []
-        for part in AssessmentPart.objects.filter(assessment=assessment):
+        for part in assessment.parts.all():
             part_dict = AssessmentPartSerializer(part).data
             options_dict = CriterionOptionSerializer(part.option).data
             criterion_dict = CriterionSerializer(part.option.criterion).data
apps/openassessment/templates/openassessmentblock/oa_response.html
@@ -20,7 +20,9 @@
 <!--header class="step__header ui-toggle-visibility__control"-->
     <h2 class="step__title">
         <span class="step__label">Your Response</span>
+        {% if formatted_due_datetime %}
         <span class="step__deadline">due <span class="date">{{ formatted_due_datetime }}</span></span>
+        {% endif %}
     </h2>
     <span class="step__status">
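
This guard pairs with the Python change below: formatted_due_datetime is only placed in the context when a due date is configured, and Django templates treat a missing variable as falsy, so the deadline span simply disappears. A minimal sketch, assuming a configured Django template engine:

    from django.template import Context, Template

    fragment = Template(
        u'{% if formatted_due_datetime %}due {{ formatted_due_datetime }}{% endif %}'
    )
    print(fragment.render(Context({})))                                      # ''
    print(fragment.render(Context({u'formatted_due_datetime': u'Friday'})))  # 'due Friday'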
apps/openassessment/xblock/openassessmentblock.py
@@ -155,6 +155,9 @@ DEFAULT_ASSESSMENT_MODULES = [
     DEFAULT_PEER_ASSESSMENT,
 ]
 
+# Used to parse datetime strings from the XML configuration.
+TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
 
 def load(path):
     """Handy helper for getting resources from our kit."""
@@ -378,14 +381,16 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
         if not context_dict:
             context_dict = {}
 
-        start = datetime.datetime.strptime(self.start_datetime, "%Y-%m-%dT%H:%M:%S")
-        due = datetime.datetime.strptime(self.due_datetime, "%Y-%m-%dT%H:%M:%S")
         context_dict["xblock_trace"] = self.get_xblock_trace()
-        context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
-        context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
-        context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
-        context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
+        if self.start_datetime:
+            start = datetime.datetime.strptime(self.start_datetime, TIME_PARSE_FORMAT)
+            context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
+            context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
+        if self.due_datetime:
+            due = datetime.datetime.strptime(self.due_datetime, TIME_PARSE_FORMAT)
+            context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
+            context_dict["formatted_due_datetime"] = due.strftime("%A, %B %d, %Y %X")
 
         template = get_template(path)
         context = Context(context_dict)
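
The net effect of this hunk: start and due dates become optional, so rendering no longer dies inside strptime when a block omits them. A stdlib-only sketch of the same guard logic outside the XBlock (the function name format_dates is hypothetical):

    import datetime

    TIME_PARSE_FORMAT = "%Y-%m-%dT%H:%M:%S"

    def format_dates(start_datetime=None, due_datetime=None):
        context = {}
        if start_datetime:
            start = datetime.datetime.strptime(start_datetime, TIME_PARSE_FORMAT)
            context["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
        if due_datetime:
            due = datetime.datetime.strptime(due_datetime, TIME_PARSE_FORMAT)
            context["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
        return context

    print(format_dates("2014-03-01T00:00:00"))  # no due date: no key, no crash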