edx / edx-ora2 · Commits

Commit 45399b9b
Authored Feb 25, 2014 by Stephen Sanchez
Parent: b4394e28

    Working approach to new scores and displaying them.
Showing 9 changed files with 29 additions and 19 deletions:

    apps/openassessment/peer/api.py                                       +0  -2
    apps/openassessment/peer/serializers.py                               +1  -1
    apps/openassessment/templates/oa_response_graded.html                +10  -5
    apps/openassessment/xblock/openassessmentblock.py                     +0  -2
    apps/openassessment/xblock/peer_assessment_mixin.py                   +5  -1
    apps/openassessment/xblock/scenario_parser.py                         +1  -0
    apps/openassessment/xblock/static/xml/censorship_rubric_example.xml   +3  -3
    apps/openassessment/xblock/static/xml/poverty_rubric_example.xml      +3  -3
    apps/openassessment/xblock/submission_mixin.py                        +6  -2
apps/openassessment/peer/api.py

@@ -132,8 +132,6 @@ def create_assessment(
         "rubric": rubric.id,
         "scorer_id": scorer_id,
         "submission": submission.pk,
-        #"points_earned": sum(assessment_dict["points_earned"]),
-        #"points_possible": assessment_dict["points_possible"],
         "score_type": PEER_TYPE,
         "parts": [{"option": option_id} for option_id in option_ids]
     }
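With the commented-out points fields deleted, the assessment record carries only its parts (one selected option per criterion), which suggests points are meant to be derived from the parts rather than stored. A minimal sketch of that derivation; every name here is hypothetical, none come from the commit:

```python
# Sketch: derive a score from an assessment's parts instead of storing
# precomputed points on the assessment itself. All names are hypothetical.

def points_for_assessment(parts, option_points):
    """Sum the point value of each selected option.

    parts         -- list of dicts like {"option": option_id}
    option_points -- hypothetical mapping of option_id -> point value
    """
    return sum(option_points[part["option"]] for part in parts)

# Example: three criteria, one selected option each.
option_points = {11: 0, 12: 1, 13: 3}
parts = [{"option": 11}, {"option": 12}, {"option": 13}]
print(points_for_assessment(parts, option_points))  # 4
```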
apps/openassessment/peer/serializers.py

@@ -182,7 +182,7 @@ def get_assessment_median_scores(assessments):
     # to the median value for each.
     for criterion in scores.keys():
         total_criterion_scores = len(scores[criterion])
-        criterion_scores = sorted(scores)
+        criterion_scores = sorted(scores[criterion])
         median = int(math.ceil(total_criterion_scores / float(2)))
         if total_criterion_scores == 0:
             criterion_score = 0
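The one-line fix matters: `sorted(scores)` sorts the dict's criterion names, while `sorted(scores[criterion])` sorts the list of scores recorded for that criterion, which is what a median needs. A self-contained sketch of the median selection visible in this hunk; the even-length branch is not shown in the diff, so the averaging step below is an assumption:

```python
import math

def median_score(criterion_scores):
    """Median of a list of scores, per the logic visible in the hunk.

    The 1-based middle index is ceil(n / 2). Averaging the two middle
    values for even n is an assumption; that branch is not in the diff.
    """
    n = len(criterion_scores)
    if n == 0:
        return 0
    ordered = sorted(criterion_scores)
    middle = int(math.ceil(n / float(2)))  # 1-based middle position
    if n % 2:
        return ordered[middle - 1]
    return (ordered[middle - 1] + ordered[middle]) / 2.0  # assumed

print(median_score([3, 0, 1]))     # 1
print(median_score([2, 0, 1, 3]))  # 1.5 (assumed even-case behavior)
```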
apps/openassessment/templates/oa_response_graded.html

@@ -19,26 +19,29 @@
 <ol class="list submission__peer-evaluations__questions">
 {% for criterion in rubric_criteria %}
+{% with criterion_num=forloop.counter %}
 <!-- individual question from rubric -->
-<li class="question question--001 ui-toggle-visibility">
+<li class="question question--{{ criterion_num }} ui-toggle-visibility">
 <h4 class="question__title ui-toggle-visibility__control">
 <span class="title__copy">{{ criterion.name }}</span>
 <span class="question__score">
 <span class="label sr">Overall Question Score</span>
-<span class="question__score__value">{% criterion.name in median_scores %}</span>
+<span class="question__score__value">{{ criterion.median_score }}</span>
 <span class="label label--divider sr">out of</span>
-<span class="question__score__potential">{{ student_score.points_possible }}</span>
+<span class="question__score__potential">{{ criterion.total_value }}</span>
 </span>
 </h4>
 {% for assessment in peer_assessments %}
+{% with peer_num=forloop.counter %}
 {% for part in assessment.parts %}
 {% if part.option.criterion.name == criterion.name %}
 <ul class="question__answers ui-toggle-visibility__content">
-<li class="answer peer-assessment--001" id="question--001__answer-001">
+<li class="answer peer-assessment--{{ peer_num}}" id="question--{{ criterion_num }}__answer-{{ peer_num }}">
 <h5 class="answer__title">
 <span class="answer__source">
 <span class="label sr">Assessor:</span>
-<span class="value">Peer 1</span>
+<span class="value">Peer {{ peer_num }}</span>
 </span>
 <span class="answer__value">
 <span class="label sr">Peer's Assessment:</span>

@@ -53,8 +56,10 @@
 </ul>
 {% endif %}
 {% endfor %}
+{% endwith %}
 {% endfor %}
 </li>
+{% endwith %}
 {% endfor %}
 </ol>
 </article>
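The reworked template reads each criterion's median and maximum straight off the criterion dict, so the view code must hang `median_score` and `total_value` on every entry in `rubric_criteria` (see the scenario_parser.py and submission_mixin.py changes below). A sketch of the context shape the template now expects; the values are illustrative only:

```python
# Illustrative context for oa_response_graded.html after this commit.
# The key names come from the template above; the values are made up.
context = {
    "rubric_instructions": "Read for conciseness, clarity of thought, and form.",
    "rubric_criteria": [
        {"name": "concise", "median_score": 1, "total_value": 3},
        {"name": "clearheaded", "median_score": 2, "total_value": 2},
    ],
    "peer_assessments": [
        # Each assessment's parts are matched to a criterion by name.
        {"parts": [{"option": {"criterion": {"name": "concise"}}}]},
    ],
}
```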
apps/openassessment/xblock/openassessmentblock.py

@@ -382,8 +382,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
     due = datetime.datetime.strptime(self.due_datetime, "%Y-%m-%dT%H:%M:%S")
     context_dict["xblock_trace"] = self.get_xblock_trace()
-    context_dict["rubric_instructions"] = self.rubric_instructions
-    context_dict["rubric_criteria"] = self.rubric_criteria
     context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
     context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
     context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
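The block parses timestamps with one format string and re-renders them with friendlier ones. A quick demonstration of the exact format codes used above, with a made-up due date:

```python
import datetime

# Parse the stored "%Y-%m-%dT%H:%M:%S" form, then format for display.
due = datetime.datetime.strptime("2014-03-01T23:59:00", "%Y-%m-%dT%H:%M:%S")
print(due.strftime("%A, %B %d, %Y"))     # Saturday, March 01, 2014
print(due.strftime("%A, %B %d, %Y %X"))  # appends the locale's time, e.g. 23:59:00
```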
apps/openassessment/xblock/peer_assessment_mixin.py

@@ -70,7 +70,11 @@ class PeerAssessmentMixin(object):
     assessment = self.get_assessment_module('peer-assessment')
     if assessment:
         peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
-        context_dict = {"peer_submission": peer_sub}
+        context_dict = {
+            "peer_submission": peer_sub,
+            "rubric_instructions": self.rubric_instructions,
+            "rubric_criteria": self.rubric_criteria
+        }
     return self.render_assessment('oa_peer_assessment.html', context_dict)

 def get_peer_submission(self, student_item_dict, assessment):
apps/openassessment/xblock/scenario_parser.py

@@ -50,6 +50,7 @@ class ScenarioParser(object):
     crit = {
         'name': criterion.attrib.get('name', ''),
         'prompt': criterion.text.strip(),
+        'total_value': criterion.attrib.get('total_value', None),
         'options': [],
     }
     for option in criterion:
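The parser reads the new `total_value` attribute straight off the `<criterion>` element, defaulting to `None` for rubrics that haven't been updated. A minimal stand-alone sketch of that parse; using `xml.etree.ElementTree` as the element type is an assumption for illustration:

```python
import xml.etree.ElementTree as etree

xml = """
<rubric>
    <criterion name="concise" total_value="3">How concise is it?</criterion>
    <criterion name="legacy">No total_value attribute yet.</criterion>
</rubric>
"""

for criterion in etree.fromstring(xml):
    crit = {
        'name': criterion.attrib.get('name', ''),
        'prompt': criterion.text.strip(),
        'total_value': criterion.attrib.get('total_value', None),  # None when absent
        'options': [],
    }
    print(crit['name'], crit['total_value'])  # "concise 3", then "legacy None"
```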
apps/openassessment/xblock/static/xml/censorship_rubric_example.xml

@@ -10,19 +10,19 @@
 </prompt>
 <rubric>
     Read for conciseness, clarity of thought, and form.
-    <criterion name="concise">
+    <criterion name="concise" total_value="3">
         How concise is it?
         <option val="0">The Bible</option>
         <option val="1">Earnest Hemingway</option>
         <option val="3">Matsuo Basho</option>
     </criterion>
-    <criterion name="clearheaded">
+    <criterion name="clearheaded" total_value="2">
         How clear is the thinking?
         <option val="0">Eric</option>
         <option val="1">John</option>
         <option val="2">Ian</option>
     </criterion>
-    <criterion name="form">
+    <criterion name="form" total_value="2">
         Lastly, how is it's form? Punctuation, grammar, and spelling all count.
         <option val="0">IRC</option>
         <option val="1">Real Email</option>
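In this example the new `total_value` on each criterion mirrors the highest option `val` beneath it (3/2/2 against max option values 3/2/2). A throwaway check of that apparent convention, again assuming ElementTree parsing; the convention itself is an observation, not something the commit states:

```python
import xml.etree.ElementTree as etree

rubric = etree.fromstring("""
<rubric>
    <criterion name="concise" total_value="3">
        <option val="0">The Bible</option>
        <option val="1">Earnest Hemingway</option>
        <option val="3">Matsuo Basho</option>
    </criterion>
</rubric>
""")

for criterion in rubric:
    total = int(criterion.attrib["total_value"])
    max_val = max(int(opt.attrib["val"]) for opt in criterion.findall("option"))
    # Expect the declared maximum to equal the best option's value.
    assert total == max_val, (criterion.attrib["name"], total, max_val)
print("total_value matches max option val")
```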
apps/openassessment/xblock/static/xml/poverty_rubric_example.xml

@@ -11,7 +11,7 @@
 </prompt>
 <rubric>
     Read for conciseness, clarity of thought, and form.
-    <criterion name="concise">
+    <criterion name="concise" total_value="5">
         How concise is it?
         <option val="0">Neal Stephenson (late)
         <explain>

@@ -43,7 +43,7 @@
         </explain>
         </option>
     </criterion>
-    <criterion name="clearheaded">
+    <criterion name="clearheaded" total_value="10">
         How clear is the thinking?
         <option val="0">Yogi Berra</option>
         <option val="1">Hunter S. Thompson</option>

@@ -57,7 +57,7 @@
         </explain>
         </option>
     </criterion>
-    <criterion name="form">
+    <criterion name="form" total_value="5">
         Lastly, how is it's form? Punctuation, grammar, and spelling all count.
         <option val="0">lolcats</option>
         <option val="1">Facebook</option>
apps/openassessment/xblock/submission_mixin.py

@@ -152,9 +152,13 @@ class SubmissionMixin(object):
     path = "oa_response.html"
     if student_score:
         assessments = peer_api.get_assessments(student_submission["uuid"])
-        context["peer_assessments"] = assessments
         median_scores = peer_api.get_median_scores_for_assessments(student_submission["uuid"])
-        context["median_scores"] = median_scores
+        context["peer_assessments"] = assessments
+        context["rubric_instructions"] = self.rubric_instructions
+        context["rubric_criteria"] = self.rubric_criteria
+        for criterion in context["rubric_criteria"]:
+            criterion["median_score"] = median_scores[criterion["name"]]
         path = 'oa_response_graded.html'
     elif student_submission:
         path = 'oa_response_submitted.html'
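The loop that annotates each criterion dict with its median is the glue between the peer API and the graded template. A stand-alone run of that merge with made-up data:

```python
# Stand-alone demo of the median-merge loop above, with made-up data.
median_scores = {"concise": 1, "clearheaded": 2}
rubric_criteria = [
    {"name": "concise", "total_value": "3"},
    {"name": "clearheaded", "total_value": "2"},
]

# Note this raises KeyError if a criterion has no median recorded.
for criterion in rubric_criteria:
    criterion["median_score"] = median_scores[criterion["name"]]

print(rubric_criteria[0])
# {'name': 'concise', 'total_value': '3', 'median_score': 1}
```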