Commit 45399b9b by Stephen Sanchez

Working approach to new scores and displaying them.

parent b4394e28
......@@ -132,8 +132,6 @@ def create_assessment(
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission": submission.pk,
#"points_earned": sum(assessment_dict["points_earned"]),
#"points_possible": assessment_dict["points_possible"],
"score_type": PEER_TYPE,
"parts": [{"option": option_id} for option_id in option_ids]
}
......
......@@ -182,7 +182,7 @@ def get_assessment_median_scores(assessments):
# to the median value for each.
for criterion in scores.keys():
total_criterion_scores = len(scores[criterion])
criterion_scores = sorted(scores)
criterion_scores = sorted(scores[criterion])
median = int(math.ceil(total_criterion_scores / float(2)))
if total_criterion_scores == 0:
criterion_score = 0
......
......@@ -19,26 +19,29 @@
<ol class="list submission__peer-evaluations__questions">
{% for criterion in rubric_criteria %}
{% with criterion_num=forloop.counter %}
<!-- individual question from rubric -->
<li class="question question--001 ui-toggle-visibility">
<li class="question question--{{ criterion_num }} ui-toggle-visibility">
<h4 class="question__title ui-toggle-visibility__control">
<span class="title__copy">{{ criterion.name }}</span>
<span class="question__score">
<span class="label sr">Overall Question Score</span>
<span class="question__score__value">{% criterion.name in median_scores %}</span>
<span class="question__score__value">{{ criterion.median_score }}</span>
<span class="label label--divider sr">out of</span>
<span class="question__score__potential">{{ student_score.points_possible }}</span>
<span class="question__score__potential">{{ criterion.total_value }}</span>
</span>
</h4>
{% for assessment in peer_assessments %}
{% with peer_num=forloop.counter %}
{% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<ul class="question__answers ui-toggle-visibility__content">
<li class="answer peer-assessment--001" id="question--001__answer-001">
<li class="answer peer-assessment--{{ peer_num }}"
id="question--{{ criterion_num }}__answer-{{ peer_num }}">
<h5 class="answer__title">
<span class="answer__source">
<span class="label sr">Assessor: </span>
<span class="value">Peer 1</span>
<span class="value">Peer {{ peer_num }}</span>
</span>
<span class="answer__value">
<span class="label sr">Peer's Assessment: </span>
......@@ -53,8 +56,10 @@
</ul>
{% endif %}
{% endfor %}
{% endwith %}
{% endfor %}
</li>
{% endwith %}
{% endfor %}
</ol>
</article>
......
......@@ -382,8 +382,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
due = datetime.datetime.strptime(self.due_datetime, "%Y-%m-%dT%H:%M:%S")
context_dict["xblock_trace"] = self.get_xblock_trace()
context_dict["rubric_instructions"] = self.rubric_instructions
context_dict["rubric_criteria"] = self.rubric_criteria
context_dict["formatted_start_date"] = start.strftime("%A, %B %d, %Y")
context_dict["formatted_start_datetime"] = start.strftime("%A, %B %d, %Y %X")
context_dict["formatted_due_date"] = due.strftime("%A, %B %d, %Y")
......
......@@ -70,7 +70,11 @@ class PeerAssessmentMixin(object):
assessment = self.get_assessment_module('peer-assessment')
if assessment:
peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
context_dict = {"peer_submission": peer_sub}
context_dict = {
"peer_submission": peer_sub,
"rubric_instructions": self.rubric_instructions,
"rubric_criteria": self.rubric_criteria
}
return self.render_assessment('oa_peer_assessment.html', context_dict)
def get_peer_submission(self, student_item_dict, assessment):
......
......@@ -50,6 +50,7 @@ class ScenarioParser(object):
crit = {
'name': criterion.attrib.get('name', ''),
'prompt': criterion.text.strip(),
'total_value': criterion.attrib.get('total_value', None),
'options': [],
}
for option in criterion:
......
......@@ -10,19 +10,19 @@
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
<criterion name="concise" total_value="3">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Ernest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
<criterion name="clearheaded" total_value="2">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
<criterion name="form" total_value="2">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
......
......@@ -11,7 +11,7 @@
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
<criterion name="concise" total_value="5">
How concise is it?
<option val="0">Neal Stephenson (late)
<explain>
......@@ -43,7 +43,7 @@
</explain>
</option>
</criterion>
<criterion name="clearheaded">
<criterion name="clearheaded" total_value="10">
How clear is the thinking?
<option val="0">Yogi Berra</option>
<option val="1">Hunter S. Thompson</option>
......@@ -57,7 +57,7 @@
</explain>
</option>
</criterion>
<criterion name="form">
<criterion name="form" total_value="5">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">lolcats</option>
<option val="1">Facebook</option>
......
......@@ -152,9 +152,13 @@ class SubmissionMixin(object):
path = "oa_response.html"
if student_score:
assessments = peer_api.get_assessments(student_submission["uuid"])
context["peer_assessments"] = assessments
median_scores = peer_api.get_median_scores_for_assessments(student_submission["uuid"])
context["median_scores"] = median_scores
context["peer_assessments"] = assessments
context["rubric_instructions"] = self.rubric_instructions
context["rubric_criteria"] = self.rubric_criteria
for criterion in context["rubric_criteria"]:
criterion["median_score"] = median_scores[criterion["name"]]
path = 'oa_response_graded.html'
elif student_submission:
path = 'oa_response_submitted.html'
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment