Commit 1d55c981 authored by Stephen Sanchez, committed by Brian Talbot

Fixing some of the grade metadata for the template

parent b266ea39
@@ -347,7 +347,7 @@ def get_assessments(submission_id):
     """
     try:
         submission = Submission.objects.get(uuid=submission_id)
-        return get_assessment_review(submission)
+        return get_assessment_review(submission, "PE")
     except DatabaseError:
         error_message = _(
             u"Error getting assessments for submission {}".format(submission_id)
...
@@ -134,7 +134,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
     )


-def get_assessment_review(submission):
+def get_assessment_review(submission, score_type):
     """Get all information pertaining to an assessment for review.

     Given an assessment serializer, return a serializable formatted model of
@@ -144,6 +144,8 @@ def get_assessment_review(submission):
     Args:
         submission (Submission): The Submission Model object to get
             assessment reviews for.
+        score_type (str): The score type of the assessments we want to get
+            back for review.

     Returns:
         (list): A list of assessment reviews, combining assessments with
@@ -151,7 +153,7 @@ def get_assessment_review(submission):
            rendering the complete peer grading workflow.

     Examples:
-        >>> get_assessment_review(submission)
+        >>> get_assessment_review(submission, score_type)
         [{
             'submission': 1,
             'rubric': {
@@ -186,7 +188,9 @@ def get_assessment_review(submission):
     """
     return [
         full_assessment_dict(assessment)
-        for assessment in Assessment.objects.filter(submission=submission)
+        for assessment in Assessment.objects.filter(
+            submission=submission, score_type=score_type
+        )
     ]
...
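For reference, the updated helper is now called with an explicit score type. A minimal sketch, assuming it runs in the same module (so Submission and get_assessment_review are already in scope), that submission_id holds a known submission UUID, and that "PE" is the peer-evaluation score type used by get_assessments above:

    # Hedged sketch: fetch a submission and pull back only its peer assessments,
    # mirroring the change to get_assessments() in this commit.
    submission = Submission.objects.get(uuid=submission_id)
    peer_reviews = get_assessment_review(submission, "PE")
    for review in peer_reviews:
        # Each entry is the full assessment dict, e.g. review['submission']
        # and review['rubric'] per the docstring's example output.
        print(review['rubric'])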
@@ -83,6 +83,32 @@
             </li>
             {% endwith %}
             {% endfor %}
+            <li class="question question--001 question--feedback ui-toggle-visibility">
+                <h4 class="question__title ui-toggle-visibility__control">How well did this response answer the overall question?</h4>
+                {% for assessment in peer_assessments %}
+                {% with peer_num=forloop.counter %}
+                {% if assessment.feedback %}
+                <ul class="question__answers ui-toggle-visibility__content">
+                    <li class="answer peer-evaluation--{{ peer_num }}" id="question--001__answer-{{ peer_num }}">
+                        <h5 class="answer__title">
+                            <span class="answer__source">
+                                <span class="label sr">Evaluator: </span>
+                                <span class="value">Peer {{ peer_num }}</span>
+                            </span>
+                        </h5>
+                        <div class="answer__value">
+                            <h6 class="label sr">Evaluator's Assessment: </h6>
+                            <div class="value">
+                                <p>{{ assessment.feedback }}</p>
+                            </div>
+                        </div>
+                    </li>
+                </ul>
+                {% endif %}
+                {% endwith %}
+                {% endfor %}
+            </li>
         </ol>
     </article>
...
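The new feedback block iterates peer_assessments and only renders entries whose feedback field is non-empty. A rough sketch of the context shape it appears to expect (the key names come from the GradeMixin change below; the feedback strings are purely illustrative):

    # Hypothetical context for rendering the grade-complete template.
    context = {
        "peer_assessments": [
            {"feedback": "Strong thesis; the second paragraph needs a citation."},
            {"feedback": ""},  # skipped by the {% if assessment.feedback %} guard
        ],
    }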
 from xblock.core import XBlock
-from openassessment.assessment.peer_api import get_assessments
+from openassessment.assessment import peer_api

 class GradeMixin(object):
     """Grade Mixin introduces all handlers for displaying grades
@@ -21,14 +20,23 @@ class GradeMixin(object):
         context = {}
         if status == "done":
             path = 'openassessmentblock/grade/oa_grade_complete.html'
-            context = {
-                "score": workflow["score"],
-                "assessments": [
-                    assessment
-                    for assessment in get_assessments(self.submission_uuid)
-                ],
-            }
-        elif status == "waiting":
+            assessment_ui_model = self.get_assessment_module('peer-assessment')
+            student_submission = self.get_user_submission(
+                workflow["submission_uuid"]
+            )
+            student_score = workflow["score"]
+            assessments = peer_api.get_assessments(student_submission["uuid"])
+            median_scores = peer_api.get_assessment_median_scores(
+                student_submission["uuid"],
+                assessment_ui_model["must_be_graded_by"]
+            )
+            context["student_submission"] = student_submission
+            context["peer_assessments"] = assessments
+            context["rubric_criteria"] = self.rubric_criteria
+            context["score"] = student_score
+            for criterion in context["rubric_criteria"]:
+                criterion["median_score"] = median_scores[criterion["name"]]
+        elif workflow.get('status') == "waiting":
             path = 'openassessmentblock/grade/oa_grade_waiting.html'
         elif not status:
             path = 'openassessmentblock/grade/oa_grade_not_started.html'
...
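The median-score wiring assumes get_assessment_median_scores() returns a dict keyed by criterion name, matching the names in self.rubric_criteria. A small sketch of that shape, with made-up criterion names and values:

    # Hypothetical shapes, inferred from the loop in the mixin above.
    median_scores = {"Ideas": 4, "Content": 3}              # name -> median score
    rubric_criteria = [{"name": "Ideas"}, {"name": "Content"}]
    for criterion in rubric_criteria:
        criterion["median_score"] = median_scores[criterion["name"]]
    # rubric_criteria is then handed to the grade template alongside
    # student_submission, peer_assessments, and score.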