Commit ba7b9fac by Stephen Sanchez

Merge pull request #121 from edx/sanchez/turbo-mode-peer-grading

Add turbo-mode to the peer assessment rendering.
parents e0ffc198 2fc631ea
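
Summary: once a learner has completed the required number of peer assessments, the peer step can be re-rendered in "turbo mode" to keep serving responses to review. The front end POSTs `continue_grading: true` to the `render_peer_assessment` handler; when the peer workflow step is complete and another submission is available, the server renders `oa_peer_turbo_mode.html` (falling back to the completion view otherwise). As the template copy below notes, these extra reviews do not influence the learner's grade.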
@@ -30,6 +30,9 @@
{% block body %}
<div class="ui-toggle-visibility__content">
    <div class="wrapper--step__content">
        {% block message %}
        {% endblock %}

        <div class="step__instruction">
            <p>Please read and assess the following response from one of your peers.</p>
        </div>
@@ -61,7 +64,7 @@
        <ol class="list list--fields assessment__rubric">
            {% for criterion in rubric_criteria %}
            <li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility is--collapsed" id="assessment__rubric__question--{{ criterion.name }}">
            <li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.name }}">
                <h4 class="question__title ui-toggle-visibility__control">
                    <i class="ico icon-caret-right"></i>
                    <span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
@@ -75,10 +78,10 @@
                        <div class="wrapper--input">
                            <input type="radio"
                                   name="{{ criterion.name }}"
                                   id="assessment__rubric__question--{{ criterion.name }}__{{ option.name }}"
                                   id="assessment__rubric__question--{{ criterion.name }}"
                                   class="answer__value"
                                   value="{{ option.name }}" />
                            <label for="assessment__rubric__question--{{ criterion.name }}__{{ option.name }}"
                            <label for="assessment__rubric__question--{{ criterion.name }}"
                                   class="answer__label"
                            >{{ option.name }}</label>
                        </div>
......
@@ -10,8 +10,7 @@
        <span class="step__status__value">
            <i class="ico icon-ok"></i>
            <span class="copy">
                <span class="step__status__value--completed">{{ graded }}</span> of
                <span class="step__status__value--required">{{ must_grade }}</span> completed
                <span class="step__status__value--completed">{{ graded }}</span> completed
            </span>
        </span>
    </span>
......
{% extends "openassessmentblock/peer/oa_peer_assessment.html" %}

{% block list_item %}
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment ui-toggle-visibility is--complete">
{% endblock %}

{% block title %}
<span class="step__status">
    <span class="step__status__label">This step's status:</span>
    <span class="step__status__value">
        <span class="copy">Turbo Mode</span>
    </span>
</span>
{% endblock %}

{% block message %}
<div class="step__message message message--confirmation message--confirmation-turbo-mode">
    <h3 class="message__title">Congratulations!</h3>
    <div class="message__content">You have successfully completed all of the peer assessments you were asked to do for this step. If you would like to continue providing feedback to your peers, you may do so here, but it will not influence your final grade.</div>
</div>
{% endblock %}
@@ -27,6 +27,9 @@
{% block body %}
<div class="ui-toggle-visibility__content">
    <div class="wrapper--step__content">
        {% block message %}
        {% endblock %}

        <div class="step__instruction">
            <p>Please provide your response to the following question. You can save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">{{ formatted_due_date }}</span></span>. <strong class="emphasis--beta">After you submit your response, you cannot edit it</strong>.</p>
        </div>
......
@@ -28,53 +28,55 @@
{% block body %}
<div class="ui-toggle-visibility__content">
    <div class="step__content">
        <article class="self-assessment__display" id="self-assessment">
            <header class="self-assessment__display__header">
                <h3 class="self-assessment__display__title">Your Submitted Response</h3>
            </header>
    <div class="wrapper--step__content">
        {% block message %}
        {% endblock %}
            <div class="self-assessment__display__response">
                {{ self_submission.answer|linebreaks }}
            </div>
        </article>
        <form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
            <fieldset class="assessment__fields">
                <ol class="list list--fields assessment__rubric">
                    {% for criterion in rubric_criteria %}
                    <li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
                        <h4 class="question__title">
                            <i class="ico icon-caret-right"></i>
                            <span class="question__title__copy">{{ criterion.prompt }}</span>
                            <span class="label--required sr">* (Required)</span>
                        </h4>
        <div class="step__content">
            <article class="self-assessment__display" id="self-assessment">
                <header class="self-assessment__display__header">
                    <h3 class="self-assessment__display__title">Your Submitted Response</h3>
                </header>
                        <ol class="question__answers">
                            {% for option in criterion.options %}
                            <li class="answer">
                                <div class="wrapper--input">
                                    <input type="radio"
                                           name="{{ criterion.name }}"
                                           id="assessment__rubric__question--{{ criterion.name }}__{{ option.name }}"
                                           class="answer__value"
                                           value="{{ option.name }}" />
                                    <label for="assessment__rubric__question--{{ criterion.name }}__{{ option.name }}"
                                           class="answer__label">{{ option.name }}</label>
                                </div>
                                <div class="wrapper--metadata">
                                    <span class="answer__tip">{{ option.explanation }}</span>
                                    <span class="answer__points">{{ option.points }} <span class="label">points</span></span>
                                </div>
                            </li>
                            {% endfor %}
                        </ol>
                    </li>
                    {% endfor %}
                <div class="self-assessment__display__response">
                    {{ self_submission.answer|linebreaks }}
                </div>
            </article>
                </ol>
            </fieldset>
            <form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
                <fieldset class="assessment__fields">
                    <ol class="list list--fields assessment__rubric">
                        {% for criterion in rubric_criteria %}
                        <li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
                            <h4 class="question__title">
                                <i class="ico icon-caret-right"></i>
                                <span class="question__title__copy">{{ criterion.prompt }}</span>
                                <span class="label--required sr">* (Required)</span>
                            </h4>
                            <ol class="question__answers">
                                {% for option in criterion.options %}
                                <li class="answer">
                                    <div class="wrapper--input">
                                        <input type="radio"
                                               name="{{ criterion.name }}"
                                               id="assessment__rubric__question--{{ criterion.name }}"
                                               class="answer__value"
                                               value="{{ option.name }}" />
                                        <label for="assessment__rubric__question--{{ criterion.name }}"
                                               class="answer__label">{{ option.name }}</label>
                                    </div>
                                    <div class="wrapper--metadata">
                                        <span class="answer__tip">{{ option.explanation }}</span>
                                        <span class="answer__points">{{ option.points }} <span class="label">points</span></span>
                                    </div>
                                </li>
                                {% endfor %}
                            </ol>
                        </li>
                        {% endfor %}
                    </ol>
                </fieldset>
                <div class="self-assessment__actions">
                    <div class="message message--error message--error-server">
                        <h3 class="message__title">We could not submit your assessment</h3>
......
@@ -2,6 +2,7 @@ import copy
from xblock.core import XBlock

from openassessment.assessment import peer_api


class GradeMixin(object):
    """Grade Mixin introduces all handlers for displaying grades
......
@@ -3,11 +3,10 @@ from django.utils.translation import ugettext as _
from xblock.core import XBlock

from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import (
    PeerAssessmentWorkflowError, PeerAssessmentRequestError,
    PeerAssessmentInternalError
    PeerAssessmentInternalError, PeerAssessmentRequestError,
    PeerAssessmentWorkflowError
)

logger = logging.getLogger(__name__)
@@ -90,8 +89,6 @@ class PeerAssessmentMixin(object):
            else:
                return {'success': False, 'msg': _('Could not load peer assessment.')}

    @XBlock.handler
    def render_peer_assessment(self, data, suffix=''):
        """Renders the Peer Assessment HTML section of the XBlock
@@ -100,21 +97,33 @@ class PeerAssessmentMixin(object):
        Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
        more information on rendering XBlock sections.

        Args:
            data (dict): May contain an attribute 'continue_grading', which
                allows a student to continue grading peers past the required
                number of assessments.

        """
        student_item = None
        workflow = self.get_workflow_info()
        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        finished = False
        problem_open, date = self.is_open(step="peer")
        context_dict = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes"  # TODO: Need to configure this.
        }
        finished = False
        workflow = self.get_workflow_info()
        if workflow is None:
            return self.render_assessment(path, context_dict)
        continue_grading = (
            data.params.get('continue_grading', False)
            and workflow["status_details"]["peer"]["complete"]
        )

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            student_item = self.get_student_item_dict()
            finished, count = peer_api.has_finished_required_evaluating(
                student_item,
                assessment["must_grade"]
@@ -122,7 +131,11 @@ class PeerAssessmentMixin(object):
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if assessment["must_grade"] - count == 1:
            if continue_grading:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & review another response."
                )
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move onto next step."
                )
@@ -130,25 +143,38 @@ class PeerAssessmentMixin(object):
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move to response #{}"
                ).format(count + 2)

        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        if date == "due" and not problem_open:
            path = 'openassessmentblock/peer/oa_peer_closed.html'
        elif workflow and workflow["status"] == "peer" and student_item:
        elif workflow.get("status") == "peer":
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_assessment.html'
                context_dict["peer_submission"] = peer_sub
        elif workflow and workflow["status"] == "done":
            else:
                path = 'openassessmentblock/peer/oa_peer_waiting.html'
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment, continue_grading)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                context_dict["peer_submission"] = peer_sub
            else:
                path = 'openassessmentblock/peer/oa_peer_complete.html'
        elif workflow.get("status") == "done":
            path = "openassessmentblock/peer/oa_peer_complete.html"
        elif workflow and finished:
        elif finished:
            path = 'openassessmentblock/peer/oa_peer_waiting.html'

        return self.render_assessment(path, context_dict)

    def get_peer_submission(self, student_item_dict, assessment):
    def get_peer_submission(
            self,
            student_item_dict,
            assessment,
            over_grading=False
    ):
        submissions_open, __ = self.is_open(step="submission")
        over_grading = not submissions_open
        over_grading = over_grading or not submissions_open
        peer_submission = False
        try:
            peer_submission = peer_api.get_submission_to_assess(
@@ -159,5 +185,3 @@
        except PeerAssessmentWorkflowError as err:
            logger.exception(err)
        return peer_submission
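
To see how the `continue_grading` flag drives the handler above, here is a minimal sketch. It is not part of the commit: it borrows the fake-request pattern (`namedtuple('Request', 'params')`) used by the updated tests further down, and assumes `xblock` is an already-loaded OpenAssessment block.

```python
from collections import namedtuple

# Fake request exposing the 'params' attribute that the handler reads
# via data.params.get('continue_grading', False).
Request = namedtuple('Request', 'params')

# Normal rendering: turbo mode not requested.
response = xblock.render_peer_assessment(Request(params={}))

# Turbo mode: the flag is only honored once
# workflow["status_details"]["peer"]["complete"] is True; before that,
# continue_grading evaluates to False and rendering proceeds as usual.
turbo = xblock.render_peer_assessment(Request(params={'continue_grading': True}))
```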
@@ -70,6 +70,37 @@ OpenAssessment.Server.prototype = {
    },

    /**
    Render the Peer Assessment Section after a complete workflow, in order to
    continue grading peers.

    Returns:
        A jQuery promise, which resolves with the HTML of the rendered peer
        assessment section or fails with an error message.

    Example:
        server.renderContinuedPeer().done(
            function(html) { console.log(html); }
        ).fail(
            function(err) { console.log(err); }
        )
    **/
    renderContinuedPeer: function() {
        var url = this.url('render_peer_assessment');
        return $.Deferred(function(defer) {
            $.ajax({
                url: url,
                type: "POST",
                dataType: "html",
                data: {continue_grading: true}
            }).done(function(data) {
                defer.resolveWith(this, [data]);
            }).fail(function(data) {
                defer.rejectWith(this, ['Could not contact server.']);
            });
        }).promise();
    },

    /**
    Send a submission to the XBlock.

    Args:
......
@@ -196,6 +196,18 @@
        margin-top: $baseline-v;
    }

    // step message
    .step__message {
        @include row();
        margin-bottom: $baseline-v;
        border-radius: ($baseline-v/5);
        padding: $baseline-v ($baseline-h/2);

        &.message--confirmation {
            background: tint($color-complete, 90%);
        }
    }

    // step instructions
    .step__instruction {
        @extend %copy-4;
@@ -465,6 +477,9 @@
    // TYPE: confirmation
    .message--confirmation {
        .message__title {
            color: $color-complete;
        }
    }

    // --------------------
......
"""
Tests the Open Assessment XBlock functionality.
"""
from collections import namedtuple
import datetime as dt
import pytz
from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.xblock.submission_mixin import SubmissionMixin
from .base import XBlockHandlerTestCase, scenario
......@@ -32,12 +29,14 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertTrue(submission_response.body.find("openassessment__response"))
# Validate Peer Rendering.
peer_response = xblock.render_peer_assessment({})
request = namedtuple('Request', 'params')
request.params = {}
peer_response = xblock.render_peer_assessment(request)
self.assertIsNotNone(peer_response)
self.assertTrue(peer_response.body.find("openassessment__peer-assessment"))
# Validate Self Rendering.
self_response = xblock.render_self_assessment({})
self_response = xblock.render_self_assessment(request)
self.assertIsNotNone(self_response)
self.assertTrue(self_response.body.find("openassessment__peer-assessment"))
......@@ -70,7 +69,9 @@ class TestOpenAssessment(XBlockHandlerTestCase):
xblock.start = dt.datetime(2014, 4, 1, 1, 1, 1)
xblock.due = dt.datetime(2014, 5, 1)
resp = xblock.render_peer_assessment({})
request = namedtuple('Request', 'params')
request.params = {}
resp = xblock.render_peer_assessment(request)
self.assertTrue(resp.body.find('Tuesday, April 01, 2014'))
self.assertTrue(resp.body.find('Thursday, May 01, 2014'))
......
@@ -2,6 +2,7 @@
"""
Tests for peer assessment handlers in Open Assessment XBlock.
"""
from collections import namedtuple
import copy
import json
@@ -63,7 +64,9 @@ class TestPeerAssessment(XBlockHandlerTestCase):
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        peer_response = xblock.render_peer_assessment({})
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"].encode('utf-8'), peer_response.body)
......
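
A natural follow-up test for the new flag (a sketch only, not part of this commit) could reuse the same fake-request pattern; it assumes an `xblock` whose peer step is already complete:

```python
# Hypothetical: request turbo mode after the required assessments are done.
request = namedtuple('Request', 'params')
request.params = {'continue_grading': True}

turbo_response = xblock.render_peer_assessment(request)
self.assertIsNotNone(turbo_response)
# The turbo-mode template reuses the peer step's list item id.
self.assertIn("openassessment__peer-assessment".encode('utf-8'), turbo_response.body)
```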