Commit ca501c73 by Stephen Sanchez

Sass cleaning and date visibility.

parent 7f5b3634
......@@ -7,7 +7,7 @@ the workflow for a given submission.
import logging
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from django.db import DatabaseError, IntegrityError
from dogapi import dog_stats_api
from openassessment.assessment.models import (
......@@ -581,13 +581,14 @@ def create_peer_workflow(submission_uuid):
"""
try:
submission = sub_api.get_submission_and_student(submission_uuid)
workflow = PeerWorkflow.objects.get_or_create(
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission['student_item']['student_id'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
submission_uuid=submission_uuid
)
return workflow
except IntegrityError:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
except DatabaseError:
error_message = _(
u"An internal error occurred while creating a new peer "
......@@ -596,6 +597,8 @@ def create_peer_workflow(submission_uuid):
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
workflow.save()
return workflow
def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
......
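Read together, the hunk above turns create_peer_workflow into a get-or-create with a race-condition fallback. A minimal sketch of the resulting function, assembled from the lines shown (error-message text abridged):

    def create_peer_workflow(submission_uuid):
        try:
            submission = sub_api.get_submission_and_student(submission_uuid)
            # get_or_create returns a (workflow, created) tuple; the flag is unused.
            workflow, __ = PeerWorkflow.objects.get_or_create(
                student_id=submission['student_item']['student_id'],
                course_id=submission['student_item']['course_id'],
                item_id=submission['student_item']['item_id'],
                submission_uuid=submission_uuid
            )
        except IntegrityError:
            # A concurrent request created the row between get_or_create's
            # existence check and its INSERT; fetch the existing row instead.
            workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        except DatabaseError:
            error_message = _(u"An internal error occurred while creating a new peer workflow")
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
        workflow.save()
        return workflow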
......@@ -2,7 +2,7 @@
import datetime
import pytz
from django.db import DatabaseError
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from ddt import ddt, file_data
from mock import patch
......@@ -357,6 +357,13 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(len(pwis), 1)
self.assertNotEqual(pwis[0].started_at, yesterday)
def test_peer_workflow_integrity_error(self):
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
workflow = peer_api.create_peer_workflow(tim_sub["uuid"])
self.assertEquals(tim_sub["uuid"], workflow.submission_uuid)
@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submission_found_closing_assessment(self):
"""
......
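The test drives the new except branch by forcing get_or_create to fail: mock's side_effect makes the patched method raise instead of returning. A minimal sketch of the mechanism, using the names from the test above:

    from django.db import IntegrityError
    from mock import patch

    with patch.object(PeerWorkflow.objects, "get_or_create") as mock_get_or_create:
        # Simulate a concurrent insert winning the race for this submission_uuid.
        mock_get_or_create.side_effect = IntegrityError("Oh no!")
        # create_peer_workflow should swallow the error and fall back to fetching
        # the existing row (which the test's setup helper is assumed to have created).
        workflow = peer_api.create_peer_workflow(tim_sub["uuid"])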
......@@ -9,7 +9,7 @@
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">{% trans "Learn to Assess" %}</span>
<span class="step__label">{% trans "Learn to Assess Responses" %}</span>
{% if training_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
......@@ -32,7 +32,10 @@
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "In Progress" %}</span>
<span class="copy">{% trans "In Progress" %}
(<span class="step__status__value--completed">{{ training_num_completed }}</span> of
<span class="step__status__value--required">{{ training_num_available }}</span>)
</span>
</span>
</span>
{% endblock %}
......@@ -43,25 +46,28 @@
<div class="wrapper--step__content">
<div id="openassessment__student-training--instructions" class="step__message message message--correct">
<h3 class="message__title">{% trans "Learning to Assess" %}</h3>
<h3 class="message__title">{% trans "Learning to Assess Responses" %}</h3>
<div class="message__content">
<p>{% blocktrans %}Prior to moving on to the next assessment step, review the following instructor provided responses as an introduction to the assessment format for this question. Your goal is to match the assessment score given to each of these responses by instructors.{% endblocktrans %}</p>
<p>{% blocktrans %}Before you begin to assess your peers' responses, you'll learn how to complete peer assessments by reviewing responses that instructors have already assessed. If you select the same options for the response that the instructor selected, you'll move to the next step. If you don't select the same options, you'll review the response and try again.{% endblocktrans %}</p>
</div>
</div>
<div id="openassessment__student-training--incorrect" class="step__message message message--incorrect is--hidden">
<h3 class="message__title">{% trans "Learning to Assess" %}</h3>
<h3 class="message__title">{% trans "Learning to Assess Responses" %}</h3>
<div class="message__content">
<p>{% blocktrans %}Your scoring differs from the instructor's score for this assessment section. Please review the response above to review why it might be different, re-score this assessment section and check your assessment again.{% endblocktrans %}</p>
<p>{% blocktrans %}Your assessment differs from the instructor's assessment of this response. Review the response and consider why the instructor may have assessed it differently. Then, try the assessment again.{% endblocktrans %}</p>
</div>
</div>
<div class="step__content">
<article class="student-training__display" id="student-training">
<header class="student-training__display__header">
<h3 class="student-training__display__title">{% trans "Training Essay" %}</h3>
<h3 class="student-training__display__title">{% trans "Training Essay #" %}
<span class="student-training__number--current">{{ training_num_current }}</span> of
<span class="student-training__number--required">{{ training_num_available }}</span>
</h3>
</header>
<div class="student-training__display__response">
......@@ -79,21 +85,21 @@
<span class="question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="step__message message message--correct ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Correct Selection" %}</h3>
<div class="ui-toggle-visibility__content">
<div class="step__message message message--correct ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Selected Options Agree" %}</h3>
<div class="message__content">
<p>{% trans "Your selection matches staff." %}</p>
<div class="message__content">
<p>{% trans "The option you selected is the option that the instructor selected." %}</p>
</div>
</div>
</div>
<div class="step__message message message--incorrect ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Incorrect Selection" %}</h3>
<div class="step__message message message--incorrect ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Selected Options Differ" %}</h3>
<div class="message__content">
<p>{% trans "Your selection does not match staff." %}</p>
<div class="message__content">
<p>{% trans "The option you selected is not the option that the instructor selected." %}</p>
</div>
</div>
</div>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
......@@ -123,14 +129,14 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not submit your assessment" %}</h3>
<h3 class="message__title">{% trans "We could not check your assessment" %}</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="student-training--001__assessment__submit" class="action action--submit is--disabled">
<span class="copy">{% trans "Check Rubric" %}</span>
<span class="copy">{% trans "Compare your selections with the instructor's selections" %}</span>
<i class="ico icon-caret-right"></i>
</button>
</li>
......
......@@ -22,7 +22,7 @@
<div class="step__message message message--incomplete">
<h3 class="message__title">{% trans "The Due Date for This Step Has Passed" %}</h3>
<div class="message__content">
<p>{% trans "This step is now closed. You can no longer complete a assessment training or continue with this assignment, and you'll receive a grade of Incomplete." %}</p>
<p>{% trans "This step is now closed.You can no longer continue with this assignment, and you'll receive a grade of Incomplete." %}</p>
</div>
</div>
</div>
......
......@@ -8,7 +8,6 @@ import logging
from django.db import DatabaseError
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import student_training as student_training
from openassessment.assessment.errors import PeerAssessmentError
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep
......@@ -324,8 +323,14 @@ def update_from_assessments(submission_uuid, assessment_requirements):
"""
workflow = _get_workflow_model(submission_uuid)
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
try:
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
except PeerAssessmentError as err:
err_msg = u"Could not update assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
def get_status_counts(course_id, item_id, steps):
......
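The change wraps the workflow update so peer-layer failures surface as workflow-layer errors, and callers only have to handle one exception family. A sketch of the pattern; note that the except clause still needs PeerAssessmentError importable in this module, and AssessmentWorkflowInternalError is assumed to be defined alongside the module's other workflow exceptions:

    from openassessment.assessment.errors import PeerAssessmentError

    def update_from_assessments(submission_uuid, assessment_requirements):
        workflow = _get_workflow_model(submission_uuid)
        try:
            workflow.update_from_assessments(assessment_requirements)
            return _serialized_with_details(workflow, assessment_requirements)
        except PeerAssessmentError as err:
            # Re-raise as a workflow-level internal error; the original
            # traceback is preserved in the log via logger.exception.
            err_msg = u"Could not update assessment workflow: {}".format(err)
            logger.exception(err_msg)
            raise AssessmentWorkflowInternalError(err_msg)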
......@@ -151,6 +151,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
self.STATUS.waiting # if nothing's left to complete, we're waiting
)
# If the submitter is beginning peer assessment, add them to the queue
# by creating a new peer workflow
if new_status == "peer":
peer_api.create_peer_workflow(self.submission_uuid)
# If the submitter has done all they need to do, let's check to see if
# all steps have been fully assessed (i.e. we can score it).
if (new_status == self.STATUS.waiting and
......
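Because create_peer_workflow is now idempotent (get_or_create plus the IntegrityError fallback), calling it on every transition into the peer step is safe even when a workflow row already exists, for example when requirements change and the status is recomputed. A minimal sketch of the call site, abridged from the hunk:

    # Inside AssessmentWorkflow.update_from_assessments (abridged sketch):
    if new_status == "peer":
        # Idempotent: returns the existing PeerWorkflow row if one was
        # already created for this submission.
        peer_api.create_peer_workflow(self.submission_uuid)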
......@@ -8,7 +8,7 @@
}
}
},
"both": {
"peer_and_self": {
"steps": ["peer", "self"],
"requirements": {
"peer": {
......@@ -23,5 +23,40 @@
"requirements": {
"self": {}
}
},
"training_peer": {
"steps": ["training", "peer"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"training_self": {
"steps": ["training", "self"],
"requirements": {
"training": {
"num_required": 2
},
"self": {}
}
},
"training_peer_self": {
"steps": ["training", "peer", "self"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
}
}
\ No newline at end of file
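These fixture keys are consumed by ddt's file_data decorator, which generates one test case per top-level entry. A minimal sketch of the pattern, with a hypothetical test body:

    import ddt

    @ddt.ddt
    class TestAssessmentWorkflowApi(CacheResetTest):

        @ddt.file_data('data/assessments.json')
        def test_create_workflow(self, data):
            # ddt invokes this once per key ("peer_and_self", "training_peer",
            # "training_self", "training_peer_self", ...), binding the value to `data`.
            steps = data["steps"]
            requirements = data["requirements"]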
......@@ -9,6 +9,7 @@ from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
ITEM_1 = {
......@@ -43,6 +44,56 @@ class TestAssessmentWorkflowApi(CacheResetTest):
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
# Test that the peer workflow is or is not created, depending on
# whether peer is a step in the workflow.
if "peer" == first_step:
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
else:
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
def test_update_peer_workflow(self):
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"])
StudentTrainingWorkflow.get_or_create_workflow(submission_uuid=submission["uuid"])
requirements = {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
workflow_keys = set(workflow.keys())
self.assertEqual(
workflow_keys,
{
'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
}
)
self.assertEqual(workflow["submission_uuid"], submission["uuid"])
self.assertEqual(workflow["status"], "training")
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
workflow_from_get = workflow_api.get_workflow_for_submission(
submission["uuid"], requirements
)
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
requirements["training"]["num_required"] = 0
workflow = workflow_api.update_from_assessments(submission["uuid"], requirements)
# The new step is peer, and a PeerWorkflow has been created.
self.assertEqual(workflow["status"], "peer")
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
@ddt.file_data('data/assessments.json')
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist
......
......@@ -64,14 +64,14 @@ DEFAULT_RUBRIC_FEEDBACK_PROMPT = """
(Optional) What aspects of this response stood out to you? What did it do well? How could it improve?
"""
DEFAULT_EXAMPLE_ANSWER = "Replace this text with a sample response for this assignment. You'll assess this sample response in the courseware, and students will then learn to assess responses by assessing this response and comparing the options that they select in the rubric with the options that you selected."
DEFAULT_STUDENT_TRAINING = {
"name": "student-training",
"start": None,
"due": None,
"examples": [
{
"answer": "Example Calibration Response",
"answer": DEFAULT_EXAMPLE_ANSWER,
"options_selected": [
{
"criterion": "Ideas",
......@@ -84,7 +84,7 @@ DEFAULT_STUDENT_TRAINING = {
]
},
{
"answer": "Another Example Calibration Response",
"answer": DEFAULT_EXAMPLE_ANSWER,
"options_selected": [
{
"criterion": "Ideas",
......
......@@ -37,7 +37,7 @@ UI_MODELS = {
"submission": {
"name": "submission",
"class_id": "openassessment__response",
"navigation_text": "Your response to this problem",
"navigation_text": "Your response to this assignment",
"title": "Your Response"
},
"student-training": {
......@@ -61,7 +61,7 @@ UI_MODELS = {
"grade": {
"name": "grade",
"class_id": "openassessment__grade",
"navigation_text": "Your grade for this problem",
"navigation_text": "Your grade for this assignment",
"title": "Your Grade:"
}
}
......@@ -91,7 +91,7 @@ class OpenAssessmentBlock(
StudentTrainingMixin,
LmsCompatibilityMixin
):
"""Displays a question and gives an area where students can compose a response."""
"""Displays a prompt and provides an area where students can compose a response."""
submission_start = String(
default=None, scope=Scope.settings,
......
......@@ -1352,7 +1352,6 @@ hr.divider,
-moz-transition: opacity 0.5s ease-in 0, max-height 0.25s ease-in 0;
transition: opacity 0.5s ease-in 0, max-height 0.25s ease-in 0;
max-height: 40000px;
overflow: auto;
opacity: 1.0;
padding-left: 2px;
padding-right: 2px; }
......@@ -2115,14 +2114,24 @@ hr.divider,
overflow: visible; }
.step--student-training .student-training__assessment .assessment__rubric__question--feedback textarea {
min-height: 100px; }
.step--student-training .is--hidden {
height: 0;
width: 0;
padding: 0 0 0 0; }
.step--student-training .is--hidden .step__header {
padding-bottom: 0;
border-bottom: none;
margin-bottom: 0; }
.step--student-training .message--correct {
margin-top: 20px; }
.step--student-training .message--correct.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0; }
.step--student-training .message--correct.is--hidden .step__header {
border-bottom: none; }
.step--student-training .message--incorrect {
margin-top: 20px; }
.step--student-training .message--incorrect.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0; }
.step--student-training .message--incorrect.is--hidden .step__header {
border-bottom: none; }
.openassessment .self-assessment__display__header, .openassessment .peer-assessment__display__header, .openassessment .step__header {
margin-bottom: 0 !important;
......
......@@ -96,7 +96,7 @@ OpenAssessment.BaseView.prototype = {
if (type == 'save') {
container = '.response__submission__actions';
}
else if (type == 'submit' || type == 'peer' || type == 'self') {
else if (type == 'submit' || type == 'peer' || type == 'self' || type == 'student-training') {
container = '.step__actions';
}
else if (type == 'feedback_assess') {
......
......@@ -113,10 +113,14 @@ OpenAssessment.Rubric.prototype = {
criterion. For each correction provided, the associated criterion will have
an appropriate message displayed.
Args: Corrections (list): A list of corrections to the rubric criteria that
Args:
corrections (list): A list of corrections to the rubric criteria that
did not match the expected selected options.
Returns:
True if there were errors found, False if there are no corrections.
**/
updateRubric: function(corrections) {
showCorrections: function(corrections) {
var selector = "input[type=radio]";
var hasErrors = false;
// Display appropriate messages for each selection
......
......@@ -303,13 +303,14 @@ OpenAssessment.Server.prototype = {
values are the option text the user selected for the criterion.
Returns:
A JQuery promise, which resolves with a boolean if successful
and fails with an error message otherwise.
A JQuery promise, which resolves with a list of corrections if
successful and fails with an error message otherwise.
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.trainingAssess(options).done(
function(isCorrect) { console.log("Success!"); }
function(corrections) { console.log("Success!"); alert(corrections); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
......
......@@ -87,7 +87,7 @@ OpenAssessment.StudentTrainingView.prototype = {
var incorrect = $("#openassessment__student-training--incorrect", this.element);
var instructions = $("#openassessment__student-training--instructions", this.element);
if (!view.rubric.updateRubric(corrections)) {
if (!view.rubric.showCorrections(corrections)) {
baseView.loadAssessmentModules();
incorrect.addClass("is--hidden");
instructions.removeClass("is--hidden");
......
......@@ -44,7 +44,6 @@
@include transition(opacity $tmg-f1 ease-in 0, max-height $tmg-f2 ease-in 0);
@extend %trans-opacity;
max-height: ($baseline-v*2000);
overflow: auto;
opacity: 1.0;
padding-left: ($baseline-h/20);
padding-right: ($baseline-h/20);
......
......@@ -135,22 +135,36 @@
// TYPE: correct
.message--correct {
@extend .message--complete;
margin-top: $baseline-v;
&.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0;
.step__header {
border-bottom: none;
}
}
}
// TYPE: incorrect
.message--incorrect {
  @extend .message--incomplete;
}

// Stolen from oa_base is--collapsed.
.is--hidden {
  height: 0;
  width: 0;
  padding: 0 0 0 0;

  .step__header {
    padding-bottom: 0;
    border-bottom: none;
    margin-bottom: 0;
  }
}

// TYPE: incorrect
.message--incorrect {
  @extend .message--incomplete;
  margin-top: $baseline-v;

  &.is--hidden {
    height: 0;
    width: 0;
    padding: 0;
    margin: 0;

    .step__header {
      border-bottom: none;
    }
  }
}
......@@ -78,7 +78,7 @@
</example>
</assessment>
<assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
start="2013-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="1"
must_be_graded_by="1" />
......
......@@ -7,6 +7,7 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
......@@ -97,11 +98,13 @@ class StudentTrainingMixin(object):
if not training_module:
return template, context
context['training_due'] = due_date
if due_date < DISTANT_FUTURE:
context['training_due'] = due_date
# Report progress in the student training workflow (completed X out of Y)
context['training_num_available'] = len(training_module["examples"])
context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
context['training_num_current'] = context['training_num_completed'] + 1
# Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected.
......
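DISTANT_FUTURE is the sentinel that resolve_dates substitutes when no deadline is configured, so the new comparison hides the due date unless one was actually set. A sketch of the idea; the sentinel's exact value is an assumption, though the "9999-01-01" expectation in the test fixture below is consistent with it:

    import datetime
    import pytz

    # Assumed sentinel: a far-future, timezone-aware datetime meaning "no deadline".
    DISTANT_FUTURE = datetime.datetime(datetime.MAXYEAR, 1, 1, tzinfo=pytz.utc)

    if due_date < DISTANT_FUTURE:
        # A real deadline was configured; expose it so the template renders it.
        context['training_due'] = due_date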
......@@ -3,8 +3,8 @@
"expected_template": "openassessmentblock/student_training/student_training.html",
"expected_context": {
"training_num_completed": 0,
"training_num_current": 1,
"training_num_available": 2,
"training_due": "9999-01-01T00:00:00+00:00",
"training_essay": "This is my answer.",
"training_rubric": {
"id": 2,
......
......@@ -92,6 +92,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
}
expected_context["training_num_completed"] = 1
expected_context["training_num_current"] = 2
expected_context["training_essay"] = u"тєѕт αηѕωєя"
self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
......