Commit 12518162 by Stephen Sanchez, committed by Brian Talbot

Sass cleaning and date visibility.

parent 0d105e13
......@@ -7,7 +7,7 @@ the workflow for a given submission.
import logging
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from django.db import DatabaseError, IntegrityError
from dogapi import dog_stats_api
from openassessment.assessment.models import (
......@@ -588,6 +588,8 @@ def create_peer_workflow(submission_uuid):
submission_uuid=submission_uuid
)
return workflow
except IntegrityError:
return PeerWorkflow.objects.get(submission_uuid)
except DatabaseError:
error_message = _(
u"An internal error occurred while creating a new peer "
......
......@@ -32,7 +32,10 @@
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "In Progress" %}</span>
<span class="copy">{% trans "In Progress" %}
(<span class="step__status__value--completed">{{ training_num_completed }}</span> of
<span class="step__status__value--required">{{ training_num_available }}</span>)
</span>
</span>
</span>
{% endblock %}
......@@ -61,7 +64,10 @@
<div class="step__content">
<article class="student-training__display" id="student-training">
<header class="student-training__display__header">
<h3 class="student-training__display__title">{% trans "Training Essay" %}</h3>
<h3 class="student-training__display__title">{% trans "Training Essay #" %}
<span class="student-training__number--current">{{ training_num_current }}</span> of
<span class="student-training__number--required">{{ training_num_available }}</span>
</h3>
</header>
<div class="student-training__display__response">
......@@ -79,21 +85,21 @@
<span class="question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="step__message message message--correct ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Correct Selection" %}</h3>
<div class="ui-toggle-visibility__content">
<div class="step__message message message--correct ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Correct Selection" %}</h3>
<div class="message__content">
<p>{% trans "Your selection matches staff." %}</p>
<div class="message__content">
<p>{% trans "Your selection matches staff." %}</p>
</div>
</div>
</div>
<div class="step__message message message--incorrect ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Incorrect Selection" %}</h3>
<div class="step__message message message--incorrect ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Incorrect Selection" %}</h3>
<div class="message__content">
<p>{% trans "Your selection does not match staff." %}</p>
<div class="message__content">
<p>{% trans "Your selection does not match staff." %}</p>
</div>
</div>
</div>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
......@@ -123,7 +129,7 @@
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not submit your assessment" %}</h3>
<h3 class="message__title">{% trans "We could not check your assessment" %}</h3>
<div class="message__content"></div>
</div>
......
......@@ -8,7 +8,6 @@ import logging
from django.db import DatabaseError
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import student_training as student_training
from openassessment.assessment.errors import PeerAssessmentError
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep
......@@ -324,8 +323,14 @@ def update_from_assessments(submission_uuid, assessment_requirements):
"""
workflow = _get_workflow_model(submission_uuid)
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
try:
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
except PeerAssessmentError as err:
err_msg = u"Could not update assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
def get_status_counts(course_id, item_id, steps):
......
......@@ -151,6 +151,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
self.STATUS.waiting # if nothing's left to complete, we're waiting
)
# If the submitter is beginning peer assessment, add them to the queue
# by creating a new peer workflow
if new_status == "peer":
peer_api.create_peer_workflow(self.submission_uuid)
# If the submitter has done all they need to do, let's check to see if
# all steps have been fully assessed (i.e. we can score it).
if (new_status == self.STATUS.waiting and
......
......@@ -8,7 +8,7 @@
}
}
},
"both": {
"peer_and_self": {
"steps": ["peer", "self"],
"requirements": {
"peer": {
......@@ -23,5 +23,40 @@
"requirements": {
"self": {}
}
},
"training_peer": {
"steps": ["training", "peer"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"training_self": {
"steps": ["training", "self"],
"requirements": {
"training": {
"num_required": 2
},
"self": {}
}
},
"training_peer_self": {
"steps": ["training", "peer", "self"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
}
}
\ No newline at end of file
......@@ -9,6 +9,7 @@ from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
ITEM_1 = {
......@@ -43,6 +44,56 @@ class TestAssessmentWorkflowApi(CacheResetTest):
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
# Test that the Peer Workflow is, or is not created, based on when peer
# is a step in the workflow.
if "peer" == first_step:
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
else:
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
def test_update_peer_workflow(self):
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"])
StudentTrainingWorkflow.get_or_create_workflow(submission_uuid=submission["uuid"])
requirements = {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
workflow_keys = set(workflow.keys())
self.assertEqual(
workflow_keys,
{
'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
}
)
self.assertEqual(workflow["submission_uuid"], submission["uuid"])
self.assertEqual(workflow["status"], "training")
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
workflow_from_get = workflow_api.get_workflow_for_submission(
submission["uuid"], requirements
)
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
requirements["training"]["num_required"] = 0
workflow = workflow_api.update_from_assessments(submission["uuid"], requirements)
# New step is Peer, and a Workflow has been created.
self.assertEqual(workflow["status"], "peer")
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
@ddt.file_data('data/assessments.json')
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist
......
......@@ -1352,7 +1352,6 @@ hr.divider,
-moz-transition: opacity 0.5s ease-in 0, max-height 0.25s ease-in 0;
transition: opacity 0.5s ease-in 0, max-height 0.25s ease-in 0;
max-height: 40000px;
overflow: auto;
opacity: 1.0;
padding-left: 2px;
padding-right: 2px; }
......@@ -2115,14 +2114,24 @@ hr.divider,
overflow: visible; }
.step--student-training .student-training__assessment .assessment__rubric__question--feedback textarea {
min-height: 100px; }
.step--student-training .is--hidden {
height: 0;
width: 0;
padding: 0 0 0 0; }
.step--student-training .is--hidden .step__header {
padding-bottom: 0;
border-bottom: none;
margin-bottom: 0; }
.step--student-training .message--correct {
margin-top: 20px; }
.step--student-training .message--correct.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0; }
.step--student-training .message--correct.is--hidden .step__header {
border-bottom: none; }
.step--student-training .message--incorrect {
margin-top: 20px; }
.step--student-training .message--incorrect.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0; }
.step--student-training .message--incorrect.is--hidden .step__header {
border-bottom: none; }
.openassessment .self-assessment__display__header, .openassessment .peer-assessment__display__header, .openassessment .step__header {
margin-bottom: 0 !important;
......
......@@ -96,7 +96,7 @@ OpenAssessment.BaseView.prototype = {
if (type == 'save') {
container = '.response__submission__actions';
}
else if (type == 'submit' || type == 'peer' || type == 'self') {
else if (type == 'submit' || type == 'peer' || type == 'self' || type == 'student-training') {
container = '.step__actions';
}
else if (type == 'feedback_assess') {
......
......@@ -113,10 +113,14 @@ OpenAssessment.Rubric.prototype = {
criterion. For each correction provided, the associated criterion will have
an appropriate message displayed.
Args: Corrections (list): A list of corrections to the rubric criteria that
Args:
Corrections (list): A list of corrections to the rubric criteria that
did not match the expected selected options.
Returns:
True if there were errors found, False if there are no corrections.
**/
updateRubric: function(corrections) {
showCorrections: function(corrections) {
var selector = "input[type=radio]";
var hasErrors = false;
// Display appropriate messages for each selection
......
......@@ -303,13 +303,14 @@ OpenAssessment.Server.prototype = {
values are the option text the user selected for the criterion.
Returns:
A JQuery promise, which resolves with a boolean if successful
and fails with an error message otherwise.
A JQuery promise, which resolves with a list of corrections if
successful and fails with an error message otherwise.
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.trainingAssess(options).done(
function(isCorrect) { console.log("Success!"); }
function(corrections) { console.log("Success!"); }
alert(corrections);
).fail(
function(errorMsg) { console.log(errorMsg); }
);
......
......@@ -87,7 +87,7 @@ OpenAssessment.StudentTrainingView.prototype = {
var incorrect = $("#openassessment__student-training--incorrect", this.element);
var instructions = $("#openassessment__student-training--instructions", this.element);
if (!view.rubric.updateRubric(corrections)) {
if (!view.rubric.showCorrections(corrections)) {
baseView.loadAssessmentModules();
incorrect.addClass("is--hidden");
instructions.removeClass("is--hidden");
......
......@@ -44,7 +44,6 @@
@include transition(opacity $tmg-f1 ease-in 0, max-height $tmg-f2 ease-in 0);
@extend %trans-opacity;
max-height: ($baseline-v*2000);
overflow: auto;
opacity: 1.0;
padding-left: ($baseline-h/20);
padding-right: ($baseline-h/20);
......
......@@ -135,22 +135,36 @@
// TYPE: correct
.message--correct {
@extend .message--complete;
margin-top: $baseline-v;
&.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0;
.step__header {
border-bottom: none;
}
}
}
// TYPE: incorrect
.message--incorrect {
@extend .message--incomplete;
}
margin-top: $baseline-v;
// Stolen from oa_base is--collapsed.
.is--hidden {
height: 0;
width: 0;
padding: 0 0 0 0;
.step__header {
padding-bottom: 0;
border-bottom: none;
margin-bottom: 0;
&.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0;
.step__header {
border-bottom: none;
}
}
}
}
......@@ -78,7 +78,7 @@
</example>
</assessment>
<assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
start="2013-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="1"
must_be_graded_by="1" />
......
......@@ -7,6 +7,7 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
......@@ -97,11 +98,13 @@ class StudentTrainingMixin(object):
if not training_module:
return template, context
context['training_due'] = due_date
if due_date < DISTANT_FUTURE:
context['training_due'] = due_date
# Report progress in the student training workflow (completed X out of Y)
context['training_num_available'] = len(training_module["examples"])
context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
context['training_num_current'] = context['training_num_completed'] + 1
# Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected.
......
......@@ -3,8 +3,8 @@
"expected_template": "openassessmentblock/student_training/student_training.html",
"expected_context": {
"training_num_completed": 0,
"training_num_current": 1,
"training_num_available": 2,
"training_due": "9999-01-01T00:00:00+00:00",
"training_essay": "This is my answer.",
"training_rubric": {
"id": 2,
......
......@@ -92,6 +92,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
}
expected_context["training_num_completed"] = 1
expected_context["training_num_current"] = 2
expected_context["training_essay"] = u"тєѕт αηѕωєя"
self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment