Commit 7d599598 by Andy Armstrong

Complete ORA staff regrading UI

TNL-3467
parent 352ff90e
......@@ -6,11 +6,11 @@
{% for criterion in rubric_criteria %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
id="{{ rubric_type }}__assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="icon fa fa-caret-right" aria-hidden="true"></i>
<span id="assessment__rubric__prompt--{{ criterion.order_num }}" class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
<span id="{{ rubric_type }}__assessment__rubric__prompt--{{ criterion.order_num }}" class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
......@@ -21,11 +21,11 @@
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
id="{{ rubric_type }}__assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}"
aria-labelledby="assessment__rubric__prompt--{{ criterion.order_num }}"/>
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
aria-labelledby="{{ rubric_type }}__assessment__rubric__prompt--{{ criterion.order_num }}"/>
<label for="{{ rubric_type }}__assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label"
>{{ option.label }}</label>
</div>
......@@ -39,9 +39,9 @@
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<label for="{{ rubric_type }}__assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
id="{{ rubric_type }}__assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
......@@ -56,15 +56,16 @@
</div>
</li>
{% endfor %}
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback" id="assessment__rubric__question--feedback">
<label class="question__title" for="assessment__rubric__question--feedback__value">
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback">
<label class="question__title" for="{{ rubric_type }}__assessment__rubric__question--feedback__value">
<span class="question__title__copy">{% trans rubric_feedback_prompt %}</span>
</label>
<div class="wrapper--input">
<textarea
id="assessment__rubric__question--feedback__value"
placeholder="{{ rubric_feedback_default_text }}"
id="{{ rubric_type }}__assessment__rubric__question--feedback__value"
class="assessment__rubric__question--feedback__value"
placeholder="{% trans rubric_feedback_default_text %}"
maxlength="500"
>
</textarea>
......
......@@ -21,7 +21,7 @@
{% trans "View the file associated with this submission." %}
</a>
{% if show_warning %}
<p class="submission_file_warning">{% trans "(Caution: This file was uploaded by another course learner and has not been verified, screened, approved, reviewed or endorsed by edX. If you decide to access it, you do so at your own risk.)" %}</p>
<p class="submission_file_warning">{% trans "(Caution: This file was uploaded by another course learner and has not been verified, screened, approved, reviewed, or endorsed by edX. If you decide to access it, you do so at your own risk.)" %}</p>
{% endif %}
{% endif %}
</div>
......
......@@ -72,7 +72,7 @@
</div>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
{% include "openassessmentblock/oa_rubric.html" %}
{% include "openassessmentblock/oa_rubric.html" with rubric_type="peer" %}
</form>
</article>
</li>
......
......@@ -62,7 +62,7 @@
{% for criterion in rubric_criteria %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
id="peer__assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="icon fa fa-caret-right" aria-hidden="true"></i>
......@@ -77,10 +77,10 @@
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
id="peer__assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
<label for="peer__assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label"
>{{ option.label }}</label>
</div>
......@@ -94,9 +94,9 @@
{% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback">
<div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<label for="peer__assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
<textarea
id="assessment__rubric__question--{{ criterion.order_num }}__feedback"
id="peer__assessment__rubric__question--{{ criterion.order_num }}__feedback"
class="answer__value"
value="{{ criterion.name }}"
name="{{ criterion.name }}"
......@@ -112,13 +112,13 @@
</li>
{% endfor %}
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback" id="assessment__rubric__question--feedback">
<label class="question__title" for="assessment__rubric__question--feedback__value">
<li class="wrapper--input field field--textarea assessment__rubric__question assessment__rubric__question--feedback">
<label class="question__title" for="peer__assessment__rubric__question--feedback__value">
<span class="question__title__copy">{{ rubric_feedback_prompt }}</span>
</label>
<div class="wrapper--input">
<textarea
id="assessment__rubric__question--feedback__value"
id="peer__assessment__rubric__question--feedback__value"
placeholder="{{ rubric_feedback_default_text }}"
maxlength="500"
>
......
......@@ -3,16 +3,16 @@
{% load tz %}
{% block list_item %}
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility has--error">
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility has--error">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="icon fa fa-exclamation-triangle" aria-hidden="true"></i>
<span class="copy">{% trans "Cancelled" %}</span>
</span>
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="icon fa fa-exclamation-triangle" aria-hidden="true"></i>
<span class="copy">{% trans "Cancelled" %}</span>
</span>
</span>
{% endblock %}
......@@ -24,26 +24,17 @@
<h3 class="message__title">{% trans "Submission Cancelled" %}</h3>
<div class="message__content">
<p>
{% blocktrans with removed_datetime=workflow_cancellation.created_at|utc|date:"N j, Y H:i e" removed_by_username=workflow_cancellation.cancelled_by %}
Your submission has been cancelled by {{ removed_by_username }} on {{ removed_datetime }}
{% endblocktrans %}
<br>
<!-- Comments: Reason for Cancellation-->
{% blocktrans with comments=workflow_cancellation.comments %}
Comments: {{ comments }}
{% endblocktrans %}
</p>
</div>
<div class="step__content">
<article class="submission__answer__display">
<h3 class="submission__answer__display__title">{% trans "Your Response" %}</h3>
<div class="submission__answer__display__content">
{{ student_submission.answer.text|linebreaks }}
</div>
</article>
<p>
{% blocktrans with removed_datetime=workflow_cancellation.cancelled_at|utc|date:"N j, Y H:i e" removed_by_username=workflow_cancellation.cancelled_by %}
Your submission has been cancelled by {{ removed_by_username }} on {{ removed_datetime }}
{% endblocktrans %}
</p>
<p>
<!-- Comments: Reason for Cancellation -->
{% blocktrans with comments=workflow_cancellation.comments %}
Comments: {{ comments }}
{% endblocktrans %}
</p>
</div>
</div>
</div>
......
......@@ -49,7 +49,7 @@
</article>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
{% include "openassessmentblock/oa_rubric.html" %}
{% include "openassessmentblock/oa_rubric.html" with rubric_type="self" %}
</form>
</div>
......
{% load tz %}
{% load i18n %}
{% spaceless %}
......@@ -6,7 +5,7 @@
<div class="ui-toggle-visibility__content">
<div class="wrapper--staff-assessment">
<div class="step__instruction">
<p>{% trans "Allows you to override the current learner's grade using the problem's rubric." %}</p>
<p>{% trans "Override this learner's current grade using the problem's rubric." %}</p>
</div>
<div class="step__content">
......@@ -26,7 +25,7 @@
</div>
<form class="staff-assessment__assessment" method="post">
{% include "openassessmentblock/oa_rubric.html" with rubric_feedback_prompt="(Optional) What aspects of this response stood out to you? What did it do well? How could it improve?" rubric_feedback_default_text="I noticed that this response..." %}
{% include "openassessmentblock/oa_rubric.html" with rubric_type="staff" rubric_feedback_prompt="(Optional) What aspects of this response stood out to you? What did it do well? How could it improve?" rubric_feedback_default_text="I noticed that this response..." %}
</form>
</article>
</div>
......@@ -42,6 +41,8 @@
<button type="submit" class="action action--submit is--disabled">
<span class="copy">{% trans "Submit your assessment" %}</span>
</button>
<div class="staff-override-error"></div>
</li>
</ul>
</div>
......
......@@ -83,7 +83,7 @@
{% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
id="training__assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control">
<i class="icon fa fa-caret-right" aria-hidden="true"></i>
......@@ -111,7 +111,7 @@
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
id="training__assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
......
......@@ -394,10 +394,10 @@ def cancel_workflow(submission_uuid, comments, cancelled_by_id, assessment_requi
def get_assessment_workflow_cancellation(submission_uuid):
"""
Get cancellation information for a assessment workflow.
Get cancellation information for an assessment workflow.
Args:
submission_uuid (str): The UUID of assessment workflow.
submission_uuid (str): The UUID of the submission.
"""
try:
workflow_cancellation = AssessmentWorkflowCancellation.get_latest_workflow_cancellation(submission_uuid)
......
......@@ -378,7 +378,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# A staff step must always be available, to allow for staff overrides
try:
self.steps.get(name=self.STATUS.staff)
except AssessmentWorkflowStep.DoesNotExist:
except AttributeError:
for step in list(self.steps.all()):
step.order_num += 1
self.steps.add(
......
......@@ -24,7 +24,6 @@ from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.fileupload import api as file_api
from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflowCancellation
from openassessment.fileupload import exceptions as file_exceptions
......@@ -232,7 +231,8 @@ class StaffAreaMixin(object):
Args:
student_username (unicode): The username of the student to report.
expanded_view (str): An optional view to be shown initially expanded.
The default is None meaning that all views are shown collapsed.
"""
submission_uuid = None
submission = None
......@@ -270,8 +270,8 @@ class StaffAreaMixin(object):
example_based_assessment = None
self_assessment = None
peer_assessments = []
submitted_assessments = []
peer_assessments = None
submitted_assessments = None
if "peer-assessment" in assessment_steps:
peer_assessments = peer_api.get_assessments(submission_uuid)
......@@ -285,22 +285,13 @@ class StaffAreaMixin(object):
workflow = self.get_workflow_info(submission_uuid=submission_uuid)
workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
if workflow_cancellation:
workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])
# Get the date that the workflow was cancelled to use in preference to the serialized date string
cancellation_model = AssessmentWorkflowCancellation.get_latest_workflow_cancellation(submission_uuid)
workflow_cancelled_at = cancellation_model.created_at
else:
workflow_cancelled_at = None
workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)
context = {
'submission': create_submission_dict(submission, self.prompts) if submission else None,
'score': workflow.get('score'),
'workflow_status': workflow.get('status'),
'workflow_cancellation': workflow_cancellation,
'workflow_cancelled_at': workflow_cancelled_at,
'peer_assessments': peer_assessments,
'submitted_assessments': submitted_assessments,
'self_assessment': self_assessment,
......
......@@ -86,11 +86,13 @@
{
"template": "openassessmentblock/response/oa_response.html",
"context": {
"saved_response": {"answer":
{"parts": [
{ "text": "", "prompt": { "description": "Prompt 1" }},
{ "text": "", "prompt": { "description": "Prompt 2" }}
]}
"saved_response": {
"answer": {
"parts": [
{ "text": "", "prompt": { "description": "Prompt 1" }},
{ "text": "", "prompt": { "description": "Prompt 2" }}
]
}
},
"save_status": "This response has not been saved.",
"submit_enabled": false,
......@@ -585,12 +587,12 @@
}
],
"template": {
"answer": {
"parts": [
{ "text": ""},
{ "text": ""}
]
},
"answer": {
"parts": [
{ "text": ""},
{ "text": ""}
]
},
"criteria": [
{
"name": "criterion_with_two_options",
......@@ -684,6 +686,103 @@
{
"template": "openassessmentblock/staff_area/oa_student_info.html",
"context": {
"rubric_criteria": [
{
"name": "vocabulary",
"prompt": "vocabulary",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "grammar",
"prompt": "grammar",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "feedback_only",
"prompt": "Feedback only, no options!",
"order_num": 2,
"feedback": "required",
"options": []
}
],
"peer_assessments": [
{
"submission_uuid": "52d2158a-c568-11e3-b9b9-28cfe9182465",
"points_earned": 5,
"points_possible": 6,
"rubric": {
"criteria": [
{
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 1,
"points": 1,
"name": "Fair"
}
],
"points_possible": 2
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 2,
"points": 2,
"name": "Good"
}
],
"points_possible": 2
}
]
}
}
],
"submission": {
"image_url": "/test-url",
"answer":{
......@@ -695,6 +794,85 @@
"output": "oa_student_info.html"
},
{
"template": "openassessmentblock/staff_area/oa_student_info.html",
"context": {
"submission": {
"image_url": "/test-url",
"answer": {
"text": "testing response text"
}
},
"workflow_cancellation": {
"cancelled_by": "staff",
"cancelled_at": "2015-10-01T04:53",
"comments": "Cancelled!"
}
},
"output": "oa_staff_cancelled_submission.html"
},
{
"template": "openassessmentblock/staff_area/oa_student_info.html",
"context": {
"rubric_criteria": [
{
"name": "vocabulary",
"prompt": "vocabulary",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "grammar",
"prompt": "grammar",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "feedback_only",
"prompt": "Feedback only, no options!",
"order_num": 2,
"feedback": "required",
"options": []
}
],
"submission": {
"image_url": "/test-url",
"answer": {
"text": "testing response text"
}
},
"score": {
"points_earned": 1,
"points_possible": 2
},
"workflow_status": "done",
"expanded_view": "final-grade"
},
"output": "oa_staff_graded_submission.html"
},
{
"template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": {
"rubric_criteria": [
......
......@@ -126,8 +126,7 @@ OpenAssessment.BaseView.prototype = {
else {
// Insert the error message
var msgHtml = (message === null) ? "" : message;
$(container + " .message__content", element).html('<p>' + msgHtml + '</p>');
$(container + " .message__content", element).html('<p>' + (message ? _.escape(message) : "") + '</p>');
// Toggle the error class
$(container, element).toggleClass('has--error', message !== null);
}
......@@ -146,7 +145,7 @@ OpenAssessment.BaseView.prototype = {
var $container = $('#openassessment__' + stepName);
$container.toggleClass('has--error', true);
$container.find('.step__status__value i').removeClass().addClass('icon fa fa-exclamation-triangle');
$container.find('.step__status__value .copy').html(errorMessage);
$container.find('.step__status__value .copy').html(_.escape(errorMessage));
}
};
......
......@@ -149,6 +149,7 @@ OpenAssessment.PeerView.prototype = {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled);
return enabled;
}
},
......
......@@ -168,6 +168,7 @@ OpenAssessment.ResponseView.prototype = {
return !sel.hasClass('is--disabled');
} else {
sel.toggleClass('is--disabled', !enabled);
return enabled;
}
},
......@@ -229,7 +230,7 @@ OpenAssessment.ResponseView.prototype = {
// Setting the HTML will overwrite the screen reader tag,
// so prepend it to the message.
var label = gettext("Status of Your Response");
sel.html('<span class="sr">' + label + ':' + '</span>\n' + msg);
sel.html('<span class="sr">' + _.escape(label) + ':' + '</span>\n' + msg);
}
},
......
......@@ -61,7 +61,7 @@ OpenAssessment.Rubric.prototype = {
**/
overallFeedback: function(overallFeedback) {
var selector = '#assessment__rubric__question--feedback__value';
var selector = '.assessment__rubric__question--feedback__value';
if (typeof overallFeedback === 'undefined') {
return $(selector, this.element).val();
}
......
......@@ -90,6 +90,7 @@ OpenAssessment.SelfView.prototype = {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !enabled);
return enabled;
}
},
......
......@@ -6,8 +6,8 @@ from xblock.core import XBlock
from submissions import api
from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.exceptions import FileUploadError
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
from data_conversion import create_submission_dict, prepare_submission_for_serialization
......@@ -421,18 +421,12 @@ class SubmissionMixin(object):
context['save_status'] = self.save_status
context['submit_enabled'] = self.saved_response != ''
path = "openassessmentblock/response/oa_response.html"
elif workflow["status"] == "cancelled":
workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(self.submission_uuid)
if workflow_cancellation:
workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])
context['workflow_cancellation'] = workflow_cancellation
context["workflow_cancellation"] = self.get_workflow_cancellation_info(self.submission_uuid)
context["student_submission"] = self.get_user_submission(
workflow["submission_uuid"]
)
path = 'openassessmentblock/response/oa_response_cancelled.html'
elif workflow["status"] == "done":
student_submission = self.get_user_submission(
workflow["submission_uuid"]
......
......@@ -231,7 +231,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
path, context = xblock.get_student_info_path_and_context("Bob")
self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
self.assertEquals([], context['peer_assessments'])
self.assertEquals(None, context['peer_assessments'])
self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)
@scenario('data/basic_scenario.xml', user_id='Bob')
......
......@@ -318,23 +318,17 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
@scenario('data/submission_open.xml', user_id="Bob")
def test_cancelled_submission(self, xblock):
student_item = xblock.get_student_item_dict()
mock_staff = Mock(name='Bob')
xblock.get_username = Mock(return_value=mock_staff)
submission = xblock.create_submission(
student_item,
('A man must have a code', 'A man must have an umbrella too.')
)
xblock.get_workflow_info = Mock(return_value={
'status': 'cancelled',
'submission_uuid': submission['uuid']
})
xblock.get_username = Mock(return_value='Bob')
workflow_api.get_assessment_workflow_cancellation = Mock(return_value={
'comments': 'Inappropriate language',
'cancelled_by_id': 'Bob',
'created_at': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'cancelled_by': 'Bob'
})
workflow_api.cancel_workflow(
submission_uuid=submission['uuid'], comments='Inappropriate language',
cancelled_by_id='Bob',
assessment_requirements=xblock.workflow_requirements()
)
self._assert_path_and_context(
xblock, 'openassessmentblock/response/oa_response_cancelled.html',
......@@ -347,9 +341,9 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
'student_submission': submission,
'workflow_cancellation': {
'comments': 'Inappropriate language',
'cancelled_at': xblock.get_workflow_cancellation_info(submission['uuid']).get('cancelled_at'),
'cancelled_by_id': 'Bob',
'created_at': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
'cancelled_by': 'Bob'
'cancelled_by': mock_staff
}
}
)
......@@ -498,6 +492,7 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
"""
path, context = xblock.submission_path_and_context()
self.maxDiff = None # Show a full diff
self.assertEqual(path, expected_path)
self.assertEqual(context, expected_context)
......
......@@ -3,7 +3,9 @@ Handle OpenAssessment XBlock requests to the Workflow API.
"""
from xblock.core import XBlock
from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflowCancellation
from openassessment.xblock.data_conversion import create_rubric_dict
......@@ -106,7 +108,7 @@ class WorkflowMixin(object):
if submission_uuid is None:
submission_uuid = self.submission_uuid
if submission_uuid:
if submission_uuid is not None:
requirements = self.workflow_requirements()
workflow_api.update_from_assessments(submission_uuid, requirements)
......@@ -125,9 +127,9 @@ class WorkflowMixin(object):
Raises:
AssessmentWorkflowError
"""
if not submission_uuid:
if submission_uuid is None:
submission_uuid = self.submission_uuid
if not submission_uuid:
if submission_uuid is None:
return {}
return workflow_api.get_workflow_for_submission(
submission_uuid, self.workflow_requirements()
......@@ -179,3 +181,26 @@ class WorkflowMixin(object):
for ra in self.valid_assessments
if ra['name'] in self.ASSESSMENT_STEP_NAMES
]
def get_workflow_cancellation_info(self, submission_uuid):
    """
    Return cancellation information for a particular submission.

    Args:
        submission_uuid (str): The UUID of the submission to return
            cancellation information for.

    Returns:
        dict or None: The cancellation information, augmented with the
            username of the staff member who cancelled the submission
            (under 'cancelled_by') and the cancellation datetime taken
            from the model (under 'cancelled_at'), or None if the
            submission has not been cancelled.
    """
    cancellation_info = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
    if not cancellation_info:
        return None

    # Resolve the staff member's id to a username for display.
    cancellation_info['cancelled_by'] = self.get_username(cancellation_info['cancelled_by_id'])

    # Prefer the model's datetime object over the serialized date string.
    # pop() (rather than del) tolerates a payload without 'created_at'
    # instead of raising KeyError.
    cancellation_info.pop('created_at', None)
    cancellation_model = AssessmentWorkflowCancellation.get_latest_workflow_cancellation(submission_uuid)
    if cancellation_model:
        cancellation_info['cancelled_at'] = cancellation_model.created_at
    return cancellation_info
......@@ -3,6 +3,7 @@
"version": "0.2.0",
"repository": "https://github.com/edx/edx-ora2.git",
"devDependencies": {
"underscore": "^1.8.2",
"karma": "^0.12.16",
"karma-coverage": "^0.2.6",
"karma-jasmine": "^0.3.6",
......
#!/usr/bin/env bash
# Run the full test suite: Python tests, template rendering, JavaScript tests.
# set -e makes the script exit with an error code on the first failing
# command, which is needed to fail the Travis build.
set -e

# Run from the repository root regardless of the caller's cwd.
# $(...) replaces deprecated backticks, and quoting "${BASH_SOURCE[0]}"
# keeps paths containing spaces intact.
cd "$(dirname "${BASH_SOURCE[0]}")" && cd ..

# Default to the coverage-enabled settings unless the caller has already
# set DJANGO_SETTINGS_MODULE in the environment.
export DJANGO_SETTINGS_MODULE=${DJANGO_SETTINGS_MODULE:-"settings.test_with_coverage"}

# Forward an optional test selector; ${1:+"$1"} passes the argument
# quoted when present and passes nothing at all when it is absent,
# matching the original unquoted-$1 behavior for the no-argument case.
./scripts/test-python.sh ${1:+"$1"}
./scripts/render-templates.sh
./scripts/test-js.sh
......@@ -167,7 +167,7 @@ class AssessmentMixin(object):
"""
Mixin for interacting with the assessment rubric.
"""
def assess(self, options_selected):
def assess(self, assessment_type, options_selected):
"""
Create an assessment.
......@@ -183,7 +183,8 @@ class AssessmentMixin(object):
"""
for criterion_num, option_num in enumerate(options_selected):
sel = "#assessment__rubric__question--{criterion_num}__{option_num}".format(
sel = "#{assessment_type}__assessment__rubric__question--{criterion_num}__{option_num}".format(
assessment_type=assessment_type,
criterion_num=criterion_num,
option_num=option_num
)
......
......@@ -103,7 +103,7 @@ class OpenAssessmentTest(WebAppTest):
# Submit a self-assessment
self.self_asmnt_page.wait_for_page().wait_for_response()
self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
self.self_asmnt_page.assess(self.OPTIONS_SELECTED).wait_for_complete()
self.self_asmnt_page.assess("self", self.OPTIONS_SELECTED).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
# Verify the grade
......@@ -170,7 +170,7 @@ class PeerAssessmentTest(OpenAssessmentTest):
self.submission_page.visit().submit_response(self.SUBMISSION)
# Assess the submission (there should be at least one available)
self.peer_asmnt_page.wait_for_page().wait_for_response().assess(self.OPTIONS_SELECTED)
self.peer_asmnt_page.wait_for_page().wait_for_response().assess("peer", self.OPTIONS_SELECTED)
# Check that the status indicates we've assessed one submission
try:
......@@ -208,7 +208,7 @@ class StudentTrainingTest(OpenAssessmentTest):
msg = "Did not complete at least {num} student training example(s).".format(num=example_num)
self.fail(msg)
self.student_training_page.wait_for_page().wait_for_response().assess(options_selected)
self.student_training_page.wait_for_page().wait_for_response().assess("training", options_selected)
# Check browser scrolled back to top only on first example
......@@ -365,11 +365,11 @@ class StaffAreaTest(OpenAssessmentTest):
self.staff_area_page.verify_learner_final_score("Final grade: 6 out of 8")
# Do staff override and wait for final score to change.
self.staff_area_page.assess([0, 1])
self.staff_area_page.assess("staff", [0, 1])
# Verify that the new student score is different from the original one.
# TODO: uncomment this after hooked up to the API. Also verify other state if appropriate.
# self.staff_area_page.verify_learner_final_score("Final grade: 1 out of 8")
# Unfortunately there is no indication presently that this was a staff override.
self.staff_area_page.verify_learner_final_score("Final grade: 1 out of 8")
@retry()
@attr('acceptance')
......@@ -399,7 +399,7 @@ class StaffAreaTest(OpenAssessmentTest):
self.staff_area_page.verify_learner_final_score(
"The learner's submission has been removed from peer assessment. "
"The learner receives a grade of zero unless you reset the learner's attempts for the "
"The learner receives a grade of zero unless you delete the learner's state for the "
"problem to allow them to resubmit a response."
)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment