Commit 756e07c5 by Andy Armstrong

Add staff grade details to "Your Grade" section

TNL-3465
parent c10b1f61
{% load i18n %}
{% spaceless %}
{% if assessment %}
<li class="answer feedback">
{% if assessment.individual_assessments %}
{% for individual_assessment in assessment.individual_assessments %}
{% if individual_assessment.feedback %}
<h5 class="answer_title">
<span class="answer__source">
{% if individual_assessment.option %}
{% blocktrans with title=individual_assessment.title grade=individual_assessment.option.label start_tag='<span class="answer__source__value">'|safe end_tag="</span>"|safe %}
{{ start_tag }}{{ title }}{{ end_tag }} - {{ grade }}
{% endblocktrans %}
{% else %}
<span class="answer__source__value">
{{ individual_assessment.title }}
</span>
{% endif %}
</span>
</h5>
<div class="feedback__value">
<p class="feedback__value__raw">{{ individual_assessment.feedback }}</p>
</div>
{% endif %}
{% endfor %}
{% else %}
<h5 class="answer_title">
<span class="answer__source">
<span class="answer__source__value">{{ title }}</span>
</span>
</h5>
<div class="feedback__value">
<p class="feedback__value__raw">{{ assessment.feedback }}</p>
</div>
{% endif %}
</li>
{% endif %}
{% endspaceless %}
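For orientation, the feedback template above loops over `assessment.individual_assessments`, printing each entry's title (with its option label, when one exists) and its feedback; when that list is absent it falls back to a separately supplied `title` plus `assessment.feedback`. A context shaped roughly like the following would exercise the per-assessment branch. The field names are inferred from the template and the values are invented, so the actual context assembled by the XBlock may differ.
# Illustrative only: a hypothetical context for the feedback template above.
feedback_context = {
    'assessment': {
        'individual_assessments': [
            {
                'title': 'Staff Grade',
                'option': {'label': 'Good'},     # renders "Staff Grade - Good"
                'feedback': 'Staff: well done!',
            },
            {
                'title': 'Peer Median Grade',
                'option': None,                  # renders the title without a grade
                'feedback': 'Peers found the response concise.',
            },
        ],
    },
}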
{% load i18n %}
{% spaceless %}
{% if assessment %}
<li class="answer">
<h5 class="answer__title">
<span class="answer__source">
{% if assessment.points != None %}
<span class="answer__source__value answer__source__value-with-points">
{% blocktrans with assessment_title=assessment.title count points=assessment.points %}
{{ assessment_title }} - {{ points }} point
{% plural %}
{{ assessment_title }} - {{ points }} points
{% endblocktrans %}
</span>
{% else %}
<span class="answer__source__value">{{ assessment.title }}</span>
{% endif %}
</span>
<span class="answer__value">
<span class="answer__value__label sr">{{ assessment.title }}</span>
<span class="answer__value__value">
{{ assessment.option.label }}
{% if assessment.option.explanation %}
<span class="ui-hint hint--top" data-hint="{{ assessment.option.explanation }}">
<i class="icon fa fa-info-circle" aria-hidden="true"
title="{% blocktrans with name=assessment.option.label %}More information about {{ name }}{% endblocktrans %}">
</i>
</span>
{% endif %}
</span>
</span>
</h5>
</li>
{% endif %}
{% endspaceless %}
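The title template above works the same way: it prints the assessment title, appends a pluralized points count when `assessment.points` is not None, and shows the selected option's label with an info-icon hint when the option carries an explanation. A hypothetical context, again with names taken from the template and invented values:
# Illustrative only: a hypothetical context for the assessment title template above.
title_context = {
    'assessment': {
        'title': 'Staff Grade',
        'points': 1,          # None would suppress the "- N point(s)" suffix
        'option': {
            'label': 'Fair',
            'explanation': 'Meets some, but not all, of the criteria.',
        },
    },
}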
Two source diffs could not be displayed because they are too large. You can view the blobs instead.
/**
Interface for grade view.
Args:
element (DOM element): The DOM element representing the XBlock.
server (OpenAssessment.Server): The interface to the XBlock server.
baseView (OpenAssessment.BaseView): Container view.
Returns:
OpenAssessment.ResponseView
**/
* The GradeView class.
*
* @param {element} element - The DOM element representing the XBlock
* @param {OpenAssessment.Server} server - The interface to the XBlock server
* @param {OpenAssessment.BaseView} baseView - The container view.
* @constructor
*/
OpenAssessment.GradeView = function(element, server, baseView) {
this.element = element;
this.server = server;
......@@ -17,8 +14,8 @@ OpenAssessment.GradeView = function(element, server, baseView) {
OpenAssessment.GradeView.prototype = {
/**
Load the grade view.
**/
* Load the grade view.
*/
load: function() {
var view = this;
var baseView = this.baseView;
......@@ -35,8 +32,8 @@ OpenAssessment.GradeView.prototype = {
},
/**
Install event handlers for the view.
**/
* Install event handlers for the view.
*/
installHandlers: function() {
// Install a click handler for collapse/expand
var sel = $('#openassessment__grade', this.element);
......@@ -44,26 +41,18 @@ OpenAssessment.GradeView.prototype = {
// Install a click handler for assessment feedback
var view = this;
sel.find('#feedback__submit').click(function(eventObject) {
sel.find('.feedback__submit').click(function(eventObject) {
eventObject.preventDefault();
view.submitFeedbackOnAssessment();
});
},
/**
Get or set the text for feedback on assessments.
Args:
text (string or undefined): The text of the assessment to set (optional).
Returns:
string or undefined: The text of the feedback.
Example usage:
>>> view.feedbackText('I liked my assessment'); // Set the feedback text
>>> view.feedbackText(); // Retrieve the feedback text
'I liked my assessment'
**/
* Get or set the text for feedback on assessments.
*
* @param {string} text - The text of the assessment to set (optional).
* @returns {string} The text of the feedback
*/
feedbackText: function(text) {
if (typeof text === 'undefined') {
return $('#feedback__remarks__value', this.element).val();
......@@ -73,25 +62,11 @@ OpenAssessment.GradeView.prototype = {
},
/**
Get or set the options for feedback on assessments.
Args:
options (array of strings or undefined): List of options to check (optional).
Returns:
list of strings or undefined: The values of the options the user selected.
Example usage:
// Set the feedback options; all others will be unchecked
>>> view.feedbackOptions('notuseful', 'disagree');
// Retrieve the feedback options that are checked
>>> view.feedbackOptions();
[
'These assessments were not useful.',
'I disagree with the ways that my peers assessed me'
]
**/
* Get or set the options for feedback on assessments.
*
* @param {dict} options - List of options to check (optional).
* @returns {list} - The values of the options the user selected.
*/
feedbackOptions: function(options) {
var view = this;
if (typeof options === 'undefined') {
......@@ -111,60 +86,40 @@ OpenAssessment.GradeView.prototype = {
},
/**
Hide elements, including setting the aria-hidden attribute for screen readers.
Args:
sel (JQuery selector): The selector matching elements to hide.
hidden (boolean): Whether to hide or show the elements.
Returns:
undefined
**/
setHidden: function(sel, hidden) {
sel.toggleClass('is--hidden', hidden);
sel.attr('aria-hidden', hidden ? 'true' : 'false');
* Hide elements, including setting the aria-hidden attribute for screen readers.
*
* @param {JQuery.selector} selector - The selector matching the elements to hide.
* @param {boolean} hidden - Whether to hide or show the elements.
*/
setHidden: function(selector, hidden) {
selector.toggleClass('is--hidden', hidden);
selector.attr('aria-hidden', hidden ? 'true' : 'false');
},
/**
Check whether elements are hidden.
Args:
sel (JQuery selector): The selector matching elements to hide.
Returns:
boolean
**/
isHidden: function(sel) {
return sel.hasClass('is--hidden') && sel.attr('aria-hidden') === 'true';
* Check whether elements are hidden.
*
* @param {JQuery.selector} selector - The selector matching the elements to check.
* @returns {boolean} - True if all the elements are hidden, else false.
*/
isHidden: function(selector) {
return selector.hasClass('is--hidden') && selector.attr('aria-hidden') === 'true';
},
/**
Get or set the state of the feedback on assessment.
Each state corresponds to a particular configuration of attributes
in the DOM, which control what the user sees in the UI.
Valid states are:
'open': The user has not yet submitted feedback on assessments.
'submitting': The user has submitted feedback, but the server has not yet responded.
'submitted': The feedback was successfully submitted
Args:
newState (string or undefined): One of above states.
Returns:
string or undefined: The current state.
Throws:
'Invalid feedback state' if the DOM is not in one of the valid states.
Example usage:
>>> view.feedbackState();
'open'
>>> view.feedbackState('submitted');
>>> view.feedbackState();
'submitted'
**/
* Get or set the state of the feedback on assessment.
*
* Each state corresponds to a particular configuration of attributes
* in the DOM, which control what the user sees in the UI.
*
* Valid states are:
* 'open': The user has not yet submitted feedback on assessments.
* 'submitting': The user has submitted feedback, but the server has not yet responded.
* 'submitted': The feedback was successfully submitted.
*
* @param {string} newState - the new state to set for the feedback (optional).
* @returns {*} The current state.
*/
feedbackState: function(newState) {
var containerSel = $('.submission__feedback__content', this.element);
var instructionsSel = containerSel.find('.submission__feedback__instructions');
......@@ -234,15 +189,15 @@ OpenAssessment.GradeView.prototype = {
},
/**
Send assessment feedback to the server and update the view.
**/
* Send assessment feedback to the server and update the view.
*/
submitFeedbackOnAssessment: function() {
// Send the submission to the server
var view = this;
var baseView = this.baseView;
// Disable the submission button to prevent duplicate submissions
$("#feedback__submit", this.element).toggleClass('is--disabled', true);
$(".feedback__submit", this.element).toggleClass('is--disabled', true);
// Indicate to the user that we're starting to submit
view.feedbackState('submitting');
......
......@@ -963,6 +963,11 @@
@extend %t-titlecase;
display: block;
color: $heading-secondary-color;
.answer__source__value-with-points {
@extend %t-score;
color: $heading-primary-color;
}
}
.answer__value {
......
# -*- coding: utf-8 -*-
"""
Base class for handler-level testing of the XBlock.
"""
import copy
import mock
import os.path
import json
from functools import wraps
from submissions import api as submissions_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.test_utils import CacheResetTest, TransactionCacheResetTest
from workbench.runtime import WorkbenchRuntime
import webob
# Sample peer assessments
PEER_ASSESSMENTS = [
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Good'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!'
},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
},
{
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Ġööḋ', u'Form': u'Fair'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 2: ฝﻉɭɭ ɗѻกﻉ!',
u'Form': u'Peer 2: ƒαιя נσв'
},
'overall_feedback': u'Good job!',
},
]
# Sample self assessment
SELF_ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Peer 1: ฝﻉɭɭ ɗѻกﻉ!'
},
'overall_feedback': u'єאςєɭɭєภՇ ฬ๏гк!',
}
# A sample good staff assessment
STAFF_GOOD_ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Staff: ฝﻉɭɭ ɗѻกﻉ!',
u'Form': u'Staff: ƒαιя נσв'
},
'overall_feedback': u'Staff: good job!'
}
# A sample bad staff assessment
STAFF_BAD_ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ק๏๏г', u'Form': u'Poor'},
'criterion_feedback': {
u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'Staff: ק๏๏г נσв',
u'Form': u'Staff: ק๏๏г נσв'
},
'overall_feedback': u'Staff: very poor'
}
def scenario(scenario_path, user_id=None):
"""
......@@ -172,3 +229,120 @@ class XBlockHandlerTransactionTestCase(XBlockHandlerTestCaseMixin, TransactionCa
use `XBlockHandlerTestCase` instead.
"""
pass
class SubmitAssessmentsMixin(object):
"""
A mixin for creating a submission and peer/self assessments so that the user can
receive a grade. This is useful for getting into the "waiting for peer assessment" state.
"""
maxDiff = None
PEERS = ['McNulty', 'Moreland']
SUBMISSION = (u'ՇﻉรՇ', u'รપ๒๓ٱรรٱѻก')
STEPS = ['peer', 'self']
def create_submission_and_assessments(
self, xblock, submission_text, peers, peer_assessments, self_assessment,
waiting_for_peer=False,
):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
Args:
xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
submission_text (unicode): Text of the submission from the user.
peers (list of unicode): List of user IDs of peers who will assess the user.
peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
self_assessment (dict): Dict of assessment for self-assessment.
Keyword Arguments:
waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.
Returns:
the submission
"""
# Create a submission from the user
student_item = xblock.get_student_item_dict()
student_id = student_item['student_id']
submission = xblock.create_submission(student_item, submission_text)
# Create submissions and assessments from other users
scorer_submissions = []
for scorer_name, assessment in zip(peers, peer_assessments):
# Create a submission for each scorer for the same problem
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = submissions_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
# Create an assessment of the user's submission
if not waiting_for_peer:
peer_api.create_assessment(
scorer_sub['uuid'], scorer_name,
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have our user make assessments (so she can get a score)
for assessment in peer_assessments:
peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'],
student_id,
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have the user submit a self-assessment (so she can get a score)
if self_assessment is not None:
self_api.create_assessment(
submission['uuid'], student_id, self_assessment['options_selected'],
self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}
)
return submission
def set_staff_access(self, xblock):
xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
xblock.xmodule_runtime.anonymous_student_id = 'Bob'
@staticmethod
def set_mock_workflow_info(xblock, workflow_status, status_details, submission_uuid):
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
def submit_staff_assessment(self, xblock, submission, assessment):
"""
Submits a staff assessment for the specified submission.
Args:
xblock: The XBlock being assessed.
submission: The submission being assessed.
assessment: The staff assessment.
"""
self.set_staff_access(xblock)
assessment = copy.deepcopy(assessment)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
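To show how the new mixin and sample data fit together, here is a minimal sketch of a handler test modeled on the existing staff-assessment tests; the class and test names are hypothetical, while the mixin methods, the `scenario` decorator, the scenario file, and the module-level sample assessments are the ones defined above (a real test module would import them from `.base`, as the imports further below do).
# A hypothetical test using SubmitAssessmentsMixin (sketch only).
class ExampleStaffGradeTest(XBlockHandlerTestCase, SubmitAssessmentsMixin):

    @scenario('data/grade_waiting_scenario.xml', user_id='Omar')
    def test_staff_assessment_recorded(self, xblock):
        # Create a submission that is still waiting on peer assessment.
        submission = self.create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT,
            waiting_for_peer=True,
        )
        # Grade the submission as staff; the helper asserts that the handler reported success.
        self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)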
......@@ -12,7 +12,7 @@
<criterion feedback="optional">
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<option points="4">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
......@@ -32,7 +32,7 @@
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<option points="3">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
......
......@@ -5,24 +5,17 @@ Tests for staff assessment handlers in Open Assessment XBlock.
import json
import mock
import copy
from openassessment.assessment.api import staff as staff_api
from .base import XBlockHandlerTestCase, scenario
from .test_grade import SubmitAssessmentsMixin
class StaffAssessmentTestBase(XBlockHandlerTestCase):
maxDiff = None
from openassessment.assessment.api import staff as staff_api
SUBMISSION = (u'ՇﻉรՇ', u'รપ๒๓ٱรรٱѻก')
from .base import (
scenario, SubmitAssessmentsMixin, XBlockHandlerTestCase,
PEER_ASSESSMENTS, SELF_ASSESSMENT, STAFF_GOOD_ASSESSMENT,
)
ASSESSMENT = {
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}
def set_staff_access(self, xblock):
xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
xblock.xmodule_runtime.anonymous_student_id = 'Bob'
class StaffAssessmentTestBase(XBlockHandlerTestCase, SubmitAssessmentsMixin):
maxDiff = None
def _assert_path_and_context(self, xblock, expected_context):
path, context = xblock.staff_path_and_context()
......@@ -34,23 +27,8 @@ class StaffAssessmentTestBase(XBlockHandlerTestCase):
resp = self.request(xblock, 'render_staff_assessment', json.dumps({}))
self.assertGreater(len(resp), 0)
@staticmethod
def _set_mock_workflow_info(xblock, workflow_status, status_details, submission_uuid):
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
def _submit_staff_assessment(self, xblock, submission):
# Submit a staff-assessment
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertTrue(resp['success'])
class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin):
class TestStaffAssessmentRender(StaffAssessmentTestBase):
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_grade_templates(self, xblock):
......@@ -76,7 +54,7 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin)
self._assert_path_and_context(xblock, unavailable_context)
# Submit a staff-assessment
self._submit_staff_assessment(xblock, submission)
self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)
# Staff assessment exists, still waiting for self assessment.
self._assert_path_and_context(
......@@ -91,7 +69,7 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin)
# Verify that once the required step (self assessment) is done, the staff grade is shown as complete.
status_details = {'peer': {'complete': True}}
self._set_mock_workflow_info(
self.set_mock_workflow_info(
xblock, workflow_status='done', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
......@@ -104,7 +82,7 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin)
)
# Verify that if the problem is cancelled, the staff grade reflects this.
self._set_mock_workflow_info(
self.set_mock_workflow_info(
xblock, workflow_status='cancelled', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
......@@ -118,8 +96,8 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin)
@scenario('data/grade_waiting_scenario.xml', user_id='Omar')
def test_staff_grade_templates_no_peer(self, xblock):
# Waiting to be assessed by a peer
submission = self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0], waiting_for_peer=True
submission = self.create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT, waiting_for_peer=True
)
# Waiting for a peer assessment (though it is not used because staff grading is required),
......@@ -134,7 +112,7 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin)
)
# Submit a staff-assessment. The student can now see the score even though no peer assessments have been done.
self._submit_staff_assessment(xblock, submission)
self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)
self._assert_path_and_context(
xblock,
{
......@@ -155,7 +133,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a staff-assessment
self._submit_staff_assessment(xblock, submission)
self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)
# Expect that a staff-assessment was created
assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
......@@ -164,7 +142,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'ST')
self.assertEqual(assessment['feedback'], u'')
self.assertEqual(assessment['feedback'], u'Staff: good job!')
parts = sorted(assessment['parts'])
self.assertEqual(len(parts), 2)
......@@ -187,7 +165,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
xblock.create_submission(student_item, self.SUBMISSION)
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT))
resp = self.request(xblock, 'staff_assess', json.dumps(STAFF_GOOD_ASSESSMENT))
self.assertIn("You do not have permission", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
......@@ -198,10 +176,10 @@ class TestStaffAssessment(StaffAssessmentTestBase):
submission = xblock.create_submission(student_item, self.SUBMISSION)
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
STAFF_GOOD_ASSESSMENT['submission_uuid'] = submission['uuid']
for key in self.ASSESSMENT:
assessment_copy = copy.copy(self.ASSESSMENT)
for key in STAFF_GOOD_ASSESSMENT:
assessment_copy = copy.copy(STAFF_GOOD_ASSESSMENT)
del assessment_copy[key]
resp = self.request(xblock, 'staff_assess', json.dumps(assessment_copy), response_format='json')
self.assertFalse(resp['success'])
......@@ -215,16 +193,16 @@ class TestStaffAssessment(StaffAssessmentTestBase):
submission = xblock.create_submission(student_item, self.SUBMISSION)
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
STAFF_GOOD_ASSESSMENT['submission_uuid'] = submission['uuid']
with mock.patch('openassessment.xblock.staff_assessment_mixin.staff_api') as mock_api:
# Simulate an error
mock_api.create_assessment.side_effect = staff_api.StaffAssessmentRequestError
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
resp = self.request(xblock, 'staff_assess', json.dumps(STAFF_GOOD_ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
# Simulate a different error
mock_api.create_assessment.side_effect = staff_api.StaffAssessmentInternalError
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
resp = self.request(xblock, 'staff_assess', json.dumps(STAFF_GOOD_ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('msg', resp)
......@@ -780,6 +780,11 @@ class FullWorkflowTest(OpenAssessmentTest):
)
self.staff_area_page.verify_learner_final_score(self.PEER_ASSESSMENT_STAFF_AREA_SCORE)
self.verify_grade_entries([
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor"), (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
# Now do a staff override, changing the score (to 1).
self.do_staff_override(learner)
......@@ -791,6 +796,13 @@ class FullWorkflowTest(OpenAssessmentTest):
)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
@retry()
@attr('acceptance')
def test_staff_override_at_beginning(self):
......@@ -848,6 +860,11 @@ class FullWorkflowTest(OpenAssessmentTest):
self.verify_staff_area_fields(learner, [], self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
if __name__ == "__main__":
......