Commit ee0c25a9 by cahrens, committed by Andy Armstrong

Show Staff Grade section to the learner.

TNL-3464
parent 50b39be9
{% load i18n %}
{% spaceless %}
<li id="openassessment__grade" class="openassessment__steps__step step--grade is--complete">
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="wrapper--copy">
<span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value">
<span class="grade__value__title">{% trans "Incomplete" %}</span>
<p class="grade__value__description">{% trans "You have not completed all the steps of this problem." %}</p>
</span>
</span>
</h2>
</header>
</li>
{% endspaceless %}
......@@ -22,7 +22,7 @@
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">{{ assessment.title }}</span>
<span class="step__label">{% trans assessment.title %}</span>
</span>
</h2>
......
{% load i18n %}
<li id="openassessment__staff-assessment" class="openassessment__steps__step step--staff-assessment {{ step_classes }}">
<header class="step__header">
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">{% trans "Staff Grade" %} </span>
</span>
</h2>
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
{% if icon_class %}
<i class="icon fa {{ icon_class }}" aria-hidden="true"></i>
{% endif %}
<span class="copy">{{ status_value }}</span>
</span>
</span>
</header>
{% if message_title %}
<div class="wrapper--step__content">
<div class="step__message message message--incomplete">
<h3 class="message__title">{{ message_title }}</h3>
<div class="message__content">
<p>{{ message_content }}</p>
</div>
</div>
</div>
{% endif %}
</li>
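For reference, this template is driven entirely by the context built in staff_path_and_context() further down in this commit. A minimal sketch of that contract, using the values from the 'waiting' branch; only status_value is always rendered, and the message block appears only when message_title is set:

    # Sketch of a context dict this template consumes (values from the 'waiting' branch below):
    waiting_context = {
        'status_value': 'Not Available',               # always shown in .step__status__value
        'message_title': 'Waiting for a Staff Grade',  # optional; enables the message block
        'message_content': 'Check back later to see if a course staff member has assessed your response. '
                           'You will receive your grade after the assessment is complete.',
        # 'icon_class' and 'step_classes' are also optional; omitting them leaves those slots empty.
    }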
......@@ -342,11 +342,9 @@ def _get_workflow_model(submission_uuid):
raise AssessmentWorkflowRequestError("submission_uuid must be a string type")
try:
workflow = AssessmentWorkflow.objects.get(submission_uuid=submission_uuid)
except AssessmentWorkflow.DoesNotExist:
raise AssessmentWorkflowNotFoundError(
u"No assessment workflow matching submission_uuid {}".format(submission_uuid)
)
workflow = AssessmentWorkflow.get_by_submission_uuid(submission_uuid)
except AssessmentWorkflowError as exc:
raise AssessmentWorkflowInternalError(repr(exc))
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = (
......@@ -356,6 +354,11 @@ def _get_workflow_model(submission_uuid):
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
if workflow is None:
raise AssessmentWorkflowNotFoundError(
u"No assessment workflow matching submission_uuid {}".format(submission_uuid)
)
return workflow
......
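Read together, the two hunks above leave _get_workflow_model looking roughly as follows. This is a sketch: the string-type guard and the exact wording of err_msg fall outside the shown lines and are assumed here.

    def _get_workflow_model(submission_uuid):
        if not isinstance(submission_uuid, basestring):  # guard assumed; only the raise appears in the hunk
            raise AssessmentWorkflowRequestError("submission_uuid must be a string type")
        try:
            workflow = AssessmentWorkflow.get_by_submission_uuid(submission_uuid)
        except AssessmentWorkflowError as exc:
            raise AssessmentWorkflowInternalError(repr(exc))
        except Exception as exc:
            # Something very unexpected has just happened (like DB misconfig)
            err_msg = u"Could not get workflow for submission UUID {} due to error: {}".format(
                submission_uuid, exc
            )  # wording assumed; elided by the hunk boundary
            logger.exception(err_msg)
            raise AssessmentWorkflowInternalError(err_msg)
        if workflow is None:
            raise AssessmentWorkflowNotFoundError(
                u"No assessment workflow matching submission_uuid {}".format(submission_uuid)
            )
        return workflow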
......@@ -574,10 +574,10 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
return cls.objects.get(submission_uuid=submission_uuid)
except cls.DoesNotExist:
return None
except DatabaseError:
error_message = u"Error finding workflow for submission UUID {}.".format(submission_uuid)
logger.exception(error_message)
raise AssessmentWorkflowError(error_message)
except DatabaseError as exc:
message = u"Error finding workflow for submission UUID {} due to error: {}.".format(submission_uuid, exc)
logger.exception(message)
raise AssessmentWorkflowError(message)
@property
def is_cancelled(self):
......
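The classmethod modified here (AssessmentWorkflow.get_by_submission_uuid, called from the api hunk above) ends up with a three-way contract: return the workflow, return None when no workflow exists, and raise AssessmentWorkflowError only when the database lookup itself fails. A sketch, with the decorator, signature, and docstring assumed since only the body appears in this hunk:

    @classmethod
    def get_by_submission_uuid(cls, submission_uuid):
        """
        Return the workflow for `submission_uuid`, None if it does not exist,
        or raise AssessmentWorkflowError on a database failure.
        """
        try:
            return cls.objects.get(submission_uuid=submission_uuid)
        except cls.DoesNotExist:
            return None
        except DatabaseError as exc:
            message = u"Error finding workflow for submission UUID {} due to error: {}.".format(submission_uuid, exc)
            logger.exception(message)
            raise AssessmentWorkflowError(message)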
......@@ -46,43 +46,36 @@ UI_MODELS = {
"submission": {
"name": "submission",
"class_id": "openassessment__response",
"navigation_text": "Your response to this assignment",
"title": "Your Response"
},
"student-training": {
"name": "student-training",
"class_id": "openassessment__student-training",
"navigation_text": "Learn to assess responses",
"title": "Learn to Assess"
},
"peer-assessment": {
"name": "peer-assessment",
"class_id": "openassessment__peer-assessment",
"navigation_text": "Your assessment(s) of peer responses",
"title": "Assess Peers' Responses"
},
"self-assessment": {
"name": "self-assessment",
"class_id": "openassessment__self-assessment",
"navigation_text": "Your assessment of your response",
"title": "Assess Your Response"
},
"staff-assessment": {
"name": "staff-assessment",
"class_id": "openassessment__staff-assessment",
"navigation_text": "Staff assessment of your response",
"title": "Staff Assessment"
"title": "Staff Grade"
},
"grade": {
"name": "grade",
"class_id": "openassessment__grade",
"navigation_text": "Your grade for this assignment",
"title": "Your Grade:"
},
"leaderboard": {
"name": "leaderboard",
"class_id": "openassessment__leaderboard",
"navigation_text": "A leaderboard of the top submissions",
"title": "Leaderboard"
}
}
......@@ -455,15 +448,20 @@ class OpenAssessmentBlock(
"""
ui_models = [UI_MODELS["submission"]]
staff_assessment_required = False
for assessment in self.valid_assessments:
if assessment["name"] == "staff-assessment" and assessment["required"] == False:
# If we don't have a staff grade, and it's not required, hide
# this UI model.
if not self.staff_assessment_exists(self.submission_uuid):
if assessment["name"] == "staff-assessment":
if not assessment["required"]:
continue
else:
staff_assessment_required = True
ui_model = UI_MODELS.get(assessment["name"])
if ui_model:
ui_models.append(dict(assessment, **ui_model))
if not staff_assessment_required and self.staff_assessment_exists(self.submission_uuid):
ui_models.append(UI_MODELS["staff-assessment"])
ui_models.append(UI_MODELS["grade"])
if self.leaderboard_show > 0:
......
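Because old and new lines of this hunk appear interleaved, the resulting behavior is easier to see consolidated. A sketch of the new logic inside the method above (names taken from the diff): the staff step is skipped in the loop when it is optional, and appended afterwards only if a staff grade already exists.

    ui_models = [UI_MODELS["submission"]]
    staff_assessment_required = False
    for assessment in self.valid_assessments:
        if assessment["name"] == "staff-assessment":
            if not assessment["required"]:
                continue  # optional staff step: may still be added below if a staff grade exists
            staff_assessment_required = True
        ui_model = UI_MODELS.get(assessment["name"])
        if ui_model:
            ui_models.append(dict(assessment, **ui_model))

    if not staff_assessment_required and self.staff_assessment_exists(self.submission_uuid):
        ui_models.append(UI_MODELS["staff-assessment"])

    ui_models.append(UI_MODELS["grade"])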
......@@ -13,7 +13,7 @@ from openassessment.assessment.errors import (
)
from .data_conversion import create_rubric_dict
from .data_conversion import clean_criterion_feedback, create_submission_dict, verify_assessment_parameters
from .data_conversion import clean_criterion_feedback, verify_assessment_parameters
logger = logging.getLogger(__name__)
......@@ -69,3 +69,64 @@ class StaffAssessmentMixin(object):
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
@XBlock.handler
def render_staff_assessment(self, data, suffix=''):
"""
Renders the Staff Assessment HTML section of the XBlock.
Generates the staff assessment HTML for the Open
Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
more information on rendering XBlock sections.
Args:
data (dict): Not used by this handler.
"""
path, context_dict = self.staff_path_and_context()
return self.render_assessment(path, context_dict)
def staff_path_and_context(self):
"""
Retrieve the correct template path and template context for the handler to render.
"""
workflow = self.get_workflow_info()
status = workflow.get('status')
path = 'openassessmentblock/staff/oa_staff_grade.html'
not_available_context = {
'status_value': self._('Not Available'),
'step_classes': 'is--unavailable is--empty is--collapsed',
}
if status == 'cancelled':
context = {
'status_value': self._('Cancelled'),
'icon_class': 'fa-exclamation-triangle',
}
elif status == 'done': # Staff grade exists and all steps completed.
context = {
'status_value': self._('Complete'),
'icon_class': 'fa-check',
'step_classes': 'is--complete is--empty is--collapsed',
}
elif status == 'waiting':
# If we are in the 'waiting' workflow, this means that a staff grade cannot exist
# (because if a staff grade did exist, we would be in 'done' regardless of whether other
# peers have assessed). Therefore we show that we are waiting on staff to provide a grade.
context = {
'status_value': self._('Not Available'),
'message_title': self._('Waiting for a Staff Grade'),
'message_content': self._('Check back later to see if a course staff member has assessed your response. You will receive your grade after the assessment is complete.'),
}
elif status is None: # not started
context = not_available_context
else: # status is 'self' or 'peer', indicating that the student still has work to do.
if self.staff_assessment_exists(self.submission_uuid):
context = {
'status_value': self._('Complete'),
'icon_class': 'fa-check',
'message_title': self._('You Must Complete the Above Steps to View Your Grade'),
'message_content': self._('Although a course staff member has assessed your response, you will receive your grade only after you have completed all the required steps of this problem.'),
}
else: # Both student and staff still have work to do, just show "Not Available".
context = not_available_context
return path, context
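In short, the Staff Grade section a learner sees depends on the workflow status and on whether a staff grade exists. A condensed view of the branches above:

    # status None                              -> "Not Available" (collapsed, empty)
    # status 'cancelled'                       -> "Cancelled" with a warning icon
    # status 'done'                            -> "Complete" (collapsed, empty)
    # status 'waiting'                         -> "Not Available" + "Waiting for a Staff Grade" message
    # status 'self'/'peer', staff grade exists -> "Complete" + "You Must Complete the Above Steps ..." message
    # status 'self'/'peer', no staff grade     -> "Not Available"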
This source diff could not be displayed because it is too large.
......@@ -64,6 +64,7 @@ describe("OpenAssessment.BaseView", function() {
expect(server.fragmentsLoaded).toContain("student_training");
expect(server.fragmentsLoaded).toContain("self_assessment");
expect(server.fragmentsLoaded).toContain("peer_assessment");
expect(server.fragmentsLoaded).toContain("staff_assessment");
expect(server.fragmentsLoaded).toContain("grade");
});
......@@ -72,7 +73,7 @@ describe("OpenAssessment.BaseView", function() {
view.peerView.peerAssess();
var numPeerLoads = 0;
for (var i = 0; i < server.fragmentsLoaded.length; i++) {
if (server.fragmentsLoaded[i] == 'peer_assessment') {
if (server.fragmentsLoaded[i] === 'peer_assessment') {
numPeerLoads++;
}
}
......
......@@ -20,6 +20,7 @@ OpenAssessment.BaseView = function(runtime, element, server, data) {
this.trainingView = new OpenAssessment.StudentTrainingView(this.element, this.server, this);
this.selfView = new OpenAssessment.SelfView(this.element, this.server, this);
this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
this.staffView = new OpenAssessment.StaffView(this.element, this.server, this);
this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
this.leaderboardView = new OpenAssessment.LeaderboardView(this.element, this.server, this);
this.messageView = new OpenAssessment.MessageView(this.element, this.server, this);
......@@ -71,6 +72,7 @@ OpenAssessment.BaseView.prototype = {
loadAssessmentModules: function() {
this.trainingView.load();
this.peerView.load();
this.staffView.load();
this.selfView.load();
this.gradeView.load();
this.leaderboardView.load();
......
/**
* Interface for staff assessment view.
*
* @param {Element} element - The DOM element representing the XBlock.
* @param {OpenAssessment.Server} server - The interface to the XBlock server.
* @param {OpenAssessment.BaseView} baseView - Container view.
*/
OpenAssessment.StaffView = function(element, server, baseView) {
this.element = element;
this.server = server;
this.baseView = baseView;
};
OpenAssessment.StaffView.prototype = {
/**
* Load the staff assessment view.
**/
load: function() {
var view = this;
this.server.render('staff_assessment').done(
function(html) {
$('#openassessment__staff-assessment', view.element).replaceWith(html);
}
).fail(function() {
view.baseView.showLoadError('staff-assessment');
});
}
};
......@@ -15,12 +15,11 @@ from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class TestGrade(XBlockHandlerTestCase):
class SubmitAssessmentsMixin(object):
"""
View-level tests for the XBlock grade handlers.
A mixin for creating a submission and peer/self assessments so that the user can
receive a grade. This is useful for getting into the "waiting for peer assessment" state.
"""
PEERS = ['McNulty', 'Moreland']
ASSESSMENTS = [
......@@ -45,6 +44,88 @@ class TestGrade(XBlockHandlerTestCase):
STEPS = ['peer', 'self']
def _create_submission_and_assessments(
self, xblock, submission_text, peers, peer_assessments, self_assessment,
waiting_for_peer=False,
):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
Args:
xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
submission_text (unicode): Text of the submission from the user.
peers (list of unicode): List of user IDs of peers who will assess the user.
peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
self_assessment (dict): Dict of assessment for self-assessment.
Keyword Arguments:
waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.
Returns:
the submission
"""
# Create a submission from the user
student_item = xblock.get_student_item_dict()
student_id = student_item['student_id']
submission = xblock.create_submission(student_item, submission_text)
# Create submissions and assessments from other users
scorer_submissions = []
for scorer_name, assessment in zip(peers, peer_assessments):
# Create a submission for each scorer for the same problem
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
# Create an assessment of the user's submission
if not waiting_for_peer:
peer_api.create_assessment(
scorer_sub['uuid'], scorer_name,
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have our user make assessments (so she can get a score)
for assessment in peer_assessments:
peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'],
student_id,
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have the user submit a self-assessment (so she can get a score)
if self_assessment is not None:
self_api.create_assessment(
submission['uuid'], student_id, self_assessment['options_selected'],
self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}
)
return submission
@ddt.ddt
class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
"""
View-level tests for the XBlock grade handlers.
"""
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
......@@ -367,77 +448,3 @@ class TestGrade(XBlockHandlerTestCase):
self.assertEqual(part['criterion']['label'], expected_criterion_label)
expected_option_label = option_labels[(part['criterion']['name'], part['option']['name'])]
self.assertEqual(part['option']['label'], expected_option_label)
def _create_submission_and_assessments(
self, xblock, submission_text, peers, peer_assessments, self_assessment,
waiting_for_peer=False,
):
"""
Create a submission and peer/self assessments, so that the user can receive a grade.
Args:
xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
submission_text (unicode): Text of the submission from the user.
peers (list of unicode): List of user IDs of peers who will assess the user.
peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
self_assessment (dict): Dict of assessment for self-assessment.
Keyword Arguments:
waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.
Returns:
None
"""
# Create a submission from the user
student_item = xblock.get_student_item_dict()
student_id = student_item['student_id']
submission = xblock.create_submission(student_item, submission_text)
# Create submissions and assessments from other users
scorer_submissions = []
for scorer_name, assessment in zip(peers, peer_assessments):
# Create a submission for each scorer for the same problem
scorer = copy.deepcopy(student_item)
scorer['student_id'] = scorer_name
scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))
# Store the scorer's submission so our user can assess it later
scorer_submissions.append(scorer_sub)
# Create an assessment of the user's submission
if not waiting_for_peer:
peer_api.create_assessment(
scorer_sub['uuid'], scorer_name,
assessment['options_selected'],
assessment['criterion_feedback'],
assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have our user make assessments (so she can get a score)
for asmnt in peer_assessments:
peer_api.get_submission_to_assess(submission['uuid'], len(peers))
peer_api.create_assessment(
submission['uuid'],
student_id,
asmnt['options_selected'],
asmnt['criterion_feedback'],
asmnt['overall_feedback'],
{'criteria': xblock.rubric_criteria},
xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
)
# Have the user submit a self-assessment (so she can get a score)
if self_assessment is not None:
self_api.create_assessment(
submission['uuid'], student_id, self_assessment['options_selected'],
self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
{'criteria': xblock.rubric_criteria}
)
......@@ -43,6 +43,11 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertIsNotNone(self_response)
self.assertTrue(self_response.body.find("openassessment__peer-assessment"))
# Validate Staff Grade.
staff_response = xblock.render_staff_assessment(request)
self.assertIsNotNone(staff_response)
self.assertTrue(staff_response.body.find("openassessment__staff-assessment"))
# Validate Grading.
grade_response = xblock.render_grade({})
self.assertIsNotNone(grade_response)
......
......@@ -6,9 +6,8 @@ import json
import mock
import copy
from openassessment.assessment.api import staff as staff_api
from openassessment.xblock.data_conversion import create_rubric_dict
from .base import XBlockHandlerTestCase, scenario
from .test_grade import SubmitAssessmentsMixin
class StaffAssessmentTestBase(XBlockHandlerTestCase):
maxDiff = None
......@@ -25,6 +24,126 @@ class StaffAssessmentTestBase(XBlockHandlerTestCase):
xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
xblock.xmodule_runtime.anonymous_student_id = 'Bob'
def _assert_path_and_context(self, xblock, expected_context):
path, context = xblock.staff_path_and_context()
self.assertEqual('openassessmentblock/staff/oa_staff_grade.html', path)
self.assertItemsEqual(expected_context, context)
# Verify that we render without error
resp = self.request(xblock, 'render_staff_assessment', json.dumps({}))
self.assertGreater(len(resp), 0)
@staticmethod
def _set_mock_workflow_info(xblock, workflow_status, status_details, submission_uuid):
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
def _submit_staff_assessment(self, xblock, submission):
# Submit a staff-assessment
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertTrue(resp['success'])
class TestStaffAssessmentRender(StaffAssessmentTestBase, SubmitAssessmentsMixin):
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_grade_templates(self, xblock):
self._verify_grade_templates_workflow(xblock)
@scenario('data/self_assessment_closed.xml', user_id='Bob')
def test_staff_grade_templates_closed(self, xblock):
# Whether or not a problem is closed (past due date) has no impact on the Staff Grade section.
self._verify_grade_templates_workflow(xblock)
def _verify_grade_templates_workflow(self, xblock):
unavailable_context = {
'status_value': 'Not Available',
'step_classes': 'is--unavailable is--empty is--collapsed',
}
# Problem not yet started, Staff Grade section is marked "Not Available"
self._assert_path_and_context(xblock, unavailable_context)
# Create a submission for the student
submission = xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
# Response has been created, waiting for self assessment (no staff assessment exists either)
self._assert_path_and_context(xblock, unavailable_context)
# Submit a staff-assessment
self._submit_staff_assessment(xblock, submission)
# Staff assessment exists, still waiting for self assessment.
self._assert_path_and_context(
xblock,
{
'status_value': 'Complete',
'icon_class': 'fa-check',
'message_title': 'You Must Complete the Above Steps to View Your Grade',
'message_content': 'Although a course staff member has assessed your response, you will receive your grade only after you have completed all the required steps of this problem.'
}
)
# Verify that once the required step (self assessment) is done, the staff grade is shown as complete.
status_details = {'peer': {'complete': True}}
self._set_mock_workflow_info(
xblock, workflow_status='done', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
xblock,
{
'status_value': 'Complete',
'icon_class': 'fa-check',
'step_classes': 'is--complete is--empty is--collapsed',
}
)
# Verify that if the problem is cancelled, the staff grade reflects this.
self._set_mock_workflow_info(
xblock, workflow_status='cancelled', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
xblock,
{
'status_value': 'Cancelled',
'icon_class': 'fa-exclamation-triangle',
}
)
@scenario('data/grade_waiting_scenario.xml', user_id='Omar')
def test_staff_grade_templates_no_peer(self, xblock):
# Waiting to be assessed by a peer
submission = self._create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0], waiting_for_peer=True
)
# Waiting for a peer assessment (though it is not used because staff grading is required),
# no staff grade exists.
self._assert_path_and_context(
xblock,
{
'status_value': 'Not Available',
'message_title': 'Waiting for a Staff Grade',
'message_content': 'Check back later to see if a course staff member has assessed your response. You will receive your grade after the assessment is complete.',
}
)
# Submit a staff-assessment. The student can now see the score even though no peer assessments have been done.
self._submit_staff_assessment(xblock, submission)
self._assert_path_and_context(
xblock,
{
'status_value': 'Complete',
'icon_class': 'fa-check',
'step_classes': 'is--complete is--empty is--collapsed',
}
)
class TestStaffAssessment(StaffAssessmentTestBase):
......@@ -36,10 +155,7 @@ class TestStaffAssessment(StaffAssessmentTestBase):
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a staff-assessment
self.set_staff_access(xblock)
self.ASSESSMENT['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(self.ASSESSMENT), response_format='json')
self.assertTrue(resp['success'])
self._submit_staff_assessment(xblock, submission)
# Expect that a staff-assessment was created
assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
......
......@@ -114,6 +114,31 @@ class StaffAreaA11yTest(OpenAssessmentA11yTest):
self._check_a11y(self.staff_area_page)
def test_staff_grade(self):
"""
Check the accessibility of the Staff Grade section, as shown to the learner.
"""
self.auto_auth_page.visit()
username = self.auto_auth_page.get_username()
self.submission_page.visit().submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
# Submit a staff override
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.assess("staff", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Refresh the page, and learner completes a self-assessment.
# Then verify accessibility of the Staff Grade section (marked Complete).
self.browser.refresh()
self.self_asmnt_page.wait_for_page().wait_for_response()
self.self_asmnt_page.assess("self", self.OPTIONS_SELECTED).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
self._verify_staff_grade_section("COMPLETE", None)
self._check_a11y(self.staff_asmnt_page)
if __name__ == "__main__":
......
......@@ -204,7 +204,7 @@ class AssessmentPage(OpenAssessmentPage, AssessmentMixin):
Page object representing an "assessment" step in an ORA problem.
"""
ASSESSMENT_TYPES = ['self-assessment', 'peer-assessment', 'student-training']
ASSESSMENT_TYPES = ['self-assessment', 'peer-assessment', 'student-training', 'staff-assessment']
def __init__(self, assessment_type, *args):
"""
......@@ -223,6 +223,13 @@ class AssessmentPage(OpenAssessmentPage, AssessmentMixin):
raise PageConfigurationError(msg)
self._assessment_type = assessment_type
def _bounded_selector(self, selector):
"""
Return `selector`, but scoped to this assessment step's section of the page.
"""
return '#openassessment__{assessment_type} {selector}'.format(
assessment_type=self._assessment_type, selector=selector)
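# For example (hypothetical usage), on the staff-assessment step page object:
#     self._bounded_selector(".step__label")
#     # => '#openassessment__staff-assessment .step__label'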
def is_browser_on_page(self):
css_id = "#openassessment__{assessment_type}".format(
assessment_type=self._assessment_type
......@@ -337,6 +344,48 @@ class AssessmentPage(OpenAssessmentPage, AssessmentMixin):
candidates = [int(x) for x in self.q(css=".step__status__value--completed").text]
return candidates[0] if len(candidates) > 0 else None
@property
def label(self):
"""
Returns the label of this assessment step.
Returns:
string
"""
return self.q(css=self._bounded_selector(".step__label")).text[0]
@property
def status_value(self):
"""
Returns the status value (e.g., "COMPLETE", "CANCELLED") of this assessment step.
Returns:
string
"""
return self.q(css=self._bounded_selector(".step__status__value")).text[0]
@property
def message_title(self):
"""
Returns the message title, if present, of this assessment step.
Returns:
string if a message title is present, else None
"""
message_title = self.q(css=self._bounded_selector(".message__title"))
if len(message_title) == 0:
return None
return message_title.text[0]
def verify_status_value(self, expected_value):
"""
Waits until the expected status value appears. If it does not appear, fails the test.
"""
EmptyPromise(
lambda: self.status_value == expected_value,
"Expected status value present"
).fulfill()
class GradePage(OpenAssessmentPage):
"""
......
......@@ -68,6 +68,7 @@ class OpenAssessmentTest(WebAppTest):
SUBMISSION = u"This is a test submission."
LATEX_SUBMISSION = u"[mathjaxinline]( \int_{0}^{1}xdx \)[/mathjaxinline]"
OPTIONS_SELECTED = [1, 2]
STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
EXPECTED_SCORE = 6
def setUp(self, problem_type, staff=False):
......@@ -88,6 +89,7 @@ class OpenAssessmentTest(WebAppTest):
self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, self.problem_loc)
self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, self.problem_loc)
self.student_training_page = AssessmentPage('student-training', self.browser, self.problem_loc)
self.staff_asmnt_page = AssessmentPage('staff-assessment', self.browser, self.problem_loc)
self.grade_page = GradePage(self.browser, self.problem_loc)
def do_self_assessment(self):
......@@ -111,6 +113,12 @@ class OpenAssessmentTest(WebAppTest):
return username
def _verify_staff_grade_section(self, expected_status, expected_message_title):
self.staff_asmnt_page.wait_for_page()
self.assertEqual("Staff Grade", self.staff_asmnt_page.label)
self.staff_asmnt_page.verify_status_value(expected_status)
self.assertEqual(expected_message_title, self.staff_asmnt_page.message_title)
class SelfAssessmentTest(OpenAssessmentTest):
"""
......@@ -179,6 +187,69 @@ class PeerAssessmentTest(OpenAssessmentTest):
self.fail("Did not complete at least one peer assessment.")
class PeerAssessmentTestStaffOverride(OpenAssessmentTest):
"""
Test setting a staff override on a problem which requires peer assessment.
"""
def setUp(self):
super(PeerAssessmentTestStaffOverride, self).setUp('peer_only', staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_staff_override(self):
"""
Scenario: staff can override a learner's grade
Given I am viewing a new peer assessment problem as a learner
And if I create a response to the problem
Then there is no Staff Grade section present
And if a staff member creates a grade override
Then when I refresh the page, I see that a staff override exists
And the message says that I must complete my steps to view the grade
And if I submit required peer assessments
Then the Staff Grade section is marked complete with no message
And I can see my final grade, even though no peers have assessed me
"""
# Create two students with a submission each so that there are 2 submissions to assess.
for _ in range(0, 2):
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Create a submission for the third student (used for the remainder of the test).
self.auto_auth_page.visit()
username = self.auto_auth_page.get_username()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Staff Grade field should not be visible yet.
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
# Submit a staff override.
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.assess("staff", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Refresh the page so the learner sees the Staff Grade section.
self.browser.refresh()
self._verify_staff_grade_section("COMPLETE", "YOU MUST COMPLETE THE ABOVE STEPS TO VIEW YOUR GRADE")
# Verify no final grade yet.
self.assertIsNone(self.grade_page.wait_for_page().score)
# Assess two submissions
for count_assessed in range(1, 3):
self.peer_asmnt_page.wait_for_page().wait_for_response().assess("peer", self.OPTIONS_SELECTED)
self.peer_asmnt_page.wait_for_num_completed(count_assessed)
# Staff grade section is now marked complete, even though no students have submitted
# assessments for this particular student (no longer required since staff grade exists).
self._verify_staff_grade_section("COMPLETE", None)
# Verify the staff override grade
self.assertEqual(self.grade_page.wait_for_page().score, 1)
class StudentTrainingTest(OpenAssessmentTest):
"""
Test student training (the "learning to assess" step).
......@@ -315,7 +386,7 @@ class StaffAreaTest(OpenAssessmentTest):
# Click on staff tools and search for user
self.staff_area_page.show_learner(username)
self.assertEqual(
[u'Learner Response', u"Learner's Self Assessment", u"Learner's Final Grade",
[u"Learner's Response", u"Learner's Self Assessment", u"Learner's Final Grade",
u"Submit Assessment Grade Override", u"Remove Submission From Peer Grading"],
self.staff_area_page.learner_report_sections
)
......@@ -365,7 +436,7 @@ class StaffAreaTest(OpenAssessmentTest):
self.staff_area_page.verify_learner_final_score("Final grade: 6 out of 8")
# Do staff override and wait for final score to change.
self.staff_area_page.assess("staff", [0, 1])
self.staff_area_page.assess("staff", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Verify that the new student score is different from the original one.
# Unfortunately there is no indication presently that this was a staff override.
......@@ -405,7 +476,7 @@ class StaffAreaTest(OpenAssessmentTest):
# Verify that the staff override and submission removal sections are now gone.
self.assertEqual(
[u'Learner Response', u"Learner's Self Assessment", u"Learner's Final Grade"],
[u"Learner's Response", u"Learner's Self Assessment", u"Learner's Final Grade"],
self.staff_area_page.learner_report_sections
)
......@@ -413,6 +484,86 @@ class StaffAreaTest(OpenAssessmentTest):
self.staff_area_page.expand_learner_report_sections()
self.assertIn("Learner submission removed", self.staff_area_page.learner_response)
@retry()
@attr('acceptance')
def test_staff_grade_override(self):
"""
Scenario: the staff grade section displays correctly
Given I am viewing a new self assessment problem as a learner
Then there is no Staff Grade section present
And if I create a response to the problem
Then there is no Staff Grade section present
And if a staff member creates a grade override
Then when I refresh the page, I see that a staff override exists
And the message says that I must complete my steps to view the grade
And if I submit my self-assessment
Then the Staff Grade section is marked complete with no message
And I can see my final grade
"""
# View the problem-- no Staff Grade area.
self.auto_auth_page.visit()
username = self.auto_auth_page.get_username()
self.submission_page.visit()
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
self.submission_page.submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
# Submit a staff override
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.assess("staff", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Refresh the page so the learner sees the Staff Grade section.
self.browser.refresh()
self._verify_staff_grade_section("COMPLETE", "YOU MUST COMPLETE THE ABOVE STEPS TO VIEW YOUR GRADE")
# Verify no final grade yet.
self.assertIsNone(self.grade_page.wait_for_page().score)
# Learner does required self-assessment
self.self_asmnt_page.wait_for_page().wait_for_response()
self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
self.self_asmnt_page.assess("self", self.OPTIONS_SELECTED).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
self._verify_staff_grade_section("COMPLETE", None)
# Verify the staff override grade
self.assertEqual(self.grade_page.wait_for_page().score, 1)
@retry()
@attr('acceptance')
def test_staff_grade_override_cancelled(self):
"""
Scenario: the staff grade section displays cancelled when the submission is cancelled
Given I have created a response and a self-assessment
And a staff member creates a grade override and then cancels my submission
Then when I refresh the page, the Staff Grade section is marked cancelled
And I have no final grade
"""
username = self.do_self_assessment()
# Submit a staff override
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
# Do staff override.
self.staff_area_page.assess("staff", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# And cancel the submission
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.cancel_submission()
# Refresh the page so the learner sees the Staff Grade section shows the submission has been cancelled.
self.browser.refresh()
self._verify_staff_grade_section("CANCELLED", None)
self.assertIsNone(self.grade_page.wait_for_page().score)
class FileUploadTest(OpenAssessmentTest):
"""
......