Commit 1d4370e5 by Eric Fischer, committed by Andy Armstrong

Fix up issues with peer scores

The crux of this fix is that the scored_only behavior in various api/peer.py
methods is no longer needed or correct. The parameter has been removed, so
that assessments can be retrieved (and medians computed) even when the
assessments are not used in determining the final score (say, if a staff
score exists).

Also includes updates to the "waiting for steps" string as prescribed by
@catong in https://github.com/edx/edx-ora2/pull/764, along with the
supporting template logic, and updates to tests and get_waiting_details
logic in light of this simplification.
parent db29b35f
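
For reviewers, a minimal sketch of the net effect. The standalone function below is illustrative only; in the diff the logic lives on `OpenAssessmentBlock.get_waiting_details`, and `get_assessments` is in api/peer.py:

    # Callers no longer pass scored_only; all peer assessments are returned,
    # even those that did not count toward the final score (e.g. when a
    # staff score exists).
    assessments = peer_api.get_assessments(submission_uuid)

    # The waiting check collapses to a single boolean: is any grading step
    # ("peer", "ai", or "staff") present in status_details but not yet graded?
    def get_waiting_details(status_details):
        for step in ("peer", "ai", "staff"):
            if step in status_details and not status_details[step]["graded"]:
                return True
        return False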
@@ -483,7 +483,7 @@ def has_finished_required_evaluating(submission_uuid, required_assessments):
     return done, peers_graded
 
 
-def get_assessments(submission_uuid, scored_only=True, limit=None):
+def get_assessments(submission_uuid, limit=None):
     """Retrieve the assessments for a submission.
 
     Retrieves all the assessments for a submissions. This API returns related
@@ -495,9 +495,6 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
             associated with. Required.
 
     Keyword Arguments:
-        scored (boolean): Only retrieve the assessments used to generate a score
-            for this submission.
-
         limit (int): Limit the returned assessments. If None, returns all.
 
@@ -513,7 +510,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
             while retrieving the assessments associated with this submission.
 
     Examples:
-        >>> get_assessments("1", scored_only=True, limit=2)
+        >>> get_assessments("1", limit=2)
         [
             {
                 'points_earned': 6,
@@ -533,11 +530,6 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
 
     """
     try:
-        if scored_only:
-            assessments = PeerWorkflowItem.get_scored_assessments(
-                submission_uuid
-            )[:limit]
-        else:
-            assessments = Assessment.objects.filter(
-                submission_uuid=submission_uuid,
-                score_type=PEER_TYPE
+        assessments = Assessment.objects.filter(
+            submission_uuid=submission_uuid,
+            score_type=PEER_TYPE
@@ -551,7 +543,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
         raise PeerAssessmentInternalError(error_message)
 
 
-def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
+def get_submitted_assessments(submission_uuid, limit=None):
     """Retrieve the assessments created by the given submission's author.
 
     Retrieves all the assessments created by the given submission's author. This
@@ -564,8 +556,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             we are requesting. Required.
 
     Keyword Arguments:
-        scored (boolean): Only retrieve the assessments used to generate a score
-            for this submission.
         limit (int): Limit the returned assessments. If None, returns all.
 
     Returns:
@@ -581,7 +571,7 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             while retrieving the assessments associated with this submission.
 
     Examples:
-        >>> get_submitted_assessments("1", scored_only=True, limit=2)
+        >>> get_submitted_assessments("1", limit=2)
         [
             {
                 'points_earned': 6,
@@ -608,8 +598,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
             scorer=workflow,
             assessment__isnull=False
         )
-        if scored_only:
-            items = items.exclude(scored=False)
         assessments = Assessment.objects.filter(
             pk__in=[item.assessment.pk for item in items])[:limit]
         return serialize_assessments(assessments)
...
@@ -104,7 +104,7 @@ ASSESSMENT_DICT_FAIL = {
     }
 }
 
-# Answers are against RUBRIC_DICT -- this is worth 12 points
+# Answers are against RUBRIC_DICT -- this is worth 14 points
 ASSESSMENT_DICT_PASS = {
     'overall_feedback': u"这是中国",
     'criterion_feedback': {},
@@ -116,7 +116,7 @@ ASSESSMENT_DICT_PASS = {
     }
 }
 
-# Answers are against RUBRIC_DICT -- this is worth 12 points
+# Answers are against RUBRIC_DICT -- this is worth 14 points
 # Feedback text is one character over the limit.
 LONG_FEEDBACK_TEXT = u"是" * Assessment.MAX_FEEDBACK_SIZE + "."
 ASSESSMENT_DICT_HUGE = {
@@ -322,7 +322,7 @@ class TestPeerApi(CacheResetTest):
             RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
-        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
+        assessments = peer_api.get_assessments(sub["uuid"])
         self.assertEqual(1, len(assessments))
 
     @file_data('data/valid_assessments.json')
@@ -340,7 +340,7 @@ class TestPeerApi(CacheResetTest):
             REQUIRED_GRADED_BY,
             MONDAY,
         )
-        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
+        assessments = peer_api.get_assessments(sub["uuid"])
         self.assertEqual(1, len(assessments))
         self.assertEqual(assessments[0]["scored_at"], MONDAY)
@@ -859,14 +859,12 @@ class TestPeerApi(CacheResetTest):
         )
         self.assertEqual(assessment["points_earned"], 6)
         self.assertEqual(assessment["points_possible"], 14)
-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
-        self.assertEqual(0, len(submitted_assessments))
-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
+        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
         self.assertEqual(1, len(submitted_assessments))
 
     def test_get_submitted_assessments_with_bad_submission(self):
-        submitted_assessments = peer_api.get_submitted_assessments("bad-uuid", scored_only=True)
+        submitted_assessments = peer_api.get_submitted_assessments("bad-uuid")
         self.assertEqual(0, len(submitted_assessments))
 
     def test_find_active_assessments(self):
@@ -1122,7 +1120,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
         mock_filter.side_effect = DatabaseError("Oh no.")
-        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
+        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
         self.assertEqual(1, len(submitted_assessments))
 
     @patch.object(PeerWorkflow.objects, 'raw')
@@ -1253,11 +1251,11 @@ class TestPeerApi(CacheResetTest):
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
         peer_api.get_assessment_median_scores(tim["uuid"])
 
-    @patch.object(PeerWorkflowItem, 'get_scored_assessments')
+    @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_get_assessments_db_error(self, mock_filter):
-        mock_filter.side_effect = DatabaseError("Bad things happened")
         tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
+        mock_filter.side_effect = DatabaseError("Bad things happened")
         peer_api.get_assessments(tim["uuid"])
 
     @patch.object(PeerWorkflow.objects, 'get_or_create')
@@ -1276,7 +1274,7 @@ class TestPeerApi(CacheResetTest):
             MONDAY,
         )
 
-    @patch.object(PeerWorkflowItem, 'get_scored_assessments')
+    @patch.object(Assessment.objects, 'filter')
     @raises(peer_api.PeerAssessmentInternalError)
     def test_error_on_get_assessment(self, mock_filter):
         self._create_student_and_submission("Tim", "Tim's answer")
@@ -1372,15 +1370,15 @@ class TestPeerApi(CacheResetTest):
         )
 
         # Make sure Tim has one assessment.
-        tim_assessments = peer_api.get_assessments(tim_sub['uuid'], scored_only=False)
+        tim_assessments = peer_api.get_assessments(tim_sub['uuid'])
         self.assertEqual(1, len(tim_assessments))
 
         # Make sure Sally has one assessment.
-        sally_assessments = peer_api.get_assessments(sally_sub['uuid'], scored_only=False)
+        sally_assessments = peer_api.get_assessments(sally_sub['uuid'])
         self.assertEqual(1, len(sally_assessments))
 
         # Make sure Jane has no assessment.
-        jane_assessments = peer_api.get_assessments(jane_sub['uuid'], scored_only=False)
+        jane_assessments = peer_api.get_assessments(jane_sub['uuid'])
         self.assertEqual(0, len(jane_assessments))
 
     def test_get_submission_to_assess_no_workflow(self):
@@ -1472,14 +1470,14 @@ class TestPeerApi(CacheResetTest):
             required_graded_by
         )
 
-        # Tim grades Bob, so now Bob has one assessment
+        # Tim grades Bob, so now Bob has one assessment with a good grade
         peer_api.get_submission_to_assess(tim_sub['uuid'], tim['student_id'])
         peer_api.create_assessment(
             tim_sub['uuid'],
             tim['student_id'],
-            ASSESSMENT_DICT['options_selected'],
-            ASSESSMENT_DICT['criterion_feedback'],
-            ASSESSMENT_DICT['overall_feedback'],
+            ASSESSMENT_DICT_PASS['options_selected'],
+            ASSESSMENT_DICT_PASS['criterion_feedback'],
+            ASSESSMENT_DICT_PASS['overall_feedback'],
             RUBRIC_DICT,
             required_graded_by
         )
@@ -1506,27 +1504,24 @@ class TestPeerApi(CacheResetTest):
             required_graded_by
         )
 
-        # Sue grades the only person she hasn't graded yet (Bob)
+        # Sue grades the only person she hasn't graded yet (Bob), with a failing grade
         peer_api.get_submission_to_assess(sue_sub['uuid'], sue['student_id'])
         peer_api.create_assessment(
             sue_sub['uuid'],
             sue['student_id'],
-            ASSESSMENT_DICT['options_selected'],
-            ASSESSMENT_DICT['criterion_feedback'],
-            ASSESSMENT_DICT['overall_feedback'],
+            ASSESSMENT_DICT_FAIL['options_selected'],
+            ASSESSMENT_DICT_FAIL['criterion_feedback'],
+            ASSESSMENT_DICT_FAIL['overall_feedback'],
             RUBRIC_DICT,
             required_graded_by
         )
 
         # This used to create a second assessment,
         # which was the bug.
-        peer_api.get_score(bob_sub['uuid'], requirements)
+        score = peer_api.get_score(bob_sub['uuid'], requirements)
 
-        # Get the assessments used to generate the score
-        # Only the first assessment should be used
-        scored_assessments = peer_api.get_assessments(bob_sub['uuid'], scored_only=True)
-        self.assertEqual(len(scored_assessments), 1)
-        self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
+        # Verify that only the first assessment was used to generate the score
+        self.assertEqual(score['points_earned'], 14)
 
     @raises(peer_api.PeerAssessmentInternalError)
     def test_create_assessment_database_error(self):
...
@@ -32,7 +32,7 @@ class CreateSubmissionsTest(TestCase):
         self.assertGreater(len(answer_dict['text']), 0)
 
         # Check that peer and self assessments were created
-        assessments = peer_api.get_assessments(submissions[0]['uuid'], scored_only=False)
+        assessments = peer_api.get_assessments(submissions[0]['uuid'])
 
         # Verify that the assessments exist and have content
         self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)
...
@@ -16,13 +16,7 @@
 <div class="wrapper--step__content">
     <div class="step__content">
         <div class="grade__value__description">
-            {% if waiting == 'peer' %}
-            <p>{% trans "Your response is still undergoing peer assessment. After your peers have assessed your response, you'll see their comments and receive your final grade." %}</p>
-            {% elif waiting == 'example-based' %}
-            <p>{% trans "Your response is still undergoing example-based assessment. After your response has been assessed, you'll see the comments and receive your final grade." %}</p>
-            {% elif waiting == 'all' %}
-            <p>{% trans "Your response is still undergoing peer assessment and example-based assessment. After your peers have assessed your response and example-based assessment is complete, you'll see your peers' comments and receive your final grade." %}</p>
-            {% endif %}
+            <p>{% trans "Not all assessment steps have been completed for your response. After all assessment steps are complete, you will see feedback from everyone who assessed your response, and you will receive your final grade." %}</p>
         </div>
     </div>
 </div>
...
@@ -4,12 +4,8 @@
 <h3 class="message__title">{% trans "You Have Completed This Assignment" %} </h3>
 <div class="message__content">
     <p>
-        {% if waiting == 'peer' %}
-        <p>{% trans "Your grade will be available when your peers have completed their assessments of your response." %}</p>
-        {% elif waiting == 'example-based' %}
-        <p>{% trans "Your grade will be available when the example-based assessment of your response is complete." %}</p>
-        {% elif waiting == 'all' %}
-        <p>{% trans "Your grade will be available when your peers have completed their assessments of your response and the example-based assessment of your response is complete." %}</p>
+        {% if waiting %}
+        <p>{% trans "Your final grade will be available when all assessment steps for your response have been completed." %}</p>
         {% else %}
         <a data-behavior="ui-scroll" href="#openassessment__grade">{% trans "Review your grade and your assessment details." %}</a>
         {% endif %}
...
@@ -60,7 +60,7 @@ class GradeMixin(object):
         elif status == "done":
             path, context = self.render_grade_complete(workflow)
         elif status == "waiting":
-            path, context = self.render_grade_waiting(workflow)
+            path, context = 'openassessmentblock/grade/oa_grade_waiting.html', {}
         elif status is None:
             path = 'openassessmentblock/grade/oa_grade_not_started.html'
         else:  # status is 'self' or 'peer', which implies that the workflow is incomplete
@@ -70,22 +70,6 @@ class GradeMixin(object):
         else:
             return self.render_assessment(path, context)
 
-    def render_grade_waiting(self, workflow):
-        """
-        Render the grade waiting state.
-
-        Args:
-            workflow (dict): The serialized Workflow model.
-
-        Returns:
-            tuple of context (dict) and template_path (string)
-
-        """
-        context = {
-            "waiting": self.get_waiting_details(workflow["status_details"])
-        }
-        return 'openassessmentblock/grade/oa_grade_waiting.html', context
-
     def render_grade_complete(self, workflow):
         """
         Render the grade complete state.
@@ -108,6 +92,7 @@ class GradeMixin(object):
         has_submitted_feedback = False
 
         if "peer-assessment" in assessment_steps:
+            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
             feedback = peer_api.get_assessment_feedback(submission_uuid)
             peer_assessments = [
                 self._assessment_grade_context(peer_assessment)
...
@@ -777,22 +777,17 @@ class OpenAssessmentBlock(
     def get_waiting_details(self, status_details):
         """
-        Returns the specific waiting status based on the given status_details.
-        This status can currently be peer, example-based, or both. This is
-        determined by checking that status details to see if all assessment
-        modules have been graded.
+        Returns waiting status (boolean value) based on the given status_details.
 
         Args:
             status_details (dict): A dictionary containing the details of each
                 assessment module status. This will contain keys such as
-                "peer" and "ai", referring to dictionaries, which in turn will
-                have the key "graded". If this key has a value set, these
-                assessment modules have been graded.
+                "peer", "ai", and "staff", referring to dictionaries, which in
+                turn will have the key "graded". If this key has a value set,
+                these assessment modules have been graded.
 
         Returns:
-            A string of "peer", "exampled-based", or "all" to indicate which
-            assessment modules in the workflow are waiting on assessments.
-            Returns None if no module is waiting on an assessment.
+            True if waiting for a grade from peer, ai, or staff assessment, else False.
 
         Examples:
             >>> now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
@@ -807,18 +802,13 @@ class OpenAssessmentBlock(
             >>> }
             >>> }
             >>> self.get_waiting_details(status_details)
-            "peer"
+            True
         """
-        waiting = None
-        peer_waiting = "peer" in status_details and not status_details["peer"]["graded"]
-        ai_waiting = "ai" in status_details and not status_details["ai"]["graded"]
-        if peer_waiting and ai_waiting:
-            waiting = "all"
-        elif peer_waiting:
-            waiting = "peer"
-        elif ai_waiting:
-            waiting = "example-based"
-        return waiting
+        steps = ["peer", "ai", "staff"]  # These are the steps that can be submitter-complete, but lack a grade
+        for step in steps:
+            if step in status_details and not status_details[step]["graded"]:
+                return True
+        return False
 
     def is_released(self, step=None):
         """
...
@@ -391,7 +391,7 @@ class StaffAreaMixin(object):
         if "peer-assessment" in assessment_steps:
             peer_assessments = peer_api.get_assessments(submission_uuid)
-            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
+            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
 
         if "self-assessment" in assessment_steps:
             self_assessment = self_api.get_assessment(submission_uuid)
...
@@ -2,17 +2,17 @@
     "waiting_for_peer": {
         "waiting_for_peer": true,
         "waiting_for_ai": false,
-        "expected_response": "peer assessment"
+        "expected_response": "not all assessment steps have been completed"
     },
     "waiting_for_ai": {
         "waiting_for_peer": false,
         "waiting_for_ai": true,
-        "expected_response": "example-based assessment"
+        "expected_response": "not all assessment steps have been completed"
     },
     "waiting_for_both": {
         "waiting_for_peer": true,
         "waiting_for_ai": true,
-        "expected_response": "peer assessment and example-based assessment"
+        "expected_response": "not all assessment steps have been completed"
     },
     "not_waiting": {
         "waiting_for_peer": false,
...
@@ -7,12 +7,14 @@ import copy
 import mock
 import pytz
+import ddt
 import datetime as dt
 
 from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
 from .base import XBlockHandlerTestCase, scenario
 
 
+@ddt.ddt
 class TestMessageRender(XBlockHandlerTestCase):
     """
     Tests for the Message XBlock Handler
@@ -38,6 +40,10 @@ class TestMessageRender(XBlockHandlerTestCase):
             'completed': TODAY,
             'graded': TODAY,
         },
+        'staff': {
+            'completed': TODAY,
+            'graded': TODAY,
+        },
     }
 
     def _assert_path_and_context(
@@ -717,7 +723,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         expected_path = 'openassessmentblock/message/oa_message_complete.html'
         expected_context = {
-            "waiting": "peer"
+            "waiting": True
         }
 
         self._assert_path_and_context(
@@ -744,34 +750,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         expected_path = 'openassessmentblock/message/oa_message_complete.html'
         expected_context = {
-            "waiting": "peer"
-        }
-
-        self._assert_path_and_context(
-            xblock, expected_path, expected_context,
-            status, deadline_information, has_peers_to_grade, status_details
-        )
-
-    @scenario('data/message_scenario.xml', user_id="Linda")
-    def test_waiting_on_ai(self, xblock):
-        status = 'waiting'
-        status_details = copy.deepcopy(self.DEFAULT_STATUS_DETAILS)
-        status_details["ai"]["graded"] = None
-
-        deadline_information = {
-            'submission': (True, 'due', self.FAR_PAST, self.YESTERDAY),
-            'peer-assessment': (True, 'due', self.YESTERDAY, self.TODAY),
-            'self-assessment': (True, 'due', self.YESTERDAY, self.TODAY),
-            'over-all': (True, 'due', self.FAR_PAST, self.TODAY)
-        }
-
-        has_peers_to_grade = False
-        expected_path = 'openassessmentblock/message/oa_message_complete.html'
-        expected_context = {
-            "waiting": "example-based"
+            "waiting": True
         }
 
         self._assert_path_and_context(
@@ -779,13 +758,18 @@ class TestMessageRender(XBlockHandlerTestCase):
             status, deadline_information, has_peers_to_grade, status_details
         )
 
+    @ddt.data("peer", "ai", "staff", "all")
     @scenario('data/message_scenario.xml', user_id="Linda")
-    def test_waiting_on_all(self, xblock):
+    def test_waiting_on_steps(self, xblock, step):
         status = 'waiting'
         status_details = copy.deepcopy(self.DEFAULT_STATUS_DETAILS)
-        status_details["ai"]["graded"] = None
-        status_details["peer"]["graded"] = None
+        if step == "all":
+            status_details["peer"]["graded"] = None
+            status_details["ai"]["graded"] = None
+            status_details["staff"]["graded"] = None
+        else:
+            status_details[step]["graded"] = None
 
         deadline_information = {
             'submission': (True, 'due', self.FAR_PAST, self.YESTERDAY),
@@ -799,7 +783,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         expected_path = 'openassessmentblock/message/oa_message_complete.html'
         expected_context = {
-            "waiting": "all"
+            "waiting": True
         }
 
         self._assert_path_and_context(
@@ -824,7 +808,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         expected_path = 'openassessmentblock/message/oa_message_complete.html'
         expected_context = {
-            "waiting": None
+            "waiting": False
        }
 
         self._assert_path_and_context(
@@ -849,7 +833,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         expected_path = 'openassessmentblock/message/oa_message_complete.html'
         expected_context = {
-            "waiting": None
+            "waiting": False
         }
 
         self._assert_path_and_context(
...
@@ -823,5 +823,5 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
         self.assertTrue(resp['success'])
 
         # Retrieve the peer assessment
-        retrieved_assessment = peer_api.get_assessments(submission['uuid'], scored_only=False)[0]
+        retrieved_assessment = peer_api.get_assessments(submission['uuid'])[0]
 
         return submission['uuid'], retrieved_assessment
@@ -1001,8 +1001,8 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
 
     @retry()
     @attr('acceptance')
-    @ddt.data(False)
-    def test_train_self_peer_staff(self, peer_grades_me):  # TODO: can't run with "True" due to TNL-3957
+    @ddt.data(True, False)
+    def test_train_self_peer_staff(self, peer_grades_me):
         """
         Scenario: complete workflow that included staff required step.
@@ -1032,8 +1032,13 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
         self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
         self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
 
-        # Note that PEER ASSESSMENT isn't shown here - it gets cut off the page in this case
-        # See TNL-3930 for details and possible fix.
-        self.verify_grade_entries([
-            [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
-            [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
+        if peer_grades_me:
+            self.verify_grade_entries([
+                [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
+                [(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
+                [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
+            ])
+        else:
+            self.verify_grade_entries([
+                [(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
+                [(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
...