Commit 1d4370e5 by Eric Fischer, committed by Andy Armstrong

Fix up issues with peer scores

The crux of this fix is that the scored_only behavior in various api/peer.py
methods is no longer needed or correct. The parameter has been removed, so
that assessments can be retrieved (and medians computed) even when those
assessments are not used to determine the final score (for example, when a
staff score exists).

Also includes updates to the "waiting for steps" string as prescribed by
@catong in https://github.com/edx/edx-ora2/pull/764, along with updates to
the tests and the get_waiting_details logic in light of this simplification.
parent db29b35f
......@@ -483,7 +483,7 @@ def has_finished_required_evaluating(submission_uuid, required_assessments):
return done, peers_graded
def get_assessments(submission_uuid, scored_only=True, limit=None):
def get_assessments(submission_uuid, limit=None):
"""Retrieve the assessments for a submission.
Retrieves all the assessments for a submission. This API returns related
......@@ -495,9 +495,6 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
associated with. Required.
Keyword Arguments:
scored (boolean): Only retrieve the assessments used to generate a score
for this submission.
limit (int): Limit the returned assessments. If None, returns all.
......@@ -513,7 +510,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
while retrieving the assessments associated with this submission.
Examples:
>>> get_assessments("1", scored_only=True, limit=2)
>>> get_assessments("1", limit=2)
[
{
'points_earned': 6,
......@@ -533,15 +530,10 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
"""
try:
if scored_only:
assessments = PeerWorkflowItem.get_scored_assessments(
submission_uuid
)[:limit]
else:
assessments = Assessment.objects.filter(
submission_uuid=submission_uuid,
score_type=PEER_TYPE
)[:limit]
assessments = Assessment.objects.filter(
submission_uuid=submission_uuid,
score_type=PEER_TYPE
)[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = (
......@@ -551,7 +543,7 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
raise PeerAssessmentInternalError(error_message)
def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
def get_submitted_assessments(submission_uuid, limit=None):
"""Retrieve the assessments created by the given submission's author.
Retrieves all the assessments created by the given submission's author. This
......@@ -564,8 +556,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
we are requesting. Required.
Keyword Arguments:
scored (boolean): Only retrieve the assessments used to generate a score
for this submission.
limit (int): Limit the returned assessments. If None, returns all.
Returns:
......@@ -581,7 +571,7 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
while retrieving the assessments associated with this submission.
Examples:
>>> get_submitted_assessments("1", scored_only=True, limit=2)
>>> get_submitted_assessments("1", limit=2)
[
{
'points_earned': 6,
......@@ -608,8 +598,6 @@ def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
scorer=workflow,
assessment__isnull=False
)
if scored_only:
items = items.exclude(scored=False)
assessments = Assessment.objects.filter(
pk__in=[item.assessment.pk for item in items])[:limit]
return serialize_assessments(assessments)
......
......@@ -104,7 +104,7 @@ ASSESSMENT_DICT_FAIL = {
}
}
# Answers are against RUBRIC_DICT -- this is worth 12 points
# Answers are against RUBRIC_DICT -- this is worth 14 points
ASSESSMENT_DICT_PASS = {
'overall_feedback': u"这是中国",
'criterion_feedback': {},
......@@ -116,7 +116,7 @@ ASSESSMENT_DICT_PASS = {
}
}
# Answers are against RUBRIC_DICT -- this is worth 12 points
# Answers are against RUBRIC_DICT -- this is worth 14 points
# Feedback text is one character over the limit.
LONG_FEEDBACK_TEXT = u"是" * Assessment.MAX_FEEDBACK_SIZE + "."
ASSESSMENT_DICT_HUGE = {
......@@ -322,7 +322,7 @@ class TestPeerApi(CacheResetTest):
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
assessments = peer_api.get_assessments(sub["uuid"])
self.assertEqual(1, len(assessments))
@file_data('data/valid_assessments.json')
......@@ -340,7 +340,7 @@ class TestPeerApi(CacheResetTest):
REQUIRED_GRADED_BY,
MONDAY,
)
assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
assessments = peer_api.get_assessments(sub["uuid"])
self.assertEqual(1, len(assessments))
self.assertEqual(assessments[0]["scored_at"], MONDAY)
......@@ -859,14 +859,12 @@ class TestPeerApi(CacheResetTest):
)
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
self.assertEqual(0, len(submitted_assessments))
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
self.assertEqual(1, len(submitted_assessments))
def test_get_submitted_assessments_with_bad_submission(self):
submitted_assessments = peer_api.get_submitted_assessments("bad-uuid", scored_only=True)
submitted_assessments = peer_api.get_submitted_assessments("bad-uuid")
self.assertEqual(0, len(submitted_assessments))
def test_find_active_assessments(self):
......@@ -1122,7 +1120,7 @@ class TestPeerApi(CacheResetTest):
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
mock_filter.side_effect = DatabaseError("Oh no.")
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
self.assertEqual(1, len(submitted_assessments))
@patch.object(PeerWorkflow.objects, 'raw')
......@@ -1253,11 +1251,11 @@ class TestPeerApi(CacheResetTest):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"])
@patch.object(PeerWorkflowItem, 'get_scored_assessments')
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessments_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_assessments(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get_or_create')
......@@ -1276,7 +1274,7 @@ class TestPeerApi(CacheResetTest):
MONDAY,
)
@patch.object(PeerWorkflowItem, 'get_scored_assessments')
@patch.object(Assessment.objects, 'filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_error_on_get_assessment(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -1372,15 +1370,15 @@ class TestPeerApi(CacheResetTest):
)
# Make sure Tim has one assessment.
tim_assessments = peer_api.get_assessments(tim_sub['uuid'], scored_only=False)
tim_assessments = peer_api.get_assessments(tim_sub['uuid'])
self.assertEqual(1, len(tim_assessments))
# Make sure Sally has one assessment.
sally_assessments = peer_api.get_assessments(sally_sub['uuid'], scored_only=False)
sally_assessments = peer_api.get_assessments(sally_sub['uuid'])
self.assertEqual(1, len(sally_assessments))
# Make sure Jane has no assessment.
jane_assessments = peer_api.get_assessments(jane_sub['uuid'], scored_only=False)
jane_assessments = peer_api.get_assessments(jane_sub['uuid'])
self.assertEqual(0, len(jane_assessments))
def test_get_submission_to_assess_no_workflow(self):
......@@ -1472,14 +1470,14 @@ class TestPeerApi(CacheResetTest):
required_graded_by
)
# Tim grades Bob, so now Bob has one assessment
# Tim grades Bob, so now Bob has one assessment with a good grade
peer_api.get_submission_to_assess(tim_sub['uuid'], tim['student_id'])
peer_api.create_assessment(
tim_sub['uuid'],
tim['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
ASSESSMENT_DICT_PASS['options_selected'],
ASSESSMENT_DICT_PASS['criterion_feedback'],
ASSESSMENT_DICT_PASS['overall_feedback'],
RUBRIC_DICT,
required_graded_by
)
......@@ -1506,27 +1504,24 @@ class TestPeerApi(CacheResetTest):
required_graded_by
)
# Sue grades the only person she hasn't graded yet (Bob)
# Sue grades the only person she hasn't graded yet (Bob), with a failing grade
peer_api.get_submission_to_assess(sue_sub['uuid'], sue['student_id'])
peer_api.create_assessment(
sue_sub['uuid'],
sue['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
ASSESSMENT_DICT_FAIL['options_selected'],
ASSESSMENT_DICT_FAIL['criterion_feedback'],
ASSESSMENT_DICT_FAIL['overall_feedback'],
RUBRIC_DICT,
required_graded_by
)
# This used to create a second assessment,
# which was the bug.
peer_api.get_score(bob_sub['uuid'], requirements)
score = peer_api.get_score(bob_sub['uuid'], requirements)
# Get the assessments used to generate the score
# Only the first assessment should be used
scored_assessments = peer_api.get_assessments(bob_sub['uuid'], scored_only=True)
self.assertEqual(len(scored_assessments), 1)
self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
# Verify that only the first assessment was used to generate the score
self.assertEqual(score['points_earned'], 14)
@raises(peer_api.PeerAssessmentInternalError)
def test_create_assessment_database_error(self):
......
......@@ -32,7 +32,7 @@ class CreateSubmissionsTest(TestCase):
self.assertGreater(len(answer_dict['text']), 0)
# Check that peer and self assessments were created
assessments = peer_api.get_assessments(submissions[0]['uuid'], scored_only=False)
assessments = peer_api.get_assessments(submissions[0]['uuid'])
# Verify that the assessments exist and have content
self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)
......
......@@ -16,13 +16,7 @@
<div class="wrapper--step__content">
<div class="step__content">
<div class="grade__value__description">
{% if waiting == 'peer' %}
<p>{% trans "Your response is still undergoing peer assessment. After your peers have assessed your response, you'll see their comments and receive your final grade." %}</p>
{% elif waiting == 'example-based' %}
<p>{% trans "Your response is still undergoing example-based assessment. After your response has been assessed, you'll see the comments and receive your final grade." %}</p>
{% elif waiting == 'all' %}
<p>{% trans "Your response is still undergoing peer assessment and example-based assessment. After your peers have assessed your response and example-based assessment is complete, you'll see your peers' comments and receive your final grade." %}</p>
{% endif %}
<p>{% trans "Not all assessment steps have been completed for your response. After all assessment steps are complete, you will see feedback from everyone who assessed your response, and you will receive your final grade." %}</p>
</div>
</div>
</div>
......
......@@ -4,12 +4,8 @@
<h3 class="message__title">{% trans "You Have Completed This Assignment" %} </h3>
<div class="message__content">
<p>
{% if waiting == 'peer' %}
<p>{% trans "Your grade will be available when your peers have completed their assessments of your response." %}</p>
{% elif waiting == 'example-based' %}
<p>{% trans "Your grade will be available when the example-based assessment of your response is complete." %}</p>
{% elif waiting == 'all' %}
<p>{% trans "Your grade will be available when your peers have completed their assessments of your response and the example-based assessment of your response is complete." %}</p>
{% if waiting %}
<p>{% trans "Your final grade will be available when all assessment steps for your response have been completed." %}</p>
{% else %}
<a data-behavior="ui-scroll" href="#openassessment__grade">{% trans "Review your grade and your assessment details." %}</a>
{% endif %}
......
......@@ -60,7 +60,7 @@ class GradeMixin(object):
elif status == "done":
path, context = self.render_grade_complete(workflow)
elif status == "waiting":
path, context = self.render_grade_waiting(workflow)
path, context = 'openassessmentblock/grade/oa_grade_waiting.html', {}
elif status is None:
path = 'openassessmentblock/grade/oa_grade_not_started.html'
else: # status is 'self' or 'peer', which implies that the workflow is incomplete
......@@ -70,22 +70,6 @@ class GradeMixin(object):
else:
return self.render_assessment(path, context)
def render_grade_waiting(self, workflow):
"""
Render the grade waiting state.
Args:
workflow (dict): The serialized Workflow model.
Returns:
tuple of context (dict) and template_path (string)
"""
context = {
"waiting": self.get_waiting_details(workflow["status_details"])
}
return 'openassessmentblock/grade/oa_grade_waiting.html', context
def render_grade_complete(self, workflow):
"""
Render the grade complete state.
......@@ -108,6 +92,7 @@ class GradeMixin(object):
has_submitted_feedback = False
if "peer-assessment" in assessment_steps:
peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
feedback = peer_api.get_assessment_feedback(submission_uuid)
peer_assessments = [
self._assessment_grade_context(peer_assessment)
......
......@@ -777,22 +777,17 @@ class OpenAssessmentBlock(
def get_waiting_details(self, status_details):
"""
Returns the specific waiting status based on the given status_details.
This status can currently be peer, example-based, or both. This is
determined by checking that status details to see if all assessment
modules have been graded.
Returns waiting status (boolean value) based on the given status_details.
Args:
status_details (dict): A dictionary containing the details of each
assessment module status. This will contain keys such as
"peer" and "ai", referring to dictionaries, which in turn will
have the key "graded". If this key has a value set, these
assessment modules have been graded.
"peer", "ai", and "staff", referring to dictionaries, which in
turn will have the key "graded". If this key has a value set,
these assessment modules have been graded.
Returns:
A string of "peer", "exampled-based", or "all" to indicate which
assessment modules in the workflow are waiting on assessments.
Returns None if no module is waiting on an assessment.
True if waiting for a grade from peer, ai, or staff assessment, else False.
Examples:
>>> now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
......@@ -807,18 +802,13 @@ class OpenAssessmentBlock(
>>> }
>>> }
>>> self.get_waiting_details(status_details)
"peer"
"""
waiting = None
peer_waiting = "peer" in status_details and not status_details["peer"]["graded"]
ai_waiting = "ai" in status_details and not status_details["ai"]["graded"]
if peer_waiting and ai_waiting:
waiting = "all"
elif peer_waiting:
waiting = "peer"
elif ai_waiting:
waiting = "example-based"
return waiting
True
"""
steps = ["peer", "ai", "staff"] # These are the steps that can be submitter-complete, but lack a grade
for step in steps:
if step in status_details and not status_details[step]["graded"]:
return True
return False
def is_released(self, step=None):
"""
......
......@@ -391,7 +391,7 @@ class StaffAreaMixin(object):
if "peer-assessment" in assessment_steps:
peer_assessments = peer_api.get_assessments(submission_uuid)
submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
if "self-assessment" in assessment_steps:
self_assessment = self_api.get_assessment(submission_uuid)
......
......@@ -2,21 +2,21 @@
"waiting_for_peer": {
"waiting_for_peer": true,
"waiting_for_ai": false,
"expected_response": "peer assessment"
"expected_response": "not all assessment steps have been completed"
},
"waiting_for_ai": {
"waiting_for_peer": false,
"waiting_for_ai": true,
"expected_response": "example-based assessment"
"expected_response": "not all assessment steps have been completed"
},
"waiting_for_both": {
"waiting_for_peer": true,
"waiting_for_ai": true,
"expected_response": "peer assessment and example-based assessment"
"expected_response": "not all assessment steps have been completed"
},
"not_waiting": {
"waiting_for_peer": false,
"waiting_for_ai": false,
"expected_response": "your grade:"
}
}
\ No newline at end of file
}
......@@ -7,12 +7,14 @@ import copy
import mock
import pytz
import ddt
import datetime as dt
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class TestMessageRender(XBlockHandlerTestCase):
"""
Tests for the Message XBlock Handler
......@@ -38,6 +40,10 @@ class TestMessageRender(XBlockHandlerTestCase):
'completed': TODAY,
'graded': TODAY,
},
'staff': {
'completed': TODAY,
'graded': TODAY,
},
}
def _assert_path_and_context(
......@@ -717,7 +723,7 @@ class TestMessageRender(XBlockHandlerTestCase):
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": "peer"
"waiting": True
}
self._assert_path_and_context(
......@@ -744,34 +750,7 @@ class TestMessageRender(XBlockHandlerTestCase):
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": "peer"
}
self._assert_path_and_context(
xblock, expected_path, expected_context,
status, deadline_information, has_peers_to_grade, status_details
)
@scenario('data/message_scenario.xml', user_id="Linda")
def test_waiting_on_ai(self, xblock):
status = 'waiting'
status_details = copy.deepcopy(self.DEFAULT_STATUS_DETAILS)
status_details["ai"]["graded"] = None
deadline_information = {
'submission': (True, 'due', self.FAR_PAST, self.YESTERDAY),
'peer-assessment': (True, 'due', self.YESTERDAY, self.TODAY),
'self-assessment': (True, 'due', self.YESTERDAY, self.TODAY),
'over-all': (True, 'due', self.FAR_PAST, self.TODAY)
}
has_peers_to_grade = False
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": "example-based"
"waiting": True
}
self._assert_path_and_context(
......@@ -779,13 +758,18 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade, status_details
)
@ddt.data("peer", "ai", "staff", "all")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_waiting_on_all(self, xblock):
def test_waiting_on_steps(self, xblock, step):
status = 'waiting'
status_details = copy.deepcopy(self.DEFAULT_STATUS_DETAILS)
status_details["ai"]["graded"] = None
status_details["peer"]["graded"] = None
if step == "all":
status_details["peer"]["graded"] = None
status_details["ai"]["graded"] = None
status_details["staff"]["graded"] = None
else:
status_details[step]["graded"] = None
deadline_information = {
'submission': (True, 'due', self.FAR_PAST, self.YESTERDAY),
......@@ -799,7 +783,7 @@ class TestMessageRender(XBlockHandlerTestCase):
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": "all"
"waiting": True
}
self._assert_path_and_context(
......@@ -824,7 +808,7 @@ class TestMessageRender(XBlockHandlerTestCase):
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": None
"waiting": False
}
self._assert_path_and_context(
......@@ -849,7 +833,7 @@ class TestMessageRender(XBlockHandlerTestCase):
expected_path = 'openassessmentblock/message/oa_message_complete.html'
expected_context = {
"waiting": None
"waiting": False
}
self._assert_path_and_context(
......
......@@ -823,5 +823,5 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
self.assertTrue(resp['success'])
# Retrieve the peer assessment
retrieved_assessment = peer_api.get_assessments(submission['uuid'], scored_only=False)[0]
retrieved_assessment = peer_api.get_assessments(submission['uuid'])[0]
return submission['uuid'], retrieved_assessment
......@@ -1001,8 +1001,8 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
@retry()
@attr('acceptance')
@ddt.data(False)
def test_train_self_peer_staff(self, peer_grades_me): # TODO: can't run with "True" due to TNL-3957
@ddt.data(True, False)
def test_train_self_peer_staff(self, peer_grades_me):
"""
Scenario: complete workflow that included staff required step.
......@@ -1032,12 +1032,17 @@ class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
# Note that PEER ASSESSMENT isn't shown here - it gets cut off the page in this case
# See TNL-3930 for details and possible fix.
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
if peer_grades_me:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
else:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
if __name__ == "__main__":
......