Commit 1c2f6c45 by Christina Roberts

Merge pull request #855 from edx/christina/fix-pep8

Christina/fix pep8
parents ed2853f6 f41dd245
@@ -2,7 +2,6 @@
 Grade step in the OpenAssessment XBlock.
 """
 import copy
-from collections import defaultdict
 from lazy import lazy
 from django.utils.translation import ugettext as _
@@ -309,7 +308,7 @@ class GradeMixin(object):
             'additional_feedback': self._additional_feedback(
                 staff_assessment=staff_assessment,
                 peer_assessments=peer_assessments,
-                self_assessment= self_assessment,
+                self_assessment=self_assessment,
             ),
         }
@@ -398,6 +397,7 @@ class GradeMixin(object):
         """
         median_scores = peer_api.get_assessment_median_scores(submission_uuid)
         median_score = median_scores.get(criterion['name'], None)
+
         def median_options():
             """
             Returns a list of options that should be shown to represent the median.
@@ -458,7 +458,6 @@ class GradeMixin(object):
             'explanation': None,
         }
-
     def _additional_feedback(self, staff_assessment, peer_assessments, self_assessment):
         """
         Returns an array of additional feedback for the specified assessments.
...
@@ -86,10 +86,9 @@ class LeaderboardMixin(object):
         score.pop('content', None)
-        context = { 'topscores': scores,
-                    'allow_latex': self.allow_latex,
-                  }
-        return ('openassessmentblock/leaderboard/oa_leaderboard_show.html', context)
+        context = {'topscores': scores, 'allow_latex': self.allow_latex,}
+        return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
 
     def render_leaderboard_incomplete(self):
         """
@@ -98,4 +97,4 @@ class LeaderboardMixin(object):
         Returns:
             template_path (string), tuple of context (dict)
         """
-        return ('openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {})
+        return 'openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {}
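Note on the two return changes above: in Python the comma, not the parentheses, builds the tuple, so the parenthesized form is redundant and this cleanup drops it. A minimal sketch (the function names here are illustrative, not from the repo):

    # Both forms return the same 2-tuple; removing the parentheses
    # changes nothing at the call site.
    def render_with_parens():
        return ('path/to/template.html', {'key': 'value'})


    def render_without_parens():
        return 'path/to/template.html', {'key': 'value'}


    assert render_with_parens() == render_without_parens()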
@@ -79,7 +79,8 @@ class MessageMixin(object):
             "{}_approaching".format(status): step_info.get('approaching', False),
             "{}_not_released".format(status): (step_info.get("reason") == "start"),
-            #Uses a static field in the XBlock to determine if the PeerAssessment Block was able to pick up an assessment.
+            # Uses a static field in the XBlock to determine if the PeerAssessment Block
+            # was able to pick up an assessment.
             "peer_not_available": self.no_peers,
         }
@@ -120,7 +121,6 @@ class MessageMixin(object):
         return 'openassessmentblock/message/oa_message_closed.html', context
-
     def render_message_open(self, deadline_info):
         """
         Renders the "Open" message state
...
@@ -21,7 +21,7 @@ from xblock.fragment import Fragment
 from openassessment.xblock.grade_mixin import GradeMixin
 from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
-from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
+from openassessment.xblock.defaults import *  # pylint: disable=wildcard-import, unused-wildcard-import
 from openassessment.xblock.message_mixin import MessageMixin
 from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
 from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
@@ -73,7 +73,7 @@ UI_MODELS = {
         "class_id": "openassessment__grade",
         "title": "Your Grade:"
     },
     "leaderboard": {
         "name": "leaderboard",
         "class_id": "openassessment__leaderboard",
         "title": "Leaderboard"
@@ -94,6 +94,7 @@ def load(path):
     data = pkg_resources.resource_string(__name__, path)
     return data.decode("utf8")
+
 
 @XBlock.needs("i18n")
 @XBlock.needs("user")
 class OpenAssessmentBlock(
@@ -729,11 +730,14 @@ class OpenAssessmentBlock(
         Examples:
             >>> is_closed()
-            False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
+            False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
+            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
             >>> is_closed(step="submission")
-            True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
+            True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
+            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
             >>> is_closed(step="self-assessment")
-            True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
+            True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
+            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
         """
         submission_range = (self.submission_start, self.submission_due)
...
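Several hunks in this commit only add or remove blank lines; those are fixes for the pep8 blank-line checks (E301/E302/E303): two blank lines around top-level definitions, one between methods, and no longer runs of blanks. A minimal sketch of the convention, with illustrative names:

    import copy


    def load_defaults():
        # Two blank lines separate top-level definitions (E302).
        return copy.copy({})


    class Example(object):

        def first(self):
            # One blank line separates methods inside a class (E301).
            return 1

        def second(self):
            return 2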
@@ -121,8 +121,10 @@ def resolve_dates(start, end, date_ranges, _):
     Args:
-        start (str, ISO date format, or datetime): When the problem opens. A value of None indicates that the problem is always open.
-        end (str, ISO date format, or datetime): When the problem closes. A value of None indicates that the problem never closes.
+        start (str, ISO date format, or datetime): When the problem opens.
+            A value of None indicates that the problem is always open.
+        end (str, ISO date format, or datetime): When the problem closes.
+            A value of None indicates that the problem never closes.
         date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
             the start/end timestamps (date string or datetime) of each submission/assessment.
         _ (function): An i18n service function to use for retrieving the
...
@@ -17,7 +17,9 @@ from xblock.fragment import Fragment
 from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
 from openassessment.xblock.validation import validator
-from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key, update_assessments_format
+from openassessment.xblock.data_conversion import (
+    create_rubric_dict, make_django_template_key, update_assessments_format
+)
 from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
 from openassessment.xblock.resolve_dates import resolve_dates
 from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
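The import change above uses the wrapping style PEP 8 prefers for long import lists: parentheses rather than backslash continuations. A runnable sketch on a stdlib module:

    # Parentheses let a long import list span multiple lines
    # without backslash continuations.
    from collections import (
        OrderedDict, defaultdict, namedtuple
    )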
@@ -218,7 +220,6 @@ class StudioMixin(object):
         for example in assessment['examples']:
             example['answer'] = {'parts': [{'text': text} for text in example['answer']]}
 
         xblock_validator = validator(self, self._)
-
         success, msg = xblock_validator(
             create_rubric_dict(data['prompts'], data['criteria']),
@@ -435,8 +435,10 @@ class SubmissionMixin(object):
             student_submission = self.get_user_submission(
                 workflow["submission_uuid"]
             )
-            context["peer_incomplete"] = "peer" in workflow["status_details"] and not workflow["status_details"]["peer"]["complete"]
-            context["self_incomplete"] = "self" in workflow["status_details"] and not workflow["status_details"]["self"]["complete"]
+            peer_in_workflow = "peer" in workflow["status_details"]
+            self_in_workflow = "self" in workflow["status_details"]
+            context["peer_incomplete"] = peer_in_workflow and not workflow["status_details"]["peer"]["complete"]
+            context["self_incomplete"] = self_in_workflow and not workflow["status_details"]["self"]["complete"]
             context["student_submission"] = create_submission_dict(student_submission, self.prompts)
             path = 'openassessmentblock/response/oa_response_submitted.html'
...
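The peer_in_workflow/self_in_workflow extraction above is the standard way to get a long boolean expression under the line-length limit without changing behavior; the intermediate names also document what each test means. A self-contained sketch of the pattern (the dictionary contents are illustrative):

    status_details = {"peer": {"complete": False}}

    # Before: one long expression per line.
    peer_incomplete = "peer" in status_details and not status_details["peer"]["complete"]

    # After: name the sub-expression, then reuse it.
    peer_in_workflow = "peer" in status_details
    peer_incomplete = peer_in_workflow and not status_details["peer"]["complete"]

    assert peer_incomplete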
@@ -132,7 +132,7 @@ class XBlockHandlerTestCaseMixin(object):
         super(XBlockHandlerTestCaseMixin, self).setUp()
         self.runtime = WorkbenchRuntime()
         mock_publish = mock.MagicMock(side_effect=self.runtime.publish)
-        self.runtime.publish=mock_publish
+        self.runtime.publish = mock_publish
 
     def set_user(self, user_id):
         """
@@ -379,7 +379,8 @@ class SubmitAssessmentsMixin(object):
         return submission
 
-    def set_staff_access(self, xblock):
+    @staticmethod
+    def set_staff_access(xblock):
         xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
         xblock.xmodule_runtime.anonymous_student_id = 'Bob'
...
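This commit converts several helpers that never touch self into static methods (pylint reports these as no-self-use); call sites are unaffected. A minimal sketch with an illustrative class:

    class Helper(object):

        @staticmethod
        def double(value):
            # No 'self' parameter: the method uses only its arguments.
            return value * 2


    # Works unchanged on the class or on an instance.
    assert Helper.double(2) == 4
    assert Helper().double(3) == 6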
@@ -12,6 +12,7 @@ from openassessment.xblock.data_conversion import (
     create_prompts_list, create_submission_dict, prepare_submission_for_serialization, update_assessments_format
 )
+
 
 @ddt.ddt
 class DataConversionTest(TestCase):
@@ -38,7 +39,8 @@ class DataConversionTest(TestCase):
         (
             {'answer': {'parts': [{'text': 'a'}]}},
             [{'description': '1'}, {'description': '2'}],
-            {'answer': {'parts': [{'prompt': {'description': '1'}, 'text': 'a'}, {'prompt': {'description': '2'}, 'text': ''}]}}
+            {'answer': {'parts': [{'prompt': {'description': '1'}, 'text': 'a'},
+                                  {'prompt': {'description': '2'}, 'text': ''}]}}
         )
     )
     @ddt.unpack
@@ -60,6 +62,6 @@ class DataConversionTest(TestCase):
     def test_update_assessments_format(self, input, output):
         self.assertEqual(update_assessments_format([{
             'examples': input,
         }]), [{
             'examples': output,
         }])
@@ -279,7 +279,7 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
     @scenario('data/grade_scenario.xml', user_id='Bob')
     def test_assessment_does_not_match_rubric(self, xblock):
         # Get to the grade complete section
         self.create_submission_and_assessments(
             xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT
         )
...
@@ -46,8 +46,9 @@ class TestMessageRender(XBlockHandlerTestCase):
         },
     }
 
+    @staticmethod
     def _assert_path_and_context(
-            self, xblock, expected_path, expected_context,
+            xblock, expected_path, expected_context,
             workflow_status, deadline_information, has_peers_to_grade,
             workflow_status_details=DEFAULT_STATUS_DETAILS
     ):
@@ -102,8 +103,7 @@ class TestMessageRender(XBlockHandlerTestCase):
         # Asserts that the message_mixin correctly derived the path and context to be rendered
         xblock.render_assessment.assert_called_with(expected_path, expected_context)
 
-
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_submission(self, xblock):
         status = None
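The remaining hunks in this test module all repeat one fix: pep8's E251 forbids spaces around = in keyword arguments and default values, so user_id = "Linda" becomes user_id="Linda". The rule in miniature:

    def greet(name="world"):
        # E251: no spaces around '=' for defaults or keyword arguments.
        return "Hello, " + name


    assert greet(name="Linda") == "Hello, Linda"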
@@ -128,7 +128,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_peer.xml', user_id="Linda")
     def test_submission_no_peer(self, xblock):
         status = None
@@ -152,7 +152,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_submission_approaching(self, xblock):
         status = None
@@ -177,7 +177,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_self.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_self.xml', user_id="Linda")
     def test_submission_no_self_approaching(self, xblock):
         status = None
@@ -201,7 +201,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_submission_not_yet_open(self, xblock):
         status = None
@@ -226,7 +226,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_submission_incomplete(self, xblock):
         status = None
@@ -251,7 +251,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_student_training.xml', user_id = "Linda")
+    @scenario('data/message_scenario_student_training.xml', user_id="Linda")
     def test_training(self, xblock):
         status = 'training'
@@ -280,7 +280,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_student_training.xml', user_id = "Linda")
+    @scenario('data/message_scenario_student_training.xml', user_id="Linda")
     def test_training_approaching(self, xblock):
         status = 'training'
@@ -309,7 +309,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_student_training.xml', user_id = "Linda")
+    @scenario('data/message_scenario_student_training.xml', user_id="Linda")
     def test_training_not_released(self, xblock):
         status = 'training'
@@ -335,7 +335,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_student_training.xml', user_id = "Linda")
+    @scenario('data/message_scenario_student_training.xml', user_id="Linda")
     def test_training_closed(self, xblock):
         status = 'training'
@@ -361,7 +361,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer(self, xblock):
         status = 'peer'
@@ -389,7 +389,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_self.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_self.xml', user_id="Linda")
     def test_peer_no_self(self, xblock):
         status = 'peer'
@@ -416,7 +416,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_self.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_self.xml', user_id="Linda")
     def test_peer_no_self_approaching(self, xblock):
         status = 'peer'
@@ -443,7 +443,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer_not_released(self, xblock):
         status = 'peer'
@@ -468,7 +468,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer_incomplete(self, xblock):
         status = 'peer'
@@ -493,7 +493,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer_no_peers_to_assess(self, xblock):
         status = 'peer'
@@ -521,7 +521,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer_no_peers_to_assess_approaching(self, xblock):
         status = 'peer'
@@ -549,7 +549,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_peer_not_open_approaching(self, xblock):
         status = 'peer'
@@ -574,7 +574,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_self(self, xblock):
         status = 'self'
@@ -602,7 +602,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_peer.xml', user_id="Linda")
     def test_self_no_peer(self, xblock):
         status = 'self'
@@ -629,7 +629,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_peer.xml', user_id="Linda")
     def test_self_no_peer_approaching(self, xblock):
         status = 'self'
@@ -656,7 +656,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_self_closed(self, xblock):
         status = 'self'
@@ -681,7 +681,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
+    @scenario('data/message_scenario_no_peer.xml', user_id="Linda")
     def test_self_no_peer_incomplete(self, xblock):
         status = 'self'
@@ -705,7 +705,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_waiting_due(self, xblock):
         status = 'waiting'
@@ -732,7 +732,7 @@
             status, deadline_information, has_peers_to_grade, status_details
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_waiting_not_due(self, xblock):
         status = 'waiting'
@@ -792,7 +792,7 @@
             status, deadline_information, has_peers_to_grade, status_details
        )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_done_due(self, xblock):
         status = 'done'
@@ -817,7 +817,7 @@
             status, deadline_information, has_peers_to_grade
         )
 
-    @scenario('data/message_scenario.xml', user_id = "Linda")
+    @scenario('data/message_scenario.xml', user_id="Linda")
     def test_done_not_due(self, xblock):
         status = 'done'
...
@@ -77,7 +77,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
         with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
             self.runtime.render(xblock, "student_view")
             expected_reqs = {
-                "peer": { "must_grade": 5, "must_be_graded_by": 3 }
+                "peer": {"must_grade": 5, "must_be_graded_by": 3}
             }
             mock_api.update_from_assessments.assert_called_once_with('test_submission', expected_reqs)
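The dict-literal change above (and the identical one in the self-assessment tests below) is pep8's E201/E202 fix: no whitespace immediately inside brackets or braces. In miniature:

    flagged = { "must_grade": 5, "must_be_graded_by": 3 }  # pep8 E201/E202
    clean = {"must_grade": 5, "must_be_graded_by": 3}

    assert flagged == clean  # same value; only the layout differs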
@@ -259,6 +259,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
         xblock.prompts = [{'description': 'Prompt 4.'}, {'description': 'Prompt 5.'}]
         self.assertEqual(xblock.prompt, '[{"description": "Prompt 4."}, {"description": "Prompt 5."}]')
+
 
 class TestDates(XBlockHandlerTestCase):
     @scenario('data/basic_scenario.xml')
...
@@ -52,7 +52,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         # Validate Peer Rendering.
         self.assertTrue("Sally".encode('utf-8') in peer_response.body or
                         "Hal".encode('utf-8') in peer_response.body)
 
     @mock.patch('openassessment.xblock.workflow_mixin.WorkflowMixin.workflow_requirements')
     @scenario('data/peer_assessment_scenario.xml', user_id='Sally')
@@ -65,12 +65,12 @@ class TestPeerAssessment(XBlockHandlerTestCase):
         self._sally_and_hal_grade_each_other_helper(xblock)
 
         # Verify that Sally's workflow is not marked done, as the requirements are higher than 1.
-        mock_requirements.return_value = {"peer": {"must_grade":2, "must_be_graded_by":2}}
+        mock_requirements.return_value = {"peer": {"must_grade": 2, "must_be_graded_by": 2}}
         workflow_info = xblock.get_workflow_info()
         self.assertEqual(workflow_info["status"], u'peer')
 
         # Now, change the requirements and verify that Sally's workflow updates to 'self' status.
-        mock_requirements.return_value = {"peer": {"must_grade":1, "must_be_graded_by":1}}
+        mock_requirements.return_value = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}
         workflow_info = xblock.get_workflow_info()
         self.assertEqual(workflow_info["status"], u'self')
@@ -127,7 +127,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
     def test_peer_assess_without_leasing_submission(self, xblock):
         # Create a submission
         student_item = xblock.get_student_item_dict()
-        submission = xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
+        xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
 
         # Attempt to assess a peer without first leasing their submission
         # (usually occurs by rendering the peer assessment step)
@@ -525,13 +525,13 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
         # Continued grading should still be available,
         # but since there are no other submissions, we're in the waiting state.
         expected_context = {
             'graded': 0,
             'must_grade': 5,
             'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
             'review_num': 1,
             'rubric_criteria': xblock.rubric_criteria,
             'submit_button_text': 'Submit your assessment & review another response',
             'allow_latex': False,
         }
         self._assert_path_and_context(
             xblock, 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
@@ -552,16 +552,16 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
         )
         expected_context = {
             'graded': 0,
             'must_grade': 5,
             'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
             'peer_submission': create_submission_dict(submission, xblock.prompts),
             'file_upload_type': None,
             'peer_file_url': '',
             'review_num': 1,
             'rubric_criteria': xblock.rubric_criteria,
             'submit_button_text': 'Submit your assessment & review another response',
             'allow_latex': False,
         }
         self._assert_path_and_context(
             xblock, 'openassessmentblock/peer/oa_peer_turbo_mode.html',
@@ -667,7 +667,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_assess_handler(self, xblock):
         # Submit a peer assessment
-        submission_uuid, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
+        submission_uuid, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
 
         # Check that the stored assessment matches what we expect
         self.assertEqual(assessment['submission_uuid'], submission_uuid)
@@ -688,7 +688,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
     @scenario('data/feedback_per_criterion.xml', user_id='Bob')
     def test_peer_assess_feedback(self, xblock):
         # Submit a peer assessment
-        _, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
+        _, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
 
         # Retrieve the assessment and check the feedback
         self.assertEqual(assessment['feedback'], self.ASSESSMENT['overall_feedback'])
@@ -702,7 +702,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
     def test_peer_assess_send_unsolicited_criterion_feedback(self, xblock):
         # Submit an assessment containing per-criterion feedback,
         # even though the rubric in this scenario has per-criterion feedback disabled.
-        _, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
+        _, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
 
         # Expect that per-criterion feedback were ignored
         for part in assessment['parts']:
@@ -716,7 +716,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
             'criterion_feedback': {u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ'},
             'overall_feedback': u''
         }
-        _, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', assessment_dict)
+        _, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', assessment_dict)
 
         # Check the assessment for the criterion that has options
         self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
@@ -733,8 +733,8 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
         # Submit a peer assessment
         assessment = self._submit_peer_assessment(
             xblock,
-            'Sally',
-            'Bob',
+            u'Sally',
+            u'Bob',
             self.ASSESSMENT_WITH_INVALID_SUBMISSION_UUID,
             expect_failure=True,
         )
@@ -746,7 +746,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
         # Submit an assessment, but mutate the options selected so they do NOT match the rubric
         # Expect a failure response
         self._submit_peer_assessment(
-            xblock, 'Sally', 'Bob', self.ASSESSMENT_WITH_INVALID_OPTION,
+            xblock, u'Sally', u'Bob', self.ASSESSMENT_WITH_INVALID_OPTION,
             expect_failure=True
         )
@@ -754,19 +754,19 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_api_request_error(self, xblock, mock_api):
         mock_api.create_assessment.side_effect = peer_api.PeerAssessmentRequestError
-        self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
+        self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
 
     @mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_api_internal_error(self, xblock, mock_api):
         mock_api.create_assessment.side_effect = peer_api.PeerAssessmentInternalError
-        self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
+        self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
 
     @mock.patch('openassessment.xblock.workflow_mixin.workflow_api.update_from_assessments')
     @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
     def test_peer_api_workflow_error(self, xblock, mock_call):
         mock_call.side_effect = workflow_api.AssessmentWorkflowInternalError
-        self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
+        self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
 
     def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
         """
...
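The quoted-name changes in this test module only add a u prefix. Under Python 2, which this code targets (note the print statements elsewhere in the diff), 'Sally' is a byte string while u'Sally' is unicode; prefixing consistently keeps all the literals in a call the same type. A minimal sketch of the Python 2 semantics:

    byte_name = 'Sally'   # str (bytes) in Python 2
    uni_name = u'Sally'   # unicode

    # ASCII-only values compare equal via implicit coercion, but mixing
    # the two types is exactly what the consistent u'' prefixes avoid.
    assert byte_name == uni_name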
@@ -11,6 +11,7 @@ from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DIS
 STUB_I18N = lambda x: x
 
+
 @ddt.ddt
 class ResolveDatesTest(TestCase):
...
@@ -22,7 +22,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
     def test_save_response(self, xblock, data):
         # Save the response
         submission = [" ".join(data[0]), " ".join(data[1])]
-        payload = json.dumps({'submission': submission })
+        payload = json.dumps({'submission': submission})
         resp = self.request(xblock, 'save_submission', payload, response_format="json")
         self.assertTrue(resp['success'])
         self.assertEqual(resp['msg'], u'')
@@ -44,7 +44,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
         # Save another response
         submission = [u"ГЂіи lіиэ ъэтшээи", u"Ђэаvэи аиↁ Ђэѓэ."]
-        payload = json.dumps({'submission': submission })
+        payload = json.dumps({'submission': submission})
         resp = self.request(xblock, 'save_submission', payload, response_format="json")
         self.assertTrue(resp['success'])
...
@@ -79,7 +79,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
         # Verify that the workflow is updated when we submit a self-assessment
         self.assertTrue(resp['success'])
         expected_reqs = {
-            "peer": { "must_grade": 5, "must_be_graded_by": 3 }
+            "peer": {"must_grade": 5, "must_be_graded_by": 3}
         }
         mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
...
@@ -63,7 +63,9 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase):
                 'status_value': 'Complete',
                 'icon_class': 'fa-check',
                 'message_title': 'You Must Complete the Steps Above to View Your Grade',
-                'message_content': 'Although a course staff member has assessed your response, you will receive your grade only after you have completed all the required steps of this problem.'
+                'message_content': 'Although a course staff member has assessed your response, '
+                                   'you will receive your grade only after you have completed all '
+                                   'the required steps of this problem.'
             }
         )
@@ -107,7 +109,8 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase):
             {
                 'status_value': 'Not Available',
                 'message_title': 'Waiting for a Staff Grade',
-                'message_content': 'Check back later to see if a course staff member has assessed your response. You will receive your grade after the assessment is complete.',
+                'message_content': 'Check back later to see if a course staff member has assessed your response. '
+                                   'You will receive your grade after the assessment is complete.',
             }
         )
@@ -160,7 +163,9 @@ class TestStaffAssessment(StaffAssessmentTestBase):
         self.assertEqual(assessment['points_earned'], score['points_earned'])
         self.assertEqual(assessment['points_possible'], score['points_possible'])
-        self.assert_assessment_event_published(xblock, 'openassessmentblock.staff_assess', assessment, type='full-grade')
+        self.assert_assessment_event_published(
+            xblock, 'openassessmentblock.staff_assess', assessment, type='full-grade'
+        )
 
     @scenario('data/self_assessment_scenario.xml', user_id='Bob')
     def test_staff_assess_handler_regrade(self, xblock):
...
@@ -51,7 +51,8 @@ class NullUserService(object):
     """
     A simple implementation of the runtime "user" service.
     """
-    def get_anonymous_user_id(self, username, course_id):
+    @staticmethod
+    def get_anonymous_user_id(username, _):
         return username
@@ -739,7 +740,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
             'submission_returned_uuid': submission['uuid']
         })
 
     def _verify_staff_assessment_context(self, context, required, ungraded=None, in_progress=None):
-
         self.assertEquals(required, context['staff_assessment_required'])
         if not required:
@@ -749,8 +749,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertEqual(ungraded, context['staff_assessment_ungraded'])
         self.assertEqual(in_progress, context['staff_assessment_in_progress'])
 
+    @staticmethod
     def _create_mock_runtime(
-            self,
             item_id,
             is_staff,
             is_admin,
@@ -772,7 +772,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
         )
         return mock_runtime
 
-    def _create_submission(self, item, values, types):
+    @staticmethod
+    def _create_submission(item, values, types):
         """ Create a submission and corresponding workflow. """
         submission = sub_api.create_submission(item, values)
...
@@ -157,14 +157,13 @@ class StudentTrainingAssessTest(StudentTrainingTest):
         expected_context["training_num_current"] = 2
         expected_context["training_essay"] = {
             'answer': {
-                'parts': [
-                    {
-                        'text': u"тєѕт αηѕωєя",
-                        'prompt': {
-                            'description': u'Given the state of the world today, what do you think should be done to combat poverty?'
-                        }
+                'parts': [{
+                    'text': u"тєѕт αηѕωєя",
+                    'prompt': {
+                        'description':
+                            u'Given the state of the world today, what do you think should be done to combat poverty?'
                     }
-                ]
+                }]
             }
         }
...
@@ -96,11 +96,11 @@ class StudioViewTest(XBlockHandlerTestCase):
     ]
 
     EXAMPLE_BASED_ASSESSMENT_EXAMPLES = '<examples>' + \
         '<example>' + \
             '<answer> TEST ANSWER </answer>' + \
             '<select criterion="Test criterion" option="Test option" />' + \
         '</example>' + \
     '</examples>'
 
     ASSESSMENT_CSS_IDS = {
         "example-based-assessment": "oa_ai_assessment_editor",
...
@@ -119,7 +119,8 @@ class SubmissionTest(XBlockHandlerTestCase):
             "filename": "test.jpg"}), response_format='json')
         self.assertTrue(resp['success'])
         self.assertTrue(resp['url'].startswith(
-            'https://mybucket.s3.amazonaws.com/submissions_attachments/test_student/test_course/' + xblock.scope_ids.usage_id
+            'https://mybucket.s3.amazonaws.com/submissions_attachments/test_student/test_course/' +
+            xblock.scope_ids.usage_id
         ))
 
     @mock_s3
@@ -339,7 +340,7 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
     @patch.object(OpenAssessmentBlock, 'get_user_submission')
     @scenario('data/submission_open.xml', user_id="Bob")
     def test_open_submitted_old_format(self, xblock, mock_get_user_submission):
-        submission = xblock.create_submission(
+        xblock.create_submission(
             xblock.get_student_item_dict(),
             ('A man must have a code', 'A man must have an umbrella too.')
         )
...
@@ -17,18 +17,23 @@ from openassessment.xblock.validation import (
 STUB_I18N = lambda x: x
 
 @ddt.ddt
 class AssessmentValidationTest(TestCase):
     @ddt.file_data('data/valid_assessments.json')
     def test_valid_assessment(self, data):
-        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
+        success, msg = validate_assessments(
+            data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N
+        )
         self.assertTrue(success)
         self.assertEqual(msg, u'')
 
     @ddt.file_data('data/invalid_assessments.json')
     def test_invalid_assessment(self, data):
-        success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
+        success, msg = validate_assessments(
+            data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N
+        )
         self.assertFalse(success)
         self.assertGreater(len(msg), 0)
@@ -383,7 +388,7 @@ class ValidationSubmissionTest(TestCase):
         self.assertTrue(success)
 
         success, msg = validate_submission(
-            [u"Response 1.", u"Response 2" ], self.PROMPT, STUB_I18N
+            [u"Response 1.", u"Response 2"], self.PROMPT, STUB_I18N
         )
         self.assertTrue(success)
...
@@ -243,7 +243,9 @@ class TestSerializeContent(TestCase):
         try:
             etree.fromstring(xml)
         except Exception as ex:  # pylint:disable=W0703
-            msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
+            msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(
+                assessment=mutated_dict, ex=ex
+            )
             self.fail(msg)
 
     @ddt.data("title", "prompt", "start", "due", "submission_due", "submission_start", "leaderboard_show")
@@ -316,7 +318,7 @@ class TestSerializeContent(TestCase):
             # Mutation #1: Remove the key
             print "== Removing key {}".format(key)
-            yield {k:v for k, v in input_dict.iteritems() if k != key}
+            yield {k: v for k, v in input_dict.iteritems() if k != key}
 
             if isinstance(val, dict):
@@ -399,7 +401,8 @@ class TestSerializeContent(TestCase):
             print "== int value {}".format(key)
             yield self._mutate_dict(input_dict, key, 0)
 
-    def _mutate_dict(self, input_dict, key, new_val):
+    @staticmethod
+    def _mutate_dict(input_dict, key, new_val):
         """
         Copy and update a dictionary.
@@ -415,7 +418,8 @@ class TestSerializeContent(TestCase):
         mutated[key] = new_val
         return mutated
 
-    def _mutate_list(self, input_list, index, new_val):
+    @staticmethod
+    def _mutate_list(input_list, index, new_val):
         """
         Copy and update a list.
@@ -464,6 +468,7 @@ class TestParseExamplesFromXml(TestCase):
         examples = parse_examples_xml(xml)
         self.assertEqual(examples, data['examples'])
+
 
 @ddt.ddt
 class TestParseAssessmentsFromXml(TestCase):
...
@@ -288,6 +288,7 @@ def _parse_prompts_xml(root):
     return prompts_list
 
+
 def _parse_options_xml(options_root):
     """
     Parse <options> element in the OpenAssessment XBlock's content XML.
@@ -402,7 +403,9 @@ def _parse_criteria_xml(criteria_root):
         if criterion_feedback in ['optional', 'disabled', 'required']:
             criterion_dict['feedback'] = criterion_feedback
         else:
-            raise UpdateFromXmlError('Invalid value for "feedback" attribute: if specified, it must be set set to "optional" or "required".')
+            raise UpdateFromXmlError(
+                'Invalid value for "feedback" attribute: if specified, it must be set set to "optional" or "required".'
+            )
 
         # Criterion options
         criterion_dict['options'] = _parse_options_xml(criterion)
@@ -894,6 +897,7 @@ def parse_from_xml(root):
         'leaderboard_show': leaderboard_show
     }
 
+
 def parse_from_xml_str(xml):
     """
     Create a dictionary for the OpenAssessment XBlock's content from an XML
...
 #!/usr/bin/env bash
-MAX_PEP8_VIOLATIONS=230
+MAX_PEP8_VIOLATIONS=113
 
 mkdir -p test/logs
 PEP8_VIOLATIONS=test/logs/pep8.txt
@@ -10,7 +10,7 @@ pep8 --config=.pep8 openassessment > $PEP8_VIOLATIONS
 NUM_PEP8_VIOLATIONS=$(cat $PEP8_VIOLATIONS | wc -l)
 
 echo "Found" $NUM_PEP8_VIOLATIONS "pep8 violations, threshold is" $MAX_PEP8_VIOLATIONS
-if [[ $NUM_PEP8_VIOLATIONS > $MAX_PEP8_VIOLATIONS ]]; then
+if [[ $NUM_PEP8_VIOLATIONS -gt $MAX_PEP8_VIOLATIONS ]]; then
     cat $PEP8_VIOLATIONS
     echo "NUMBER OF PEP8 VIOLATIONS ("$NUM_PEP8_VIOLATIONS") EXCEEDED THRESHOLD" $MAX_PEP8_VIOLATIONS
     exit 1
...
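Two things change in this quality gate: the violation ceiling drops from 230 to 113 to lock in the cleanup, and the comparison operator is corrected. Inside bash's [[ ... ]], > performs a lexicographic string comparison (for example, [[ 9 > 113 ]] is true, because "9" sorts after "1"), whereas -gt compares the operands as integers, which is what a violation count requires.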
 #!/usr/bin/env bash
-MAX_PYLINT_VIOLATIONS=609
+MAX_PYLINT_VIOLATIONS=522
 
 mkdir -p test/logs
 PYLINT_VIOLATIONS=test/logs/pylint.txt
...