Commit f41dd245 by cahrens

pep8 cleanup

parent ed2853f6
......@@ -2,7 +2,6 @@
Grade step in the OpenAssessment XBlock.
"""
import copy
from collections import defaultdict
from lazy import lazy
from django.utils.translation import ugettext as _
......@@ -309,7 +308,7 @@ class GradeMixin(object):
'additional_feedback': self._additional_feedback(
staff_assessment=staff_assessment,
peer_assessments=peer_assessments,
self_assessment= self_assessment,
self_assessment=self_assessment,
),
}
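The keyword-argument fix above applies PEP 8 rule E251: no spaces around "=" when it binds a keyword argument or a default value, while ordinary assignments keep the spaces. A minimal, hypothetical illustration (not code from this module):

def render_grade(template, context=None):  # default value: no spaces around "="
    return template, (context or {})

# Keyword argument at the call site follows the same rule.
path, ctx = render_grade('oa_grade.html', context={'points': 10})
assert ctx['points'] == 10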
......@@ -398,6 +397,7 @@ class GradeMixin(object):
"""
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
median_score = median_scores.get(criterion['name'], None)
def median_options():
"""
Returns a list of options that should be shown to represent the median.
......@@ -458,7 +458,6 @@ class GradeMixin(object):
'explanation': None,
}
def _additional_feedback(self, staff_assessment, peer_assessments, self_assessment):
"""
Returns an array of additional feedback for the specified assessments.
......
......@@ -86,10 +86,9 @@ class LeaderboardMixin(object):
score.pop('content', None)
context = { 'topscores': scores,
'allow_latex': self.allow_latex,
}
return ('openassessmentblock/leaderboard/oa_leaderboard_show.html', context)
context = {'topscores': scores, 'allow_latex': self.allow_latex,}
return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
def render_leaderboard_incomplete(self):
"""
......@@ -98,4 +97,4 @@ class LeaderboardMixin(object):
Returns:
template_path (string), tuple of context (dict)
"""
return ('openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {})
return 'openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {}
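The leaderboard changes drop redundant parentheses around the returned tuples and collapse the context literal onto one line; in Python the comma creates the tuple, so "return path, context" and "return (path, context)" are identical. A simplified sketch of the pattern (the real method builds its scores from stored submissions):

def render_leaderboard_complete(scores, allow_latex):
    # The comma, not the parentheses, makes this a tuple return.
    context = {'topscores': scores, 'allow_latex': allow_latex}
    return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context

path, context = render_leaderboard_complete([{'score': 10}], False)
assert path.endswith('oa_leaderboard_show.html')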
......@@ -79,7 +79,8 @@ class MessageMixin(object):
"{}_approaching".format(status): step_info.get('approaching', False),
"{}_not_released".format(status): (step_info.get("reason") == "start"),
#Uses a static field in the XBlock to determine if the PeerAssessment Block was able to pick up an assessment.
# Uses a static field in the XBlock to determine if the PeerAssessment Block
# was able to pick up an assessment.
"peer_not_available": self.no_peers,
}
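The context above builds flag names dynamically from the current step, so a template can check peer_approaching, self_approaching, and so on. A small self-contained sketch of the same pattern (values are illustrative):

status = "peer"
step_info = {"approaching": True}
context = {
    # Key becomes "peer_approaching", "self_approaching", etc.
    "{}_approaching".format(status): step_info.get("approaching", False),
    "peer_not_available": False,
}
assert context["peer_approaching"] is True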
......@@ -120,7 +121,6 @@ class MessageMixin(object):
return 'openassessmentblock/message/oa_message_closed.html', context
def render_message_open(self, deadline_info):
"""
Renders the "Open" message state
......
......@@ -21,7 +21,7 @@ from xblock.fragment import Fragment
from openassessment.xblock.grade_mixin import GradeMixin
from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.message_mixin import MessageMixin
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
......@@ -73,7 +73,7 @@ UI_MODELS = {
"class_id": "openassessment__grade",
"title": "Your Grade:"
},
"leaderboard": {
"leaderboard": {
"name": "leaderboard",
"class_id": "openassessment__leaderboard",
"title": "Leaderboard"
......@@ -94,6 +94,7 @@ def load(path):
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
@XBlock.needs("i18n")
@XBlock.needs("user")
class OpenAssessmentBlock(
......@@ -729,11 +730,14 @@ class OpenAssessmentBlock(
Examples:
>>> is_closed()
False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="submission")
True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="self-assessment")
True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861), datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
"""
submission_range = (self.submission_start, self.submission_due)
......
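Per the reflowed docstring, is_closed() returns a four-element tuple: whether the step is closed, the reason ("start" or "due"), the start date, and the due date. A hedged usage sketch (the helper below is hypothetical, not part of the XBlock):

def submission_past_due(block):
    # Unpack all four values; only the flag and the reason matter here.
    closed, reason, _start, _due = block.is_closed(step="submission")
    return closed and reason == "due"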
......@@ -121,8 +121,10 @@ def resolve_dates(start, end, date_ranges, _):
Args:
start (str, ISO date format, or datetime): When the problem opens. A value of None indicates that the problem is always open.
end (str, ISO date format, or datetime): When the problem closes. A value of None indicates that the problem never closes.
start (str, ISO date format, or datetime): When the problem opens.
A value of None indicates that the problem is always open.
end (str, ISO date format, or datetime): When the problem closes.
A value of None indicates that the problem never closes.
date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
the start/end timestamps (date string or datetime) of each submission/assessment.
_ (function): An i18n service function to use for retrieving the
......
......@@ -17,7 +17,9 @@ from xblock.fragment import Fragment
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.validation import validator
from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key, update_assessments_format
from openassessment.xblock.data_conversion import (
create_rubric_dict, make_django_template_key, update_assessments_format
)
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
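The import rewrite above uses PEP 8's preferred fix for long import lines: wrap the imported names in parentheses rather than using backslash continuations. The same shape with standard-library names:

from os.path import (
    basename, dirname, join
)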
......@@ -218,7 +220,6 @@ class StudioMixin(object):
for example in assessment['examples']:
example['answer'] = {'parts': [{'text': text} for text in example['answer']]}
xblock_validator = validator(self, self._)
success, msg = xblock_validator(
create_rubric_dict(data['prompts'], data['criteria']),
......
......@@ -435,8 +435,10 @@ class SubmissionMixin(object):
student_submission = self.get_user_submission(
workflow["submission_uuid"]
)
context["peer_incomplete"] = "peer" in workflow["status_details"] and not workflow["status_details"]["peer"]["complete"]
context["self_incomplete"] = "self" in workflow["status_details"] and not workflow["status_details"]["self"]["complete"]
peer_in_workflow = "peer" in workflow["status_details"]
self_in_workflow = "self" in workflow["status_details"]
context["peer_incomplete"] = peer_in_workflow and not workflow["status_details"]["peer"]["complete"]
context["self_incomplete"] = self_in_workflow and not workflow["status_details"]["self"]["complete"]
context["student_submission"] = create_submission_dict(student_submission, self.prompts)
path = 'openassessmentblock/response/oa_response_submitted.html'
......
......@@ -132,7 +132,7 @@ class XBlockHandlerTestCaseMixin(object):
super(XBlockHandlerTestCaseMixin, self).setUp()
self.runtime = WorkbenchRuntime()
mock_publish = mock.MagicMock(side_effect=self.runtime.publish)
self.runtime.publish=mock_publish
self.runtime.publish = mock_publish
def set_user(self, user_id):
"""
......@@ -379,7 +379,8 @@ class SubmitAssessmentsMixin(object):
return submission
def set_staff_access(self, xblock):
@staticmethod
def set_staff_access(xblock):
xblock.xmodule_runtime = mock.Mock(user_is_staff=True)
xblock.xmodule_runtime.anonymous_student_id = 'Bob'
......
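set_staff_access never touches self, so the commit converts it to a staticmethod; the same change is applied below to _create_mock_runtime, _create_submission, get_anonymous_user_id, _mutate_dict, and _mutate_list. A minimal sketch of the pattern (class and method names are hypothetical):

class SubmissionHelpers(object):
    def checksum(self, text):
        # "self" is never used, which pylint flags as no-self-use.
        return sum(ord(ch) for ch in text)

    @staticmethod
    def checksum_static(text):
        # No instance state needed; callable on the class or an instance.
        return sum(ord(ch) for ch in text)

assert SubmissionHelpers.checksum_static("abc") == SubmissionHelpers().checksum("abc")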
......@@ -12,6 +12,7 @@ from openassessment.xblock.data_conversion import (
create_prompts_list, create_submission_dict, prepare_submission_for_serialization, update_assessments_format
)
@ddt.ddt
class DataConversionTest(TestCase):
......@@ -38,7 +39,8 @@ class DataConversionTest(TestCase):
(
{'answer': {'parts': [{'text': 'a'}]}},
[{'description': '1'}, {'description': '2'}],
{'answer': {'parts': [{'prompt': {'description': '1'}, 'text': 'a'}, {'prompt': {'description': '2'}, 'text': ''}]}}
{'answer': {'parts': [{'prompt': {'description': '1'}, 'text': 'a'},
{'prompt': {'description': '2'}, 'text': ''}]}}
)
)
@ddt.unpack
......@@ -60,6 +62,6 @@ class DataConversionTest(TestCase):
def test_update_assessments_format(self, input, output):
self.assertEqual(update_assessments_format([{
'examples': input,
}]), [{
}]), [{
'examples': output,
}])
......@@ -279,7 +279,7 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
@scenario('data/grade_scenario.xml', user_id='Bob')
def test_assessment_does_not_match_rubric(self, xblock):
# Get to the grade complete section
# Get to the grade complete section
self.create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT
)
......
......@@ -46,8 +46,9 @@ class TestMessageRender(XBlockHandlerTestCase):
},
}
@staticmethod
def _assert_path_and_context(
self, xblock, expected_path, expected_context,
xblock, expected_path, expected_context,
workflow_status, deadline_information, has_peers_to_grade,
workflow_status_details=DEFAULT_STATUS_DETAILS
):
......@@ -102,8 +103,7 @@ class TestMessageRender(XBlockHandlerTestCase):
# Asserts that the message_mixin correctly derived the path and context to be rendered
xblock.render_assessment.assert_called_with(expected_path, expected_context)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_submission(self, xblock):
status = None
......@@ -128,7 +128,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
@scenario('data/message_scenario_no_peer.xml', user_id="Linda")
def test_submission_no_peer(self, xblock):
status = None
......@@ -152,7 +152,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_submission_approaching(self, xblock):
status = None
......@@ -177,7 +177,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_self.xml', user_id = "Linda")
@scenario('data/message_scenario_no_self.xml', user_id="Linda")
def test_submission_no_self_approaching(self, xblock):
status = None
......@@ -201,7 +201,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_submission_not_yet_open(self, xblock):
status = None
......@@ -226,7 +226,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_submission_incomplete(self, xblock):
status = None
......@@ -251,7 +251,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_student_training.xml', user_id = "Linda")
@scenario('data/message_scenario_student_training.xml', user_id="Linda")
def test_training(self, xblock):
status = 'training'
......@@ -280,7 +280,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_student_training.xml', user_id = "Linda")
@scenario('data/message_scenario_student_training.xml', user_id="Linda")
def test_training_approaching(self, xblock):
status = 'training'
......@@ -309,7 +309,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_student_training.xml', user_id = "Linda")
@scenario('data/message_scenario_student_training.xml', user_id="Linda")
def test_training_not_released(self, xblock):
status = 'training'
......@@ -335,7 +335,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_student_training.xml', user_id = "Linda")
@scenario('data/message_scenario_student_training.xml', user_id="Linda")
def test_training_closed(self, xblock):
status = 'training'
......@@ -361,7 +361,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer(self, xblock):
status = 'peer'
......@@ -389,7 +389,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_self.xml', user_id = "Linda")
@scenario('data/message_scenario_no_self.xml', user_id="Linda")
def test_peer_no_self(self, xblock):
status = 'peer'
......@@ -416,7 +416,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_self.xml', user_id = "Linda")
@scenario('data/message_scenario_no_self.xml', user_id="Linda")
def test_peer_no_self_approaching(self, xblock):
status = 'peer'
......@@ -443,7 +443,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer_not_released(self, xblock):
status = 'peer'
......@@ -468,7 +468,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer_incomplete(self, xblock):
status = 'peer'
......@@ -493,7 +493,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer_no_peers_to_assess(self, xblock):
status = 'peer'
......@@ -521,7 +521,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer_no_peers_to_assess_approaching(self, xblock):
status = 'peer'
......@@ -549,7 +549,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_peer_not_open_approaching(self, xblock):
status = 'peer'
......@@ -574,7 +574,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_self(self, xblock):
status = 'self'
......@@ -602,7 +602,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
@scenario('data/message_scenario_no_peer.xml', user_id="Linda")
def test_self_no_peer(self, xblock):
status = 'self'
......@@ -629,7 +629,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
@scenario('data/message_scenario_no_peer.xml', user_id="Linda")
def test_self_no_peer_approaching(self, xblock):
status = 'self'
......@@ -656,7 +656,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_self_closed(self, xblock):
status = 'self'
......@@ -681,7 +681,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario_no_peer.xml', user_id = "Linda")
@scenario('data/message_scenario_no_peer.xml', user_id="Linda")
def test_self_no_peer_incomplete(self, xblock):
status = 'self'
......@@ -705,7 +705,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_waiting_due(self, xblock):
status = 'waiting'
......@@ -732,7 +732,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade, status_details
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_waiting_not_due(self, xblock):
status = 'waiting'
......@@ -792,7 +792,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade, status_details
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_done_due(self, xblock):
status = 'done'
......@@ -817,7 +817,7 @@ class TestMessageRender(XBlockHandlerTestCase):
status, deadline_information, has_peers_to_grade
)
@scenario('data/message_scenario.xml', user_id = "Linda")
@scenario('data/message_scenario.xml', user_id="Linda")
def test_done_not_due(self, xblock):
status = 'done'
......
......@@ -77,7 +77,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
self.runtime.render(xblock, "student_view")
expected_reqs = {
"peer": { "must_grade": 5, "must_be_graded_by": 3 }
"peer": {"must_grade": 5, "must_be_graded_by": 3}
}
mock_api.update_from_assessments.assert_called_once_with('test_submission', expected_reqs)
......@@ -259,6 +259,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
xblock.prompts = [{'description': 'Prompt 4.'}, {'description': 'Prompt 5.'}]
self.assertEqual(xblock.prompt, '[{"description": "Prompt 4."}, {"description": "Prompt 5."}]')
class TestDates(XBlockHandlerTestCase):
@scenario('data/basic_scenario.xml')
......
......@@ -52,7 +52,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Validate Peer Rendering.
self.assertTrue("Sally".encode('utf-8') in peer_response.body or
"Hal".encode('utf-8') in peer_response.body)
"Hal".encode('utf-8') in peer_response.body)
@mock.patch('openassessment.xblock.workflow_mixin.WorkflowMixin.workflow_requirements')
@scenario('data/peer_assessment_scenario.xml', user_id='Sally')
......@@ -65,12 +65,12 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self._sally_and_hal_grade_each_other_helper(xblock)
# Verify that Sally's workflow is not marked done, as the requirements are higher than 1.
mock_requirements.return_value = {"peer": {"must_grade":2, "must_be_graded_by":2}}
mock_requirements.return_value = {"peer": {"must_grade": 2, "must_be_graded_by": 2}}
workflow_info = xblock.get_workflow_info()
self.assertEqual(workflow_info["status"], u'peer')
# Now, change the requirements and verify that Sally's workflow updates to 'self' status.
mock_requirements.return_value = {"peer": {"must_grade":1, "must_be_graded_by":1}}
mock_requirements.return_value = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}
workflow_info = xblock.get_workflow_info()
self.assertEqual(workflow_info["status"], u'self')
......@@ -127,7 +127,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
def test_peer_assess_without_leasing_submission(self, xblock):
# Create a submission
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
# Attempt to assess a peer without first leasing their submission
# (usually occurs by rendering the peer assessment step)
......@@ -525,13 +525,13 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
# Continued grading should still be available,
# but since there are no other submissions, we're in the waiting state.
expected_context = {
'graded': 0,
'must_grade': 5,
'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
'review_num': 1,
'rubric_criteria': xblock.rubric_criteria,
'submit_button_text': 'Submit your assessment & review another response',
'allow_latex': False,
'graded': 0,
'must_grade': 5,
'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
'review_num': 1,
'rubric_criteria': xblock.rubric_criteria,
'submit_button_text': 'Submit your assessment & review another response',
'allow_latex': False,
}
self._assert_path_and_context(
xblock, 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
......@@ -552,16 +552,16 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
)
expected_context = {
'graded': 0,
'must_grade': 5,
'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
'peer_submission': create_submission_dict(submission, xblock.prompts),
'file_upload_type': None,
'peer_file_url': '',
'review_num': 1,
'rubric_criteria': xblock.rubric_criteria,
'submit_button_text': 'Submit your assessment & review another response',
'allow_latex': False,
'graded': 0,
'must_grade': 5,
'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
'peer_submission': create_submission_dict(submission, xblock.prompts),
'file_upload_type': None,
'peer_file_url': '',
'review_num': 1,
'rubric_criteria': xblock.rubric_criteria,
'submit_button_text': 'Submit your assessment & review another response',
'allow_latex': False,
}
self._assert_path_and_context(
xblock, 'openassessmentblock/peer/oa_peer_turbo_mode.html',
......@@ -667,7 +667,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_assess_handler(self, xblock):
# Submit a peer assessment
submission_uuid, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
submission_uuid, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
# Check that the stored assessment matches what we expect
self.assertEqual(assessment['submission_uuid'], submission_uuid)
......@@ -688,7 +688,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
@scenario('data/feedback_per_criterion.xml', user_id='Bob')
def test_peer_assess_feedback(self, xblock):
# Submit a peer assessment
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
_, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
# Retrieve the assessment and check the feedback
self.assertEqual(assessment['feedback'], self.ASSESSMENT['overall_feedback'])
......@@ -702,7 +702,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
def test_peer_assess_send_unsolicited_criterion_feedback(self, xblock):
# Submit an assessment containing per-criterion feedback,
# even though the rubric in this scenario has per-criterion feedback disabled.
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', self.ASSESSMENT)
_, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', self.ASSESSMENT)
# Expect that per-criterion feedback was ignored
for part in assessment['parts']:
......@@ -716,7 +716,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
'criterion_feedback': {u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ'},
'overall_feedback': u''
}
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', assessment_dict)
_, assessment = self._submit_peer_assessment(xblock, u'Sally', u'Bob', assessment_dict)
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
......@@ -733,8 +733,8 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
# Submit a peer assessment
assessment = self._submit_peer_assessment(
xblock,
'Sally',
'Bob',
u'Sally',
u'Bob',
self.ASSESSMENT_WITH_INVALID_SUBMISSION_UUID,
expect_failure=True,
)
......@@ -746,7 +746,7 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
# Submit an assessment, but mutate the options selected so they do NOT match the rubric
# Expect a failure response
self._submit_peer_assessment(
xblock, 'Sally', 'Bob', self.ASSESSMENT_WITH_INVALID_OPTION,
xblock, u'Sally', u'Bob', self.ASSESSMENT_WITH_INVALID_OPTION,
expect_failure=True
)
......@@ -754,19 +754,19 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_request_error(self, xblock, mock_api):
mock_api.create_assessment.side_effect = peer_api.PeerAssessmentRequestError
self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
@mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_internal_error(self, xblock, mock_api):
mock_api.create_assessment.side_effect = peer_api.PeerAssessmentInternalError
self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
@mock.patch('openassessment.xblock.workflow_mixin.workflow_api.update_from_assessments')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_workflow_error(self, xblock, mock_call):
mock_call.side_effect = workflow_api.AssessmentWorkflowInternalError
self._submit_peer_assessment(xblock, "Sally", "Bob", self.ASSESSMENT, expect_failure=True)
self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
"""
......
......@@ -11,6 +11,7 @@ from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DIS
STUB_I18N = lambda x: x
@ddt.ddt
class ResolveDatesTest(TestCase):
......
......@@ -22,7 +22,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
def test_save_response(self, xblock, data):
# Save the response
submission = [" ".join(data[0]), " ".join(data[1])]
payload = json.dumps({'submission': submission })
payload = json.dumps({'submission': submission})
resp = self.request(xblock, 'save_submission', payload, response_format="json")
self.assertTrue(resp['success'])
self.assertEqual(resp['msg'], u'')
......@@ -44,7 +44,7 @@ class SaveResponseTest(XBlockHandlerTestCase):
# Save another response
submission = [u"ГЂіи lіиэ ъэтшээи", u"Ђэаvэи аиↁ Ђэѓэ."]
payload = json.dumps({'submission': submission })
payload = json.dumps({'submission': submission})
resp = self.request(xblock, 'save_submission', payload, response_format="json")
self.assertTrue(resp['success'])
......
......@@ -79,7 +79,7 @@ class TestSelfAssessment(XBlockHandlerTestCase):
# Verify that the workflow is updated when we submit a self-assessment
self.assertTrue(resp['success'])
expected_reqs = {
"peer": { "must_grade": 5, "must_be_graded_by": 3 }
"peer": {"must_grade": 5, "must_be_graded_by": 3}
}
mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
......
......@@ -63,7 +63,9 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase):
'status_value': 'Complete',
'icon_class': 'fa-check',
'message_title': 'You Must Complete the Steps Above to View Your Grade',
'message_content': 'Although a course staff member has assessed your response, you will receive your grade only after you have completed all the required steps of this problem.'
'message_content': 'Although a course staff member has assessed your response, '
'you will receive your grade only after you have completed all '
'the required steps of this problem.'
}
)
......@@ -107,7 +109,8 @@ class TestStaffAssessmentRender(StaffAssessmentTestBase):
{
'status_value': 'Not Available',
'message_title': 'Waiting for a Staff Grade',
'message_content': 'Check back later to see if a course staff member has assessed your response. You will receive your grade after the assessment is complete.',
'message_content': 'Check back later to see if a course staff member has assessed your response. '
'You will receive your grade after the assessment is complete.',
}
)
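The long message_content strings are wrapped using implicit concatenation: adjacent string literals inside brackets are joined at compile time, so no "+" operator or backslash is needed. A compact demonstration:

message_content = (
    'Although a course staff member has assessed your response, '
    'you will receive your grade only after you have completed '
    'all the required steps of this problem.'
)
assert 'grade only after' in message_content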
......@@ -160,7 +163,9 @@ class TestStaffAssessment(StaffAssessmentTestBase):
self.assertEqual(assessment['points_earned'], score['points_earned'])
self.assertEqual(assessment['points_possible'], score['points_possible'])
self.assert_assessment_event_published(xblock, 'openassessmentblock.staff_assess', assessment, type='full-grade')
self.assert_assessment_event_published(
xblock, 'openassessmentblock.staff_assess', assessment, type='full-grade'
)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_staff_assess_handler_regrade(self, xblock):
......
......@@ -51,7 +51,8 @@ class NullUserService(object):
"""
A simple implementation of the runtime "user" service.
"""
def get_anonymous_user_id(self, username, course_id):
@staticmethod
def get_anonymous_user_id(username, _):
return username
......@@ -739,7 +740,6 @@ class TestCourseStaff(XBlockHandlerTestCase):
'submission_returned_uuid': submission['uuid']
})
def _verify_staff_assessment_context(self, context, required, ungraded=None, in_progress=None):
self.assertEquals(required, context['staff_assessment_required'])
if not required:
......@@ -749,8 +749,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
self.assertEqual(ungraded, context['staff_assessment_ungraded'])
self.assertEqual(in_progress, context['staff_assessment_in_progress'])
@staticmethod
def _create_mock_runtime(
self,
item_id,
is_staff,
is_admin,
......@@ -772,7 +772,8 @@ class TestCourseStaff(XBlockHandlerTestCase):
)
return mock_runtime
def _create_submission(self, item, values, types):
@staticmethod
def _create_submission(item, values, types):
""" Create a submission and corresponding workflow. """
submission = sub_api.create_submission(item, values)
......
......@@ -157,14 +157,13 @@ class StudentTrainingAssessTest(StudentTrainingTest):
expected_context["training_num_current"] = 2
expected_context["training_essay"] = {
'answer': {
'parts': [
{
'text': u"тєѕт αηѕωєя",
'prompt': {
'description': u'Given the state of the world today, what do you think should be done to combat poverty?'
}
'parts': [{
'text': u"тєѕт αηѕωєя",
'prompt': {
'description':
u'Given the state of the world today, what do you think should be done to combat poverty?'
}
]
}]
}
}
......
......@@ -96,11 +96,11 @@ class StudioViewTest(XBlockHandlerTestCase):
]
EXAMPLE_BASED_ASSESSMENT_EXAMPLES = '<examples>' + \
'<example>' + \
'<answer> TEST ANSWER </answer>' + \
'<select criterion="Test criterion" option="Test option" />' + \
'</example>' + \
'</examples>'
'<example>' + \
'<answer> TEST ANSWER </answer>' + \
'<select criterion="Test criterion" option="Test option" />' + \
'</example>' + \
'</examples>'
ASSESSMENT_CSS_IDS = {
"example-based-assessment": "oa_ai_assessment_editor",
......
......@@ -119,7 +119,8 @@ class SubmissionTest(XBlockHandlerTestCase):
"filename": "test.jpg"}), response_format='json')
self.assertTrue(resp['success'])
self.assertTrue(resp['url'].startswith(
'https://mybucket.s3.amazonaws.com/submissions_attachments/test_student/test_course/' + xblock.scope_ids.usage_id
'https://mybucket.s3.amazonaws.com/submissions_attachments/test_student/test_course/' +
xblock.scope_ids.usage_id
))
@mock_s3
......@@ -339,7 +340,7 @@ class SubmissionRenderTest(XBlockHandlerTestCase):
@patch.object(OpenAssessmentBlock, 'get_user_submission')
@scenario('data/submission_open.xml', user_id="Bob")
def test_open_submitted_old_format(self, xblock, mock_get_user_submission):
submission = xblock.create_submission(
xblock.create_submission(
xblock.get_student_item_dict(),
('A man must have a code', 'A man must have an umbrella too.')
)
......
......@@ -17,18 +17,23 @@ from openassessment.xblock.validation import (
STUB_I18N = lambda x: x
@ddt.ddt
class AssessmentValidationTest(TestCase):
@ddt.file_data('data/valid_assessments.json')
def test_valid_assessment(self, data):
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
success, msg = validate_assessments(
data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N
)
self.assertTrue(success)
self.assertEqual(msg, u'')
@ddt.file_data('data/invalid_assessments.json')
def test_invalid_assessment(self, data):
success, msg = validate_assessments(data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N)
success, msg = validate_assessments(
data["assessments"], data["current_assessments"], data["is_released"], STUB_I18N
)
self.assertFalse(success)
self.assertGreater(len(msg), 0)
......@@ -383,7 +388,7 @@ class ValidationSubmissionTest(TestCase):
self.assertTrue(success)
success, msg = validate_submission(
[u"Response 1.", u"Response 2" ], self.PROMPT, STUB_I18N
[u"Response 1.", u"Response 2"], self.PROMPT, STUB_I18N
)
self.assertTrue(success)
......
......@@ -243,7 +243,9 @@ class TestSerializeContent(TestCase):
try:
etree.fromstring(xml)
except Exception as ex: # pylint:disable=W0703
msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(
assessment=mutated_dict, ex=ex
)
self.fail(msg)
@ddt.data("title", "prompt", "start", "due", "submission_due", "submission_start", "leaderboard_show")
......@@ -316,7 +318,7 @@ class TestSerializeContent(TestCase):
# Mutation #1: Remove the key
print "== Removing key {}".format(key)
yield {k:v for k, v in input_dict.iteritems() if k != key}
yield {k: v for k, v in input_dict.iteritems() if k != key}
if isinstance(val, dict):
......@@ -399,7 +401,8 @@ class TestSerializeContent(TestCase):
print "== int value {}".format(key)
yield self._mutate_dict(input_dict, key, 0)
def _mutate_dict(self, input_dict, key, new_val):
@staticmethod
def _mutate_dict(input_dict, key, new_val):
"""
Copy and update a dictionary.
......@@ -415,7 +418,8 @@ class TestSerializeContent(TestCase):
mutated[key] = new_val
return mutated
def _mutate_list(self, input_list, index, new_val):
@staticmethod
def _mutate_list(input_list, index, new_val):
"""
Copy and update a list.
......@@ -464,6 +468,7 @@ class TestParseExamplesFromXml(TestCase):
examples = parse_examples_xml(xml)
self.assertEqual(examples, data['examples'])
@ddt.ddt
class TestParseAssessmentsFromXml(TestCase):
......
......@@ -288,6 +288,7 @@ def _parse_prompts_xml(root):
return prompts_list
def _parse_options_xml(options_root):
"""
Parse <options> element in the OpenAssessment XBlock's content XML.
......@@ -402,7 +403,9 @@ def _parse_criteria_xml(criteria_root):
if criterion_feedback in ['optional', 'disabled', 'required']:
criterion_dict['feedback'] = criterion_feedback
else:
raise UpdateFromXmlError('Invalid value for "feedback" attribute: if specified, it must be set to "optional" or "required".')
raise UpdateFromXmlError(
'Invalid value for "feedback" attribute: if specified, it must be set to "optional" or "required".'
)
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
......@@ -894,6 +897,7 @@ def parse_from_xml(root):
'leaderboard_show': leaderboard_show
}
def parse_from_xml_str(xml):
"""
Create a dictionary for the OpenAssessment XBlock's content from an XML
......
#!/usr/bin/env bash
MAX_PEP8_VIOLATIONS=230
MAX_PEP8_VIOLATIONS=113
mkdir -p test/logs
PEP8_VIOLATIONS=test/logs/pep8.txt
......@@ -10,7 +10,7 @@ pep8 --config=.pep8 openassessment > $PEP8_VIOLATIONS
NUM_PEP8_VIOLATIONS=$(cat $PEP8_VIOLATIONS | wc -l)
echo "Found" $NUM_PEP8_VIOLATIONS "pep8 violations, threshold is" $MAX_PEP8_VIOLATIONS
if [[ $NUM_PEP8_VIOLATIONS > $MAX_PEP8_VIOLATIONS ]]; then
if [[ $NUM_PEP8_VIOLATIONS -gt $MAX_PEP8_VIOLATIONS ]]; then
cat $PEP8_VIOLATIONS
echo "NUMBER OF PEP8 VIOLATIONS ("$NUM_PEP8_VIOLATIONS") EXCEEDED THRESHOLD" $MAX_PEP8_VIOLATIONS
exit 1
......
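The comparison fix above is behavioral, not just stylistic: inside bash's [[ ]], the ">" operator compares strings lexicographically, so a count of "99" would wrongly test greater than a threshold of "113"; "-gt" forces the intended integer comparison.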
#!/usr/bin/env bash
MAX_PYLINT_VIOLATIONS=609
MAX_PYLINT_VIOLATIONS=522
mkdir -p test/logs
PYLINT_VIOLATIONS=test/logs/pylint.txt
......