Commit 65ebba35 by John Eskew Committed by GitHub

Merge pull request #1051 from edx/jeskew/more_django1.11_changes

Another import move to support Django 1.11
parents b0cc7c69 b5d7a8c2
......@@ -24,6 +24,7 @@ class CourseItemsListingMixin(object):
Get information about all ora2 blocks in the course with response count for each step.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.data import OraAggregateData
responses = OraAggregateData.collect_ora2_responses(unicode(self.course_id))
return Response(json.dumps(responses), content_type='application/json')
......@@ -37,6 +37,7 @@ class GradeMixin(object):
Returns:
unicode: HTML content of the grade step.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as sub_api
# Retrieve the status of the workflow. If no workflows have been
......@@ -82,6 +83,7 @@ class GradeMixin(object):
Returns:
tuple of context (dict), template_path (string)
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api
......@@ -189,6 +191,7 @@ class GradeMixin(object):
Dict with keys 'success' (bool) and 'msg' (unicode)
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
feedback_text = data.get('feedback_text', u'')
......@@ -250,6 +253,7 @@ class GradeMixin(object):
...
}
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api
......@@ -400,6 +404,7 @@ class GradeMixin(object):
The option for the median peer grade.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
......
......@@ -36,6 +36,7 @@ class LeaderboardMixin(object):
Returns:
unicode: HTML content of the leaderboard.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as sub_api
# Retrieve the status of the workflow. If no workflows have been
# started this will be an empty dict, so status will be None.
......@@ -63,6 +64,7 @@ class LeaderboardMixin(object):
Returns:
template_path (string), tuple of context (dict)
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as sub_api
# Retrieve top scores from the submissions API
......
......@@ -8,7 +8,6 @@ import logging
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentWorkflowError)
from openassessment.workflow.errors import AssessmentWorkflowError
......@@ -56,6 +55,8 @@ class PeerAssessmentMixin(object):
and "msg" (unicode) containing additional information if an error occurs.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
if self.submission_uuid is None:
return {
'success': False, 'msg': self._('You must submit a response before you can perform a peer assessment.')
......@@ -164,6 +165,8 @@ class PeerAssessmentMixin(object):
tuple of (template_path, context_dict)
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
path = 'openassessmentblock/peer/oa_peer_unavailable.html'
finished = False
problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")
......@@ -274,6 +277,8 @@ class PeerAssessmentMixin(object):
dict: The serialized submission model.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
......
......@@ -52,6 +52,7 @@ class SelfAssessmentMixin(object):
SubmissionError: Error occurred while retrieving the current submission.
SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as submission_api
path = 'openassessmentblock/self/oa_self_unavailable.html'
......
......@@ -147,6 +147,7 @@ class StaffAreaMixin(object):
"""
Returns a context with staff assessment "ungraded" and "in-progress" counts.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import staff as staff_api
grading_stats = staff_api.get_staff_grading_statistics(course_id, item_id)
......@@ -183,6 +184,7 @@ class StaffAreaMixin(object):
Must be course staff to render this view.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import staff as staff_api
from submissions import api as submission_api
try:
......@@ -284,6 +286,7 @@ class StaffAreaMixin(object):
Args:
student_username (unicode): The username of the student to report.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as submission_api
anonymous_user_id = None
......@@ -323,6 +326,7 @@ class StaffAreaMixin(object):
submission_uuid (unicode): The uuid of the submission, should NOT be None.
context: the context to update with additional information
"""
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api
......@@ -396,6 +400,7 @@ class StaffAreaMixin(object):
for a given problem. It will cancel the workflow using traditional methods to remove it from the grading pools,
and pass through to the submissions API to orphan the submission so that the user can create a new one.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as submission_api
# Note that student_item cannot be constructed using get_student_item_dict, since we're in a staff context
student_item = {
......@@ -452,6 +457,7 @@ class StaffAreaMixin(object):
If requesting_user is not provided, we will use the user to which this xblock is currently bound.
"""
# Import is placed here to avoid model import at project startup.
from openassessment.workflow import api as workflow_api
try:
assessment_requirements = self.workflow_requirements()
......
......@@ -68,6 +68,7 @@ class SubmissionMixin(object):
associated status tag (str), and status text (unicode).
"""
# Import is placed here to avoid model import at project startup.
from submissions import api
if 'submission' not in data:
return (
......@@ -225,6 +226,7 @@ class SubmissionMixin(object):
return {'success': False, 'msg': self._(u"Files descriptions were not submitted.")}
def create_submission(self, student_item_dict, student_sub_data, files_descriptions=None):
# Import is placed here to avoid model import at project startup.
from submissions import api
# Store the student's response text in a JSON-encodable dict
......@@ -434,6 +436,7 @@ class SubmissionMixin(object):
the front end.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api
try:
return api.get_submission(submission_uuid)
......
......@@ -645,7 +645,7 @@ class TestPeerAssessmentRender(XBlockHandlerTestCase):
xblock.get_workflow_info = mock.Mock(return_value=workflow_info)
# Simulate that we've either finished or not finished required grading
patched_module = 'openassessment.xblock.peer_assessment_mixin.peer_api'
patched_module = 'openassessment.assessment.api.peer'
with mock.patch(patched_module + '.has_finished_required_evaluating') as mock_finished:
mock_finished.return_value = (was_graded_enough, 1)
path, context = xblock.peer_path_and_context(continue_grading)
......@@ -775,13 +775,13 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
expect_failure=True
)
@mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
@mock.patch('openassessment.assessment.api.peer')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_request_error(self, xblock, mock_api):
mock_api.create_assessment.side_effect = peer_api.PeerAssessmentRequestError
self._submit_peer_assessment(xblock, u"Sally", u"Bob", self.ASSESSMENT, expect_failure=True)
@mock.patch('openassessment.xblock.peer_assessment_mixin.peer_api')
@mock.patch('openassessment.assessment.api.peer')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_peer_api_internal_error(self, xblock, mock_api):
mock_api.create_assessment.side_effect = peer_api.PeerAssessmentInternalError
......
......@@ -314,6 +314,7 @@ def validator(oa_block, _, strict_post_release=True):
Returns:
callable, of a form that can be passed to `update_from_xml`.
"""
# Import is placed here to avoid model import at project startup.
from submissions.api import MAX_TOP_SUBMISSIONS
def _inner(rubric_dict, assessments, leaderboard_show=0, submission_start=None, submission_due=None):
......
......@@ -34,7 +34,7 @@ def load_requirements(*requirements_paths):
setup(
name='ora2',
version='2.1.2',
version='2.1.3',
author='edX',
url='http://github.com/edx/edx-ora2',
description='edx-ora2',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment