Commit 440d9ddb by Will Daly

Install edx-ora2

Support for displaying submissions API scores on the Progress page.
parent 982f24ae
......@@ -143,7 +143,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.check_components_on_page(
ADVANCED_COMPONENT_TYPES,
['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation',
-             'Open Response Assessment', 'Peer Grading Interface'],
+             'Open Response Assessment', 'Peer Grading Interface', 'openassessment'],
)
def test_advanced_components_require_two_clicks(self):
......
......@@ -60,6 +60,7 @@ else:
'word_cloud',
'graphical_slider_tool',
'lti',
+    'openassessment',  # edx-ora2
] + OPEN_ENDED_COMPONENT_TYPES + NOTE_COMPONENT_TYPES
ADVANCED_COMPONENT_CATEGORY = 'advanced'
......
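Aside: entries in ADVANCED_COMPONENT_TYPES are only offered to courses that opt in through their advanced-module course settings. A minimal sketch of that gating idea, in which the helper name and the sample setting value are illustrative assumptions rather than code from this commit:

    # Hypothetical illustration of the opt-in gating; not the actual Studio code.
    ADVANCED_COMPONENT_TYPES = [
        'word_cloud',
        'graphical_slider_tool',
        'lti',
        'openassessment',  # edx-ora2
    ]

    def enabled_advanced_components(course_advanced_modules):
        # Only component types the course explicitly lists are offered to authors.
        return [c for c in ADVANCED_COMPONENT_TYPES if c in course_advanced_modules]

    print(enabled_advanced_components(['openassessment']))  # ['openassessment']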
......@@ -547,6 +547,13 @@ MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
OPTIONAL_APPS = (
'edx_jsdraw',
'mentoring',
+    # edx-ora2
+    'submissions',
+    'openassessment',
+    'openassessment.assessment',
+    'openassessment.workflow',
+    'openassessment.xblock'
)
for app_name in OPTIONAL_APPS:
......
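The OPTIONAL_APPS pattern installs each app only when its package is importable, so environments without edx-ora2 keep working. A minimal sketch of that idea, assuming the settings loop (which this commit does not show) simply skips missing packages:

    # Hedged sketch of the OPTIONAL_APPS pattern; the real settings loop may
    # differ in detail.
    import importlib

    INSTALLED_APPS = ()
    OPTIONAL_APPS = ('submissions', 'openassessment')

    for app_name in OPTIONAL_APPS:
        try:
            importlib.import_module(app_name)
        except ImportError:
            continue  # package not present in this environment; skip it
        INSTALLED_APPS += (app_name,)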
......@@ -14,6 +14,8 @@ from dogapi import dog_stats_api
from courseware import courses
from courseware.model_data import FieldDataCache
+from student.models import anonymous_id_for_user
+from submissions import api as sub_api
from xmodule import graders
from xmodule.graders import Score
from xmodule.modulestore.django import modulestore
......@@ -178,6 +180,11 @@ def _grade(student, request, course, keep_raw_scores):
grading_context = course.grading_context
raw_scores = []
+    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
+    # scores that were registered with the submissions API, which for the moment
+    # means only openassessment (edx-ora2)
+    submissions_scores = sub_api.get_scores(course.id, anonymous_id_for_user(student, course.id))
totaled_scores = {}
# This next complicated loop is just to collect the totaled_scores, which is
# passed to the grader
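Based on how the code below consumes submissions_scores, sub_api.get_scores returns a dict keyed by location URL, with (earned, possible) point tuples as values. An illustrative shape, with a made-up location and numbers:

    # Hypothetical example of the dict shape; the location string and points
    # are invented for illustration.
    submissions_scores = {
        'i4x://edX/Demo_Course/openassessment/peer_question': (8, 10),  # (earned, possible)
    }
    earned, possible = submissions_scores['i4x://edX/Demo_Course/openassessment/peer_question']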
......@@ -194,7 +201,15 @@ def _grade(student, request, course, keep_raw_scores):
descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
)
-            # If we haven't seen a single problem in the section, we don't have to grade it at all! We can assume 0%
+            # If there are no problems that always have to be regraded, check to
+            # see if any of our locations are in the scores from the submissions
+            # API. If scores exist, we have to calculate grades for this section.
+            if not should_grade_section:
+                should_grade_section = any(
+                    descriptor.location.url() in submissions_scores
+                    for descriptor in section['xmoduledescriptors']
+                )
if not should_grade_section:
with manual_transaction():
should_grade_section = StudentModule.objects.filter(
......@@ -204,6 +219,8 @@ def _grade(student, request, course, keep_raw_scores):
]
).exists()
+            # If we haven't seen a single problem in the section, we don't have
+            # to grade it at all! We can assume 0%
if should_grade_section:
scores = []
......@@ -217,7 +234,9 @@ def _grade(student, request, course, keep_raw_scores):
for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):
-                    (correct, total) = get_score(course.id, student, module_descriptor, create_module)
+                    (correct, total) = get_score(
+                        course.id, student, module_descriptor, create_module, scores_cache=submissions_scores
+                    )
if correct is None and total is None:
continue
......@@ -331,6 +350,8 @@ def _progress_summary(student, request, course):
# This student must not have access to the course.
return None
+    submissions_scores = sub_api.get_scores(course.id, anonymous_id_for_user(student, course.id))
chapters = []
# Don't include chapters that aren't displayable (e.g. due to error)
for chapter_module in course_module.get_display_items():
......@@ -353,7 +374,9 @@ def _progress_summary(student, request, course):
for module_descriptor in yield_dynamic_descriptor_descendents(section_module, module_creator):
course_id = course.id
-                (correct, total) = get_score(course_id, student, module_descriptor, module_creator)
+                (correct, total) = get_score(
+                    course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores
+                )
if correct is None and total is None:
continue
......@@ -383,7 +406,8 @@ def _progress_summary(student, request, course):
return chapters
-def get_score(course_id, user, problem_descriptor, module_creator):
+def get_score(course_id, user, problem_descriptor, module_creator, scores_cache=None):
"""
Return the score for a user on a problem, as a tuple (correct, total).
e.g. (5,7) if you got 5 out of 7 points.
......@@ -395,11 +419,18 @@ def get_score(course_id, user, problem_descriptor, module_creator):
problem_descriptor: an XModuleDescriptor
module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user.
Can return None if user doesn't have access, or if something else went wrong.
cache: A FieldDataCache
+    scores_cache: A dict of location names to (earned, possible) point tuples.
+        If an entry is found in this cache, it takes precedence.
"""
+    scores_cache = scores_cache or {}
if not user.is_authenticated():
return (None, None)
+    location_url = problem_descriptor.location.url()
+    if location_url in scores_cache:
+        return scores_cache[location_url]
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. foldit.)
if problem_descriptor.always_recalculate_grades:
......
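The precedence rule in that docstring distills to a simple lookup order. A self-contained sketch of it (the function name and sample values are illustrative, not the real get_score):

    def score_with_cache(location_url, scores_cache, fallback_score):
        # A submissions-API score, if cached for this location, takes
        # precedence over whatever the fallback source would report.
        scores_cache = scores_cache or {}
        if location_url in scores_cache:
            return scores_cache[location_url]
        return fallback_score

    cached = {'i4x://edX/Demo/openassessment/q1': (8, 10)}
    print(score_with_cache('i4x://edX/Demo/openassessment/q1', cached, (3, 10)))  # (8, 10)
    print(score_with_cache('i4x://edX/Demo/problem/q2', cached, (3, 10)))         # (3, 10)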
......@@ -467,7 +467,7 @@ class TestCourseGrader(TestSubmittingProblems):
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
-    def test_wrong_asnwers(self):
+    def test_wrong_answers(self):
"""
Check that answering incorrectly is graded properly.
"""
......@@ -478,6 +478,44 @@ class TestCourseGrader(TestSubmittingProblems):
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
+    def test_submissions_api_overrides_scores(self):
+        """
+        Check that a score reported through the submissions API overrides
+        the score read from StudentModule.
+        """
+        self.basic_setup()
+        self.submit_question_answer('p1', {'2_1': 'Correct'})
+        self.submit_question_answer('p2', {'2_1': 'Correct'})
+        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
+        self.check_grade_percent(0.67)
+        self.assertEqual(self.get_grade_summary()['grade'], 'B')
+
+        # But now we mock out a get_scores call, and watch as it overrides the
+        # score read from StudentModule and our student gets an A instead.
+        with patch('submissions.api.get_scores') as mock_get_scores:
+            mock_get_scores.return_value = {
+                self.problem_location('p3'): (1, 1)
+            }
+            self.check_grade_percent(1.0)
+            self.assertEqual(self.get_grade_summary()['grade'], 'A')
+
+    def test_submissions_api_anonymous_student_id(self):
+        """
+        Check that the submissions API is sent an anonymous student ID.
+        """
+        self.basic_setup()
+        self.submit_question_answer('p1', {'2_1': 'Correct'})
+        self.submit_question_answer('p2', {'2_1': 'Correct'})
+        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
+
+        with patch('submissions.api.get_scores') as mock_get_scores:
+            mock_get_scores.return_value = {
+                self.problem_location('p3'): (1, 1)
+            }
+            self.get_grade_summary()
+
+            # Verify that the submissions API was sent an anonymized student ID
+            mock_get_scores.assert_called_with(self.course.id, '99ac6730dc5f900d69fd735975243b31')
def test_weighted_homework(self):
"""
Test that the homework section has proper weight.
......
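The asserted ID is a 32-character hex digest. As an assumption about how anonymous_id_for_user typically derives such IDs (hashing a server secret together with user and course identifiers; the exact inputs are not shown in this commit):

    import hashlib

    def anonymous_id_sketch(secret_key, user_id, course_id):
        # Hypothetical reconstruction; the real anonymous_id_for_user may hash
        # different or additional inputs.
        hasher = hashlib.md5()
        hasher.update(secret_key.encode('utf-8'))
        hasher.update(str(user_id).encode('utf-8'))
        hasher.update(course_id.encode('utf-8'))
        return hasher.hexdigest()  # 32 hex chars, e.g. '99ac6730dc5f...'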
......@@ -1473,6 +1473,13 @@ ALL_LANGUAGES = (
OPTIONAL_APPS = (
'edx_jsdraw',
'mentoring',
+    # edx-ora2
+    'submissions',
+    'openassessment',
+    'openassessment.assessment',
+    'openassessment.workflow',
+    'openassessment.xblock'
)
for app_name in OPTIONAL_APPS:
......
......@@ -26,3 +26,4 @@
-e git+https://github.com/edx/bok-choy.git@25a47b3bf87c503fc4996e52addac83b42ec6f38#egg=bok_choy
-e git+https://github.com/edx-solutions/django-splash.git@9965a53c269666a30bb4e2b3f6037c138aef2a55#egg=django-splash
-e git+https://github.com/edx/acid-block.git@459aff7b63db8f2c5decd1755706c1a64fb4ebb1#egg=acid-xblock
+-e git+https://github.com/edx/edx-ora2.git@87fd72f9927cd37e553d7f68cfaf80d6c574eb55#egg=edx-ora2