Commit 3bd04290 by Vik Paruchuri

Add tests, address review comments

parent a44eacd4
......@@ -290,7 +290,6 @@ class XMLModuleStore(ModuleStoreBase):
if course_dirs is None:
course_dirs = sorted([d for d in os.listdir(self.data_dir) if
os.path.exists(self.data_dir / d / "course.xml")])
for course_dir in course_dirs:
self.try_load_course(course_dir)
......
......@@ -6,7 +6,7 @@ log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
Interface to controller query backend.
"""
def __init__(self, config, system):
......@@ -77,6 +77,50 @@ class ControllerQueryService(GradingService):
return response
class MockControllerQueryService(object):
"""
Mock controller query service for testing
"""
def __init__(self, config, system):
pass
def check_if_name_is_unique(self, **params):
"""
Stub for now; mock a real response later if needed.
"""
pass
def check_for_eta(self, **params):
"""
Stub for now; mock a real response later if needed.
"""
pass
def check_combined_notifications(self, **params):
combined_notifications = '{"flagged_submissions_exist": false, "version": 1, "new_student_grading_to_view": false, "success": true, "staff_needs_to_grade": false, "student_needs_to_peer_grade": true, "overall_need_to_check": true}'
return combined_notifications
def get_grading_status_list(self, **params):
grading_status_list = '{"version": 1, "problem_list": [{"problem_name": "Science Question -- Machine Assessed", "grader_type": "NA", "eta_available": true, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML"}, {"problem_name": "Humanities Question -- Peer Assessed", "grader_type": "NA", "eta_available": true, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer"}], "success": true}'
return grading_status_list
def get_flagged_problem_list(self, **params):
flagged_problem_list = '{"version": 1, "success": false, "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"}'
return flagged_problem_list
def take_action_on_flags(self, **params):
"""
Stub for now; mock a real response later if needed.
"""
pass
def convert_seconds_to_human_readable(seconds):
if seconds < 60:
human_string = "{0} seconds".format(seconds)
......
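For reference, a minimal usage sketch (not part of the diff): the mock returns canned JSON strings rather than dicts, so callers still pass the result through json.loads exactly as with the real service. The import path below is assumed from the patch target used in the tests in this commit.

import json
from xmodule.open_ended_grading_classes.controller_query_service import MockControllerQueryService

mock_service = MockControllerQueryService(config={}, system=None)  # both arguments are ignored by the mock
notifications = json.loads(mock_service.check_combined_notifications(course_id="MITx/oe101x/2012_Fall"))
assert notifications["success"] is True
assert notifications["student_needs_to_peer_grade"] is True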
......@@ -5,19 +5,20 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
"""
import json
from mock import MagicMock
from mock import MagicMock, patch, Mock
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from django.http import HttpResponse
from mitxmako.shortcuts import render_to_string
from xmodule.open_ended_grading_classes import peer_grading_service
from xmodule.open_ended_grading_classes import peer_grading_service, controller_query_service
from xmodule import peer_grading_module
from xmodule.modulestore.django import modulestore
import xmodule.modulestore.django
from xmodule.x_module import ModuleSystem
from open_ended_grading import staff_grading_service
from open_ended_grading import staff_grading_service, views
from courseware.access import _course_staff_group_name
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_XML_MODULESTORE, get_user
......@@ -25,10 +26,11 @@ import logging
log = logging.getLogger(__name__)
from django.test.utils import override_settings
from django.http import QueryDict
from xmodule.tests import test_util_open_ended
from courseware.tests import factories
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class TestStaffGradingService(LoginEnrollmentTestCase):
......@@ -55,8 +57,8 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
def make_instructor(course):
group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name)
g.user_set.add(get_user(self.instructor))
group = Group.objects.create(name=group_name)
group.user_set.add(get_user(self.instructor))
make_instructor(self.toy)
......@@ -76,30 +78,28 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
self.check_for_get_code(404, url)
self.check_for_post_code(404, url)
def test_get_next(self):
self.login(self.instructor, self.password)
url = reverse('staff_grading_get_next', kwargs={'course_id': self.course_id})
data = {'location': self.location}
r = self.check_for_post_code(200, url, data)
response = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
content = json.loads(response.content)
self.assertTrue(d['success'])
self.assertEquals(d['submission_id'], self.mock_service.cnt)
self.assertIsNotNone(d['submission'])
self.assertIsNotNone(d['num_graded'])
self.assertIsNotNone(d['min_for_ml'])
self.assertIsNotNone(d['num_pending'])
self.assertIsNotNone(d['prompt'])
self.assertIsNotNone(d['ml_error_info'])
self.assertIsNotNone(d['max_score'])
self.assertIsNotNone(d['rubric'])
self.assertTrue(content['success'])
self.assertEquals(content['submission_id'], self.mock_service.cnt)
self.assertIsNotNone(content['submission'])
self.assertIsNotNone(content['num_graded'])
self.assertIsNotNone(content['min_for_ml'])
self.assertIsNotNone(content['num_pending'])
self.assertIsNotNone(content['prompt'])
self.assertIsNotNone(content['ml_error_info'])
self.assertIsNotNone(content['max_score'])
self.assertIsNotNone(content['rubric'])
def save_grade_base(self,skip=False):
def save_grade_base(self, skip=False):
self.login(self.instructor, self.password)
url = reverse('staff_grading_save_grade', kwargs={'course_id': self.course_id})
......@@ -111,12 +111,12 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
'submission_flagged': "true",
'rubric_scores[]': ['1', '2']}
if skip:
data.update({'skipped' : True})
data.update({'skipped': True})
r = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
self.assertTrue(d['success'], str(d))
self.assertEquals(d['submission_id'], self.mock_service.cnt)
response = self.check_for_post_code(200, url, data)
content = json.loads(response.content)
self.assertTrue(content['success'], str(content))
self.assertEquals(content['submission_id'], self.mock_service.cnt)
def test_save_grade(self):
self.save_grade_base(skip=False)
......@@ -130,11 +130,11 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
url = reverse('staff_grading_get_problem_list', kwargs={'course_id': self.course_id})
data = {}
r = self.check_for_post_code(200, url, data)
d = json.loads(r.content)
response = self.check_for_post_code(200, url, data)
content = json.loads(response.content)
self.assertTrue(d['success'], str(d))
self.assertIsNotNone(d['problem_list'])
self.assertTrue(content['success'], str(content))
self.assertIsNotNone(content['problem_list'])
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
......@@ -181,14 +181,14 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
def test_get_next_submission_success(self):
data = {'location': self.location}
r = self.peer_module.get_next_submission(data)
d = r
response = self.peer_module.get_next_submission(data)
content = response
self.assertTrue(d['success'])
self.assertIsNotNone(d['submission_id'])
self.assertIsNotNone(d['prompt'])
self.assertIsNotNone(d['submission_key'])
self.assertIsNotNone(d['max_score'])
self.assertTrue(content['success'])
self.assertIsNotNone(content['submission_id'])
self.assertIsNotNone(content['prompt'])
self.assertIsNotNone(content['submission_key'])
self.assertIsNotNone(content['max_score'])
def test_get_next_submission_missing_location(self):
data = {}
......@@ -216,10 +216,9 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
qdict.getlist = fake_get_item
qdict.keys = data.keys
r = self.peer_module.save_grade(qdict)
d = r
response = self.peer_module.save_grade(qdict)
self.assertTrue(d['success'])
self.assertTrue(response['success'])
def test_save_grade_missing_keys(self):
data = {}
......@@ -229,37 +228,35 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
def test_is_calibrated_success(self):
data = {'location': self.location}
r = self.peer_module.is_student_calibrated(data)
d = r
response = self.peer_module.is_student_calibrated(data)
self.assertTrue(d['success'])
self.assertTrue('calibrated' in d)
self.assertTrue(response['success'])
self.assertTrue('calibrated' in response)
def test_is_calibrated_failure(self):
data = {}
d = self.peer_module.is_student_calibrated(data)
self.assertFalse(d['success'])
self.assertFalse('calibrated' in d)
response = self.peer_module.is_student_calibrated(data)
self.assertFalse(response['success'])
self.assertFalse('calibrated' in response)
def test_show_calibration_essay_success(self):
data = {'location': self.location}
r = self.peer_module.show_calibration_essay(data)
d = r
response = self.peer_module.show_calibration_essay(data)
self.assertTrue(d['success'])
self.assertIsNotNone(d['submission_id'])
self.assertIsNotNone(d['prompt'])
self.assertIsNotNone(d['submission_key'])
self.assertIsNotNone(d['max_score'])
self.assertTrue(response['success'])
self.assertIsNotNone(response['submission_id'])
self.assertIsNotNone(response['prompt'])
self.assertIsNotNone(response['submission_key'])
self.assertIsNotNone(response['max_score'])
def test_show_calibration_essay_missing_key(self):
data = {}
d = self.peer_module.show_calibration_essay(data)
response = self.peer_module.show_calibration_essay(data)
self.assertFalse(d['success'])
self.assertEqual(d['error'], "Missing required keys: location")
self.assertFalse(response['success'])
self.assertEqual(response['error'], "Missing required keys: location")
def test_save_calibration_essay_success(self):
data = {
......@@ -281,13 +278,39 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
qdict.getlist = fake_get_item
qdict.keys = data.keys
d = self.peer_module.save_calibration_essay(qdict)
self.assertTrue(d['success'])
self.assertTrue('actual_score' in d)
response = self.peer_module.save_calibration_essay(qdict)
self.assertTrue(response['success'])
self.assertTrue('actual_score' in response)
def test_save_calibration_essay_missing_keys(self):
data = {}
d = self.peer_module.save_calibration_essay(data)
self.assertFalse(d['success'])
self.assertTrue(d['error'].find('Missing required keys:') > -1)
self.assertFalse('actual_score' in d)
response = self.peer_module.save_calibration_essay(data)
self.assertFalse(response['success'])
self.assertTrue(response['error'].find('Missing required keys:') > -1)
self.assertFalse('actual_score' in response)
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class TestPanel(LoginEnrollmentTestCase):
"""Check the Table of Contents for a course"""
def setUp(self):
# The open ended test course should be loaded by the XML modulestore
self.course_name = 'edX/open_ended/2012_Fall'
self.course = modulestore().get_course(self.course_name)
self.user = factories.UserFactory()
def test_open_ended_panel(self):
"""
Test that the peer grading module in the demo course is found.
"""
found_module, peer_grading_module = views.find_peer_grading_module(self.course)
self.assertTrue(found_module)
@patch('xmodule.open_ended_grading_classes.controller_query_service.ControllerQueryService',
controller_query_service.MockControllerQueryService)
def test_problem_list(self):
request = Mock(user=self.user)
response = views.student_problem_list(request, self.course.id)
self.assertTrue(isinstance(response, HttpResponse))
......@@ -31,10 +31,10 @@ log = logging.getLogger(__name__)
system = ModuleSystem(
ajax_url=None,
track_function=None,
get_module = None,
get_module=None,
render_template=render_to_string,
replace_urls = None,
xblock_model_data= {}
replace_urls=None,
xblock_model_data={}
)
controller_qs = ControllerQueryService(settings.OPEN_ENDED_GRADING_INTERFACE, system)
......@@ -90,6 +90,7 @@ def staff_grading(request, course_id):
# Checked above
'staff_access': True, })
def find_peer_grading_module(course):
"""
Given a course, finds the first peer grading module in it.
......@@ -103,14 +104,15 @@ def find_peer_grading_module(course):
#Get the course id and split it
course_id_parts = course.id.split("/")
#TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed
#to get_items is called revision. Is this the same as run?
#Get the peer grading modules currently in the course
items = modulestore().get_items(['i4x', None, course_id_parts[1], 'peergrading', None])
log.info("COURSE ID PARTS")
log.info(course_id_parts)
#Get the peer grading modules currently in the course. Explicitly specify the course id to avoid issues with different runs.
items = modulestore().get_items(['i4x', course_id_parts[0], course_id_parts[1], 'peergrading', None],
course_id=course.id)
#See if any of the modules are centralized modules (ie display info from multiple problems)
items = [i for i in items if not getattr(i,"use_for_single_location", True)]
items = [i for i in items if not getattr(i, "use_for_single_location", True)]
#Get the first one
if len(items)>0:
if len(items) > 0:
item_location = items[0].location
#Generate a url for the first module and redirect the user to it
problem_url_parts = search.path_to_location(modulestore(), course.id, item_location)
......@@ -119,10 +121,12 @@ def find_peer_grading_module(course):
return found_module, problem_url
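To illustrate the get_items change above, a sketch only; the example id comes from the mock data earlier in this commit, and the run-scoping behaviour is taken from the in-line comment rather than verified here.

course_id = "MITx/oe101x/2012_Fall"
course_id_parts = course_id.split("/")  # ["MITx", "oe101x", "2012_Fall"]
location_query = ['i4x', course_id_parts[0], course_id_parts[1], 'peergrading', None]
# With the org slot set to None (the old behaviour) the query could match peergrading
# modules from any org sharing the course name; supplying the org here and passing
# course_id=course_id scopes the lookup to this specific course and run.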
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def peer_grading(request, course_id):
'''
Show a peer grading interface to the student. The interface is linked to from the button.
When a student clicks on the "peer grading" button in the open ended interface, link them to a peer grading
xmodule in the course.
'''
#Get the current course
......@@ -138,6 +142,7 @@ def peer_grading(request, course_id):
return HttpResponseRedirect(problem_url)
def generate_problem_url(problem_url_parts, base_course_url):
"""
From a list of problem url parts generated by search.path_to_location and a base course url, generates a url to a problem
......@@ -190,7 +195,9 @@ def student_problem_list(request, course_id):
except ItemNotFoundError:
#If the problem cannot be found at the location received from the grading controller server, it has been deleted by the course author.
#Continue with the rest of the location to construct the list
error_message = "Could not find module for course {0} at location {1}".format(course.id, problem_list[i]['location'])
error_message = "Could not find module for course {0} at location {1}".format(course.id,
problem_list[i][
'location'])
log.error(error_message)
#Mark the problem for removal from the list
list_to_remove.append(i)
......@@ -225,7 +232,7 @@ def student_problem_list(request, course_id):
success = False
#Remove problems that cannot be found in the courseware from the list
problem_list = [problem_list[i] for i in xrange(0,len(problem_list)) if i not in list_to_remove]
problem_list = [problem_list[i] for i in xrange(0, len(problem_list)) if i not in list_to_remove]
ajax_url = _reverse_with_slash('open_ended_problems', course_id)
return render_to_response('open_ended_problems/open_ended_problems.html', {
......@@ -329,6 +336,10 @@ def combined_notifications(request, course_id):
'description': description,
'alert_message': alert_message
}
#The open ended panel will need to link the "peer grading" button in the panel to a peer grading
#xmodule defined in the course. This checks to see if the human name of the server notification
#that we are currently processing is "peer grading". If it is, it looks for a peer grading
#module in the course. If none exists, it removes the peer grading item from the panel.
if human_name == "Peer Grading":
found_module, problem_url = find_peer_grading_module(course)
if found_module:
......@@ -345,9 +356,7 @@ def combined_notifications(request, course_id):
'ajax_url': ajax_url,
}
return render_to_response('open_ended_problems/combined_notifications.html',
combined_dict
)
return render_to_response('open_ended_problems/combined_notifications.html', combined_dict)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
......