Commit e122efc5 by VikParuchuri

Merge pull request #1991 from edx/feature/vik/oe-tests

Feature/vik/oe tests
Parents: 4e922778, 451784dc
@@ -104,11 +104,14 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
     icon_class = 'problem'
-    js = {'coffee':
-          [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
-           resource_string(__name__, 'js/src/collapsible.coffee'),
-           resource_string(__name__, 'js/src/javascript_loader.coffee'),
-           ]}
+    js = {
+        'coffee':
+        [
+            resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
+            resource_string(__name__, 'js/src/collapsible.coffee'),
+            resource_string(__name__, 'js/src/javascript_loader.coffee'),
+        ]
+    }
     js_module_name = "CombinedOpenEnded"
     css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
...
@@ -294,9 +294,8 @@ class CombinedOpenEndedV1Module():
         if self.current_task_number > 0:
             last_response_data = self.get_last_response(self.current_task_number - 1)
             current_response_data = self.get_current_attributes(self.current_task_number)
             if (current_response_data['min_score_to_attempt'] > last_response_data['score']
                     or current_response_data['max_score_to_attempt'] < last_response_data['score']):
                 self.state = self.DONE
                 self.ready_to_reset = True
@@ -662,9 +661,10 @@ class CombinedOpenEndedV1Module():
             return {
                 'success': False,
                 #This is a student_facing_error
-                'error': ('You have attempted this question {0} times. '
-                          'You are only allowed to attempt it {1} times.').format(
-                    self.student_attempts, self.attempts)
+                'error': (
+                    'You have attempted this question {0} times. '
+                    'You are only allowed to attempt it {1} times.'
+                ).format(self.student_attempts, self.attempts)
             }
         self.state = self.INITIAL
         self.ready_to_reset = False
@@ -803,6 +803,17 @@ class CombinedOpenEndedV1Module():
         return progress_object

+    def out_of_sync_error(self, get, msg=''):
+        """
+        Return an out-of-sync error message dict, and also log it.
+        """
+        #This is a dev_facing_error
+        log.warning("Combined module state out of sync. state: %r, get: %r. %s",
+                    self.state, get, msg)
+        #This is a student_facing_error
+        return {'success': False,
+                'error': 'The problem state got out-of-sync. Please try reloading the page.'}
+
 class CombinedOpenEndedV1Descriptor():
     """
@@ -849,7 +860,6 @@ class CombinedOpenEndedV1Descriptor():
         return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}

     def definition_to_xml(self, resource_fs):
         '''Return an xml element representing this definition.'''
         elt = etree.Element('combinedopenended')
...
@@ -76,7 +76,6 @@ class GradingService(object):
         return r.text

     def _try_with_login(self, operation):
         """
         Call operation(), which should return a requests response object. If
@@ -87,7 +86,7 @@ class GradingService(object):
         """
         response = operation()
         if (response.json
-                and response.json.get('success') == False
+                and response.json.get('success') is False
                 and response.json.get('error') == 'login_required'):
             # apparently we aren't logged in. Try to fix that.
             r = self._login()
...
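The retry-on-login check above is consumed by the service's request helpers: a call is wrapped in a zero-argument operation, and if the grading controller answers with 'login_required', the session is re-established and the call retried. A minimal sketch of that wrapping, assuming the service keeps an authenticated requests session in self.session; the helper below is illustrative, not copied from this commit:

    def get(self, url, params):
        # Wrap the request so an expired session is transparently
        # re-logged-in and the call retried once.
        operation = lambda: self.session.get(url, params=params)
        return self._try_with_login(operation)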
@@ -72,7 +72,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         self._parse(oeparam, self.child_prompt, self.child_rubric, system)

-        if self.child_created == True and self.child_state == self.ASSESSING:
+        if self.child_created is True and self.child_state == self.ASSESSING:
             self.child_created = False
             self.send_to_grader(self.latest_answer(), system)
             self.child_created = False
@@ -159,9 +159,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             score = int(survey_responses['score'])
         except:
             #This is a dev_facing_error
-            error_message = ("Could not parse submission id, grader id, "
-                             "or feedback from message_post ajax call. Here is the message data: {0}".format(
-                survey_responses))
+            error_message = (
+                "Could not parse submission id, grader id, "
+                "or feedback from message_post ajax call. "
+                "Here is the message data: {0}".format(survey_responses)
+            )
             log.exception(error_message)
             #This is a student_facing_error
             return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
@@ -179,8 +181,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             queue_name=self.message_queue_name
         )
-        student_info = {'anonymous_student_id': anonymous_student_id,
-                        'submission_time': qtime,
+        student_info = {
+            'anonymous_student_id': anonymous_student_id,
+            'submission_time': qtime,
         }
         contents = {
             'feedback': feedback,
@@ -190,8 +193,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             'student_info': json.dumps(student_info),
         }
-        (error, msg) = qinterface.send_to_queue(header=xheader,
-                                                body=json.dumps(contents))
+        (error, msg) = qinterface.send_to_queue(
+            header=xheader,
+            body=json.dumps(contents)
+        )
         #Convert error to a success value
         success = True
@@ -224,15 +229,18 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                     anonymous_student_id +
                     str(len(self.child_history)))

-        xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['construct_callback'](),
-                                                lms_key=queuekey,
-                                                queue_name=self.queue_name)
+        xheader = xqueue_interface.make_xheader(
+            lms_callback_url=system.xqueue['construct_callback'](),
+            lms_key=queuekey,
+            queue_name=self.queue_name
+        )

         contents = self.payload.copy()

         # Metadata related to the student submission revealed to the external grader
-        student_info = {'anonymous_student_id': anonymous_student_id,
-                        'submission_time': qtime,
+        student_info = {
+            'anonymous_student_id': anonymous_student_id,
+            'submission_time': qtime,
         }

         #Update contents with student response and student info
@@ -243,12 +251,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         })

         # Submit request. When successful, 'msg' is the prior length of the queue
-        (error, msg) = qinterface.send_to_queue(header=xheader,
-                                                body=json.dumps(contents))
+        qinterface.send_to_queue(
+            header=xheader,
+            body=json.dumps(contents)
+        )

         # State associated with the queueing request
-        queuestate = {'key': queuekey,
-                      'time': qtime, }
+        queuestate = {
+            'key': queuekey,
+            'time': qtime,
+        }

         return True

     def _update_score(self, score_msg, queuekey, system):
@@ -302,11 +314,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         # We want to display available feedback in a particular order.
         # This dictionary specifies which goes first--lower first.
-        priorities = {# These go at the start of the feedback
-                      'spelling': 0,
-                      'grammar': 1,
-                      # needs to be after all the other feedback
-                      'markup_text': 3}
+        priorities = {
+            # These go at the start of the feedback
+            'spelling': 0,
+            'grammar': 1,
+            # needs to be after all the other feedback
+            'markup_text': 3
+        }
         do_not_render = ['topicality', 'prompt-overlap']
         default_priority = 2
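To make the ordering concrete: feedback categories are rendered lowest priority value first, and categories not listed in priorities fall back to default_priority. A small sketch of that sort, using a made-up feedback dict rather than real grader output:

    feedback = {'markup_text': '...', 'grammar': '...', 'other': '...', 'spelling': '...'}
    ordered = sorted(feedback, key=lambda name: priorities.get(name, default_priority))
    # ordered == ['spelling', 'grammar', 'other', 'markup_text']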
@@ -393,7 +407,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             rubric_feedback = ""
             feedback = self._convert_longform_feedback_to_html(response_items)
             rubric_scores = []
-            if response_items['rubric_scores_complete'] == True:
+            if response_items['rubric_scores_complete'] is True:
                 rubric_renderer = CombinedOpenEndedRubric(system, True)
                 rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml'])
                 success = rubric_dict['success']
@@ -401,8 +415,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 rubric_scores = rubric_dict['rubric_scores']

         if not response_items['success']:
-            return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
-                                          {'errors': feedback})
+            return system.render_template(
+                "{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
+                {'errors': feedback}
+            )

         feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
             'grader_type': response_items['grader_type'],
@@ -496,8 +512,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 grader_types.append(score_result['grader_type'])
                 try:
                     feedback_dict = json.loads(score_result['feedback'][i])
-                except:
-                    pass
+                except Exception:
+                    feedback_dict = score_result['feedback'][i]
                 feedback_dicts.append(feedback_dict)
                 grader_ids.append(score_result['grader_id'][i])
                 submission_ids.append(score_result['submission_id'])
@@ -515,8 +531,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             feedback_items = [feedback]
             try:
                 feedback_dict = json.loads(score_result['feedback'])
-            except:
-                pass
+            except Exception:
+                feedback_dict = score_result.get('feedback', '')
             feedback_dicts = [feedback_dict]
             grader_ids = [score_result['grader_id']]
             submission_ids = [score_result['submission_id']]
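The new fallback reflects the fact that the grader's feedback field may arrive either as a JSON document or as plain text, so a failed parse now keeps the raw value instead of silently leaving feedback_dict unset. A quick illustration with made-up strings:

    import json

    json.loads('{"spelling": "Ok."}')    # valid JSON -> dict
    json.loads('Good use of evidence')   # not JSON -> raises ValueError, so the raw string is kept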
@@ -545,8 +561,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         if not self.child_history:
             return ""
-        feedback_dict = self._parse_score_msg(self.child_history[-1].get('post_assessment', ""), system,
-                                              join_feedback=join_feedback)
+        feedback_dict = self._parse_score_msg(
+            self.child_history[-1].get('post_assessment', ""),
+            system,
+            join_feedback=join_feedback
+        )
         if not short_feedback:
             return feedback_dict['feedback'] if feedback_dict['valid'] else ''
         if feedback_dict['valid']:
@@ -711,7 +730,7 @@ class OpenEndedDescriptor():
     template_dir_name = "openended"

     def __init__(self, system):
-        self.system =system
+        self.system = system

     @classmethod
     def definition_from_xml(cls, xml_object, system):
@@ -734,8 +753,9 @@ class OpenEndedDescriptor():
             """Assumes that xml_object has child k"""
             return xml_object.xpath(k)[0]

-        return {'oeparam': parse('openendedparam')}
+        return {
+            'oeparam': parse('openendedparam')
+        }

     def definition_to_xml(self, resource_fs):
         '''Return an xml element representing this definition.'''
...
@@ -101,8 +101,9 @@ class OpenEndedChild(object):
         # completion (doesn't matter if you self-assessed correct/incorrect).
         if system.open_ended_grading_interface:
             self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
-            self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
-                                                                                 system)
+            self.controller_qs = controller_query_service.ControllerQueryService(
+                system.open_ended_grading_interface, system
+            )
         else:
             self.peer_gs = MockPeerGradingService()
             self.controller_qs = None
...
@@ -37,7 +37,7 @@ class PeerGradingService(GradingService):
     def get_next_submission(self, problem_location, grader_id):
         response = self.get(self.get_next_submission_url,
                             {'location': problem_location, 'grader_id': grader_id})
         return self.try_to_decode(self._render_rubric(response))

     def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
@@ -100,29 +100,29 @@ without making actual service calls to the grading controller
 class MockPeerGradingService(object):
     def get_next_submission(self, problem_location, grader_id):
-        return json.dumps({'success': True,
-                           'submission_id': 1,
-                           'submission_key': "",
-                           'student_response': 'fake student response',
-                           'prompt': 'fake submission prompt',
-                           'rubric': 'fake rubric',
-                           'max_score': 4})
+        return {'success': True,
+                'submission_id': 1,
+                'submission_key': "",
+                'student_response': 'fake student response',
+                'prompt': 'fake submission prompt',
+                'rubric': 'fake rubric',
+                'max_score': 4}

     def save_grade(self, location, grader_id, submission_id,
                    score, feedback, submission_key, rubric_scores, submission_flagged):
-        return json.dumps({'success': True})
+        return {'success': True}

     def is_student_calibrated(self, problem_location, grader_id):
-        return json.dumps({'success': True, 'calibrated': True})
+        return {'success': True, 'calibrated': True}

     def show_calibration_essay(self, problem_location, grader_id):
-        return json.dumps({'success': True,
-                           'submission_id': 1,
-                           'submission_key': '',
-                           'student_response': 'fake student response',
-                           'prompt': 'fake submission prompt',
-                           'rubric': 'fake rubric',
-                           'max_score': 4})
+        return {'success': True,
+                'submission_id': 1,
+                'submission_key': '',
+                'student_response': 'fake student response',
+                'prompt': 'fake submission prompt',
+                'rubric': 'fake rubric',
+                'max_score': 4}

     def save_calibration_essay(self, problem_location, grader_id,
                                calibration_essay_id, submission_key, score,
@@ -130,10 +130,9 @@ class MockPeerGradingService(object):
         return {'success': True, 'actual_score': 2}

     def get_problem_list(self, course_id, grader_id):
-        return json.dumps({'success': True,
-                           'problem_list': [
-                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
-                                           'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}),
-                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
-                                           'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5})
-                           ]})
+        return {'success': True,
+                'problem_list': [
+                ]}
+
+    def get_data_for_location(self, problem_location, student_id):
+        return {"version": 1, "count_graded": 3, "count_required": 3, "success": True, "student_sub_count": 1}
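Because the mock service now returns plain dicts instead of JSON strings, callers can index the response directly instead of running it through json.loads first (the LMS tests further down are updated the same way). A minimal sketch of the new calling convention, with illustrative arguments:

    mock_service = MockPeerGradingService()
    response = mock_service.get_next_submission('i4x://edX/open_ended/dummy', 'grader-1')
    assert response['success'] is True
    assert response['max_score'] == 4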
@@ -498,7 +498,6 @@ class PeerGradingModule(PeerGradingFields, XModule):
             log.error("Problem {0} does not exist in this course".format(location))
             raise

         for problem in problem_list:
             problem_location = problem['location']
             descriptor = _find_corresponding_module_for_location(problem_location)
...
@@ -20,7 +20,7 @@ from xmodule.x_module import ModuleSystem
 from mock import Mock

 open_ended_grading_interface = {
-    'url': 'http://sandbox-grader-001.m.edx.org/peer_grading',
+    'url': 'blah/',
     'username': 'incorrect_user',
     'password': 'incorrect_pass',
     'staff_grading' : 'staff_grading',
@@ -52,7 +52,7 @@ def test_system():
         user=Mock(is_staff=False),
         filestore=Mock(),
         debug=True,
-        xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10},
+        xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10, 'construct_callback' : Mock(side_effect="/")},
         node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
         xblock_model_data=lambda descriptor: descriptor._model_data,
         anonymous_student_id='student',
...
import unittest
from xmodule.modulestore import Location
from . import test_system
from test_util_open_ended import MockQueryDict, DummyModulestore
import json

from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError

import logging

log = logging.getLogger(__name__)

ORG = "edX"
COURSE = "open_ended"


class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
    """
    Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
    external grading service.
    """
    problem_location = Location(["i4x", "edX", "open_ended", "peergrading",
                                 "PeerGradingSample"])
    calibrated_dict = {'location': "blah"}
    save_dict = MockQueryDict()
    save_dict.update({
        'location': "blah",
        'submission_id': 1,
        'submission_key': "",
        'score': 1,
        'feedback': "",
        'rubric_scores[]': [0, 1],
        'submission_flagged': False,
    })

    def setUp(self):
        """
        Create a peer grading module from a test system
        @return:
        """
        self.test_system = test_system()
        self.test_system.open_ended_grading_interface = None
        self.setup_modulestore(COURSE)
        self.peer_grading = self.get_module_from_location(self.problem_location, COURSE)

    def test_module_closed(self):
        """
        Test if peer grading is closed
        @return:
        """
        closed = self.peer_grading.closed()
        self.assertEqual(closed, False)

    def test_get_html(self):
        """
        Test to see if the module can be rendered
        @return:
        """
        html = self.peer_grading.get_html()

    def test_get_data(self):
        """
        Try getting data from the external grading service
        @return:
        """
        success, data = self.peer_grading.query_data_for_location()
        self.assertEqual(success, True)

    def test_get_score(self):
        """
        Test getting the score
        @return:
        """
        score = self.peer_grading.get_score()
        self.assertEquals(score['score'], None)

    def test_get_max_score(self):
        """
        Test getting the max score
        @return:
        """
        max_score = self.peer_grading.max_score()
        self.assertEquals(max_score, None)

    def get_next_submission(self):
        """
        Test to see if we can get the next mock submission
        @return:
        """
        success, next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
        self.assertEqual(success, True)

    def test_save_grade(self):
        """
        Test if we can save the grade
        @return:
        """
        response = self.peer_grading.save_grade(self.save_dict)
        self.assertEqual(response['success'], True)

    def test_is_student_calibrated(self):
        """
        Check to see if the student has calibrated yet
        @return:
        """
        calibrated_dict = {'location': "blah"}
        response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_show_calibration_essay(self):
        """
        Test showing the calibration essay
        @return:
        """
        response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_save_calibration_essay(self):
        """
        Test saving the calibration essay
        @return:
        """
        response = self.peer_grading.save_calibration_essay(self.save_dict)
        self.assertEqual(response['success'], True)

    def test_peer_grading_problem(self):
        """
        See if we can render a single problem
        @return:
        """
        response = self.peer_grading.peer_grading_problem(self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_get_instance_state(self):
        """
        Get the instance state dict
        @return:
        """
        self.peer_grading.get_instance_state()


class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
    """
    Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
    external grading service.
    """
    problem_location = Location(["i4x", "edX", "open_ended", "peergrading",
                                 "PeerGradingScored"])

    def setUp(self):
        """
        Create a peer grading module from a test system
        @return:
        """
        self.test_system = test_system()
        self.test_system.open_ended_grading_interface = None
        self.setup_modulestore(COURSE)

    def test_metadata_load(self):
        peer_grading = self.get_module_from_location(self.problem_location, COURSE)
        self.assertEqual(peer_grading.closed(), False)
\ No newline at end of file
+from . import test_system
+from xmodule.modulestore import Location
+from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
+from xmodule.tests.test_export import DATA_DIR
+
 OPEN_ENDED_GRADING_INTERFACE = {
-    'url': 'http://127.0.0.1:3033/',
+    'url': 'blah/',
     'username': 'incorrect',
     'password': 'incorrect',
     'staff_grading': 'staff_grading',
@@ -11,4 +16,40 @@ S3_INTERFACE = {
     'aws_access_key': "",
     'aws_secret_key': "",
     "aws_bucket_name": "",
-}
\ No newline at end of file
+}
+
+
+class MockQueryDict(dict):
+    """
+    Mock a query dict so that it can be used in test classes. This will only work with the combinedopenended tests,
+    and does not mock the full query dict, only the behavior that is needed there (namely getlist).
+    """
+    def getlist(self, key, default=None):
+        try:
+            return super(MockQueryDict, self).__getitem__(key)
+        except KeyError:
+            if default is None:
+                return []
+            return default
+
+
+class DummyModulestore(object):
+    """
+    A mixin that allows test classes to have convenience functions to get a module given a location
+    """
+    test_system = test_system()
+
+    def setup_modulestore(self, name):
+        self.modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
+
+    def get_course(self, name):
+        """Get a test course by directory name. If there's more than one, error."""
+        courses = self.modulestore.get_courses()
+        return courses[0]
+
+    def get_module_from_location(self, location, course):
+        course = self.get_course(course)
+        if not isinstance(location, Location):
+            location = Location(location)
+        descriptor = self.modulestore.get_instance(course.id, location, depth=None)
+        return descriptor.xmodule(self.test_system)
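A quick sketch of how these two helpers are exercised by the tests above; the values below are illustrative stand-ins rather than new fixtures from this commit:

    # MockQueryDict only emulates getlist(), which is all the modules need.
    qdict = MockQueryDict({'rubric_scores[]': [0, 1]})
    qdict.getlist('rubric_scores[]')   # -> [0, 1]
    qdict.getlist('missing_key')       # -> [] when the key is absent and no default is given

    # DummyModulestore mixes convenience loaders into a TestCase.
    class ExampleTest(unittest.TestCase, DummyModulestore):
        def setUp(self):
            self.setup_modulestore("open_ended")
            self.module = self.get_module_from_location(
                Location(["i4x", "edX", "open_ended", "peergrading", "PeerGradingSample"]),
                "open_ended"
            )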
This is a very simple course, useful for debugging open ended grading code.

<combinedopenended attempts="10000" display_name="Humanities Question -- Machine Assessed">
    <rubric>
        <rubric>
            <category>
                <description>Writing Applications</description>
                <option>The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
                <option>The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
            </category>
            <category>
                <description>Language Conventions</description>
                <option>The essay demonstrates a reasonable command of proper spelling and grammar.</option>
                <option>The essay demonstrates superior command of proper spelling and grammar.</option>
            </category>
        </rubric>
    </rubric>
    <prompt>
        <h4>Censorship in the Libraries</h4>
        <p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
        <p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
    </prompt>
    <task>
        <selfassessment/>
    </task>
    <task>
        <openended min_score_to_attempt="2" max_score_to_attempt="3">
            <openendedparam>
                <initial_display>Enter essay here.</initial_display>
                <answer_display>This is the answer.</answer_display>
                <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
            </openendedparam>
        </openended>
    </task>
</combinedopenended>
\ No newline at end of file
<course org="edX" course="open_ended" url_name="2012_Fall"/>
<course>
<chapter url_name="Overview">
<combinedopenended url_name="SampleQuestion"/>
<peergrading url_name="PeerGradingSample"/>
<peergrading url_name="PeerGradingScored"/>
</chapter>
</course>
<peergrading/>
\ No newline at end of file
<peergrading is_graded="True" max_grade="1" use_for_single_location="False" link_to_location="i4x://edX/open_ended/combinedopenended/SampleQuestion"/>
\ No newline at end of file
{
    "course/2012_Fall": {
        "graceperiod": "2 days 5 hours 59 minutes 59 seconds",
        "start": "2015-07-17T12:00",
        "display_name": "Self Assessment Test",
        "graded": "true"
    },
    "chapter/Overview": {
        "display_name": "Overview"
    },
    "combinedopenended/SampleQuestion": {
        "display_name": "Sample Question"
    },
    "peergrading/PeerGradingSample": {
        "display_name": "Sample Question"
    }
}
<course org="edX" course="sa_test" url_name="2012_Fall"/>
<selfassessment attempts='10'>
    <prompt>
        What is the meaning of life?
    </prompt>
    <rubric>
        This is a rubric.
    </rubric>
    <submitmessage>
        Thanks for your submission!
    </submitmessage>
    <hintprompt>
        Enter a hint below:
    </hintprompt>
</selfassessment>
\ No newline at end of file
@@ -84,7 +84,9 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
         data = {'location': self.location}
         r = self.check_for_post_code(200, url, data)
         d = json.loads(r.content)

         self.assertTrue(d['success'])
         self.assertEquals(d['submission_id'], self.mock_service.cnt)
         self.assertIsNotNone(d['submission'])
@@ -130,6 +132,7 @@ class TestStaffGradingService(LoginEnrollmentTestCase):
         r = self.check_for_post_code(200, url, data)
         d = json.loads(r.content)
         self.assertTrue(d['success'], str(d))
         self.assertIsNotNone(d['problem_list'])
@@ -179,7 +182,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
         data = {'location': self.location}
         r = self.peer_module.get_next_submission(data)
-        d = json.loads(r)
+        d = r
         self.assertTrue(d['success'])
         self.assertIsNotNone(d['submission_id'])
         self.assertIsNotNone(d['prompt'])
@@ -213,7 +217,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
         qdict.keys = data.keys
         r = self.peer_module.save_grade(qdict)
-        d = json.loads(r)
+        d = r
         self.assertTrue(d['success'])

     def test_save_grade_missing_keys(self):
@@ -225,7 +230,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
     def test_is_calibrated_success(self):
         data = {'location': self.location}
         r = self.peer_module.is_student_calibrated(data)
-        d = json.loads(r)
+        d = r
         self.assertTrue(d['success'])
         self.assertTrue('calibrated' in d)
@@ -239,9 +245,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase):
         data = {'location': self.location}
         r = self.peer_module.show_calibration_essay(data)
-        d = json.loads(r)
-        log.debug(d)
-        log.debug(type(d))
+        d = r
         self.assertTrue(d['success'])
         self.assertIsNotNone(d['submission_id'])
         self.assertIsNotNone(d['prompt'])
...