Commit a6cc30d1 by Vik Paruchuri

Fix tests, address review feedback

parent d782278d
@@ -171,7 +171,6 @@ class @CombinedOpenEnded
     @results_container = @$(@result_container_sel)
     @combined_rubric_container = @$(@combined_rubric_sel)
     # Where to put the rubric once we load it
     @oe = @$(@open_ended_child_sel)
...
@@ -9,7 +9,7 @@ import self_assessment_module
 import open_ended_module
 from functools import partial
 from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST
-from peer_grading_service import PeerGradingService, MockPeerGradingService
+from peer_grading_service import PeerGradingService, MockPeerGradingService, GradingServiceError
 log = logging.getLogger("mitx.courseware")
@@ -40,10 +40,10 @@ HUMAN_TASK_TYPE = {
 }
 HUMAN_STATES = {
-    'intitial' : "Not started.",
-    'assessing' : "Being scored.",
-    'intermediate_done' : "Scoring finished.",
-    'done' : "Complete."
+    'intitial': "Not started.",
+    'assessing': "Being scored.",
+    'intermediate_done': "Scoring finished.",
+    'done': "Complete.",
 }
 # Default value that controls whether or not to skip basic spelling checks in the controller
@@ -438,6 +438,7 @@ class CombinedOpenEndedV1Module():
             grader_type = grader_types[0]
         else:
             grader_type = "IN"
+            grader_types = ["IN"]
         if grader_type in HUMAN_GRADER_TYPE:
             human_grader_name = HUMAN_GRADER_TYPE[grader_type]
@@ -514,33 +515,46 @@ class CombinedOpenEndedV1Module():
         return return_html

     def check_if_student_has_done_needed_grading(self):
+        """
+        Checks with the ORA server to see if the student has completed the needed peer grading to be
+        shown their grade. For example, if a student submits one response and three peers grade that
+        response, the student cannot see their grades and feedback unless they reciprocate.
+        Output:
+            success - boolean indicator of success
+            allowed_to_submit - boolean indicator of whether the student has done their needed grading or not
+            error_message - if not success, explains why
+        """
         student_id = self.system.anonymous_student_id
         success = False
         allowed_to_submit = True
+        error_string = ("<h4>Feedback not available yet</h4>"
+                        "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>"
+                        "<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>"
+                        "<p>You have made {3} submissions.</p>")
         try:
             response = self.peer_gs.get_data_for_location(self.location.url(), student_id)
-            log.info(response)
             count_graded = response['count_graded']
             count_required = response['count_required']
             student_sub_count = response['student_sub_count']
             count_available = response['count_available']
             success = True
-        except:
+        except GradingServiceError:
             # This is a dev_facing_error
             log.error("Could not contact external open ended graders for location {0} and student {1}".format(
                 self.location, student_id))
             # This is a student_facing_error
             error_message = "Could not contact the graders. Please notify course staff."
             return success, allowed_to_submit, error_message
+        except KeyError:
+            log.error("Invalid response from grading server for location {0} and student {1}".format(self.location, student_id))
+            error_message = "Received invalid response from the graders. Please notify course staff."
+            return success, allowed_to_submit, error_message
         if count_graded >= count_required or count_available==0:
-            return success, allowed_to_submit, ""
+            error_message = ""
+            return success, allowed_to_submit, error_message
         else:
             allowed_to_submit = False
             # This is a student_facing_error
-            error_string = ("<h4>Feedback not available yet</h4>"
-                            "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>"
-                            "<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>"
-                            "<p>You have made {3} submissions.</p>")
             error_message = error_string.format(count_required - count_graded, count_graded, count_required,
                                                 student_sub_count)
             return success, allowed_to_submit, error_message
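
Editor's note: the new docstring codifies the reciprocity rule, so a short sketch may help. The following is not edx-platform code; the function name and the counts are hypothetical, and it only mirrors the branch logic of the method above.

```python
def has_done_needed_grading(count_graded, count_required, count_available):
    """Grading is 'done' when the student has graded enough peers,
    or when there is nothing left for them to grade."""
    if count_graded >= count_required or count_available == 0:
        return True, ""
    remaining = count_required - count_graded
    return False, "You need to peer grade {0} more submissions.".format(remaining)

# A student who graded 1 of 3 required peers, with submissions still available:
allowed, message = has_done_needed_grading(1, 3, count_available=5)
assert not allowed and "2 more" in message
```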
@@ -562,9 +576,12 @@ class CombinedOpenEndedV1Module():
             rubric_number+=1
         response = self.get_last_response(rubric_number)
         score_length = len(response['grader_types'])
-        for z in xrange(0,score_length):
-            feedback = response['feedback_dicts'][z].get('feedback', '')
-            if response['grader_types'][z] in HUMAN_GRADER_TYPE.keys():
+        for z in xrange(score_length):
+            if response['grader_types'][z] in HUMAN_GRADER_TYPE:
+                try:
+                    feedback = response['feedback_dicts'][z].get('feedback', '')
+                except TypeError:
+                    return {'success' : False}
                 rubric_scores = [[response['rubric_scores'][z]]]
                 grader_types = [[response['grader_types'][z]]]
                 feedback_items = [[response['feedback_items'][z]]]
@@ -664,7 +681,7 @@ class CombinedOpenEndedV1Module():
         self.student_attempts +=1
         self.state = self.INITIAL
         self.ready_to_reset = False
-        for i in xrange(0, len(self.task_xml)):
+        for i in xrange(len(self.task_xml)):
             self.current_task_number = i
             self.setup_next_task(reset=True)
             self.current_task.reset(self.system)
...
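
Editor's note: a recurring theme in this file is replacing bare `except:` clauses with specific exceptions: `GradingServiceError` for transport failures and `KeyError` for responses that arrive but lack expected fields. A self-contained sketch of that pattern follows; the exception class is a stand-in (the real one lives in `peer_grading_service`), and `fetch_counts` is a hypothetical helper, not a function in this codebase.

```python
class GradingServiceError(Exception):
    """Stand-in for the exception exported by peer_grading_service."""

def fetch_counts(service, location, student_id):
    """Return (success, counts-or-error-message), mirroring the handlers above."""
    try:
        response = service.get_data_for_location(location, student_id)
        counts = (response['count_graded'], response['count_required'])
    except GradingServiceError:
        # The service was unreachable. A bare `except:` here would also
        # have hidden genuine programming errors such as NameError.
        return False, "Could not contact the graders."
    except KeyError:
        # The service answered, but the payload was missing an expected field.
        return False, "Received invalid response from the graders."
    return True, counts
```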
@@ -206,27 +206,39 @@ class CombinedOpenEndedRubric(object):
     def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
         success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
                                                                                       feedback_types)
+        #Get all the categories in the rubric
         rubric_categories = self.extract_categories(rubric_xml)
+        #Get a list of max scores, each entry belonging to a rubric category
         max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
         actual_scores = []
+        #Get the highest possible score across all categories
         max_score = max(max_scores)
-        for i in xrange(0, len(rubric_categories)):
-            category = rubric_categories[i]
-            for j in xrange(0, len(category['options'])):
+        #Loop through each category
+        for i,category in enumerate(rubric_categories):
+            #Loop through each option in the category
+            for j in xrange(len(category['options'])):
+                #Initialize an empty grader types list
                 rubric_categories[i]['options'][j]['grader_types'] = []
-                for tuple in score_tuples:
-                    if tuple[1] == i and tuple[2] == j:
-                        for grader_type in tuple[3]:
+                #Score tuples are a flat data structure with (category, option, grader_type_list) for selected graders
+                for tup in score_tuples:
+                    if tup[1] == i and tup[2] == j:
+                        for grader_type in tup[3]:
+                            #Set the rubric grader type to the tuple grader types
                             rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
+                        #Grab the score and add it to the actual scores. j will be the score for the
+                        #selected grader type
                         if len(actual_scores)<=i:
+                            #Initialize a new list in the list of lists
                            actual_scores.append([j])
                         else:
+                            #If a list in the list of lists for this position exists, append to it
                            actual_scores[i] += [j]
         actual_scores = [sum(i)/len(i) for i in actual_scores]
         correct = []
+        #Define whether the student is "correct" (1), "incorrect" (0), or "partially correct" (.5)
         for (i,a) in enumerate(actual_scores):
-            if int(a)/max_scores[i]==1:
+            if int(a) == max_scores[i]:
                 correct.append(1)
             elif int(a)==0:
                 correct.append(0)
...
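
Editor's note: the last change in that hunk, `int(a)/max_scores[i]==1` becoming `int(a) == max_scores[i]`, is worth a sketch. Under Python 2, `/` on two ints floor-divides, so the old test asked "does the score floor-divide the max to exactly 1?" rather than "does the score equal the max?". The values below are hypothetical; `//` is used so the behavior is identical on Python 2 and 3.

```python
max_score = 4

# Full credit: both tests agree.
assert (4 // max_score == 1) and (4 == max_score)

# Partial credit: both tests agree it is not full credit.
assert not (3 // max_score == 1) and not (3 == max_score)

# Out-of-range score: the division test is fooled, the equality test is not.
assert (5 // max_score == 1) and not (5 == max_score)
```

Scores above the category maximum should not occur in practice, but the rewritten comparison removes the reliance on that invariant and reads as what it means.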
@@ -62,7 +62,6 @@ class GradingService(object):
        """
        Make a get request to the grading controller
        """
-        log.debug(params)
        op = lambda: self.session.get(url,
                                      allow_redirects=allow_redirects,
                                      params=params)
...
@@ -227,7 +227,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
            count_graded = self.student_data_for_location['count_graded']
            count_required = self.student_data_for_location['count_required']
        except:
-            success, response = self.query_data_for_location()
+            success, response = self.query_data_for_location(self.location)
            if not success:
                log.exception(
                    "No instance data found and could not get data from controller for loc {0} student {1}".format(
@@ -311,7 +311,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        """
        required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
-        if 'submission_flagged' not in data or data['submission_flagged'] in ["false", False, "False"]:
+        if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
            required.append("rubric_scores[]")
        success, message = self._check_required(data, set(required))
        if not success:
@@ -325,6 +325,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
        try:
            response = self.peer_gs.save_grade(**data_dict)
            success, location_data = self.query_data_for_location(data_dict['location'])
+            # Don't check `success` here: if the save_grade call failed, it already raised
+            # the same exception that would have made `success` false.
            response.update({'required_done' : False})
            if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded'])>=int(location_data['count_required']):
                response['required_done'] = True
@@ -507,7 +509,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
            error_text = "Could not get list of problems to peer grade. Please notify course staff."
            log.error(error_text)
            success = False
-        except:
+        except Exception:
            log.exception("Could not contact peer grading service.")
            success = False
@@ -518,7 +520,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        '''
        try:
            return modulestore().get_instance(self.system.course_id, location)
-        except:
+        except Exception:
            # the linked problem doesn't exist
            log.error("Problem {0} does not exist in this course".format(location))
            raise
@@ -528,14 +530,14 @@ class PeerGradingModule(PeerGradingFields, XModule):
            problem_location = problem['location']
            try:
                descriptor = _find_corresponding_module_for_location(problem_location)
-            except:
+            except Exception:
                continue
            if descriptor:
                problem['due'] = descriptor.lms.due
                grace_period = descriptor.lms.graceperiod
                try:
                    problem_timeinfo = TimeInfo(problem['due'], grace_period)
-                except:
+                except Exception:
                    log.error("Malformed due date or grace period string for location {0}".format(problem_location))
                    raise
                if self._closed(problem_timeinfo):
...
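
Editor's note: the remaining changes in that file swap bare `except:` for `except Exception:`. The practical difference is that a bare clause also traps `BaseException` subclasses such as `KeyboardInterrupt` and `SystemExit`, so a Ctrl-C during one of these calls could be silently swallowed. A minimal illustration with a hypothetical `lookup` helper:

```python
def lookup(store, key):
    """Return store[key], or None if the key is missing."""
    try:
        return store[key]
    except Exception:  # narrowed: KeyboardInterrupt and SystemExit still propagate
        return None

assert lookup({'a': 1}, 'a') == 1
assert lookup({'a': 1}, 'b') is None
```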
@@ -73,6 +73,7 @@ class OpenEndedChildTest(unittest.TestCase):
    def setUp(self):
        self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
        self.openendedchild = OpenEndedChild(self.test_system, self.location,
                                             self.definition, self.descriptor, self.static_data, self.metadata)
@@ -203,7 +204,7 @@ class OpenEndedModuleTest(unittest.TestCase):
    def setUp(self):
        self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
        self.test_system.location = self.location
        self.mock_xqueue = MagicMock()
        self.mock_xqueue.send_to_queue.return_value = (None, "Message")
@@ -378,6 +379,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
        full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
        descriptor = Mock(data=full_definition)
        test_system = get_test_system()
+        test_system.open_ended_grading_interface = None
        combinedoe_container = CombinedOpenEndedModule(
            test_system,
            descriptor,
@@ -504,6 +506,7 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
    def setUp(self):
        self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
        self.test_system.xqueue['interface'] = Mock(
            send_to_queue=Mock(side_effect=[1, "queued"])
        )
@@ -537,9 +540,9 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
        module = self.get_module_from_location(self.problem_location, COURSE)
        #Simulate a student saving an answer
-        module.handle_ajax("save_answer", {"student_answer": self.answer})
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
+        html = module.handle_ajax("get_html", {})
+        module.handle_ajax("save_answer", {"student_answer": self.answer, "can_upload_files" : False, "student_file" : None})
+        html = module.handle_ajax("get_html", {})
        #Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
@@ -547,8 +550,7 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
+        rubric = module.handle_ajax("get_combined_rubric", {})
        #Move to the next step in the problem
        module.handle_ajax("next_problem", {})
@@ -585,7 +587,6 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        module.handle_ajax("get_status", {})
        #Move to the next step in the problem
        try:
@@ -628,12 +629,8 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
        #Get html and other data client will request
        module.get_html()
-        legend = module.handle_ajax("get_legend", {})
-        self.assertTrue(isinstance(legend, basestring))
-        module.handle_ajax("get_status", {})
        module.handle_ajax("skip_post_assessment", {})
-        self.assertTrue(isinstance(legend, basestring))
        #Get all results
        module.handle_ajax("get_combined_rubric", {})
@@ -654,6 +651,7 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
    def setUp(self):
        self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
        self.test_system.xqueue['interface'] = Mock(
            send_to_queue=Mock(side_effect=[1, "queued"])
        )
@@ -670,8 +668,6 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
        #Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
        #Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
@@ -679,8 +675,6 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
        #Move to the next step in the problem
        module.handle_ajax("next_problem", {})
...
@@ -61,7 +61,7 @@ class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
        Try getting data from the external grading service
        @return:
        """
-        success, data = self.peer_grading.query_data_for_location()
+        success, data = self.peer_grading.query_data_for_location(self.problem_location.url())
        self.assertEqual(success, True)

    def test_get_score(self):
...