Commit 9c656b3d by Sarina Canelake

Remove ORA1 XModule Python tests

parent 2af299c8
@@ -678,57 +678,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
self.assertEqual(component.published_on, published_date)
self.assertEqual(component.published_by, published_by)
def test_export_course_with_peer_component(self):
"""
Test export course when link_to_location is given in peer grading interface settings.
"""
name = "export_peer_component"
locations = self._create_test_tree(name)
# Insert the test block directly into the module store
problem_location = Location('edX', 'tree{}'.format(name), name, 'combinedopenended', 'test_peer_problem')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
problem_location.block_type,
block_id=problem_location.block_id
)
interface_location = Location('edX', 'tree{}'.format(name), name, 'peergrading', 'test_peer_interface')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
interface_location.block_type,
block_id=interface_location.block_id
)
self.draft_store._update_single_item(
as_draft(interface_location),
{
'definition.data': {},
'metadata': {
'link_to_location': unicode(problem_location),
'use_for_single_location': True,
},
},
)
component = self.draft_store.get_item(interface_location)
self.assertEqual(unicode(component.link_to_location), unicode(problem_location))
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
# export_course_to_xml should work.
export_course_to_xml(
self.draft_store, self.content_store, interface_location.course_key,
root_dir, 'test_export'
)
def test_draft_modulestore_create_child_with_position(self):
"""
This test is designed to hit a specific set of use cases having to do with
......
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
OpenEndedChild
OpenEndedModule
"""
import json
import logging
import unittest
from datetime import datetime
from lxml import etree
from lxml.html import fragment_fromstring
from mock import Mock, MagicMock, patch
from pytz import UTC
from webob.multidict import MultiDict
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
from opaque_keys.edx.locations import Location
from xmodule.tests import get_test_system, test_util_open_ended
from xmodule.progress import Progress
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_util_open_ended import (
DummyModulestore, TEST_STATE_SA_IN,
MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
INSTANCE_INCONSISTENT_STATE5
)
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
import capa.xqueue_interface as xqueue_interface
log = logging.getLogger(__name__)
ORG = 'edX'
COURSE = 'open_ended' # name of directory with course data
class OpenEndedChildTest(unittest.TestCase):
"""
Test the open ended child class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': "",
'open_ended_grading_interface': {},
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
definition = Mock()
descriptor = Mock()
def setUp(self):
super(OpenEndedChildTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.openendedchild = OpenEndedChild(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, new_answer)
new_answer = "Newer Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(new_answer, answer)
def test_record_latest_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
new_score = 3
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 3)
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], 0)
self.assertEqual(score['total'], self.static_data['max_score'])
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
'incorrect')
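# A minimal standalone sketch (not part of the original test suite) of the
# answer/score history pattern the OpenEndedChildTest cases above exercise:
# each new history entry snapshots an answer, and the latest score attaches
# to the most recent entry. All names here are illustrative, not the real API.
def _history_pattern_sketch():
    history = []

    def new_entry(answer):
        history.append({'answer': answer, 'score': None})

    def record_score(score):
        history[-1]['score'] = score

    new_entry("New Answer")
    record_score(3)
    assert history[-1] == {'answer': "New Answer", 'score': 3}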
class OpenEndedModuleTest(unittest.TestCase):
"""
Test the open ended module class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = etree.XML('''<rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
</category>
</rubric>''')
max_score = 4
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
definition = {'oeparam': oeparam}
descriptor = Mock()
feedback = {
"success": True,
"feedback": "Grader Feedback"
}
single_score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(rubric)
}
multiple_score_msg = {
'correct': True,
'score': [0, 1],
'msg': 'Grader Message',
'feedback': [json.dumps(feedback), json.dumps(feedback)],
'grader_type': 'PE',
'grader_id': ['1', '2'],
'submission_id': '1',
'success': True,
'rubric_scores': [[0], [0]],
'rubric_scores_complete': [True, True],
'rubric_xml': [etree.tostring(rubric), etree.tostring(rubric)]
}
def setUp(self):
super(OpenEndedModuleTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (0, "Queued")
def constructed_callback(dispatch="score_update"):
return dispatch
self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback,
'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
"""Test message_post() sends feedback to xqueue."""
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertTrue(result['success'])
# make sure it's actually sending something we want to the queue
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback'])
self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id']))
self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id']))
self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score'])
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.DONE)
def test_message_post_fail(self):
"""Test message_post() if unable to send feedback to xqueue."""
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertFalse(result['success'])
state = json.loads(self.openendedmodule.get_instance_state())
self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)
def test_send_to_grader(self):
student_response = "This is a student submission"
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
self.assertEqual(mock_send_to_queue_body_arg['max_score'], self.max_score)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_send_to_grader_fail(self):
"""Test send_to_grader() if unable to send submission to xqueue."""
student_response = "This is a student submission"
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertFalse(result)
def test_save_answer_fail(self):
"""Test save_answer() if unable to send submission to grader."""
submission = "This is a student submission"
self.openendedmodule.send_to_grader = Mock(return_value=(False, "Failed"))
response = self.openendedmodule.save_answer(
{"student_answer": submission},
get_test_system()
)
self.assertFalse(response['success'])
self.assertNotEqual(self.openendedmodule.latest_answer(), submission)
self.assertEqual(self.openendedmodule.stored_answer, submission)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.INITIAL)
self.assertEqual(state['stored_answer'], submission)
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.single_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def update_score_multiple(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
self.update_score_single()
assessment = self.openendedmodule.latest_post_assessment(self.test_system)
self.assertNotEqual(assessment, '')
# check for errors
self.assertNotIn('errors', assessment)
def test_update_score_single(self):
self.update_score_single()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 4)
def test_update_score_multiple(self):
"""
Tests that a score of [0, 1] gets aggregated to 1. A change in behavior added by @jbau
"""
self.update_score_multiple()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 1)
@patch('xmodule.open_ended_grading_classes.open_ended_module.log.error')
def test_update_score_nohistory(self, error_logger):
"""
Tests error handling when there is no child_history
"""
# NOTE that we are not creating any history items
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
error_msg = ("Trying to update score without existing studentmodule child_history:\n"
" location: i4x://edX/sa_test/selfassessment/SampleQuestion\n"
" score: 1\n"
" grader_ids: [u'1', u'2']\n"
" submission_ids: [u'1', u'1']")
self.openendedmodule.update_score(get, self.test_system)
(msg,), _ = error_logger.call_args
self.assertTrue(error_logger.called)
self.assertEqual(msg, error_msg)
def test_open_ended_display(self):
"""
Test storing answer with the open ended module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Mock out the send_to_grader function so it doesn't try to connect to the xqueue.
test_module.send_to_grader = Mock(return_value=(True, "Success"))
# Submit a student response to the question.
test_module.handle_ajax(
"save_answer",
{"student_answer": submitted_response},
get_test_system()
)
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
def test_parse_score_msg(self):
"""
Test _parse_score_msg with empty dict.
"""
assessment = self.openendedmodule._parse_score_msg("{}", self.test_system)
self.assertEqual(assessment.get("valid"), False)
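# Hedged sketch of the xqueue body shape that test_message_post and
# test_send_to_grader assert against above: the queued body is JSON whose
# 'student_info' value is itself a JSON-encoded string. This helper only
# illustrates that double encoding; it is not the real xqueue contract.
def _sketch_xqueue_body(student_response, anonymous_student_id, submission_time):
    return json.dumps({
        'student_response': student_response,
        'student_info': json.dumps({
            'anonymous_student_id': anonymous_student_id,
            'submission_time': submission_time,
        }),
    })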
class CombinedOpenEndedModuleTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule
"""
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
</openended>'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
def test_get_tag_name(self):
"""
Test to see if the xml tag name is correct
"""
name = self.combinedoe.get_tag_name("<t>Tag</t>")
self.assertEqual(name, "t")
def test_get_last_response(self):
"""
See if we can parse the last response
"""
response_dict = self.combinedoe.get_last_response(0)
self.assertEqual(response_dict['type'], "selfassessment")
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
def test_create_task(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
self.assertIsInstance(second_task, OpenEndedModule)
def test_get_task_number(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.get_task_number(0)
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.get_task_number(1)
self.assertIsInstance(second_task, OpenEndedModule)
third_task = combinedoe.get_task_number(2)
self.assertIsNone(third_task)
def test_update_task_states(self):
"""
See if we can update the task states properly
"""
changed = self.combinedoe.update_task_states()
self.assertFalse(changed)
current_task = self.combinedoe.current_task
current_task.change_state(CombinedOpenEndedV1Module.DONE)
changed = self.combinedoe.update_task_states()
self.assertTrue(changed)
def test_get_max_score(self):
"""
Try to get the max score of the problem
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
max_score = self.combinedoe.max_score()
self.assertEqual(max_score, 1)
def test_container_get_max_score(self):
"""
See if we can get the max score from the actual xmodule
"""
# The progress view requires that this function be exposed
max_score = self.combinedoe_container.max_score()
self.assertEqual(max_score, None)
def test_container_get_progress(self):
"""
See if we can get the progress from the actual xmodule
"""
progress = self.combinedoe_container.get_progress()
self.assertEqual(progress, None)
def test_get_progress(self):
"""
Test if we can get the correct progress from the combined open ended class
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
progress = self.combinedoe.get_progress()
self.assertIsInstance(progress, Progress)
# progress._a is the score of the xmodule, which is 0 right now.
self.assertEqual(progress._a, 0)
# progress._b is the max_score (which is 1), divided by the weight (which is 1).
self.assertEqual(progress._b, 1)
def test_container_weight(self):
"""
Check the problem weight in the container
"""
weight = self.combinedoe_container.weight
self.assertEqual(weight, 1)
def test_container_child_weight(self):
"""
Test the class to see if it picks up the right weight
"""
weight = self.combinedoe_container.child_module.weight
self.assertEqual(weight, 1)
def test_get_score(self):
"""
See if scoring works
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 0)
self.assertEqual(score_dict['total'], 1)
def test_alternate_orderings(self):
"""
Try multiple orderings of task definitions to see if the problem renders the different steps correctly.
"""
t1 = self.task_xml1
t2 = self.task_xml2
xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
for xml in xml_to_test:
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
changed = combinedoe.update_task_states()
self.assertFalse(changed)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA_IN})
def test_get_score_realistic(self):
"""
Try to parse the correct score from a json instance state
"""
instance_state = json.loads(MOCK_INSTANCE_STATE)
rubric = """
<rubric>
<rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question. It either fails to address
the question or does so in a limited way, with no evidence of higher-order thinking.
</option>
<option>
The response is a marginal answer to the question. It may contain some elements of a
proficient response, but it is inaccurate or incomplete.
</option>
<option>
The response is a proficient answer to the question. It is generally correct, although
it may contain minor inaccuracies. There is limited evidence of higher-order thinking.
</option>
<option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
</category>
</rubric>
</rubric>
"""
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric),
'task_xml': [self.task_xml1, self.task_xml2]}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 15.0)
def generate_oe_module(self, task_state, task_number, task_xml):
"""
Return a combined open ended module with the specified parameters
"""
definition = {
'prompt': etree.XML(self.prompt),
'rubric': etree.XML(self.rubric),
'task_xml': task_xml
}
descriptor = Mock(data=definition)
module = Mock(scope_ids=Mock(usage_id='dummy-usage-id'))
instance_state = {'task_states': task_state, 'graded': True}
if task_number is not None:
instance_state.update({'current_task_number': task_number})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
return combinedoe
def ai_state_reset(self, task_state, task_number=None):
"""
See if state is properly reset
"""
combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2])
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
if combinedoe.is_scored:
self.assertEqual(score['score'], 0)
else:
self.assertEqual(score['score'], None)
def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None):
"""
See if state stays the same
"""
if tasks is None:
tasks = [self.task_xml1, self.task_xml2]
combinedoe = self.generate_oe_module(task_state, task_number, tasks)
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
self.assertEqual(int(score['score']), iscore)
def test_ai_state_reset(self):
self.ai_state_reset(TEST_STATE_AI)
def test_ai_state2_reset(self):
self.ai_state_reset(TEST_STATE_AI2)
def test_ai_invalid_state(self):
self.ai_state_reset(TEST_STATE_AI2_INVALID)
def test_ai_state_reset_task_number(self):
self.ai_state_reset(TEST_STATE_AI, task_number=2)
self.ai_state_reset(TEST_STATE_AI, task_number=5)
self.ai_state_reset(TEST_STATE_AI, task_number=1)
self.ai_state_reset(TEST_STATE_AI, task_number=0)
def test_ai_state_success(self):
self.ai_state_success(TEST_STATE_AI)
def test_state_single(self):
self.ai_state_success(TEST_STATE_SINGLE, iscore=12)
def test_state_pe_single(self):
self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
def test_deprecation_message(self):
"""
Test the validation message produced for deprecation.
"""
# pylint: disable=no-member
validation = self.combinedoe_container.validate()
deprecation_msg = "ORA1 is no longer supported. To use this assessment, " \
"replace this ORA1 component with an ORA2 component."
self.assertEqual(
validation.summary.text,
deprecation_msg
)
self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR)
class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule rubric scores consistency.
"""
# location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
# All these variables are used to construct the xmodule descriptor.
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question. It either fails to address the question
or does so in a limited way, with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 10
metadata = {'attempts': '10', 'max_score': max_score}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="10">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
</openended>'''
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleConsistencyTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
def test_get_score(self):
"""
If the grader type is ML, the score should be updated from rubric scores. Aggregate rubric scores = sum([3]) * 5.
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_pe_grader(self):
"""
If the grader type is PE, the score should not be updated from rubric scores. Aggregate rubric scores = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
score_dict = combinedoe.get_score()
self.assertNotEqual(score_dict['score'], 15.0)
def test_get_score_with_different_score_value_in_rubric(self):
"""
If the grader type is ML, the score should be updated from rubric scores. Aggregate rubric scores = sum([5]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 25.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_old_task_states(self):
"""
If the grader type is ML and old_task_states are present in the inconsistent instance state, the score
should be updated from rubric scores. Aggregate rubric scores = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_score_missing(self):
"""
If the grader type is ML and the score field is missing from the inconsistent instance state, the score
should be updated from rubric scores. Aggregate rubric scores = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
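# Hedged sketch of the consistency rule the docstrings above describe for
# ML-graded state: the displayed score is recomputed from the rubric scores,
# matching the "sum([3]) * 5" arithmetic in those docstrings. The weight of 5
# is taken from the docstrings; the helper itself is illustrative only.
def _ml_rubric_score_sketch(rubric_scores, weight=5):
    return float(sum(rubric_scores)) * weight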
class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
"""
Test the student flow in the combined open ended xmodule
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
def construct_callback(dispatch="score_update"):
return dispatch
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
test_system.xqueue['construct_callback'] = construct_callback
return test_system
def setUp(self):
super(OpenEndedModuleXmlTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location)
def test_open_ended_load_and_save(self):
"""
See if we can load the module and save an answer
@return:
"""
# Try saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer)
def test_open_ended_flow_reset(self):
"""
Test the flow of the module when we complete the self-assessment step and then reset
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("get_html", {})
self._handle_ajax("save_answer", {"student_answer": self.answer})
self._handle_ajax("get_html", {})
# Mock a student submitting an assessment (see the MultiDict sketch after this class)
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
self._handle_ajax("get_combined_rubric", {})
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "assessing")
self._handle_ajax("reset", {})
self.assertEqual(self._module().current_task_number, 0)
def test_open_ended_flow_with_xqueue_failure(self):
"""
Test a two-step problem where the student first goes through the self-assessment step and then the
open-ended step, with the xqueue failing in the first step.
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
mock_xqueue_interface = Mock(
send_to_queue=Mock(return_value=(1, "Not Queued"))
)
# Call handle_ajax on the module with xqueue down
module = self._module()
with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}):
module.handle_ajax("save_assessment", assessment_dict)
self.assertEqual(module.current_task_number, 1)
self.assertTrue((module.child_module.get_task_number(1).child_created))
module.save()
# Check that next time the OpenEndedModule is loaded it calls send_to_grader
with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader:
mock_send_to_grader.return_value = (False, "Not Queued")
self._module().child_module.get_score()
self.assertTrue(mock_send_to_grader.called)
self.assertTrue((self._module().child_module.get_task_number(1).child_created))
# Loading it this time should send submission to xqueue correctly
self.assertFalse((self._module().child_module.get_task_number(1).child_created))
self.assertEqual(self._module().current_task_number, 1)
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({
"spelling": "Spelling: Ok.",
"grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other "
"children have taken off the shelf . but if i have the right to remove that book "
"from the shelf that work i abhor then you also have exactly the same right and "
"so does everyone else . and then we <bg>have no books left</bg> "
"on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author "
"write a persuasive essay to a newspaper reflecting your vies on censorship "
"<bg>in libraries . do</bg> you believe that certain materials , such as books , "
"music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves "
"if they are found <bg>offensive ? support your</bg> position with convincing "
"arguments from your own experience , observations <bg>, and or reading .</bg> "
}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': '''
<rubric>
<category>
<description>Writing Applications</description>
<score>0</score>
<option points='0'>
The essay loses focus, has little information or supporting details, and the
organization makes it difficult to follow.
</option>
<option points='1'>
The essay presents a mostly unified theme, includes sufficient information to convey
the theme, and is generally organized well.
</option>
</category>
<category>
<description> Language Conventions </description>
<score>0</score>
<option points='0'>
The essay demonstrates a reasonable command of proper spelling and grammar.
</option>
<option points='1'>
The essay demonstrates superior command of proper spelling and grammar.
</option>
</category>
</rubric>
''',
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
def test_open_ended_flow_correct(self):
"""
Test a two-step problem where the student first goes through the self-assessment step and then the
open-ended step.
@return:
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({
"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other "
"children have taken off the shelf . but if i have the right to remove that book "
"from the shelf that work i abhor then you also have exactly the same right and "
"so does everyone else . and then we <bg>have no books left</bg> on the shelf for "
"any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay "
"to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> "
"you believe that certain materials , such as books , music , movies , magazines , "
"<bg>etc . , should be</bg> removed from the shelves if they are found "
"<bg>offensive ? support your</bg> position with convincing arguments from your "
"own experience , observations <bg>, and or reading .</bg> "
}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': '''
<rubric>
<category>
<description>Writing Applications</description>
<score>0</score>
<option points='0'>
The essay loses focus, has little information or supporting details, and
the organization makes it difficult to follow.
</option>
<option points='1'>
The essay presents a mostly unified theme, includes sufficient
information to convey the theme, and is generally organized well.
</option>
</category>
<category>
<description> Language Conventions </description>
<score>0</score>
<option points='0'>
The essay demonstrates a reasonable command of proper spelling and grammar.
</option>
<option points='1'>
The essay demonstrates superior command of proper spelling and grammar.
</option>
</category>
</rubric>
''',
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
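# Illustration of the MultiDict pattern used by the assessment submissions
# above: webob's MultiDict preserves repeated keys, which is how several
# 'score_list[]' values travel together in one simulated form post.
def _multidict_demo():
    scores = MultiDict({'assessment': 2})
    scores.extend(('score_list[]', val) for val in [1, 1])
    return scores.getall('score_list[]')  # -> [1, 1]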
class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to reset the problem
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion1Attempt")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlAttemptTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location)
def test_reset_fail(self):
"""
Test the flow of the module when we complete the self-assessment step and then reset.
Since the problem only allows one attempt, the reset should fail.
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
# Module should now be done
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "done")
# Try to reset, should fail because only 1 attempt is allowed
reset_data = json.loads(self._handle_ajax("reset", {}))
self.assertEqual(reset_data['success'], False)
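# Hedged sketch of the reset rule test_reset_fail pins down above: once the
# single allowed attempt is used, reset reports failure rather than raising.
# The error text here is hypothetical, not the module's actual message.
def _reset_rule_sketch(attempts_used, max_attempts=1):
    if attempts_used >= max_attempts:
        return {'success': False, 'error': 'Too many attempts.'}
    return {'success': True}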
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to upload images properly.
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestionImageUpload")
answer_text = "Hello, this is my amazing answer."
file_text = "Hello, this is my amazing file."
file_name = "Student file 1"
answer_link = "http://www.edx.org"
autolink_tag = '<a target="_blank" href='
autolink_tag_swapped = '<a href='
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.s3_interface = test_util_open_ended.S3_INTERFACE
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlImageUploadTest, self).setUp()
self.setup_modulestore(COURSE)
def test_file_upload_fail(self):
"""
Test to see if a student submission without a file attached fails.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer
response = module.handle_ajax("save_answer", {"student_answer": self.answer_text})
response = json.loads(response)
self.assertFalse(response['success'])
self.assertIn('error', response)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.S3Connection',
test_util_open_ended.MockS3Connection
)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.Key',
test_util_open_ended.MockS3Key
)
def test_file_upload_success(self):
"""
Test to see if a student submission with a file is handled properly.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a file
response = module.handle_ajax("save_answer", {
"student_answer": self.answer_text,
"valid_files_attached": True,
"student_file": [MockUploadedFile(self.file_name, self.file_text)],
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.file_name, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
def test_link_submission_success(self):
"""
Students can submit links instead of files. Check that the link is properly handled.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a link.
response = module.handle_ajax("save_answer", {
"student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.answer_link, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
class OpenEndedModuleUtilTest(unittest.TestCase):
"""
Tests for the util functions of OpenEndedModule. Currently just for the html_sanitizer and <br/> inserter
"""
script_dirty = u'<script>alert("xss!")</script>'
script_clean = u'alert("xss!")'
img_dirty = u'<img alt="cats" height="200" onclick="eval()" src="http://example.com/lolcats.jpg" width="200">'
img_clean = u'<img width="200" alt="cats" height="200" src="http://example.com/lolcats.jpg">'
embed_dirty = u'<embed height="200" id="cats" onhover="eval()" src="http://example.com/lolcats.swf" width="200"/>'
embed_clean = u'<embed width="200" height="200" id="cats" src="http://example.com/lolcats.swf">'
iframe_dirty = u'<iframe class="cats" height="200" onerror="eval()" src="http://example.com/lolcats" width="200"/>'
iframe_clean = ur'<iframe (height="200" ?|class="cats" ?|width="200" ?|src="http://example.com/lolcats" ?)+></iframe>'
text = u'I am a \u201c\xfcber student\u201d'
text_lessthan_noencd = u'This used to be broken < by the other parser. 3>5'
text_lessthan_encode = u'This used to be broken &lt; by the other parser. 3&gt;5'
text_linebreaks = u"St\xfcdent submission:\nI like lamp."
text_brs = u"St\xfcdent submission:<br/>I like lamp."
link_text = u'I love going to www.lolcatz.com'
link_atag = u'I love going to <a target="_blank" href="http://www.lolcatz.com">www.lolcatz.com</a>'
def assertHtmlEqual(self, actual, expected):
"""
Assert that two strings represent the same html.
"""
return self._assertHtmlEqual(
fragment_fromstring(actual, create_parent='div'),
fragment_fromstring(expected, create_parent='div')
)
def _assertHtmlEqual(self, actual, expected):
"""
Assert that two HTML ElementTree elements are equal.
"""
self.assertEqual(actual.tag, expected.tag)
self.assertEqual(actual.attrib, expected.attrib)
self.assertEqual(actual.text, expected.text)
self.assertEqual(actual.tail, expected.tail)
self.assertEqual(len(actual), len(expected))
for actual_child, expected_child in zip(actual, expected):
self._assertHtmlEqual(actual_child, expected_child)
def test_script(self):
"""
Basic test for stripping <script>
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.script_dirty), self.script_clean)
def test_img(self):
"""
Basic test for passing through img, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.img_dirty), self.img_clean)
def test_embed(self):
"""
Basic test for passing through embed, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.embed_dirty), self.embed_clean)
def test_iframe(self):
"""
Basic test for passing through iframe, but stripping bad attr
"""
self.assertRegexpMatches(OpenEndedChild.sanitize_html(self.iframe_dirty), self.iframe_clean)
def test_text(self):
"""
Test for passing through text unchanged, including unicode
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)
def test_lessthan(self):
"""
Tests that `<` in text context is handled properly
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)
def test_linebreaks(self):
"""
tests the replace_newlines function
"""
self.assertHtmlEqual(OpenEndedChild.replace_newlines(self.text_linebreaks), self.text_brs)
def test_linkify(self):
"""
tests the replace_newlines function
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)
def test_combined(self):
"""
tests a combination of inputs
"""
test_input = u"{}\n{}\n{}\n\n{}{}\n{}".format(self.link_text,
self.text,
self.script_dirty,
self.embed_dirty,
self.text_lessthan_noencd,
self.img_dirty)
test_output = u"{}<br/>{}<br/>{}<br/><br/>{}{}<br/>{}".format(self.link_atag,
self.text,
self.script_clean,
self.embed_clean,
self.text_lessthan_encode,
self.img_clean)
self.assertHtmlEqual(OpenEndedChild.sanitize_html(test_input), test_output)
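# Standalone illustration of the newline handling checked in test_linebreaks
# above, assuming replace_newlines simply substitutes '\n' with '<br/>'
# (which is what the text_linebreaks/text_brs fixtures imply).
def _replace_newlines_sketch(text):
    return text.replace(u"\n", u"<br/>")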
"""
Test cases covering behaviors and workflows of the Peer Grading XBlock
"""
import unittest
import json
import logging
from mock import Mock, patch
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from xmodule.tests import get_test_system, get_test_descriptor_system
from xmodule.tests.test_util_open_ended import DummyModulestore
from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService
from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.validation import StudioValidationMessage
log = logging.getLogger(__name__)
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
calibrated_dict = {'location': "blah"}
coe_dict = {'location': coe_location.to_deprecated_string()}
save_dict = MultiDict({
'location': "blah",
'submission_id': 1,
'submission_key': "",
'score': 1,
'feedback': "",
'submission_flagged': False,
'answer_unknown': False,
})
save_dict.extend(('rubric_scores[]', val) for val in (0, 1))
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
super(PeerGradingModuleTest, self).setUp()
self.setup_modulestore(self.course_id.course)
self.peer_grading = self.get_module_from_location(self.problem_location)
self.coe = self.get_module_from_location(self.coe_location)
def test_module_closed(self):
"""
Test if peer grading is closed
@return:
"""
closed = self.peer_grading.closed()
self.assertFalse(closed)
def test_get_html(self):
"""
Test to see if the module can be rendered
@return:
"""
_html = self.peer_grading.get_html()
def test_get_data(self):
"""
Try getting data from the external grading service
@return:
"""
success, _data = self.peer_grading.query_data_for_location(self.problem_location)
self.assertTrue(success)
def test_get_score_none(self):
"""
Test getting the score.
"""
score = self.peer_grading.get_score()
# Score should be None.
self.assertIsNone(score['score'])
def test_get_max_score(self):
"""
Test getting the max score
@return:
"""
max_score = self.peer_grading.max_score()
self.assertEqual(max_score, None)
def get_next_submission(self):
"""
Test to see if we can get the next mock submission
@return:
"""
success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
self.assertEqual(success, True)
def test_save_grade(self):
"""
Test if we can save the grade
@return:
"""
response = self.peer_grading.save_grade(self.save_dict)
self.assertEqual(response['success'], True)
def test_is_student_calibrated(self):
"""
Check to see if the student has calibrated yet
@return:
"""
response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
self.assertTrue(response['success'])
def test_show_calibration_essay(self):
"""
        Test showing the calibration essay.
"""
response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
self.assertTrue(response['success'])
def test_save_calibration_essay(self):
"""
        Test saving the calibration essay.
"""
response = self.peer_grading.save_calibration_essay(self.save_dict)
self.assertTrue(response['success'])
def test_peer_grading_problem(self):
"""
        See if we can render a single problem.
"""
response = self.peer_grading.peer_grading_problem(self.coe_dict)
self.assertTrue(response['success'])
def test___find_corresponding_module_for_location_exceptions(self):
"""
        Unit test for the exception cases of _find_corresponding_module_for_location.
        Mainly for diff coverage.
"""
# pylint: disable=protected-access
with self.assertRaises(ItemNotFoundError):
self.peer_grading._find_corresponding_module_for_location(
Location('org', 'course', 'run', 'category', 'name', 'revision')
)
def test_get_instance_state(self):
"""
        Get the instance state dict.
"""
self.peer_grading.get_instance_state()
def test_save_grade_with_long_feedback(self):
"""
        Test that save_grade() returns an error message when the feedback is too long.
"""
feedback_fragment = "This is very long feedback."
        self.save_dict["feedback"] = feedback_fragment * (
            MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1
        )
response = self.peer_grading.save_grade(self.save_dict)
# Should not succeed.
self.assertEqual(response['success'], False)
self.assertEqual(
response['error'],
"Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
)
def test_get_score_success_fails(self):
"""
        Test that the score is None when query_data_for_location does not succeed.
"""
score_dict = self.get_score(False, 0, 0)
# Score dict should be None.
self.assertIsNone(score_dict)
def test_get_score(self):
"""
        Test that the score is 1.0 when the student has graded as many
        submissions as required.
"""
score_dict = self.get_score(True, 3, 3)
# Score should be 1.0.
self.assertEqual(score_dict["score"], 1.0)
        # Check the score again after the data has been stored in student_data_for_location on the xmodule.
_score_dict = self.peer_grading.get_score()
# Score should be 1.0.
self.assertEqual(_score_dict["score"], 1.0)
def test_get_score_zero(self):
"""
        Test that the score is 0.0 when the student has not graded the
        required number of submissions.
"""
score_dict = self.get_score(True, 2, 3)
# Score should be 0.0.
self.assertEqual(score_dict["score"], 0.0)
def get_score(self, success, count_graded, count_required):
"""
Returns the peer-graded score based on the provided graded/required values
"""
self.peer_grading.use_for_single_location_local = True
self.peer_grading.graded = True
# Patch for external grading service.
module_name = 'xmodule.peer_grading_module.PeerGradingModule.query_data_for_location'
with patch(module_name) as mock_query_data_for_location:
mock_query_data_for_location.return_value = (
success,
{"count_graded": count_graded, "count_required": count_required}
)
# Returning score dict.
return self.peer_grading.get_score()
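    # For example, get_score(True, 3, 3) patches the grading service to report
    # three of three required submissions graded, so the returned dict carries
    # a 'score' of 1.0; get_score(True, 2, 3) yields 0.0 (see the tests above).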
def test_deprecation_message(self):
"""
Test the validation message produced for deprecation.
"""
peer_grading_module = self.peer_grading
validation = peer_grading_module.validate()
self.assertEqual(len(validation.messages), 0)
self.assertEqual(
validation.summary.text,
"ORA1 is no longer supported. To use this assessment, replace this ORA1 component with an ORA2 component."
)
self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR)
class MockPeerGradingServiceProblemList(MockPeerGradingService):
"""
Mock object representing a set of peer-grading problems
"""
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
{
"num_graded": 3,
"num_pending": 681,
"num_required": 3,
"location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'),
"problem_name": "Peer-Graded Essay"
},
]}
class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
        Create a peer grading module from a test system.
"""
super(PeerGradingModuleScoredTest, self).setUp()
self.setup_modulestore(self.course_id.course)
def test_metadata_load(self):
peer_grading = self.get_module_from_location(self.problem_location)
self.assertFalse(peer_grading.closed())
def test_problem_list(self):
"""
Test to see if a peer grading problem list can be correctly initialized.
"""
# Initialize peer grading module.
peer_grading = self.get_module_from_location(self.problem_location)
# Ensure that it cannot find any peer grading.
html = peer_grading.peer_grading()
self.assertNotIn("Peer-Graded", html)
# Swap for our mock class, which will find peer grading.
peer_grading.peer_gs = MockPeerGradingServiceProblemList()
html = peer_grading.peer_grading()
self.assertIn("Peer-Graded", html)
class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading that is linked to an open ended module.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system.
"""
super(PeerGradingModuleLinkedTest, self).setUp()
self.setup_modulestore(self.course_id.course)
@property
def field_data(self):
"""
        Set up the proper field data for a peer grading module.
"""
return DictFieldData({
'data': '<peergrading/>',
'location': self.problem_location,
'use_for_single_location': True,
'link_to_location': self.coe_location.to_deprecated_string(),
'graded': True,
})
@property
def scope_ids(self):
"""
Return the proper scope ids for the peer grading module.
"""
return ScopeIds(None, None, self.problem_location, self.problem_location)
def _create_peer_grading_descriptor_with_linked_problem(self):
"""
Internal helper method to construct a peer grading XBlock
"""
# Initialize the peer grading module.
system = get_test_descriptor_system()
return system.construct_xblock_from_class(
PeerGradingDescriptor,
field_data=self.field_data,
scope_ids=self.scope_ids
)
def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True):
"""
Create a peer grading problem with a linked location.
"""
# Mock the linked problem descriptor.
linked_descriptor = Mock()
linked_descriptor.location = location
# Mock the peer grading descriptor.
pg_descriptor = Mock()
pg_descriptor.location = self.problem_location
if valid_linked_descriptor:
pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ]
else:
pg_descriptor.get_required_module_descriptors = lambda: []
test_system = self.get_module_system(pg_descriptor)
# Initialize the peer grading module.
peer_grading = PeerGradingModule(
pg_descriptor,
test_system,
self.field_data,
self.scope_ids,
)
return peer_grading
def _get_descriptor_with_invalid_link(self, exception_to_raise):
"""
Ensure that a peer grading descriptor with an invalid link will return an empty list.
"""
# Create a descriptor, and make loading an item throw an error.
descriptor = self._create_peer_grading_descriptor_with_linked_problem()
descriptor.system.load_item = Mock(side_effect=exception_to_raise)
# Ensure that modules is a list of length 0.
modules = descriptor.get_required_module_descriptors()
self.assertIsInstance(modules, list)
self.assertEqual(len(modules), 0)
def test_descriptor_with_nopath(self):
"""
Test to see if a descriptor with a NoPathToItem error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(NoPathToItem)
def test_descriptor_with_item_not_found(self):
"""
Test to see if a descriptor with an ItemNotFound error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(ItemNotFoundError)
def test_invalid_link(self):
"""
Ensure that a peer grading problem with no linked locations stays in panel mode.
"""
# Setup the peer grading module with no linked locations.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)
self.assertFalse(peer_grading.use_for_single_location_local)
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_problem(self):
"""
Ensure that a peer grading problem with a linked location loads properly.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# Ensure that it is properly setup.
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_ajax(self):
"""
Ensure that a peer grading problem with a linked location responds to ajax calls.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# If we specify a location, it will render the problem for that location.
data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})
self.assertTrue(json.loads(data)['success'])
# If we don't specify a location, it should use the linked location.
data = peer_grading.handle_ajax('problem', {})
self.assertTrue(json.loads(data)['success'])
def test_linked_score(self):
"""
Ensure that a peer grading problem with a linked location is properly scored.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
score_dict = peer_grading.get_score()
self.assertEqual(score_dict['score'], 1)
self.assertEqual(score_dict['total'], 1)
def test_get_next_submission(self):
"""
Ensure that a peer grading problem with a linked location can get a submission to score.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location})
self.assertEqual(json.loads(data)['submission_id'], 1)
"""
Test cases covering workflows and behaviors of the Self Assessment feature
"""
from datetime import datetime
import json
import unittest
from mock import Mock, MagicMock
from webob.multidict import MultiDict
from pytz import UTC
from xblock.fields import ScopeIds
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from opaque_keys.edx.locations import Location
from lxml import etree
from . import get_test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
"""
Test cases covering workflows and behaviors of the Self Assessment feature
"""
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
</category>
</rubric></rubric>'''
prompt = etree.XML("<prompt>This is sample prompt text.</prompt>")
definition = {
'rubric': rubric,
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
location = Location("edX", "sa_test", "run", "selfassessment", "SampleQuestion", None)
descriptor = Mock()
def setUp(self):
super(SelfAssessmentTest, self).setUp()
self.static_data = {
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
system = get_test_system()
usage_key = system.course_id.make_usage_key('combinedopenended', 'test_loc')
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
system.xmodule_instance = Mock(scope_ids=scope_ids)
self.module = SelfAssessmentModule(
system,
self.location,
self.definition,
self.descriptor,
self.static_data
)
def test_get_html(self):
html = self.module.get_html(self.module.system)
self.assertIn("This is sample prompt text", html)
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
def get_fake_item(name):
"""
            Return the canned response for the given key.
"""
return responses[name]
def get_data_for_location(self, location, student):
"""
            Return a dictionary of grading counts, all zero.
"""
return {
'count_graded': 0,
'count_required': 0,
'student_sub_count': 0,
}
mock_query_dict = MagicMock()
mock_query_dict.__getitem__.side_effect = get_fake_item
mock_query_dict.getall = get_fake_item
self.module.peer_gs.get_data_for_location = get_data_for_location
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.child_state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
def test_self_assessment_display(self):
"""
Test storing an answer with the self assessment module.
"""
        # Create a module with no state yet. It is important that this starts as a blank slate.
test_module = SelfAssessmentModule(
get_test_system(),
self.location,
self.definition,
self.descriptor,
self.static_data
)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Submit a student response to the question.
test_module.handle_ajax("save_answer", {"student_answer": submitted_response}, get_test_system())
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
# Mock saving an assessment.
assessment_dict = MultiDict({'assessment': 0, 'score_list[]': 0})
data = test_module.handle_ajax("save_assessment", assessment_dict, get_test_system())
self.assertTrue(json.loads(data)['success'])
# Reset the module so the student can try again.
test_module.reset(get_test_system())
# Confirm that the right response is loaded.
self.assertEqual(test_module.get_display_answer(), submitted_response)
def test_save_assessment_after_closing(self):
"""
Test storing assessment when close date is passed.
"""
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
self.module.save_answer({'student_answer': "I am an answer"}, self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
        # Set the close date to the current datetime.
self.module.close_date = datetime.now(UTC)
        # Save the assessment after the close date has passed.
self.module.save_assessment(responses, self.module.system)
self.assertNotEqual(self.module.child_state, self.module.DONE)
"""
Utility classes and data structures supporting the Open-Ended Grading feature of Open edX
"""
import json
from StringIO import StringIO
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.tests import DATA_DIR
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'blah/',
'username': 'incorrect',
'password': 'incorrect',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
S3_INTERFACE = {
'access_key': "",
'secret_access_key': "",
"storage_bucket_name": "",
}
class MockS3Key(object):
"""
Mock an S3 Key object from boto. Used for file upload testing.
"""
def __init__(self, bucket):
pass
def set_metadata(self, key, value):
"""
        Set an attribute on the current instance using the provided key/value pair
"""
setattr(self, key, value)
def set_contents_from_file(self, fileobject):
"""
        Set the 'data' attribute to the contents of the provided file object
"""
self.data = fileobject.read()
def set_acl(self, acl):
"""
        Set the 'acl' metadata attribute to the provided value
"""
self.set_metadata("acl", acl)
def generate_url(self, timeout):
"""
Returns a sample URL for use in tests
"""
return "http://www.edx.org/sample_url"
class MockS3Connection(object):
"""
Mock boto S3Connection for testing image uploads.
"""
def __init__(self, access_key, secret_key, **kwargs):
"""
Mock the init call. S3Connection has a lot of arguments, but we don't need them.
"""
pass
def create_bucket(self, bucket_name, **kwargs):
"""
Mock boto operation: create_bucket -- returns a simple string value
"""
return "edX Bucket"
def lookup(self, bucket_name):
"""
Mock boto operation: lookup -- returns None
"""
return None
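# A hedged usage sketch for the S3 mocks above. The patch target is
# illustrative -- real tests patch wherever the code under test imports
# boto from -- but the behavior shown matches the classes defined here:
#
#   with patch('module_under_test.S3Connection', MockS3Connection):
#       conn = MockS3Connection('fake-access-key', 'fake-secret-key')
#       bucket = conn.create_bucket('essays')   # -> "edX Bucket"
#       key = MockS3Key(bucket)
#       key.set_acl('public-read')              # stored via set_metadata()
#       key.generate_url(timeout=60)            # -> "http://www.edx.org/sample_url"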
class MockUploadedFile(object):
"""
Create a mock uploaded file for image submission tests.
value - String data to place into the mock file.
    The instance wraps a StringIO object that behaves like a file.
"""
def __init__(self, name, value):
self.mock_file = StringIO()
self.mock_file.write(value)
self.name = name
def seek(self, index):
"""
        Move the file position to the provided index
"""
return self.mock_file.seek(index)
def read(self):
"""
Returns the contents of the mock file
"""
return self.mock_file.read()
class DummyModulestore(object):
"""
A mixin that allows test classes to have convenience functions to get a module given a location
"""
def get_module_system(self, descriptor):
"""
        Pseudo-abstract method that forces subclasses to provide a module system implementation
"""
raise NotImplementedError("Sub-tests must specify how to generate a module-system")
def setup_modulestore(self, name):
"""
Sets the modulestore to an XMLModuleStore instance
"""
# pylint: disable=attribute-defined-outside-init
self.modulestore = XMLModuleStore(DATA_DIR, source_dirs=[name])
def get_course(self, _):
"""Get a test course by directory name. If there's more than one, error."""
        courses = self.modulestore.get_courses()
        assert len(courses) == 1, "Expected exactly one course in the modulestore"
        return courses[0]
def get_module_from_location(self, usage_key):
"""
Returns the content descriptor for the given usage key
"""
descriptor = self.modulestore.get_item(usage_key, depth=None)
descriptor.xmodule_runtime = self.get_module_system(descriptor)
return descriptor
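# A hedged usage sketch for the mixin above, mirroring the peer grading tests
# (MyModuleTest and some_usage_key are illustrative names):
#
#   class MyModuleTest(unittest.TestCase, DummyModulestore):
#       def get_module_system(self, descriptor):
#           return get_test_system()                # concrete module system
#
#       def setUp(self):
#           super(MyModuleTest, self).setUp()
#           self.setup_modulestore('open_ended')    # course data directory
#           self.module = self.get_module_from_location(some_usage_key)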
def serialize_child_history(task_state):
"""
    JSON-serialize the feedback and post_assessment entries in the child_history of a task state.
"""
child_history = task_state.get("child_history", [])
for i, attempt in enumerate(child_history):
if "post_assessment" in attempt:
if "feedback" in attempt["post_assessment"]:
attempt["post_assessment"]["feedback"] = json.dumps(attempt["post_assessment"].get("feedback"))
task_state["child_history"][i]["post_assessment"] = json.dumps(attempt["post_assessment"])
def serialize_open_ended_instance_state(json_str):
"""
    JSON-serialize the task_states and old_task_states entries of an instance state.
"""
json_data = json.loads(json_str)
task_states = json_data.get('task_states', [])
for i, task_state in enumerate(task_states):
serialize_child_history(task_state)
json_data['task_states'][i] = json.dumps(task_state)
old_task_states = json_data.get('old_task_states', [])
for i, old_task in enumerate(old_task_states):
for j, task_state in enumerate(old_task):
old_task[j] = json.dumps(task_state)
json_data['old_task_states'][i] = old_task
return json.dumps(json_data)
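# A hedged example of the serializer's effect (the input below is
# illustrative):
#
#   state = json.dumps({'task_states': [{'child_history': []}]})
#   serialize_open_ended_instance_state(state)
#   # -> '{"task_states": ["{\"child_history\": []}"]}'
#
# Each task state is re-encoded as a JSON string, matching the nested format
# of the instance states defined below.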
# Task state for a module with self assessment then instructor assessment.
TEST_STATE_SA_IN = ["{\"child_created\": false, \"child_attempts\": 2, \"version\": 1, \"child_history\": [{\"answer\": \"However venture pursuit he am mr cordial. Forming musical am hearing studied be luckily. Ourselves for determine attending how led gentleman sincerity. Valley afford uneasy joy she thrown though bed set. In me forming general prudent on country carried. Behaved an or suppose justice. Seemed whence how son rather easily and change missed. Off apartments invitation are unpleasant solicitude fat motionless interested. Hardly suffer wisdom wishes valley as an. As friendship advantages resolution it alteration stimulated he or increasing. \\r<br><br>Now led tedious shy lasting females off. Dashwood marianne in of entrance be on wondered possible building. Wondered sociable he carriage in speedily margaret. Up devonshire of he thoroughly insensible alteration. An mr settling occasion insisted distance ladyship so. Not attention say frankness intention out dashwoods now curiosity. Stronger ecstatic as no judgment daughter speedily thoughts. Worse downs nor might she court did nay forth these. \", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}, {\"answer\": \"Delightful remarkably mr on announcing themselves entreaties favourable. About to in so terms voice at. Equal an would is found seems of. The particular friendship one sufficient terminated frequently themselves. It more shed went up is roof if loud case. Delay music in lived noise an. Beyond genius really enough passed is up. \\r<br><br>John draw real poor on call my from. May she mrs furnished discourse extremely. Ask doubt noisy shade guest did built her him. Ignorant repeated hastened it do. Consider bachelor he yourself expenses no. Her itself active giving for expect vulgar months. Discovery commanded fat mrs remaining son she principle middleton neglected. Be miss he in post sons held. No tried is defer do money scale rooms. \", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"However venture pursuit he am mr cordial. Forming musical am hearing studied be luckily. Ourselves for determine attending how led gentleman sincerity. Valley afford uneasy joy she thrown though bed set. In me forming general prudent on country carried. Behaved an or suppose justice. Seemed whence how son rather easily and change missed. Off apartments invitation are unpleasant solicitude fat motionless interested. Hardly suffer wisdom wishes valley as an. As friendship advantages resolution it alteration stimulated he or increasing. \\r<br><br>Now led tedious shy lasting females off. Dashwood marianne in of entrance be on wondered possible building. Wondered sociable he carriage in speedily margaret. Up devonshire of he thoroughly insensible alteration. An mr settling occasion insisted distance ladyship so. Not attention say frankness intention out dashwoods now curiosity. Stronger ecstatic as no judgment daughter speedily thoughts. Worse downs nor might she court did nay forth these. \", \"post_assessment\": \"{\\\"submission_id\\\": 1460, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5413, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}, {\"answer\": \"Delightful remarkably mr on announcing themselves entreaties favourable. About to in so terms voice at. Equal an would is found seems of. The particular friendship one sufficient terminated frequently themselves. It more shed went up is roof if loud case. Delay music in lived noise an. Beyond genius really enough passed is up. \\r<br><br>John draw real poor on call my from. May she mrs furnished discourse extremely. Ask doubt noisy shade guest did built her him. Ignorant repeated hastened it do. Consider bachelor he yourself expenses no. Her itself active giving for expect vulgar months. Discovery commanded fat mrs remaining son she principle middleton neglected. Be miss he in post sons held. No tried is defer do money scale rooms. \", \"post_assessment\": \"{\\\"submission_id\\\": 1462, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5418, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]
# Mock instance state. Should receive a score of 15.
MOCK_INSTANCE_STATE = r"""{"ready_to_reset": false, "skip_spelling_checks": true, "current_task_number": 1, "weight": 5.0, "graceperiod": "1 day 12 hours 59 minutes 59 seconds", "graded": "True", "task_states": ["{\"child_created\": false, \"child_attempts\": 4, \"version\": 1, \"child_history\": [{\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"\", \"post_assessment\": \"[3]\", \"score\": 3}], \"max_score\": 3, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the groups <bg>procedure , describe what additional</bg> information you would need in order to replicate the <bs>expe</bs>\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"{\\\"submission_id\\\": 3098, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3235, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"{\\\"submission_id\\\": 3099, \\\"score\\\": 3, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"to replicate the experiment , the procedure would require <bg>more detail . one</bg> piece of information <bg>that is omitted is the</bg> amount of vinegar used in the experiment . it is also important to know what temperature the experiment was kept at during the hours . finally , the procedure needs to include details about the experiment , for example if the whole sample must be submerged .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3237, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>3</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"{\\\"submission_id\\\": 3100, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"e the mass of four different samples . pour vinegar in <bg>each of four separate</bg> , but identical , containers . place a sample of one material into one container and label . repeat with remaining samples , placing a single sample into a single container . after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . \\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3239, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"\", \"post_assessment\": \"{\\\"submission_id\\\": 3101, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"invalid essay .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3241, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}], \"max_score\": 3, \"child_state\": \"done\"}"], "attempts": "10000", "student_attempts": 0, "due": null, "state": "done", "accept_file_upload": false, "display_name": "Science Question -- Machine Assessed"}"""
# Instance state for testing that the rubric scores are consistent. Should receive a score of 15.
INSTANCE_INCONSISTENT_STATE = serialize_open_ended_instance_state("""
{ "accept_file_upload" : false,
"attempts" : "10000",
"current_task_number" : 1,
"display_name" : "Science Question -- Machine Assessed",
"due" : null,
"graceperiod" : "1 day 12 hours 59 minutes 59 seconds",
"graded" : "True",
"ready_to_reset" : false,
"skip_spelling_checks" : true,
"state" : "done",
"student_attempts" : 0,
"task_states" : [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : [ 3 ],
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : [ 3 ],
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : [ 3 ],
"score" : 1
},
{ "answer" : "",
"post_assessment" : [ 3 ],
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{ "child_attempts" : 0,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: More grammar errors than average.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3233,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3097,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3235,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3098,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"score" : 2
},
{ "answer" : "Student answer 4th attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3239,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3100,
"success" : true
},
"score" : 0
},
{ "answer" : "",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "invalid essay .",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3241,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3101,
"success" : true
},
"score" : 0
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
}
],
"weight" : 5.0
}
""")
# Instance state. Should receive a score of 10 if grader type is PE.
INSTANCE_INCONSISTENT_STATE2 = serialize_open_ended_instance_state("""
{ "accept_file_upload" : false,
"attempts" : "10000",
"current_task_number" : 1,
"display_name" : "Science Question -- Machine Assessed",
"due" : null,
"graceperiod" : "1 day 12 hours 59 minutes 59 seconds",
"graded" : "True",
"ready_to_reset" : false,
"skip_spelling_checks" : true,
"state" : "done",
"student_attempts" : 0,
"task_states" : [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "",
"post_assessment" : [3],
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{ "child_attempts" : 0,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: More grammar errors than average.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3233,
"grader_type" : "PE",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3097,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3235,
"grader_type" : "PE",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3098,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "PE",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>5</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"score" : 2
},
{ "answer" : "Student answer 4th attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3239,
"grader_type" : "PE",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3100,
"success" : true
},
"score" : 0
},
{ "answer" : "",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "invalid essay .",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3241,
"grader_type" : "PE",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3101,
"success" : true
},
"score" : 0
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
}
],
"weight" : 5.0
}
""")
# Instance state for testing the score when the sum of the rubric scores differs from the score value. Should receive a score of 25.
INSTANCE_INCONSISTENT_STATE3 = serialize_open_ended_instance_state("""
{ "accept_file_upload" : false,
"attempts" : "10000",
"current_task_number" : 1,
"display_name" : "Science Question -- Machine Assessed",
"due" : null,
"graceperiod" : "1 day 12 hours 59 minutes 59 seconds",
"graded" : "True",
"ready_to_reset" : false,
"skip_spelling_checks" : true,
"state" : "done",
"student_attempts" : 0,
"task_states" : [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "",
"post_assessment" : [3],
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{ "child_attempts" : 0,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: More grammar errors than average.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3233,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>2</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3097,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3235,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3098,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>5</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"score" : 2
},
{ "answer" : "Student answer 4th attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3239,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3100,
"success" : true
},
"score" : 0
},
{ "answer" : "",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "invalid essay .",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3241,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3101,
"success" : true
},
"score" : 0
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
}
],
"weight" : 5.0
}
""")
# Instance state for testing the score when old task states are available. Should receive a score of 15.
INSTANCE_INCONSISTENT_STATE4 = serialize_open_ended_instance_state("""
{ "accept_file_upload" : false,
"attempts" : "10000",
"current_task_number" : 0,
"display_name" : "Science Question -- Machine Assessed",
"due" : null,
"graceperiod" : "1 day 12 hours 59 minutes 59 seconds",
"graded" : "True",
"old_task_states" : [ [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : "[3]",
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : "[3]",
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
  "post_assessment" : "[3]",
  "score" : 1
},
{ "answer" : "",
"post_assessment" : "[3]",
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
} ] ],
"ready_to_reset" : false,
"skip_spelling_checks" : true,
"state" : "assessing",
"student_attempts" : 0,
"task_states" : [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "",
"post_assessment" : [3],
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"stored_answer" : null,
"version" : 1
},
{ "child_attempts" : 0,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: More grammar errors than average.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3233,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3097,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3235,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3098,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"score" : 2
},
{ "answer" : "Student answer 4th attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3239,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3100,
"success" : true
},
"score" : 0
},
{ "answer" : "",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "invalid essay .",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3241,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3101,
"success" : true
},
"score" : 0
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
}
],
"weight" : 5.0
}
""")
# Instance state used to test scoring when rubric scores are available but the score itself is missing. Should receive a score of 15.
INSTANCE_INCONSISTENT_STATE5 = serialize_open_ended_instance_state("""
{ "accept_file_upload" : false,
"attempts" : "10000",
"current_task_number" : 1,
"display_name" : "Science Question -- Machine Assessed",
"due" : null,
"graceperiod" : "1 day 12 hours 59 minutes 59 seconds",
"graded" : "True",
"ready_to_reset" : false,
"skip_spelling_checks" : true,
"state" : "done",
"student_attempts" : 0,
"task_states" : [ { "child_attempts" : 4,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : [3],
"score" : 1
},
{ "answer" : "",
"post_assessment" : [3],
"score" : 1
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{ "child_attempts" : 0,
"child_created" : false,
"child_history" : [ { "answer" : "Student answer 1st attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: More grammar errors than average.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3233,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3097,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 2nd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3235,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3098,
"success" : true
},
"score" : 0
},
{ "answer" : "Student answer 3rd attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
}
},
{ "answer" : "Student answer 4th attempt.",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3239,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3100,
"success" : true
},
"score" : 0
},
{ "answer" : "",
"post_assessment" : { "feedback" : { "grammar" : "Grammar: Ok.",
"markup-text" : "invalid essay .",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3241,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>0</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 0,
"submission_id" : 3101,
"success" : true
},
"score" : 0
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
}
],
"weight" : 5.0
}
""")
# Initial instance state.
STATE_INITIAL = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history" : [],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": "A stored answer.",
"version": 1,
"child_history": [],
"max_score": 3,
"child_state": "initial"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "initial",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
STATE_ACCESSING = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history": [
{
"answer": "Here is an answer."
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": null,
"version": 1,
"child_history": [
{
"answer": "Here is an answer."
}
],
"max_score": 3,
"child_state": "assessing"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "assessing",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
STATE_POST_ASSESSMENT = serialize_open_ended_instance_state("""
{
"ready_to_reset": false,
"skip_spelling_checks": false,
"current_task_number": 0,
"old_task_states": [],
"weight": 1,
"task_states": [
{
"child_attempts" : 1,
"child_created" : false,
"child_history": [
{
"answer": "Here is an answer."
}
],
"child_state" : "done",
"max_score" : 3,
"version" : 1
},
{
"child_created": false,
"child_attempts": 0,
"stored_answer": null,
"version": 1,
"child_history": [
{
"answer": "Here is an answer."
}
],
"max_score": 3,
"post_assessment": {
"feedback" : {
"grammar" : "Grammar: Ok.",
"markup-text" : "valid essay",
"spelling" : "Spelling: Ok."
},
"grader_id" : 3237,
"grader_type" : "ML",
"rubric_scores_complete" : true,
"rubric_xml" : "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
"score" : 2,
"submission_id" : 3099,
"success" : true
},
"child_state": "post_assessment"
}
],
"graded": true,
"student_attempts": 0,
"required_peer_grading": 3,
"state": "done",
"accept_file_upload": false,
"min_to_calibrate": 3,
"max_to_calibrate": 6,
"display_name": "Open Response Assessment",
"peer_grader_count": 3,
"max_attempts": 1
}""")
# pylint: disable=line-too-long
# Task state with self assessment only.
TEST_STATE_SA = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"{\\\"submission_id\\\": 1461, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5414, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]
# Task state with self and then AI assessment.
TEST_STATE_AI = ["{\"child_created\": false, \"child_attempts\": 2, \"version\": 1, \"child_history\": [{\"answer\": \"In libraries, there should not be censorship on materials considering that it's an individual's decision to read what they prefer. There is no appropriate standard on what makes a book offensive to a group, so it should be undetermined as to what makes a book offensive. In a public library, many children, who the books are censored for, are with their parents. Parents should make an independent choice on what they can allow their children to read. Letting society ban a book simply for the use of inappropriate materials is ridiculous. If an author spent time creating a story, it should be appreciated, and should not put on a list of no-nos. If a certain person doesn't like a book's reputation, all they have to do is not read it. Even in school systems, librarians are there to guide kids to read good books. If a child wants to read an inappropriate book, the librarian will most likely discourage him or her not to read it. In my experience, I wanted to read a book that my mother suggested to me, but as I went to the school library it turned out to be a censored book. Some parents believe children should be ignorant about offensive things written in books, but honestly many of the same ideas are exploited to them everyday on television and internet. So trying to shield your child from the bad things may be a great thing, but the efforts are usually failed attempts. It also never occurs to the people censoring the books, that some people can't afford to buy the books they want to read. The libraries, for some, are the main means for getting books. To conclude there is very little reason to ban a book from the shelves. Many of the books banned have important lessons that can be obtained through reading it. If a person doesn't like a book, the simplest thing to do is not to pick it up.\", \"post_assessment\": \"[1, 1]\", \"score\": 2}, {\"answer\": \"This is another response\", \"post_assessment\": \"[1, 1]\", \"score\": 2}], \"max_score\": 2, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"In libraries, there should not be censorship on materials considering that it's an individual's decision to read what they prefer. There is no appropriate standard on what makes a book offensive to a group, so it should be undetermined as to what makes a book offensive. In a public library, many children, who the books are censored for, are with their parents. Parents should make an independent choice on what they can allow their children to read. Letting society ban a book simply for the use of inappropriate materials is ridiculous. If an author spent time creating a story, it should be appreciated, and should not put on a list of no-nos. If a certain person doesn't like a book's reputation, all they have to do is not read it. Even in school systems, librarians are there to guide kids to read good books. If a child wants to read an inappropriate book, the librarian will most likely discourage him or her not to read it. In my experience, I wanted to read a book that my mother suggested to me, but as I went to the school library it turned out to be a censored book. Some parents believe children should be ignorant about offensive things written in books, but honestly many of the same ideas are exploited to them everyday on television and internet. 
So trying to shield your child from the bad things may be a great thing, but the efforts are usually failed attempts. It also never occurs to the people censoring the books, that some people can't afford to buy the books they want to read. The libraries, for some, are the main means for getting books. To conclude there is very little reason to ban a book from the shelves. Many of the books banned have important lessons that can be obtained through reading it. If a person doesn't like a book, the simplest thing to do is not to pick it up.\", \"post_assessment\": \"{\\\"submission_id\\\": 6107, \\\"score\\\": 2, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 1898718, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Writing Applications</description><score>1</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"}\", \"score\": 2}, {\"answer\": \"This is another response\"}], \"max_score\": 2, \"child_state\": \"assessing\"}"]
# Task state with AI assessment only.
TEST_STATE_AI2 = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"This isn't a real essay, and you should give me a zero on it. \", \"post_assessment\": \"{\\\"submission_id\\\": 18446, \\\"score\\\": [0, 1, 0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"Zero it is! \\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [1944146, 1943188, 1940991], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true, true, true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 2, \"child_state\": \"post_assessment\"}"]
# Invalid task state with AI assessment.
TEST_STATE_AI2_INVALID = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"This isn't a real essay, and you should give me a zero on it. \", \"post_assessment\": \"{\\\"submission_id\\\": 18446, \\\"score\\\": [0, 1, 0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"Zero it is! \\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [1943188, 1940991], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true, true, true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\", \\\"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 2, \"child_state\": \"post_assessment\"}"]
# Self assessment state.
TEST_STATE_SINGLE = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading. \", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}"]
# Peer grading state.
TEST_STATE_PE_SINGLE = ["{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Passage its ten led hearted removal cordial. Preference any astonished unreserved mrs. Prosperous understood middletons in conviction an uncommonly do. Supposing so be resolving breakfast am or perfectly. Is drew am hill from mr. Valley by oh twenty direct me so. Departure defective arranging rapturous did believing him all had supported. Family months lasted simple set nature vulgar him. Picture for attempt joy excited ten carried manners talking how. Suspicion neglected he resolving agreement perceived at an. \\r<br><br>Ye on properly handsome returned throwing am no whatever. In without wishing he of picture no exposed talking minutes. Curiosity continual belonging offending so explained it exquisite. Do remember to followed yourself material mr recurred carriage. High drew west we no or at john. About or given on witty event. Or sociable up material bachelor bringing landlord confined. Busy so many in hung easy find well up. So of exquisite my an explained remainder. Dashwood denoting securing be on perceive my laughing so. \\r<br><br>Ought these are balls place mrs their times add she. Taken no great widow spoke of it small. Genius use except son esteem merely her limits. Sons park by do make on. It do oh cottage offered cottage in written. Especially of dissimilar up attachment themselves by interested boisterous. Linen mrs seems men table. Jennings dashwood to quitting marriage bachelor in. On as conviction in of appearance apartments boisterous. \", \"post_assessment\": \"{\\\"submission_id\\\": 1439, \\\"score\\\": [0], \\\"feedback\\\": [\\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\"], \\\"success\\\": true, \\\"grader_id\\\": [5337], \\\"grader_type\\\": \\\"PE\\\", \\\"rubric_scores_complete\\\": [true], \\\"rubric_xml\\\": [\\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>0</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>0</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>0</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. 
Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>0</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>0</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"]}\", \"score\": 0}], \"max_score\": 12, \"child_state\": \"done\"}"]
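# Illustrative only: each TEST_STATE_* fixture above stores a task state as a
# JSON string whose "post_assessment" value is itself JSON-encoded, so reading
# a grader payload takes two json.loads passes. A minimal decoding sketch
# (_latest_assessment is a hypothetical helper, not part of the module under test):
import json

def _latest_assessment(serialized_task_state):
    """Return the decoded post_assessment of the most recent attempt."""
    task_state = json.loads(serialized_task_state)        # outer layer: task state dict
    latest_attempt = task_state['child_history'][-1]      # most recent attempt
    return json.loads(latest_attempt['post_assessment'])  # inner layer: grader payload

# e.g. _latest_assessment(TEST_STATE_PE_SINGLE[0])['grader_type'] == 'PE'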
......@@ -31,7 +31,6 @@ from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, Descripto
from xmodule.annotatable_module import AnnotatableDescriptor
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_module import CombinedOpenEndedDescriptor
from xmodule.discussion_module import DiscussionDescriptor
from xmodule.gst_module import GraphicalSliderToolDescriptor
from xmodule.html_module import HtmlDescriptor
......@@ -54,7 +53,6 @@ from xmodule.tests import get_test_descriptor_system, get_test_system
LEAF_XMODULES = {
AnnotatableDescriptor: [{}],
CapaDescriptor: [{}],
CombinedOpenEndedDescriptor: [{}],
DiscussionDescriptor: [{}],
GraphicalSliderToolDescriptor: [{}],
HtmlDescriptor: [{}],
......