Commit 44ad63d8 by Will Daly

Merge pull request #352 from edx/will/ai-grading-test-fixup

Will/ai grading test fixup
Parents: b93fd0d7, 3c0ac810
@@ -53,4 +53,23 @@ def convert_training_examples_list_to_dict(examples_list):
             }
         }
         for ex in examples_list
-    ]
\ No newline at end of file
+    ]
+
+
+def create_rubric_dict(prompt, criteria):
+    """
+    Construct a serialized rubric model in the format expected
+    by the assessments app.
+
+    Args:
+        prompt (unicode): The rubric prompt.
+        criteria (list of dict): The serialized rubric criteria.
+
+    Returns:
+        dict
+
+    """
+    return {
+        "prompt": prompt,
+        "criteria": criteria
+    }
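For reference, a minimal usage sketch (not part of the diff) of the two helpers in data_conversion. The shape of the rubric dict follows directly from create_rubric_dict above; the list-style training examples fed to convert_training_examples_list_to_dict are an assumption based on the visible tail of that function and the scenario XML added further down.

from openassessment.xblock.data_conversion import (
    convert_training_examples_list_to_dict,
    create_rubric_dict,
)

# Serialized criteria as the XBlock stores them; the exact fields are
# illustrative, taken from the example_based_assessment.xml scenario below.
rubric_criteria = [
    {
        "name": "Ideas",
        "prompt": "How good are the ideas?",
        "options": [
            {"name": "Poor", "points": 0, "explanation": "Poor job!"},
            {"name": "Fair", "points": 1, "explanation": "Fair job"},
            {"name": "Good", "points": 3, "explanation": "Good job"},
        ],
    },
]

# create_rubric_dict simply pairs the rubric prompt with the serialized criteria.
rubric_dict = create_rubric_dict(u"Read for conciseness.", rubric_criteria)
assert rubric_dict == {"prompt": u"Read for conciseness.", "criteria": rubric_criteria}

# Assumed list-style input; the helper's visible tail suggests it returns one
# dict per example, with options_selected converted to a mapping.
examples = [
    {
        "answer": u"An example essay.",
        "options_selected": [
            {"criterion": "Ideas", "option": "Fair"},
        ],
    },
]
examples_dict = convert_training_examples_list_to_dict(examples)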
@@ -8,6 +8,7 @@ from django.utils.translation import ugettext as _
 from xblock.core import XBlock
 from openassessment.assessment.errors.ai import AIError
 from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
+from openassessment.xblock.data_conversion import create_rubric_dict, convert_training_examples_list_to_dict
 from submissions import api as submission_api
 from openassessment.assessment.api import peer as peer_api
 from openassessment.assessment.api import self as self_api
@@ -85,8 +86,8 @@ class StaffInfoMixin(object):
         examples = assessment["examples"]
         try:
             workflow_uuid = ai_api.train_classifiers(
-                self.rubric_criteria,
-                examples,
+                create_rubric_dict(self.prompt, self.rubric_criteria),
+                convert_training_examples_list_to_dict(examples),
                 assessment["algorithm_id"]
             )
             return {
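The hunk above shows only the top of the schedule_training handler; the updated tests below pin down its JSON contract (success, msg, workflow_uuid). A minimal sketch of that contract follows, under stated assumptions: the ai_api import path merely mirrors the peer/self imports above, and the error-branch message text is invented, since the handler body is truncated in this diff.

from openassessment.assessment.api import ai as ai_api  # assumed path, mirroring the peer/self imports above
from openassessment.assessment.errors.ai import AIError
from openassessment.xblock.data_conversion import (
    convert_training_examples_list_to_dict,
    create_rubric_dict,
)


def schedule_training_response(xblock, assessment):
    """Illustrative sketch of the handler's return shape, not the real implementation."""
    try:
        workflow_uuid = ai_api.train_classifiers(
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria),
            convert_training_examples_list_to_dict(assessment["examples"]),
            assessment["algorithm_id"]
        )
        # test_schedule_training expects success=True and a workflow_uuid key.
        return {'success': True, 'workflow_uuid': workflow_uuid, 'msg': u''}
    except AIError as err:
        # test_admin_schedule_training_error expects success=False and a message
        # describing the error (exact wording assumed).
        return {'success': False, 'msg': u"An error occurred while scheduling training: {err}".format(err=err)}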
<openassessment>
    <title>Open Assessment Test</title>
    <prompt>Example-based assessment</prompt>
    <rubric>
        <prompt>Read for conciseness, clarity of thought, and form.</prompt>
        <criterion>
            <name>Ideas</name>
            <prompt>How good are the ideas?</prompt>
            <option points="0">
                <name>Poor</name>
                <explanation>Poor job!</explanation>
            </option>
            <option points="1">
                <name>Fair</name>
                <explanation>Fair job</explanation>
            </option>
            <option points="3">
                <name>Good</name>
                <explanation>Good job</explanation>
            </option>
        </criterion>
        <criterion>
            <name>Content</name>
            <prompt>How good is the content?</prompt>
            <option points="0">
                <name>Poor</name>
                <explanation>Poor job!</explanation>
            </option>
            <option points="1">
                <name>Fair</name>
                <explanation>Fair job</explanation>
            </option>
            <option points="3">
                <name>Good</name>
                <explanation>Good job</explanation>
            </option>
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
        <assessment name="self-assessment" />
        <!-- TODO: for now we're inserting the example-based assessment programmatically, until the XML format changes land -->
    </assessments>
</openassessment>
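The TODO above notes that the example-based step is appended to the block programmatically until the XML format supports it, which is what the tests below do via an EXAMPLE_BASED_ASSESSMENT constant. That constant is not shown in this diff, so the following is only an assumed stand-in, built from the two fields the mixin reads (examples and algorithm_id) and the criteria/options defined in the scenario above.

# Hypothetical stand-in for the tests' EXAMPLE_BASED_ASSESSMENT constant.
EXAMPLE_BASED_ASSESSMENT = {
    "name": "example-based-assessment",   # assumed step name
    "algorithm_id": "ease",               # assumed algorithm identifier
    "examples": [
        {
            "answer": u"An essay scored against the rubric above.",
            "options_selected": [
                {"criterion": "Ideas", "option": "Fair"},
                {"criterion": "Content", "option": "Good"},
            ],
        },
    ],
}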
@@ -282,7 +282,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         resp = xblock.render_student_info(request)
         self.assertIn("bob answer", resp.body.lower())
 
-    @scenario('data/basic_scenario.xml', user_id='Bob')
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_display_schedule_training(self, xblock):
         xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
@@ -292,17 +292,17 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertEquals('openassessmentblock/staff_debug/staff_debug.html', path)
         self.assertTrue(context['display_schedule_training'])
 
-    @scenario('data/basic_scenario.xml', user_id='Bob')
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_schedule_training(self, xblock):
         xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
         )
         response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
-        self.assertTrue(response['success'])
+        self.assertTrue(response['success'], msg=response.get('msg'))
         self.assertTrue('workflow_uuid' in response)
 
-    @scenario('data/basic_scenario.xml', user_id='Bob')
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_not_displaying_schedule_training(self, xblock):
         xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
         xblock.xmodule_runtime = self._create_mock_runtime(
@@ -322,7 +322,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertTrue('permission' in response['msg'])
 
     @patch.object(ai_api, "train_classifiers")
-    @scenario('data/basic_scenario.xml', user_id='Bob')
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_admin_schedule_training_error(self, xblock, mock_api):
         mock_api.side_effect = AIError("Oh no!")
         xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
@@ -333,7 +333,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         self.assertFalse(response['success'])
         self.assertTrue('error' in response['msg'])
 
-    @scenario('data/basic_scenario.xml', user_id='Bob')
+    @scenario('data/example_based_assessment.xml', user_id='Bob')
     def test_no_example_based_assessment(self, xblock):
         xblock.xmodule_runtime = self._create_mock_runtime(
             xblock.scope_ids.usage_id, True, True, "Bob"
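Not part of this merge request, but as an illustration of what the rewiring above enables: a sketch of a test, in the same style as test_admin_schedule_training_error, that patches the AI API and checks the serialized arguments now passed to it. It assumes create_rubric_dict and convert_training_examples_list_to_dict are importable in the test module; everything else (the scenario, mock runtime, and request helper) appears in the tests above.

    # Illustrative sketch only -- not part of this changeset.
    @patch.object(ai_api, "train_classifiers")
    @scenario('data/example_based_assessment.xml', user_id='Bob')
    def test_schedule_training_arguments_sketch(self, xblock, mock_api):
        mock_api.return_value = "workflow-uuid"
        xblock.rubric_assessments.append(EXAMPLE_BASED_ASSESSMENT)
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, True, "Bob"
        )
        response = self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
        self.assertTrue(response['success'], msg=response.get('msg'))

        # The API should now receive a serialized rubric and converted examples,
        # rather than the raw criteria/examples lists.
        mock_api.assert_called_with(
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria),
            convert_training_examples_list_to_dict(EXAMPLE_BASED_ASSESSMENT["examples"]),
            EXAMPLE_BASED_ASSESSMENT["algorithm_id"]
        )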