Commit 2c2192b7 by Usman Khalid

Update student training examples format for multiple answers.

TNL-708
parent 5afc5007
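The heart of the change: a training example's 'answer' used to be a plain string and is now a dict holding a list of answer parts, one per prompt. A minimal before/after sketch in Python (values taken from the docstrings below, purely illustrative):

# Old format: the example answer is a single string.
old_example = {
    'answer': u'Lorem ipsum',
    'options_selected': {'vocabulary': 'good', 'grammar': 'excellent'},
}

# New format: the answer is a dict with one part per prompt.
new_example = {
    'answer': {'parts': [{'text': u'Lorem ipsum'}]},
    'options_selected': {'vocabulary': 'good', 'grammar': 'excellent'},
}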
@@ -146,14 +146,14 @@ def validate_training_examples(rubric, examples):
 >>>
 >>> examples = [
 >>>     {
->>>         'answer': u'Lorem ipsum',
+>>>         'answer': {'parts': [{'text': u'Lorem ipsum'}]},
 >>>         'options_selected': {
 >>>             'vocabulary': 'good',
 >>>             'grammar': 'excellent'
 >>>         }
 >>>     },
 >>>     {
->>>         'answer': u'Doler',
+>>>         'answer': {'parts': [{'text': u'Doler'}]},
 >>>         'options_selected': {
 >>>             'vocabulary': 'good',
 >>>             'grammar': 'poor'
@@ -312,7 +312,15 @@ def get_training_example(submission_uuid, rubric, examples):
 >>> examples = [
 >>>     {
->>>         'answer': u'Doler',
+>>>         'answer': {
+>>>             'parts': [
+>>>                 {'text': 'Answer part 1'},
+>>>                 {'text': 'Answer part 2'},
+>>>                 {'text': 'Answer part 3'}
+>>>             ]
+>>>         },
 >>>         'options_selected': {
 >>>             'vocabulary': 'good',
 >>>             'grammar': 'poor'
@@ -322,9 +330,21 @@ def get_training_example(submission_uuid, rubric, examples):
 >>>
 >>> get_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", rubric, examples)
 {
-    'answer': u'Lorem ipsum',
+    'answer': {
+        'parts': [
+            {'text': 'Answer part 1'},
+            {'text': 'Answer part 2'},
+            {'text': 'Answer part 3'}
+        ]
+    },
     'rubric': {
-        "prompts": [{"description": "Write an essay!"}],
+        "prompts": [
+            {"description": "Prompt 1"},
+            {"description": "Prompt 2"},
+            {"description": "Prompt 3"}
+        ],
         "criteria": [
             {
                 "order_num": 0,
...
@@ -107,7 +107,11 @@ def deserialize_training_examples(examples, rubric_dict):
 >>> ]
 >>>
 >>> rubric = {
->>>     "prompts": [{"description": "Write an essay!"}],
+>>>     "prompts": [
+>>>         {"description": "Prompt 1"},
+>>>         {"description": "Prompt 2"},
+>>>         {"description": "Prompt 3"}
+>>>     ],
 >>>     "criteria": [
 >>>         {
 >>>             "order_num": 0,
...
@@ -20,7 +20,15 @@ def convert_training_examples_list_to_dict(examples_list):
 Example:
 >>> examples = [
 >>>     {
->>>         "answer": "This is my response",
+>>>         "answer": {
+>>>             "parts": [
+>>>                 {"text": "Answer part 1"},
+>>>                 {"text": "Answer part 2"},
+>>>                 {"text": "Answer part 3"}
+>>>             ]
+>>>         },
 >>>         "options_selected": [
 >>>             {
 >>>                 "criterion": "Ideas",
@@ -36,7 +44,15 @@ def convert_training_examples_list_to_dict(examples_list):
 >>> convert_training_examples_list_to_dict(examples)
 [
     {
-        'answer': 'This is my response',
+        'answer': {
+            'parts': [
+                {'text': 'Answer part 1'},
+                {'text': 'Answer part 2'},
+                {'text': 'Answer part 3'}
+            ]
+        },
         'options_selected': {
             'Ideas': 'Fair',
             'Content': 'Good'
@@ -57,6 +73,27 @@ def convert_training_examples_list_to_dict(examples_list):
     ]
+
+
+def update_assessments_format(assessments):
+    """
+    Update the 'answer' field of each training example to the newer format.
+
+    Args:
+        assessments (list): list of assessments
+
+    Returns:
+        list of dict
+    """
+    for assessment in assessments:
+        if 'examples' in assessment:
+            for example in assessment['examples']:
+                if isinstance(example['answer'], (str, unicode)):
+                    example['answer'] = {
+                        'parts': [
+                            {'text': example['answer']}
+                        ]
+                    }
+    return assessments
+
+
 def create_prompts_list(prompt_or_serialized_prompts):
     """
     Construct a list of prompts.
...
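The new update_assessments_format helper converts legacy string answers in place and leaves already-converted dict answers untouched. A minimal usage sketch (the surrounding assessment dict is made up for illustration; only the 'examples' key matters to the helper):

legacy = [{
    'name': 'student-training',
    'examples': [{'answer': u'This is my response', 'options_selected': {}}],
}]

updated = update_assessments_format(legacy)
# updated[0]['examples'][0]['answer'] == {'parts': [{'text': u'This is my response'}]}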
...@@ -32,7 +32,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError ...@@ -32,7 +32,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -491,10 +491,11 @@ class OpenAssessmentBlock(
             list
         """
-        return [
+        _valid_assessments = [
             asmnt for asmnt in self.rubric_assessments
             if asmnt.get('name') in VALID_ASSESSMENT_TYPES
         ]
+        return update_assessments_format(copy.deepcopy(_valid_assessments))

     @property
     def assessment_steps(self):
...
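The property in the hunk above now returns a deep copy run through update_assessments_format, presumably so the helper's in-place answer conversion cannot mutate the XBlock's stored rubric_assessments field. A standalone sketch of the hazard the copy avoids (not the XBlock code itself; the sample data is illustrative):

import copy
from openassessment.xblock.data_conversion import update_assessments_format

stored = [{'examples': [{'answer': u'Old style answer'}]}]

# Converting a deep copy leaves the stored data untouched.
safe = update_assessments_format(copy.deepcopy(stored))

assert stored[0]['examples'][0]['answer'] == u'Old style answer'
assert safe[0]['examples'][0]['answer'] == {'parts': [{'text': u'Old style answer'}]}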
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
         <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
         <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
         <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
@@ -5,7 +5,14 @@
     "training_num_completed": 0,
     "training_num_current": 1,
     "training_num_available": 2,
-    "training_essay": "This is my answer.",
+    "training_essay": {
+        "answer": {
+            "parts": [
+                {
+                    "prompt": "Given the state of the world today, what do you think should be done to combat poverty?",
+                    "text": "This is my answer."
+                }
+            ]
+        }
+    },
     "allow_latex": false,
     "training_rubric": {
         "criteria": [
...
@@ -9,7 +9,7 @@ import mock
 from django.test import TestCase

 from openassessment.xblock.data_conversion import (
-    create_prompts_list, create_submission_dict, prepare_submission_for_serialization
+    create_prompts_list, create_submission_dict, prepare_submission_for_serialization, update_assessments_format
 )

 @ddt.ddt
@@ -52,3 +52,14 @@ class DataConversionTest(TestCase):
     @ddt.unpack
     def test_prepare_submission_for_serialization(self, input, output):
         self.assertEqual(prepare_submission_for_serialization(input), output)
+
+    @ddt.data(
+        ([{'answer': 'Ans'}], [{'answer': {'parts': [{'text': 'Ans'}]}}]),
+    )
+    @ddt.unpack
+    def test_update_assessments_format(self, input, output):
+        self.assertEqual(update_assessments_format([{
+            'examples': input,
+        }]), [{
+            'examples': output,
+        }])