Commit 0fc29dfc by Usman Khalid

TrainingExamples should accept answers with multiple parts.

TNL-708
parent d20853f8
...@@ -6,6 +6,7 @@ from httplib import HTTPException ...@@ -6,6 +6,7 @@ from httplib import HTTPException
from django.db import DatabaseError from django.db import DatabaseError
from dogapi import dog_stats_api from dogapi import dog_stats_api
from openassessment.assessment.models import ( from openassessment.assessment.models import (
essay_text_from_submission,
AITrainingWorkflow, AIGradingWorkflow, AITrainingWorkflow, AIGradingWorkflow,
ClassifierUploadError, ClassifierSerializeError, ClassifierUploadError, ClassifierSerializeError,
IncompleteClassifierSet, NoTrainingExamples, IncompleteClassifierSet, NoTrainingExamples,
...@@ -197,19 +198,13 @@ def get_training_task_params(training_workflow_uuid): ...@@ -197,19 +198,13 @@ def get_training_task_params(training_workflow_uuid):
returned_examples = [] returned_examples = []
for example in workflow.training_examples.all(): for example in workflow.training_examples.all():
answer = example.answer
if isinstance(answer, dict):
text = answer.get('answer', '')
else:
text = answer
scores = { scores = {
option.criterion.name: option.points option.criterion.name: option.points
for option in example.options_selected.all() for option in example.options_selected.all()
} }
returned_examples.append({ returned_examples.append({
'text': text, 'text': essay_text_from_submission({'answer': example.answer}),
'scores': scores 'scores': scores
}) })
......
...@@ -51,17 +51,21 @@ def essay_text_from_submission(submission): ...@@ -51,17 +51,21 @@ def essay_text_from_submission(submission):
directly (convenient for testing). directly (convenient for testing).
""" """
if isinstance(submission, dict): if isinstance(submission, dict):
# Initially there was one prompt and submission had the structure if 'answer' in submission:
# {'answer': {'text': 'The text.'}} # Format used for answer in examples.
if 'text' in submission['answer']: if isinstance(submission['answer'], unicode):
essay_text = submission['answer']['text'] return submission['answer']
# When multiple prompts were introduced the structure of submission became: # Initially there was one prompt and submission had the structure
# {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}} # {'answer': {'text': 'The text.'}}
# We concatenate these parts and let AI grader evaluate the total text. elif 'text' in submission['answer']:
else: essay_text = submission['answer']['text']
essay_text = u'' # When multiple prompts were introduced the structure of submission became:
for part in submission['answer']['parts']: # {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}}
essay_text += '\n' + part['text'] # We concatenate these parts and let AI grader evaluate the total text.
else:
essay_text = u''
for part in submission['answer']['parts']:
essay_text += '\n' + part['text']
else: else:
essay_text = unicode(submission) essay_text = unicode(submission)
return essay_text return essay_text
......
...@@ -126,7 +126,15 @@ def deserialize_training_examples(examples, rubric_dict): ...@@ -126,7 +126,15 @@ def deserialize_training_examples(examples, rubric_dict):
>>> >>>
>>> examples = [ >>> examples = [
>>> { >>> {
>>> 'answer': u'Lorem ipsum', >>> 'answer': {
>>> 'parts': [
>>> {'text': 'Answer part 1'},
>>> {'text': 'Answer part 2'},
>>> {'text': 'Answer part 3'}
>>> ]
>>> },
>>> 'options_selected': { >>> 'options_selected': {
>>> 'vocabulary': 'good', >>> 'vocabulary': 'good',
>>> 'grammar': 'excellent' >>> 'grammar': 'excellent'
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment