Commit 0c81980e by Usman Khalid

Merge pull request #667 from edx/usman/tnl708-multiple-prompts-misc

Multiple prompts 4: Misc 
parents 3a1fb1c7 da4fa3e5
@@ -6,6 +6,7 @@ from httplib import HTTPException
 from django.db import DatabaseError
 from dogapi import dog_stats_api
 from openassessment.assessment.models import (
+    essay_text_from_submission,
     AITrainingWorkflow, AIGradingWorkflow,
     ClassifierUploadError, ClassifierSerializeError,
     IncompleteClassifierSet, NoTrainingExamples,
@@ -197,19 +198,13 @@ def get_training_task_params(training_workflow_uuid):
         returned_examples = []
         for example in workflow.training_examples.all():
-            answer = example.answer
-            if isinstance(answer, dict):
-                text = answer.get('answer', '')
-            else:
-                text = answer
             scores = {
                 option.criterion.name: option.points
                 for option in example.options_selected.all()
             }
             returned_examples.append({
-                'text': text,
+                'text': essay_text_from_submission({'answer': example.answer}),
                 'scores': scores
             })
...
@@ -146,14 +146,14 @@ def validate_training_examples(rubric, examples):
     >>>
     >>> examples = [
     >>>     {
-    >>>         'answer': u'Lorem ipsum',
+    >>>         'answer': {'parts': [{'text': u'Lorem ipsum'}]},
     >>>         'options_selected': {
     >>>             'vocabulary': 'good',
     >>>             'grammar': 'excellent'
     >>>         }
     >>>     },
     >>>     {
-    >>>         'answer': u'Doler',
+    >>>         'answer': {'parts': [{'text': u'Doler'}]},
     >>>         'options_selected': {
     >>>             'vocabulary': 'good',
     >>>             'grammar': 'poor'
@@ -312,7 +312,15 @@ def get_training_example(submission_uuid, rubric, examples):
     >>> examples = [
     >>>     {
-    >>>         'answer': u'Doler',
+    >>>         'answer': {
+    >>>             'parts': [
+    >>>                 {'text': 'Answer part 1'},
+    >>>                 {'text': 'Answer part 2'},
+    >>>                 {'text': 'Answer part 3'}
+    >>>             ]
+    >>>         },
     >>>         'options_selected': {
     >>>             'vocabulary': 'good',
     >>>             'grammar': 'poor'
@@ -322,9 +330,21 @@ def get_training_example(submission_uuid, rubric, examples):
     >>>
     >>> get_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", rubric, examples)
     {
-        'answer': u'Lorem ipsum',
+        'answer': {
+            'parts': [
+                {'text': 'Answer part 1'},
+                {'text': 'Answer part 2'},
+                {'text': 'Answer part 3'}
+            ]
+        },
         'rubric': {
-            "prompts": [{"description": "Write an essay!"}],
+            "prompts": [
+                {"description": "Prompt 1"},
+                {"description": "Prompt 2"},
+                {"description": "Prompt 3"}
+            ],
             "criteria": [
                 {
                     "order_num": 0,
...
@@ -40,6 +40,35 @@ CLASSIFIERS_CACHE_IN_FILE = getattr(
 )
+
+
+def essay_text_from_submission(submission):
+    """
+    Retrieve the submission text.
+
+    Submissions are arbitrary JSON-blobs, which *should*
+    contain a single key, "answer", containing the essay
+    submission text.
+
+    If not, though, assume we've been given the essay text
+    directly (convenient for testing).
+    """
+    if isinstance(submission, dict):
+        if 'answer' in submission:
+            # Format used for the answer in training examples.
+            if isinstance(submission['answer'], unicode):
+                return submission['answer']
+            # Initially there was one prompt and the submission had the structure
+            # {'answer': {'text': 'The text.'}}
+            elif 'text' in submission['answer']:
+                essay_text = submission['answer']['text']
+            # When multiple prompts were introduced, the structure of the submission became:
+            # {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}}
+            # We concatenate these parts and let the AI grader evaluate the total text.
+            else:
+                essay_text = u'\n'.join([part['text'] for part in submission['answer']['parts']])
+    else:
+        essay_text = unicode(submission)
+    return essay_text
 class IncompleteClassifierSet(Exception):
     """
     The classifier set is missing a classifier for a criterion in the rubric.
@@ -792,20 +821,10 @@ class AIGradingWorkflow(AIWorkflow):
         from openassessment.assessment.serializers import rubric_from_dict
         rubric = rubric_from_dict(rubric_dict)
-        # Retrieve the submission text
-        # Submissions are arbitrary JSON-blobs, which *should*
-        # contain a single key, "answer", containing the essay
-        # submission text.  If not, though, assume we've been
-        # given the essay text directly (convenient for testing).
-        if isinstance(submission, dict):
-            essay_text = submission.get('answer')
-        else:
-            essay_text = unicode(submission)
         # Create the workflow
         workflow = cls.objects.create(
             submission_uuid=submission_uuid,
-            essay_text=essay_text,
+            essay_text=essay_text_from_submission(submission),
             algorithm_id=algorithm_id,
             student_id=submission['student_item']['student_id'],
             item_id=submission['student_item']['item_id'],
...
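For reference, a minimal usage sketch of the essay_text_from_submission helper added above, mirroring the ddt cases added to the model tests later in this diff (sketch only, not part of the commit):

    # Sketch: the helper accepts a raw string, the old single-prompt
    # {'text': ...} answer, or the new multi-part answer, and always
    # returns plain essay text for the AI grader.
    from openassessment.assessment.models import essay_text_from_submission

    assert essay_text_from_submission(u'Answer') == u'Answer'
    assert essay_text_from_submission({'answer': {'text': u'Answer'}}) == u'Answer'
    assert essay_text_from_submission(
        {'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}
    ) == u'Answer 1\nAnswer 2'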
@@ -107,7 +107,11 @@ def deserialize_training_examples(examples, rubric_dict):
     >>> ]
     >>>
     >>> rubric = {
-    >>>     "prompts": [{"description": "Write an essay!"}],
+    >>>     "prompts": [
+    >>>         {"description": "Prompt 1"},
+    >>>         {"description": "Prompt 2"},
+    >>>         {"description": "Prompt 3"}
+    >>>     ],
     >>>     "criteria": [
     >>>         {
     >>>             "order_num": 0,
@@ -126,7 +130,15 @@ def deserialize_training_examples(examples, rubric_dict):
     >>>
     >>> examples = [
     >>>     {
-    >>>         'answer': u'Lorem ipsum',
+    >>>         'answer': {
+    >>>             'parts': [
+    >>>                 {'text': 'Answer part 1'},
+    >>>                 {'text': 'Answer part 2'},
+    >>>                 {'text': 'Answer part 3'}
+    >>>             ]
+    >>>         },
     >>>         'options_selected': {
     >>>             'vocabulary': 'good',
     >>>             'grammar': 'excellent'
...
@@ -10,7 +10,7 @@ STUDENT_ITEM = {
     'item_type': u'openassessment'
 }
-ANSWER = u'ẗëṡẗ äṅṡẅëṛ'
+ANSWER = {'text': u'ẗëṡẗ äṅṡẅëṛ'}
 RUBRIC_OPTIONS = [
     {
...
@@ -3,11 +3,14 @@
 Test AI Django models.
 """
 import copy
+
+import ddt
+from django.test import TestCase
 from django.test.utils import override_settings
 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.models import (
     AIClassifierSet, AIClassifier, AIGradingWorkflow, AI_CLASSIFIER_STORAGE,
-    CLASSIFIERS_CACHE_IN_MEM
+    CLASSIFIERS_CACHE_IN_MEM, essay_text_from_submission
 )
 from openassessment.assessment.serializers import rubric_from_dict
 from .constants import RUBRIC
@@ -21,6 +24,19 @@ COURSE_ID = u"†3߆ çøU®ß3"
 ITEM_ID = u"fake_item_id"
+
+
+@ddt.ddt
+class DataConversionTest(TestCase):
+
+    @ddt.data(
+        (u'Answer', u'Answer'),
+        ({'answer': {'text': u'Answer'}}, u'Answer'),
+        ({'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}, u'Answer 1\nAnswer 2')
+    )
+    @ddt.unpack
+    def test_essay_text_from_submission(self, input, output):
+        self.assertEqual(essay_text_from_submission(input), output)
+
+
 class AIClassifierTest(CacheResetTest):
     """
     Tests for the AIClassifier model.
...
@@ -91,7 +91,7 @@ class Command(BaseCommand):
     }
     STUDENT_ID = u'test_student'
-    ANSWER = {'answer': 'test answer'}
+    ANSWER = {"text": 'test answer'}
     def handle(self, *args, **options):
         """
...
@@ -20,7 +20,7 @@
         {% if topscore.file %}
             <img class="leaderboard__score__image" alt="{% trans "The image associated with your peer's submission." %}" src="{{ topscore.file }}" />
         {% endif %}
-        {{ topscore.content|linebreaks }}
+        {% include "openassessmentblock/oa_submission_answer.html" with answer=topscore.submission.answer answer_text_label="Your peer's response to the question above:" %}
     </div>
 </li>
 {% endfor %}
...
@@ -21,7 +21,7 @@
         Comments: {{ comments }}
     {% endblocktrans %}
 {% else %}
-    {{ submission.answer.text|linebreaks }}
+    {% include "openassessmentblock/oa_submission_answer.html" with answer=submission.answer answer_text_label="The student's response to the question above:" %}
 {% endif %}
 </div>
...
@@ -72,9 +72,8 @@
     {% endwith %}
 </header>
-<div class="student-training__display__response">
-    {{ training_essay|linebreaks }}
-</div>
+{% include "openassessmentblock/oa_submission_answer.html" with answer=training_essay.answer answer_text_label="The response to the question above:" %}
 </article>
 <form id="student-training--001__assessment" class="student-training__assessment" method="post">
...
@@ -32,6 +32,9 @@ RUBRIC_DICT = {
     ]
 }
+ANSWER_1 = {"text": "Shoot Hot Rod"}
+ANSWER_2 = {"text": "Ultra Magnus fumble"}
+
 ALGORITHM_ID = "Ease"
 ON_INIT_PARAMS = {
@@ -64,7 +67,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         first_step = data["steps"][0] if data["steps"] else "peer"
         if "ai" in data["steps"]:
             first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
         workflow_keys = set(workflow.keys())
@@ -147,7 +150,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self.assertEquals("waiting", workflow['status'])

     def test_update_peer_workflow(self):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], ON_INIT_PARAMS)
         StudentTrainingWorkflow.create_workflow(submission_uuid=submission["uuid"])
         requirements = {
@@ -200,7 +203,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @patch.object(ai_api, 'assessment_is_finished')
     @patch.object(ai_api, 'get_score')
     def test_ai_score_set(self, mock_score, mock_is_finished):
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         mock_is_finished.return_value = True
         score = {"points_earned": 7, "points_possible": 10}
         mock_score.return_value = score
@@ -213,7 +216,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @ddt.unpack
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         on_init_params = {
             'ai': {
                 'rubric': rubric,
@@ -226,7 +229,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_ai_on_init_failures(self, mock_on_init):
         mock_on_init.side_effect = AIError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)

     @patch.object(Submission.objects, 'get')
@@ -241,14 +244,14 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)

     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)

     @patch.object(AssessmentWorkflow.objects, 'get')
@@ -256,7 +259,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_exception_wrapped(self, data, mock_create):
         mock_create.side_effect = Exception("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.update_from_assessments(submission["uuid"], data["steps"])

     @ddt.file_data('data/assessments.json')
@@ -363,7 +366,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -403,7 +406,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow_does_not_exist(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -432,7 +435,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_get_the_cancelled_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
...
@@ -20,7 +20,15 @@ def convert_training_examples_list_to_dict(examples_list):
     Example:
     >>> examples = [
     >>>     {
-    >>>         "answer": "This is my response",
+    >>>         "answer": {
+    >>>             "parts": [
+    >>>                 {"text": "Answer part 1"},
+    >>>                 {"text": "Answer part 2"},
+    >>>                 {"text": "Answer part 3"}
+    >>>             ]
+    >>>         },
     >>>         "options_selected": [
     >>>             {
     >>>                 "criterion": "Ideas",
@@ -36,7 +44,15 @@ def convert_training_examples_list_to_dict(examples_list):
     >>> convert_training_examples_list_to_dict(examples)
     [
         {
-            'answer': 'This is my response',
+            'answer': {
+                'parts': [
+                    {'text': 'Answer part 1'},
+                    {'text': 'Answer part 2'},
+                    {'text': 'Answer part 3'}
+                ]
+            },
             'options_selected': {
                 'Ideas': 'Fair',
                 'Content': 'Good'
@@ -57,6 +73,27 @@ def convert_training_examples_list_to_dict(examples_list):
     ]
+
+
+def update_assessments_format(assessments):
+    """
+    For each example in each assessment, update 'answer' to the newer format.
+
+    Args:
+        assessments (list): list of assessments
+
+    Returns:
+        list of dict
+    """
+    for assessment in assessments:
+        if 'examples' in assessment:
+            for example in assessment['examples']:
+                if isinstance(example['answer'], unicode) or isinstance(example['answer'], str):
+                    example['answer'] = {
+                        'parts': [
+                            {'text': example['answer']}
+                        ]
+                    }
+    return assessments
+
+
 def create_prompts_list(prompt_or_serialized_prompts):
     """
     Construct a list of prompts.
...
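A minimal sketch of the new update_assessments_format helper (mirroring the unit test added in test_data_conversion further down this diff): it rewrites plain-string example answers into the new parts structure and leaves dict answers untouched.

    # Sketch only: old-style string answers are wrapped as
    # {'parts': [{'text': ...}]}; everything else passes through unchanged.
    from openassessment.xblock.data_conversion import update_assessments_format

    assessments = [{'examples': [{'answer': u'Ans'}]}]
    assert update_assessments_format(assessments) == [
        {'examples': [{'answer': {'parts': [{'text': u'Ans'}]}}]}
    ]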
@@ -4,9 +4,12 @@ Leaderboard step in the OpenAssessment XBlock.
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
-from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from submissions import api as sub_api
+from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from openassessment.fileupload import api as file_upload_api
+from openassessment.xblock.data_conversion import create_submission_dict

 class LeaderboardMixin(object):
     """Leaderboard Mixin introduces all handlers for displaying the leaderboard
@@ -72,13 +75,16 @@ class LeaderboardMixin(object):
         for score in scores:
             if 'file_key' in score['content']:
                 score['file'] = file_upload_api.get_download_url(score['content']['file_key'])
-            if 'text' in score['content']:
-                score['content'] = score['content']['text']
+            if 'text' in score['content'] or 'parts' in score['content']:
+                submission = {'answer': score.pop('content')}
+                score['submission'] = create_submission_dict(submission, self.prompts)
             elif isinstance(score['content'], basestring):
                 pass
            # Currently, we do not handle non-text submissions.
             else:
-                score['content'] = ""
+                score['submission'] = ""
+                score.pop('content', None)

         context = { 'topscores': scores,
                     'allow_latex': self.allow_latex,
...
@@ -32,7 +32,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
 from openassessment.xblock.student_training_mixin import StudentTrainingMixin
 from openassessment.xblock.validation import validator
 from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
-from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict
+from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format

 logger = logging.getLogger(__name__)
@@ -467,10 +467,11 @@ class OpenAssessmentBlock(
             list
         """
-        return [
+        _valid_assessments = [
             asmnt for asmnt in self.rubric_assessments
             if asmnt.get('name') in VALID_ASSESSMENT_TYPES
         ]
+        return update_assessments_format(copy.deepcopy(_valid_assessments))

     @property
     def assessment_steps(self):
...
@@ -16,7 +16,7 @@ from openassessment.workflow.errors import (
 from openassessment.assessment.errors.ai import AIError
 from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
 from openassessment.xblock.data_conversion import (
-    create_rubric_dict, convert_training_examples_list_to_dict
+    create_rubric_dict, convert_training_examples_list_to_dict, create_submission_dict
 )
 from submissions import api as submission_api
 from openassessment.assessment.api import peer as peer_api
@@ -281,7 +281,7 @@ class StaffInfoMixin(object):
             workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

         context = {
-            'submission': submission,
+            'submission': create_submission_dict(submission, self.prompts) if submission else None,
             'workflow_cancellation': workflow_cancellation,
             'peer_assessments': peer_assessments,
             'submitted_assessments': submitted_assessments,
...
@@ -7,7 +7,7 @@ from xblock.core import XBlock
 from openassessment.assessment.api import student_training
 from openassessment.workflow import api as workflow_api
 from openassessment.workflow.errors import AssessmentWorkflowError
-from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
+from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict, create_submission_dict
 from .resolve_dates import DISTANT_FUTURE
@@ -124,7 +124,7 @@ class StudentTrainingMixin(object):
                 examples
             )
             if example:
-                context['training_essay'] = example['answer']
+                context['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
                 context['training_rubric'] = {
                     'criteria': example['rubric']['criteria'],
                     'points_possible': example['rubric']['points_possible']
...
<openassessment leaderboard_show="3"> <openassessment leaderboard_show="3">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt> <prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
......
<openassessment leaderboard_show="3" allow_file_upload="True"> <openassessment leaderboard_show="3" allow_file_upload="True">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment leaderboard_show="10"> <openassessment leaderboard_show="10">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
         <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
         <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
 <openassessment>
     <title>Student training test</title>
-    <prompt>Test prompt</prompt>
+    <prompts>
+        <prompt>
+            <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
+        </prompt>
+    </prompts>
     <rubric>
-        <prompt>Test rubric prompt</prompt>
     <criterion>
             <name>Vocabulary</name>
             <prompt>How varied is the vocabulary?</prompt>
...
@@ -5,7 +5,16 @@
     "training_num_completed": 0,
     "training_num_current": 1,
     "training_num_available": 2,
-    "training_essay": "This is my answer.",
+    "training_essay": {
+        "answer": {
+            "parts": [
+                {
+                    "prompt": {
+                        "description": "Given the state of the world today, what do you think should be done to combat poverty?"
+                    },
+                    "text": "This is my answer."
+                }
+            ]
+        }
+    },
     "allow_latex": false,
     "training_rubric": {
         "criteria": [
...
@@ -9,7 +9,7 @@ import mock
 from django.test import TestCase

 from openassessment.xblock.data_conversion import (
-    create_prompts_list, create_submission_dict, prepare_submission_for_serialization
+    create_prompts_list, create_submission_dict, prepare_submission_for_serialization, update_assessments_format
 )

 @ddt.ddt
@@ -52,3 +52,14 @@ class DataConversionTest(TestCase):
     @ddt.unpack
     def test_prepare_submission_for_serialization(self, input, output):
         self.assertEqual(prepare_submission_for_serialization(input), output)
+
+    @ddt.data(
+        ([{'answer': 'Ans'}], [{'answer': {'parts': [{'text': 'Ans'}]}}]),
+    )
+    @ddt.unpack
+    def test_update_assessments_format(self, input, output):
+        self.assertEqual(update_assessments_format([{
+            'examples': input,
+        }]), [{
+            'examples': output,
+        }])
@@ -150,7 +150,7 @@ class TestGrade(XBlockHandlerTestCase):
         resp = self.request(xblock, 'render_grade', json.dumps(dict()))
         # Verify that feedback from each scorer appears in the view
         self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
-        self.assertIn(u'Good', resp.decode('utf-8'))
+        self.assertIn(u'Poor', resp.decode('utf-8'))
         # Verify that the submission and peer steps show that we're graded
         # This isn't strictly speaking part of the grade step rendering,
...
@@ -13,6 +13,8 @@ import boto
 from boto.s3.key import Key
 from openassessment.fileupload import api
+from openassessment.xblock.data_conversion import create_submission_dict, prepare_submission_for_serialization

 class TestLeaderboardRender(XBlockHandlerTransactionTestCase):

     @scenario('data/basic_scenario.xml')
@@ -39,15 +41,20 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
     @scenario('data/leaderboard_show.xml')
     def test_show_submissions(self, xblock):
         # Create some submissions (but fewer than the max that can be shown)
         self._create_submissions_and_scores(xblock, [
-            ("test answer 1", 1),
-            ("test answer 2", 2)
+            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1),
+            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 2)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 2", "score": 2},
-            {"content": "test answer 1", "score": 1}
+            {"score": 2, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)
@@ -57,27 +64,38 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Create more submissions than the max
         self._create_submissions_and_scores(xblock, [
-            ("test answer 3", 0),
-            ("test answer 4", 10),
-            ("test answer 5", 3)
+            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0),
+            (prepare_submission_for_serialization(("test answer 4 part 1", "test answer 4 part 2")), 10),
+            (prepare_submission_for_serialization(("test answer 5 part 1", "test answer 5 part 2")), 3),
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 4", "score": 10},
-            {"content": "test answer 5", "score": 3},
-            {"content": "test answer 2", "score": 2}
+            {"score": 10, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 4 part 1", u"test answer 4 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 3, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 5 part 1", u"test answer 5 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 2, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)

     @scenario('data/leaderboard_show.xml')
     def test_show_submissions_that_have_greater_than_0_score(self, xblock):
         # Create some submissions (but fewer than the max that can be shown)
         self._create_submissions_and_scores(xblock, [
-            ("test answer 0", 0),
-            ("test answer 1", 1),
+            (prepare_submission_for_serialization(("test answer 0 part 1", "test answer 0 part 2")), 0),
+            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 1", "score": 1}
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )},
         ])
         self._assert_leaderboard_visible(xblock, True)
@@ -87,24 +105,31 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Create more submissions than the max
         self._create_submissions_and_scores(xblock, [
-            ("test answer 2", 10),
-            ("test answer 3", 0)
+            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 10),
+            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0)
         ])
         self._assert_scores(xblock, [
-            {"content": "test answer 2", "score": 10},
-            {"content": "test answer 1", "score": 1}
+            {"score": 10, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
+                xblock.prompts
+            )},
+            {"score": 1, "submission": create_submission_dict(
+                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
+                xblock.prompts
+            )}
         ])
         self._assert_leaderboard_visible(xblock, True)

     @scenario('data/leaderboard_show.xml')
     def test_no_text_key_submission(self, xblock):
+        self.maxDiff = None
         # Instead of using the default submission as a dict with "text",
         # make the submission a string.
         self._create_submissions_and_scores(xblock, [("test answer", 1)], submission_key=None)
         # It should still work
         self._assert_scores(xblock, [
-            {"content": "test answer", "score": 1}
+            {"score": 1}
         ])

     @mock_s3
@@ -123,7 +148,7 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         # Expect that we default to an empty string for content
         self._assert_scores(xblock, [
-            {"content": "", "score": 1, "file": ""}
+            {"submission": "", "score": 1, "file": ""}
         ])

     @mock_s3
@@ -142,16 +167,23 @@ class TestLeaderboardRender(XBlockHandlerTransactionTestCase):
         key.set_contents_from_string("How d'ya do?")
         downloadUrl = api.get_download_url("foo")
         # Create a image and text submission
-        self._create_submissions_and_scores(xblock, [({"text": "test answer", "file_key": "foo"}, 1)], submission_key=None)
+        submission = prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2"))
+        submission[u"file_key"] = "foo"
+        self._create_submissions_and_scores(xblock, [
+            (submission, 1)
+        ])
+        self.maxDiff = None
         # Expect that we retrieve both the text and the download URL for the file
         self._assert_scores(xblock, [
-            {"content": "test answer", "score": 1, "file": downloadUrl}
+            {"file": downloadUrl, "score": 1, "submission": create_submission_dict(
+                {"answer": submission},
+                xblock.prompts
+            )}
         ])

     def _create_submissions_and_scores(
             self, xblock, submissions_and_scores,
-            submission_key="text", points_possible=10
+            submission_key=None, points_possible=10
     ):
         """
         Create submissions and scores that should be displayed by the leaderboard.
...
@@ -12,6 +12,8 @@ from openassessment.workflow import api as workflow_api
 from openassessment.assessment.errors.ai import AIError, AIGradingInternalError
 from openassessment.fileupload.api import FileUploadInternalError
 from submissions import api as sub_api
+from openassessment.xblock.data_conversion import prepare_submission_for_serialization
 from openassessment.xblock.test.base import scenario, XBlockHandlerTestCase

 ALGORITHM_ID = 'fake'
@@ -168,7 +170,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
         bob_item = STUDENT_ITEM.copy()
         bob_item["item_id"] = xblock.scope_ids.usage_id
         # Create a submission for Bob, and corresponding workflow.
-        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
+        submission = sub_api.create_submission(
+            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
+        )
         peer_api.on_start(submission["uuid"])
         workflow_api.create_workflow(submission["uuid"], ['peer'])
@@ -191,7 +195,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         # Now Bob should be fully populated in the student info view.
         path, context = xblock.get_student_info_path_and_context("Bob")
-        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
+        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
         self.assertIsNone(context['self_assessment'])
         self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
@@ -205,7 +209,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
         bob_item = STUDENT_ITEM.copy()
         bob_item["item_id"] = xblock.scope_ids.usage_id
         # Create a submission for Bob, and corresponding workflow.
-        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
+        submission = sub_api.create_submission(
+            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
+        )
         peer_api.on_start(submission["uuid"])
         workflow_api.create_workflow(submission["uuid"], ['self'])
@@ -220,7 +226,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         )

         path, context = xblock.get_student_info_path_and_context("Bob")
-        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
+        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
         self.assertEquals([], context['peer_assessments'])
         self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
@@ -241,7 +247,9 @@ class TestCourseStaff(XBlockHandlerTestCase):
         bob_item = STUDENT_ITEM.copy()
         bob_item["item_id"] = xblock.scope_ids.usage_id
         # Create a submission for Bob, and corresponding workflow.
-        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
+        submission = sub_api.create_submission(
+            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
+        )
         peer_api.on_start(submission["uuid"])
         workflow_api.create_workflow(submission["uuid"], ['peer'])
@@ -253,7 +261,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
         )

         path, context = xblock.get_student_info_path_and_context("Bob")
-        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
+        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
         self.assertIsNotNone(context['workflow_cancellation'])
         self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
...
@@ -155,7 +155,19 @@ class StudentTrainingAssessTest(StudentTrainingTest):
         expected_context["training_num_completed"] = 1
         expected_context["training_num_current"] = 2
-        expected_context["training_essay"] = u"тєѕт αηѕωєя"
+        expected_context["training_essay"] = {
+            'answer': {
+                'parts': [
+                    {
+                        'text': u"тєѕт αηѕωєя",
+                        'prompt': {
+                            'description': u'Given the state of the world today, what do you think should be done to combat poverty?'
+                        }
+                    }
+                ]
+            }
+        }
         self.assert_path_and_context(xblock, expected_template, expected_context)

         resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
...