Commit 2731556a by Usman Khalid

Merge pull request #651 from edx/usman/tnl708-multiple-prompts-xml

Adds support for multiple prompts in problems.
parents 86c1e53a 2d19c1d7
@@ -6,6 +6,7 @@ from httplib import HTTPException
 from django.db import DatabaseError
 from dogapi import dog_stats_api
 from openassessment.assessment.models import (
+    essay_text_from_submission,
     AITrainingWorkflow, AIGradingWorkflow,
     ClassifierUploadError, ClassifierSerializeError,
     IncompleteClassifierSet, NoTrainingExamples,
@@ -197,19 +198,13 @@ def get_training_task_params(training_workflow_uuid):
         returned_examples = []
         for example in workflow.training_examples.all():
-            answer = example.answer
-            if isinstance(answer, dict):
-                text = answer.get('answer', '')
-            else:
-                text = answer
             scores = {
                 option.criterion.name: option.points
                 for option in example.options_selected.all()
             }
             returned_examples.append({
-                'text': text,
+                'text': essay_text_from_submission({'answer': example.answer}),
                 'scores': scores
             })
...
@@ -127,7 +127,7 @@ def validate_training_examples(rubric, examples):
         >>> ]
         >>>
         >>> rubric = {
-        >>>     "prompt": "Write an essay!",
+        >>>     "prompts": [{"description": "Write an essay!"}],
         >>>     "criteria": [
         >>>         {
         >>>             "order_num": 0,
@@ -146,14 +146,14 @@ def validate_training_examples(rubric, examples):
         >>>
         >>> examples = [
         >>>     {
-        >>>         'answer': u'Lorem ipsum',
+        >>>         'answer': {'parts': [{'text': u'Lorem ipsum'}]},
         >>>         'options_selected': {
         >>>             'vocabulary': 'good',
         >>>             'grammar': 'excellent'
         >>>         }
         >>>     },
         >>>     {
-        >>>         'answer': u'Doler',
+        >>>         'answer': {'parts': [{'text': u'Doler'}]},
         >>>         'options_selected': {
         >>>             'vocabulary': 'good',
         >>>             'grammar': 'poor'
@@ -312,7 +312,15 @@ def get_training_example(submission_uuid, rubric, examples):
         >>> examples = [
         >>>     {
-        >>>         'answer': u'Doler',
+        >>>         'answer': {
+        >>>             'parts': [
+        >>>                 {'text': 'Answer part 1'},
+        >>>                 {'text': 'Answer part 2'},
+        >>>                 {'text': 'Answer part 3'}
+        >>>             ]
+        >>>         },
         >>>         'options_selected': {
         >>>             'vocabulary': 'good',
         >>>             'grammar': 'poor'
@@ -322,9 +330,21 @@ def get_training_example(submission_uuid, rubric, examples):
         >>>
         >>> get_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", rubric, examples)
         {
-            'answer': u'Lorem ipsum',
+            'answer': {
+                'parts': [
+                    {'text': 'Answer part 1'},
+                    {'text': 'Answer part 2'},
+                    {'text': 'Answer part 3'}
+                ]
+            },
             'rubric': {
-                "prompt": "Write an essay!",
+                "prompts": [
+                    {"description": "Prompt 1"},
+                    {"description": "Prompt 2"},
+                    {"description": "Prompt 3"}
+                ],
                 "criteria": [
                     {
                         "order_num": 0,
...
@@ -40,6 +40,35 @@ CLASSIFIERS_CACHE_IN_FILE = getattr(
 )
+def essay_text_from_submission(submission):
+    """
+    Retrieve the submission text.
+
+    Submissions are arbitrary JSON blobs, which *should*
+    contain a single key, "answer", containing the essay
+    submission text. If not, though, assume we've been given
+    the essay text directly (convenient for testing).
+    """
+    if isinstance(submission, dict) and 'answer' in submission:
+        # Format used for the answer in training examples.
+        if isinstance(submission['answer'], unicode):
+            return submission['answer']
+        # Initially there was one prompt, and the submission had the structure
+        # {'answer': {'text': 'The text.'}}
+        elif 'text' in submission['answer']:
+            essay_text = submission['answer']['text']
+        # When multiple prompts were introduced, the structure of the submission became
+        # {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}}
+        # We concatenate the parts and let the AI grader evaluate the combined text.
+        else:
+            essay_text = u'\n'.join([part['text'] for part in submission['answer']['parts']])
+    else:
+        essay_text = unicode(submission)
+    return essay_text
 class IncompleteClassifierSet(Exception):
     """
     The classifier set is missing a classifier for a criterion in the rubric.
@@ -792,20 +821,10 @@ class AIGradingWorkflow(AIWorkflow):
         from openassessment.assessment.serializers import rubric_from_dict
         rubric = rubric_from_dict(rubric_dict)
-        # Retrieve the submission text
-        # Submissions are arbitrary JSON-blobs, which *should*
-        # contain a single key, "answer", containing the essay
-        # submission text. If not, though, assume we've been
-        # given the essay text directly (convenient for testing).
-        if isinstance(submission, dict):
-            essay_text = submission.get('answer')
-        else:
-            essay_text = unicode(submission)
         # Create the workflow
         workflow = cls.objects.create(
             submission_uuid=submission_uuid,
-            essay_text=essay_text,
+            essay_text=essay_text_from_submission(submission),
             algorithm_id=algorithm_id,
             student_id=submission['student_item']['student_id'],
             item_id=submission['student_item']['item_id'],
...
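The three answer shapes accepted by the new essay_text_from_submission helper can be summarized with a short sketch (values are illustrative; the expected results follow from the branches above and match the DataConversionTest cases later in this commit):

# Illustrative sketch of the three shapes essay_text_from_submission accepts.
essay_text_from_submission(u'plain essay')                          # u'plain essay'
essay_text_from_submission({'answer': {'text': u'one prompt'}})    # u'one prompt'
essay_text_from_submission(
    {'answer': {'parts': [{'text': u'part 1'}, {'text': u'part 2'}]}}
)                                                                   # u'part 1\npart 2'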
@@ -245,7 +245,7 @@ def rubric_from_dict(rubric_dict):
     Sample data (one criterion, two options)::
         {
-            "prompt": "Create a plan to deliver ora2!",
+            "prompts": [{"description": "Create a plan to deliver ora2!"}],
             "criteria": [
                 {
                     "order_num": 0,
...
@@ -107,7 +107,11 @@ def deserialize_training_examples(examples, rubric_dict):
         >>> ]
         >>>
         >>> rubric = {
-        >>>     "prompt": "Write an essay!",
+        >>>     "prompts": [
+        >>>         {"description": "Prompt 1"},
+        >>>         {"description": "Prompt 2"},
+        >>>         {"description": "Prompt 3"}
+        >>>     ],
         >>>     "criteria": [
         >>>         {
         >>>             "order_num": 0,
@@ -126,7 +130,15 @@ def deserialize_training_examples(examples, rubric_dict):
         >>>
         >>> examples = [
         >>>     {
-        >>>         'answer': u'Lorem ipsum',
+        >>>         'answer': {
+        >>>             'parts': [
+        >>>                 {'text': 'Answer part 1'},
+        >>>                 {'text': 'Answer part 2'},
+        >>>                 {'text': 'Answer part 3'}
+        >>>             ]
+        >>>         },
         >>>         'options_selected': {
         >>>             'vocabulary': 'good',
         >>>             'grammar': 'excellent'
...
@@ -10,7 +10,7 @@ STUDENT_ITEM = {
     'item_type': u'openassessment'
 }
-ANSWER = u'ẗëṡẗ äṅṡẅëṛ'
+ANSWER = {'text': u'ẗëṡẗ äṅṡẅëṛ'}
 RUBRIC_OPTIONS = [
     {
@@ -34,7 +34,7 @@ RUBRIC_OPTIONS = [
 ]
 RUBRIC = {
-    'prompt': u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ",
+    'prompts': [{"description": u"МоъЎ-ↁіск; оѓ, ГЂэ ЩЂаlэ"}],
     'criteria': [
         {
             "order_num": 0,
...
 {
-    "prompt": "Create a plan to deliver ora2!",
+    "prompts": [{"description": "Create a plan to deliver ora2!"}],
     "criteria": [
     ]
 }
 {
-    "prompt": "Create a plan to deliver ora2!",
+    "prompts": [{"description": "Create a plan to deliver ora2!"}],
     "criteria": [
         {
             "order_num": 0,
...
 {
-    "prompt": "Create a plan to deliver ora2!"
+    "prompts": [{"description": "Create a plan to deliver ora2!"}]
 }
 {
-    "prompt": "Create a plan to deliver ora2!",
+    "prompts": [{"description": "Create a plan to deliver ora2!"}],
     "criteria": [
         {
             "order_num": 0,
...
 {
-    "prompt": "Create a plan to deliver ora2!",
+    "prompts": [{"description": "Create a plan to deliver ora2!"}],
     "criteria": [
         {
             "order_num": 0,
...
 {
-    "prompt": "Create a plan to deliver ora2!",
+    "prompts": [{"description": "Create a plan to deliver ora2!"}],
     "criteria": [
         {
             "order_num": 0,
...
 {
     "valid": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -78,7 +78,7 @@
     "missing_options": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -148,7 +148,7 @@
     "extra_options": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -225,7 +225,7 @@
     "missing_and_extra_options": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -291,7 +291,7 @@
     "invalid_option_name": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -374,7 +374,7 @@
     "rubric_missing_options_list": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -397,7 +397,7 @@
     "rubric_missing_criteria_list": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}]
         },
         "examples": [
             {
@@ -413,7 +413,7 @@
     "example_missing_keys": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -478,7 +478,7 @@
     "feedback_only_criterion": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -518,7 +518,7 @@
     "feedback_only_criterion_extra_score": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 0,
@@ -561,7 +561,7 @@
     "feedback_only_all_criteria": {
         "rubric": {
-            "prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
+            "prompts": [{"description": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽"}],
             "criteria": [
                 {
                     "order_num": 1,
...
@@ -3,11 +3,14 @@
 Test AI Django models.
 """
 import copy
+import ddt
+from django.test import TestCase
 from django.test.utils import override_settings
 from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.models import (
     AIClassifierSet, AIClassifier, AIGradingWorkflow, AI_CLASSIFIER_STORAGE,
-    CLASSIFIERS_CACHE_IN_MEM
+    CLASSIFIERS_CACHE_IN_MEM, essay_text_from_submission
 )
 from openassessment.assessment.serializers import rubric_from_dict
 from .constants import RUBRIC
@@ -21,6 +24,19 @@ COURSE_ID = u"†3߆ çøU®ß3"
 ITEM_ID = u"fake_item_id"
+@ddt.ddt
+class DataConversionTest(TestCase):
+
+    @ddt.data(
+        (u'Answer', u'Answer'),
+        ({'answer': {'text': u'Answer'}}, u'Answer'),
+        ({'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}, u'Answer 1\nAnswer 2')
+    )
+    @ddt.unpack
+    def test_essay_text_from_submission(self, input, output):
+        self.assertEqual(essay_text_from_submission(input), output)
 class AIClassifierTest(CacheResetTest):
     """
     Tests for the AIClassifier model.
@@ -122,7 +138,7 @@ class AIGradingWorkflowTest(CacheResetTest):
         # Create a rubric with a similar structure, but different prompt
         similar_rubric_dict = copy.deepcopy(RUBRIC)
-        similar_rubric_dict['prompt'] = 'Different prompt!'
+        similar_rubric_dict['prompts'] = [{"description": 'Different prompt!'}]
         self.similar_rubric = rubric_from_dict(similar_rubric_dict)
     def test_assign_most_recent_classifier_set(self):
...
@@ -161,7 +161,7 @@ class RubricHashTest(CacheResetTest):
         # Same structure, but different text should have the same structure hash
         altered_rubric = copy.deepcopy(RUBRIC)
-        altered_rubric['prompt'] = 'altered!'
+        altered_rubric['prompts'] = [{"description": 'altered!'}]
         for criterion in altered_rubric['criteria']:
             criterion['prompt'] = 'altered!'
             for option in criterion['options']:
...
@@ -45,7 +45,7 @@ class Command(BaseCommand):
     ]
     RUBRIC = {
-        'prompt': u"Test prompt",
+        'prompts': [{"description": u"Test prompt"}],
         'criteria': [
             {
                 "order_num": 0,
@@ -91,7 +91,7 @@ class Command(BaseCommand):
     }
     STUDENT_ID = u'test_student'
-    ANSWER = {'answer': 'test answer'}
+    ANSWER = {"text": 'test answer'}
     def handle(self, *args, **options):
         """
...
 {% load i18n %}
 {% load tz %}
 {% spaceless %}
-<div id="openassessment-editor" class="editor-with-buttons editor-with-tabs">
+<div
+    id="openassessment-editor"
+    class="editor-with-buttons editor-with-tabs"
+    data-is-released="{{ is_released|lower }}"
+>
 <div class="openassessment_editor_content_and_tabs">
 <div id="openassessment_editor_header">
@@ -25,9 +29,7 @@
     </a>
 </div>
-<div id="oa_prompt_editor_wrapper" class="oa_editor_content_wrapper">
-    <textarea id="openassessment_prompt_editor" maxlength="10000">{{ prompt }}</textarea>
-</div>
+{% include "openassessmentblock/edit/oa_edit_prompts.html" %}
 {% include "openassessmentblock/edit/oa_edit_rubric.html" %}
...
 {% load i18n %}
 {% spaceless %}
 <li class="openassessment_criterion is-collapsible" data-criterion="{{ criterion_name }}">
-    <div class="openassessment_criterion_header view-outline">
+    <div class="openassessment_container_header openassessment_criterion_header view-outline">
         <a class="action expand-collapse collapse"><i class="icon-caret-down ui-toggle-expansion"></i></a>
-        <div class="openassessment_criterion_header_title_box">
-            <h6 class="openassessment_criterion_header_title">{% trans "Criterion" %}</h6>
-            <p class="openassessment_criterion_guide">{% trans "You cannot delete a criterion after the assignment has been released." %}</p>
+        <div class="openassessment_container_header_title_box openassessment_criterion_header_title_box">
+            <h6 class="openassessment_container_header_title openassessment_criterion_header_title">{% trans "Criterion" %}</h6>
+            <p class="openassessment_container_guide openassessment_criterion_guide">{% trans "You cannot delete a criterion after the assignment has been released." %}</p>
         </div>
-        <div class="openassessment_criterion_remove_button"><h2>{% trans "Remove" %}</h2></div>
+        <div class="openassessment_container_remove_button openassessment_criterion_remove_button"><h2>{% trans "Remove" %}</h2></div>
     </div>
     <div class="openassessment_criterion_body wrapper-comp-settings">
         <input type="hidden" class="openassessment_criterion_name" value="{{ criterion_name }}" />
...
{% load i18n %}
{% spaceless %}
<li class="openassessment_prompt is-collapsible" data-prompt="{{ prompt_uuid }}">
<div class="openassessment_container_header openassessment_prompt_header view-outline">
<a class="action expand-collapse collapse"><i class="icon-caret-down ui-toggle-expansion"></i></a>
<div class="openassessment_container_header_title_box openassessment_prompt_header_title_box">
<h6 class="openassessment_container_header_title openassessment_prompt_header_title">{% trans "Prompt" %}</h6>
<p class="openassessment_container_guide openassessment_prompt_guide">{% trans "You cannot delete a prompt after the assignment has been released." %}</p>
</div>
<div class="openassessment_container_remove_button openassessment_prompt_remove_button"><h2>{% trans "Remove" %}</h2></div>
</div>
<div class="openassessment_prompt_body wrapper-comp-settings">
<input type="hidden" class="openassessment_prompt_uuid" value="{{ prompt_uuid }}" />
<ul class="list-input settings-list">
<li class="field comp-setting-entry openassessment_prompt_description_wrapper">
<div class="wrapper-comp-settings">
<textarea class="openassessment_prompt_description setting-input" maxlength="10000">{{ prompt_description }}</textarea>
</div>
</li>
</ul>
</div>
</li>
{% endspaceless %}
{% load i18n %}
{% spaceless %}
<div id="oa_prompts_editor_wrapper" class="oa_editor_content_wrapper">
<div id="openassessment_prompt_template" class="is--hidden">
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid="" prompt_description="" %}
</div>
<div id="openassessment_prompts_instructions" class="openassessment_tab_instructions">
<p class="openassessment_description">
{% trans "Prompts. Replace the sample text with your own text. For more information, see the ORA documentation." %}
</p>
</div>
<ul id="openassessment_prompts_list">
{% for prompt in prompts %}
{% include "openassessmentblock/edit/oa_edit_prompt.html" with prompt_uuid=prompt.uuid prompt_description=prompt.description %}
{% endfor %}
</ul>
<div id="openassessment_prompts_add_prompt" class="openassessment_container_add_button">
<h6>
{% trans "Add Prompt" %}
</h6>
</div>
</div>
{% endspaceless %}
@@ -9,7 +9,7 @@
         {% include "openassessmentblock/edit/oa_edit_option.html" with option_name="" option_label="" option_points=1 option_explanation="" %}
     </div>
-    <div id="openassessment_rubric_instructions">
+    <div id="openassessment_rubric_instructions" class="openassessment_tab_instructions">
         <p class="openassessment_description">
             {% trans "Rubrics are made up of criteria, which usually contain one or more options. Each option has a point value. This template contains two sample criteria and their options. Replace the sample text with your own text. For more information, see the ORA documentation." %}
         </p>
@@ -21,7 +21,7 @@
         {% endfor %}
     </ul>
-    <div id="openassessment_rubric_add_criterion">
+    <div id="openassessment_rubric_add_criterion" class="openassessment_container_add_button">
         <h6>
             {% trans "Add Criterion" %}
         </h6>
...
@@ -33,6 +33,11 @@
         <ol id="openassessment_training_example_criterion_template" class="is--hidden">
             {% include "openassessmentblock/edit/oa_training_example_criterion.html" %}
         </ol>
+        <ol id="openassessment_training_example_part_template" class="is--hidden">
+            <li class="openassessment_training_example_essay_part">
+                <textarea maxlength="100000"></textarea>
+            </li>
+        </ol>
     </div>
 </li>
...
@@ -28,7 +28,13 @@
     <div class="openassessment_training_example_essay_wrapper">
         <h2>{% trans "Response" %}</h2>
-        <textarea class="openassessment_training_example_essay" maxlength="100000">{{ example.answer }}</textarea>
+        <ol class="openassessment_training_example_essay">
+            {% for part in example.answer.parts %}
+            <li class="openassessment_training_example_essay_part">
+                <textarea maxlength="100000">{{ part.text }}</textarea>
+            </li>
+            {% endfor %}
+        </ol>
     </div>
 </div>
 </li>
...
@@ -20,7 +20,7 @@
     {% if topscore.file %}
         <img class="leaderboard__score__image" alt="{% trans "The image associated with your peer's submission." %}" src="{{ topscore.file }}" />
     {% endif %}
-    {{ topscore.content|linebreaks }}
+    {% include "openassessmentblock/oa_submission_answer.html" with answer=topscore.submission.answer answer_text_label="Your peer's response to the question above:" %}
 </div>
 </li>
 {% endfor %}
...
@@ -15,18 +15,6 @@
     {% endblock %}
 </div>
-<div class="wrapper--openassessment__prompt">
-    {% if question %}
-    <article class="openassessment__prompt ui-toggle-visibility">
-        <h2 class="openassessment__prompt__title">{% trans "The prompt for this assignment" %}</h2>
-        <div class="openassessment__prompt__copy ui-toggle-visibility__content">
-            {{ question|linebreaks }}
-        </div>
-    </article>
-    {% endif %}
-</div>
 <ol class="openassessment__steps" id="openassessment__steps">
     {% for assessment in rubric_assessments %}
     <li id="{{ assessment.class_id }}" class="openassessment__steps__step is--loading">
...
{% spaceless %}
{% load i18n %}
<ol class="submission__answer__display__content">
{% for part in answer.parts %}
<li class="submission__answer__part">
<article class="submission__answer__part__prompt">
<h2 class="submission__answer__part__prompt__title">{% trans "The question for this section." %}</h2>
<div class="submission__answer__part__prompt__value">
{{ part.prompt.description|linebreaks }}
</div>
</article>
<div class="submission__answer__part__text">
<h2 class="submission__answer__part__text__title">{% trans answer_text_label %}</h2>
<div class="submission__answer__part__text__value">
{{ part.text|linebreaks }}
</div>
</div>
</li>
{% endfor %}
</ol>
{% endspaceless %}
@@ -66,9 +66,7 @@
         </h3>
     </header>
-    <div class="peer-assessment__display__response">
-        {{ peer_submission.answer.text|linebreaks }}
-    </div>
+    {% include "openassessmentblock/oa_submission_answer.html" with answer=peer_submission.answer answer_text_label="Your peer's response to the question above:" %}
     {% if allow_file_upload and peer_file_url %}
     <header class="peer-assessment__display__header">
...
@@ -50,9 +50,7 @@
         </h3>
     </header>
-    <div class="peer-assessment__display__response">
-        {{ peer_submission.answer.text|linebreaks }}
-    </div>
+    {% include "openassessmentblock/oa_submission_answer.html" with answer=peer_submission.answer answer_text_label="Your peer's response to the question above:" %}
     {% if allow_file_upload and peer_file_url %}
     <header class="peer-assessment__display__header">
...
@@ -52,15 +52,26 @@
     <div class="step__content">
         <form id="response__submission" class="response__submission">
             <ol class="list list--fields response__submission__content">
-                <li class="field field--textarea submission__answer" id="submission__answer">
-                    <label class="sr" for="submission__answer__value">{% trans "Enter your response to the question." %}</label>
-                    <textarea
-                        id="submission__answer__value"
-                        placeholder=""
-                        maxlength="100000"
-                    >{{ saved_response }}</textarea>
-                    <span class="tip">{% trans "You may continue to work on your response until you submit it." %}</span>
-                </li>
+                {% for part in saved_response.answer.parts %}
+                <li class="submission__answer__part">
+                    <article class="submission__answer__part__prompt ui-toggle-visibility">
+                        <h2 class="submission__answer__part__prompt__title">{% trans "The prompt for this section." %}</h2>
+                        <div class="submission__answer__part__prompt__copy ui-toggle-visibility__content">
+                            {{ part.prompt.description|linebreaks }}
+                        </div>
+                    </article>
+                    <div class="field field--textarea submission__answer__part__text">
+                        <label class="sr" for="submission__answer__part__text__{{ forloop.counter }}">{% trans "Your response to the question above." %}</label>
+                        <textarea
+                            id="submission__answer__part__text__{{ forloop.counter }}"
+                            class="submission__answer__part__text__value"
+                            placeholder="Enter your response to the question above."
+                            maxlength="100000"
+                        >{{ part.text }}</textarea>
+                    </div>
+                </li>
+                {% endfor %}
                 {% if allow_file_upload %}
                 <li class="field">
                     <div id="upload__error">
@@ -86,6 +97,8 @@
                 {% endif %}
             </ol>
+            <span class="tip">{% trans "You may continue to work on your response until you submit it." %}</span>
             <div class="response__submission__actions">
                 <div class="message message--inline message--error message--error-server">
                     <h3 class="message__title">{% trans "We could not save your progress" %}</h3>
...
@@ -42,9 +42,7 @@
     <article class="submission__answer__display">
         <h3 class="submission__answer__display__title">{% trans "Your Response" %}</h3>
-        <div class="submission__answer__display__content">
-            {{ student_submission.answer.text|linebreaks }}
-        </div>
+        {% include "openassessmentblock/oa_submission_answer.html" with answer=student_submission.answer answer_text_label="Your response to the question above:" %}
         {% if allow_file_upload and file_url %}
         <h3 class="submission__answer__display__title">{% trans "Your Image" %}</h3>
...
...@@ -43,9 +43,7 @@ ...@@ -43,9 +43,7 @@
<h3 class="self-assessment__display__title">{% trans "Your Response" %}</h3> <h3 class="self-assessment__display__title">{% trans "Your Response" %}</h3>
</header> </header>
<div class="self-assessment__display__response"> {% include "openassessmentblock/oa_submission_answer.html" with answer=self_submission.answer answer_text_label="Your response to the question above:" %}
{{ self_submission.answer.text|linebreaks }}
</div>
{% if allow_file_upload and self_file_url %} {% if allow_file_upload and self_file_url %}
<header class="self-assessment__display__header"> <header class="self-assessment__display__header">
......
@@ -21,7 +21,7 @@
     Comments: {{ comments }}
     {% endblocktrans %}
 {% else %}
-    {{ submission.answer.text|linebreaks }}
+    {% include "openassessmentblock/oa_submission_answer.html" with answer=submission.answer answer_text_label="The student's response to the question above:" %}
 {% endif %}
 </div>
...
@@ -72,9 +72,8 @@
     {% endwith %}
 </header>
-<div class="student-training__display__response">
-    {{ training_essay|linebreaks }}
-</div>
+{% include "openassessmentblock/oa_submission_answer.html" with answer=training_essay.answer answer_text_label="The response to the question above:" %}
 </article>
 <form id="student-training--001__assessment" class="student-training__assessment" method="post">
...
@@ -32,6 +32,9 @@ RUBRIC_DICT = {
     ]
 }
+ANSWER_1 = {"text": "Shoot Hot Rod"}
+ANSWER_2 = {"text": "Ultra Magnus fumble"}
 ALGORITHM_ID = "Ease"
 ON_INIT_PARAMS = {
@@ -64,7 +67,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         first_step = data["steps"][0] if data["steps"] else "peer"
         if "ai" in data["steps"]:
             first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
         workflow_keys = set(workflow.keys())
@@ -147,7 +150,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
         self.assertEquals("waiting", workflow['status'])
     def test_update_peer_workflow(self):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"], ON_INIT_PARAMS)
         StudentTrainingWorkflow.create_workflow(submission_uuid=submission["uuid"])
         requirements = {
@@ -200,7 +203,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @patch.object(ai_api, 'assessment_is_finished')
     @patch.object(ai_api, 'get_score')
     def test_ai_score_set(self, mock_score, mock_is_finished):
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         mock_is_finished.return_value = True
         score = {"points_earned": 7, "points_possible": 10}
         mock_score.return_value = score
@@ -213,7 +216,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @ddt.unpack
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         on_init_params = {
             'ai': {
                 'rubric': rubric,
@@ -226,7 +229,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_ai_on_init_failures(self, mock_on_init):
         mock_on_init.side_effect = AIError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
     @patch.object(Submission.objects, 'get')
@@ -241,14 +244,14 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_workflow_errors_wrapped(self, data, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], data["steps"], ON_INIT_PARAMS)
     @patch.object(PeerWorkflow.objects, 'get_or_create')
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_peer_workflow_errors_wrapped(self, mock_create):
         mock_create.side_effect = DatabaseError("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.create_workflow(submission["uuid"], ["peer", "self"], ON_INIT_PARAMS)
     @patch.object(AssessmentWorkflow.objects, 'get')
@@ -256,7 +259,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     @raises(workflow_api.AssessmentWorkflowInternalError)
     def test_unexpected_exception_wrapped(self, data, mock_create):
         mock_create.side_effect = Exception("Kaboom!")
-        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
         workflow_api.update_from_assessments(submission["uuid"], data["steps"])
     @ddt.file_data('data/assessments.json')
@@ -363,7 +366,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -403,7 +406,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_cancel_the_assessment_workflow_does_not_exist(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
@@ -432,7 +435,7 @@ class TestAssessmentWorkflowApi(CacheResetTest):
     def test_get_the_cancelled_workflow(self):
         # Create the submission and assessment workflow.
-        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
+        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
         workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])
         requirements = {
...
@@ -2,6 +2,7 @@
 Data Conversion utility methods for handling ORA2 XBlock data transformations.
 """
+import json
 def convert_training_examples_list_to_dict(examples_list):
@@ -19,7 +20,15 @@ def convert_training_examples_list_to_dict(examples_list):
     Example:
         >>> examples = [
         >>>     {
-        >>>         "answer": "This is my response",
+        >>>         "answer": {
+        >>>             "parts": [
+        >>>                 {"text": "Answer part 1"},
+        >>>                 {"text": "Answer part 2"},
+        >>>                 {"text": "Answer part 3"}
+        >>>             ]
+        >>>         },
         >>>         "options_selected": [
         >>>             {
         >>>                 "criterion": "Ideas",
@@ -35,7 +44,15 @@ def convert_training_examples_list_to_dict(examples_list):
         >>> convert_training_examples_list_to_dict(examples)
         [
             {
-                'answer': 'This is my response',
+                'answer': {
+                    'parts': [
+                        {'text': 'Answer part 1'},
+                        {'text': 'Answer part 2'},
+                        {'text': 'Answer part 3'}
+                    ]
+                },
                 'options_selected': {
                     'Ideas': 'Fair',
                     'Content': 'Good'
...@@ -56,13 +73,63 @@ def convert_training_examples_list_to_dict(examples_list): ...@@ -56,13 +73,63 @@ def convert_training_examples_list_to_dict(examples_list):
] ]
def create_rubric_dict(prompt, criteria): def update_assessments_format(assessments):
"""
For each example update 'answer' to newer format.
Args:
assessments (list): list of assessments
Returns:
list of dict
"""
for assessment in assessments:
if 'examples' in assessment:
for example in assessment['examples']:
if isinstance(example['answer'], unicode) or isinstance(example['answer'], str):
example['answer'] = {
'parts': [
{'text': example['answer']}
]
}
return assessments
def create_prompts_list(prompt_or_serialized_prompts):
"""
Construct a list of prompts.
Initially a block had a single prompt which was saved as a simple string.
In that case a new prompt dict is constructed from it.
Args:
prompt_or_serialized_prompts (unicode): A string which can either
be a single prompt text or json for a list of prompts.
Returns:
list of dict
"""
if prompt_or_serialized_prompts is None:
prompt_or_serialized_prompts = ''
try:
prompts = json.loads(prompt_or_serialized_prompts)
except ValueError:
prompts = [
{
'description': prompt_or_serialized_prompts,
}
]
return prompts
def create_rubric_dict(prompts, criteria):
""" """
Construct a serialized rubric model in the format expected Construct a serialized rubric model in the format expected
by the assessments app. by the assessments app.
Args: Args:
prompt (unicode): The rubric prompt. prompts (list of dict): The rubric prompts.
criteria (list of dict): The serialized rubric criteria. criteria (list of dict): The serialized rubric criteria.
Returns: Returns:
...@@ -70,7 +137,7 @@ def create_rubric_dict(prompt, criteria): ...@@ -70,7 +137,7 @@ def create_rubric_dict(prompt, criteria):
""" """
return { return {
"prompt": prompt, "prompts" : prompts,
"criteria": criteria "criteria": criteria
} }
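A quick sketch of the expected behavior of create_prompts_list, which makes the legacy and serialized formats interchangeable (values are illustrative):

# create_prompts_list accepts either a legacy plain-string prompt or the
# JSON-serialized list that the block now stores in the same field.
create_prompts_list(u'Write an essay!')
# => [{'description': u'Write an essay!'}]
create_prompts_list(u'[{"description": "Prompt 1"}, {"description": "Prompt 2"}]')
# => [{u'description': u'Prompt 1'}, {u'description': u'Prompt 2'}]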
@@ -96,6 +163,46 @@ def clean_criterion_feedback(rubric_criteria, criterion_feedback):
     }
+def prepare_submission_for_serialization(submission_data):
+    """
+    Convert a list of answers into the right dict format for serialization.
+
+    Args:
+        submission_data (list of unicode): The answers.
+
+    Returns:
+        dict
+    """
+    return {
+        'parts': [{'text': text} for text in submission_data],
+    }
+
+
+def create_submission_dict(submission, prompts):
+    """
+    1. Convert the answer from the legacy format if necessary.
+    2. Add prompts to submission['answer']['parts'] to simplify iteration in the template.
+
+    Args:
+        submission (dict): Submission dictionary.
+        prompts (list of dict): The prompts from the problem definition.
+
+    Returns:
+        dict
+    """
+    parts = [{'prompt': prompt, 'text': ''} for prompt in prompts]
+    if 'text' in submission['answer']:
+        parts[0]['text'] = submission['answer'].pop('text')
+    else:
+        for index, part in enumerate(submission['answer'].pop('parts')):
+            parts[index]['text'] = part['text']
+    submission['answer']['parts'] = parts
+    return submission
 def make_django_template_key(key):
     """
     Django templates access dictionary items using dot notation,
...
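The save/display round trip through these two helpers looks roughly like this (a sketch with made-up values; `prompts` comes from the block's problem definition):

# On save: a list of per-prompt answers becomes the serialized 'parts' dict.
prompts = [{'description': u'Prompt 1'}, {'description': u'Prompt 2'}]
answer = prepare_submission_for_serialization([u'Essay 1', u'Essay 2'])
# => {'parts': [{'text': u'Essay 1'}, {'text': u'Essay 2'}]}

# On display: each part is paired with its prompt for the template loop.
submission = create_submission_dict({'answer': answer}, prompts)
# => {'answer': {'parts': [
#        {'prompt': {'description': u'Prompt 1'}, 'text': u'Essay 1'},
#        {'prompt': {'description': u'Prompt 2'}, 'text': u'Essay 2'}]}}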
@@ -4,9 +4,12 @@ Leaderboard step in the OpenAssessment XBlock.
 from django.utils.translation import ugettext as _
 from xblock.core import XBlock
-from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from submissions import api as sub_api
+from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
 from openassessment.fileupload import api as file_upload_api
+from openassessment.xblock.data_conversion import create_submission_dict
 class LeaderboardMixin(object):
     """Leaderboard Mixin introduces all handlers for displaying the leaderboard
@@ -72,13 +75,16 @@ class LeaderboardMixin(object):
         for score in scores:
             if 'file_key' in score['content']:
                 score['file'] = file_upload_api.get_download_url(score['content']['file_key'])
-            if 'text' in score['content']:
-                score['content'] = score['content']['text']
+            if 'text' in score['content'] or 'parts' in score['content']:
+                submission = {'answer': score.pop('content')}
+                score['submission'] = create_submission_dict(submission, self.prompts)
             elif isinstance(score['content'], basestring):
                 pass
             # Currently, we do not handle non-text submissions.
             else:
-                score['content'] = ""
+                score['submission'] = ""
+                score.pop('content', None)
         context = { 'topscores': scores,
                     'allow_latex': self.allow_latex,
...
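The effect on a single leaderboard entry, sketched with hypothetical values (the raw 'content' key is replaced by a template-ready 'submission'):

# A top score whose content is a multi-part answer, with one prompt configured.
score = {'score': 10, 'content': {'parts': [{'text': u'Winning essay'}]}}
# After the loop body above runs:
# score == {'score': 10,
#           'submission': {'answer': {'parts': [
#               {'prompt': {'description': u'Prompt 1'}, 'text': u'Winning essay'}]}}}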
"""An XBlock where students can read a question and compose their response""" """An XBlock where students can read a question and compose their response"""
import copy
import datetime as dt import datetime as dt
import json
import logging import logging
import pkg_resources import pkg_resources
import copy
import pytz import pytz
...@@ -31,7 +32,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError ...@@ -31,7 +32,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import create_rubric_dict from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -144,7 +145,7 @@ class OpenAssessmentBlock( ...@@ -144,7 +145,7 @@ class OpenAssessmentBlock(
prompt = String( prompt = String(
default=DEFAULT_PROMPT, default=DEFAULT_PROMPT,
scope=Scope.content, scope=Scope.content,
help="A prompt to display to a student (plain text)." help="The prompts to display to a student."
) )
rubric_criteria = List( rubric_criteria = List(
...@@ -283,7 +284,7 @@ class OpenAssessmentBlock( ...@@ -283,7 +284,7 @@ class OpenAssessmentBlock(
# All data we intend to pass to the front end. # All data we intend to pass to the front end.
context_dict = { context_dict = {
"title": self.title, "title": self.title,
"question": self.prompt, "prompts": self.prompts,
"rubric_assessments": ui_models, "rubric_assessments": ui_models,
"show_staff_debug_info": self.is_course_staff and not self.in_studio_preview, "show_staff_debug_info": self.is_course_staff and not self.in_studio_preview,
} }
...@@ -418,7 +419,7 @@ class OpenAssessmentBlock( ...@@ -418,7 +419,7 @@ class OpenAssessmentBlock(
xblock_validator = validator(block, block._, strict_post_release=False) xblock_validator = validator(block, block._, strict_post_release=False)
xblock_validator( xblock_validator(
create_rubric_dict(config['prompt'], config['rubric_criteria']), create_rubric_dict(config['prompts'], config['rubric_criteria']),
config['rubric_assessments'], config['rubric_assessments'],
submission_start=config['submission_start'], submission_start=config['submission_start'],
submission_due=config['submission_due'], submission_due=config['submission_due'],
...@@ -432,7 +433,7 @@ class OpenAssessmentBlock( ...@@ -432,7 +433,7 @@ class OpenAssessmentBlock(
block.submission_start = config['submission_start'] block.submission_start = config['submission_start']
block.submission_due = config['submission_due'] block.submission_due = config['submission_due']
block.title = config['title'] block.title = config['title']
block.prompt = config['prompt'] block.prompts = config['prompts']
block.allow_file_upload = config['allow_file_upload'] block.allow_file_upload = config['allow_file_upload']
block.allow_latex = config['allow_latex'] block.allow_latex = config['allow_latex']
block.leaderboard_show = config['leaderboard_show'] block.leaderboard_show = config['leaderboard_show']
...@@ -445,6 +446,40 @@ class OpenAssessmentBlock( ...@@ -445,6 +446,40 @@ class OpenAssessmentBlock(
return i18nService.ugettext return i18nService.ugettext
+    @property
+    def prompts(self):
+        """
+        Return the prompts.
+
+        Initially a block had a single prompt, which was saved as a simple
+        string in the prompt field. Now prompts are saved as a serialized
+        list of dicts in the same field. If the prompt field contains valid
+        JSON, parse and return it. Otherwise, assume it is a simple string
+        prompt and return it wrapped in a list of one dict.
+
+        Returns:
+            list of dict
+        """
+        return create_prompts_list(self.prompt)
+
+    @prompts.setter
+    def prompts(self, value):
+        """
+        Serialize the prompts and save them to the prompt field.
+
+        Args:
+            value (list of dict): The prompts to set.
+        """
+        if value is None:
+            self.prompt = None
+        elif len(value) == 1:
+            # For backwards compatibility. To be removed after all code
+            # is migrated to use the prompts property instead of the prompt field.
+            self.prompt = value[0]['description']
+        else:
+            self.prompt = json.dumps(value)
@property
def valid_assessments(self): def valid_assessments(self):
""" """
Return a list of assessment dictionaries that we recognize. Return a list of assessment dictionaries that we recognize.
...@@ -456,10 +491,11 @@ class OpenAssessmentBlock( ...@@ -456,10 +491,11 @@ class OpenAssessmentBlock(
list list
""" """
return [ _valid_assessments = [
asmnt for asmnt in self.rubric_assessments asmnt for asmnt in self.rubric_assessments
if asmnt.get('name') in VALID_ASSESSMENT_TYPES if asmnt.get('name') in VALID_ASSESSMENT_TYPES
] ]
return update_assessments_format(copy.deepcopy(_valid_assessments))
@property @property
def assessment_steps(self): def assessment_steps(self):
......
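A short sketch of how the prompts property round-trips the two storage formats (hypothetical `block`; the behavior follows from the property and setter above):

# Single prompt: stored as a plain string for backwards compatibility.
block.prompts = [{'description': u'Only prompt'}]
block.prompt    # => u'Only prompt'
# Multiple prompts: stored as a JSON-serialized list in the same field.
block.prompts = [{'description': u'P1'}, {'description': u'P2'}]
block.prompt    # => u'[{"description": "P1"}, {"description": "P2"}]'
block.prompts   # => [{u'description': u'P1'}, {u'description': u'P2'}]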
@@ -12,7 +12,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError
 from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
 from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE
-from .data_conversion import clean_criterion_feedback
+from .data_conversion import clean_criterion_feedback, create_submission_dict
 logger = logging.getLogger(__name__)
@@ -86,7 +86,7 @@ class PeerAssessmentMixin(object):
                 data['options_selected'],
                 clean_criterion_feedback(self.rubric_criteria_with_labels, data['criterion_feedback']),
                 data['overall_feedback'],
-                create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
+                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                 assessment_ui_model['must_be_graded_by']
             )
@@ -231,7 +231,7 @@ class PeerAssessmentMixin(object):
             peer_sub = self.get_peer_submission(student_item, assessment)
             if peer_sub:
                 path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
-                context_dict["peer_submission"] = peer_sub
+                context_dict["peer_submission"] = create_submission_dict(peer_sub, self.prompts)
                 # Determine if file upload is supported for this XBlock.
                 context_dict["allow_file_upload"] = self.allow_file_upload
@@ -247,7 +247,7 @@ class PeerAssessmentMixin(object):
             peer_sub = self.get_peer_submission(student_item, assessment)
             if peer_sub:
                 path = 'openassessmentblock/peer/oa_peer_assessment.html'
-                context_dict["peer_submission"] = peer_sub
+                context_dict["peer_submission"] = create_submission_dict(peer_sub, self.prompts)
                 # Determine if file upload is supported for this XBlock.
                 context_dict["allow_file_upload"] = self.allow_file_upload
                 context_dict["peer_file_url"] = self.get_download_url_from_submission(peer_sub)
...
@@ -66,7 +66,11 @@ VALID_ASSESSMENT_TYPES = [
 # Schema definition for an update from the Studio JavaScript editor.
 EDITOR_UPDATE_SCHEMA = Schema({
-    Required('prompt'): utf8_validator,
+    Required('prompts'): [
+        Schema({
+            Required('description'): utf8_validator,
+        })
+    ],
     Required('title'): utf8_validator,
     Required('feedback_prompt'): utf8_validator,
     Required('feedback_default_text'): utf8_validator,
@@ -84,7 +88,7 @@ EDITOR_UPDATE_SCHEMA = Schema({
         'must_be_graded_by': All(int, Range(min=0)),
         'examples': [
             Schema({
-                Required('answer'): utf8_validator,
+                Required('answer'): [utf8_validator],
                 Required('options_selected'): [
                     Schema({
                         Required('criterion'): utf8_validator,
...
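For illustration, editor-update fragments accepted by the updated schema pieces might look like this (a sketch assuming voluptuous's list-validation semantics, where a list in a schema validates each element; the values are invented):

prompts = [
    {'description': u'Compare the two passages.'},
    {'description': u'Support your claim with evidence.'},
]
# A training example's answer is now a list of per-prompt strings
# rather than a single string:
answer = [u'Response to prompt 1', u'Response to prompt 2']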
@@ -8,7 +8,7 @@ from openassessment.workflow import api as workflow_api
 from submissions import api as submission_api
 from .data_conversion import create_rubric_dict
 from .resolve_dates import DISTANT_FUTURE
-from .data_conversion import clean_criterion_feedback
+from .data_conversion import clean_criterion_feedback, create_submission_dict
 logger = logging.getLogger(__name__)
@@ -88,7 +88,7 @@ class SelfAssessmentMixin(object):
         submission = submission_api.get_submission(self.submission_uuid)
         context["rubric_criteria"] = self.rubric_criteria_with_labels
         context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
-        context["self_submission"] = submission
+        context["self_submission"] = create_submission_dict(submission, self.prompts)
         # Determine if file upload is supported for this XBlock.
         context["allow_file_upload"] = self.allow_file_upload
@@ -133,7 +133,7 @@ class SelfAssessmentMixin(object):
             data['options_selected'],
             clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
             data['overall_feedback'],
-            create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
+            create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
         )
         self.publish_assessment_event("openassessmentblock.self_assess", assessment)
...
@@ -16,7 +16,7 @@ from openassessment.workflow.errors import (
from openassessment.assessment.errors.ai import AIError
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import (
-    create_rubric_dict, convert_training_examples_list_to_dict
    create_rubric_dict, convert_training_examples_list_to_dict, create_submission_dict
)
from submissions import api as submission_api
from openassessment.assessment.api import peer as peer_api
@@ -138,7 +138,7 @@ class StaffInfoMixin(object):
        context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
        if display_ai_staff_info:
            context['classifierset'] = ai_api.get_classifier_set_info(
-               create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                example_based_assessment['algorithm_id'],
                student_item['course_id'],
                student_item['item_id']
@@ -179,7 +179,7 @@ class StaffInfoMixin(object):
        examples = assessment["examples"]
        try:
            workflow_uuid = ai_api.train_classifiers(
-               create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                convert_training_examples_list_to_dict(examples),
                student_item_dict.get('course_id'),
                student_item_dict.get('item_id'),
@@ -285,7 +285,7 @@ class StaffInfoMixin(object):
        workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
-           'submission': submission,
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
......
[Two file diffs omitted here: too large to display in this view; view the blobs instead.]
@@ -86,7 +86,12 @@
{
    "template": "openassessmentblock/response/oa_response.html",
    "context": {
-       "saved_response": "",
        "saved_response": {"answer":
            {"parts": [
                { "text": "", "prompt": { "description": "Prompt 1" }},
                { "text": "", "prompt": { "description": "Prompt 2" }}
            ]}
        },
        "save_status": "This response has not been saved.",
        "submit_enabled": false,
        "submission_due": ""
@@ -96,7 +101,18 @@
{
    "template": "openassessmentblock/student_training/student_training.html",
    "context": {
-       "training_essay": "My special essay.",
        "training_essay": {
            "answer": {
                "parts": [
                    {
                        "prompt": {
                            "description": "Given the state of the world today, what do you think should be done to combat poverty?"
                        },
                        "text": "My special essay."
                    }
                ]
            }
        },
        "training_rubric": {
            "criteria": [
                {
@@ -398,7 +414,7 @@
{
    "template": "openassessmentblock/edit/oa_edit.html",
    "context": {
-       "prompt": "How much do you like waffles?",
        "prompts": [{ "description": "How much do you like waffles?" }, { "description": "How much do you like waffles 2?" }],
        "title": "The most important of all questions.",
        "submission_start": "2014-01-02T12:15",
        "submission_due": "2014-10-01T04:53",
@@ -479,7 +495,7 @@
{
    "template": "openassessmentblock/edit/oa_edit.html",
    "context": {
-       "prompt": "Test prompt",
        "prompts": [{ "description": "How much do you like waffles?" }, { "description": "How much do you like waffles 2?" }],
        "title": "Test title",
        "submission_start": "2014-01-1T10:00:00",
        "submission_due": "2014-10-1T10:00:00",
@@ -523,7 +539,12 @@
        "training": {
            "examples": [
                {
-                   "answer": "Test answer",
                    "answer": {
                        "parts": [
                            { "text": "Test answer 1"},
                            { "text": "Test answer 2"}
                        ]
                    },
                    "criteria": [
                        {
                            "name": "criterion_with_two_options",
@@ -564,7 +585,12 @@
                }
            ],
            "template": {
-               "answer": "",
                "answer": {
                    "parts": [
                        { "text": ""},
                        { "text": ""}
                    ]
                },
                "criteria": [
                    {
                        "name": "criterion_with_two_options",
......
@@ -127,23 +127,29 @@ describe("OpenAssessment.ResponseView", function() {
        view.setAutoSaveEnabled(false);
    });

    it("updates and retrieves response text correctly", function() {
        view.response(['Test response 1', 'Test response 2']);
        expect(view.response()[0]).toBe('Test response 1');
        expect(view.response()[1]).toBe('Test response 2');
    });

    it("updates submit/save buttons and save status when response text changes", function() {
        // Response is blank --> save/submit buttons disabled
-       view.response('');
        view.response(['', '']);
        view.handleResponseChanged();
        expect(view.submitEnabled()).toBe(false);
        expect(view.saveEnabled()).toBe(false);
        expect(view.saveStatus()).toContain('This response has not been saved.');

        // Response is whitespace --> save/submit buttons disabled
-       view.response('   \n   \n   ');
        view.response(['   \n   \n   ', ' ']);
        view.handleResponseChanged();
        expect(view.submitEnabled()).toBe(false);
        expect(view.saveEnabled()).toBe(false);
        expect(view.saveStatus()).toContain('This response has not been saved.');

        // Response is not blank --> submit button enabled
-       view.response('Test response');
        view.response(['Test response 1', ' ']);
        view.handleResponseChanged();
        expect(view.submitEnabled()).toBe(true);
        expect(view.saveEnabled()).toBe(true);
@@ -159,7 +165,7 @@ describe("OpenAssessment.ResponseView", function() {
        expect(view.saveStatus()).toContain('saved but not submitted');

        // Response is not blank --> submit button enabled
-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.save();
        expect(view.submitEnabled()).toBe(true);
        expect(view.saveEnabled()).toBe(false);
@@ -168,21 +174,21 @@ describe("OpenAssessment.ResponseView", function() {
    it("shows unsaved draft only when response text has changed", function() {
        // Save the initial response
-       view.response('Lorem ipsum');
        view.response(['Test response 1', 'Test response 2']);
        view.save();
        expect(view.saveEnabled()).toBe(false);
        expect(view.saveStatus()).toContain('saved but not submitted');

        // Keep the text the same, but trigger an update
        // Should still be saved
-       view.response('Lorem ipsum');
        view.response(['Test response 1', 'Test response 2']);
        view.handleResponseChanged();
        expect(view.saveEnabled()).toBe(false);
        expect(view.saveStatus()).toContain('saved but not submitted');

        // Change the text
        // This should cause it to change to unsaved draft
-       view.response('changed ');
        view.response(['Test response 1', 'Test response 3']);
        view.handleResponseChanged();
        expect(view.saveEnabled()).toBe(true);
        expect(view.saveStatus()).toContain('This response has not been saved.');
@@ -190,16 +196,16 @@ describe("OpenAssessment.ResponseView", function() {
    it("sends the saved submission to the server", function() {
        spyOn(server, 'save').andCallThrough();
-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.save();
-       expect(server.save).toHaveBeenCalledWith('Test response');
        expect(server.save).toHaveBeenCalledWith(['Test response 1', 'Test response 2']);
    });

    it("submits a response to the server", function() {
        spyOn(server, 'submit').andCallThrough();
-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();
-       expect(server.submit).toHaveBeenCalledWith('Test response');
        expect(server.submit).toHaveBeenCalledWith(['Test response 1', 'Test response 2']);
    });

    it("allows the user to cancel before submitting", function() {
@@ -208,7 +214,7 @@ describe("OpenAssessment.ResponseView", function() {
        spyOn(server, 'submit').andCallThrough();

        // Start a submission
-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();

        // Expect that the submission was not sent to the server
@@ -222,7 +228,7 @@ describe("OpenAssessment.ResponseView", function() {
            return $.Deferred(function(defer) {}).promise();
        });

-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();
        expect(view.submitEnabled()).toBe(false);
    });
@@ -235,7 +241,7 @@ describe("OpenAssessment.ResponseView", function() {
            }).promise();
        });

-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();

        // Expect the submit button to have been re-enabled
@@ -248,7 +254,7 @@ describe("OpenAssessment.ResponseView", function() {
        spyOn(server, 'submit').andCallThrough();

        // Start a submission
-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();

        // Expect the submit button to be re-enabled
@@ -265,7 +271,7 @@ describe("OpenAssessment.ResponseView", function() {
        spyOn(view, 'load');
        spyOn(baseView, 'loadAssessmentModules');

-       view.response('Test response');
        view.response(['Test response 1', 'Test response 2']);
        view.submit();

        // Expect the current and next step to have been reloaded
@@ -278,7 +284,7 @@ describe("OpenAssessment.ResponseView", function() {
        expect(view.unsavedWarningEnabled()).toBe(false);

        // Change the text, then expect the unsaved warning to be enabled.
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();

        // Expect the unsaved work warning to be enabled
@@ -287,7 +293,7 @@ describe("OpenAssessment.ResponseView", function() {
    it("disables the unsaved work warning when the user saves a response", function() {
        // Change the text, then expect the unsaved warning to be enabled.
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();
        expect(view.unsavedWarningEnabled()).toBe(true);
@@ -298,7 +304,7 @@ describe("OpenAssessment.ResponseView", function() {
    it("disables the unsaved work warning when the user submits a response", function() {
        // Change the text, then expect the unsaved warning to be enabled.
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();
        expect(view.unsavedWarningEnabled()).toBe(true);
@@ -315,7 +321,7 @@ describe("OpenAssessment.ResponseView", function() {
        expect(view.saveStatus()).toContain('not been saved');

        // Change the response
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();

        // Usually autosave would be called by a timer.
@@ -357,7 +363,7 @@ describe("OpenAssessment.ResponseView", function() {
        spyOn(server, 'save').andCallFake(function() { return errorPromise; });

        // Change the response and save it
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();
        view.save();
@@ -378,7 +384,7 @@ describe("OpenAssessment.ResponseView", function() {
        view.AUTO_SAVE_WAIT = 900000;

        // Change the response
-       view.response('Lorem ipsum');
        view.response(['Lorem ipsum 1', 'Lorem ipsum 2']);
        view.handleResponseChanged();

        // Autosave
......
@@ -30,7 +30,7 @@ describe("OpenAssessment.Server", function() {
        );
    };

-   var PROMPT = "Hello this is the prompt yes.";
    var PROMPTS = [{"description": "Hello this is the prompt yes."}];
    var FEEDBACK_PROMPT = "Prompt for feedback";
    var FEEDBACK_DEFAULT_TEXT = "Default feedback response text";
@@ -253,7 +253,7 @@ describe("OpenAssessment.Server", function() {
    it("updates the XBlock's editor context definition", function() {
        stubAjax(true, { success: true });
        server.updateEditorContext({
-           prompt: PROMPT,
            prompts: PROMPTS,
            feedbackPrompt: FEEDBACK_PROMPT,
            feedback_default_text: FEEDBACK_DEFAULT_TEXT,
            title: TITLE,
@@ -268,7 +268,7 @@ describe("OpenAssessment.Server", function() {
        expect($.ajax).toHaveBeenCalledWith({
            type: "POST", url: '/update_editor_context',
            data: JSON.stringify({
-               prompt: PROMPT,
                prompts: PROMPTS,
                feedback_prompt: FEEDBACK_PROMPT,
                feedback_default_text: FEEDBACK_DEFAULT_TEXT,
                title: TITLE,
......
@@ -43,7 +43,7 @@ describe("OpenAssessment.StudioView", function() {
    var EXPECTED_SERVER_DATA = {
        title: "The most important of all questions.",
-       prompt: "How much do you like waffles?",
        prompts: [{"description": "How much do you like waffles?"}, {"description": "How much do you like waffles 2?"}],
        feedbackPrompt: "",
        submissionStart: "2014-01-02T12:15",
        submissionDue: "2014-10-01T04:53",
@@ -145,7 +145,7 @@ describe("OpenAssessment.StudioView", function() {
        // Top-level attributes
        expect(server.receivedData.title).toEqual(EXPECTED_SERVER_DATA.title);
-       expect(server.receivedData.prompt).toEqual(EXPECTED_SERVER_DATA.prompt);
        expect(server.receivedData.prompts).toEqual(EXPECTED_SERVER_DATA.prompts);
        expect(server.receivedData.feedbackPrompt).toEqual(EXPECTED_SERVER_DATA.feedbackPrompt);
        expect(server.receivedData.submissionStart).toEqual(EXPECTED_SERVER_DATA.submissionStart);
        expect(server.receivedData.submissionDue).toEqual(EXPECTED_SERVER_DATA.submissionDue);
......
@@ -180,7 +180,7 @@ describe("OpenAssessment edit assessment views", function() {
        expect(view.description()).toEqual({
            examples: [
                {
-                   answer: 'Test answer',
                    answer: ['Test answer 1', 'Test answer 2'],
                    options_selected: [
                        {
                            criterion: 'criterion_with_two_options',
@@ -197,7 +197,7 @@ describe("OpenAssessment edit assessment views", function() {
        expect(view.description()).toEqual({
            examples: [
                {
-                   answer: 'Test answer',
                    answer: ['Test answer 1', 'Test answer 2'],
                    options_selected: [
                        {
                            criterion: 'criterion_with_two_options',
@@ -206,7 +206,7 @@ describe("OpenAssessment edit assessment views", function() {
                    ]
                },
                {
-                   answer: '',
                    answer: ['', ''],
                    options_selected: [
                        {
                            criterion: 'criterion_with_two_options',
......
@@ -2,7 +2,20 @@
Tests for OpenAssessment prompt editing view.
**/

-describe("OpenAssessment.EditPromptView", function() {
describe("OpenAssessment.EditPromptViews", function() {

    // Use a stub notifier implementation that simply stores
    // the notifications it receives.
    var notifier = null;
    var StubNotifier = function() {
        this.notifications = [];
        this.notificationFired = function(name, data) {
            this.notifications.push({
                name: name,
                data: data
            });
        };
    };

    var view = null;
@@ -11,14 +24,84 @@ describe("OpenAssessment.EditPromptView", function() {
        loadFixtures('oa_edit.html');

        // Create the view
-       var element = $("#oa_prompt_editor_wrapper").get(0);
-       view = new OpenAssessment.EditPromptView(element);
        var element = $("#oa_prompts_editor_wrapper").get(0);
        notifier = new StubNotifier();
        view = new OpenAssessment.EditPromptsView(element, notifier);
    });

    it("reads prompts from the editor", function() {
        // This assumes a particular structure of the DOM,
        // which is set by the HTML fixture.
        var prompts = view.promptsDefinition();
        expect(prompts.length).toEqual(2);

        expect(prompts[0]).toEqual({
            "description": "How much do you like waffles?"
        });
    });

-   it("sets and loads prompt text", function() {
-       view.promptText("");
-       expect(view.promptText()).toEqual("");
-       view.promptText("This is a test prompt!");
-       expect(view.promptText()).toEqual("This is a test prompt!");
    it("creates new prompts", function() {
        // Delete all existing prompts
        // Then add new prompts (created from a client-side template)
        $.each(view.getAllPrompts(), function() { view.removePrompt(this); });
        view.addPrompt();
view.addPrompt();
view.addPrompt();
var prompts = view.promptsDefinition();
expect(prompts.length).toEqual(3);
expect(prompts[0]).toEqual({
description: ""
});
expect(prompts[1]).toEqual({
description: ""
});
});
});
describe("OpenAssessment.EditPromptViews after release", function() {
// Use a stub notifier implementation that simply stores
// the notifications it receives.
var notifier = null;
var StubNotifier = function() {
this.notifications = [];
this.notificationFired = function(name, data) {
this.notifications.push({
name: name,
data: data
});
};
};
var view = null;
beforeEach(function() {
// Load the DOM fixture
loadFixtures('oa_edit.html');
$("#openassessment-editor").attr('data-is-released', 'true');
// Create the view
var element = $("#oa_prompts_editor_wrapper").get(0);
notifier = new StubNotifier();
view = new OpenAssessment.EditPromptsView(element, notifier);
});
it("does not allow adding prompts", function() {
view.addPrompt(); // call method
$(view.promptsContainer.addButtonElement).click(); // click on button
var prompts = view.promptsDefinition();
expect(prompts.length).toEqual(2);
});
it("does not allow removing prompts", function() {
view.removePrompt(view.getAllPrompts()[0]); // call method
$("." + view.promptsContainer.removeButtonClass, view.element).click(); // click on buttons
var prompts = view.promptsDefinition();
expect(prompts.length).toEqual(2);
    });
});
@@ -14,7 +14,7 @@ OpenAssessment.ResponseView = function(element, server, fileUploader, baseView)
    this.server = server;
    this.fileUploader = fileUploader;
    this.baseView = baseView;
-   this.savedResponse = "";
    this.savedResponse = [];
    this.files = null;
    this.imageType = null;
    this.lastChangeTime = Date.now();
@@ -66,7 +66,7 @@ OpenAssessment.ResponseView.prototype = {
    // Install change handler for textarea (to enable submission button)
    this.savedResponse = this.response();
    var handleChange = function(eventData) { view.handleResponseChanged(); };
-   sel.find('#submission__answer__value').on('change keyup drop paste', handleChange);
    sel.find('.submission__answer__part__text__value').on('change keyup drop paste', handleChange);

    var handlePrepareUpload = function(eventData) { view.prepareUpload(eventData.target.files); };
    sel.find('input[type=file]').on('change', handlePrepareUpload);
@@ -258,33 +258,39 @@ OpenAssessment.ResponseView.prototype = {
    },

    /**
-   Set the response text.
-   Retrieve the response text.
    Set the response texts.
    Retrieve the response texts.

    Args:
-       text (string): If specified, the text to set for the response.
        texts (array of strings): If specified, the texts to set for the response.

    Returns:
-       string: The current response text.
        array of strings: The current response texts.
    **/
-   response: function(text) {
-       var sel = $('#submission__answer__value', this.element);
-       if (typeof text === 'undefined') {
-           return sel.val();
-       } else {
-           sel.val(text);
-       }
-   },
    response: function(texts) {
        var sel = $('.submission__answer__part__text__value', this.element);
        if (typeof texts === 'undefined') {
            return sel.map(function() {
                return $.trim($(this).val());
            }).get();
        } else {
            sel.map(function(index, element) {
                $(this).val(texts[index]);
            });
        }
    },

    /**
-   Check whether the response text has changed since the last save.
    Check whether the response texts have changed since the last save.

    Returns: boolean
    **/
    responseChanged: function() {
-       var currentResponse = $.trim(this.response());
-       var savedResponse = $.trim(this.savedResponse);
-       return savedResponse !== currentResponse;
        var savedResponse = this.savedResponse;
        return this.response().some(function(element, index, array) {
            return element !== savedResponse[index];
        });
    },

    /**
@@ -314,14 +320,16 @@ OpenAssessment.ResponseView.prototype = {
    **/
    handleResponseChanged: function() {
        // Enable the save/submit button only for non-blank responses
-       var isBlank = ($.trim(this.response()) !== '');
-       this.submitEnabled(isBlank);
        var isNotBlank = !this.response().every(function(element, index, array) {
            return $.trim(element) == '';
        });
        this.submitEnabled(isNotBlank);

        // Update the save button, save status, and "unsaved changes" warning
        // only if the response has changed
        if (this.responseChanged()) {
-           this.saveEnabled(isBlank);
-           this.previewEnabled(isBlank);
            this.saveEnabled(isNotBlank);
            this.previewEnabled(isNotBlank);
            this.saveStatus(gettext('This response has not been saved.'));
            this.unsavedWarningEnabled(true);
        }
@@ -355,8 +363,15 @@ OpenAssessment.ResponseView.prototype = {
        // ... but update the UI based on what the user may have entered
        // since hitting the save button.
        var currentResponse = view.response();
-       view.submitEnabled(currentResponse !== '');
-       if (currentResponse == savedResponse) {
        var currentResponseIsEmpty = currentResponse.every(function(element, index, array) {
            return element == '';
        });
        view.submitEnabled(!currentResponseIsEmpty);

        var currentResponseEqualsSaved = currentResponse.every(function(element, index, array) {
            return element === savedResponse[index];
        });
        if (currentResponseEqualsSaved) {
            view.saveEnabled(false);
            view.saveStatus(gettext("This response has been saved but not submitted."));
        }
@@ -388,7 +403,7 @@ OpenAssessment.ResponseView.prototype = {
        // NOTE: in JQuery >=1.8, `pipe()` is deprecated in favor of `then()`,
        // but we're using JQuery 1.7 in the LMS, so for now we're stuck with `pipe()`.
        .pipe(function() {
-           var submission = $('#submission__answer__value', view.element).val();
            var submission = view.response();
            baseView.toggleActionError('response', null);
            // Send the submission to the server, returning the promise.
......
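Net effect on the view API: response() reads and writes one entry per answer-part textarea, and the blank/changed checks iterate over that array. A hedged usage sketch for a two-prompt block:

    // Hedged sketch of the reworked ResponseView API (two-prompt block assumed).
    view.response(['Draft for prompt 1', '']);  // set both textareas
    view.response();                            // ['Draft for prompt 1', '']
    view.responseChanged();                     // true until save() refreshes savedResponse
    view.handleResponseChanged();               // submit enabled: at least one part is non-blank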
@@ -445,7 +445,7 @@ if (typeof OpenAssessment.Server == "undefined" || !OpenAssessment.Server) {
    updateEditorContext: function(kwargs) {
        var url = this.url('update_editor_context');
        var payload = JSON.stringify({
-           prompt: kwargs.prompt,
            prompts: kwargs.prompts,
            feedback_prompt: kwargs.feedbackPrompt,
            feedback_default_text: kwargs.feedback_default_text,
            title: kwargs.title,
......
@@ -65,6 +65,7 @@ OpenAssessment.Container = function(containerItem, kwargs) {
    this.removeButtonClass = kwargs.removeButtonClass;
    this.containerItemClass = kwargs.containerItemClass;
    this.notifier = kwargs.notifier;
    this.addRemoveEnabled = (typeof kwargs.addRemoveEnabled == 'undefined') || kwargs.addRemoveEnabled;

    // Since every container item should be instantiated with
    // the notifier we were given, create a helper method
@@ -83,17 +84,23 @@ OpenAssessment.Container.prototype = {
    */
    addEventListeners: function() {
        var container = this;

-       // Install a click handler for the add button
-       $(this.addButtonElement).click($.proxy(this.add, this));
-
-       // Find items already in the container and install click
-       // handlers for the delete buttons.
-       $("." + this.removeButtonClass, this.containerElement).click(
-           function(eventData) {
-               var item = container.createContainerItem(eventData.target);
-               container.remove(item);
-           }
-       );
        if (this.addRemoveEnabled) {
            // Install a click handler for the add button
            $(this.addButtonElement).click($.proxy(this.add, this));

            // Find items already in the container and install click
            // handlers for the delete buttons.
            $("." + this.removeButtonClass, this.containerElement).click(
                function (eventData) {
                    var item = container.createContainerItem(eventData.target);
                    container.remove(item);
                }
            );
        } else {
            $(this.addButtonElement).addClass('is--disabled');
            $("." + this.removeButtonClass, this.containerElement).addClass('is--disabled');
        }

        // Initialize existing items, in case they need to install their
        // own event handlers.
@@ -122,16 +129,21 @@ OpenAssessment.Container.prototype = {
            .toggleClass(this.containerItemClass, true)
            .appendTo($(this.containerElement));

        // Since we just added the new element to the container,
        // it should be the last one.
        var container = this;
        var containerItem = $("." + this.containerItemClass, this.containerElement).last();

        // Install a click handler for the delete button
-       containerItem.find('.' + this.removeButtonClass)
-           .click(function(eventData) {
-               var containerItem = container.createContainerItem(eventData.target);
-               container.remove(containerItem);
-           } );
        if (this.addRemoveEnabled) {
            containerItem.find('.' + this.removeButtonClass)
                .click(function (eventData) {
                    var containerItem = container.createContainerItem(eventData.target);
                    container.remove(containerItem);
                });
        } else {
            containerItem.find('.' + this.removeButtonClass).addClass('is--disabled');
        }
// Fire event handler for adding a new element // Fire event handler for adding a new element
......
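Because addRemoveEnabled defaults to true when omitted, existing callers (rubric criteria and options) keep their add/remove buttons; only callers that opt out get the disabled styling. A hedged construction sketch, reusing the kwargs that the prompts editor passes later in this diff:

    // Hedged sketch; selectors mirror those used by EditPromptsView below.
    var container = new OpenAssessment.Container(
        OpenAssessment.Prompt, {
            containerElement: $("#openassessment_prompts_list").get(0),
            templateElement: $("#openassessment_prompt_template").get(0),
            addButtonElement: $("#openassessment_prompts_add_prompt").get(0),
            removeButtonClass: "openassessment_prompt_remove_button",
            containerItemClass: "openassessment_prompt",
            notifier: notifier,
            addRemoveEnabled: false  // e.g. after the problem has been released
        }
    );
    container.addEventListeners();  // buttons get is--disabled instead of click handlers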
@@ -65,6 +65,113 @@ OpenAssessment.ItemUtilities = {
};

/**
The Prompt Class is used to construct and maintain references to prompts from within a prompts
container object. Constructs a new Prompt element.
Args:
element (OpenAssessment.Container): The container that the prompt is a member of.
notifier (OpenAssessment.Notifier): Used to send notifications of updates to prompts.
Returns:
OpenAssessment.Prompt
**/
OpenAssessment.Prompt = function(element, notifier) {
this.element = element;
this.notifier = notifier;
};
OpenAssessment.Prompt.prototype = {
/**
Finds the values currently entered in the Prompt's fields, and returns them.
Returns:
object literal of the form:
{
'description': 'Write a nice long essay about anything.'
}
**/
getFieldValues: function () {
var fields = {
description: this.description()
};
return fields;
},
/**
Get or set the description of the prompt.
Args:
text (string, optional): If provided, set the description of the prompt.
Returns:
string
**/
description: function(text) {
var sel = $('.openassessment_prompt_description', this.element);
return OpenAssessment.Fields.stringField(sel, text);
},
addEventListeners: function() {},
/**
Hook into the event handler for addition of a prompt.
*/
addHandler: function (){
this.notifier.notificationFired(
"promptAdd",
{
"index": this.element.index()
}
);
},
/**
Hook into the event handler for removal of a prompt.
*/
removeHandler: function (){
this.notifier.notificationFired(
"promptRemove",
{
"index": this.element.index()
}
);
},
updateHandler: function() {},
/**
Mark validation errors.
Returns:
Boolean indicating whether the prompt is valid.
**/
validate: function() {
return true;
},
/**
Return a list of validation errors visible in the UI.
Mainly useful for testing.
Returns:
list of strings
**/
validationErrors: function() {
return [];
},
/**
Clear all validation errors from the UI.
**/
clearValidationErrors: function() {}
};
/**
The RubricOption Class used to construct and maintain references to rubric options from within an options
container object. Constructs a new RubricOption element.
@@ -506,7 +613,7 @@ OpenAssessment.RubricCriterion.prototype = {
OpenAssessment.TrainingExample = function(element){
    this.element = element;
    this.criteria = $(".openassessment_training_example_criterion_option", this.element);
-   this.answer = $('.openassessment_training_example_essay', this.element).first();
    this.answer = $('.openassessment_training_example_essay_part textarea', this.element);
};

OpenAssessment.TrainingExample.prototype = {
@@ -527,7 +634,9 @@ OpenAssessment.TrainingExample.prototype = {
        ).get();

        return {
-           answer: this.answer.prop('value'),
            answer: this.answer.map(function() {
                return $(this).prop('value');
            }).get(),
            options_selected: optionsSelected
        };
    },
......
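With the .first() call gone, this.answer matches every per-part textarea, so getFieldValues() now returns the answer as an array. A hedged sketch of the resulting shape for a two-part example:

    // Hedged sketch: values are illustrative.
    example.getFieldValues();
    // => {
    //        answer: ["Essay part for prompt 1", "Essay part for prompt 2"],
    //        options_selected: [/* one entry per criterion */]
    //    }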
@@ -25,9 +25,14 @@ OpenAssessment.StudioView = function(runtime, element, server) {
    // Initialize the validation alert
    this.alert = new OpenAssessment.ValidationAlert().install();

    var studentTrainingListener = new OpenAssessment.StudentTrainingListener();

    // Initialize the prompt tab view
-   this.promptView = new OpenAssessment.EditPromptView(
-       $("#oa_prompt_editor_wrapper", this.element).get(0)
    this.promptsView = new OpenAssessment.EditPromptsView(
        $("#oa_prompts_editor_wrapper", this.element).get(0),
        new OpenAssessment.Notifier([
            studentTrainingListener
        ])
    );

    // Initialize the settings tab view
@@ -57,7 +62,7 @@ OpenAssessment.StudioView = function(runtime, element, server) {
    this.rubricView = new OpenAssessment.EditRubricView(
        $("#oa_rubric_editor_wrapper", this.element).get(0),
        new OpenAssessment.Notifier([
-           new OpenAssessment.StudentTrainingListener()
            studentTrainingListener
        ])
    );
@@ -185,7 +190,7 @@ OpenAssessment.StudioView.prototype = {
        var view = this;
        this.server.updateEditorContext({
-           prompt: view.promptView.promptText(),
            prompts: view.promptsView.promptsDefinition(),
            feedbackPrompt: view.rubricView.feedbackPrompt(),
            feedback_default_text: view.rubricView.feedback_default_text(),
            criteria: view.rubricView.criteriaDefinition(),
@@ -236,7 +241,8 @@ OpenAssessment.StudioView.prototype = {
    validate: function() {
        var settingsValid = this.settingsView.validate();
        var rubricValid = this.rubricView.validate();
-       return settingsValid && rubricValid;
        var promptsValid = this.promptsView.validate();
        return settingsValid && rubricValid && promptsValid;
    },

    /**
@@ -249,7 +255,9 @@ OpenAssessment.StudioView.prototype = {
    **/
    validationErrors: function() {
        return this.settingsView.validationErrors().concat(
-           this.rubricView.validationErrors()
            this.rubricView.validationErrors().concat(
                this.promptsView.validationErrors()
            )
        );
    },
@@ -259,6 +267,7 @@ OpenAssessment.StudioView.prototype = {
    clearValidationErrors: function() {
        this.settingsView.clearValidationErrors();
        this.rubricView.clearValidationErrors();
        this.promptsView.clearValidationErrors();
    },
};
......
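Note the wiring change: one StudentTrainingListener instance is now shared by the rubric and prompts notifiers, so edits in either tab update the same training-example editor. A hedged sketch of the flow:

    // Hedged sketch of the notification flow; names match the code above.
    var listener = new OpenAssessment.StudentTrainingListener();
    var notifier = new OpenAssessment.Notifier([listener]);

    // Adding a prompt in the prompts tab ultimately fires:
    notifier.notificationFired("promptAdd", {index: 2});
    // ...which the listener handles by appending an answer part to each
    // training example (see the listener diff below).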
@@ -421,7 +421,7 @@ OpenAssessment.EditStudentTrainingView.prototype = {
    {
        examples: [
            {
-               answer: "I love pokemon",
                answer: ["I love pokemon 1", "I love pokemon 2"],
                options_selected: [
                    {
                        criterion: "brevity",
......
/**
Dynamically update student training examples based on
-changes to the rubric.
changes to the prompts or the rubric.
**/
OpenAssessment.StudentTrainingListener = function() {
    this.element = $('#oa_student_training_editor');
@@ -8,6 +8,28 @@ OpenAssessment.StudentTrainingListener = function() {
};

OpenAssessment.StudentTrainingListener.prototype = {
/**
Add an answer part to the training examples when a prompt is added.
*/
promptAdd: function(data) {
var view = this.element;
var essay_part = $("#openassessment_training_example_part_template")
.children().first()
.clone()
.removeAttr('id')
.toggleClass('is--hidden', false)
.appendTo(".openassessment_training_example_essay", view);
},
/**
Remove an answer part from the training examples when a prompt is removed.
*/
promptRemove: function(data) {
var view = this.element;
$(".openassessment_training_example_essay li:nth-child(" + (data.index + 1) + ")", view).remove();
},
/** /**
Event handler for updating training examples when a criterion option has Event handler for updating training examples when a criterion option has
been updated. been updated.
......
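The DOM effect of the two handlers, as a hedged sketch:

    // Hedged sketch: exercising the listener handlers directly.
    var listener = new OpenAssessment.StudentTrainingListener();
    listener.promptAdd({index: 2});     // clones the hidden part template into the essay list
    listener.promptRemove({index: 2});  // removes the third answer part (index is zero-based)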
/**
Editing interface for the rubric prompt.
Args:
element (DOM element): The DOM element representing this view.
Returns:
OpenAssessment.EditPromptView
**/
OpenAssessment.EditPromptView = function(element) {
this.element = element;
};
OpenAssessment.EditPromptView.prototype = {
/**
Get or set the text of the prompt.
Args:
text (string, optional): If provided, set the text of the prompt.
Returns:
string
**/
promptText: function(text) {
var sel = $('#openassessment_prompt_editor', this.element);
return OpenAssessment.Fields.stringField(sel, text);
},
};
\ No newline at end of file
/**
Editing interface for the prompts.
Args:
element (DOM element): The DOM element representing this view.
Returns:
OpenAssessment.EditPromptsView
**/
OpenAssessment.EditPromptsView = function(element, notifier) {
this.element = element;
this.editorElement = $(this.element).closest("#openassessment-editor");
this.addRemoveEnabled = !(this.editorElement.attr('data-is-released') === 'true');
this.promptsContainer = new OpenAssessment.Container(
OpenAssessment.Prompt, {
containerElement: $("#openassessment_prompts_list", this.element).get(0),
templateElement: $("#openassessment_prompt_template", this.element).get(0),
addButtonElement: $("#openassessment_prompts_add_prompt", this.element).get(0),
removeButtonClass: "openassessment_prompt_remove_button",
containerItemClass: "openassessment_prompt",
notifier: notifier,
addRemoveEnabled: this.addRemoveEnabled
}
);
this.promptsContainer.addEventListeners();
};
OpenAssessment.EditPromptsView.prototype = {
/**
Construct a list of prompt definitions from the editor UI.
Returns:
list of prompt objects
Example usage:
>>> editPromptsView.promptsDefinition();
[
    {
        description: "Description"
    },
    ...
]
**/
promptsDefinition: function() {
var prompts = this.promptsContainer.getItemValues();
return prompts;
},
/**
Add a new prompt.
Uses a client-side template to create the new prompt.
**/
addPrompt: function() {
if (this.addRemoveEnabled) {
this.promptsContainer.add();
}
},
/**
Remove a prompt.
Args:
item (OpenAssessment.Prompt): The prompt item to remove.
**/
removePrompt: function(item) {
if (this.addRemoveEnabled) {
this.promptsContainer.remove(item);
}
},
/**
Retrieve all prompts.
Returns:
Array of OpenAssessment.Prompt objects.
**/
getAllPrompts: function() {
return this.promptsContainer.getAllItems();
},
/**
Retrieve a prompt item from the prompts.
Args:
index (int): The index of the prompt, starting from 0.
Returns:
OpenAssessment.Prompt or null
**/
getPromptItem: function(index) {
return this.promptsContainer.getItem(index);
},
/**
Mark validation errors.
Returns:
Boolean indicating whether the view is valid.
**/
validate: function() {
return true;
},
/**
Return a list of validation errors visible in the UI.
Mainly useful for testing.
Returns:
list of string
**/
validationErrors: function() {
var errors = [];
return errors;
},
/**
Clear all validation errors from the UI.
**/
clearValidationErrors: function() {}
};
\ No newline at end of file
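Putting the new view together, a hedged usage sketch that mirrors the wiring in oa_edit.js and the Jasmine spec above:

    // Hedged sketch; fixture IDs as in the spec earlier in this diff.
    var element = $("#oa_prompts_editor_wrapper").get(0);
    var notifier = new OpenAssessment.Notifier([new OpenAssessment.StudentTrainingListener()]);
    var promptsView = new OpenAssessment.EditPromptsView(element, notifier);

    promptsView.addPrompt();          // no-op once data-is-released is 'true'
    promptsView.promptsDefinition();  // e.g. [{description: "How much do you like waffles?"}, ...]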
@@ -331,26 +331,6 @@
    -moz-transition: height 1s ease-in-out 0;
}

-#openassessment_prompt_editor {
-    width: 100%;
-    height: 100%;
-    resize: none;
-    border: none;
-    @include border-radius(4px);
-    padding: 10px;
-    font-family: $f-sans-serif;
-
-    textarea{
-        font-size: 14px;
-        border: none;
-        overflow: auto;
-        outline: none;
-        -webkit-box-shadow: none;
-        -moz-box-shadow: none;
-        box-shadow: none;
-    }
-}
-
#openassessment_rubric_editor {
    width: 100%;
    height: 100%;
@@ -493,66 +473,146 @@
    bottom: 0;
}

-#oa_rubric_editor_wrapper{
-    .wrapper-comp-settings{
-        display: block;
-    }
-
-    #openassessment_rubric_instructions{
-        background-color: $edx-gray-t1;
-        border-bottom: 1px solid $edx-gray-d1;
-        padding: 10px;
-    }
-
-    .openassessment_criterion {
-        .openassessment_criterion_remove_button{
-            @extend .openassessment_rubric_remove_button;
-        }
-
-        .openassessment_criterion_header {
-            margin: 10px;
-            padding-bottom: 5px;
-            border-bottom: 1px solid;
-            overflow: auto;
-            color: $edx-gray-d2;
-
-            .action.expand-collapse {
-                @include float(left);
-                .ui-toggle-expansion {
-                    color: $edx-gray-d2;
-                    cursor: pointer;
-                }
-                .ui-toggle-expansion:hover {
-                    color: $edx-gray-d1;
-                    cursor: pointer;
-                }
-            }
-
-            .openassessment_criterion_header_title_box {
-                @include float(left);
-                width: 80%;
-                display: inline-block;
-            }
-
-            .openassessment_criterion_header_title {
-                @include float(left);
-                text-transform: uppercase;
-                width: 50%;
-                display: inline-block;
-                cursor: default;
-                padding-top: 2px;
-            }
-
-            .openassessment_criterion_guide {
-                @extend %t-small;
-            }
-
-            .openassessment_criterion_header_remove {
-                @extend .openassessment_rubric_remove_button;
-            }
-        }

.openassessment_tab_instructions {
    background-color: $edx-gray-t1;
    border-bottom: 1px solid $edx-gray-d1;
    padding: 10px;
}

.openassessment_container_header {
    margin: ($baseline-v/2) ($baseline-h/4);
    padding-bottom: $baseline-v/4;
    border-bottom: 1px solid;
    overflow: auto;
    color: $edx-gray-d2;

    .action.expand-collapse {
        @include float(left);
        .ui-toggle-expansion {
            color: $edx-gray-d2;
            cursor: pointer;
        }
        .ui-toggle-expansion:hover {
            color: $edx-gray-d1;
            cursor: pointer;
        }
    }
}

.openassessment_container_header_title_box {
    @include float(left);
    width: 80%;
    display: inline-block;
}

.openassessment_container_header_title {
    text-transform: uppercase;
    cursor: default;
    padding-top: 2px;
}

.openassessment_container_guide {
    @extend %t-small;
}

.openassessment_container_remove_button{
    cursor: pointer;
    h2:after{
        font-family: FontAwesome;
        content: "\f00d";
        display: inline-block;
        color: inherit;
        margin: 0 5px;
    }
    h2{
        text-transform: uppercase;
        font-size: 80%;
        @include float(right);
        display: inline-block;
        padding: 3px 8px 3px 13px;
    }
    @include float(right);
}
.openassessment_container_remove_button:hover{
    background-color: $edx-gray-d2;
    border-radius: 4px;
    color: white;
}

.openassessment_container_add_button {
    h6:before{
        font-family: "FontAwesome";
        display: inline-block;
        @include margin-left(5px);
        @include margin-right(10px);
        width: auto;
        height: auto;
        content: "\f067";
    }

    background-color: white;
    border: 1px solid;
    border-radius: 4px;
    text-align: center;
    color: #009fe6;
    padding: 10px;
    margin: 15px 10px;
}

.openassessment_container_add_button.openassessment_highlighted_field {
    color: red;
    border-width: 2px;
}

.openassessment_container_add_button:hover {
    color: white;
    background-color: #009fe6;
    cursor: pointer;
}

.openassessment_container_remove_button.is--disabled,
.openassessment_container_remove_button.is--disabled:hover,
.openassessment_container_add_button.is--disabled,
.openassessment_container_add_button.is--disabled:hover {
    color: $edx-gray-l2;
    background-color: transparent;
    cursor: auto;
}

#oa_prompts_editor_wrapper {
    .wrapper-comp-settings {
        display: block;
    }

    .openassessment_prompt_description {
        width: 100%;
        min-height: 100px;
        resize: none;
        border: 1px solid #b2b2b2;
        border-radius: 4px;
        padding: 10px;
        font-family: $f-sans-serif;
        font-size: 14px;
        overflow: auto;
        outline: none;
        -webkit-box-shadow: none;
        -moz-box-shadow: none;
        box-shadow: none;
    }

    .openassessment_prompt_description.openassessment_highlighted_field {
        border: 2px solid red;
    }
}

#oa_rubric_editor_wrapper{
    .wrapper-comp-settings{
        display: block;
    }

    .openassessment_criterion {
        .openassessment_criterion_add_option {
            h2:before {
@@ -653,7 +713,7 @@
            @include padding(5px, 5px, 5px, 15px);

            .openassessment_criterion_option_remove_button{
-               @extend .openassessment_rubric_remove_button;
                @extend .openassessment_container_remove_button;
            }

            .openassessment_option_header{
@@ -670,7 +730,7 @@
            }

            .openassessment_option_header_remove{
-               @extend .openassessment_rubric_remove_button;
                @extend .openassessment_container_remove_button;
            }
        }
@@ -767,30 +827,6 @@
        outline: 0;
    }

-   .openassessment_rubric_remove_button{
-       cursor: pointer;
-       h2:after{
-           font-family: FontAwesome;
-           content: "\f00d";
-           display: inline-block;
-           color: inherit;
-           margin: 0 5px;
-       }
-       h2{
-           text-transform: uppercase;
-           font-size: 80%;
-           @include float(right);
-           display: inline-block;
-           @include padding(3px, 8px, 3px, 13px);
-       }
-       @include float(right);
-   }
-   .openassessment_rubric_remove_button:hover{
-       background-color: $edx-gray-d2;
-       border-radius: 4px;
-       color: white;
-   }

    #openassessment_rubric_feedback_wrapper{
        padding: 0;
@@ -813,39 +849,6 @@
            @include float(right);
        }
    }

-   #openassessment_rubric_add_criterion{
-       h6:before{
-           font-family: "FontAwesome";
-           display: inline-block;
-           @include margin-left(5px);
-           @include margin-right(10px);
-           width: auto;
-           height: auto;
-           content: "\f067";
-       }
-
-       background-color: white;
-       border: 1px solid;
-       border-radius: 4px;
-       @include text-align(center);
-       color: #009fe6;
-       padding: 10px;
-       margin: 15px 10px;
-   }
-
-   #openassessment_rubric_add_criterion.openassessment_highlighted_field{
-       color: red;
-       border-width: 2px;
-   }
-
-   #openassessment_rubric_add_criterion:hover{
-       color: white;
-       background-color: #009fe6;
-       cursor: pointer;
-   }
}
@@ -940,7 +943,7 @@
.openassessment_training_example_body {
    @include padding(0, 15px, 15px, 15px);
    position: relative;
-   overflow: hidden;
    overflow: scroll;

    .openassessment_training_example_essay_wrapper {
        width: 58%;
        display: inline-block;
......
@@ -459,26 +459,26 @@
// --------------------
-// problem
// submission
// --------------------

.wrapper--openassessment__prompt {
}

-.openassessment__prompt {
.submission__answer__part__prompt {
    @extend %ui-well;
    position: relative;
-   margin-bottom: $baseline-v;
    margin: ($baseline-v/2) 0;
    border: 1px solid $color-decorative-tertiary;
    @include border-left(($baseline-h/4) solid $color-decorative-secondary);
    @include padding-left(($baseline-h*0.75));
}

-.openassessment__prompt__title {
.submission__answer__part__prompt__title {
    @extend %text-sr;
}

-.openassessment__prompt__copy {
.submission__answer__part__prompt__copy {
    @extend %copy-2;
    color: $copy-color;
@@ -495,6 +495,11 @@
    }
}

div.submission__answer__part__text__value {
    border: 1px solid $color-decorative-secondary;
    background-color: $white-t;
    padding: $baseline-v;
}

// --------------------
// response
@@ -816,9 +821,20 @@
.submission__answer__display__content {
    @extend %copy-3;
-   @extend %ui-content-longanswer;
    @extend %ui-subsection-content;
-   @extend %ui-well;

    padding: ($baseline-v/2) ($baseline-h/4) $baseline-v ($baseline-h/4);
    background: $color-decorative-quaternary;
    border-radius: ($baseline-v/10);
    list-style: none;

    .submission__answer__part {
        margin: ($baseline-v*1.5) 0px 0px;

        &:first-child {
            margin-top: 0px;
        }
    }
}
......
@@ -7,7 +7,7 @@ from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
-from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict, create_submission_dict
from .resolve_dates import DISTANT_FUTURE
@@ -124,7 +124,7 @@ class StudentTrainingMixin(object):
            examples
        )
        if example:
-           context['training_essay'] = example['answer']
            context['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
            context['training_rubric'] = {
                'criteria': example['rubric']['criteria'],
                'points_possible': example['rubric']['points_possible']
......
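The training essay handed to the template is now a submission dict rather than a raw string. A minimal sketch of the pairing such a helper could perform, assuming prompts is the block's list of {'description': ...} dicts and each answer part is {'text': ...} (shapes taken from the fixtures in this change; the body below is illustrative, not the repo's implementation):

    def create_submission_dict(submission, prompts):
        # Pair each prompt with the matching answer part, in prompt order,
        # so templates can render prompt/response pairs together.
        parts = submission['answer'].get('parts', [])
        submission['answer']['parts'] = [
            {'prompt': prompt, 'text': part.get('text', '')}
            for prompt, part in zip(prompts, parts)
        ]
        return submission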
""" """
Studio editing view for OpenAssessment XBlock. Studio editing view for OpenAssessment XBlock.
""" """
import pkg_resources
import copy import copy
import logging import logging
import pkg_resources
from uuid import uuid4
from django.template import Context from django.template import Context
from django.template.loader import get_template from django.template.loader import get_template
from voluptuous import MultipleInvalid from voluptuous import MultipleInvalid
...@@ -12,7 +14,7 @@ from xblock.fields import List, Scope ...@@ -12,7 +14,7 @@ from xblock.fields import List, Scope
from xblock.fragment import Fragment from xblock.fragment import Fragment
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.validation import validator from openassessment.xblock.validation import validator
from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key from openassessment.xblock.data_conversion import create_rubric_dict, make_django_template_key, update_assessments_format
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.resolve_dates import resolve_dates from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
...@@ -112,7 +114,7 @@ class StudioMixin(object): ...@@ -112,7 +114,7 @@ class StudioMixin(object):
feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
return { return {
'prompt': self.prompt, 'prompts': self.prompts,
'title': self.title, 'title': self.title,
'submission_due': submission_due, 'submission_due': submission_due,
'submission_start': submission_start, 'submission_start': submission_start,
...@@ -127,6 +129,7 @@ class StudioMixin(object): ...@@ -127,6 +129,7 @@ class StudioMixin(object):
make_django_template_key(asmnt) make_django_template_key(asmnt)
for asmnt in editor_assessments_order for asmnt in editor_assessments_order
], ],
'is_released': self.is_released(),
} }
@XBlock.json_handler @XBlock.json_handler
...@@ -189,10 +192,14 @@ class StudioMixin(object): ...@@ -189,10 +192,14 @@ class StudioMixin(object):
)} )}
# This is where we default to EASE for problems which are edited in the GUI # This is where we default to EASE for problems which are edited in the GUI
assessment['algorithm_id'] = 'ease' assessment['algorithm_id'] = 'ease'
if assessment['name'] == 'student-training':
for example in assessment['examples']:
example['answer'] = {'parts': [{'text': text} for text in example['answer']]}
xblock_validator = validator(self, self._) xblock_validator = validator(self, self._)
success, msg = xblock_validator( success, msg = xblock_validator(
create_rubric_dict(data['prompt'], data['criteria']), create_rubric_dict(data['prompts'], data['criteria']),
data['assessments'], data['assessments'],
submission_start=data['submission_start'], submission_start=data['submission_start'],
submission_due=data['submission_due'], submission_due=data['submission_due'],
...@@ -205,7 +212,7 @@ class StudioMixin(object): ...@@ -205,7 +212,7 @@ class StudioMixin(object):
# so we can safely modify the XBlock fields. # so we can safely modify the XBlock fields.
self.title = data['title'] self.title = data['title']
self.display_name = data['title'] self.display_name = data['title']
self.prompt = data['prompt'] self.prompts = data['prompts']
self.rubric_criteria = data['criteria'] self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments'] self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order'] self.editor_assessments_order = data['editor_assessments_order']
...@@ -267,13 +274,20 @@ class StudioMixin(object): ...@@ -267,13 +274,20 @@ class StudioMixin(object):
# could be accomplished within the template, we are opting to remove logic from the template. # could be accomplished within the template, we are opting to remove logic from the template.
student_training_module = self.get_assessment_module('student-training') student_training_module = self.get_assessment_module('student-training')
student_training_template = {'answer': ""} student_training_template = {
'answer': {
'parts': [
{'text': ''} for prompt in self.prompts
]
}
}
criteria_list = copy.deepcopy(self.rubric_criteria_with_labels) criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
for criterion in criteria_list: for criterion in criteria_list:
criterion['option_selected'] = "" criterion['option_selected'] = ""
student_training_template['criteria'] = criteria_list student_training_template['criteria'] = criteria_list
if student_training_module: if student_training_module:
student_training_module = update_assessments_format([student_training_module])[0]
example_list = [] example_list = []
# Adds each example to a modified version of the student training module dictionary. # Adds each example to a modified version of the student training module dictionary.
for example in student_training_module['examples']: for example in student_training_module['examples']:
......
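Per the handler change above, Studio's GUI submits each student-training example answer as one string per prompt; a worked example of the conversion to the new parts format:

    gui_example = {'answer': [u'Response to prompt 1', u'Response to prompt 2']}
    gui_example['answer'] = {
        'parts': [{'text': text} for text in gui_example['answer']]
    }
    # gui_example['answer'] is now
    # {'parts': [{'text': u'Response to prompt 1'},
    #            {'text': u'Response to prompt 2'}]}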
import json
import logging import logging
from xblock.core import XBlock from xblock.core import XBlock
...@@ -9,6 +10,8 @@ from openassessment.workflow import api as workflow_api ...@@ -9,6 +10,8 @@ from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
from data_conversion import create_submission_dict, prepare_submission_for_serialization
from validation import validate_submission
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -54,7 +57,15 @@ class SubmissionMixin(object): ...@@ -54,7 +57,15 @@ class SubmissionMixin(object):
) )
status = False status = False
student_sub = data['submission'] student_sub_data = data['submission']
success, msg = validate_submission(student_sub_data, self.prompts, self._)
if not success:
return (
False,
'EBADARGS',
msg
)
student_item_dict = self.get_student_item_dict() student_item_dict = self.get_student_item_dict()
# Short-circuit if no user is defined (as in Studio Preview mode) # Short-circuit if no user is defined (as in Studio Preview mode)
...@@ -74,7 +85,7 @@ class SubmissionMixin(object): ...@@ -74,7 +85,7 @@ class SubmissionMixin(object):
try: try:
submission = self.create_submission( submission = self.create_submission(
student_item_dict, student_item_dict,
student_sub student_sub_data
) )
except api.SubmissionRequestError as err: except api.SubmissionRequestError as err:
...@@ -132,8 +143,14 @@ class SubmissionMixin(object): ...@@ -132,8 +143,14 @@ class SubmissionMixin(object):
dict: Contains a bool 'success' and unicode string 'msg'. dict: Contains a bool 'success' and unicode string 'msg'.
""" """
if 'submission' in data: if 'submission' in data:
student_sub_data = data['submission']
success, msg = validate_submission(student_sub_data, self.prompts, self._)
if not success:
return {'success': False, 'msg': msg}
try: try:
self.saved_response = unicode(data['submission']) self.saved_response = json.dumps(
prepare_submission_for_serialization(student_sub_data)
)
self.has_saved = True self.has_saved = True
# Emit analytics event... # Emit analytics event...
...@@ -149,11 +166,11 @@ class SubmissionMixin(object): ...@@ -149,11 +166,11 @@ class SubmissionMixin(object):
else: else:
return {'success': False, 'msg': self._(u"This response was not submitted.")} return {'success': False, 'msg': self._(u"This response was not submitted.")}
def create_submission(self, student_item_dict, student_sub): def create_submission(self, student_item_dict, student_sub_data):
# Store the student's response text in a JSON-encodable dict # Store the student's response text in a JSON-encodable dict
# so that later we can add additional response fields. # so that later we can add additional response fields.
student_sub_dict = {'text': student_sub} student_sub_dict = prepare_submission_for_serialization(student_sub_data)
if self.allow_file_upload: if self.allow_file_upload:
student_sub_dict['file_key'] = self._get_student_item_key() student_sub_dict['file_key'] = self._get_student_item_key()
...@@ -352,7 +369,21 @@ class SubmissionMixin(object): ...@@ -352,7 +369,21 @@ class SubmissionMixin(object):
context['submission_start'] = start_date context['submission_start'] = start_date
path = 'openassessmentblock/response/oa_response_unavailable.html' path = 'openassessmentblock/response/oa_response_unavailable.html'
elif not workflow: elif not workflow:
context['saved_response'] = self.saved_response # For backwards compatibility. Initially, problems had only one prompt
# and a string answer. We convert it to the appropriate dict.
try:
saved_response = {
'answer': json.loads(self.saved_response),
}
except ValueError:
saved_response = {
'answer': {
'text': self.saved_response,
},
}
context['saved_response'] = create_submission_dict(saved_response, self.prompts)
context['save_status'] = self.save_status context['save_status'] = self.save_status
context['submit_enabled'] = self.saved_response != '' context['submit_enabled'] = self.saved_response != ''
path = "openassessmentblock/response/oa_response.html" path = "openassessmentblock/response/oa_response.html"
...@@ -372,12 +403,13 @@ class SubmissionMixin(object): ...@@ -372,12 +403,13 @@ class SubmissionMixin(object):
student_submission = self.get_user_submission( student_submission = self.get_user_submission(
workflow["submission_uuid"] workflow["submission_uuid"]
) )
context["student_submission"] = student_submission context["student_submission"] = create_submission_dict(student_submission, self.prompts)
path = 'openassessmentblock/response/oa_response_graded.html' path = 'openassessmentblock/response/oa_response_graded.html'
else: else:
context["student_submission"] = self.get_user_submission( student_submission = self.get_user_submission(
workflow["submission_uuid"] workflow["submission_uuid"]
) )
context["student_submission"] = create_submission_dict(student_submission, self.prompts)
path = 'openassessmentblock/response/oa_response_submitted.html' path = 'openassessmentblock/response/oa_response_submitted.html'
return path, context return path, context
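The mixin now saves and renders responses in the multi-part format. A sketch of the round trip, assuming (per the import above) that prepare_submission_for_serialization maps the per-prompt strings to the 'parts' shape used elsewhere in this change; the helper body here is illustrative, not the repo's implementation:

    import json

    def prepare_submission_for_serialization(student_sub_data):
        # One string per prompt, in prompt order.
        return {'parts': [{'text': text} for text in student_sub_data]}

    saved = json.dumps(prepare_submission_for_serialization([u'part one', u'part two']))

    # Reading it back, with the legacy fallback used when rendering responses
    # saved before multiple prompts existed (a bare, non-JSON string):
    try:
        saved_response = {'answer': json.loads(saved)}
    except ValueError:
        saved_response = {'answer': {'text': saved}}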
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty? Please answer in a short essay of 200-300 words.</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>Concise</name> <name>Concise</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
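The fixtures above replace the single top-level &lt;prompt&gt; with a &lt;prompts&gt; list of &lt;prompt&gt;&lt;description&gt; elements. A minimal standard-library sketch of extracting that structure (the repo's own parser is not shown in this hunk):

    import xml.etree.ElementTree as ET

    root = ET.fromstring(
        '<openassessment>'
        '<prompts>'
        '<prompt><description>Combat poverty?</description></prompt>'
        '<prompt><description>Combat pollution?</description></prompt>'
        '</prompts>'
        '<rubric></rubric>'
        '</openassessment>'
    )
    prompts = [
        {'description': prompt.findtext('description', default='')}
        for prompt in root.findall('./prompts/prompt')
    ]
    # prompts == [{'description': 'Combat poverty?'},
    #             {'description': 'Combat pollution?'}]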
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt>Example-based assessment</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>Ideas</name> <name>Ideas</name>
<prompt>How good are the ideas?</prompt> <prompt>How good are the ideas?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt>Example-based assessment</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>Ideas</name> <name>Ideas</name>
<prompt>How good are the ideas?</prompt> <prompt>How good are the ideas?</prompt>
......
<openassessment> <openassessment>
<title>Feedback only criterion</title> <title>Feedback only criterion</title>
<prompt>Test prompt</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Test rubric prompt</prompt>
<criterion> <criterion>
<name>vocabulary</name> <name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt> <prompt>How good is the vocabulary?</prompt>
......
<openassessment> <openassessment>
<title>Feedback only criterion</title> <title>Feedback only criterion</title>
<prompt>Test prompt</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Test rubric prompt</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Feedback only criterion</title> <title>Feedback only criterion</title>
<prompt>Test prompt</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Test rubric prompt</prompt>
<criterion> <criterion>
<name>vocabulary</name> <name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt> <prompt>How good is the vocabulary?</prompt>
......
<openassessment> <openassessment>
<title>Feedback only criterion</title> <title>Feedback only criterion</title>
<prompt>Test prompt</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Test rubric prompt</prompt>
<criterion> <criterion>
<name>vocabulary</name> <name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt> <prompt>How good is the vocabulary?</prompt>
......
<openassessment> <openassessment>
<title>Feedback only criterion</title> <title>Feedback only criterion</title>
<prompt>Test prompt</prompt> <prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Test rubric prompt</prompt>
<criterion> <criterion>
<name>vocabulary</name> <name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt> <prompt>How good is the vocabulary?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion feedback="optional"> <criterion feedback="optional">
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt> <prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
{ {
"missing_feedback": { "missing_feedback": {
"rubric": { "criteria": [
"prompt": "Test Prompt", {
"criteria": [ "order_num": 0,
{ "name": "Test criterion",
"order_num": 0, "label": "Test criterion label",
"name": "Test criterion", "prompt": "Test criterion prompt",
"prompt": "Test criterion prompt", "options": [
"options": [ {
{ "order_num": 0,
"order_num": 0, "points": 0,
"points": 0, "name": "No",
"name": "No", "label": "No label",
"explanation": "No explanation" "explanation": "No explanation"
} }
] ]
} }
] ],
}, "prompts": [{"description": "My new prompt."}],
"prompt": "My new prompt.",
"submission_due": "4014-02-27T09:46:28", "submission_due": "4014-02-27T09:46:28",
"submission_start": "4014-02-10T09:46:28", "submission_start": "4014-02-10T09:46:28",
"title": "My new title.", "title": "My new title.",
...@@ -36,9 +35,59 @@ ...@@ -36,9 +35,59 @@
"start": "", "start": "",
"due": "" "due": ""
} }
]
},
"prompts_is_string": {
"title": "Foo",
"prompts": "My new prompt.",
"feedback_prompt": "Test Feedback Prompt",
"feedback_default_text": "Test default text...",
"submission_start": null,
"submission_due": null,
"allow_file_upload": true,
"leaderboard_show": true,
"allow_latex": false,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"label": "Test criterion label",
"prompt": "Test criterion prompt",
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"label": "No label",
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"label": "Yes label",
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": "2014-04-01T00:00:00",
"due": "2014-06-01T00:00:00"
}
], ],
"expected-assessment": "peer-assessment", "editor_assessments_order": ["student-training", "peer-assessment", "self-assessment"]
"expected-criterion-prompt": "Test criterion prompt"
} }
} }
{ {
"zero_criteria": { "zero_criteria": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [] "criteria": []
} }
}, },
"negative_points": { "negative_points": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
"duplicate_criteria_names": { "duplicate_criteria_names": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -63,7 +63,7 @@ ...@@ -63,7 +63,7 @@
"duplicate_option_names": { "duplicate_option_names": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -88,9 +88,62 @@ ...@@ -88,9 +88,62 @@
} }
}, },
"change_prompts_number_after_release": {
"rubric": {
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}, {"description": "Test Prompt 3."}],
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"current_rubric": {
"prompts": [{"description": "Test Prompt 3."}, {"description": "Test Prompt 4."}],
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
},
"is_released": true
},
"change_points_after_release": { "change_points_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -114,7 +167,7 @@ ...@@ -114,7 +167,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -142,7 +195,7 @@ ...@@ -142,7 +195,7 @@
"add_criteria_after_release": { "add_criteria_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -166,7 +219,7 @@ ...@@ -166,7 +219,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -213,7 +266,7 @@ ...@@ -213,7 +266,7 @@
"remove_criteria_after_release": { "remove_criteria_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -256,7 +309,7 @@ ...@@ -256,7 +309,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -284,7 +337,7 @@ ...@@ -284,7 +337,7 @@
"add_options_after_release": { "add_options_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -308,7 +361,7 @@ ...@@ -308,7 +361,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -330,7 +383,7 @@ ...@@ -330,7 +383,7 @@
"remove_options_after_release": { "remove_options_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -348,7 +401,7 @@ ...@@ -348,7 +401,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -376,7 +429,7 @@ ...@@ -376,7 +429,7 @@
"rename_criterion_name_after_release": { "rename_criterion_name_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -400,7 +453,7 @@ ...@@ -400,7 +453,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -428,7 +481,7 @@ ...@@ -428,7 +481,7 @@
"rename_multiple_criteria_after_release": { "rename_multiple_criteria_after_release": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -465,7 +518,7 @@ ...@@ -465,7 +518,7 @@
] ]
}, },
"current_rubric": { "current_rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -507,7 +560,7 @@ ...@@ -507,7 +560,7 @@
"example_based_duplicate_option_points": { "example_based_duplicate_option_points": {
"is_example_based": true, "is_example_based": true,
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -534,7 +587,7 @@ ...@@ -534,7 +587,7 @@
"zero_options_feedback_optional": { "zero_options_feedback_optional": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -549,7 +602,7 @@ ...@@ -549,7 +602,7 @@
"zero_options_feedback_disabled": { "zero_options_feedback_disabled": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
...@@ -564,7 +617,7 @@ ...@@ -564,7 +617,7 @@
"zero_options_no_feedback": { "zero_options_no_feedback": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
......
<openassessment leaderboard_show="3"> <openassessment leaderboard_show="3">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt> <prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
......
<openassessment leaderboard_show="3" allow_file_upload="True"> <openassessment leaderboard_show="3" allow_file_upload="True">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment leaderboard_show="10"> <openassessment leaderboard_show="10">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment submission_due="2014-03-05"> <openassessment submission_due="2014-03-05">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
...@@ -90,7 +90,11 @@ ...@@ -90,7 +90,11 @@
"due": "2014-06-01T00:00:00", "due": "2014-06-01T00:00:00",
"examples": [ "examples": [
{ {
"answer": "ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Test criterion", "criterion": "Test criterion",
...@@ -127,7 +131,11 @@ ...@@ -127,7 +131,11 @@
"due": "2014-06-01T00:00:00", "due": "2014-06-01T00:00:00",
"examples": [ "examples": [
{ {
"answer": "ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Test criterion", "criterion": "Test criterion",
...@@ -140,7 +148,11 @@ ...@@ -140,7 +148,11 @@
] ]
}, },
{ {
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Another test criterion", "criterion": "Another test criterion",
......
...@@ -10,7 +10,11 @@ ...@@ -10,7 +10,11 @@
], ],
"examples": [ "examples": [
{ {
"answer": "ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Test criterion", "criterion": "Test criterion",
...@@ -25,12 +29,18 @@ ...@@ -25,12 +29,18 @@
"xml": [ "xml": [
"<examples>", "<examples>",
"<example>", "<example>",
"<answer>ẗëṡẗ äṅṡẅëṛ</answer>", "<answer>",
"<part>ẗëṡẗ äṅṡẅëṛ 1</part>",
"<part>ẗëṡẗ äṅṡẅëṛ 2</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />", "<select criterion=\"Test criterion\" option=\"Yes\" />",
"<select criterion=\"Another test criterion\" option=\"No\" />", "<select criterion=\"Another test criterion\" option=\"No\" />",
"</example>", "</example>",
"<example>", "<example>",
"<answer>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ</answer>", "<answer>",
"<part>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 1</part>",
"<part>äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 2</part>",
"</answer>",
"<select criterion=\"Another test criterion\" option=\"Yes\" />", "<select criterion=\"Another test criterion\" option=\"Yes\" />",
"<select criterion=\"Test criterion\" option=\"No\" />", "<select criterion=\"Test criterion\" option=\"No\" />",
"</example>", "</example>",
...@@ -38,7 +48,12 @@ ...@@ -38,7 +48,12 @@
], ],
"examples": [ "examples": [
{ {
"answer": "ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "ẗëṡẗ äṅṡẅëṛ 1"},
{"text": "ẗëṡẗ äṅṡẅëṛ 2"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Test criterion", "criterion": "Test criterion",
...@@ -51,7 +66,12 @@ ...@@ -51,7 +66,12 @@
] ]
}, },
{ {
"answer": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ", "answer": {
"parts": [
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 1"},
{"text": "äṅöẗḧëṛ ẗëṡẗ äṅṡẅëṛ 2"}
]
},
"options_selected": [ "options_selected": [
{ {
"criterion": "Another test criterion", "criterion": "Another test criterion",
......
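These serialization fixtures show each &lt;part&gt; inside &lt;answer&gt; becoming one {'text': ...} entry in the example's answer. A small sketch of that direction of the mapping, using the standard library (illustrative, not the repo's parser):

    import xml.etree.ElementTree as ET

    node = ET.fromstring(
        '<example>'
        '<answer><part>test answer 1</part><part>test answer 2</part></answer>'
        '<select criterion="Test criterion" option="Yes" />'
        '</example>'
    )
    answer = {
        'parts': [{'text': part.text or ''} for part in node.findall('./answer/part')]
    }
    # answer == {'parts': [{'text': 'test answer 1'}, {'text': 'test answer 2'}]}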
{
"promptless": {
"xml": [
"<openassessment>",
"<rubric>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": ""}]
},
"rubric_prompt": {
"xml": [
"<openassessment>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "Test prompt"}]
},
"rubric_prompt_empty": {
"xml": [
"<openassessment>",
"<rubric>",
"<prompt></prompt>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": ""}]
},
"rubric_prompt_unicode": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description>ՇєรՇ קг๏๓קՇ</description></prompt>",
"</prompts>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "ՇєรՇ קг๏๓קՇ"}]
},
"prompts_one": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description>Test prompt</description></prompt>",
"</prompts>",
"<rubric>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "Test prompt"}]
},
"prompts_multiple": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description>Test prompt 1</description></prompt>",
"<prompt><description>Test prompt 2</description></prompt>",
"<prompt><description>Test prompt 3</description></prompt>",
"</prompts>",
"<rubric>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "Test prompt 1"}, {"description": "Test prompt 2"}, {"description": "Test prompt 3"}]
},
"prompts_empty": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description></description></prompt>",
"</prompts>",
"<rubric>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": ""}]
},
"prompts_multiple_empty": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description></description></prompt>",
"<prompt><description></description></prompt>",
"</prompts>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": ""}, {"description": ""}]
},
"prompts_unicode": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description>ՇєรՇ קг๏๓קՇ 1</description></prompt>",
"<prompt><description>ՇєรՇ קг๏๓קՇ 2</description></prompt>",
"</prompts>",
"<rubric>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "ՇєรՇ קг๏๓קՇ 1"}, {"description": "ՇєรՇ קг๏๓קՇ 2"}]
},
"rubric_prompt_and_prompts": {
"xml": [
"<openassessment>",
"<prompts>",
"<prompt><description>Test prompt 1</description></prompt>",
"<prompt><description>Test prompt 2</description></prompt>",
"</prompts>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"</rubric>",
"</openassessment>"
],
"prompts": [{"description": "Test prompt 1"}, {"description": "Test prompt 2"}]
}
}
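Taken together, the fixtures above pin down a precedence rule: a top-level &lt;prompts&gt; block wins over the legacy rubric-level &lt;prompt&gt;, a lone rubric prompt is wrapped as a single-prompt list, and a missing prompt yields one empty description. A sketch of logic consistent with every case above (hypothetical helper name, not the repo's actual function):

    import xml.etree.ElementTree as ET

    def parse_prompts(root):
        prompt_elements = root.findall('./prompts/prompt')
        if prompt_elements:
            return [
                {'description': el.findtext('description', default='')}
                for el in prompt_elements
            ]
        legacy = root.findtext('./rubric/prompt')
        return [{'description': legacy if legacy is not None else ''}]

    root = ET.fromstring(
        '<openassessment><rubric><prompt>Test prompt</prompt></rubric></openassessment>'
    )
    assert parse_prompts(root) == [{'description': 'Test prompt'}]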
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
"simple": { "simple": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>Test criterion prompt</prompt>",
...@@ -11,7 +10,6 @@ ...@@ -11,7 +10,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": "Test prompt",
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
...@@ -43,7 +41,6 @@ ...@@ -43,7 +41,6 @@
"feedback_prompt": { "feedback_prompt": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>Test criterion prompt</prompt>",
...@@ -53,7 +50,6 @@ ...@@ -53,7 +50,6 @@
"<feedbackprompt>This is the feedback prompt</feedbackprompt>", "<feedbackprompt>This is the feedback prompt</feedbackprompt>",
"</rubric>" "</rubric>"
], ],
"prompt": "Test prompt",
"feedbackprompt": "This is the feedback prompt", "feedbackprompt": "This is the feedback prompt",
"criteria": [ "criteria": [
{ {
...@@ -93,7 +89,6 @@ ...@@ -93,7 +89,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": null,
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
...@@ -125,7 +120,6 @@ ...@@ -125,7 +120,6 @@
"empty_prompt": { "empty_prompt": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt></prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>Test criterion prompt</prompt>",
...@@ -134,7 +128,6 @@ ...@@ -134,7 +128,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": "",
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
...@@ -166,7 +159,6 @@ ...@@ -166,7 +159,6 @@
"unicode": { "unicode": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt>ՇєรՇ קг๏๓קՇ</prompt>",
"<criterion>", "<criterion>",
"<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>", "<name>𝓣𝓮𝓼𝓽 𝓬𝓻𝓲𝓽𝓮𝓻𝓲𝓸𝓷</name>",
"<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>", "<prompt>Ŧɇsŧ ȼɍɨŧɇɍɨøn ꝑɍømꝑŧ</prompt>",
...@@ -175,7 +167,6 @@ ...@@ -175,7 +167,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": "ՇєรՇ קг๏๓קՇ",
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
...@@ -207,7 +198,6 @@ ...@@ -207,7 +198,6 @@
"multiple_criteria": { "multiple_criteria": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>Test criterion prompt</prompt>",
...@@ -221,7 +211,6 @@ ...@@ -221,7 +211,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": "Test prompt",
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
...@@ -269,7 +258,6 @@ ...@@ -269,7 +258,6 @@
"criterion_feedback_optional": { "criterion_feedback_optional": {
"xml": [ "xml": [
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>Test criterion prompt</prompt>",
...@@ -283,7 +271,6 @@ ...@@ -283,7 +271,6 @@
"</criterion>", "</criterion>",
"</rubric>" "</rubric>"
], ],
"prompt": "Test prompt",
"feedbackprompt": null, "feedbackprompt": null,
"criteria": [ "criteria": [
{ {
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>Concise</name> <name>Concise</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
{ {
"empty": [""], "empty": [[""], [""]],
"unicode": ["Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!"], "unicode": [["Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!"], ["Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!"]],
"long": [ "long": [
"Lorem ipsum dolor sit amet,", [ "Lorem ipsum dolor sit amet,"],
[
"consectetur adipiscing elit. Etiam luctus dapibus ante, vel luctus nibh bibendum et.", "consectetur adipiscing elit. Etiam luctus dapibus ante, vel luctus nibh bibendum et.",
"Praesent in commodo quam. Morbi lobortis at felis ac mollis.", "Praesent in commodo quam. Morbi lobortis at felis ac mollis.",
"Maecenas placerat nisl sed imperdiet posuere.", "Maecenas placerat nisl sed imperdiet posuere.",
...@@ -50,5 +51,6 @@ ...@@ -50,5 +51,6 @@
"nascetur ridiculus mus. Mauris at dapibus mauris, sed pharetra tortor.", "nascetur ridiculus mus. Mauris at dapibus mauris, sed pharetra tortor.",
"Pellentesque purus sem, congue sed elementum non, pretium in mi. Cras cursus gravida commodo.", "Pellentesque purus sem, congue sed elementum non, pretium in mi. Cras cursus gravida commodo.",
"Aenean eu massa rhoncus, faucibus tortor id, sollicitudin tortor." "Aenean eu massa rhoncus, faucibus tortor id, sollicitudin tortor."
]
] ]
} }
\ No newline at end of file
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt> <prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name> <name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......
<openassessment> <openassessment>
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompt> <prompts>
Given the state of the world today, what do you think should be done to <prompt>
combat poverty? Please answer in a short essay of 200-300 words. <description>Given the state of the world today, what do you think should be done to combat poverty?</description>
</prompt> </prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric> <rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion> <criterion>
<name>Concise</name> <name>Concise</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
......