Commit 58f45d79 by gradyward

Merge branch 'authoring' of https://github.com/edx/edx-ora2 into grady/ai-authoring

Conflicts:
	openassessment/xblock/defaults.py
	openassessment/xblock/static/css/openassessment.css
	openassessment/xblock/static/js/openassessment-studio.min.js
	openassessment/xblock/studio_mixin.py
	openassessment/xblock/test/data/invalid_update_xblock.json
	openassessment/xblock/test/data/update_xblock.json
	openassessment/xblock/test/test_studio.py
parents 64e90768 eefe1b46
@@ -100,7 +100,11 @@
</ul>
<p class="openassessment_description" id="openassessment_step_select_description">
{% trans "Select the steps that students must complete. All steps are optional, but every assignment must include at least one step. To change the order in which students will complete the steps, drag them into the desired order." %}
{% if 'example-based-assessment' in editor_assessments_order %}
{% trans "In this assignment, you can include example-based assessment, student training, peer assessment, and self assessment steps. Select the steps that you want below, and then drag them into the order that you want. If you include an example-based assessment step, it must be the first step. If you include a student training step, it must precede the peer assessment step." %}
{% else %}
{% trans "In this assignment, you can include student training, peer assessment, and self assessment steps. Select the steps that you want below, and then drag them into the order that you want. If you include a student training step, it must precede the peer assessment step." %}
{% endif %}
</p>
<ol id="openassessment_assessment_module_settings_editors">
{% for assessment in editor_assessments_order %}
@@ -63,7 +63,7 @@ DEFAULT_RUBRIC_CRITERIA = [
# The rubric's feedback prompt is a set of instructions letting the student
# know they can provide additional free-form feedback in their assessment.
DEFAULT_RUBRIC_FEEDBACK_PROMPT = """
(Optional) What aspects of this response stood out to you? What did it do well? How could it improve?
"""
DEFAULT_EXAMPLE_ANSWER = """
@@ -133,8 +133,7 @@ DEFAULT_ASSESSMENT_MODULES = [
]
DEFAULT_EDITOR_ASSESSMENTS_ORDER = [
"example-based-assessment",
"student-training",
"peer-assessment",
"self-assessment"
"self-assessment",
]
@@ -91,7 +91,6 @@ EDITOR_UPDATE_SCHEMA = Schema({
})
],
'examples_xml': utf8_validator,
'algorithm_id': All(utf8_validator, In(['ease','fake']))
})
],
Required('editor_assessments_order'): [
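The schema hunk above drops 'algorithm_id' from the per-assessment definition, so the editor payload no longer names a grading backend. A minimal sketch of the effect, assuming EDITOR_UPDATE_SCHEMA is a voluptuous Schema (as the Schema/Required/All/In calls suggest); this cut-down schema is illustrative, not the real one:

    from voluptuous import MultipleInvalid, Required, Schema

    # Illustrative stand-in for EDITOR_UPDATE_SCHEMA after this change.
    editor_schema = Schema({
        Required('assessments'): [{
            'name': str,
            'examples_xml': str,
        }],
    })

    try:
        # 'algorithm_id' is no longer declared, so voluptuous treats it as
        # an extra key and rejects the payload.
        editor_schema({'assessments': [
            {'name': 'example-based-assessment', 'algorithm_id': 'ease'},
        ]})
    except MultipleInvalid as err:
        print(err)  # extra keys not allowed @ data['assessments'][0]['algorithm_id']

The server now supplies the algorithm itself; see the assessment['algorithm_id'] = 'ease' default added in studio_mixin.py below.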
This source diff could not be displayed because it is too large.
@@ -14,7 +14,7 @@ describe("OpenAssessment edit assessment views", function() {
var testLoadXMLExamples = function(view) {
var xml = "XML DEFINITIONS WOULD BE HERE";
view.exampleDefinitions(xml);
expect(view.description()).toEqual({ examples: xml });
expect(view.description()).toEqual({ examples_xml: xml });
};
beforeEach(function() {
@@ -372,8 +372,6 @@ OpenAssessment.EditExampleBasedAssessmentView = function(element) {
this.element = element;
this.name = "example-based-assessment";
var view = this;
new OpenAssessment.ToggleControl(
this.element,
"#ai_assessment_description_closed",
@@ -393,14 +391,12 @@ OpenAssessment.EditExampleBasedAssessmentView.prototype = {
>>> editTrainingView.description();
{
examples_xml: "XML DEFINITION HERE",
algorithm: 'ease'
}
**/
description: function() {
return {
examples_xml: this.exampleDefinitions(),
algorithm_id: 'ease'
examples_xml: this.exampleDefinitions()
};
},
@@ -187,12 +187,6 @@
height: Calc(100% - 97px);
}
#openassessment_editor_header{
background-color: #e5e5e5;
width: 100%;
top: 0;
}
#oa_editor_window_title{
float: left;
}
@@ -203,17 +197,40 @@
background-image: linear-gradient(#F2F2F2,#FFF);
}
.oa_editor_tab{
#openassessment_editor_header {
background-color: #e5e5e5;
width: 100%;
top: 0;
height: 42px;
.editor_tabs {
width: 35%;
min-width: 190px;
max-width: 360px;
float: right;
.oa_editor_tab {
float: right;
padding: ($baseline-v/8) ($baseline-h/8);
margin: ($baseline-v/8) ($baseline-h/8);
border-radius: ($baseline-v/4);
padding: 0;
width: 29%;
margin: 0px 2%;
height: 35px;
border-radius: 5px;
box-shadow: none;
border: 0;
text-align: center;
a {
padding: 8px 0 0 0;
width: 100%;
}
}
}
}
.oa_editor_content_wrapper {
height: Calc(100% - 1px);
height: 100%;
width: 100%;
border-radius: 3px;
border: 1px solid $edx-gray-d1;
@@ -419,7 +436,6 @@
}
.openassessment_criterion {
padding-bottom: 10px;
.openassessment_criterion_remove_button{
@extend .openassessment_rubric_remove_button;
@@ -469,8 +485,8 @@
border: 1px solid;
color: #009fe6;
background-color: white;
padding: 5px;
margin: 10px;
padding: 7.5px;
margin: 10px 10px 10px 20px;
border-radius: 3px;
cursor: pointer;
}
@@ -551,7 +567,7 @@
}
.openassessment_criterion_option{
padding: 7.5px;
padding: 5px 5px 5px 15px;
.openassessment_criterion_option_remove_button{
@extend .openassessment_rubric_remove_button;
@@ -583,13 +599,14 @@
margin: 0;
.wrapper-comp-setting{
min-width: 200px;
min-width: 150px;
}
label{
width: auto;
padding-left: 15%;
padding-left: Calc((100% - 150px)/2);
margin: 0;
float: right;
input{
min-width: 50px;
@@ -599,8 +616,6 @@
font-size: 11px;
}
}
}
.openassessment_criterion_option_name_wrapper{
@@ -728,13 +743,13 @@
}
background-color: white;
border-top: 1px solid;
border-bottom: 1px solid;
border: 1px solid;
border-radius: 4px;
text-align: center;
color: #009fe6;
padding: 10px;
margin: 10px, 0;
margin: 15px 10px;
}
#openassessment_rubric_add_criterion:hover{
@@ -760,6 +775,7 @@
.modal-lg.modal-window.confirm.openassessment_modal_window{
height: 80%;
top: 10%;
min-width: 600px;
}
.openassessment_full_height.edit-xblock-modal,
@@ -18,6 +18,7 @@ from openassessment.xblock.data_conversion import create_rubric_dict, make_djang
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
from openassessment.xblock.xml import UpdateFromXmlError
logger = logging.getLogger(__name__)
@@ -119,14 +120,8 @@ class StudioMixin(object):
Update the XBlock's configuration.
Args:
data (dict): Data from the request; should have a value for the keys: 'rubric', 'prompt',
'title', 'submission_start', 'submission_due', and 'assessments'.
-- The 'rubric' should be an XML representation of the new rubric.
-- The 'prompt' and 'title' should be plain text.
-- The dates 'submission_start' and 'submission_due' are both ISO strings
-- The 'assessments' is a list of assessment dictionaries (much like self.rubric_assessments)
with the notable exception that all examples (for Student Training and eventually AI)
are in XML string format and need to be parsed into dictionaries.
data (dict): Data from the request; should have the format described
in the editor schema.
Kwargs:
suffix (str): Not used
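For reference, a request body matching this handler looks roughly like the following; the field names come from this diff and the docstring it replaces, while the values are invented:

    # Hypothetical update_editor_context payload (values are made up).
    payload = {
        'title': 'Essay on open response assessment',
        'prompt': 'Explain how peer grading works.',
        'rubric': '<rubric>...</rubric>',           # XML representation of the rubric
        'submission_start': '2014-04-01T00:00:00',  # ISO 8601 strings
        'submission_due': '2014-06-01T00:00:00',
        'assessments': [{
            'name': 'example-based-assessment',
            'examples_xml': '<examples>...</examples>',  # parsed server-side
        }],
        'editor_assessments_order': [
            'example-based-assessment',
            'student-training',
            'peer-assessment',
            'self-assessment',
        ],
    }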
@@ -145,8 +140,7 @@ class StudioMixin(object):
return {'success': False, 'msg': _('Error updating XBlock configuration')}
# Check that the editor assessment order contains all the assessments. We are more flexible on example-based.
if (set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) - {'example-based-assessment'}) \
!= (set(data['editor_assessments_order']) - {'example-based-assessment'}):
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (set(data['editor_assessments_order']) - {'example-based-assessment'}):
logger.exception('editor_assessments_order does not contain all expected assessment types')
return {'success': False, 'msg': _('Error updating XBlock configuration')}
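The check tolerates example-based assessment on the submitted side while still requiring every core step. A sketch of the behavior, under the assumption that the default order lists only the three core steps:

    # Illustration of the order check; DEFAULT_EDITOR_ASSESSMENTS_ORDER is
    # assumed to contain only the core steps here.
    DEFAULT_EDITOR_ASSESSMENTS_ORDER = [
        'student-training', 'peer-assessment', 'self-assessment',
    ]

    def order_is_complete(editor_assessments_order):
        # The optional example-based step is stripped before comparison.
        submitted = set(editor_assessments_order) - {'example-based-assessment'}
        return set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == submitted

    assert order_is_complete(
        ['student-training', 'peer-assessment', 'self-assessment'])
    assert order_is_complete(
        ['example-based-assessment', 'student-training',
         'peer-assessment', 'self-assessment'])
    assert not order_is_complete(['peer-assessment', 'self-assessment'])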
@@ -167,7 +161,19 @@ class StudioMixin(object):
# definition we expect for validation and storing.
for assessment in data['assessments']:
if assessment['name'] == 'example-based-assessment':
try:
assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
except UpdateFromXmlError:
return {'success': False, 'msg': _(
u'Validation error: There was an error in the XML definition of the '
u'examples provided by the user. Please correct the XML definition before saving.')
}
except KeyError:
return {'success': False, 'msg': _(
u'Validation error: No examples were provided for example-based assessment.'
)}
# This is where we default to EASE for problems which are edited in the GUI
assessment['algorithm_id'] = 'ease'
xblock_validator = validator(self)
success, msg = xblock_validator(
@@ -299,7 +305,7 @@ class StudioMixin(object):
if asmnt_name in order
]
if problem_order_indices != sorted(problem_order_indices):
unused_assessments = list(set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) - {'example-based-assessment'} - set(used_assessments))
unused_assessments = list(set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) - set(used_assessments))
return sorted(unused_assessments) + used_assessments
# Forwards compatibility:
@@ -1041,6 +1041,10 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "NOT A VALID ASSESSMENT"]
"editor_assessments_order": [
"student-training", "peer-assessment",
"self-assessment", "example-based-assessment",
"NOT A VALID ASSESSMENT"
]
}
}
@@ -87,6 +87,13 @@ class StudioViewTest(XBlockHandlerTestCase):
}
]
EXAMPLE_BASED_ASSESSMENT_EXAMPLES = '<examples>' + \
'<example>' + \
'<answer> TEST ANSWER </answer>' + \
'<select criterion="Test criterion" option="Test option" />' + \
'</example>' + \
'</examples>'
ASSESSMENT_CSS_IDS = {
"example-based-assessment": "oa_ai_assessment_editor",
"peer-assessment": "oa_peer_assessment_editor",
@@ -104,6 +111,11 @@ class StudioViewTest(XBlockHandlerTestCase):
frag = self.runtime.render(xblock, 'studio_view')
self.assertTrue(frag.body_html().find('openassessment-edit'))
@scenario('data/example_based_only.xml')
def test_render_studio_with_ai(self, xblock):
frag = self.runtime.render(xblock, 'studio_view')
self.assertTrue(frag.body_html().find('openassessment-edit'))
@file_data('data/update_xblock.json')
@scenario('data/basic_scenario.xml')
def test_update_editor_context(self, xblock, data):
@@ -126,6 +138,25 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertEqual(xblock.editor_assessments_order, data['editor_assessments_order'])
@scenario('data/basic_scenario.xml')
def test_update_editor_context_saves_assessment_order_with_ai(self, xblock):
# Update the XBlock with a different editor assessment order
data = copy.deepcopy(self.UPDATE_EDITOR_DATA)
data['assessments'] = [{
'name': 'example-based-assessment',
'examples_xml': self.EXAMPLE_BASED_ASSESSMENT_EXAMPLES
}]
data['editor_assessments_order'] = [
"example-based-assessment",
"student-training",
"peer-assessment",
"self-assessment",
]
xblock.published_date = None
resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertEqual(xblock.editor_assessments_order, data['editor_assessments_order'])
@scenario('data/basic_scenario.xml')
def test_update_editor_context_assign_unique_names(self, xblock):
# Update the XBlock with a rubric that is missing
# some of the (unique) names for rubric criteria/options.
@@ -66,6 +66,7 @@ def _is_valid_assessment_sequence(assessments):
['student-training', 'peer-assessment'],
['student-training', 'peer-assessment', 'self-assessment'],
['student-training', 'self-assessment', 'peer-assessment'],
['self-assessment', 'student-training', 'peer-assessment'],
['example-based-assessment'],
['example-based-assessment', 'self-assessment'],
['example-based-assessment', 'peer-assessment'],
@@ -74,6 +75,7 @@ def _is_valid_assessment_sequence(assessments):
['example-based-assessment', 'student-training', 'peer-assessment'],
['example-based-assessment', 'student-training', 'peer-assessment', 'self-assessment'],
['example-based-assessment', 'student-training', 'self-assessment', 'peer-assessment'],
['example-based-assessment', 'self-assessment', 'student-training', 'peer-assessment'],
]
sequence = [asmnt.get('name') for asmnt in assessments]
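The check itself is a list-membership test over the whitelist above; a self-contained sketch (VALID_ASSESSMENT_SEQUENCES is a hypothetical name, and only the two sequences added in this commit are shown):

    # Sketch of the sequence validation; the full whitelist is longer.
    VALID_ASSESSMENT_SEQUENCES = [
        ['self-assessment', 'student-training', 'peer-assessment'],
        ['example-based-assessment', 'self-assessment',
         'student-training', 'peer-assessment'],
    ]

    def _is_valid_assessment_sequence(assessments):
        sequence = [asmnt.get('name') for asmnt in assessments]
        return sequence in VALID_ASSESSMENT_SEQUENCES

    assert _is_valid_assessment_sequence([
        {'name': 'self-assessment'},
        {'name': 'student-training'},
        {'name': 'peer-assessment'},
    ])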
@@ -108,13 +110,8 @@ def validate_assessments(assessments, current_assessments, is_released):
# Ensure that we support this sequence of assessments.
if not _is_valid_assessment_sequence(assessments):
msg = _(
"For this assignment, you can set a peer assessment only, a self "
"assessment only, or a peer assessment followed by a self "
"assessment. Student training is allowed only immediately before "
"peer assessment."
)
return (False, msg)
msg = _("The assessment order you selected is invalid.")
return False, msg
for assessment_dict in assessments:
# Number you need to grade is >= the number of people that need to grade you
@@ -806,9 +806,16 @@ def _unicode_to_xml(xml):
def parse_examples_from_xml_str(xml):
"""
Converts an XML string of examples (Student Training or AI) into a list of
dictionaries representing the same information.
Args:
xml (unicode): The XML definition of the examples
Returns:
(list of dict): The example definitions
"""
examples_root = _unicode_to_xml(xml)
examples = examples_root.findall('example')
return parse_examples_xml(examples)
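Applied to the EXAMPLE_BASED_ASSESSMENT_EXAMPLES fixture from the test above, the call would return something along these lines (the key names are assumed from the student-training example format, not confirmed by this diff):

    examples = parse_examples_from_xml_str(
        '<examples>'
        '<example>'
        '<answer> TEST ANSWER </answer>'
        '<select criterion="Test criterion" option="Test option" />'
        '</example>'
        '</examples>'
    )
    # Plausible result (assumed shape):
    # [{
    #     'answer': ' TEST ANSWER ',
    #     'options_selected': [
    #         {'criterion': 'Test criterion', 'option': 'Test option'},
    #     ],
    # }]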