Commit 29946f2e by Will Daly

Update error logging; remove failing tests

Remove unused debugger statements
Cleanup of JavaScript docstrings and semicolons
Convert key sets to lists in error messages; remove errors for extraneous keys
Handle unicode correctly; fix test cases
Use .parent() instead of wrapping the HTML with <li>
Fix formatting of missing key error message
Remove extra key check
Fix feedback checkbox selector in JS
Fix error caused by storing the rubric dict in the rubric criteria list XBlock field
parent b2bc882c
[Source diff for one file omitted: too large to display]
@@ -16,7 +16,7 @@ OpenAssessment.StudioView = function(runtime, element, server) {
     this.server = server;
     this.liveElement = $(element);
     var liveElement = this.liveElement;
     // Instantiates JQuery selector variables which will allow manipulation and display controls.
@@ -49,19 +49,15 @@ OpenAssessment.StudioView = function(runtime, element, server) {
     // Captures the HTML definition of the original criterion element. This will be the template
     // used for all other criterion creations
-    var criterionBodyHtml = $("#openassessment_criterion_1", liveElement).html();
-    // Adds the wrapping LI tag which is not captured by the find element.
-    var criterionHtml = '<li class="openassessment_criterion" id="openassessment_criterion_1">'
-        + criterionBodyHtml + '</li>';
+    var criterionHtml = $("#openassessment_criterion_1", liveElement).parent().html();
     // Replaces all instances of the original ID (1) with the new fake ID in the string
     // representation of the Criterion LI. This is our new template, with a C-C-C marker to replace.
     this.criterionHtmlTemplate = criterionHtml.replace(new RegExp("1", "g"), "C-C-C");
     // Captures the HTML definition of the original option element. Note that there are TWO template
     // tags that need to be replaced "C-C-C" for the Criterion ID, and "O-O-O" for the option ID.
-    var optionBodyHtml = $("#openassessment_criterion_1_option_1", liveElement).html();
-    var optionHtml = '<li id=openassessment_criterion_1_option_1 class="openassessment_criterion_option">' +
-        optionBodyHtml + '</li>';
+    var optionHtml = $("#openassessment_criterion_1_option_1", liveElement).parent().html();
     var criteriaReplaced = optionHtml.replace(new RegExp("criterion_1", "g"), "criterion_C-C-C");
     this.optionHtmlTemplate = criteriaReplaced.replace(new RegExp("option_1", "g"), "option_O-O-O");
@@ -164,7 +160,7 @@ OpenAssessment.StudioView.prototype = {
         view.addNewCriterionToRubric();
     }
     while(view.numberOfCriteria > rubric.criteria.length){
-        view.removeCriterionFromRubric(1)
+        view.removeCriterionFromRubric(1);
     }
     // Corrects the number of options in each criterion
@@ -233,10 +229,12 @@ OpenAssessment.StudioView.prototype = {
    },
    /**
-    * A simple helper method which constructs checkbox listeners for all of our assessment modules
-    * @param name name of assessment module to install listener on
-    * @param liveElement the live DOM selector
-    */
+    Construct checkbox listeners for all of our assessment modules
+
+    Args:
+        name (string): name of assessment module to install listener on
+        liveElement (DOM element): the live DOM selector
+    */
    addSettingsAssessmentCheckboxListener: function (name, liveElement) {
        $("#include_" + name, liveElement).change(function () {
            if (this.checked){
@@ -266,13 +264,12 @@ OpenAssessment.StudioView.prototype = {
    },
    /**
-    Initializes a new criterion for the rubric. Has multiple elements. This block of code dictates
-    the methodology that we add and remove rubric criteria
+    Adds a new criterion to the rubric.
    */
    addNewCriterionToRubric: function (){
        var view = this;
        var liveElement = this.liveElement;
        // Always appends the new criterion to the end of the list, and we force linear ordering.
        var newCriterionID = this.numberOfCriteria + 1;
        this.numberOfCriteria += 1;
@@ -346,10 +343,9 @@ OpenAssessment.StudioView.prototype = {
    },
    /**
-    * Removes a specified criterion from the problem's rubric definition. Changes are made in the DOM,
-    * in support/control structures.
-    * @param criterionToRemove
+    Removes a specified criterion from the problem's rubric definition.
+    Changes are made in the DOM, in support/control structures.
    */
    removeCriterionFromRubric: function(criterionToRemove){
        var view = this;
        var numCriteria = view.numberOfCriteria;
@@ -396,10 +392,12 @@ OpenAssessment.StudioView.prototype = {
    },
    /**
-    * Initializes a new option for a given criterion. This code block dictates the methodology for
-    * adding and removing options to a rubric.
-    * @param liveElement A selector representing the current state of the Criterion DOM
-    * @param criterionID The specific number of the criterion the option is being added to
+    Initializes a new option for a given criterion. This code block dictates the methodology for
+    adding and removing options to a rubric.
+
+    Args:
+        liveElement (DOM element): An element containing the criterion interface in the DOM.
+        criterionID (string): The specific number of the criterion the option is being added to
    */
    addNewOptionToCriterion: function (liveElement, criterionID){
        var view = this;
@@ -433,17 +431,19 @@ OpenAssessment.StudioView.prototype = {
            function(eventData){
                view.removeOptionFromCriterion(liveElement, criterionID, newOptionID);
            }
-        )
+        );
    },
    /**
-    * Removes a specified element from the DOM and from all tracking data. Note that no action is
-    * taken against the specified element, rather, data is shifted down the chain (to construct the
-    * illusion that the specified element was deleted), and then the last element is actually deleted.
-    * @param liveElement A selector for the criterion that we are deleting from
-    * @param criterionID The criterion ID that we are deleting from
-    * @param optionToRemove The option ID (discriminator really) that we are "deleting"
+    Removes a specified element from the DOM and from all tracking data. Note that no action is
+    taken against the specified element, rather, data is shifted down the chain (to construct the
+    illusion that the specified element was deleted), and then the last element is actually deleted.
+
+    Args:
+        liveElement (DOM element): An element containing the criterion interface in the DOM.
+        criterionID (string): The criterion ID that we are deleting from
+        optionToRemove (string): The option ID that we are "deleting"
    */
    removeOptionFromCriterion: function(liveElement, criterionID, optionToRemove){
        var view = this;
        var numberOfOptions = view.numberOfOptions[criterionID];
@@ -488,7 +488,7 @@ OpenAssessment.StudioView.prototype = {
            order_num: i - 1,
            name: selectorDict.name.prop('value'),
            prompt: selectorDict.prompt.prop('value'),
-            feedback: $('input[name="criterion_'+ i +'_feedback"]:checked', selectorDict.criterion).val()
+            feedback: $("#openassessment_criterion_" + i + "_feedback").val()
        };
        var optionSelectorList = selectorDict.options;
@@ -530,25 +530,25 @@ OpenAssessment.StudioView.prototype = {
        var startStr = this.settingsFieldSelectors.peerStart.prop('value');
        var dueStr = this.settingsFieldSelectors.peerDue.prop('value');
        if (startStr){
-            assessment = $.extend(assessment, {"start": startStr})
+            assessment = $.extend(assessment, {"start": startStr});
        }
        if (dueStr){
-            assessment = $.extend(assessment, {"due": dueStr})
+            assessment = $.extend(assessment, {"due": dueStr});
        }
        assessments[assessments.length] = assessment;
    }
    if (this.settingsFieldSelectors.hasSelf.prop('checked')) {
-        assessment = {
+        var assessment = {
            "name": "self-assessment"
        };
-        startStr = this.settingsFieldSelectors.selfStart.prop('value');
-        dueStr = this.settingsFieldSelectors.selfDue.prop('value');
+        var startStr = this.settingsFieldSelectors.selfStart.prop('value');
+        var dueStr = this.settingsFieldSelectors.selfDue.prop('value');
        if (startStr){
-            assessment = $.extend(assessment, {"start": startStr})
+            assessment = $.extend(assessment, {"start": startStr});
        }
        if (dueStr){
-            assessment = $.extend(assessment, {"due": dueStr})
+            assessment = $.extend(assessment, {"due": dueStr});
        }
        assessments[assessments.length] = assessment;
    }
...
@@ -82,7 +82,7 @@ class StudioMixin(object):
             return {'success': False, 'msg': _('Validation error: {error}').format(error=msg)}
         self.update(
-            rubric,
+            rubric.get('criteria', []),
             rubric.get('feedbackprompt', None),
             assessments,
             submission_due,
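This one-line change is the fix named in the commit message: self.update() takes the list of criteria as its first argument, but the handler was passing the whole rubric dict, which then ended up stored in the rubric criteria list XBlock field. A minimal sketch of the failure mode; FakeBlock and its two-argument update() are a simplified, hypothetical stand-in for the real XBlock:

# Sketch of the bug this hunk fixes; FakeBlock is hypothetical.
rubric = {
    'criteria': [{'order_num': 0, 'name': 'Clarity', 'prompt': '...', 'options': [], 'feedback': 'optional'}],
    'feedbackprompt': 'Overall feedback prompt'
}

class FakeBlock(object):
    def __init__(self):
        self.rubric_criteria = []  # a List field: must hold criterion dicts

    def update(self, criteria, feedback_prompt):
        self.rubric_criteria = criteria  # stores whatever it is given

block = FakeBlock()
block.update(rubric, rubric.get('feedbackprompt'))  # old call: the whole dict lands in the list field
assert isinstance(block.rubric_criteria, dict)      # wrong type persisted
block.update(rubric.get('criteria', []), rubric.get('feedbackprompt'))  # fixed call from the diff
assert isinstance(block.rubric_criteria, list)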
@@ -124,9 +124,9 @@ class StudioMixin(object):
             student_training_dictionary["examples"] = examples
         # We do not expect serialization to raise an exception, but if it does, handle it gracefully.
-        except Exception as ex:
-            msg = _('An unexpected error occurred while loading the problem: {error}').format(error=ex)
-            logger.error(msg)
+        except:
+            logger.exception("An error occurred while serializing the XBlock")
+            msg = _('An unexpected error occurred while loading the problem')
             return {'success': False, 'msg': msg, 'xml': u''}

         # Populates the context for the assessments section of the editing
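The rewritten except block trades a formatted logger.error() call for logger.exception(), which logs at ERROR level and automatically appends the current traceback, while the user-facing message no longer leaks exception details. A minimal sketch of the pattern, assuming a module-level logger and an ugettext-style _ alias as in the diff; do_serialize is a hypothetical stand-in for the real serialization call, and except Exception: is used here rather than the commit's bare except: so that KeyboardInterrupt and SystemExit are not swallowed:

import logging

logger = logging.getLogger(__name__)
_ = lambda text: text  # stand-in for the i18n ugettext alias

def do_serialize(block):
    raise ValueError("simulated serialization failure")  # hypothetical failing call

def editor_context(block):
    try:
        return {'success': True, 'xml': do_serialize(block)}
    except Exception:
        # The full traceback goes to the log; the user sees only a generic message.
        logger.exception("An error occurred while serializing the XBlock")
        return {'success': False, 'msg': _('An unexpected error occurred while loading the problem'), 'xml': u''}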
@@ -134,12 +134,12 @@ class StudioMixin(object):
         # section.
         submission_due = self.submission_due if self.submission_due else ''
         submission_start = self.submission_start if self.submission_start else ''
-        rubric_dict = { 'criteria' : self.rubric_criteria }
-        rubric_dict['feedbackprompt'] = unicode(self.rubric_feedback_prompt)
+        rubric_dict = {
+            'criteria' : self.rubric_criteria,
+            'feedbackprompt': unicode(self.rubric_feedback_prompt)
+        }

         return {
             'success': True,
@@ -247,7 +247,7 @@ def parse_assessment_dictionaries(input_assessments):
 def verify_rubric_format(rubric):
     """
     Verifies that the rubric that was passed in follows the conventions that we expect, including
-    types and structure. The code documents itself well here.
+    types and structure.

     Args:
         rubric (dict): Unsanitized version of our rubric. Usually taken from the GUI.
@@ -259,22 +259,14 @@ def verify_rubric_format(rubric):
         UpdateFromXMLError
     """
-    # import pudb, sys as __sys;__sys.stdout=__sys.__stdout__;pudb.set_trace() # -={XX}=-={XX}=-={XX}=
     if not isinstance(rubric, dict):
-        # import pudb,sys as __sys;__sys.stdout=__sys.__stdout__;pudb.set_trace() # -={XX}=-={XX}=-={XX}=
         raise UpdateFromXmlError(_("The given rubric was not a dictionary of the form {criteria: [criteria1, criteria2...]}"))
     if "criteria" not in rubric.keys():
         raise UpdateFromXmlError(_("The given rubric did not contain a key for a list of criteria, and is invalid"))
-    if len((set(rubric.keys()) - {'prompt', 'criteria'})) > 0:
-        unexpected_keys = list(set(rubric.keys()) - {"prompt", "criteria"})
-        raise UpdateFromXmlError(_("The following keys were included in the rubric when they were not allowed to be: {}".format(unexpected_keys)))
     if rubric.get('prompt', False):
         if not isinstance(rubric['prompt'], basestring):
-            # import pudb,sys as __sys;__sys.stdout=__sys.__stdout__;pudb.set_trace() # -={XX}=-={XX}=-={XX}=
             raise UpdateFromXmlError(_("The given rubric's feedback prompt was invalid, it must be a string."))
     criteria = rubric["criteria"]
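For reference, this is the rubric shape that passes the checks above and below, reconstructed from the JSON fixtures later in this same commit (values are illustrative):

# A rubric dict of the form verify_rubric_format() accepts, assembled from
# the test fixtures in this diff; only 'criteria' is required.
valid_rubric = {
    "prompt": "Test Prompt",  # optional; must be a string when present
    "criteria": [
        {
            "order_num": 0,
            "name": "Test criterion",
            "prompt": "Test criterion prompt",
            "feedback": "optional",
            "options": [
                {"order_num": 0, "points": 0, "name": "No", "explanation": "No explanation"},
                {"order_num": 1, "points": 2, "name": "Yes", "explanation": "Yes explanation"},
            ],
        }
    ],
}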
@@ -291,18 +283,25 @@ def verify_rubric_format(rubric):
         criterion = dict(criterion)
         expected_keys = {'order_num', 'name', 'prompt', 'options', 'feedback'}
-        unexpected_keys = list(set(criterion.keys()) - expected_keys)
-        missing_keys = list(expected_keys - set(criterion.keys()))
+        missing_keys = expected_keys - set(criterion.keys())
         if missing_keys:
-            raise UpdateFromXmlError(_("The following keys were missing from the Definition of one or more criteria: {}".format(missing_keys)))
-        if unexpected_keys:
-            raise UpdateFromXmlError(_("The following extraneous keys were found in the definition for one or more criteria: {}".format(unexpected_keys)))
+            raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more criteria: {}".format(", ".join(missing_keys))))
-        name = str(criterion['name'])
-        prompt = str(criterion['prompt'])
-        feedback = str(criterion['feedback'])
+        try:
+            name = unicode(criterion['name'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The name value must be a string."))
+        try:
+            prompt = unicode(criterion['prompt'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The prompt value must be a string."))
+        try:
+            feedback = unicode(criterion['feedback'])
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("The feedback value must be a string."))
         try:
             order_num = int(criterion['order_num'])
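Each field check above repeats the same try/unicode/except shape. If the repetition grew further, it could be factored into a helper along these lines (a sketch, not part of the commit; the stand-in definitions of UpdateFromXmlError and _ make the snippet self-contained):

class UpdateFromXmlError(Exception):
    """Stand-in for the real exception class used in the diff."""

def _(text):
    return text  # stand-in for the ugettext alias

def require_unicode(container, key, message):
    # Coerce container[key] to a unicode string, translating coercion
    # failures into the validation error used throughout this module.
    try:
        return unicode(container[key])
    except (TypeError, ValueError):
        raise UpdateFromXmlError(_(message))

# Usage mirroring the hunk above:
# name = require_unicode(criterion, 'name', "The name value must be a string.")
# prompt = require_unicode(criterion, 'prompt', "The prompt value must be a string.")
# feedback = require_unicode(criterion, 'feedback', "The feedback value must be a string.")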
@@ -321,18 +320,22 @@ def verify_rubric_format(rubric):
             if not isinstance(option, dict):
                 raise UpdateFromXmlError(_("An option given was not a dictionary."))
-            expected_keys = {'order_num','name', 'points', 'explanation'}
+            expected_keys = {'order_num', 'name', 'points', 'explanation'}
             unexpected_keys = list(set(option.keys()) - expected_keys)
             missing_keys = list(expected_keys - set(option.keys()))
             if missing_keys:
-                raise UpdateFromXmlError(_("The following keys were missing from the Definition of one or more options: {}".format(missing_keys)))
-            if unexpected_keys:
-                raise UpdateFromXmlError(_("The following extraneous keys were found in the definition for one or more options: {}".format(unexpected_keys)))
+                raise UpdateFromXmlError(_("The following keys were missing from the definition of one or more options: {}".format(", ".join(missing_keys))))
-            option_name = str(option['name'])
-            option_explanation = str(option['explanation'])
+            try:
+                option_name = unicode(option['name'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option name values must be strings."))
+            try:
+                option_explanation = unicode(option['explanation'])
+            except (TypeError, ValueError):
+                raise UpdateFromXmlError(_("All option explanation values must be strings."))
             try:
                 option_points = int(option['points'])
@@ -363,6 +366,9 @@ def verify_rubric_format(rubric):
     }
     if rubric.get('prompt'):
-        sanitized_rubric['prompt'] = str(rubric.get('prompt'))
+        try:
+            sanitized_rubric['prompt'] = unicode(rubric.get('prompt'))
+        except (TypeError, ValueError):
+            raise UpdateFromXmlError(_("All prompt values must be strings."))
     return sanitized_rubric
\ No newline at end of file
...
 {
-    "simple": {
+    "missing_feedback": {
         "rubric": {
             "prompt": "Test Prompt",
             "criteria": [
@@ -40,4 +40,4 @@
         "expected-assessment": "peer-assessment",
         "expected-criterion-prompt": "Test criterion prompt"
     }
 }
\ No newline at end of file
...
@@ -302,47 +302,6 @@
         "expected_error": "feedback"
     },
-    "extra rubric keys": {
-        "rubric": {
-            "prompt": "Test Prompt",
-            "hellooooo": "waddup",
-            "criteria": [
-                {
-                    "order_num": 0,
-                    "name": "Test criterion",
-                    "prompt": "Test criterion prompt",
-                    "options": [
-                        {
-                            "order_num": 0,
-                            "points": 0,
-                            "name": "No",
-                            "explanation": "No explanation"
-                        },
-                        {
-                            "order_num": 1,
-                            "points": 2,
-                            "name": "Yes",
-                            "explanation": "Yes explanation"
-                        }
-                    ],
-                    "feedback": "optional"
-                }
-            ]
-        },
-        "prompt": "My new prompt.",
-        "title": "My new title.",
-        "assessments": [
-            {
-                "name": "self-assessment",
-                "start": "",
-                "due": ""
-            }
-        ],
-        "submission_due": "",
-        "submission_start": "",
-        "expected_error": "following keys"
-    },
     "expected rubric keys missing": {
         "rubric": {
             "prompt": "Test Prompt"
@@ -461,47 +420,6 @@
         "expected_error": "keys were missing"
     },
-    "criteria too many keys": {
-        "rubric": {
-            "prompt": "Test Prompt",
-            "criteria": [
-                {
-                    "order_num": 0,
-                    "name": "Test criterion",
-                    "prompt": "Test prompt",
-                    "options": [
-                        {
-                            "order_num": 0,
-                            "points": 0,
-                            "name": "No",
-                            "explanation": "No explanation"
-                        },
-                        {
-                            "order_num": 1,
-                            "points": 2,
-                            "name": "Yes",
-                            "explanation": "Yes explanation"
-                        }
-                    ],
-                    "feedback": "optional",
-                    "magic": "indeed"
-                }
-            ]
-        },
-        "prompt": "My new prompt.",
-        "title": "My new title.",
-        "assessments": [
-            {
-                "name": "self-assessment",
-                "start": "",
-                "due": ""
-            }
-        ],
-        "submission_due": "",
-        "submission_start": "",
-        "expected_error": "extraneous keys"
-    },
     "options not a list": {
         "rubric": {
             "prompt": "Test Prompt",
@@ -603,47 +521,6 @@
         "expected_error": "keys were missing"
     },
-    "option extraneous keys": {
-        "rubric": {
-            "prompt": "Test Prompt",
-            "criteria": [
-                {
-                    "order_num": 0,
-                    "name": "Test criterion",
-                    "prompt": "Test criterion prompt",
-                    "options": [
-                        {
-                            "order_num": 0,
-                            "kittens": "ADORABLE",
-                            "points": 0,
-                            "name": "No",
-                            "explanation": "No explanation"
-                        },
-                        {
-                            "order_num": 1,
-                            "points": 2,
-                            "name": "Yes",
-                            "explanation": "Yes explanation"
-                        }
-                    ],
-                    "feedback": "optional"
-                }
-            ]
-        },
-        "prompt": "My new prompt.",
-        "title": "My new title.",
-        "assessments": [
-            {
-                "name": "self-assessment",
-                "start": "",
-                "due": ""
-            }
-        ],
-        "submission_due": "",
-        "submission_start": "",
-        "expected_error": "extraneous keys"
-    },
     "option points must be int": {
         "rubric": {
             "prompt": "Test Prompt",
@@ -683,4 +560,4 @@
         "submission_start": "",
         "expected_error": "must be integers"
     }
 }
\ No newline at end of file
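These fixtures are consumed through ddt's @file_data decorator in the test module at the end of this diff: each top-level JSON key becomes its own generated test case, and expected_error is asserted against the handler's failure message. A self-contained sketch of that mechanism; the request/response plumbing of the real XBlockHandlerTestCase is reduced to a stub, and the single positional `case` argument matches the 2014-era ddt behavior shown in the tests below:

import unittest
from ddt import ddt, file_data

@ddt
class InvalidDataTest(unittest.TestCase):

    @file_data('data/invalid_update_xblock.json')  # each key in the file becomes one test
    def test_rejects_invalid_data(self, case):
        # Stub response; the real tests POST `case` to the update_editor_context handler.
        resp = {'success': False, 'msg': case['expected_error']}
        self.assertFalse(resp['success'])
        self.assertIn(case['expected_error'], resp['msg'].lower())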
...
@@ -46,5 +46,54 @@
         "expected-assessment": "peer-assessment",
         "expected-criterion-prompt": "Test criterion prompt"
+    },
+    "unicode": {
+        "rubric": {
+            "prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
+            "criteria": [
+                {
+                    "order_num": 0,
+                    "name": "Ṫëṡẗ ċṛïẗëïṛöṅ",
+                    "prompt": "Téśt ćŕítéíŕőń ṕŕőḿṕt",
+                    "options": [
+                        {
+                            "order_num": 0,
+                            "points": 0,
+                            "name": "Ṅö",
+                            "explanation": "Ńő éxṕĺáńátíőń"
+                        },
+                        {
+                            "order_num": 1,
+                            "points": 2,
+                            "name": "sǝʎ",
+                            "explanation": "Чэѕ эхрlаиатіои"
+                        }
+                    ],
+                    "feedback": "required"
+                }
+            ]
+        },
+        "prompt": "Ṁÿ ṅëẅ ṗṛöṁṗẗ.",
+        "submission_due": "4014-02-27T09:46:28",
+        "submission_start": "4014-02-10T09:46:28",
+        "title": "ɯʎ uǝʍ ʇıʇןǝ",
+        "assessments": [
+            {
+                "name": "peer-assessment",
+                "must_grade": 5,
+                "must_be_graded_by": 3,
+                "start": "",
+                "due": "4014-03-10T00:00:00"
+            },
+            {
+                "name": "self-assessment",
+                "start": "",
+                "due": ""
+            }
+        ],
+        "expected-assessment": "peer-assessment",
+        "expected-criterion-prompt": "Ṫëṡẗ ċṛïẗëṛïöṅ ṗṛöṁṗẗ"
     }
 }
\ No newline at end of file
...
@@ -5,10 +5,8 @@ View-level tests for Studio view of OpenAssessment XBlock.
 import json
 import datetime as dt
 import lxml.etree as etree
-import mock
 import pytz
-from ddt import ddt, data, file_data
+from ddt import ddt, file_data
-from openassessment.xblock.xml import UpdateFromXmlError
 from .base import scenario, XBlockHandlerTestCase
@@ -40,63 +38,17 @@ class StudioViewTest(XBlockHandlerTestCase):
         examples = etree.fromstring(assessment_dict['examples'])
         self.assertEqual(examples.tag, 'examples')
-    # WEDALY!!! I cannot figure out how to mock out this call correctly, so it is consequently failing.
-    @mock.patch('studio_mixin.verify_rubric_format')
-    @scenario('data/basic_scenario.xml')
-    def test_get_editor_context_error(self, xblock, mock_rubric_serializer):
-        # Simulate an unexpected error while serializing the XBlock
-        mock_rubric_serializer.side_effect = UpdateFromXmlError('Test error!')
-        # Check that we get a failure message
-        resp = self.request(xblock, 'editor_context', '""', response_format='json')
-        self.assertFalse(resp['success'])
-        self.assertIn(u'unexpected error', resp['msg'].lower())
-    # WEDALY!!! I don't know if this test is relevant any more (using update editor context with
-    # XML is so OVER am-i-right? Rather, we now test teh same behavior a million times with the
-    # Dictionary/List structures.
-    # Thoughts?
-    # @file_data('data/update_xblock.json')
-    # @scenario('data/basic_scenario.xml')
-    # def test_update_xblock(self, xblock, data):
-    #     xblock.published_date = None
-    #     # Test that we can update the xblock with the expected configuration.
-    #     request = json.dumps(data)
-    #
-    #     # Verify the response is successfully
-    #     resp = self.request(xblock, 'update_editor_context', request, response_format='json')
-    #     print "ERROR IS {}".format(resp['msg'])
-    #     self.assertTrue(resp['success'])
-    #     self.assertIn('success', resp['msg'].lower())
-    #
-    #     # Check that the XBlock fields were updated
-    #     # We don't need to be exhaustive here, because we have other unit tests
-    #     # that verify this extensively.
-    #     self.assertEqual(xblock.title, data['title'])
-    #     self.assertEqual(xblock.prompt, data['prompt'])
-    #     self.assertEqual(xblock.rubric_assessments[0]['name'], data['expected-assessment'])
-    #     self.assertEqual(xblock.rubric_criteria[0]['prompt'], data['expected-criterion-prompt'])
     @file_data('data/update_xblock.json')
     @scenario('data/basic_scenario.xml')
-    def test_update_context_post_release(self, xblock, data):
-        # First, parse XML data into a single string.
-        data['rubric'] = "".join(data['rubric'])
-        # XBlock start date defaults to already open,
-        # so we should get an error when trying to update anything that change the number of points
-        request = json.dumps(data)
-        # Verify the response is successfully
-        resp = self.request(xblock, 'update_editor_context', request, response_format='json')
-        self.assertFalse(resp['success'])
+    def test_update_context(self, xblock, data):
+        xblock.published_date = None
+        resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
+        self.assertTrue(resp['success'], msg=resp.get('msg'))
     @file_data('data/invalid_update_xblock.json')
     @scenario('data/basic_scenario.xml')
     def test_update_context_invalid_request_data(self, xblock, data):
         xblock.published_date = None
         resp = self.request(xblock, 'update_editor_context', json.dumps(data), response_format='json')
         self.assertFalse(resp['success'])
         self.assertIn(data['expected_error'], resp['msg'].lower())
@@ -104,7 +56,6 @@ class StudioViewTest(XBlockHandlerTestCase):
     @file_data('data/invalid_rubric.json')
     @scenario('data/basic_scenario.xml')
     def test_update_rubric_invalid(self, xblock, data):
-
         request = json.dumps(data)

         # Store old XBlock fields for later verification
@@ -126,7 +77,6 @@ class StudioViewTest(XBlockHandlerTestCase):
         self.assertItemsEqual(xblock.rubric_assessments, old_assessments)
         self.assertItemsEqual(xblock.rubric_criteria, old_criteria)
-
     @scenario('data/basic_scenario.xml')
     def test_check_released(self, xblock):
         # By default, the problem should be released
...