Commit d4a7b510 by Braden MacDonald

Merge pull request #21 from open-craft/fix-two-more-bugs

Fix two more bugs
parents e7e2ae46 d2100b60
@@ -27,7 +27,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
     function no_more_attempts() {
         var attempts_data = $('.attempts', element).data();
-        return attempts_data.num_attempts >= attempts_data.max_attempts;
+        return (attempts_data.max_attempts > 0) && (attempts_data.num_attempts >= attempts_data.max_attempts);
     }
 
     function renderGrade() {
@@ -63,7 +63,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
         }
         mentoring.renderAttempts();
-        if (data.assessment_message && data.num_attempts < data.max_attempts) {
+        if (data.assessment_message && (data.max_attempts === 0 || data.num_attempts < data.max_attempts)) {
             mentoring.setContent(messagesDOM, data.assessment_message);
             messagesDOM.show();
         }
...
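
Both JavaScript fixes above hinge on the convention that max_attempts == 0 means unlimited attempts, so each check must short-circuit when no limit is configured. A minimal sketch of the same predicate in Python (the function name and arguments are illustrative, not part of the codebase):

    def no_more_attempts(num_attempts, max_attempts):
        # max_attempts == 0 is the "unlimited" sentinel: the learner can
        # always try again, so the exhaustion check only applies when a
        # positive limit is configured.
        return max_attempts > 0 and num_attempts >= max_attempts

    assert no_more_attempts(2, 2) is True    # limit reached
    assert no_more_attempts(99, 0) is False  # unlimited: never exhausted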
@@ -19,7 +19,8 @@
 #
 from xblock.fields import String
-from xblockutils.base_test import SeleniumBaseTest
+from xblockutils.base_test import SeleniumBaseTest, SeleniumXBlockTest
+from xblockutils.resources import ResourceLoader
 
 # Studio adds a url_name property to each XBlock but Workbench doesn't.
 # Since we rely on it, we need to mock url_name support so it can be set via XML and
@@ -27,11 +28,13 @@ from xblockutils.base_test import SeleniumBaseTest
 from problem_builder import MentoringBlock
 
 MentoringBlock.url_name = String()
 
+loader = ResourceLoader(__name__)
 
-class MentoringBaseTest(SeleniumBaseTest):
-    module_name = __name__
-    default_css_selector = 'div.mentoring'
 
+class PopupCheckMixin(object):
+    """
+    Code used by MentoringBaseTest and MentoringAssessmentBaseTest
+    """
     def popup_check(self, mentoring, item_feedbacks, prefix='', do_submit=True):
         submit = mentoring.find_element_by_css_selector('.submit input.input-main')
@@ -57,7 +60,18 @@ class MentoringBaseTest(SeleniumBaseTest):
         self.assertFalse(item_feedback_popup.is_displayed())
 
-class MentoringAssessmentBaseTest(MentoringBaseTest):
+class MentoringBaseTest(SeleniumBaseTest, PopupCheckMixin):
+    module_name = __name__
+    default_css_selector = 'div.mentoring'
+
+
+class MentoringAssessmentBaseTest(SeleniumXBlockTest, PopupCheckMixin):
+    """
+    Base class for tests of assessment mode
+    """
+    module_name = __name__
+    default_css_selector = 'div.mentoring'
+
     @staticmethod
     def question_text(number):
         if number:
@@ -65,9 +79,16 @@ class MentoringAssessmentBaseTest(MentoringBaseTest):
         else:
             return "Question"
 
-    def go_to_assessment(self, page):
+    def load_assessment_scenario(self, xml_file, params=None):
+        """ Loads an assessment scenario from an XML template """
+        params = params or {}
+        scenario = loader.render_template("xml_templates/{}".format(xml_file), params)
+        self.set_scenario_xml(scenario)
+        return self.go_to_assessment()
+
+    def go_to_assessment(self):
         """ Navigates to assessment page """
-        mentoring = self.go_to_page(page)
+        mentoring = self.go_to_view("student_view")
 
         class Namespace(object):
             pass
...
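
For context, the new load_assessment_scenario helper combines two xblock-utils facilities: ResourceLoader.render_template, which renders a template bundled with the test package, and set_scenario_xml, which SeleniumXBlockTest provides for installing the resulting XML as the active workbench scenario. A usage sketch under those assumptions (the template path and parameter values are illustrative):

    from xblockutils.resources import ResourceLoader

    loader = ResourceLoader(__name__)

    # Render the bundled XML template with per-test parameters...
    scenario_xml = loader.render_template(
        "xml_templates/assessment.xml",
        {"max_attempts": 0, "extended_feedback": False},
    )
    # ...then, inside a SeleniumXBlockTest subclass, install it as the
    # scenario before navigating to the view:
    #     self.set_scenario_xml(scenario_xml)
    #     self.go_to_view("student_view")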
@@ -242,7 +242,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
     def peek_at_review(self, mentoring, controls, expected, extended_feedback=False):
         self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring)
         self.assert_persistent_elements_present(mentoring)
-        if expected["num_attempts"] < expected["max_attempts"]:
+        if expected["max_attempts"] > 0 and expected["num_attempts"] < expected["max_attempts"]:
             self.assertIn("Note: if you retake this assessment, only your final score counts.", mentoring.text)
             self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
         elif extended_feedback:
@@ -264,6 +264,8 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         self.assertIn("You answered {incorrect} questions incorrectly.".format(**expected), mentoring.text)
         if expected["max_attempts"] == 1:
             self.assertIn("You have used {num_attempts} of 1 submission.".format(**expected), mentoring.text)
+        elif expected["max_attempts"] == 0:
+            self.assertNotIn("You have used", mentoring.text)
         else:
             self.assertIn(
                 "You have used {num_attempts} of {max_attempts} submissions.".format(**expected),
@@ -314,10 +316,16 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         controls.next_question.click()
         self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)
 
-    @data((1, False), ('Extended Feedback', True))
-    @unpack
-    def test_assessment(self, assessment, extended_feedback):
-        mentoring, controls = self.go_to_assessment("Assessment %s" % assessment)
+    @data(
+        {"max_attempts": 0, "extended_feedback": False},  # Note '0' means unlimited attempts
+        {"max_attempts": 1, "extended_feedback": False},
+        {"max_attempts": 4, "extended_feedback": False},
+        {"max_attempts": 2, "extended_feedback": True},
+    )
+    def test_assessment(self, params):
+        mentoring, controls = self.load_assessment_scenario("assessment.xml", params)
+        max_attempts = params['max_attempts']
+        extended_feedback = params['extended_feedback']
 
         self.freeform_answer(1, mentoring, controls, 'This is the answer', CORRECT)
         self.single_choice_question(2, mentoring, controls, 'Maybe not', INCORRECT)
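
The parameterization above also drops @unpack: when ddt's @data receives values without @unpack, each value (here a dict) arrives as a single positional argument rather than being splatted into separate parameters. A minimal self-contained illustration:

    import unittest
    from ddt import ddt, data

    @ddt
    class ExampleTest(unittest.TestCase):
        @data(
            {"max_attempts": 0, "extended_feedback": False},  # 0 = unlimited
            {"max_attempts": 1, "extended_feedback": False},
        )
        def test_params_arrive_whole(self, params):
            # Without @unpack, ddt passes each dict as one argument.
            self.assertIn("max_attempts", params)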
@@ -326,16 +334,22 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         # see if assessment remembers the current step
         self.go_to_workbench_main_page()
-        mentoring, controls = self.go_to_assessment("Assessment %s" % assessment)
+        mentoring, controls = self.go_to_assessment()
 
         self.multiple_response_question(4, mentoring, controls, ("Its beauty",), PARTIAL, last=True)
 
         expected_results = {
             "correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
-            "num_attempts": 1, "max_attempts": 2
+            "num_attempts": 1, "max_attempts": max_attempts
         }
         self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
 
+        if max_attempts == 1:
+            self.assert_messages_empty(mentoring)
+            self.assert_disabled(controls.try_again)
+            return
+
+        # The on-assessment-review message is shown if attempts remain:
         self.assert_messages_text(mentoring, "Assessment additional feedback message text")
         self.assert_clickable(controls.try_again)
         controls.try_again.click()
@@ -351,11 +365,17 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         expected_results = {
             "correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
-            "num_attempts": 2, "max_attempts": 2
+            "num_attempts": 2, "max_attempts": max_attempts
         }
         self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
-        self.assert_disabled(controls.try_again)
-        self.assert_messages_empty(mentoring)
+        if max_attempts == 2:
+            self.assert_disabled(controls.try_again)
+        else:
+            self.assert_clickable(controls.try_again)
+        if 1 <= max_attempts <= 2:
+            self.assert_messages_empty(mentoring)  # The on-assessment-review message is not shown if no attempts remain
+        else:
+            self.assert_messages_text(mentoring, "Assessment additional feedback message text")
 
         if extended_feedback:
             self.extended_feedback_checks(mentoring, controls, expected_results)
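
Spelled out, the branching above encodes the expected UI state after the second attempt for each parameterized limit (max_attempts == 1 never reaches this point because the test returns early). A compact restatement with a hypothetical helper:

    def expected_state_after_two_attempts(max_attempts):
        # Returns (try_again_enabled, review_message_shown), matching the
        # assertions above after num_attempts == 2.
        try_again_enabled = max_attempts != 2          # 0 (unlimited) and 4 leave attempts
        review_message_shown = not (1 <= max_attempts <= 2)  # hidden once attempts run out
        return try_again_enabled, review_message_shown

    assert expected_state_after_two_attempts(0) == (True, True)    # unlimited
    assert expected_state_after_two_attempts(2) == (False, False)  # exhausted
    assert expected_state_after_two_attempts(4) == (True, True)    # attempts remain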
@@ -363,7 +383,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         """
         No 'Next Question' button on single question assessment.
         """
-        mentoring, controls = self.go_to_assessment("Assessment 2")
+        mentoring, controls = self.load_assessment_scenario("assessment_single.xml", {"max_attempts": 2})
         self.single_choice_question(0, mentoring, controls, 'Maybe not', INCORRECT, last=True)
         expected_results = {
...
@@ -45,7 +45,7 @@ class MentoringThemeTest(MentoringAssessmentBaseTest):
         return '#%02x%02x%02x' % (r, g, b)
 
     def assert_status_icon_color(self, color):
-        mentoring, controls = self.go_to_assessment('Theme 1')
+        mentoring, controls = self.load_assessment_scenario('assessment_single.xml', {"max_attempts": 2})
         question = self.expect_question_visible(0, mentoring)
         choice_name = "Maybe not"
...
<problem-builder url_name="mentoring-assessment-2" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2">
<html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p>
<p>Please answer the questions below.</p>
</html_demo>
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value='["yes"]'>Yes</pb-choice>
<pb-choice value='["maybenot"]'>Maybe not</pb-choice>
<pb-choice value='["understand"]'>I don't understand</pb-choice>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
</pb-mcq>
</problem-builder>
<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2" extended_feedback="true">
<html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p>
<p>Please answer the questions below.</p>
</html_demo>
<html_demo>
We need an XBlock with JavaScript here to test that it doesn't interfere
with the assessment, since it will show up in runtime(element).children,
but it is not a "step" element:
</html_demo>
<acid/>
<pb-answer name="goal" question="What is your goal?" />
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value="yes">Yes</pb-choice>
<pb-choice value="maybenot">Maybe not</pb-choice>
<pb-choice value="understand">I don't understand</pb-choice>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
</pb-mcq>
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
<pb-choice value="notwant">I don't want to rate it</pb-choice>
<pb-tip values='["4","5"]'>I love good grades.</pb-tip>
<pb-tip values='["1","2", "3"]'>Will do better next time...</pb-tip>
<pb-tip values='["notwant"]'>Your loss!</pb-tip>
</pb-rating>
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]' message="Thank you for answering!">
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
<pb-choice value="gracefulness">Its gracefulness</pb-choice>
<pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
</pb-mrq>
<pb-message type="on-assessment-review">
<html>Assessment additional feedback message text</html>
</pb-message>
</problem-builder>
<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2"> <problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="{{max_attempts}}" extended_feedback="{{extended_feedback}}">
<html_demo> <html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p> <p>This paragraph is shared between <strong>all</strong> questions.</p>
......
<problem-builder url_name="mentoring-assessment-2" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2"> <problem-builder url_name="mentoring-assessment-2" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="{{max_attempts}}">
<html_demo> <html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p> <p>This paragraph is shared between <strong>all</strong> questions.</p>
<p>Please answer the questions below.</p> <p>Please answer the questions below.</p>
......
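
The {{max_attempts}} and {{extended_feedback}} placeholders above are filled in by load_assessment_scenario before the XML reaches the workbench; xblock-utils' render_template uses Django-style template syntax. The regex below is a rough stand-in for illustration only, not the actual implementation:

    import re

    def render_placeholders(template_text, params):
        # Replace each {{name}} placeholder with the matching parameter value.
        return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), template_text)

    xml = render_placeholders(
        '<problem-builder mode="assessment" max_attempts="{{max_attempts}}">',
        {"max_attempts": 0},
    )
    assert xml == '<problem-builder mode="assessment" max_attempts="0">'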
 ddt
 mock
 unicodecsv==0.9.4
--e git+https://github.com/edx/xblock-utils.git@581ed636c862b286002bb9a3724cc883570eb54c#egg=xblock-utils
+-e git+https://github.com/edx/xblock-utils.git@b2a17fa3793e98e67bdb86273317c41b6297dcbb#egg=xblock-utils
 -e .