Commit 9e94e81f by Tim Krones

Implement integration tests.

parent bef01e81
...@@ -918,7 +918,7 @@ class MentoringWithExplicitStepsBlock(BaseMentoringBlock, StudioContainerWithNes ...@@ -918,7 +918,7 @@ class MentoringWithExplicitStepsBlock(BaseMentoringBlock, StudioContainerWithNes
if not self.max_attempts_reached: if not self.max_attempts_reached:
return self.get_message_content('on-assessment-review', or_default=True) return self.get_message_content('on-assessment-review', or_default=True)
else: else:
assessment_message = _("Note: you have used all attempts. Continue to the next unit") assessment_message = _("Note: you have used all attempts. Continue to the next unit.")
return '<p>{}</p>'.format(assessment_message) return '<p>{}</p>'.format(assessment_message)
@property @property
...@@ -985,6 +985,7 @@ class MentoringWithExplicitStepsBlock(BaseMentoringBlock, StudioContainerWithNes ...@@ -985,6 +985,7 @@ class MentoringWithExplicitStepsBlock(BaseMentoringBlock, StudioContainerWithNes
'children_contents': children_contents, 'children_contents': children_contents,
})) }))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css')) fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring_with_steps.js')) fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring_with_steps.js'))
fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html") fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
......
function MentoringWithStepsBlock(runtime, element) { function MentoringWithStepsBlock(runtime, element) {
// Set up gettext in case it isn't available in the client runtime:
if (typeof gettext == "undefined") {
window.gettext = function gettext_stub(string) { return string; };
window.ngettext = function ngettext_stub(strA, strB, n) { return n == 1 ? strA : strB; };
}
var children = runtime.children(element); var children = runtime.children(element);
var steps = children.filter( var steps = [];
function(c) { return c.element.className.indexOf('sb-step') > -1; }
);
var reviewStep; var reviewStep;
for (var i = 0; i < children.length; i++) { for (var i = 0; i < children.length; i++) {
var child = children[i]; var child = children[i];
if (child.type === 'sb-review-step') { var blockType = $(child.element).data('block-type');
if (blockType === 'sb-step') {
steps.push(child);
} else if (blockType === 'sb-review-step') {
reviewStep = child; reviewStep = child;
break;
} }
} }
...@@ -35,6 +41,11 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -35,6 +41,11 @@ function MentoringWithStepsBlock(runtime, element) {
return (data.num_attempts < data.max_attempts); return (data.num_attempts < data.max_attempts);
} }
// Reports whether the author enabled extended feedback for this block.
// The flag arrives through the grade element's data attributes as the
// literal string "True" (serialized server-side), so compare against that.
function extendedFeedbackEnabled() {
    return gradeDOM.data().extended_feedback === "True";
}
function showFeedback(response) { function showFeedback(response) {
if (response.step_status === 'correct') { if (response.step_status === 'correct') {
checkmark.addClass('checkmark-correct icon-ok fa-check'); checkmark.addClass('checkmark-correct icon-ok fa-check');
...@@ -139,6 +150,10 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -139,6 +150,10 @@ function MentoringWithStepsBlock(runtime, element) {
} }
} }
// Unchecks every radio button and checkbox inside this XBlock's element,
// resetting all choice inputs before a new attempt.
function clearSelections() {
    var choiceInputs = $('input[type=radio], input[type=checkbox]', element);
    choiceInputs.prop('checked', false);
}
function cleanAll() { function cleanAll() {
checkmark.removeClass('checkmark-correct icon-ok fa-check'); checkmark.removeClass('checkmark-correct icon-ok fa-check');
checkmark.removeClass('checkmark-partially-correct icon-ok fa-check'); checkmark.removeClass('checkmark-partially-correct icon-ok fa-check');
...@@ -176,7 +191,7 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -176,7 +191,7 @@ function MentoringWithStepsBlock(runtime, element) {
var data = gradeDOM.data(); var data = gradeDOM.data();
// Forward to review step to render grade data // Forward to review step to render grade data
var showExtendedFeedback = (!someAttemptsLeft() && data.extended_feedback); var showExtendedFeedback = (!someAttemptsLeft() && extendedFeedbackEnabled());
reviewStep.renderGrade(gradeDOM, showExtendedFeedback); reviewStep.renderGrade(gradeDOM, showExtendedFeedback);
// Add click handler that takes care of showing associated step to step links // Add click handler that takes care of showing associated step to step links
...@@ -272,7 +287,8 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -272,7 +287,8 @@ function MentoringWithStepsBlock(runtime, element) {
for (var i=0; i < steps.length; i++) { for (var i=0; i < steps.length; i++) {
var step = steps[i]; var step = steps[i];
var mentoring = { var mentoring = {
setContent: setContent setContent: setContent,
publish_event: publishEvent
}; };
options.mentoring = mentoring; options.mentoring = mentoring;
step.initChildren(options); step.initChildren(options);
...@@ -288,6 +304,14 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -288,6 +304,14 @@ function MentoringWithStepsBlock(runtime, element) {
} }
} }
// POSTs an analytics event payload to this XBlock's 'publish_event' handler.
// Fire-and-forget: no success or error callbacks are attached.
function publishEvent(data) {
    var handlerUrl = runtime.handlerUrl(element, 'publish_event');
    $.ajax({
        type: "POST",
        url: handlerUrl,
        data: JSON.stringify(data)
    });
}
function showGrade() { function showGrade() {
cleanAll(); cleanAll();
showAssessmentMessage(); showAssessmentMessage();
...@@ -310,6 +334,7 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -310,6 +334,7 @@ function MentoringWithStepsBlock(runtime, element) {
function handleTryAgain(result) { function handleTryAgain(result) {
activeStep = result.active_step; activeStep = result.active_step;
clearSelections();
updateDisplay(); updateDisplay();
tryAgainDOM.hide(); tryAgainDOM.hide();
submitDOM.show(); submitDOM.show();
...@@ -329,6 +354,23 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -329,6 +354,23 @@ function MentoringWithStepsBlock(runtime, element) {
submitXHR = $.post(handlerUrl, JSON.stringify({})).success(handleTryAgain); submitXHR = $.post(handlerUrl, JSON.stringify({})).success(handleTryAgain);
} }
// Installs a document-level click handler that dismisses any visible
// per-choice feedback popups when the user clicks outside a choice.
function initClickHandlers() {
    var tipsSelector = ".choice .choice-tips";
    var choiceSelector = '.choice';
    $(document).on("click", function(event, ui) {
        var clicked = $(event.target);
        // Inside = the tips popup itself, or any ancestor that is a choice.
        var insideChoice = clicked.is(tipsSelector) || clicked.parents(choiceSelector).length > 0;
        if (!insideChoice) {
            $(tipsSelector).not(':hidden').hide();
            $('.choice-tips-container').removeClass('with-tips');
        }
    });
}
function initXBlockView() { function initXBlockView() {
checkmark = $('.assessment-checkmark', element); checkmark = $('.assessment-checkmark', element);
...@@ -366,6 +408,7 @@ function MentoringWithStepsBlock(runtime, element) { ...@@ -366,6 +408,7 @@ function MentoringWithStepsBlock(runtime, element) {
updateDisplay(); updateDisplay();
} }
initClickHandlers();
initXBlockView(); initXBlockView();
} }
...@@ -30,6 +30,8 @@ MentoringBlock.url_name = String() ...@@ -30,6 +30,8 @@ MentoringBlock.url_name = String()
loader = ResourceLoader(__name__) loader = ResourceLoader(__name__)
CORRECT, INCORRECT, PARTIAL = "correct", "incorrect", "partially-correct"
class PopupCheckMixin(object): class PopupCheckMixin(object):
""" """
...@@ -133,6 +135,88 @@ class MentoringAssessmentBaseTest(ProblemBuilderBaseTest): ...@@ -133,6 +135,88 @@ class MentoringAssessmentBaseTest(ProblemBuilderBaseTest):
return mentoring, controls return mentoring, controls
def assert_hidden(self, elem):
    """Assert that ``elem`` is currently invisible to the user."""
    visible = elem.is_displayed()
    self.assertFalse(visible)
def assert_disabled(self, elem):
    """Assert that ``elem`` is visible yet disabled (present but not usable)."""
    displayed = elem.is_displayed()
    self.assertTrue(displayed)
    enabled = elem.is_enabled()
    self.assertFalse(enabled)
def assert_clickable(self, elem):
    """Assert that ``elem`` is both visible and enabled, i.e. ready for clicks."""
    displayed = elem.is_displayed()
    self.assertTrue(displayed)
    enabled = elem.is_enabled()
    self.assertTrue(enabled)
def ending_controls(self, controls, last):
    """Check the forward-navigation controls after a question is answered.

    On the last step the "Next" button is gone and "Review" is shown but
    disabled; on intermediate steps the roles are reversed.
    """
    if not last:
        self.assert_disabled(controls.next_question)
        self.assert_hidden(controls.review)
        return
    self.assert_hidden(controls.next_question)
    self.assert_disabled(controls.review)
def selected_controls(self, controls, last):
    """Check control state right after a choice was selected.

    Submit must be clickable; the end-of-step controls must match ``last``.
    """
    self.assert_clickable(controls.submit)
    self.ending_controls(controls, last)
def assert_message_text(self, mentoring, text):
    """Assert the assessment-level message is displayed and reads exactly ``text``."""
    wrapper = mentoring.find_element_by_css_selector('.assessment-message')
    self.assertEqual(wrapper.text, text)
    self.assertTrue(wrapper.is_displayed())
def assert_no_message_text(self, mentoring):
    """Assert the assessment message area contains no text at all."""
    wrapper = mentoring.find_element_by_css_selector('.assessment-message')
    self.assertEqual(wrapper.text, '')
def check_question_feedback(self, step_builder, question):
    """Verify that question-level feedback toggles correctly.

    The feedback starts out visible with the expected message; clicking the
    question hides it, and clicking the checkmark brings it back.
    """
    checkmark = step_builder.find_element_by_css_selector('.assessment-checkmark')
    feedback = question.find_element_by_css_selector(".feedback")
    self.assertTrue(feedback.is_displayed())
    self.assertEqual(feedback.text, "Question Feedback Message")
    question.click()
    self.assertFalse(feedback.is_displayed())
    checkmark.click()
    self.assertTrue(feedback.is_displayed())
def do_submit_wait(self, controls, last):
    """After submitting, wait for whichever forward control applies.

    Review on the last step, Next otherwise.
    """
    target = controls.review if last else controls.next_question
    self.wait_until_clickable(target)
def do_post(self, controls, last):
    """Advance past the current step: click Review on the last step, Next otherwise."""
    button = controls.review if last else controls.next_question
    button.click()
def multiple_response_question(self, number, mentoring, controls, choice_names, result, last=False):
"""Answer the MRQ by selecting ``choice_names``, submit, and verify ``result``.

Starts from an all-unchecked state, selects each named choice in turn,
submits, and checks that exactly one ``result`` checkmark is shown.
"""
question = self.peek_at_multiple_response_question(number, mentoring, controls, last=last)
choices = GetChoices(question)
# All four MRQ choices must start unchecked.
expected_choices = {
"Its elegance": False,
"Its beauty": False,
"Its gracefulness": False,
"Its bugs": False,
}
self.assertEquals(choices.state, expected_choices)
# Select each requested choice and track the expected checkbox state.
for name in choice_names:
choices.select(name)
expected_choices[name] = True
self.assertEquals(choices.state, expected_choices)
self.selected_controls(controls, last)
controls.submit.click()
self.do_submit_wait(controls, last)
self._assert_checkmark(mentoring, result)
# NOTE(review): always clicks Review regardless of ``last`` — all callers in
# this file pass last=True here; confirm before reusing with last=False.
controls.review.click()
def expect_question_visible(self, number, mentoring, question_text=None): def expect_question_visible(self, number, mentoring, question_text=None):
if not question_text: if not question_text:
question_text = self.question_text(number) question_text = self.question_text(number)
...@@ -163,6 +247,14 @@ class MentoringAssessmentBaseTest(ProblemBuilderBaseTest): ...@@ -163,6 +247,14 @@ class MentoringAssessmentBaseTest(ProblemBuilderBaseTest):
self.wait_until_clickable(controls.next_question) self.wait_until_clickable(controls.next_question)
controls.next_question.click() controls.next_question.click()
def _assert_checkmark(self, mentoring, result):
    """Assert that exactly one checkmark — the one matching ``result`` — is shown."""
    for name in (CORRECT, INCORRECT, PARTIAL):
        expected = 1 if name == result else 0
        found = mentoring.find_elements_by_css_selector(".checkmark-{}".format(name))
        self.assertEqual(len(found), expected)
class GetChoices(object): class GetChoices(object):
""" Helper class for interacting with MCQ options """ """ Helper class for interacting with MCQ options """
......
...@@ -18,9 +18,7 @@ ...@@ -18,9 +18,7 @@
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>. # "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
# #
from ddt import ddt, unpack, data from ddt import ddt, unpack, data
from .base_test import MentoringAssessmentBaseTest, GetChoices from .base_test import CORRECT, INCORRECT, PARTIAL, MentoringAssessmentBaseTest, GetChoices
CORRECT, INCORRECT, PARTIAL = "correct", "incorrect", "partially-correct"
@ddt @ddt
...@@ -47,29 +45,10 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest): ...@@ -47,29 +45,10 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
controls.click() controls.click()
title.click() title.click()
def assert_hidden(self, elem):
self.assertFalse(elem.is_displayed())
def assert_disabled(self, elem):
self.assertTrue(elem.is_displayed())
self.assertFalse(elem.is_enabled())
def assert_clickable(self, elem):
self.assertTrue(elem.is_displayed())
self.assertTrue(elem.is_enabled())
def assert_persistent_elements_present(self, mentoring): def assert_persistent_elements_present(self, mentoring):
self.assertIn("A Simple Assessment", mentoring.text) self.assertIn("A Simple Assessment", mentoring.text)
self.assertIn("This paragraph is shared between all questions.", mentoring.text) self.assertIn("This paragraph is shared between all questions.", mentoring.text)
def _assert_checkmark(self, mentoring, result):
"""Assert that only the desired checkmark is present."""
states = {CORRECT: 0, INCORRECT: 0, PARTIAL: 0}
states[result] += 1
for name, count in states.items():
self.assertEqual(len(mentoring.find_elements_by_css_selector(".checkmark-{}".format(name))), count)
def go_to_workbench_main_page(self): def go_to_workbench_main_page(self):
self.browser.get(self.live_server_url) self.browser.get(self.live_server_url)
...@@ -104,35 +83,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest): ...@@ -104,35 +83,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self._assert_checkmark(mentoring, result) self._assert_checkmark(mentoring, result)
self.do_post(controls, last) self.do_post(controls, last)
def ending_controls(self, controls, last):
if last:
self.assert_hidden(controls.next_question)
self.assert_disabled(controls.review)
else:
self.assert_disabled(controls.next_question)
self.assert_hidden(controls.review)
def selected_controls(self, controls, last):
self.assert_clickable(controls.submit)
if last:
self.assert_hidden(controls.next_question)
self.assert_disabled(controls.review)
else:
self.assert_disabled(controls.next_question)
self.assert_hidden(controls.review)
def do_submit_wait(self, controls, last):
if last:
self.wait_until_clickable(controls.review)
else:
self.wait_until_clickable(controls.next_question)
def do_post(self, controls, last):
if last:
controls.review.click()
else:
controls.next_question.click()
def single_choice_question(self, number, mentoring, controls, choice_name, result, last=False): def single_choice_question(self, number, mentoring, controls, choice_name, result, last=False):
question = self.expect_question_visible(number, mentoring) question = self.expect_question_visible(number, mentoring)
...@@ -213,44 +163,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest): ...@@ -213,44 +163,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
return question return question
def check_question_feedback(self, mentoring, question):
question_checkmark = mentoring.find_element_by_css_selector('.assessment-checkmark')
question_feedback = question.find_element_by_css_selector(".feedback")
self.assertTrue(question_feedback.is_displayed())
self.assertEqual(question_feedback.text, "Question Feedback Message")
question.click()
self.assertFalse(question_feedback.is_displayed())
question_checkmark.click()
self.assertTrue(question_feedback.is_displayed())
def multiple_response_question(self, number, mentoring, controls, choice_names, result, last=False):
question = self.peek_at_multiple_response_question(number, mentoring, controls, last=last)
choices = GetChoices(question)
expected_choices = {
"Its elegance": False,
"Its beauty": False,
"Its gracefulness": False,
"Its bugs": False,
}
self.assertEquals(choices.state, expected_choices)
for name in choice_names:
choices.select(name)
expected_choices[name] = True
self.assertEquals(choices.state, expected_choices)
self.selected_controls(controls, last)
controls.submit.click()
self.do_submit_wait(controls, last)
self._assert_checkmark(mentoring, result)
controls.review.click()
def peek_at_review(self, mentoring, controls, expected, extended_feedback=False): def peek_at_review(self, mentoring, controls, expected, extended_feedback=False):
self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring) self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
...@@ -288,15 +200,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest): ...@@ -288,15 +200,6 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self.assert_hidden(controls.review) self.assert_hidden(controls.review)
self.assert_hidden(controls.review_link) self.assert_hidden(controls.review_link)
def assert_message_text(self, mentoring, text):
message_wrapper = mentoring.find_element_by_css_selector('.assessment-message')
self.assertEqual(message_wrapper.text, text)
self.assertTrue(message_wrapper.is_displayed())
def assert_no_message_text(self, mentoring):
message_wrapper = mentoring.find_element_by_css_selector('.assessment-message')
self.assertEqual(message_wrapper.text, '')
def extended_feedback_checks(self, mentoring, controls, expected_results): def extended_feedback_checks(self, mentoring, controls, expected_results):
# Multiple choice is third correctly answered question # Multiple choice is third correctly answered question
self.assert_hidden(controls.review_link) self.assert_hidden(controls.review_link)
......
from .base_test import CORRECT, INCORRECT, PARTIAL, MentoringAssessmentBaseTest, GetChoices
from ddt import ddt, data
@ddt
class StepBuilderTest(MentoringAssessmentBaseTest):
def freeform_answer(self, number, step_builder, controls, text_input, result, saved_value="", last=False):
"""Answer the free-form (long answer) question and advance.

Types ``text_input`` into the textarea, submits, checks that the shown
checkmark matches ``result``, then moves on (Review when ``last``).
``saved_value`` is the text expected to be pre-filled from a prior attempt.
"""
self.expect_question_visible(number, step_builder)
answer = step_builder.find_element_by_css_selector("textarea.answer.editable")
self.assertIn(self.question_text(number), step_builder.text)
self.assertIn("What is your goal?", step_builder.text)
# Previously saved text (if any) should be restored into the textarea.
self.assertEquals(saved_value, answer.get_attribute("value"))
# With no saved answer the forward controls start out disabled.
if not saved_value:
self.assert_disabled(controls.submit)
self.assert_disabled(controls.next_question)
answer.clear()
answer.send_keys(text_input)
self.assertEquals(text_input, answer.get_attribute("value"))
# Typing an answer enables Submit; Review/Try-again stay hidden until later.
self.assert_clickable(controls.submit)
self.ending_controls(controls, last)
self.assert_hidden(controls.review)
self.assert_hidden(controls.try_again)
controls.submit.click()
self.do_submit_wait(controls, last)
self._assert_checkmark(step_builder, result)
self.do_post(controls, last)
def single_choice_question(self, number, step_builder, controls, choice_name, result, last=False):
"""Answer the MCQ by picking ``choice_name``, submit, and verify ``result``."""
question = self.expect_question_visible(number, step_builder)
self.assertIn("Do you like this MCQ?", question.text)
# Nothing selected yet, so Submit must be disabled.
self.assert_disabled(controls.submit)
self.ending_controls(controls, last)
self.assert_hidden(controls.try_again)
choices = GetChoices(question)
expected_state = {"Yes": False, "Maybe not": False, "I don't understand": False}
self.assertEquals(choices.state, expected_state)
choices.select(choice_name)
expected_state[choice_name] = True
self.assertEquals(choices.state, expected_state)
# Selecting a choice enables Submit.
self.selected_controls(controls, last)
controls.submit.click()
self.do_submit_wait(controls, last)
self._assert_checkmark(step_builder, result)
self.do_post(controls, last)
def rating_question(self, number, step_builder, controls, choice_name, result, last=False):
"""Answer the rating (1-5) question by picking ``choice_name`` and verify ``result``."""
self.expect_question_visible(number, step_builder)
self.assertIn("How much do you rate this MCQ?", step_builder.text)
# Nothing selected yet, so Submit must be disabled.
self.assert_disabled(controls.submit)
self.ending_controls(controls, last)
self.assert_hidden(controls.try_again)
# Rating choices live under the .rating element rather than the question node.
choices = GetChoices(step_builder, ".rating")
expected_choices = {
"1 - Not good at all": False,
"2": False, "3": False, "4": False,
"5 - Extremely good": False,
"I don't want to rate it": False,
}
self.assertEquals(choices.state, expected_choices)
choices.select(choice_name)
expected_choices[choice_name] = True
self.assertEquals(choices.state, expected_choices)
self.ending_controls(controls, last)
controls.submit.click()
self.do_submit_wait(controls, last)
self._assert_checkmark(step_builder, result)
self.do_post(controls, last)
def peek_at_multiple_response_question(
        self, number, step_builder, controls, last=False, extended_feedback=False, alternative_review=False
):
    """Bring the MRQ step into view and return its question element.

    When ``extended_feedback`` is set, also verify the review-mode state:
    Submit disabled, question-level feedback toggling works, and — with
    ``alternative_review`` — the Review link is clickable while Try-again
    stays hidden.

    Bug fix: the ``return question`` previously sat *before* the
    ``extended_feedback`` block, so those checks were dead code and never
    ran. The return now happens after them.
    """
    question = self.expect_question_visible(number, step_builder)
    self.assertIn("What do you like in this MRQ?", step_builder.text)
    if extended_feedback:
        self.assert_disabled(controls.submit)
        self.check_question_feedback(step_builder, question)
        if alternative_review:
            self.assert_clickable(controls.review_link)
            self.assert_hidden(controls.try_again)
    return question
def peek_at_review(self, step_builder, controls, expected, extended_feedback=False):
"""Verify the review step against the ``expected`` results dict.

``expected`` carries: correct, partial, incorrect, percentage,
num_attempts, max_attempts. Checks the score line, the per-correctness
grade breakdown, review-link availability, the attempts-used message,
and that all forward controls are hidden.
"""
self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), step_builder)
# Check grade breakdown
# NOTE(review): "1 questions correctly" reads like a grammar slip and the
# .format(**expected) on a literal with no placeholders is a no-op —
# confirm this matches the template's actual singular-form output.
if expected["correct"] == 1:
self.assertIn("You answered 1 questions correctly.".format(**expected), step_builder.text)
else:
self.assertIn("You answered {correct} questions correctly.".format(**expected), step_builder.text)
if expected["partial"] == 1:
self.assertIn("You answered 1 question partially correctly.", step_builder.text)
else:
self.assertIn("You answered {partial} questions partially correctly.".format(**expected), step_builder.text)
if expected["incorrect"] == 1:
self.assertIn("You answered 1 question incorrectly.", step_builder.text)
else:
self.assertIn("You answered {incorrect} questions incorrectly.".format(**expected), step_builder.text)
# Check presence of review links
# - If unlimited attempts: no review links
# - If limited attempts:
# - If not max attempts reached: no review links
# - If max attempts reached:
# - If extended feedback: review links available
# - If not extended feedback: no review links
review_list = step_builder.find_elements_by_css_selector('.review-list')
if expected["max_attempts"] == 0:
self.assertFalse(review_list)
else:
if expected["num_attempts"] < expected["max_attempts"]:
self.assertFalse(review_list)
elif expected["num_attempts"] == expected["max_attempts"]:
if extended_feedback:
# One review link per question, grouped by correctness.
for correctness in ['correct', 'incorrect', 'partial']:
review_items = step_builder.find_elements_by_css_selector('.%s-list li' % correctness)
self.assertEqual(len(review_items), expected[correctness])
else:
self.assertFalse(review_list)
# Check if info about number of attempts used is correct
if expected["max_attempts"] == 1:
self.assertIn("You have used {num_attempts} of 1 submission.".format(**expected), step_builder.text)
elif expected["max_attempts"] == 0:
self.assertNotIn("You have used", step_builder.text)
else:
self.assertIn(
"You have used {num_attempts} of {max_attempts} submissions.".format(**expected),
step_builder.text
)
# Check controls
self.assert_hidden(controls.submit)
self.assert_hidden(controls.next_question)
self.assert_hidden(controls.review)
self.assert_hidden(controls.review_link)
def popup_check(self, step_builder, item_feedbacks, prefix='', do_submit=True):
"""Check the per-choice feedback popups for an MRQ.

For each choice (in order), clicks its result icon, verifies the popup
appears with the expected text from ``item_feedbacks``, stays open when
clicked itself, and closes when clicking elsewhere. ``prefix`` scopes the
.choice selector to one question.

NOTE(review): ``do_submit`` is accepted but never used in this body —
confirm whether it is vestigial or consumed by an override elsewhere.
"""
for index, expected_feedback in enumerate(item_feedbacks):
choice_wrapper = step_builder.find_elements_by_css_selector(prefix + " .choice")[index]
choice_wrapper.click()
item_feedback_icon = choice_wrapper.find_element_by_css_selector(".choice-result")
item_feedback_icon.click()
item_feedback_popup = choice_wrapper.find_element_by_css_selector(".choice-tips")
self.assertTrue(item_feedback_popup.is_displayed())
self.assertEqual(item_feedback_popup.text, expected_feedback)
# Clicking the popup itself must not dismiss it.
item_feedback_popup.click()
self.assertTrue(item_feedback_popup.is_displayed())
# Clicking outside the choice dismisses the popup.
step_builder.click()
self.assertFalse(item_feedback_popup.is_displayed())
def extended_feedback_checks(self, step_builder, controls, expected_results):
"""Exercise the extended-feedback review flow after all attempts are used.

Navigates from the review step to the MRQ via its review link, checks
the item-level checkmarks and feedback popups, returns to review, then
revisits the MRQ by stepping forward from the rating question.
"""
# MRQ is third correctly answered question
self.assert_hidden(controls.review_link)
step_builder.find_elements_by_css_selector('.correct-list li a')[2].click()
self.peek_at_multiple_response_question(
None, step_builder, controls, extended_feedback=True, alternative_review=True
)
# Step should display 5 checkmarks (4 correct items for MRQ, plus step-level feedback about correctness)
correct_marks = step_builder.find_elements_by_css_selector('.checkmark-correct')
incorrect_marks = step_builder.find_elements_by_css_selector('.checkmark-incorrect')
self.assertEqual(len(correct_marks), 5)
self.assertEqual(len(incorrect_marks), 0)
# Expected popup text per choice, in display order.
item_feedbacks = [
"This is something everyone has to like about this MRQ",
"This is something everyone has to like about this MRQ",
"This MRQ is indeed very graceful",
"Nah, there aren't any!"
]
self.popup_check(step_builder, item_feedbacks, prefix='div[data-name="mrq_1_1"]', do_submit=False)
controls.review_link.click()
self.peek_at_review(step_builder, controls, expected_results, extended_feedback=True)
# Review rating question (directly precedes MRQ)
step_builder.find_elements_by_css_selector('.incorrect-list li a')[0].click()
# It should be possible to visit the MRQ from here
self.wait_until_clickable(controls.next_question)
controls.next_question.click()
self.peek_at_multiple_response_question(
None, step_builder, controls, extended_feedback=True, alternative_review=True
)
@data(
{"max_attempts": 0, "extended_feedback": False}, # Unlimited attempts, no extended feedback
{"max_attempts": 1, "extended_feedback": True}, # Limited attempts, extended feedback
{"max_attempts": 1, "extended_feedback": False}, # Limited attempts, no extended feedback
{"max_attempts": 2, "extended_feedback": True}, # Limited attempts, extended feedback
)
def test_step_builder(self, params):
"""End-to-end walk through the Step Builder assessment.

Answers all four questions (one wrong, one partial), checks the review
step, then — when attempts remain — tries again with all-correct answers
and re-checks review, messages, and (optionally) extended feedback.
"""
max_attempts = params['max_attempts']
extended_feedback = params['extended_feedback']
step_builder, controls = self.load_assessment_scenario("step_builder.xml", params)
# Step 1
# Submit free-form answer, go to next step
self.freeform_answer(None, step_builder, controls, 'This is the answer', CORRECT)
# Step 2
# Submit MCQ, go to next step
self.single_choice_question(None, step_builder, controls, 'Maybe not', INCORRECT)
# Step 3
# Submit rating, go to next step
self.rating_question(None, step_builder, controls, "5 - Extremely good", CORRECT)
# Last step
# Submit MRQ, go to review
self.multiple_response_question(None, step_builder, controls, ("Its beauty",), PARTIAL, last=True)
# Review step
expected_results = {
"correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
"num_attempts": 1, "max_attempts": max_attempts
}
self.peek_at_review(step_builder, controls, expected_results, extended_feedback=extended_feedback)
# With a single allowed attempt we are done: all-attempts-used message, no retry.
if max_attempts == 1:
self.assert_message_text(step_builder, "Note: you have used all attempts. Continue to the next unit.")
self.assert_disabled(controls.try_again)
return
self.assert_message_text(step_builder, "Assessment additional feedback message text")
self.assert_clickable(controls.try_again)
# Try again
controls.try_again.click()
self.wait_until_hidden(controls.try_again)
self.assert_no_message_text(step_builder)
# Second attempt: previous free-form answer is pre-filled; answer everything better.
self.freeform_answer(
None, step_builder, controls, 'This is a different answer', CORRECT, saved_value='This is the answer'
)
self.single_choice_question(None, step_builder, controls, 'Yes', CORRECT)
self.rating_question(None, step_builder, controls, "1 - Not good at all", INCORRECT)
user_selection = ("Its elegance", "Its beauty", "Its gracefulness")
self.multiple_response_question(None, step_builder, controls, user_selection, CORRECT, last=True)
expected_results = {
"correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
"num_attempts": 2, "max_attempts": max_attempts
}
self.peek_at_review(step_builder, controls, expected_results, extended_feedback=extended_feedback)
if max_attempts == 2:
self.assert_disabled(controls.try_again)
else:
self.assert_clickable(controls.try_again)
if 1 <= max_attempts <= 2:
self.assert_message_text(step_builder, "Note: you have used all attempts. Continue to the next unit.")
else:
self.assert_message_text(step_builder, "Assessment additional feedback message text")
if extended_feedback:
self.extended_feedback_checks(step_builder, controls, expected_results)
def test_review_tips(self):
"""Review tips appear only for wrong/partial answers while attempts remain.

Attempt 1 (some wrong): tips for the failed questions are shown.
Attempt 2 (all correct): tips hidden. Attempt 3 (wrong again, last
attempt): tips stay hidden because no attempts remain.
"""
params = {
"max_attempts": 3,
"extended_feedback": False,
"include_review_tips": True
}
step_builder, controls = self.load_assessment_scenario("step_builder.xml", params)
# Get one question wrong and one partially wrong on attempt 1 of 3: ####################
self.freeform_answer(None, step_builder, controls, 'This is the answer', CORRECT)
self.single_choice_question(None, step_builder, controls, 'Maybe not', INCORRECT)
self.rating_question(None, step_builder, controls, "5 - Extremely good", CORRECT)
self.multiple_response_question(None, step_builder, controls, ("Its beauty",), PARTIAL, last=True)
# The review tips for MCQ 2 and the MRQ should be shown:
review_tips = step_builder.find_element_by_css_selector('.assessment-review-tips')
self.assertTrue(review_tips.is_displayed())
self.assertIn('You might consider reviewing the following items', review_tips.text)
self.assertIn('Take another look at', review_tips.text)
self.assertIn('Lesson 1', review_tips.text)
self.assertNotIn('Lesson 2', review_tips.text) # This MCQ was correct
self.assertIn('Lesson 3', review_tips.text)
# The on-assessment-review message is also shown if attempts remain:
self.assert_message_text(step_builder, "Assessment additional feedback message text")
# Try again
self.assert_clickable(controls.try_again)
controls.try_again.click()
# Get no questions wrong on attempt 2 of 3: ############################################
self.freeform_answer(
None, step_builder, controls, 'This is the answer', CORRECT, saved_value='This is the answer'
)
self.single_choice_question(None, step_builder, controls, 'Yes', CORRECT)
self.rating_question(None, step_builder, controls, "5 - Extremely good", CORRECT)
user_selection = ("Its elegance", "Its beauty", "Its gracefulness")
self.multiple_response_question(None, step_builder, controls, user_selection, CORRECT, last=True)
self.assert_message_text(step_builder, "Assessment additional feedback message text")
# Everything was correct, so no tips should be displayed.
self.assertFalse(review_tips.is_displayed())
# Try again
self.assert_clickable(controls.try_again)
controls.try_again.click()
# Get some questions wrong again on attempt 3 of 3:
self.freeform_answer(
None, step_builder, controls, 'This is the answer', CORRECT, saved_value='This is the answer'
)
self.single_choice_question(None, step_builder, controls, 'Maybe not', INCORRECT)
self.rating_question(None, step_builder, controls, "1 - Not good at all", INCORRECT)
self.multiple_response_question(None, step_builder, controls, ("Its beauty",), PARTIAL, last=True)
# The review tips will not be shown because no attempts remain:
self.assertFalse(review_tips.is_displayed())
<step-builder url_name="step-builder" display_name="Step Builder"
max_attempts="{{max_attempts}}" extended_feedback="{{extended_feedback}}">
<sb-step display_name="First step">
<pb-answer name="goal" question="What is your goal?" />
</sb-step>
<sb-step display_name="Second step">
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value="yes">Yes</pb-choice>
<pb-choice value="maybenot">Maybe not</pb-choice>
<pb-choice value="understand">I don't understand</pb-choice>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
{% if include_review_tips %}
<pb-message type="on-assessment-review-question">
<html>Take another look at <a href="#">Lesson 1</a></html>
</pb-message>
{% endif %}
</pb-mcq>
</sb-step>
<sb-step display_name="Third step">
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
<pb-choice value="notwant">I don't want to rate it</pb-choice>
<pb-tip values='["4","5"]'>I love good grades.</pb-tip>
<pb-tip values='["1","2", "3"]'>Will do better next time...</pb-tip>
<pb-tip values='["notwant"]'>Your loss!</pb-tip>
{% if include_review_tips %}
<pb-message type="on-assessment-review-question">
<html>Take another look at <a href="#">Lesson 2</a></html>
</pb-message>
{% endif %}
</pb-rating>
</sb-step>
<sb-step display_name="Last step">
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]' message="Question Feedback Message">
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
<pb-choice value="gracefulness">Its gracefulness</pb-choice>
<pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
{% if include_review_tips %}
<pb-message type="on-assessment-review-question">
<html>Take another look at <a href="#">Lesson 3</a></html>
</pb-message>
{% endif %}
</pb-mrq>
</sb-step>
<sb-review-step></sb-review-step>
<pb-message type="on-assessment-review">
<html>Assessment additional feedback message text</html>
</pb-message>
</step-builder>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment