Commit 98dd0679 by Jonathan Piacenti

Tests almost working right.

parent 22e12f6d
......@@ -527,8 +527,6 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
steps = [child for child in children if isinstance(child, StepMixin)] # Faster than the self.steps property
assessment_message = None
print children
print submissions
for child in children:
if child.name and child.name in submissions:
submission = submissions[child.name]
......@@ -568,7 +566,6 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
self.num_attempts += 1
self.completed = True
print current_child
event_data['exercise_id'] = current_child.name
event_data['num_attempts'] = self.num_attempts
event_data['submitted_answer'] = submissions
......
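For context on the `steps` filtering used in the hunk above (and mirrored by the JavaScript change below, which switches the review handler from `mentoring.children` to `mentoring.steps`): only children that are step blocks participate in the assessment, while non-step children such as the `<acid/>` and `<html_demo>` blocks in the test scenarios are ignored. A minimal sketch of that pattern, using hypothetical block classes purely for illustration (not part of this commit):

    # Illustration only: children are narrowed to "steps" by checking for
    # StepMixin, as MentoringBlock does in the hunk above.
    class StepMixin(object):
        """Marker mixin for child blocks that count as assessment steps."""

    class AnswerBlock(StepMixin):   # hypothetical step block
        name = "goal"

    class HtmlBlock(object):        # hypothetical non-step block (e.g. html_demo / acid)
        name = None

    children = [AnswerBlock(), HtmlBlock(), AnswerBlock()]
    steps = [child for child in children if isinstance(child, StepMixin)]
    assert len(steps) == 2  # the HtmlBlock is excluded from the steps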
......@@ -276,7 +276,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
num_attempts: response.num_attempts
};
var result = response.results[1];
var child = mentoring.children[active_child];
var child = mentoring.steps[active_child];
callIfExists(child, 'handleSubmit', result, options);
callIfExists(child, 'handleReview', result, options);
}
......
<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2" extended_feedback="true">
<html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p>
<p>Please answer the questions below.</p>
</html_demo>
<html_demo>
We need an XBlock with JavaScript here to test that it doesn't interfere
with the assessment, since it will show up in runtime(element).children,
but it is not a "step" element:
</html_demo>
<acid/>
<pb-answer name="goal" question="What is your goal?" />
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value="yes">Yes</pb-choice>
<pb-choice value="maybenot">Maybe not</pb-choice>
<pb-choice value="understand">I don't understand</pb-choice>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
</pb-mcq>
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
<pb-choice value="notwant">I don't want to rate it</pb-choice>
<pb-tip values='["4","5"]'>I love good grades.</pb-tip>
<pb-tip values='["1","2", "3"]'>Will do better next time...</pb-tip>
<pb-tip values='["notwant"]'>Your loss!</pb-tip>
</pb-rating>
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]' message="Thank you for answering!">
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
<pb-choice value="gracefulness">Its gracefulness</pb-choice>
<pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
</pb-mrq>
<pb-message type="on-assessment-review">
<html>Assessment additional feedback message text</html>
</pb-message>
</problem-builder>
......@@ -6,32 +6,32 @@
<pb-answer name="goal" question="What is your goal?">
</pb-answer>
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices="yes">
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value="yes">Yes</pb-choice>
<pb-choice value="maybenot">Maybe not</pb-choice>
<pb-choice value="understand">I don't understand</pb-choice>
<pb-tip values="yes">Great!</pb-tip>
<pb-tip values="maybenot">Ah, damn.</pb-tip>
<pb-tip values="understand"><div id="test-custom-html">Really?</div></pb-tip>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
</pb-mcq>
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices="4,5">
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
<pb-choice value="notwant">I don't want to rate it</pb-choice>
<pb-tip values="4,5">I love good grades.</pb-tip>
<pb-tip values="1,2,3">Will do better next time...</pb-tip>
<pb-tip values="notwant">Your loss!</pb-tip>
<pb-tip values='["4","5"]'>I love good grades.</pb-tip>
<pb-tip values='["1","2","3"]'>Will do better next time...</pb-tip>
<pb-tip values='["notwant"]'>Your loss!</pb-tip>
</pb-rating>
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices="gracefulness,elegance,beauty" message="Thank you for answering!">
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]' message="Thank you for answering!">
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
<pb-choice value="gracefulness">Its gracefulness</pb-choice>
<pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values="gracefulness">This MRQ is indeed very graceful</pb-tip>
<pb-tip values="elegance,beauty">This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values="bugs">Nah, there aren't any!</pb-tip>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance,beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
</pb-mrq>
<pb-message type="completed">
......
......@@ -32,6 +32,30 @@ class MentoringBaseTest(SeleniumBaseTest):
module_name = __name__
default_css_selector = 'div.mentoring'
def popup_check(self, mentoring, item_feedbacks, prefix='', do_submit=True):
submit = mentoring.find_element_by_css_selector('.submit input.input-main')
for index, expected_feedback in enumerate(item_feedbacks):
choice_wrapper = mentoring.find_elements_by_css_selector(prefix + " .choice")[index]
if do_submit:
# clicking on actual radio button
choice_wrapper.find_element_by_css_selector(".choice-selector input").click()
submit.click()
self.wait_until_disabled(submit)
item_feedback_icon = choice_wrapper.find_element_by_css_selector(".choice-result")
choice_wrapper.click()
item_feedback_icon.click() # clicking on item feedback icon
item_feedback_popup = choice_wrapper.find_element_by_css_selector(".choice-tips")
self.assertTrue(item_feedback_popup.is_displayed())
self.assertEqual(item_feedback_popup.text, expected_feedback)
item_feedback_popup.click()
self.assertTrue(item_feedback_popup.is_displayed())
mentoring.click()
self.assertFalse(item_feedback_popup.is_displayed())
class MentoringAssessmentBaseTest(MentoringBaseTest):
@staticmethod
......@@ -54,6 +78,7 @@ class MentoringAssessmentBaseTest(MentoringBaseTest):
controls.next_question = mentoring.find_element_by_css_selector("input.input-next")
controls.review = mentoring.find_element_by_css_selector("input.input-review")
controls.try_again = mentoring.find_element_by_css_selector("input.input-try-again")
controls.review_link = mentoring.find_element_by_css_selector(".review-link a")
return mentoring, controls
......
......@@ -17,11 +17,13 @@
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
from ddt import ddt, unpack, data
from .base_test import MentoringAssessmentBaseTest, GetChoices
CORRECT, INCORRECT, PARTIAL = "correct", "incorrect", "partially-correct"
@ddt
class MentoringAssessmentTest(MentoringAssessmentBaseTest):
def _selenium_bug_workaround_scroll_to(self, mentoring, question):
"""Workaround for selenium bug:
......@@ -190,19 +192,29 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self._assert_checkmark(mentoring, result)
self.do_post(controls, last)
def peek_at_multiple_choice_question(self, number, mentoring, controls, last=False):
def peek_at_multiple_response_question(
self, number, mentoring, controls, last=False, extended_feedback=False, alternative_review=False
):
question = self.expect_question_visible(number, mentoring)
self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring, question)
self.assertIn("What do you like in this MRQ?", mentoring.text)
if extended_feedback:
self.assert_disabled(controls.submit)
if alternative_review:
self.assert_clickable(controls.review_link)
self.assert_hidden(controls.try_again)
else:
self.assert_clickable(controls.review)
else:
self.assert_disabled(controls.submit)
self.ending_controls(controls, last)
return question
def multiple_choice_question(self, number, mentoring, controls, choice_names, result, last=False):
question = self.peek_at_multiple_choice_question(number, mentoring, controls, last=last)
def multiple_response_question(self, number, mentoring, controls, choice_names, result, last=False):
question = self.peek_at_multiple_response_question(number, mentoring, controls, last=last)
choices = GetChoices(question)
expected_choices = {
......@@ -227,11 +239,17 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self._assert_checkmark(mentoring, result)
controls.review.click()
def peek_at_review(self, mentoring, controls, expected):
def peek_at_review(self, mentoring, controls, expected, extended_feedback=False):
self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring)
self.assert_persistent_elements_present(mentoring)
if expected["num_attempts"] < expected["max_attempts"]:
self.assertIn("Note: if you retake this assessment, only your final score counts.", mentoring.text)
self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
elif extended_feedback:
for q_type in ['correct', 'incorrect', 'partial']:
self.assertEqual(len(mentoring.find_elements_by_css_selector('.%s-list li' % q_type)), expected[q_type])
else:
self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
if expected["correct"] == 1:
self.assertIn("You answered 1 questions correctly.".format(**expected), mentoring.text)
else:
......@@ -255,6 +273,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self.assert_hidden(controls.submit)
self.assert_hidden(controls.next_question)
self.assert_hidden(controls.review)
self.assert_hidden(controls.review_link)
def assert_messages_text(self, mentoring, text):
messages = mentoring.find_element_by_css_selector('.assessment-messages')
......@@ -267,25 +286,55 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self.assertFalse(messages.find_elements_by_xpath('./*'))
self.assertFalse(messages.is_displayed())
def test_assessment(self):
mentoring, controls = self.go_to_assessment("Assessment 1")
def extended_feedback_checks(self, mentoring, controls, expected_results):
        # The multiple response question is the third correctly answered question
self.assert_hidden(controls.review_link)
mentoring.find_elements_by_css_selector('.correct-list li a')[2].click()
self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)
# Four correct items, plus the overall correct indicator.
correct_marks = mentoring.find_elements_by_css_selector('.checkmark-correct')
incorrect_marks = mentoring.find_elements_by_css_selector('.checkmark-incorrect')
self.assertEqual(len(correct_marks), 5)
self.assertEqual(len(incorrect_marks), 0)
item_feedbacks = [
"This is something everyone has to like about this MRQ",
"This is something everyone has to like about this MRQ",
"This MRQ is indeed very graceful",
"Nah, there aren't any!"
]
self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1"]', do_submit=False)
self.assert_hidden(controls.review)
self.assert_disabled(controls.submit)
controls.review_link.click()
self.peek_at_review(mentoring, controls, expected_results, extended_feedback=True)
# Rating question, right before MRQ.
mentoring.find_elements_by_css_selector('.incorrect-list li a')[0].click()
# Should be possible to visit the MRQ from there.
self.wait_until_clickable(controls.next_question)
controls.next_question.click()
self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)
@data((1, False), ('Extended Feedback', True))
@unpack
def test_assessment(self, assessment, extended_feedback):
mentoring, controls = self.go_to_assessment("Assessment %s" % assessment)
self.freeform_answer(1, mentoring, controls, 'This is the answer', CORRECT)
self.single_choice_question(2, mentoring, controls, 'Maybe not', INCORRECT)
self.rating_question(3, mentoring, controls, "5 - Extremely good", CORRECT)
self.peek_at_multiple_choice_question(4, mentoring, controls, last=True)
self.peek_at_multiple_response_question(4, mentoring, controls, last=True)
# see if assessment remembers the current step
self.go_to_workbench_main_page()
mentoring, controls = self.go_to_assessment("Assessment 1")
mentoring, controls = self.go_to_assessment("Assessment %s" % assessment)
self.multiple_choice_question(4, mentoring, controls, ("Its beauty",), PARTIAL, last=True)
self.multiple_response_question(4, mentoring, controls, ("Its beauty",), PARTIAL, last=True)
expected_results = {
"correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
"num_attempts": 1, "max_attempts": 2
}
self.peek_at_review(mentoring, controls, expected_results)
self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
self.assert_messages_text(mentoring, "Assessment additional feedback message text")
self.assert_clickable(controls.try_again)
......@@ -298,15 +347,17 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
self.rating_question(3, mentoring, controls, "1 - Not good at all", INCORRECT)
user_selection = ("Its elegance", "Its beauty", "Its gracefulness")
self.multiple_choice_question(4, mentoring, controls, user_selection, CORRECT, last=True)
self.multiple_response_question(4, mentoring, controls, user_selection, CORRECT, last=True)
expected_results = {
"correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
"num_attempts": 2, "max_attempts": 2
}
self.peek_at_review(mentoring, controls, expected_results)
self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
self.assert_disabled(controls.try_again)
self.assert_messages_empty(mentoring)
if extended_feedback:
self.extended_feedback_checks(mentoring, controls, expected_results)
def test_single_question_assessment(self):
"""
......
......@@ -31,7 +31,7 @@ from .base_test import MentoringBaseTest
@ddt.ddt
class MCQBlockTest(MentoringBaseTest):
class QuestionnaireBlockTest(MentoringBaseTest):
def _selenium_bug_workaround_scroll_to(self, mcq_legend):
"""Workaround for selenium bug:
......@@ -159,8 +159,8 @@ class MCQBlockTest(MentoringBaseTest):
self.assertFalse(mcq1_tips.is_displayed())
self.assertFalse(mcq2_tips.is_displayed())
def test_mcq_with_comments(self):
mentoring = self.go_to_page('Mcq With Comments 1')
def test_mrq_with_comments(self):
mentoring = self.go_to_page('Mrq With Comments 1')
mcq = mentoring.find_element_by_css_selector('fieldset.choices')
messages = mentoring.find_element_by_css_selector('.messages')
submit = mentoring.find_element_by_css_selector('.submit input.input-main')
......@@ -186,9 +186,8 @@ class MCQBlockTest(MentoringBaseTest):
self.assertEqual(mcq_choices_input[2].get_attribute('value'), 'gracefulness')
self.assertEqual(mcq_choices_input[3].get_attribute('value'), 'bugs')
def test_mcq_feedback_popups(self):
mentoring = self.go_to_page('Mcq With Comments 1')
choices_list = mentoring.find_element_by_css_selector(".choices-list")
def test_mrq_feedback_popups(self):
mentoring = self.go_to_page('Mrq With Comments 1')
item_feedbacks = [
"This is something everyone has to like about this MRQ",
......@@ -196,25 +195,7 @@ class MCQBlockTest(MentoringBaseTest):
"This MRQ is indeed very graceful",
"Nah, there aren\\'t any!"
]
submit = mentoring.find_element_by_css_selector('.submit input.input-main')
for index, expected_feedback in enumerate(item_feedbacks):
choice_wrapper = choices_list.find_elements_by_css_selector(".choice")[index]
choice_wrapper.find_element_by_css_selector(".choice-selector input").click() # click actual radio button
submit.click()
self.wait_until_disabled(submit)
item_feedback_icon = choice_wrapper.find_element_by_css_selector(".choice-result")
choice_wrapper.click()
item_feedback_icon.click() # clicking on item feedback icon
item_feedback_popup = choice_wrapper.find_element_by_css_selector(".choice-tips")
self.assertTrue(item_feedback_popup.is_displayed())
self.assertEqual(item_feedback_popup.text, expected_feedback)
item_feedback_popup.click()
self.assertTrue(item_feedback_popup.is_displayed())
mentoring.click()
self.assertFalse(item_feedback_popup.is_displayed())
self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1_7"]')
def _get_questionnaire_options(self, questionnaire):
result = []
......@@ -299,7 +280,7 @@ class MCQBlockTest(MentoringBaseTest):
@patch.object(MentoringBlock, 'get_theme', Mock(return_value={'package': 'problem_builder',
'locations': ['public/themes/lms.css']}))
class MCQBlockAprosThemeTest(MCQBlockTest):
class QuestionnaireBlockAprosThemeTest(QuestionnaireBlockTest):
"""
Test MRQ/MCQ questions without the LMS theme which is on by default.
"""
......
<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2" extended_feedback="true">
<html_demo>
<p>This paragraph is shared between <strong>all</strong> questions.</p>
<p>Please answer the questions below.</p>
</html_demo>
<html_demo>
We need an XBlock with JavaScript here to test that it doesn't interfere
with the assessment, since it will show up in runtime(element).children,
but it is not a "step" element:
</html_demo>
<acid/>
<pb-answer name="goal" question="What is your goal?" />
<pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
<pb-choice value="yes">Yes</pb-choice>
<pb-choice value="maybenot">Maybe not</pb-choice>
<pb-choice value="understand">I don't understand</pb-choice>
<pb-tip values='["yes"]'>Great!</pb-tip>
<pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
<pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
</pb-mcq>
<pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
<pb-choice value="notwant">I don't want to rate it</pb-choice>
<pb-tip values='["4","5"]'>I love good grades.</pb-tip>
<pb-tip values='["1","2", "3"]'>Will do better next time...</pb-tip>
<pb-tip values='["notwant"]'>Your loss!</pb-tip>
</pb-rating>
<pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]' message="Thank you for answering!">
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
<pb-choice value="gracefulness">Its gracefulness</pb-choice>
<pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
</pb-mrq>
<pb-message type="on-assessment-review">
<html>Assessment additional feedback message text</html>
</pb-message>
</problem-builder>
<vertical_demo>
<problem-builder url_name="mcq_with_comments" display_name="MRQ With Resizable popups" weight="1" enforce_dependency="false">
<problem-builder url_name="mrq_with_comments" display_name="MRQ With Resizable popups" weight="1" enforce_dependency="false">
<pb-mrq name="mrq_1_1_7" question="What do you like in this MRQ?" required_choices='["elegance","gracefulness","beauty"]'>
<pb-choice value="elegance">Its elegance</pb-choice>
<pb-choice value="beauty">Its beauty</pb-choice>
......