Commit 4861c7f9 by Xavier Antoviaque

Merge pull request #89 from open-craft/fix-checkmark-on-second-assessment-run

Display correct checkmarks after retrying assessment
parents 496917c1 298123df
...@@ -10,6 +10,7 @@ function MentoringAssessmentView(runtime, element, mentoring) { ...@@ -10,6 +10,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
function cleanAll() { function cleanAll() {
// clean checkmark state // clean checkmark state
checkmark.removeClass('checkmark-correct icon-ok fa-check'); checkmark.removeClass('checkmark-correct icon-ok fa-check');
checkmark.removeClass('checkmark-partially-correct icon-ok fa-check');
checkmark.removeClass('checkmark-incorrect icon-exclamation fa-exclamation'); checkmark.removeClass('checkmark-incorrect icon-exclamation fa-exclamation');
/* hide all children */ /* hide all children */
......
<mentoring url_name="{{ url_name }}" display_name="Nav tooltip title" weight="1" mode="assessment"> <mentoring url_name="{{ url_name }}" display_name="Nav tooltip title" weight="1" mode="assessment" max_attempts="10">
<title>Default Title</title> <title>Default Title</title>
<shared-header> <shared-header>
<p>This paragraph is shared between <strong>all</strong> questions.</p> <p>This paragraph is shared between <strong>all</strong> questions.</p>
......
from .base_test import MentoringBaseTest from .base_test import MentoringBaseTest
CORRECT, INCORRECT, PARTIAL = "correct", "incorrect", "partially-correct"
class MentoringAssessmentTest(MentoringBaseTest): class MentoringAssessmentTest(MentoringBaseTest):
def _selenium_bug_workaround_scroll_to(self, mentoring): def _selenium_bug_workaround_scroll_to(self, mentoring):
...@@ -65,169 +66,214 @@ class MentoringAssessmentTest(MentoringBaseTest): ...@@ -65,169 +66,214 @@ class MentoringAssessmentTest(MentoringBaseTest):
return return
raise AssertionError("Expected selectable item present: {}".format(text)) raise AssertionError("Expected selectable item present: {}".format(text))
def test_assessment(self): def _assert_checkmark(self, mentoring, result):
# step 1 -- freeform answer """Assert that only the desired checkmark is present."""
states = {CORRECT: 0, INCORRECT: 0, PARTIAL: 0}
states[result] += 1
for name, count in states.items():
self.assertEqual(len(mentoring.find_elements_by_css_selector(".checkmark-{}".format(name))), count)
def go_to_workbench_main_page(self):
self.browser.get(self.live_server_url)
def go_to_assessment_1(self):
mentoring = self.go_to_page('Assessment 1') mentoring = self.go_to_page('Assessment 1')
class Namespace(object):
pass
controls = Namespace()
controls.submit = mentoring.find_element_by_css_selector("input.input-main")
controls.next_question = mentoring.find_element_by_css_selector("input.input-next")
controls.review = mentoring.find_element_by_css_selector("input.input-review")
controls.try_again = mentoring.find_element_by_css_selector("input.input-try-again")
return mentoring, controls
def at_step_1_freeform_answer(self, mentoring, controls, text_input, result, saved_value=""):
self.wait_until_text_in("QUESTION 1", mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring) self._selenium_bug_workaround_scroll_to(mentoring)
submit = mentoring.find_element_by_css_selector("input.input-main")
next_question = mentoring.find_element_by_css_selector("input.input-next")
review = mentoring.find_element_by_css_selector("input.input-review")
try_again = mentoring.find_element_by_css_selector("input.input-try-again")
answer = mentoring.find_element_by_css_selector("textarea.answer.editable") answer = mentoring.find_element_by_css_selector("textarea.answer.editable")
self.assertIn("Please answer the questions below.", mentoring.text) self.assertIn("Please answer the questions below.", mentoring.text)
self.assertIn("QUESTION 1", mentoring.text) self.assertIn("QUESTION 1", mentoring.text)
self.assertIn("What is your goal?", mentoring.text) self.assertIn("What is your goal?", mentoring.text)
self.assertEquals("", answer.get_attribute("value")) self.assertEquals(saved_value, answer.get_attribute("value"))
self.assert_disabled(submit) if not saved_value:
self.assert_disabled(next_question) self.assert_disabled(controls.submit)
self.assert_disabled(controls.next_question)
answer.send_keys('This is the answer') answer.clear()
self.assertEquals('This is the answer', answer.get_attribute("value")) answer.send_keys(text_input)
self.assertEquals(text_input, answer.get_attribute("value"))
self.assert_clickable(submit) self.assert_clickable(controls.submit)
self.assert_disabled(next_question) self.assert_disabled(controls.next_question)
self.assert_hidden(review) self.assert_hidden(controls.review)
self.assert_hidden(try_again) self.assert_hidden(controls.try_again)
submit.click() controls.submit.click()
self.wait_until_clickable(next_question) self.wait_until_clickable(controls.next_question)
next_question.click() self._assert_checkmark(mentoring, result)
controls.next_question.click()
# step 2 -- single choice question def at_step_2_single_choice_question(self, mentoring, controls, choice_name, result):
self.wait_until_text_in("QUESTION 2", mentoring) self.wait_until_text_in("QUESTION 2", mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring) self._selenium_bug_workaround_scroll_to(mentoring)
self.assertIn("Do you like this MCQ?", mentoring.text) self.assertIn("Do you like this MCQ?", mentoring.text)
self.assert_disabled(submit) self.assert_disabled(controls.submit)
self.assert_disabled(next_question) self.assert_disabled(controls.next_question)
self.assert_hidden(review) self.assert_hidden(controls.review)
self.assert_hidden(try_again) self.assert_hidden(controls.try_again)
choices = self._GetChoices(mentoring) choices = self._GetChoices(mentoring)
self.assertEquals(choices.state, {"Yes": False, "Maybe not": False, "I don't understand": False}) expected_state = {"Yes": False, "Maybe not": False, "I don't understand": False}
self.assertEquals(choices.state, expected_state)
choices.select(choice_name)
expected_state[choice_name] = True
self.assertEquals(choices.state, expected_state)
choices.select("Yes") self.assert_clickable(controls.submit)
self.assertEquals(choices.state, {"Yes": True, "Maybe not": False, "I don't understand": False}) self.assert_disabled(controls.next_question)
self.assert_clickable(submit) self.assert_hidden(controls.review)
self.assert_disabled(next_question) self.assert_hidden(controls.try_again)
self.assert_hidden(review)
self.assert_hidden(try_again)
submit.click() controls.submit.click()
self.wait_until_clickable(next_question) self.wait_until_clickable(controls.next_question)
next_question.click() self._assert_checkmark(mentoring, result)
controls.next_question.click()
# step 3 -- rating question def at_step_3_rating_question(self, mentoring, controls, choice_name, result):
self.wait_until_text_in("QUESTION 3", mentoring) self.wait_until_text_in("QUESTION 3", mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring) self._selenium_bug_workaround_scroll_to(mentoring)
self.assertIn("How much do you rate this MCQ?", mentoring.text) self.assertIn("How much do you rate this MCQ?", mentoring.text)
self.assert_disabled(submit) self.assert_disabled(controls.submit)
self.assert_disabled(next_question) self.assert_disabled(controls.next_question)
self.assert_hidden(review) self.assert_hidden(controls.review)
self.assert_hidden(try_again) self.assert_hidden(controls.try_again)
choices = self._GetChoices(mentoring, ".rating") choices = self._GetChoices(mentoring, ".rating")
self.assertEquals(choices.state, { expected_choices = {
"1 - Not good at all": False, "1 - Not good at all": False,
"2": False, "3": False, "4": False, "2": False, "3": False, "4": False,
"5 - Extremely good": False, "5 - Extremely good": False,
"I don't want to rate it": False, "I don't want to rate it": False,
}) }
choices.select("5 - Extremely good") self.assertEquals(choices.state, expected_choices)
self.assertEquals(choices.state, { choices.select(choice_name)
"1 - Not good at all": False, expected_choices[choice_name] = True
"2": False, "3": False, "4": False, self.assertEquals(choices.state, expected_choices)
"5 - Extremely good": True,
"I don't want to rate it": False,
})
self.assert_clickable(submit)
self.assert_disabled(next_question)
self.assert_hidden(review)
self.assert_hidden(try_again)
submit.click()
self.wait_until_clickable(next_question) self.assert_clickable(controls.submit)
next_question.click() self.assert_disabled(controls.next_question)
self.assert_hidden(controls.review)
# step 4 -- multiple choice question self.assert_hidden(controls.try_again)
self.wait_until_text_in("QUESTION 4", mentoring)
self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring)
self.assertIn("What do you like in this MRQ?", mentoring.text)
self.assert_disabled(submit) controls.submit.click()
self.assert_hidden(next_question)
self.assert_disabled(review)
self.assert_hidden(try_again)
# see if assessment remembers the current step self.wait_until_clickable(controls.next_question)
self.browser.get(self.live_server_url) self._assert_checkmark(mentoring, result)
# step 4 -- a second time controls.next_question.click()
mentoring = self.go_to_page("Assessment 1")
def peek_at_step_4_multiple_choice_question(self, mentoring, controls):
self.wait_until_text_in("QUESTION 4", mentoring) self.wait_until_text_in("QUESTION 4", mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
self._selenium_bug_workaround_scroll_to(mentoring) self._selenium_bug_workaround_scroll_to(mentoring)
self.assertIn("What do you like in this MRQ?", mentoring.text) self.assertIn("What do you like in this MRQ?", mentoring.text)
submit = mentoring.find_element_by_css_selector("input.input-main") self.assert_disabled(controls.submit)
next_question = mentoring.find_element_by_css_selector("input.input-next") self.assert_hidden(controls.next_question)
review = mentoring.find_element_by_css_selector("input.input-review") self.assert_disabled(controls.review)
try_again = mentoring.find_element_by_css_selector("input.input-try-again") self.assert_hidden(controls.try_again)
self.assert_disabled(submit) def at_step_4_multiple_choice_question(self, mentoring, controls, choice_names, result):
self.assert_hidden(next_question) self.peek_at_step_4_multiple_choice_question(mentoring, controls)
self.assert_disabled(review)
self.assert_hidden(try_again)
choices = self._GetChoices(mentoring) choices = self._GetChoices(mentoring)
self.assertEquals(choices.state, { expected_choices = {
"Its elegance": False, "Its elegance": False,
"Its beauty": False, "Its beauty": False,
"Its gracefulness": False, "Its gracefulness": False,
"Its bugs": False, "Its bugs": False,
}) }
choices.select("Its elegance") self.assertEquals(choices.state, expected_choices)
choices.select("Its beauty")
choices.select("Its gracefulness") for name in choice_names:
self.assertEquals(choices.state, { choices.select(name)
"Its elegance": True, expected_choices[name] = True
"Its beauty": True,
"Its gracefulness": True, self.assertEquals(choices.state, expected_choices)
"Its bugs": False,
})
self.assert_clickable(submit) self.assert_clickable(controls.submit)
self.assert_hidden(next_question) self.assert_hidden(controls.next_question)
self.assert_disabled(review) self.assert_disabled(controls.review)
self.assert_hidden(try_again) self.assert_hidden(controls.try_again)
submit.click() controls.submit.click()
self.wait_until_clickable(review) self.wait_until_clickable(controls.review)
review.click() self._assert_checkmark(mentoring, result)
controls.review.click()
# step 5 -- review def peek_at_step_5_review(self, mentoring, controls, expected):
self.wait_until_text_in("You scored 100% on this assessment.", mentoring) self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring)
self.assert_persistent_elements_present(mentoring) self.assert_persistent_elements_present(mentoring)
self.assertIn("Note: if you retake this assessment, only your final score counts.", mentoring.text) if expected["num_attempts"] < expected["max_attempts"]:
self.assertIn("You answered 4 questions correctly.", mentoring.text) self.assertIn("Note: if you retake this assessment, only your final score counts.", mentoring.text)
self.assertIn("You answered 0 questions partially correct.", mentoring.text) self.assertIn("You answered {correct} questions correctly.".format(**expected), mentoring.text)
self.assertIn("You answered 0 questions incorrectly.", mentoring.text) self.assertIn("You answered {partial} questions partially correct.".format(**expected), mentoring.text)
self.assertIn("You answered {incorrect} questions incorrectly.".format(**expected), mentoring.text)
self.assert_hidden(submit) self.assertIn("You have used {num_attempts} of {max_attempts} submissions.".format(**expected), mentoring.text)
self.assert_hidden(next_question)
self.assert_hidden(review) self.assert_hidden(controls.submit)
self.assert_disabled(try_again) self.assert_hidden(controls.next_question)
self.assert_hidden(controls.review)
def test_assessment(self):
mentoring, controls = self.go_to_assessment_1()
self.at_step_1_freeform_answer(mentoring, controls, 'This is the answer', CORRECT)
self.at_step_2_single_choice_question(mentoring, controls, 'Maybe not', INCORRECT)
self.at_step_3_rating_question(mentoring, controls, "5 - Extremely good", CORRECT)
self.peek_at_step_4_multiple_choice_question(mentoring, controls)
# see if assessment remembers the current step
self.go_to_workbench_main_page()
mentoring, controls = self.go_to_assessment_1()
self.at_step_4_multiple_choice_question(mentoring, controls, ("Its beauty",), PARTIAL)
expected_results = {
"correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
"num_attempts": 1, "max_attempts": 2}
self.peek_at_step_5_review(mentoring, controls, expected_results)
self.assert_clickable(controls.try_again)
controls.try_again.click()
self.at_step_1_freeform_answer(mentoring, controls, 'This is a different answer', CORRECT,
saved_value='This is the answer')
self.at_step_2_single_choice_question(mentoring, controls, 'Yes', CORRECT)
self.at_step_3_rating_question(mentoring, controls, "1 - Not good at all", INCORRECT)
user_selection = ("Its elegance", "Its beauty", "Its gracefulness")
self.at_step_4_multiple_choice_question(mentoring, controls, user_selection, CORRECT)
expected_results = {
"correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
"num_attempts": 2, "max_attempts": 2}
self.peek_at_step_5_review(mentoring, controls, expected_results)
self.assert_disabled(controls.try_again)
<mentoring url_name="mentoring-assessment" display_name="Nav tooltip title" weight="1" mode="assessment"> <mentoring url_name="mentoring-assessment" display_name="Nav tooltip title" weight="1" mode="assessment" max_attempts="2">
<title>A Simple Assessment</title> <title>A Simple Assessment</title>
<shared-header> <shared-header>
<p>This paragraph is shared between <strong>all</strong> questions.</p> <p>This paragraph is shared between <strong>all</strong> questions.</p>
...@@ -17,11 +17,19 @@ ...@@ -17,11 +17,19 @@
<choice value="yes">Yes</choice> <choice value="yes">Yes</choice>
<choice value="maybenot">Maybe not</choice> <choice value="maybenot">Maybe not</choice>
<choice value="understand">I don't understand</choice> <choice value="understand">I don't understand</choice>
<tip display="yes">Great!</tip>
<tip reject="maybenot">Ah, damn.</tip>
<tip reject="understand"><html><div id="test-custom-html">Really?</div></html></tip>
</mcq> </mcq>
<mcq name="mcq_1_2" type="rating" low="Not good at all" high="Extremely good"> <mcq name="mcq_1_2" type="rating" low="Not good at all" high="Extremely good">
<question>How much do you rate this MCQ?</question> <question>How much do you rate this MCQ?</question>
<choice value="notwant">I don't want to rate it</choice> <choice value="notwant">I don't want to rate it</choice>
<tip display="4,5">I love good grades.</tip>
<tip reject="1,2,3">Will do better next time...</tip>
<tip reject="notwant">Your loss!</tip>
</mcq> </mcq>
<mrq name="mrq_1_1" type="choices"> <mrq name="mrq_1_1" type="choices">
...@@ -30,5 +38,9 @@ ...@@ -30,5 +38,9 @@
<choice value="beauty">Its beauty</choice> <choice value="beauty">Its beauty</choice>
<choice value="gracefulness">Its gracefulness</choice> <choice value="gracefulness">Its gracefulness</choice>
<choice value="bugs">Its bugs</choice> <choice value="bugs">Its bugs</choice>
<tip require="gracefulness">This MRQ is indeed very graceful</tip>
<tip require="elegance,beauty">This is something everyone has to like about this MRQ</tip>
<tip reject="bugs">Nah, there isn't any!</tip>
</mrq> </mrq>
</mentoring> </mentoring>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment