Commit 2e72dcee by Eugeny Kolpakov

Merge pull request #30 from open-craft/feedback-persistence

Feedback persistence
parents 78decd9a 43ba649b
@@ -189,6 +189,9 @@ class AnswerBlock(AnswerMixin, StepMixin, StudioEditableXBlockMixin, XBlock):
             'score': 1 if self.status == 'correct' else 0,
         }

+    def get_last_result(self):
+        return self.get_results(None) if self.student_input else {}
+
     def submit(self, submission):
         """
         The parent block is handling a student submission, including a new answer for this
...
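The new get_last_result() hook is the core of the feedback-persistence change: on page load, the parent block asks each child to re-grade its stored submission instead of waiting for a fresh AJAX submit. A minimal sketch of that contract, with plain attributes standing in for persisted XBlock fields (FakeAnswerBlock is hypothetical, for illustration only):

    # Toy model of the get_last_result() contract introduced in this commit.
    class FakeAnswerBlock:
        def __init__(self, student_input=""):
            self.student_input = student_input  # stands in for a persisted XBlock field
            self.status = "correct" if student_input else "incorrect"

        def get_results(self, previous_result):
            # AnswerBlock ignores previous_result and grades the stored input.
            return {"student_input": self.student_input,
                    "status": self.status,
                    "score": 1 if self.status == "correct" else 0}

        def get_last_result(self):
            # An empty dict signals "nothing submitted yet" to the parent block.
            return self.get_results(None) if self.student_input else {}

    print(FakeAnswerBlock().get_last_result())             # {}
    print(FakeAnswerBlock("An answer").get_last_result())  # graded result dict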
@@ -105,6 +105,9 @@ class MCQBlock(SubmittingXBlockMixin, QuestionnaireAbstractBlock):
     def get_results(self, previous_result):
         return self.calculate_results(previous_result['submission'])

+    def get_last_result(self):
+        return self.get_results({'submission': self.student_choice}) if self.student_choice else {}
+
     def submit(self, submission):
         log.debug(u'Received MCQ submission: "%s"', submission)
         result = self.calculate_results(submission)
...
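Each question type replays its stored state through get_results() with a differently shaped previous_result payload. A small sketch of the three contracts as they appear in this commit (the stored values themselves are made up):

    # Hypothetical stored values; only the payload shapes are taken from the diff.
    answer_payload = None                                # AnswerBlock grades self.student_input directly
    mcq_payload = {'submission': 'yes'}                  # MCQBlock wraps self.student_choice
    mrq_payload = {'submissions': ['elegance', 'bugs']}  # MRQBlock wraps self.student_choices
                                                         # (and passes only_selected=True, see below)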
@@ -435,23 +435,68 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
         """
         Gets detailed results in the case of extended feedback.

-        It may be a good idea to eventually have this function get results
-        in the general case instead of loading them in the template in the future,
-        and only using it for extended feedback situations.

         Right now there are two ways to get results-- through the template upon loading up
         the mentoring block, or after submission of an AJAX request like in
         submit or get_results here.
         """
-        results = []
+        if self.mode == 'standard':
+            results, completed, show_message = self._get_standard_results()
+            mentoring_completed = completed
+        else:
             if not self.show_extended_feedback():
                 return {
                     'results': [],
                     'error': 'Extended feedback results cannot be obtained.'
                 }
+            results, completed, show_message = self._get_assessment_results(queries)
+            mentoring_completed = True
+
+        result = {
+            'results': results,
+            'completed': completed,
+            'step': self.step,
+            'max_attempts': self.max_attempts,
+            'num_attempts': self.num_attempts,
+        }
+        if show_message:
+            result['message'] = self.get_message(mentoring_completed)
+
+        return result
+
+    def _get_standard_results(self):
+        """
+        Gets previous submission results as if submit was called with exactly the same values as last time.
+        """
+        results = []
+        completed = True
+        show_message = bool(self.student_results)
+
+        # In standard mode, all children are visible simultaneously, so responses need to be collected from all of them
+        for child_id in self.steps:
+            child = self.runtime.get_block(child_id)
+            child_result = child.get_last_result()
+            results.append([child.name, child_result])
+            completed = completed and (child_result.get('status', None) == 'correct')
+
+        return results, completed, show_message
+
+    def _get_assessment_results(self, queries):
+        """
+        Gets detailed results in the case of extended feedback.
+
+        It may be a good idea to eventually have this function get results
+        in the general case instead of loading them in the template in the future,
+        and only using it for extended feedback situations.
+
+        Right now there are two ways to get results-- through the template upon loading up
+        the mentoring block, or after submission of an AJAX request like in
+        submit or get_results here.
+        """
+        results = []
         completed = True
         choices = dict(self.student_results)
-        step = self.step
         # Only one child should ever be of concern with this method.
         for child_id in self.steps:
             child = self.runtime.get_block(child_id)
@@ -464,17 +509,7 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
                 completed = choices[child.name]['status']
                 break

-        # The 'completed' message should always be shown in this case, since no more attempts are available.
-        message = self.get_message(True)
-        return {
-            'results': results,
-            'completed': completed,
-            'message': message,
-            'step': step,
-            'max_attempts': self.max_attempts,
-            'num_attempts': self.num_attempts,
-        }
+        return results, completed, True

     @XBlock.json_handler
     def submit(self, submissions, suffix=''):
...
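In standard mode, completion is recomputed from whatever each child reports as its last result. A toy model of _get_standard_results(), under the assumption that children can be stubbed as (name, last_result) pairs (the real block resolves them via runtime.get_block(); get_standard_results here is hypothetical):

    def get_standard_results(children, has_submissions):
        """Toy stand-in for MentoringBlock._get_standard_results()."""
        results = []
        completed = True
        show_message = has_submissions  # bool(self.student_results) in the real block
        for name, last_result in children:
            results.append([name, last_result])
            # A child with no stored submission returns {}, so .get() keeps it incomplete.
            completed = completed and last_result.get('status') == 'correct'
        return results, completed, show_message

    print(get_standard_results([('q1', {'status': 'correct'}), ('q2', {})], True))
    # ([['q1', {'status': 'correct'}], ['q2', {}]], False, True)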
@@ -81,14 +81,20 @@ class MRQBlock(QuestionnaireAbstractBlock):
             return self._(u"Ignored")
         return self._(u"Not Acceptable")

-    def get_results(self, previous_result):
+    def get_results(self, previous_result, only_selected=False):
         """
         Get the results a student has already submitted.
         """
-        result = self.calculate_results(previous_result['submissions'])
+        result = self.calculate_results(previous_result['submissions'], only_selected)
         result['completed'] = True
         return result

+    def get_last_result(self):
+        if self.student_choices:
+            return self.get_results({'submissions': self.student_choices}, only_selected=True)
+        else:
+            return {}
+
     def submit(self, submissions):
         log.debug(u'Received MRQ submissions: "%s"', submissions)
@@ -98,13 +104,17 @@ class MRQBlock(QuestionnaireAbstractBlock):
         log.debug(u'MRQ submissions result: %s', result)
         return result

-    def calculate_results(self, submissions):
+    def calculate_results(self, submissions, only_selected=False):
         score = 0
         results = []

         for choice in self.custom_choices:
             choice_completed = True
             choice_tips_html = []
             choice_selected = choice.value in submissions
+
+            if not choice_selected and only_selected:
+                continue
+
             if choice.value in self.required_choices:
                 if not choice_selected:
                     choice_completed = False
...
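The only_selected flag changes what calculate_results() reports when replaying a stored submission: tips are rebuilt only for choices the student actually ticked, which is why a reloaded page shows feedback on fewer choices than a fresh submit (see _standard_checks in the tests below). A reduced sketch of just the filtering, with choice values as plain strings (filter_choice_results is a hypothetical name):

    def filter_choice_results(choice_values, submissions, only_selected=False):
        """Reduced model of the filtering added to MRQBlock.calculate_results()."""
        results = []
        for value in choice_values:
            selected = value in submissions
            if not selected and only_selected:
                continue  # on replay, skip choices the student never picked
            results.append({'value': value, 'selected': selected})
        return results

    choices = ['elegance', 'beauty', 'gracefulness', 'bugs']
    print(len(filter_choice_results(choices, ['elegance', 'bugs'])))                      # 4
    print(len(filter_choice_results(choices, ['elegance', 'bugs'], only_selected=True)))  # 2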
@@ -31,13 +31,14 @@ function AnswerBlock(runtime, element) {
                 // Display of checkmark would be redundant.
                 return
             }

+            if (result.status) {
                 if (result.status === "correct") {
                     checkmark.addClass('checkmark-correct icon-ok fa-check');
                 }
                 else {
                     checkmark.addClass('checkmark-incorrect icon-exclamation fa-exclamation');
                 }
+            }
         },

         clearResult: function() {
...
@@ -28,8 +28,6 @@ function MentoringStandardView(runtime, element, mentoring) {
             messagesDOM.prepend('<div class="title1">' + gettext('Feedback') + '</div>');
             messagesDOM.show();
         }
-
-        submitDOM.attr('disabled', 'disabled');
     }

     function handleSubmitError(jqXHR, textStatus, errorThrown) {
@@ -45,12 +43,10 @@ function MentoringStandardView(runtime, element, mentoring) {
             mentoring.setContent(messagesDOM, errMsg);
             messagesDOM.show();
-
-            submitDOM.attr('disabled', 'disabled');
         }
     }

-    function calculate_results(handler_name) {
+    function calculate_results(handler_name, disable_submit) {
         var data = {};
         var children = mentoring.children;
         for (var i = 0; i < children.length; i++) {
@@ -64,10 +60,19 @@ function MentoringStandardView(runtime, element, mentoring) {
             submitXHR.abort();
         }
         submitXHR = $.post(handlerUrl, JSON.stringify(data)).success(handleSubmitResults).error(handleSubmitError);
+
+        if (disable_submit) {
+            var disable_submit_callback = function() { submitDOM.attr('disabled', 'disabled'); };
+            submitXHR.success(disable_submit_callback).error(disable_submit_callback);
+        }
+    }
+
+    function get_results() {
+        calculate_results('get_results', false);
     }

     function submit() {
-        calculate_results('submit');
+        calculate_results('submit', true);
     }

     function clearResults() {
@@ -97,6 +102,8 @@ function MentoringStandardView(runtime, element, mentoring) {
         mentoring.initChildren(options);
         mentoring.renderDependency();

+        get_results();
+
         var submitPossible = submitDOM.length > 0;
         if (submitPossible) {
             mentoring.renderAttempts();
...
@@ -85,6 +85,13 @@ class ProblemBuilderBaseTest(SeleniumXBlockTest, PopupCheckMixin):
         submit.click()
         self.wait_until_disabled(submit)

+    def click_choice(self, container, choice_text):
+        """ Click on the choice label with the specified text """
+        for label in container.find_elements_by_css_selector('.choice label'):
+            if choice_text in label.text:
+                label.click()
+                break
+

 class MentoringBaseTest(SeleniumBaseTest, PopupCheckMixin):
     module_name = __name__
...
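A hypothetical usage of the now-shared click_choice() helper; the scenario file and block name are the ones this commit adds, but the test itself is illustrative, not part of the commit:

    from .base_test import ProblemBuilderBaseTest

    class ExampleChoiceTest(ProblemBuilderBaseTest):
        def test_click_yes_and_submit(self):
            mentoring = self.load_scenario("feedback_persistence.xml")
            mcq = mentoring.find_element_by_css_selector(".xblock-v1[data-name='feedback_mcq_2']")
            self.click_choice(mcq, "Yes")  # clicks the first '.choice label' containing "Yes"
            self.click_submit(mentoring)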
@@ -21,7 +21,7 @@ import re
 import mock
 import ddt
 from selenium.common.exceptions import NoSuchElementException
-from .base_test import MentoringBaseTest, MentoringAssessmentBaseTest, GetChoices
+from .base_test import MentoringBaseTest, MentoringAssessmentBaseTest, GetChoices, ProblemBuilderBaseTest


 class MentoringTest(MentoringBaseTest):
@@ -72,3 +72,169 @@ class MentoringThemeTest(MentoringAssessmentBaseTest):
         with mock.patch("problem_builder.MentoringBlock.get_theme") as patched_theme:
             patched_theme.return_value = _get_mentoring_theme_settings(theme)
             self.assert_status_icon_color(expected_color)
+
+
+@ddt.ddt
+class ProblemBuilderQuestionnaireBlockTest(ProblemBuilderBaseTest):
+    def _get_xblock(self, mentoring, name):
+        return mentoring.find_element_by_css_selector(".xblock-v1[data-name='{}']".format(name))
+
+    def _get_choice(self, questionnaire, choice_index):
+        return questionnaire.find_elements_by_css_selector(".choices-list .choice")[choice_index]
+
+    def _get_messages_element(self, mentoring):
+        return mentoring.find_element_by_css_selector('.messages')
+
+    def _get_controls(self, mentoring):
+        answer = self._get_xblock(mentoring, "feedback_answer_1").find_element_by_css_selector('.answer')
+        mcq = self._get_xblock(mentoring, "feedback_mcq_2")
+        mrq = self._get_xblock(mentoring, "feedback_mrq_3")
+        rating = self._get_xblock(mentoring, "feedback_rating_4")
+        return answer, mcq, mrq, rating
+
+    def _assert_checkmark(self, checkmark, shown=True, checkmark_class=None):
+        choice_result_classes = checkmark.get_attribute('class').split()
+        if shown:
+            self.assertTrue(checkmark.is_displayed())
+            self.assertIn(checkmark_class, choice_result_classes)
+        else:
+            self.assertFalse(checkmark.is_displayed())
+
+    def _assert_feedback_showed(self, questionnaire, choice_index, expected_text,
+                                click_choice_result=False, success=True):
+        """
+        Asserts that the feedback for the given element contains the expected text.
+
+        If `click_choice_result` is True, clicks on the `choice-result` icon before checking
+        feedback visibility, since MRQ feedback is not shown right away.
+        """
+        choice = self._get_choice(questionnaire, choice_index)
+        choice_result = choice.find_element_by_css_selector('.choice-result')
+        if click_choice_result:
+            choice_result.click()
+
+        feedback_popup = choice.find_element_by_css_selector(".choice-tips")
+        checkmark_class = 'checkmark-correct' if success else 'checkmark-incorrect'
+        self._assert_checkmark(choice_result, shown=True, checkmark_class=checkmark_class)
+        self.assertTrue(feedback_popup.is_displayed())
+        self.assertEqual(feedback_popup.text, expected_text)
+
+    def _assert_feedback_hidden(self, questionnaire, choice_index):
+        choice = self._get_choice(questionnaire, choice_index)
+        choice_result = choice.find_element_by_css_selector('.choice-result')
+        feedback_popup = choice.find_element_by_css_selector(".choice-tips")
+        choice_result_classes = choice_result.get_attribute('class').split()
+
+        self.assertTrue(choice_result.is_displayed())
+        self.assertFalse(feedback_popup.is_displayed())
+        self.assertNotIn('checkmark-correct', choice_result_classes)
+        self.assertNotIn('checkmark-incorrect', choice_result_classes)
+
+    def _standard_filling(self, answer, mcq, mrq, rating):
+        answer.send_keys('This is the answer')
+        self.click_choice(mcq, "Yes")
+        # 1st, 3rd and 4th MRQ options; the first three are the correct ones, so this makes
+        # two mistakes: the 2nd is missing and the 4th should not be selected
+        self.click_choice(mrq, "Its elegance")
+        self.click_choice(mrq, "Its gracefulness")
+        self.click_choice(mrq, "Its bugs")
+        self.click_choice(rating, "4")
+
+    # mcq and rating can't be reset easily, but that's not required; they are listed here
+    # to keep the method signature similar to _standard_filling
+    def _clear_filling(self, answer, mcq, mrq, rating):  # pylint: disable=unused-argument
+        answer.clear()
+        for checkbox in mrq.find_elements_by_css_selector('.choice input'):
+            if checkbox.is_selected():
+                checkbox.click()
+
+    def _standard_checks(self, answer, mcq, mrq, rating, messages, only_selected=False):
+        self.assertEqual(answer.get_attribute('value'), 'This is the answer')
+        self._assert_feedback_showed(mcq, 0, "Great!")
+        self._assert_feedback_showed(
+            mrq, 0, "This is something everyone has to like about this MRQ",
+            click_choice_result=True
+        )
+        if not only_selected:
+            self._assert_feedback_showed(
+                mrq, 1, "This is something everyone has to like about beauty",
+                click_choice_result=True, success=False
+            )
+        else:
+            self._assert_feedback_hidden(mrq, 1)
+        self._assert_feedback_showed(mrq, 2, "This MRQ is indeed very graceful", click_choice_result=True)
+        self._assert_feedback_showed(mrq, 3, "Nah, there aren't any!", click_choice_result=True, success=False)
+        self._assert_feedback_showed(rating, 3, "I love good grades.", click_choice_result=True)
+        self.assertTrue(messages.is_displayed())
+        self.assertEqual(messages.text, "FEEDBACK\nNot done yet")
+
+    def test_feedbacks_and_messages_is_not_shown_on_first_load(self):
+        mentoring = self.load_scenario("feedback_persistence.xml")
+        answer, mcq, mrq, rating = self._get_controls(mentoring)
+        messages = self._get_messages_element(mentoring)
+
+        answer_checkmark = answer.find_element_by_xpath("parent::*").find_element_by_css_selector(".answer-checkmark")
+        self._assert_checkmark(answer_checkmark, shown=False)
+        for i in range(3):
+            self._assert_feedback_hidden(mcq, i)
+        for i in range(4):
+            self._assert_feedback_hidden(mrq, i)
+        for i in range(5):
+            self._assert_feedback_hidden(rating, i)
+        self.assertFalse(messages.is_displayed())
+
+    def test_persists_feedback_on_page_reload(self):
+        mentoring = self.load_scenario("feedback_persistence.xml")
+        answer, mcq, mrq, rating = self._get_controls(mentoring)
+        messages = self._get_messages_element(mentoring)
+
+        self._standard_filling(answer, mcq, mrq, rating)
+        self.click_submit(mentoring)
+        self._standard_checks(answer, mcq, mrq, rating, messages)
+
+        # Now reload the page and do the same checks again
+        mentoring = self.go_to_view("student_view")
+        answer, mcq, mrq, rating = self._get_controls(mentoring)
+        messages = self._get_messages_element(mentoring)
+        self._standard_checks(answer, mcq, mrq, rating, messages, only_selected=True)
+
+    def test_given_perfect_score_in_past_loads_current_result(self):
+        mentoring = self.load_scenario("feedback_persistence.xml")
+        answer, mcq, mrq, rating = self._get_controls(mentoring)
+        messages = self._get_messages_element(mentoring)
+
+        answer.send_keys('This is the answer')
+        self.click_choice(mcq, "Yes")
+        # 1st, 2nd and 3rd MRQ options: all three correct choices, i.e. no mistakes
+        self.click_choice(mrq, "Its elegance")
+        self.click_choice(mrq, "Its gracefulness")
+        self.click_choice(mrq, "Its beauty")
+        self.click_choice(rating, "4")
+        self.click_submit(mentoring)
+
+        # Precondition check: verify that a 100% score was achieved
+        self.assertEqual(answer.get_attribute('value'), 'This is the answer')
+        self._assert_feedback_showed(mcq, 0, "Great!")
+        self._assert_feedback_showed(
+            mrq, 0, "This is something everyone has to like about this MRQ",
+            click_choice_result=True
+        )
+        self._assert_feedback_showed(
+            mrq, 1, "This is something everyone has to like about beauty",
+            click_choice_result=True
+        )
+        self._assert_feedback_showed(mrq, 2, "This MRQ is indeed very graceful", click_choice_result=True)
+        self._assert_feedback_showed(mrq, 3, "Nah, there aren't any!", click_choice_result=True)
+        self._assert_feedback_showed(rating, 3, "I love good grades.", click_choice_result=True)
+        self.assertTrue(messages.is_displayed())
+        self.assertEqual(messages.text, "FEEDBACK\nAll Good")
+
+        self._clear_filling(answer, mcq, mrq, rating)
+        self._standard_filling(answer, mcq, mrq, rating)
+        self.click_submit(mentoring)
+        self._standard_checks(answer, mcq, mrq, rating, messages)
+
+        # Now reload the page and make sure the LATEST submission is loaded and its feedback shown
+        mentoring = self.go_to_view("student_view")
+        answer, mcq, mrq, rating = self._get_controls(mentoring)
+        messages = self._get_messages_element(mentoring)
+        self._standard_checks(answer, mcq, mrq, rating, messages, only_selected=True)
...
@@ -50,13 +50,6 @@ class MessagesTest(ProblemBuilderBaseTest):
             message_text = message_text[8:].lstrip()
         self.assertEqual(MESSAGES[msg_type], message_text)

-    def click_choice(self, container, choice_text):
-        """ Click on the choice label with the specified text """
-        for label in container.find_elements_by_css_selector('.choices .choice label'):
-            if choice_text in label.text:
-                label.click()
-                break
-
     @ddt.data(
         ("One", COMPLETED),
         ("Two", COMPLETED),
...
+<vertical_demo>
+    <problem-builder url_name="feedback" enforce_dependency="false">
+        <pb-answer name="feedback_answer_1" />
+        <pb-mcq name="feedback_mcq_2" question="Do you like this MCQ?" correct_choices='["yes"]'>
+            <pb-choice value="yes">Yes</pb-choice>
+            <pb-choice value="maybenot">Maybe not</pb-choice>
+            <pb-choice value="understand">I don't understand</pb-choice>
+            <pb-tip values='["yes"]'>Great!</pb-tip>
+            <pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
+            <pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
+        </pb-mcq>
+        <pb-mrq name="feedback_mrq_3" question="What do you like in this MRQ?" required_choices='["elegance","gracefulness","beauty"]'>
+            <pb-choice value="elegance">Its elegance</pb-choice>
+            <pb-choice value="beauty">Its beauty</pb-choice>
+            <pb-choice value="gracefulness">Its gracefulness</pb-choice>
+            <pb-choice value="bugs">Its bugs</pb-choice>
+            <pb-tip values='["elegance"]'>This is something everyone has to like about this MRQ</pb-tip>
+            <pb-tip values='["beauty"]'>This is something everyone has to like about beauty</pb-tip>
+            <pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
+            <pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
+        </pb-mrq>
+        <pb-rating name="feedback_rating_4" low="Not good at all" high="Extremely good" question="How do you rate this MCQ?" correct_choices='["4","5"]'>
+            <pb-choice value="notwant">I don't want to rate it</pb-choice>
+            <pb-tip values='["4","5"]'>I love good grades.</pb-tip>
+            <pb-tip values='["1","2","3"]'>Will do better next time...</pb-tip>
+            <pb-tip values='["notwant"]'>Your loss!</pb-tip>
+        </pb-rating>
+        <pb-message type="completed">All Good</pb-message>
+        <pb-message type="incomplete">Not done yet</pb-message>
+    </problem-builder>
+</vertical_demo>