problem-builder (OpenEdx) · commit 2e72dcee
Authored May 11, 2015 by Eugeny Kolpakov
Merge pull request #30 from open-craft/feedback-persistence
Feedback persistence
Parents: 78decd9a, 43ba649b
Showing 10 changed files with 303 additions and 40 deletions:
problem_builder/answer.py (+3, -0)
problem_builder/mcq.py (+3, -0)
problem_builder/mentoring.py (+52, -17)
problem_builder/mrq.py (+13, -3)
problem_builder/public/js/answer.js (+7, -6)
problem_builder/public/js/mentoring_standard_view.js (+13, -6)
problem_builder/tests/integration/base_test.py (+7, -0)
problem_builder/tests/integration/test_mentoring.py (+167, -1)
problem_builder/tests/integration/test_messages.py (+0, -7)
problem_builder/tests/integration/xml_templates/feedback_persistence.xml (+38, -0)
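Taken together, the server-side changes give every question block a `get_last_result()` method and teach the mentoring block to replay those stored results when the page loads, so feedback survives a reload. A minimal sketch of that flow, using made-up stand-in classes rather than the actual problem-builder code:

```python
# Hypothetical stand-ins, not problem-builder classes: they only illustrate
# how per-child "last results" get folded into one completion flag.
class FakeChild:
    def __init__(self, name, last_result):
        self.name = name
        self._last = last_result

    def get_last_result(self):
        # An empty dict means "never submitted", so no feedback is rendered.
        return self._last

def get_results_on_load(children):
    results, completed = [], True
    for child in children:
        last = child.get_last_result()
        results.append([child.name, last])
        completed = completed and last.get('status') == 'correct'
    return {'results': results, 'completed': completed}

state = get_results_on_load([
    FakeChild('mcq_1', {'status': 'correct'}),
    FakeChild('answer_1', {}),  # untouched question
])
assert state['completed'] is False  # one untouched child keeps the block incomplete
```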
problem_builder/answer.py
```diff
@@ -189,6 +189,9 @@ class AnswerBlock(AnswerMixin, StepMixin, StudioEditableXBlockMixin, XBlock):
             'score': 1 if self.status == 'correct' else 0,
         }
 
+    def get_last_result(self):
+        return self.get_results(None) if self.student_input else {}
+
     def submit(self, submission):
         """
         The parent block is handling a student submission, including a new answer for this
```
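A note on the `get_results(None)` call above: unlike the MCQ/MRQ blocks below, a long-answer block's feedback depends only on its stored text, so no prior submission payload is needed. A hedged sketch of that idea (the stand-in class and its status rule are assumptions for illustration, not the real AnswerBlock logic):

```python
class FakeAnswerBlock:
    """Stand-in for AnswerBlock; the status rule here is invented for illustration."""
    def __init__(self, student_input=''):
        self.student_input = student_input
        self.status = 'correct' if student_input else 'incorrect'

    def get_results(self, previous_result):
        # previous_result is deliberately unused: the persisted text is the state.
        return {'status': self.status,
                'score': 1 if self.status == 'correct' else 0}

    def get_last_result(self):
        return self.get_results(None) if self.student_input else {}

assert FakeAnswerBlock().get_last_result() == {}          # nothing submitted yet
assert FakeAnswerBlock('hello').get_last_result()['score'] == 1
```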
problem_builder/mcq.py
```diff
@@ -105,6 +105,9 @@ class MCQBlock(SubmittingXBlockMixin, QuestionnaireAbstractBlock):
     def get_results(self, previous_result):
         return self.calculate_results(previous_result['submission'])
 
+    def get_last_result(self):
+        return self.get_results({'submission': self.student_choice}) if self.student_choice else {}
+
     def submit(self, submission):
         log.debug(u'Received MCQ submission: "%s"', submission)
         result = self.calculate_results(submission)
```
problem_builder/mentoring.py
```diff
@@ -435,6 +435,57 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
         """
         Gets detailed results in the case of extended feedback.
 
+        Right now there are two ways to get results-- through the template upon loading up
+        the mentoring block, or after submission of an AJAX request like in
+        submit or get_results here.
+        """
+        if self.mode == 'standard':
+            results, completed, show_message = self._get_standard_results()
+            mentoring_completed = completed
+        else:
+            if not self.show_extended_feedback():
+                return {
+                    'results': [],
+                    'error': 'Extended feedback results cannot be obtained.'
+                }
+            results, completed, show_message = self._get_assessment_results(queries)
+            mentoring_completed = True
+
+        result = {
+            'results': results,
+            'completed': completed,
+            'step': self.step,
+            'max_attempts': self.max_attempts,
+            'num_attempts': self.num_attempts,
+        }
+
+        if show_message:
+            result['message'] = self.get_message(mentoring_completed)
+
+        return result
+
+    def _get_standard_results(self):
+        """
+        Gets previous submissions results as if submit was called with exactly the same values as last time.
+        """
+        results = []
+        completed = True
+        show_message = bool(self.student_results)
+
+        # In standard mode, all children are visible simultaneously, so we need to collect responses from all of them
+        for child_id in self.steps:
+            child = self.runtime.get_block(child_id)
+            child_result = child.get_last_result()
+            results.append([child.name, child_result])
+            completed = completed and (child_result.get('status', None) == 'correct')
+
+        return results, completed, show_message
+
+    def _get_assessment_results(self, queries):
+        """
+        Gets detailed results in the case of extended feedback.
+
         It may be a good idea to eventually have this function get results
         in the general case instead of loading them in the template in the future,
         and only using it for extended feedback situations.
@@ -444,14 +495,8 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
         submit or get_results here.
         """
         results = []
-        if not self.show_extended_feedback():
-            return {
-                'results': [],
-                'error': 'Extended feedback results cannot be obtained.'
-            }
         completed = True
         choices = dict(self.student_results)
-        step = self.step
         # Only one child should ever be of concern with this method.
         for child_id in self.steps:
             child = self.runtime.get_block(child_id)
@@ -464,17 +509,7 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
             completed = choices[child.name]['status']
             break
 
-        message = self.get_message(True)
-        return {
-            'results': results,
-            'completed': completed,
-            'message': message,
-            'step': step,
-            'max_attempts': self.max_attempts,
-            'num_attempts': self.num_attempts,
-        }
+        # The 'completed' message should always be shown in this case, since no more attempts are available.
+        return results, completed, True
 
     @XBlock.json_handler
     def submit(self, submissions, suffix=''):
```
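The refactored `get_results` now branches on mode before deciding what to replay: standard mode always answers from each child's last result, while assessment mode refuses unless extended feedback is enabled. A condensed sketch of that dispatch (simplified, with plain arguments standing in for XBlock state):

```python
def get_results_dispatch(mode, extended_feedback_enabled, standard_results, assessment_results):
    # Simplified mirror of the branching in MentoringBlock.get_results above;
    # argument names are stand-ins, not the real XBlock fields.
    if mode == 'standard':
        results, completed = standard_results
        show_completed_message = completed
    else:
        if not extended_feedback_enabled:
            return {'results': [], 'error': 'Extended feedback results cannot be obtained.'}
        results, completed = assessment_results
        show_completed_message = True
    return {'results': results, 'completed': completed,
            'show_message_for': show_completed_message}

# Assessment mode without extended feedback short-circuits with an error payload:
assert 'error' in get_results_dispatch('assessment', False, ([], True), ([], True))
# Standard mode never needs the extended-feedback gate:
assert 'error' not in get_results_dispatch('standard', False, ([], True), ([], True))
```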
problem_builder/mrq.py
```diff
@@ -81,14 +81,20 @@ class MRQBlock(QuestionnaireAbstractBlock):
             return self._(u"Ignored")
         return self._(u"Not Acceptable")
 
-    def get_results(self, previous_result):
+    def get_results(self, previous_result, only_selected=False):
         """
         Get the results a student has already submitted.
         """
-        result = self.calculate_results(previous_result['submissions'])
+        result = self.calculate_results(previous_result['submissions'], only_selected)
         result['completed'] = True
         return result
 
+    def get_last_result(self):
+        if self.student_choices:
+            return self.get_results({'submissions': self.student_choices}, only_selected=True)
+        else:
+            return {}
+
     def submit(self, submissions):
         log.debug(u'Received MRQ submissions: "%s"', submissions)
@@ -98,13 +104,17 @@ class MRQBlock(QuestionnaireAbstractBlock):
         log.debug(u'MRQ submissions result: %s', result)
         return result
 
-    def calculate_results(self, submissions):
+    def calculate_results(self, submissions, only_selected=False):
         score = 0
         results = []
 
         for choice in self.custom_choices:
             choice_completed = True
             choice_tips_html = []
             choice_selected = choice.value in submissions
+            if not choice_selected and only_selected:
+                continue
             if choice.value in self.required_choices:
                 if not choice_selected:
                     choice_completed = False
```
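The new `only_selected` flag is what makes MRQ feedback replay honest: on a fresh submit every choice is evaluated, but when results are rebuilt from storage, tips for choices the student never ticked are skipped, since they were never shown in the first place. A small sketch of just that filter, using the choice values from the feedback_persistence.xml scenario below:

```python
# Filter logic extracted from calculate_results above; the choice and
# submission values mirror the feedback_persistence.xml test scenario.
custom_choices = ['elegance', 'beauty', 'gracefulness', 'bugs']
submissions = ['elegance', 'gracefulness', 'bugs']

def choices_with_feedback(only_selected):
    shown = []
    for choice in custom_choices:
        choice_selected = choice in submissions
        if not choice_selected and only_selected:
            continue  # replay: unselected choices produce no feedback
        shown.append(choice)
    return shown

assert choices_with_feedback(only_selected=False) == custom_choices  # fresh submit
assert choices_with_feedback(only_selected=True) == submissions      # page reload
```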
problem_builder/public/js/answer.js
```diff
@@ -31,12 +31,13 @@ function AnswerBlock(runtime, element) {
                 // Display of checkmark would be redundant.
                 return
             }
-            if (result.status) {
+            if (result.status === "correct") {
                 checkmark.addClass('checkmark-correct icon-ok fa-check');
             }
             else {
                 checkmark.addClass('checkmark-incorrect icon-exclamation fa-exclamation');
             }
         },
```
problem_builder/public/js/mentoring_standard_view.js
```diff
@@ -28,8 +28,6 @@ function MentoringStandardView(runtime, element, mentoring) {
             messagesDOM.prepend('<div class="title1">' + gettext('Feedback') + '</div>');
             messagesDOM.show();
         }
-
-        submitDOM.attr('disabled', 'disabled');
     }
 
     function handleSubmitError(jqXHR, textStatus, errorThrown) {
@@ -45,12 +43,10 @@ function MentoringStandardView(runtime, element, mentoring) {
             mentoring.setContent(messagesDOM, errMsg);
             messagesDOM.show();
-
-            submitDOM.attr('disabled', 'disabled');
         }
     }
 
-    function calculate_results(handler_name) {
+    function calculate_results(handler_name, disable_submit) {
         var data = {};
         var children = mentoring.children;
         for (var i = 0; i < children.length; i++) {
@@ -64,10 +60,19 @@ function MentoringStandardView(runtime, element, mentoring) {
             submitXHR.abort();
         }
         submitXHR = $.post(handlerUrl, JSON.stringify(data)).success(handleSubmitResults).error(handleSubmitError);
+        if (disable_submit) {
+            var disable_submit_callback = function() {
+                submitDOM.attr('disabled', 'disabled');
+            };
+            submitXHR.success(disable_submit_callback).error(disable_submit_callback);
+        }
+    }
+
+    function get_results() {
+        calculate_results('get_results', false);
     }
 
     function submit() {
-        calculate_results('submit');
+        calculate_results('submit', true);
     }
 
     function clearResults() {
@@ -97,6 +102,8 @@ function MentoringStandardView(runtime, element, mentoring) {
         mentoring.initChildren(options);
         mentoring.renderDependency();
 
+        get_results();
+
         var submitPossible = submitDOM.length > 0;
         if (submitPossible) {
             mentoring.renderAttempts();
```
problem_builder/tests/integration/base_test.py
```diff
@@ -85,6 +85,13 @@ class ProblemBuilderBaseTest(SeleniumXBlockTest, PopupCheckMixin):
         submit.click()
         self.wait_until_disabled(submit)
 
+    def click_choice(self, container, choice_text):
+        """ Click on the choice label with the specified text """
+        for label in container.find_elements_by_css_selector('.choice label'):
+            if choice_text in label.text:
+                label.click()
+                break
+
 
 class MentoringBaseTest(SeleniumBaseTest, PopupCheckMixin):
     module_name = __name__
```
problem_builder/tests/integration/test_mentoring.py
```diff
@@ -21,7 +21,7 @@ import re
 import mock
 import ddt
 from selenium.common.exceptions import NoSuchElementException
-from .base_test import MentoringBaseTest, MentoringAssessmentBaseTest, GetChoices
+from .base_test import MentoringBaseTest, MentoringAssessmentBaseTest, GetChoices, ProblemBuilderBaseTest
 
 
 class MentoringTest(MentoringBaseTest):
@@ -72,3 +72,169 @@ class MentoringThemeTest(MentoringAssessmentBaseTest):
         with mock.patch("problem_builder.MentoringBlock.get_theme") as patched_theme:
             patched_theme.return_value = _get_mentoring_theme_settings(theme)
             self.assert_status_icon_color(expected_color)
```

The remainder of the hunk adds a new test class:

```python
@ddt.ddt
class ProblemBuilderQuestionnaireBlockTest(ProblemBuilderBaseTest):
    def _get_xblock(self, mentoring, name):
        return mentoring.find_element_by_css_selector(".xblock-v1[data-name='{}']".format(name))

    def _get_choice(self, questionnaire, choice_index):
        return questionnaire.find_elements_by_css_selector(".choices-list .choice")[choice_index]

    def _get_messages_element(self, mentoring):
        return mentoring.find_element_by_css_selector('.messages')

    def _get_controls(self, mentoring):
        answer = self._get_xblock(mentoring, "feedback_answer_1").find_element_by_css_selector('.answer')
        mcq = self._get_xblock(mentoring, "feedback_mcq_2")
        mrq = self._get_xblock(mentoring, "feedback_mrq_3")
        rating = self._get_xblock(mentoring, "feedback_rating_4")

        return answer, mcq, mrq, rating

    def _assert_checkmark(self, checkmark, shown=True, checkmark_class=None):
        choice_result_classes = checkmark.get_attribute('class').split()
        if shown:
            self.assertTrue(checkmark.is_displayed())
            self.assertIn(checkmark_class, choice_result_classes)
        else:
            self.assertFalse(checkmark.is_displayed())

    def _assert_feedback_showed(self, questionnaire, choice_index, expected_text,
                                click_choice_result=False, success=True):
        """
        Asserts that feedback for the given element contains particular text.
        If `click_choice_result` is True, clicks on the `choice-result` icon before
        checking feedback visibility: MRQ feedbacks are not shown right away.
        """
        choice = self._get_choice(questionnaire, choice_index)
        choice_result = choice.find_element_by_css_selector('.choice-result')
        if click_choice_result:
            choice_result.click()

        feedback_popup = choice.find_element_by_css_selector(".choice-tips")
        checkmark_class = 'checkmark-correct' if success else 'checkmark-incorrect'
        self._assert_checkmark(choice_result, shown=True, checkmark_class=checkmark_class)
        self.assertTrue(feedback_popup.is_displayed())
        self.assertEqual(feedback_popup.text, expected_text)

    def _assert_feedback_hidden(self, questionnaire, choice_index):
        choice = self._get_choice(questionnaire, choice_index)
        choice_result = choice.find_element_by_css_selector('.choice-result')
        feedback_popup = choice.find_element_by_css_selector(".choice-tips")
        choice_result_classes = choice_result.get_attribute('class').split()

        self.assertTrue(choice_result.is_displayed())
        self.assertFalse(feedback_popup.is_displayed())
        self.assertNotIn('checkmark-correct', choice_result_classes)
        self.assertNotIn('checkmark-incorrect', choice_result_classes)

    def _standard_filling(self, answer, mcq, mrq, rating):
        answer.send_keys('This is the answer')
        self.click_choice(mcq, "Yes")
        # 1st, 3rd and 4th options, first three are correct, i.e. two mistakes: 2nd and 4th
        self.click_choice(mrq, "Its elegance")
        self.click_choice(mrq, "Its gracefulness")
        self.click_choice(mrq, "Its bugs")
        self.click_choice(rating, "4")

    # mcq and rating can't be reset easily, but it's not required; listing them here to keep method signature similar
    def _clear_filling(self, answer, mcq, mrq, rating):  # pylint: disable=unused-argument
        answer.clear()
        for checkbox in mrq.find_elements_by_css_selector('.choice input'):
            if checkbox.is_selected():
                checkbox.click()

    def _standard_checks(self, answer, mcq, mrq, rating, messages, only_selected=False):
        self.assertEqual(answer.get_attribute('value'), 'This is the answer')
        self._assert_feedback_showed(mcq, 0, "Great!")
        self._assert_feedback_showed(
            mrq, 0, "This is something everyone has to like about this MRQ", click_choice_result=True
        )
        if not only_selected:
            self._assert_feedback_showed(
                mrq, 1, "This is something everyone has to like about beauty",
                click_choice_result=True, success=False
            )
        else:
            self._assert_feedback_hidden(mrq, 1)
        self._assert_feedback_showed(mrq, 2, "This MRQ is indeed very graceful", click_choice_result=True)
        self._assert_feedback_showed(mrq, 3, "Nah, there aren't any!", click_choice_result=True, success=False)
        self._assert_feedback_showed(rating, 3, "I love good grades.", click_choice_result=True)
        self.assertTrue(messages.is_displayed())
        self.assertEqual(messages.text, "FEEDBACK\nNot done yet")

    def test_feedbacks_and_messages_is_not_shown_on_first_load(self):
        mentoring = self.load_scenario("feedback_persistence.xml")
        answer, mcq, mrq, rating = self._get_controls(mentoring)
        messages = self._get_messages_element(mentoring)
        answer_checkmark = answer.find_element_by_xpath("parent::*").find_element_by_css_selector(".answer-checkmark")

        self._assert_checkmark(answer_checkmark, shown=False)
        for i in range(3):
            self._assert_feedback_hidden(mcq, i)
        for i in range(4):
            self._assert_feedback_hidden(mrq, i)
        for i in range(5):
            self._assert_feedback_hidden(rating, i)
        self.assertFalse(messages.is_displayed())

    def test_persists_feedback_on_page_reload(self):
        mentoring = self.load_scenario("feedback_persistence.xml")
        answer, mcq, mrq, rating = self._get_controls(mentoring)
        messages = self._get_messages_element(mentoring)

        self._standard_filling(answer, mcq, mrq, rating)
        self.click_submit(mentoring)
        self._standard_checks(answer, mcq, mrq, rating, messages)

        # now, reload the page and do the same checks again
        mentoring = self.go_to_view("student_view")
        answer, mcq, mrq, rating = self._get_controls(mentoring)
        messages = self._get_messages_element(mentoring)
        self._standard_checks(answer, mcq, mrq, rating, messages, only_selected=True)

    def test_given_perfect_score_in_past_loads_current_result(self):
        mentoring = self.load_scenario("feedback_persistence.xml")
        answer, mcq, mrq, rating = self._get_controls(mentoring)
        messages = self._get_messages_element(mentoring)

        answer.send_keys('This is the answer')
        self.click_choice(mcq, "Yes")
        # all three required options selected: a perfect score
        self.click_choice(mrq, "Its elegance")
        self.click_choice(mrq, "Its gracefulness")
        self.click_choice(mrq, "Its beauty")
        self.click_choice(rating, "4")
        self.click_submit(mentoring)

        # precondition - verifying 100% score achieved
        self.assertEqual(answer.get_attribute('value'), 'This is the answer')
        self._assert_feedback_showed(mcq, 0, "Great!")
        self._assert_feedback_showed(
            mrq, 0, "This is something everyone has to like about this MRQ", click_choice_result=True
        )
        self._assert_feedback_showed(
            mrq, 1, "This is something everyone has to like about beauty", click_choice_result=True
        )
        self._assert_feedback_showed(mrq, 2, "This MRQ is indeed very graceful", click_choice_result=True)
        self._assert_feedback_showed(mrq, 3, "Nah, there aren't any!", click_choice_result=True)
        self._assert_feedback_showed(rating, 3, "I love good grades.", click_choice_result=True)
        self.assertTrue(messages.is_displayed())
        self.assertEqual(messages.text, "FEEDBACK\nAll Good")

        self._clear_filling(answer, mcq, mrq, rating)
        self._standard_filling(answer, mcq, mrq, rating)
        self.click_submit(mentoring)
        self._standard_checks(answer, mcq, mrq, rating, messages)

        # now, reload the page and make sure LATEST submission is loaded and feedback is shown
        mentoring = self.go_to_view("student_view")
        answer, mcq, mrq, rating = self._get_controls(mentoring)
        messages = self._get_messages_element(mentoring)
        self._standard_checks(answer, mcq, mrq, rating, messages, only_selected=True)
```
problem_builder/tests/integration/test_messages.py
```diff
@@ -50,13 +50,6 @@ class MessagesTest(ProblemBuilderBaseTest):
             message_text = message_text[8:].lstrip()
             self.assertEqual(MESSAGES[msg_type], message_text)
 
-    def click_choice(self, container, choice_text):
-        """ Click on the choice label with the specified text """
-        for label in container.find_elements_by_css_selector('.choices .choice label'):
-            if choice_text in label.text:
-                label.click()
-                break
-
     @ddt.data(
         ("One", COMPLETED),
         ("Two", COMPLETED),
```
problem_builder/tests/integration/xml_templates/feedback_persistence.xml (new file, mode 100644)
```xml
<vertical_demo>
  <problem-builder url_name="feedback" enforce_dependency="false">
    <pb-answer name="feedback_answer_1" />
    <pb-mcq name="feedback_mcq_2" question="Do you like this MCQ?" correct_choices='["yes"]'>
      <pb-choice value="yes">Yes</pb-choice>
      <pb-choice value="maybenot">Maybe not</pb-choice>
      <pb-choice value="understand">I don't understand</pb-choice>
      <pb-tip values='["yes"]'>Great!</pb-tip>
      <pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
      <pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
    </pb-mcq>
    <pb-mrq name="feedback_mrq_3" question="What do you like in this MRQ?"
            required_choices='["elegance","gracefulness","beauty"]'>
      <pb-choice value="elegance">Its elegance</pb-choice>
      <pb-choice value="beauty">Its beauty</pb-choice>
      <pb-choice value="gracefulness">Its gracefulness</pb-choice>
      <pb-choice value="bugs">Its bugs</pb-choice>
      <pb-tip values='["elegance"]'>This is something everyone has to like about this MRQ</pb-tip>
      <pb-tip values='["beauty"]'>This is something everyone has to like about beauty</pb-tip>
      <pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
      <pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
    </pb-mrq>
    <pb-rating name="feedback_rating_4" low="Not good at all" high="Extremely good"
               question="How do you rate this MCQ?" correct_choices='["4","5"]'>
      <pb-choice value="notwant">I don't want to rate it</pb-choice>
      <pb-tip values='["4","5"]'>I love good grades.</pb-tip>
      <pb-tip values='["1","2","3"]'>Will do better next time...</pb-tip>
      <pb-tip values='["notwant"]'>Your loss!</pb-tip>
    </pb-rating>
    <pb-message type="completed">All Good</pb-message>
    <pb-message type="incomplete">Not done yet</pb-message>
  </problem-builder>
</vertical_demo>
```