OpenEdx / problem-builder

Commit c1e91c87
Authored Apr 24, 2015 by Braden MacDonald

Fix bug with "Review Grade" button when max_attempts is unlimited
Parent: 5f60d03a
Showing 4 changed files with 70 additions and 9 deletions:

    problem_builder/public/js/mentoring_assessment_view.js    +2   -2
    problem_builder/tests/integration/test_assessment.py      +18  -6
    problem_builder/tests/integration/xml/assessment_1.xml    +1   -1
    problem_builder/tests/integration/xml/assessment_3.xml    +49  -0
problem_builder/public/js/mentoring_assessment_view.js
@@ -27,7 +27,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
     function no_more_attempts() {
         var attempts_data = $('.attempts', element).data();
-        return attempts_data.num_attempts >= attempts_data.max_attempts;
+        return (attempts_data.max_attempts > 0) && (attempts_data.num_attempts >= attempts_data.max_attempts);
     }

     function renderGrade() {
@@ -63,7 +63,7 @@ function MentoringAssessmentView(runtime, element, mentoring) {
         }
         mentoring.renderAttempts();
-        if (data.assessment_message && data.num_attempts < data.max_attempts) {
+        if (data.assessment_message && (data.max_attempts === 0 || data.num_attempts < data.max_attempts)) {
             mentoring.setContent(messagesDOM, data.assessment_message);
             messagesDOM.show();
         }
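Why this change works: a max_attempts value of 0 means attempts are unlimited (see the new assessment_3.xml fixture below), so the old check num_attempts >= max_attempts became true after the very first submission and the view acted as if the learner were out of attempts. A minimal sketch of the before/after predicate, written as stand-alone Python for illustration (the function names are hypothetical, not from the codebase):

    # max_attempts == 0 is Problem Builder's "unlimited attempts" convention.
    def no_more_attempts_old(num_attempts, max_attempts):
        return num_attempts >= max_attempts

    def no_more_attempts_fixed(num_attempts, max_attempts):
        return max_attempts > 0 and num_attempts >= max_attempts

    # Unlimited attempts: the old predicate is wrongly true after one try.
    assert no_more_attempts_old(1, 0) and not no_more_attempts_fixed(1, 0)
    # Finite cap: both predicates agree, so capped assessments are unaffected.
    assert no_more_attempts_old(2, 2) and no_more_attempts_fixed(2, 2)
    assert not no_more_attempts_old(1, 2) and not no_more_attempts_fixed(1, 2)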
problem_builder/tests/integration/test_assessment.py
@@ -242,7 +242,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
     def peek_at_review(self, mentoring, controls, expected, extended_feedback=False):
         self.wait_until_text_in("You scored {percentage}% on this assessment.".format(**expected), mentoring)
         self.assert_persistent_elements_present(mentoring)
-        if expected["num_attempts"] < expected["max_attempts"]:
+        if expected["max_attempts"] > 0 and expected["num_attempts"] < expected["max_attempts"]:
             self.assertIn("Note: if you retake this assessment, only your final score counts.", mentoring.text)
             self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
         elif extended_feedback:
@@ -264,6 +264,8 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         self.assertIn("You answered {incorrect} questions incorrectly.".format(**expected), mentoring.text)
         if expected["max_attempts"] == 1:
             self.assertIn("You have used {num_attempts} of 1 submission.".format(**expected), mentoring.text)
+        elif expected["max_attempts"] == 0:
+            self.assertNotIn("You have used", mentoring.text)
         else:
             self.assertIn(
                 "You have used {num_attempts} of {max_attempts} submissions.".format(**expected),
@@ -314,9 +316,13 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         controls.next_question.click()
         self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)

-    @data((1, False), ('Extended Feedback', True))
+    @data(
+        (1, False, 4),
+        (3, False, 0),
+        ('Extended Feedback', True, 2)
+    )
     @unpack
-    def test_assessment(self, assessment, extended_feedback):
+    def test_assessment(self, assessment, extended_feedback, max_attempts):
         mentoring, controls = self.go_to_assessment("Assessment %s" % assessment)
         self.freeform_answer(1, mentoring, controls, 'This is the answer', CORRECT)
@@ -332,7 +338,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         expected_results = {
             "correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
-            "num_attempts": 1, "max_attempts": 2
+            "num_attempts": 1, "max_attempts": max_attempts
         }
         self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
@@ -351,11 +357,17 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
         expected_results = {
             "correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
-            "num_attempts": 2, "max_attempts": 2
+            "num_attempts": 2, "max_attempts": max_attempts
         }
         self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
-        self.assert_disabled(controls.try_again)
-        self.assert_messages_empty(mentoring)
+        if max_attempts == 2:
+            self.assert_disabled(controls.try_again)
+        else:
+            self.assert_clickable(controls.try_again)
+        if 1 <= max_attempts <= 2:
+            self.assert_messages_empty(mentoring)  # The on-assessment-review message should not be shown if no attempts remain
+        else:
+            self.assert_messages_text(mentoring, "Assessment additional feedback message text")
         if extended_feedback:
             self.extended_feedback_checks(mentoring, controls, expected_results)
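The widened @data matrix is what drives the new assertions: max_attempts=4 leaves attempts remaining after the test's two submissions, 0 is unlimited, and 2 is exactly exhausted. A minimal, runnable sketch of how ddt's @data/@unpack decorators fan those tuples out into separate generated tests; the TestCase here is a hypothetical stand-in for the project's Selenium-based harness, not part of this commit:

    import unittest
    from ddt import ddt, data, unpack

    @ddt
    class AttemptCapSemanticsTest(unittest.TestCase):
        # Each (assessment, extended_feedback, max_attempts) triple becomes
        # its own generated test method, mirroring test_assessment above.
        @data((1, False, 4), (3, False, 0), ('Extended Feedback', True, 2))
        @unpack
        def test_cap(self, assessment, extended_feedback, max_attempts):
            num_attempts = 2  # test_assessment submits the assessment twice
            out_of_attempts = max_attempts > 0 and num_attempts >= max_attempts
            # Only the max_attempts == 2 variant ends up out of attempts;
            # 4 leaves attempts remaining and 0 means unlimited.
            self.assertEqual(out_of_attempts, max_attempts == 2)

    if __name__ == '__main__':
        unittest.main()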
problem_builder/tests/integration/xml/assessment_1.xml
-<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="2">
+<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="4">
     <html_demo>
         <p>This paragraph is shared between <strong>all</strong> questions.</p>
problem_builder/tests/integration/xml/assessment_3.xml (new file, mode 100644)
<problem-builder url_name="mentoring-assessment-1" display_name="A Simple Assessment" weight="1" mode="assessment" max_attempts="0">
    <html_demo>
        <p>This paragraph is shared between <strong>all</strong> questions.</p>
        <p>Please answer the questions below.</p>
    </html_demo>
    <html_demo>
        We need an XBlock with JavaScript here to test that it doesn't interfere
        with the assessment, since it will show up in runtime(element).children,
        but it is not a "step" element:
    </html_demo>
    <acid/>
    <pb-answer name="goal" question="What is your goal?" />
    <pb-mcq name="mcq_1_1" question="Do you like this MCQ?" correct_choices='["yes"]'>
        <pb-choice value="yes">Yes</pb-choice>
        <pb-choice value="maybenot">Maybe not</pb-choice>
        <pb-choice value="understand">I don't understand</pb-choice>
        <pb-tip values='["yes"]'>Great!</pb-tip>
        <pb-tip values='["maybenot"]'>Ah, damn.</pb-tip>
        <pb-tip values='["understand"]'><div id="test-custom-html">Really?</div></pb-tip>
    </pb-mcq>
    <pb-rating name="mcq_1_2" low="Not good at all" high="Extremely good" question="How much do you rate this MCQ?" correct_choices='["4","5"]'>
        <pb-choice value="notwant">I don't want to rate it</pb-choice>
        <pb-tip values='["4","5"]'>I love good grades.</pb-tip>
        <pb-tip values='["1","2", "3"]'>Will do better next time...</pb-tip>
        <pb-tip values='["notwant"]'>Your loss!</pb-tip>
    </pb-rating>
    <pb-mrq name="mrq_1_1" question="What do you like in this MRQ?" required_choices='["gracefulness","elegance","beauty"]'>
        <pb-choice value="elegance">Its elegance</pb-choice>
        <pb-choice value="beauty">Its beauty</pb-choice>
        <pb-choice value="gracefulness">Its gracefulness</pb-choice>
        <pb-choice value="bugs">Its bugs</pb-choice>
        <pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
        <pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
        <pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip>
    </pb-mrq>
    <pb-message type="on-assessment-review">
        <html>Assessment additional feedback message text</html>
    </pb-message>
</problem-builder>