Commit 0cf055d5 by E. Kolpakov

Presentation updates:

* Horizontally centering visual progress
* Ability to override "Average" label
* Added ability to hide numbers in dashboard
* Ability to exclude specific questions from dashboard
* Slightly improved a11y (aka accessibility)
parent 4817f9d9
...@@ -36,7 +36,7 @@ from .mcq import MCQBlock ...@@ -36,7 +36,7 @@ from .mcq import MCQBlock
from .sub_api import sub_api from .sub_api import sub_api
from lazy import lazy from lazy import lazy
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fields import Scope, List, String from xblock.fields import Scope, List, String, Boolean, Dict
from xblock.fragment import Fragment from xblock.fragment import Fragment
from xblock.validation import ValidationMessage from xblock.validation import ValidationMessage
from xblockutils.helpers import child_isinstance from xblockutils.helpers import child_isinstance
...@@ -172,6 +172,20 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -172,6 +172,20 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
).format(example_here='["2754b8afc03a439693b9887b6f1d9e36", "215028f7df3d4c68b14fb5fea4da7053"]'), ).format(example_here='["2754b8afc03a439693b9887b6f1d9e36", "215028f7df3d4c68b14fb5fea4da7053"]'),
scope=Scope.settings, scope=Scope.settings,
) )
# Optional per-block rules that remove questions both from the dashboard
# display and from the computed average. Keys are mentoring block url_names,
# values are lists of one-based question numbers.
# Fix: the help text repeated "must be in JSON format" twice; the duplicate
# sentence is removed.
exclude_questions = Dict(
    display_name=_("Questions to be hidden"),
    help=_(
        "Optional rules to exclude specific questions both from displaying in dashboard and from the calculated "
        "average. Rules must start with the url_name of a mentoring block, followed by list of question numbers "
        "to exclude. Rule set must be in JSON format. Question numbers are one-based (the first question being "
        "number 1). Examples: {examples_here}"
    ).format(
        examples_here='{"2754b8afc03a439693b9887b6f1d9e36":[1,2], "215028f7df3d4c68b14fb5fea4da7053":[1,5]}'
    ),
    scope=Scope.content,
    multiline_editor=True,
    resettable_editor=False,
)
color_rules = String( color_rules = String(
display_name=_("Color Coding Rules"), display_name=_("Color Coding Rules"),
help=_( help=_(
...@@ -207,8 +221,23 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -207,8 +221,23 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
), ),
scope=Scope.content, scope=Scope.content,
) )
# Label shown on the computed-average row; course authors may override the
# default translated "Average" (see the avg-row cell in the dashboard template).
average_label = String(
    display_name=_("Label for average value"),
    default=_("Average"),
    help=_("Label to be shown for calculated average"),
    scope=Scope.content,
)
# When False the numeric scores are not rendered in the dashboard table;
# the template then exposes the values via aria-label for screen readers.
show_numbers = Boolean(
    display_name=_("Display values"),
    default=True,
    help=_("Toggles if numeric values are displayed"),
    scope=Scope.content
)
editable_fields = ('display_name', 'mentoring_ids', 'color_rules', 'visual_rules', 'visual_title', 'visual_desc') editable_fields = (
'display_name', 'mentoring_ids', 'exclude_questions', 'average_label', 'show_numbers',
'color_rules', 'visual_rules', 'visual_title', 'visual_desc'
)
css_path = 'public/css/dashboard.css' css_path = 'public/css/dashboard.css'
js_path = 'public/js/dashboard.js' js_path = 'public/js/dashboard.js'
...@@ -321,6 +350,12 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -321,6 +350,12 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
except Exception: except Exception:
return "" return ""
def _get_problem_questions(self, mentoring_block):
    """ Yield the child IDs of *mentoring_block* that are MCQ questions, in order. """
    for candidate_id in mentoring_block.children:
        # Skip non-MCQ children (html blocks, messages, etc.)
        if not child_isinstance(mentoring_block, candidate_id, MCQBlock):
            continue
        yield candidate_id
def student_view(self, context=None): # pylint: disable=unused-argument def student_view(self, context=None): # pylint: disable=unused-argument
""" """
Standard view of this XBlock. Standard view of this XBlock.
...@@ -336,8 +371,22 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -336,8 +371,22 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
'display_name': mentoring_block.display_name, 'display_name': mentoring_block.display_name,
'mcqs': [] 'mcqs': []
} }
for child_id in mentoring_block.children: try:
if child_isinstance(mentoring_block, child_id, MCQBlock): hide_questions = self.exclude_questions.get(mentoring_block.url_name, [])
except Exception: # pylint: disable=broad-except-clause
log.exception("Cannot parse exclude_questions setting - probably malformed: %s", self.exclude_questions)
hide_questions = []
for question_number, child_id in enumerate(self._get_problem_questions(mentoring_block), 1):
try:
if question_number in hide_questions:
continue
except TypeError:
log.exception(
"Cannot check question number - expected list of ints got: %s",
hide_questions
)
# Get the student's submitted answer to this MCQ from the submissions API: # Get the student's submitted answer to this MCQ from the submissions API:
mcq_block = self.runtime.get_block(child_id) mcq_block = self.runtime.get_block(child_id)
mcq_submission_key = self._get_submission_key(child_id) mcq_submission_key = self._get_submission_key(child_id)
...@@ -345,6 +394,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -345,6 +394,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
value = sub_api.get_submissions(mcq_submission_key, limit=1)[0]["answer"] value = sub_api.get_submissions(mcq_submission_key, limit=1)[0]["answer"]
except IndexError: except IndexError:
value = None value = None
block['mcqs'].append({ block['mcqs'].append({
"display_name": mcq_block.display_name_with_default, "display_name": mcq_block.display_name_with_default,
"value": value, "value": value,
...@@ -384,6 +434,8 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -384,6 +434,8 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
'blocks': blocks, 'blocks': blocks,
'display_name': self.display_name, 'display_name': self.display_name,
'visual_repr': visual_repr, 'visual_repr': visual_repr,
'average_label': self.average_label,
'show_numbers': self.show_numbers,
}) })
fragment = Fragment(html) fragment = Fragment(html)
...@@ -406,6 +458,14 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -406,6 +458,14 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
except InvalidUrlName as e: except InvalidUrlName as e:
add_error(_(u'Invalid block url_name given: "{bad_url_name}"').format(bad_url_name=unicode(e))) add_error(_(u'Invalid block url_name given: "{bad_url_name}"').format(bad_url_name=unicode(e)))
if data.exclude_questions:
for key, value in data.exclude_questions.iteritems():
if not isinstance(value, list):
add_error(
_(u"Exclude questions is malformed: value for key {key} is {value}, expected list of integers")
.format(key=key, value=value)
)
if data.color_rules: if data.color_rules:
try: try:
self.parse_color_rules_str(data.color_rules, ignore_errors=False) self.parse_color_rules_str(data.color_rules, ignore_errors=False)
......
...@@ -30,3 +30,7 @@ ...@@ -30,3 +30,7 @@
.pb-dashboard table .avg-row td.desc { .pb-dashboard table .avg-row td.desc {
font-style: italic; font-style: italic;
} }
/* Horizontally center the visual progress representation. */
.pb-dashboard-visual {
text-align: center;
}
...@@ -42,16 +42,30 @@ ...@@ -42,16 +42,30 @@
{% for mcq in block.mcqs %} {% for mcq in block.mcqs %}
<tr> <tr>
<th class="desc">{{ mcq.display_name }}</th> <th class="desc">{{ mcq.display_name }}</th>
<td class="value" {% if mcq.color %}style="border-right-color: {{mcq.color}};"{% endif %}> <td class="value"
{% if mcq.value %}{{ mcq.value }}{% endif %} {% if mcq.color %} style="border-right-color: {{mcq.color}};"{% endif %}
{% if not show_numbers %}
{% if mcq.value %} aria-label="Score: {{mcq.value}}" {% else %} aria-label="{% trans 'No value yet' %}" {%endif%}
{% endif %}
>
{% if mcq.value and show_numbers %}
{{ mcq.value }}
{% endif %}
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
{% if block.has_average %} {% if block.has_average %}
<tr class="avg-row"> <tr class="avg-row">
<th class="desc">{% trans "Average" %}</th> <th class="desc">{{ average_label }}</th>
<td class="value" {% if block.average_color %}style="border-right-color: {{block.average_color}};"{% endif %}> <td class="value"
{% if block.average_color %} style="border-right-color: {{block.average_color}};"{% endif %}
{% if not show_numbers %}
{% if block.average %} aria-label="Score: {{block.average|floatformat}}" {% else %} aria-label="{% trans 'No value yet' %}" {%endif%}
{% endif %}
>
{% if show_numbers %}
{{ block.average|floatformat }} {{ block.average|floatformat }}
{% endif %}
</td> </td>
</tr> </tr>
{% endif %} {% endif %}
......
...@@ -17,8 +17,10 @@ ...@@ -17,8 +17,10 @@
# along with this program in a file in the toplevel directory called # along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>. # "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
# #
from textwrap import dedent
from mock import Mock, patch from mock import Mock, patch
from xblockutils.base_test import SeleniumXBlockTest from xblockutils.base_test import SeleniumXBlockTest
from xblockutils.resources import ResourceLoader
class MockSubmissionsAPI(object): class MockSubmissionsAPI(object):
...@@ -54,73 +56,28 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -54,73 +56,28 @@ class TestDashboardBlock(SeleniumXBlockTest):
""" """
Test the Student View of a dashboard XBlock linked to some problem builder blocks Test the Student View of a dashboard XBlock linked to some problem builder blocks
""" """
# XML fixtures for the pb-dashboard block under test; each string is injected
# into the xml/dashboard.xml scenario template by _install_fixture().
SIMPLE_DASHBOARD = """<pb-dashboard mentoring_ids='["dummy-value"]'/>"""
# Overrides the average-row label and hides numeric values.
ALTERNATIVE_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]' average_label="Avg." show_numbers="false"/>
""")
# Excludes specific question numbers (one-based) per step url_name.
HIDE_QUESTIONS_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]'
exclude_questions='{"Step 1": [2, 3], "Step 2":[3], "Step 3":[2]}'
/>
""")
# "Step 1" maps to a string instead of a list - a malformed rule the block
# is expected to ignore gracefully.
MALFORMED_HIDE_QUESTIONS_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]'
exclude_questions='{"Step 1": "1234", "Step 2":[3], "Step 3":[2]}'
/>
""")
def setUp(self): def setUp(self):
super(TestDashboardBlock, self).setUp() super(TestDashboardBlock, self).setUp()
# Set up our scenario:
self.set_scenario_xml("""
<vertical_demo>
<problem-builder display_name="Step 1">
<pb-mcq display_name="1.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="1.2 Second MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="1.3 Third MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<html_demo> This message here should be ignored. </html_demo>
</problem-builder>
<problem-builder display_name="Step 2">
<pb-mcq display_name="2.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="4">Option 4</pb-choice>
<pb-choice value="5">Option 5</pb-choice>
<pb-choice value="6">Option 6</pb-choice>
</pb-mcq>
<pb-mcq display_name="2.2 Second MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="2.3 Third MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
</problem-builder>
<problem-builder display_name="Step 3">
<pb-mcq display_name="3.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="3.2 MCQ with non-numeric values"
question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="A">Option A</pb-choice>
<pb-choice value="B">Option B</pb-choice>
<pb-choice value="C">Option C</pb-choice>
</pb-mcq>
</problem-builder>
<pb-dashboard mentoring_ids='["dummy-value"]'>
</pb-dashboard>
</vertical_demo>
""")
# Apply a whole bunch of patches that are needed in lieu of the LMS/CMS runtime and edx-submissions: # Apply a whole bunch of patches that are needed in lieu of the LMS/CMS runtime and edx-submissions:
def get_mentoring_blocks(dashboard_block, mentoring_ids, ignore_errors=True): def get_mentoring_blocks(dashboard_block, mentoring_ids, ignore_errors=True):
return [dashboard_block.runtime.get_block(key) for key in dashboard_block.get_parent().children[:-1]] return [dashboard_block.runtime.get_block(key) for key in dashboard_block.get_parent().children[:-1]]
mock_submisisons_api = MockSubmissionsAPI() mock_submisisons_api = MockSubmissionsAPI()
patches = ( patches = (
( (
...@@ -135,13 +92,21 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -135,13 +92,21 @@ class TestDashboardBlock(SeleniumXBlockTest):
), ),
("problem_builder.dashboard.DashboardBlock.get_mentoring_blocks", get_mentoring_blocks), ("problem_builder.dashboard.DashboardBlock.get_mentoring_blocks", get_mentoring_blocks),
("problem_builder.dashboard.sub_api", mock_submisisons_api), ("problem_builder.dashboard.sub_api", mock_submisisons_api),
("problem_builder.mcq.sub_api", mock_submisisons_api) ("problem_builder.mcq.sub_api", mock_submisisons_api),
(
"problem_builder.mentoring.MentoringBlock.url_name",
property(lambda block: block.display_name)
)
) )
for p in patches: for p in patches:
patcher = patch(*p) patcher = patch(*p)
patcher.start() patcher.start()
self.addCleanup(patcher.stop) self.addCleanup(patcher.stop)
# All the patches are installed; now we can proceed with using the XBlocks for tests:
def _install_fixture(self, dashboard_xml):
loader = ResourceLoader(self.__module__)
scenario = loader.render_template("xml/dashboard.xml", {'dashboard': dashboard_xml})
self.set_scenario_xml(scenario)
self.go_to_view("student_view") self.go_to_view("student_view")
self.vertical = self.load_root_xblock() self.vertical = self.load_root_xblock()
...@@ -150,6 +115,7 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -150,6 +115,7 @@ class TestDashboardBlock(SeleniumXBlockTest):
Test that when the student has not submitted any question answers, we still see Test that when the student has not submitted any question answers, we still see
the dashboard, and its lists all the MCQ questions in the way we expect. the dashboard, and its lists all the MCQ questions in the way we expect.
""" """
self._install_fixture(self.SIMPLE_DASHBOARD)
dashboard = self.browser.find_element_by_css_selector('.pb-dashboard') dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
step_headers = dashboard.find_elements_by_css_selector('thead') step_headers = dashboard.find_elements_by_css_selector('thead')
self.assertEqual(len(step_headers), 3) self.assertEqual(len(step_headers), 3)
...@@ -164,10 +130,7 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -164,10 +130,7 @@ class TestDashboardBlock(SeleniumXBlockTest):
value = mcq.find_element_by_css_selector('td:last-child') value = mcq.find_element_by_css_selector('td:last-child')
self.assertEqual(value.text, '') self.assertEqual(value.text, '')
def test_dashboard(self): def _set_mentoring_values(self):
"""
Submit an answer to each MCQ, then check that the dashboard reflects those answers.
"""
pbs = self.browser.find_elements_by_css_selector('.mentoring') pbs = self.browser.find_elements_by_css_selector('.mentoring')
for pb in pbs: for pb in pbs:
mcqs = pb.find_elements_by_css_selector('fieldset.choices') mcqs = pb.find_elements_by_css_selector('fieldset.choices')
...@@ -178,6 +141,13 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -178,6 +141,13 @@ class TestDashboardBlock(SeleniumXBlockTest):
submit.click() submit.click()
self.wait_until_disabled(submit) self.wait_until_disabled(submit)
def test_dashboard(self):
"""
Submit an answer to each MCQ, then check that the dashboard reflects those answers.
"""
self._install_fixture(self.SIMPLE_DASHBOARD)
self._set_mentoring_values()
# Reload the page: # Reload the page:
self.go_to_view("student_view") self.go_to_view("student_view")
dashboard = self.browser.find_element_by_css_selector('.pb-dashboard') dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
...@@ -197,3 +167,91 @@ class TestDashboardBlock(SeleniumXBlockTest): ...@@ -197,3 +167,91 @@ class TestDashboardBlock(SeleniumXBlockTest):
right_col = avg_row.find_element_by_css_selector('.value') right_col = avg_row.find_element_by_css_selector('.value')
expected_average = {0: "2", 1: "3", 2: "1"}[step_num] expected_average = {0: "2", 1: "3", 2: "1"}[step_num]
self.assertEqual(right_col.text, expected_average) self.assertEqual(right_col.text, expected_average)
def test_dashboard_alternative(self):
    """
    Submit an answer to each MCQ, then check the dashboard under non-default
    configuration:
    * the average row is labelled "Avg." instead of the default "Average"
    * numeric values are hidden (show_numbers is false)
    """
    self._install_fixture(self.ALTERNATIVE_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page so the dashboard picks up the submissions:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    step_tables = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(step_tables), 3)
    for step_table in step_tables:
        question_rows = step_table.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertTrue(2 <= len(question_rows) <= 3)
        # Answers were submitted, but values must be hidden:
        for question_row in question_rows:
            value_cell = question_row.find_element_by_css_selector('td.value')
            self.assertEqual(value_cell.text, '')
        # The average row uses the overridden label and hides its value too:
        average_row = step_table.find_element_by_css_selector('tr.avg-row')
        self.assertEqual(average_row.find_element_by_css_selector('.desc').text, "Avg.")
        self.assertEqual(average_row.find_element_by_css_selector('.value').text, "")
def test_dashboard_exclude_questions(self):
    """
    Submit an answer to each MCQ, then check that questions listed in
    exclude_questions are omitted from the dashboard and from the average.
    """
    self._install_fixture(self.HIDE_QUESTIONS_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page so the dashboard picks up the submissions:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    step_tables = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(step_tables), 3)
    # Remaining visible question counts and resulting averages per step:
    expected_lengths = [1, 2, 1]
    expected_averages = ["1", "3", "1"]
    for step_num, step_table in enumerate(step_tables):
        question_rows = step_table.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertEqual(len(question_rows), expected_lengths[step_num])
        for question_row in question_rows:
            value_cell = question_row.find_element_by_css_selector('td.value')
            self.assertIn(value_cell.text, ('1', '2', '3', '4'))
        # The average only covers the questions that remain visible:
        average_row = step_table.find_element_by_css_selector('tr.avg-row')
        self.assertEqual(average_row.find_element_by_css_selector('.desc').text, "Average")
        value_column = average_row.find_element_by_css_selector('.value')
        self.assertEqual(value_column.text, expected_averages[step_num])
def test_dashboard_malformed_exclude_questions(self):
    """
    Submit an answer to each MCQ, then check that a malformed exclude_questions
    rule ("Step 1" maps to a string instead of a list) is ignored for that step
    - all of its questions stay visible - while the well-formed rules for the
    other steps still hide their questions.
    """
    self._install_fixture(self.MALFORMED_HIDE_QUESTIONS_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    steps = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(steps), 3)
    # Step 1's malformed rule is skipped (all 3 questions shown); steps 2 and 3
    # each exclude one question:
    lengths = [3, 2, 1]
    for step_num, step in enumerate(steps):
        mcq_rows = step.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertEqual(len(mcq_rows), lengths[step_num])
        for mcq in mcq_rows:
            value = mcq.find_element_by_css_selector('td.value')
            self.assertIn(value.text, ('1', '2', '3', '4'))
        # Check the average:
        avg_row = step.find_element_by_css_selector('tr.avg-row')
        left_col = avg_row.find_element_by_css_selector('.desc')
        self.assertEqual(left_col.text, "Average")
        right_col = avg_row.find_element_by_css_selector('.value')
        expected_average = {0: "2", 1: "3", 2: "1"}[step_num]
        self.assertEqual(right_col.text, expected_average)
<!-- Test scenario template: three problem-builder steps containing MCQs.
     The pb-dashboard block under test is injected via the "dashboard"
     template variable (rendered with |safe below). -->
<vertical_demo>
<problem-builder display_name="Step 1">
<pb-mcq display_name="1.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="1.2 Second MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="1.3 Third MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<html_demo> This message here should be ignored. </html_demo>
</problem-builder>
<problem-builder display_name="Step 2">
<pb-mcq display_name="2.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="4">Option 4</pb-choice>
<pb-choice value="5">Option 5</pb-choice>
<pb-choice value="6">Option 6</pb-choice>
</pb-mcq>
<pb-mcq display_name="2.2 Second MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="2.3 Third MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
</problem-builder>
<problem-builder display_name="Step 3">
<pb-mcq display_name="3.1 First MCQ" question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="1">Option 1</pb-choice>
<pb-choice value="2">Option 2</pb-choice>
<pb-choice value="3">Option 3</pb-choice>
<pb-choice value="4">Option 4</pb-choice>
</pb-mcq>
<pb-mcq display_name="3.2 MCQ with non-numeric values"
question="Which option?" correct_choices='["1","2","3","4"]'>
<pb-choice value="A">Option A</pb-choice>
<pb-choice value="B">Option B</pb-choice>
<pb-choice value="C">Option C</pb-choice>
</pb-mcq>
</problem-builder>
{{ dashboard|safe }}
</vertical_demo>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment