Commit 78decd9a by E. Kolpakov

Merged edx-release into master

parent 21e29238
...@@ -36,7 +36,7 @@ from .mcq import MCQBlock ...@@ -36,7 +36,7 @@ from .mcq import MCQBlock
from .sub_api import sub_api from .sub_api import sub_api
from lazy import lazy from lazy import lazy
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fields import Scope, List, String from xblock.fields import Scope, List, String, Boolean, Dict
from xblock.fragment import Fragment from xblock.fragment import Fragment
from xblock.validation import ValidationMessage from xblock.validation import ValidationMessage
from xblockutils.helpers import child_isinstance from xblockutils.helpers import child_isinstance
...@@ -172,6 +172,20 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -172,6 +172,20 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
).format(example_here='["2754b8afc03a439693b9887b6f1d9e36", "215028f7df3d4c68b14fb5fea4da7053"]'), ).format(example_here='["2754b8afc03a439693b9887b6f1d9e36", "215028f7df3d4c68b14fb5fea4da7053"]'),
scope=Scope.settings, scope=Scope.settings,
) )
# Optional per-block rules hiding specific questions from the dashboard display
# and from the computed average. Keys are mentoring block url_names; values are
# one-based question numbers to exclude.
exclude_questions = Dict(
    display_name=_("Questions to be hidden"),
    help=_(
        "Optional rules to exclude specific questions both from displaying in dashboard and from the calculated "
        "average. Rules must start with the url_name of a mentoring block, followed by list of question numbers "
        "to exclude. Rule set must be in JSON format. Question numbers are one-based (the first question being "
        "number 1). Examples: {examples_here}"
    ).format(
        examples_here='{"2754b8afc03a439693b9887b6f1d9e36":[1,2], "215028f7df3d4c68b14fb5fea4da7053":[1,5]}'
    ),
    scope=Scope.content,
    multiline_editor=True,
    resettable_editor=False,
)
color_rules = String( color_rules = String(
display_name=_("Color Coding Rules"), display_name=_("Color Coding Rules"),
help=_( help=_(
...@@ -207,8 +221,27 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -207,8 +221,27 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
), ),
scope=Scope.content, scope=Scope.content,
) )
# Optional per-block override of the "Average" row label, keyed by the
# mentoring block's url_name.
average_labels = Dict(
    display_name=_("Label for average value"),
    help=_(
        "This setting allows overriding the label for the calculated average per mentoring block. Must be in "
        "JSON format. Examples: {examples_here}."
    ).format(
        examples_here='{"2754b8afc03a439693b9887b6f1d9e36": "Avg.", "215028f7df3d4c68b14fb5fea4da7053": "Mean"}'
    ),
    scope=Scope.content,
)
# When False, numeric scores are hidden from the table (aria-labels are used
# instead, in the template) while colors/averages still render.
show_numbers = Boolean(
    display_name=_("Display values"),
    default=True,
    help=_("Toggles if numeric values are displayed"),
    scope=Scope.content,
)
editable_fields = ('display_name', 'mentoring_ids', 'color_rules', 'visual_rules', 'visual_title', 'visual_desc') editable_fields = (
'display_name', 'mentoring_ids', 'exclude_questions', 'average_labels', 'show_numbers',
'color_rules', 'visual_rules', 'visual_title', 'visual_desc'
)
css_path = 'public/css/dashboard.css' css_path = 'public/css/dashboard.css'
js_path = 'public/js/dashboard.js' js_path = 'public/js/dashboard.js'
...@@ -321,6 +354,12 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -321,6 +354,12 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
except Exception: except Exception:
return "" return ""
def _get_problem_questions(self, mentoring_block):
    """ Generator returning only children of specified block that are MCQs """
    return (
        usage_id
        for usage_id in mentoring_block.children
        if child_isinstance(mentoring_block, usage_id, MCQBlock)
    )
def student_view(self, context=None): # pylint: disable=unused-argument def student_view(self, context=None): # pylint: disable=unused-argument
""" """
Standard view of this XBlock. Standard view of this XBlock.
...@@ -336,20 +375,35 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -336,20 +375,35 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
'display_name': mentoring_block.display_name, 'display_name': mentoring_block.display_name,
'mcqs': [] 'mcqs': []
} }
for child_id in mentoring_block.children: try:
if child_isinstance(mentoring_block, child_id, MCQBlock): hide_questions = self.exclude_questions.get(mentoring_block.url_name, [])
# Get the student's submitted answer to this MCQ from the submissions API: except Exception: # pylint: disable=broad-except-clause
mcq_block = self.runtime.get_block(child_id) log.exception("Cannot parse exclude_questions setting - probably malformed: %s", self.exclude_questions)
mcq_submission_key = self._get_submission_key(child_id) hide_questions = []
try:
value = sub_api.get_submissions(mcq_submission_key, limit=1)[0]["answer"] for question_number, child_id in enumerate(self._get_problem_questions(mentoring_block), 1):
except IndexError: try:
value = None if question_number in hide_questions:
block['mcqs'].append({ continue
"display_name": mcq_block.display_name_with_default, except TypeError:
"value": value, log.exception(
"color": self.color_for_value(value) if value is not None else None, "Cannot check question number - expected list of ints got: %s",
}) hide_questions
)
# Get the student's submitted answer to this MCQ from the submissions API:
mcq_block = self.runtime.get_block(child_id)
mcq_submission_key = self._get_submission_key(child_id)
try:
value = sub_api.get_submissions(mcq_submission_key, limit=1)[0]["answer"]
except IndexError:
value = None
block['mcqs'].append({
"display_name": mcq_block.display_name_with_default,
"value": value,
"color": self.color_for_value(value) if value is not None else None,
})
# If the values are numeric, display an average: # If the values are numeric, display an average:
numeric_values = [ numeric_values = [
float(mcq['value']) for mcq in block['mcqs'] float(mcq['value']) for mcq in block['mcqs']
...@@ -358,6 +412,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -358,6 +412,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
if numeric_values: if numeric_values:
average_value = sum(numeric_values) / len(numeric_values) average_value = sum(numeric_values) / len(numeric_values)
block['average'] = average_value block['average'] = average_value
block['average_label'] = self.average_labels.get(mentoring_block.url_name, _("Average"))
block['has_average'] = True block['has_average'] = True
block['average_color'] = self.color_for_value(average_value) block['average_color'] = self.color_for_value(average_value)
blocks.append(block) blocks.append(block)
...@@ -384,6 +439,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -384,6 +439,7 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
'blocks': blocks, 'blocks': blocks,
'display_name': self.display_name, 'display_name': self.display_name,
'visual_repr': visual_repr, 'visual_repr': visual_repr,
'show_numbers': self.show_numbers,
}) })
fragment = Fragment(html) fragment = Fragment(html)
...@@ -406,6 +462,37 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock): ...@@ -406,6 +462,37 @@ class DashboardBlock(StudioEditableXBlockMixin, XBlock):
except InvalidUrlName as e: except InvalidUrlName as e:
add_error(_(u'Invalid block url_name given: "{bad_url_name}"').format(bad_url_name=unicode(e))) add_error(_(u'Invalid block url_name given: "{bad_url_name}"').format(bad_url_name=unicode(e)))
if data.exclude_questions:
for key, value in data.exclude_questions.iteritems():
if not isinstance(value, list):
add_error(
_(u"'Questions to be hidden' is malformed: value for key {key} is {value}, "
u"expected list of integers")
.format(key=key, value=value)
)
if key not in data.mentoring_ids:
add_error(
_(u"'Questions to be hidden' is malformed: mentoring url_name {url_name} "
u"is not added to Dashboard")
.format(url_name=key)
)
if data.average_labels:
for key, value in data.average_labels.iteritems():
if not isinstance(value, basestring):
add_error(
_(u"'Label for average value' is malformed: value for key {key} is {value}, expected string")
.format(key=key, value=value)
)
if key not in data.mentoring_ids:
add_error(
_(u"'Label for average value' is malformed: mentoring url_name {url_name} "
u"is not added to Dashboard")
.format(url_name=key)
)
if data.color_rules: if data.color_rules:
try: try:
self.parse_color_rules_str(data.color_rules, ignore_errors=False) self.parse_color_rules_str(data.color_rules, ignore_errors=False)
......
...@@ -30,3 +30,7 @@ ...@@ -30,3 +30,7 @@
.pb-dashboard table .avg-row td.desc { .pb-dashboard table .avg-row td.desc {
font-style: italic; font-style: italic;
} }
.pb-dashboard-visual {
text-align: center;
}
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: create the 'problem_builder_answer' table for the Answer model."""

    def forwards(self, orm):
        """Create the table and its unique constraint."""
        # Adding model 'Answer'
        db.create_table('problem_builder_answer', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
            ('student_id', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
            ('student_input', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('problem_builder', ['Answer'])
        # Adding unique constraint on 'Answer', fields ['student_id', 'course_id', 'name']
        db.create_unique('problem_builder_answer', ['student_id', 'course_id', 'name'])

    def backwards(self, orm):
        """Drop the table and its unique constraint (reverse of forwards)."""
        # Removing unique constraint on 'Answer', fields ['student_id', 'course_id', 'name']
        db.delete_unique('problem_builder_answer', ['student_id', 'course_id', 'name'])
        # Deleting model 'Answer'
        db.delete_table('problem_builder_answer')

    # South "frozen ORM" description of the model at this migration's point in history.
    models = {
        'problem_builder.answer': {
            'Meta': {'unique_together': "(('student_id', 'course_id', 'name'),)", 'object_name': 'Answer'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'student_input': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
        }
    }

    complete_apps = ['problem_builder']
# -*- coding: utf-8 -*-
from django.db.utils import DatabaseError
from south.db import db
from south.v2 import DataMigration
class Migration(DataMigration):
    """South data migration: copy legacy answers from 'mentoring_answer' into 'problem_builder_answer'."""

    def forwards(self, orm):
        """
        Copy student data from old table to the new one.

        Problem Builder stores student answers in 'problem_builder_answer'.
        However earlier versions [may have] used 'mentoring_answer'.

        If a 'mentoring' app is currently installed on this instance, copy the student data over
        to the new table in case it is being used.
        """
        try:
            db.execute(
                'INSERT INTO problem_builder_answer ('
                'name, student_id, course_id, student_input, created_on, modified_on '
                ') SELECT '
                'name, student_id, course_id, student_input, created_on, modified_on '
                'FROM mentoring_answer'
            )
        except DatabaseError:  # Would like to just catch 'Table does not exist' but can't do that in a db-agnostic way
            print(" - Seems like mentoring_answer does not exist. No data migration needed.")

    def backwards(self, orm):
        """Irreversible: the copied rows cannot be distinguished from native ones."""
        raise RuntimeError("Cannot safely reverse this migration.")

    # South "frozen ORM" description of the model at this migration's point in history.
    models = {
        'problem_builder.answer': {
            'Meta': {'unique_together': "(('student_id', 'course_id', 'name'),)", 'object_name': 'Answer'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'student_input': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
        }
    }

    complete_apps = ['problem_builder']

    symmetrical = True
...@@ -42,16 +42,30 @@ ...@@ -42,16 +42,30 @@
{% for mcq in block.mcqs %} {% for mcq in block.mcqs %}
<tr> <tr>
<th class="desc">{{ mcq.display_name }}</th> <th class="desc">{{ mcq.display_name }}</th>
<td class="value" {% if mcq.color %}style="border-right-color: {{mcq.color}};"{% endif %}> <td class="value"
{% if mcq.value %}{{ mcq.value }}{% endif %} {% if mcq.color %} style="border-right-color: {{mcq.color}};"{% endif %}
{% if not show_numbers %}
{% if mcq.value %} aria-label="Score: {{mcq.value}}" {% else %} aria-label="{% trans 'No value yet' %}" {%endif%}
{% endif %}
>
{% if mcq.value and show_numbers %}
{{ mcq.value }}
{% endif %}
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
{% if block.has_average %} {% if block.has_average %}
<tr class="avg-row"> <tr class="avg-row">
<th class="desc">{% trans "Average" %}</th> <th class="desc">{{ block.average_label }}</th>
<td class="value" {% if block.average_color %}style="border-right-color: {{block.average_color}};"{% endif %}> <td class="value"
{{ block.average|floatformat }} {% if block.average_color %} style="border-right-color: {{block.average_color}};"{% endif %}
{% if not show_numbers %}
{% if block.average %} aria-label="Score: {{block.average|floatformat}}" {% else %} aria-label="{% trans 'No value yet' %}" {%endif%}
{% endif %}
>
{% if show_numbers %}
{{ block.average|floatformat }}
{% endif %}
</td> </td>
</tr> </tr>
{% endif %} {% endif %}
......
...@@ -302,7 +302,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest): ...@@ -302,7 +302,7 @@ class MentoringAssessmentTest(MentoringAssessmentBaseTest):
"This is something everyone has to like about this MRQ", "This is something everyone has to like about this MRQ",
"This is something everyone has to like about this MRQ", "This is something everyone has to like about this MRQ",
"This MRQ is indeed very graceful", "This MRQ is indeed very graceful",
"Nah, there aren't any!" "Nah, there isn't any!"
] ]
self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1"]', do_submit=False) self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1"]', do_submit=False)
self.assert_hidden(controls.review) self.assert_hidden(controls.review)
......
...@@ -17,8 +17,10 @@ ...@@ -17,8 +17,10 @@
# along with this program in a file in the toplevel directory called # along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>. # "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
# #
from textwrap import dedent
from mock import Mock, patch from mock import Mock, patch
from .base_test import ProblemBuilderBaseTest from .base_test import ProblemBuilderBaseTest
from xblockutils.resources import ResourceLoader
class MockSubmissionsAPI(object): class MockSubmissionsAPI(object):
...@@ -54,11 +56,25 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -54,11 +56,25 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
""" """
Test the Student View of a dashboard XBlock linked to some problem builder blocks Test the Student View of a dashboard XBlock linked to some problem builder blocks
""" """
SIMPLE_DASHBOARD = """<pb-dashboard mentoring_ids='["dummy-value"]'/>"""
ALTERNATIVE_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]' show_numbers="false"
average_labels='{"Step 1": "Avg.", "Step 2":"Mean", "Step 3":"Second Quartile"}'
/>
""")
HIDE_QUESTIONS_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]'
exclude_questions='{"Step 1": [2, 3], "Step 2":[3], "Step 3":[2]}'
/>
""")
MALFORMED_HIDE_QUESTIONS_DASHBOARD = dedent("""
<pb-dashboard mentoring_ids='["dummy-value"]'
exclude_questions='{"Step 1": "1234", "Step 2":[3], "Step 3":[2]}'
/>
""")
def setUp(self): def setUp(self):
super(TestDashboardBlock, self).setUp() super(TestDashboardBlock, self).setUp()
# Set up our scenario:
self.load_scenario('dashboard.xml')
# Apply a whole bunch of patches that are needed in lieu of the LMS/CMS runtime and edx-submissions: # Apply a whole bunch of patches that are needed in lieu of the LMS/CMS runtime and edx-submissions:
def get_mentoring_blocks(dashboard_block, mentoring_ids, ignore_errors=True): def get_mentoring_blocks(dashboard_block, mentoring_ids, ignore_errors=True):
...@@ -78,13 +94,21 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -78,13 +94,21 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
), ),
("problem_builder.dashboard.DashboardBlock.get_mentoring_blocks", get_mentoring_blocks), ("problem_builder.dashboard.DashboardBlock.get_mentoring_blocks", get_mentoring_blocks),
("problem_builder.dashboard.sub_api", mock_submisisons_api), ("problem_builder.dashboard.sub_api", mock_submisisons_api),
("problem_builder.mcq.sub_api", mock_submisisons_api) ("problem_builder.mcq.sub_api", mock_submisisons_api),
(
"problem_builder.mentoring.MentoringBlock.url_name",
property(lambda block: block.display_name)
)
) )
for p in patches: for p in patches:
patcher = patch(*p) patcher = patch(*p)
patcher.start() patcher.start()
self.addCleanup(patcher.stop) self.addCleanup(patcher.stop)
# All the patches are installed; now we can proceed with using the XBlocks for tests:
def _install_fixture(self, dashboard_xml):
loader = ResourceLoader(self.__module__)
scenario = loader.render_template("xml_templates/dashboard.xml", {'dashboard': dashboard_xml})
self.set_scenario_xml(scenario)
self.go_to_view("student_view") self.go_to_view("student_view")
self.vertical = self.load_root_xblock() self.vertical = self.load_root_xblock()
...@@ -93,6 +117,7 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -93,6 +117,7 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
Test that when the student has not submitted any question answers, we still see Test that when the student has not submitted any question answers, we still see
the dashboard, and its lists all the MCQ questions in the way we expect. the dashboard, and its lists all the MCQ questions in the way we expect.
""" """
self._install_fixture(self.SIMPLE_DASHBOARD)
dashboard = self.browser.find_element_by_css_selector('.pb-dashboard') dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
step_headers = dashboard.find_elements_by_css_selector('thead') step_headers = dashboard.find_elements_by_css_selector('thead')
self.assertEqual(len(step_headers), 3) self.assertEqual(len(step_headers), 3)
...@@ -107,10 +132,7 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -107,10 +132,7 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
value = mcq.find_element_by_css_selector('td:last-child') value = mcq.find_element_by_css_selector('td:last-child')
self.assertEqual(value.text, '') self.assertEqual(value.text, '')
def test_dashboard(self): def _set_mentoring_values(self):
"""
Submit an answer to each MCQ, then check that the dashboard reflects those answers.
"""
pbs = self.browser.find_elements_by_css_selector('.mentoring') pbs = self.browser.find_elements_by_css_selector('.mentoring')
for pb in pbs: for pb in pbs:
mcqs = pb.find_elements_by_css_selector('fieldset.choices') mcqs = pb.find_elements_by_css_selector('fieldset.choices')
...@@ -119,6 +141,13 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -119,6 +141,13 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
choices[idx].click() choices[idx].click()
self.click_submit(pb) self.click_submit(pb)
def test_dashboard(self):
"""
Submit an answer to each MCQ, then check that the dashboard reflects those answers.
"""
self._install_fixture(self.SIMPLE_DASHBOARD)
self._set_mentoring_values()
# Reload the page: # Reload the page:
self.go_to_view("student_view") self.go_to_view("student_view")
dashboard = self.browser.find_element_by_css_selector('.pb-dashboard') dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
...@@ -138,3 +167,93 @@ class TestDashboardBlock(ProblemBuilderBaseTest): ...@@ -138,3 +167,93 @@ class TestDashboardBlock(ProblemBuilderBaseTest):
right_col = avg_row.find_element_by_css_selector('.value') right_col = avg_row.find_element_by_css_selector('.value')
expected_average = {0: "2", 1: "3", 2: "1"}[step_num] expected_average = {0: "2", 1: "3", 2: "1"}[step_num]
self.assertEqual(right_col.text, expected_average) self.assertEqual(right_col.text, expected_average)
def test_dashboard_alternative(self):
    """
    Submit an answer to each MCQ, then check that the dashboard reflects those answers with alternative
    configuration:
    * Average label is "Avg." instead of default "Average"
    * Numerical values are not shown
    """
    self._install_fixture(self.ALTERNATIVE_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    steps = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(steps), 3)
    # Expected per-step labels, as configured via average_labels in ALTERNATIVE_DASHBOARD
    average_labels = ["Avg.", "Mean", "Second Quartile"]
    for step_num, step in enumerate(steps):
        mcq_rows = step.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertTrue(2 <= len(mcq_rows) <= 3)
        for mcq in mcq_rows:
            value = mcq.find_element_by_css_selector('td.value')
            # show_numbers is false, so no numeric text should render in the cell
            self.assertEqual(value.text, '')
        # Check the average:
        avg_row = step.find_element_by_css_selector('tr.avg-row')
        left_col = avg_row.find_element_by_css_selector('.desc')
        self.assertEqual(left_col.text, average_labels[step_num])
        right_col = avg_row.find_element_by_css_selector('.value')
        # Average value is likewise hidden when show_numbers is false
        self.assertEqual(right_col.text, "")
def test_dashboard_exclude_questions(self):
    """
    Submit an answer to each MCQ, then check that the dashboard ignores questions it is configured to ignore
    """
    self._install_fixture(self.HIDE_QUESTIONS_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    steps = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(steps), 3)
    # Expected visible row counts per step after the exclude_questions rules are applied
    lengths = [1, 2, 1]
    for step_num, step in enumerate(steps):
        mcq_rows = step.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertEqual(len(mcq_rows), lengths[step_num])
        for mcq in mcq_rows:
            value = mcq.find_element_by_css_selector('td.value')
            self.assertIn(value.text, ('1', '2', '3', '4'))
        # Check the average (computed only over the non-excluded questions):
        avg_row = step.find_element_by_css_selector('tr.avg-row')
        left_col = avg_row.find_element_by_css_selector('.desc')
        self.assertEqual(left_col.text, "Average")
        right_col = avg_row.find_element_by_css_selector('.value')
        expected_average = {0: "1", 1: "3", 2: "1"}[step_num]
        self.assertEqual(right_col.text, expected_average)
def test_dashboard_malformed_exclude_questions(self):
    """
    Submit an answer to each MCQ, then check that a malformed exclude_questions rule
    (a string instead of a list of ints, for "Step 1") is ignored gracefully: all of that
    step's questions remain visible and counted in its average, while the valid rules for
    the other steps still apply.
    """
    self._install_fixture(self.MALFORMED_HIDE_QUESTIONS_DASHBOARD)
    self._set_mentoring_values()
    # Reload the page:
    self.go_to_view("student_view")
    dashboard = self.browser.find_element_by_css_selector('.pb-dashboard')
    steps = dashboard.find_elements_by_css_selector('tbody')
    self.assertEqual(len(steps), 3)
    # Step 1's malformed rule is skipped (all 3 rows shown); steps 2 and 3 honor their valid rules
    lengths = [3, 2, 1]
    for step_num, step in enumerate(steps):
        mcq_rows = step.find_elements_by_css_selector('tr:not(.avg-row)')
        self.assertEqual(len(mcq_rows), lengths[step_num])
        for mcq in mcq_rows:
            value = mcq.find_element_by_css_selector('td.value')
            self.assertIn(value.text, ('1', '2', '3', '4'))
        # Check the average:
        avg_row = step.find_element_by_css_selector('tr.avg-row')
        left_col = avg_row.find_element_by_css_selector('.desc')
        self.assertEqual(left_col.text, "Average")
        right_col = avg_row.find_element_by_css_selector('.value')
        expected_average = {0: "2", 1: "3", 2: "1"}[step_num]
        self.assertEqual(right_col.text, expected_average)
...@@ -193,7 +193,7 @@ class QuestionnaireBlockTest(MentoringBaseTest): ...@@ -193,7 +193,7 @@ class QuestionnaireBlockTest(MentoringBaseTest):
"This is something everyone has to like about this MRQ", "This is something everyone has to like about this MRQ",
"This is something everyone has to like about beauty", "This is something everyone has to like about beauty",
"This MRQ is indeed very graceful", "This MRQ is indeed very graceful",
"Nah, there aren\\'t any!" "Nah, there isn\\'t any!"
] ]
self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1_7"]') self.popup_check(mentoring, item_feedbacks, prefix='div[data-name="mrq_1_1_7"]')
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
<pb-tip values='["gracefulness"]' width ="200" height = "200">This MCQ is indeed very graceful</pb-tip> <pb-tip values='["gracefulness"]' width ="200" height = "200">This MCQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MCQ</pb-tip> <pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MCQ</pb-tip>
<pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip> <pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip>
<pb-tip values='["bugs"]' width = "100" height = "200">Nah, there aren\'t any!</pb-tip> <pb-tip values='["bugs"]' width = "100" height = "200">Nah, there isn\'t any!</pb-tip>
</pb-mcq> </pb-mcq>
<pb-message type="completed"> <pb-message type="completed">
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
<pb-tip values='["gracefulness"]' width ="200" height = "200">This MRQ is indeed very graceful</pb-tip> <pb-tip values='["gracefulness"]' width ="200" height = "200">This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MRQ</pb-tip> <pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip> <pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip>
<pb-tip values='["bugs"]' width = "100" height = "200">Nah, there aren\'t any!</pb-tip> <pb-tip values='["bugs"]' width = "100" height = "200">Nah, there isn\'t any!</pb-tip>
<!--<pb-message type="on-submit">This is deliberately commented out to test parsing of XML comments</pb-message> --> <!--<pb-message type="on-submit">This is deliberately commented out to test parsing of XML comments</pb-message> -->
</pb-mrq> </pb-mrq>
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
<pb-tip values='["gracefulness"]' width ="200" height = "200">This MRQ is indeed very graceful</pb-tip> <pb-tip values='["gracefulness"]' width ="200" height = "200">This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MRQ</pb-tip> <pb-tip values='["elegance"]' width ="600" height = "800">This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip> <pb-tip values='["beauty"]' width ="400" height = "600">This is something everyone has to like about beauty</pb-tip>
<pb-tip values='["bugs"]' width = "100" height = "200">Nah, there aren\'t any!</pb-tip> <pb-tip values='["bugs"]' width = "100" height = "200">Nah, there isn\'t any!</pb-tip>
</pb-mrq> </pb-mrq>
<pb-message type="completed"> <pb-message type="completed">
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip> <pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip> <pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip> <pb-tip values='["bugs"]'>Nah, there isn't any!</pb-tip>
</pb-mrq> </pb-mrq>
<pb-message type="on-assessment-review"> <pb-message type="on-assessment-review">
......
...@@ -53,6 +53,5 @@ ...@@ -53,6 +53,5 @@
<pb-choice value="C">Option C</pb-choice> <pb-choice value="C">Option C</pb-choice>
</pb-mcq> </pb-mcq>
</problem-builder> </problem-builder>
<pb-dashboard mentoring_ids='["dummy-value"]'> {{ dashboard|safe }}
</pb-dashboard>
</vertical_demo> </vertical_demo>
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
<pb-choice value="bugs">Its bugs</pb-choice> <pb-choice value="bugs">Its bugs</pb-choice>
<pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip> <pb-tip values='["gracefulness"]'>This MRQ is indeed very graceful</pb-tip>
<pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip> <pb-tip values='["elegance","beauty"]'>This is something everyone has to like about this MRQ</pb-tip>
<pb-tip values='["bugs"]'>Nah, there aren't any!</pb-tip> <pb-tip values='["bugs"]'>Nah, there isn't any!</pb-tip>
</pb-mrq> </pb-mrq>
<pb-message type="completed"> <pb-message type="completed">
<p>Congratulations!</p> <p>Congratulations!</p>
......
...@@ -51,7 +51,7 @@ Changes from the original: ...@@ -51,7 +51,7 @@ Changes from the original:
<tip require="gracefulness">This MRQ is indeed very graceful</tip> <tip require="gracefulness">This MRQ is indeed very graceful</tip>
<tip require="elegance,beauty">This is something everyone has to like about this MRQ</tip> <tip require="elegance,beauty">This is something everyone has to like about this MRQ</tip>
<tip reject="bugs">Nah, there aren't any!</tip> <tip reject="bugs">Nah, there isn't any!</tip>
<message type="on-submit">Thank you for answering!</message> <message type="on-submit">Thank you for answering!</message>
</mrq> </mrq>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment