Commit 7cd071d2 by Tim Krones

Merge pull request #86 from open-craft/scale-plot-interop

Step Builder: Interoperability for scale and plot blocks
parents addea00c 144bf820
...@@ -185,7 +185,7 @@ class RatingBlock(MCQBlock): ...@@ -185,7 +185,7 @@ class RatingBlock(MCQBlock):
list_values_provider=QuestionnaireAbstractBlock.choice_values_provider, list_values_provider=QuestionnaireAbstractBlock.choice_values_provider,
list_style='set', # Unordered, unique items. Affects the UI editor. list_style='set', # Unordered, unique items. Affects the UI editor.
) )
editable_fields = MCQBlock.editable_fields + ('low', 'high', 'name') editable_fields = MCQBlock.editable_fields + ('low', 'high')
@property @property
def all_choice_values(self): def all_choice_values(self):
...@@ -212,3 +212,25 @@ class RatingBlock(MCQBlock): ...@@ -212,3 +212,25 @@ class RatingBlock(MCQBlock):
})) }))
self.render_children(context, fragment, can_reorder=True, can_add=False) self.render_children(context, fragment, can_reorder=True, can_add=False)
return fragment return fragment
@property
def url_name(self):
    """
    The ID used to reference this block (e.g. from a plot's ``claims`` field).

    In Studio/LMS a mixin on the runtime supplies ``url_name``; in the
    workbench or any other platform that mixin is absent, so fall back to
    the block's ``name`` field.
    """
    # getattr's default kicks in on AttributeError, exactly like the
    # try/except fallback it replaces.
    return getattr(super(RatingBlock, self), 'url_name', self.name)
def student_view(self, context):
    """
    Render the rating question for students.

    When rendered inside Studio's author-edit view, append a footer that
    shows this block's url_name so authors can reference it from plot blocks.
    """
    fragment = super(RatingBlock, self).student_view(context)
    # The workbench passes no context, so guard before reading from it.
    in_author_edit_view = bool(context) and context.get('author_edit_view')
    if in_author_edit_view:
        footer_html = loader.render_template('templates/html/rating_edit_footer.html', {
            "url_name": self.url_name
        })
        fragment.add_content(footer_html)
    return fragment
...@@ -61,7 +61,7 @@ def _normalize_id(key): ...@@ -61,7 +61,7 @@ def _normalize_id(key):
@XBlock.wants('user') @XBlock.wants('user')
class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin, XBlockWithPreviewMixin, XBlock): class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin, XBlockWithPreviewMixin, XBlock):
""" """
XBlock that displays plot that summarizes answers to scale questions. XBlock that displays plot that summarizes answers to scale and/or rating questions.
""" """
CATEGORY = 'sb-plot' CATEGORY = 'sb-plot'
...@@ -141,7 +141,7 @@ class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin ...@@ -141,7 +141,7 @@ class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin
'Claims and questions that should be included in the plot. ' 'Claims and questions that should be included in the plot. '
'Each line defines a triple of the form "claim, q1, q2", ' 'Each line defines a triple of the form "claim, q1, q2", '
'where "claim" is arbitrary text that represents a claim, ' 'where "claim" is arbitrary text that represents a claim, '
'and "q1" and "q2" are IDs of scale questions. ' 'and "q1" and "q2" are IDs of scale or rating questions. '
), ),
default="", default="",
multiline_editor=True, multiline_editor=True,
...@@ -177,9 +177,9 @@ class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin ...@@ -177,9 +177,9 @@ class PlotBlock(StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin
claim, q1, q2 = line.split(', ') claim, q1, q2 = line.split(', ')
r1, r2 = None, None r1, r2 = None, None
for question_id, question in zip(question_ids, questions): for question_id, question in zip(question_ids, questions):
if question.name == q1: if question.url_name == q1:
r1 = response_function(question, question_id) r1 = response_function(question, question_id)
if question.name == q2: if question.url_name == q2:
r2 = response_function(question, question_id) r2 = response_function(question, question_id)
if r1 is not None and r2 is not None: if r1 is not None and r2 is not None:
break break
......
...@@ -87,6 +87,17 @@ class SliderBlock( ...@@ -87,6 +87,17 @@ class SliderBlock(
editable_fields = ('min_label', 'max_label', 'display_name', 'question', 'show_title') editable_fields = ('min_label', 'max_label', 'display_name', 'question', 'show_title')
@property
def url_name(self):
    """
    The ID used to reference this block (e.g. from a plot's ``claims`` field).

    In Studio/LMS a mixin on the runtime supplies ``url_name``; in the
    workbench or any other platform that mixin is absent, so fall back to
    the block's ``name`` field.
    """
    # getattr's default kicks in on AttributeError, exactly like the
    # try/except fallback it replaces.
    return getattr(super(SliderBlock, self), 'url_name', self.name)
def mentoring_view(self, context): def mentoring_view(self, context):
""" Main view of this block """ """ Main view of this block """
context = context.copy() if context else {} context = context.copy() if context else {}
...@@ -145,11 +156,17 @@ class SliderBlock( ...@@ -145,11 +156,17 @@ class SliderBlock(
self.student_value = value self.student_value = value
if sub_api: if sub_api:
# Also send to the submissions API: # Also send to the submissions API:
sub_api.create_submission(self.student_item_key, {'value': value}) sub_api.create_submission(self.student_item_key, value)
result = self.get_last_result() result = self.get_last_result()
log.debug(u'Slider submission result: %s', result) log.debug(u'Slider submission result: %s', result)
return result return result
def get_submission_display(self, submission):
    """
    Return the human-readable version of a submission value.

    Scales the stored value by 100 — presumably the raw slider value is a
    0.0-1.0 fraction shown to users as a percentage (TODO: confirm against
    the slider's value range).
    """
    return 100 * submission
def validate_field_data(self, validation, data): def validate_field_data(self, validation, data):
""" """
Validate this block's field data. Validate this block's field data.
......
...@@ -217,6 +217,7 @@ class MentoringStepBlock( ...@@ -217,6 +217,7 @@ class MentoringStepBlock(
'head': u'<div class="mentoring">', 'head': u'<div class="mentoring">',
'tail': u'</div>' 'tail': u'</div>'
} }
local_context['author_edit_view'] = True
fragment = super(MentoringStepBlock, self).author_edit_view(local_context) fragment = super(MentoringStepBlock, self).author_edit_view(local_context)
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css')) fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css'))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder-edit.css')) fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder-edit.css'))
......
{% load i18n %}
{# Studio author-view footer: shows the block's url_name so authors can reference it (e.g. from a plot's claims). #}
<div class="xblock-header-secondary url-name-footer">
    <span class="url-name-label">{% trans "ID for referencing this rating block:" %}</span>
    <span class="url-name">{{ url_name }}</span>
</div>
...@@ -36,8 +36,23 @@ class ExtendedMockSubmissionsAPI(MockSubmissionsAPI): ...@@ -36,8 +36,23 @@ class ExtendedMockSubmissionsAPI(MockSubmissionsAPI):
) )
class MultipleSliderBlocksTestMixins():
    """ Mixins for testing slider blocks. Allows multiple slider blocks on the page. """

    def get_slider_value(self, slider_number):
        """Return the current value of the Nth slider on the page (1-based)."""
        # Removed a leftover debug print of the slider number that polluted
        # test output.
        return int(
            self.browser.execute_script(
                "return $('.pb-slider-range').eq(arguments[0]).val()", slider_number - 1
            )
        )

    def set_slider_value(self, slider_number, val):
        """Set the value of the Nth slider (1-based) and fire its change event."""
        self.browser.execute_script(
            "$('.pb-slider-range').eq(arguments[0]).val(arguments[1]).change()", slider_number - 1, val
        )
@ddt @ddt
class StepBuilderTest(MentoringAssessmentBaseTest): class StepBuilderTest(MentoringAssessmentBaseTest, MultipleSliderBlocksTestMixins):
def setUp(self): def setUp(self):
super(StepBuilderTest, self).setUp() super(StepBuilderTest, self).setUp()
...@@ -57,13 +72,17 @@ class StepBuilderTest(MentoringAssessmentBaseTest): ...@@ -57,13 +72,17 @@ class StepBuilderTest(MentoringAssessmentBaseTest):
mock_submissions_api mock_submissions_api
), ),
( (
"problem_builder.slider.sub_api",
mock_submissions_api
),
(
"problem_builder.sub_api.SubmittingXBlockMixin.student_item_key", "problem_builder.sub_api.SubmittingXBlockMixin.student_item_key",
property( property(
lambda block: dict( lambda block: dict(
student_id="student_id", student_id="student_id",
course_id="course_id", course_id="course_id",
item_id=block.scope_ids.usage_id, item_id=block.scope_ids.usage_id,
item_type="pb-rating" item_type=block.scope_ids.block_type
) )
) )
), ),
...@@ -765,8 +784,8 @@ class StepBuilderTest(MentoringAssessmentBaseTest): ...@@ -765,8 +784,8 @@ class StepBuilderTest(MentoringAssessmentBaseTest):
'point_color': PointColors.ORANGE, 'point_color': PointColors.ORANGE,
'titles': ['2 + 2 = 5: 1, 5', 'The answer to everything is 42: 5, 1'], 'titles': ['2 + 2 = 5: 1, 5', 'The answer to everything is 42: 5, 1'],
'positions': [ 'positions': [
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
('4', '380'), # Values computed according to xScale and yScale (cf. plot.js) ('4', '380'), # Values computed according to xScale and yScale (cf. plot.js)
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
], ],
} }
average_overlay = { average_overlay = {
...@@ -775,8 +794,85 @@ class StepBuilderTest(MentoringAssessmentBaseTest): ...@@ -775,8 +794,85 @@ class StepBuilderTest(MentoringAssessmentBaseTest):
'point_color': PointColors.PURPLE, 'point_color': PointColors.PURPLE,
'titles': ['2 + 2 = 5: 1, 5', 'The answer to everything is 42: 5, 1'], 'titles': ['2 + 2 = 5: 1, 5', 'The answer to everything is 42: 5, 1'],
'positions': [ 'positions': [
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
('4', '380'), # Values computed according to xScale and yScale (cf. plot.js) ('4', '380'), # Values computed according to xScale and yScale (cf. plot.js)
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
],
}
# Check if plot shows correct overlay(s) initially (default overlay on, average overlay off)
self.check_overlays(step_builder, total_num_points=2, overlays=[default_overlay])
# Check if plot shows correct overlay(s) (default overlay on, average overlay on)
self.click_average_button(plot_controls, overlay_on=True, color_on='rgba(128, 0, 128, 1)') # purple
self.check_overlays(step_builder, 4, overlays=[default_overlay, average_overlay])
# Check if plot shows correct overlay(s) (default overlay off, average overlay on)
self.click_default_button(plot_controls, overlay_on=False)
self.check_overlays(step_builder, 2, overlays=[average_overlay])
# Check if plot shows correct overlay(s) (default overlay off, average overlay off)
self.click_average_button(plot_controls, overlay_on=False)
self.plot_empty(step_builder)
# Check if plot shows correct overlay(s) (default overlay on, average overlay off)
self.click_default_button(plot_controls, overlay_on=True, color_on='rgba(255, 165, 0, 1)') # orange
self.check_overlays(step_builder, 2, overlays=[default_overlay])
# Check quadrant labels
self.check_quadrant_labels(step_builder, plot_controls, hidden=True)
plot_controls.quadrants_button.click()
self.check_quadrant_labels(
step_builder, plot_controls, hidden=False,
labels=['Custom Q1 label', 'Custom Q2 label', 'Custom Q3 label', 'Custom Q4 label']
)
def answer_scale_question(self, question_number, step_builder, question, value):
    """
    Verify the default state and text of a slider question, then set it.

    Checks the slider starts at its midpoint (50), waits for the question
    text to appear, asserts the expected question is shown, and drags the
    slider to ``value``.
    """
    self.assertEqual(self.get_slider_value(question_number), 50)
    expected_text = self.question_text(question_number)
    self.wait_until_text_in(expected_text, step_builder)
    self.assertIn(question, step_builder.text)
    self.set_slider_value(question_number, value)
def test_plot_with_scale_questions(self):
step_builder, controls = self.load_assessment_scenario("step_builder_plot_scale_questions.xml", {})
# Step 1: Questions
# Provide first rating
self.answer_scale_question(1, step_builder, "How much do you agree?", 17)
# Provide second rating
self.answer_scale_question(2, step_builder, "How important do you think this is?", 83)
# Advance
self.submit_and_go_to_next_step(controls)
# Step 2: Questions
# Provide first rating
self.answer_rating_question(2, 1, step_builder, "How much do you agree?", "5 - Agree")
# Provide second rating
self.answer_rating_question(2, 2, step_builder, "How important do you think this is?", "1 - Not important")
# Advance
self.submit_and_go_to_next_step(controls, last=True)
# Step 2: Plot
# Obtain references to plot controls
plot_controls = self.plot_controls(step_builder)
# Overlay data
default_overlay = {
'selector': '.claim-default',
'num_points': 2,
'point_color': 'rgb(255, 165, 0)', # orange
'titles': ['2 + 2 = 5: 17, 83', 'The answer to everything is 42: 5, 1'],
'positions': [
('68', '68'), # Values computed according to xScale and yScale (cf. plot.js)
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
],
}
average_overlay = {
'selector': '.claim-average',
'num_points': 2,
'point_color': 'rgb(128, 0, 128)', # purple
'titles': ['2 + 2 = 5: 17, 83', 'The answer to everything is 42: 5, 1'],
'positions': [
('68', '68'), # Values computed according to xScale and yScale (cf. plot.js)
('20', '396'), # Values computed according to xScale and yScale (cf. plot.js)
], ],
} }
# Check if plot shows correct overlay(s) initially (default overlay on, average overlay off) # Check if plot shows correct overlay(s) initially (default overlay on, average overlay off)
......
<!-- Test scenario: a Step Builder with two slider (scale) questions, two rating
     questions, and a plot whose claims reference both kinds by name,
     verifying scale/plot interoperability. -->
<step-builder url_name="step-builder" display_name="Step Builder">
    <sb-step display_name="First step">
        <pb-slider name="slider_1_1"
                   question="How much do you agree?">
        </pb-slider>
        <pb-slider name="slider_1_2"
                   question="How important do you think this is?"
                   min_label="Not important"
                   max_label="Very important">
        </pb-slider>
    </sb-step>
    <sb-step display_name="Second step">
        <pb-rating name="rating_2_1"
                   low="Disagree"
                   high="Agree"
                   question="How much do you agree?"
                   correct_choices='["1", "2", "3", "4","5"]'>
        </pb-rating>
        <pb-rating name="rating_2_2"
                   low="Not important"
                   high="Very important"
                   question="How important do you think this is?"
                   correct_choices='["1", "2", "3", "4","5"]'>
        </pb-rating>
    </sb-step>
    <sb-step display_name="Last step">
        <!-- Each claims line is "claim text, question_id_1, question_id_2";
             &#10; separates lines. -->
        <sb-plot plot_label="Custom plot label"
                 point_color_default="orange"
                 point_color_average="purple"
                 q1_label="Custom Q1 label"
                 q2_label="Custom Q2 label"
                 q3_label="Custom Q3 label"
                 q4_label="Custom Q4 label"
                 claims="2 + 2 = 5, slider_1_1, slider_1_2&#10;The answer to everything is 42, rating_2_1, rating_2_2">
        </sb-plot>
    </sb-step>
    <sb-review-step></sb-review-step>
</step-builder>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment