Commit 5386d0a4 by Sarina Canelake

Merge pull request #10845 from edx/kill-ora1

Remove ORA1: Main Pull Request
parents 2695e7b3 21e4c90c
...@@ -698,7 +698,7 @@ class MiscCourseTests(ContentStoreTestCase): ...@@ -698,7 +698,7 @@ class MiscCourseTests(ContentStoreTestCase):
self.check_components_on_page( self.check_components_on_page(
ADVANCED_COMPONENT_TYPES, ADVANCED_COMPONENT_TYPES,
['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation', ['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation',
'Open Response Assessment', 'Peer Grading Interface', 'split_test'], 'split_test'],
) )
@ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname') @ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname')
......
...@@ -771,7 +771,7 @@ class CourseMetadataEditingTest(CourseTestCase): ...@@ -771,7 +771,7 @@ class CourseMetadataEditingTest(CourseTestCase):
{ {
"advertised_start": {"value": "start A"}, "advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2}, "days_early_for_beta": {"value": 2},
"advanced_modules": {"value": ['combinedopenended']}, "advanced_modules": {"value": ['notes']},
}, },
user=self.user user=self.user
) )
...@@ -781,7 +781,7 @@ class CourseMetadataEditingTest(CourseTestCase): ...@@ -781,7 +781,7 @@ class CourseMetadataEditingTest(CourseTestCase):
# Tab gets tested in test_advanced_settings_munge_tabs # Tab gets tested in test_advanced_settings_munge_tabs
self.assertIn('advanced_modules', test_model, 'Missing advanced_modules') self.assertIn('advanced_modules', test_model, 'Missing advanced_modules')
self.assertEqual(test_model['advanced_modules']['value'], ['combinedopenended'], 'advanced_module is not updated') self.assertEqual(test_model['advanced_modules']['value'], ['notes'], 'advanced_module is not updated')
def test_validate_from_json_wrong_inputs(self): def test_validate_from_json_wrong_inputs(self):
# input incorrectly formatted data # input incorrectly formatted data
...@@ -905,48 +905,21 @@ class CourseMetadataEditingTest(CourseTestCase): ...@@ -905,48 +905,21 @@ class CourseMetadataEditingTest(CourseTestCase):
""" """
Test that adding and removing specific advanced components adds and removes tabs. Test that adding and removing specific advanced components adds and removes tabs.
""" """
open_ended_tab = {"type": "open_ended", "name": "Open Ended Panel"}
peer_grading_tab = {"type": "peer_grading", "name": "Peer grading"}
# First ensure that none of the tabs are visible # First ensure that none of the tabs are visible
self.assertNotIn(open_ended_tab, self.course.tabs)
self.assertNotIn(peer_grading_tab, self.course.tabs)
self.assertNotIn(self.notes_tab, self.course.tabs) self.assertNotIn(self.notes_tab, self.course.tabs)
# Now add the "combinedopenended" component and verify that the tab has been added # Now enable student notes and verify that the "My Notes" tab has been added
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(open_ended_tab, course.tabs)
self.assertIn(peer_grading_tab, course.tabs)
self.assertNotIn(self.notes_tab, course.tabs)
# Now enable student notes and verify that the "My Notes" tab has also been added
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended", "notes"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(open_ended_tab, course.tabs)
self.assertIn(peer_grading_tab, course.tabs)
self.assertIn(self.notes_tab, course.tabs)
# Now remove the "combinedopenended" component and verify that the tab is gone
self.client.ajax_post(self.course_setting_url, { self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["notes"]} ADVANCED_COMPONENT_POLICY_KEY: {"value": ["notes"]}
}) })
course = modulestore().get_course(self.course.id) course = modulestore().get_course(self.course.id)
self.assertNotIn(open_ended_tab, course.tabs)
self.assertNotIn(peer_grading_tab, course.tabs)
self.assertIn(self.notes_tab, course.tabs) self.assertIn(self.notes_tab, course.tabs)
# Finally disable student notes and verify that the "My Notes" tab is gone # Disable student notes and verify that the "My Notes" tab is gone
self.client.ajax_post(self.course_setting_url, { self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": [""]} ADVANCED_COMPONENT_POLICY_KEY: {"value": [""]}
}) })
course = modulestore().get_course(self.course.id) course = modulestore().get_course(self.course.id)
self.assertNotIn(open_ended_tab, course.tabs)
self.assertNotIn(peer_grading_tab, course.tabs)
self.assertNotIn(self.notes_tab, course.tabs) self.assertNotIn(self.notes_tab, course.tabs)
def test_advanced_components_munge_tabs_validation_failure(self): def test_advanced_components_munge_tabs_validation_failure(self):
......
...@@ -219,26 +219,6 @@ class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase): ...@@ -219,26 +219,6 @@ class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase):
conditional_module.show_tag_list conditional_module.show_tag_list
) )
def test_rewrite_reference(self):
module_store = modulestore()
target_id = module_store.make_course_key('testX', 'peergrading_copy', 'copy_run')
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['open_ended'],
target_id=target_id,
create_if_not_present=True
)
peergrading_module = module_store.get_item(
target_id.make_usage_key('peergrading', 'PeerGradingLinked')
)
self.assertIsNotNone(peergrading_module)
self.assertEqual(
target_id.make_usage_key('combinedopenended', 'SampleQuestion'),
peergrading_module.link_to_location
)
def test_rewrite_reference_value_dict_published(self): def test_rewrite_reference_value_dict_published(self):
""" """
Test rewriting references in ReferenceValueDict, specifically with published content. Test rewriting references in ReferenceValueDict, specifically with published content.
......
...@@ -30,11 +30,11 @@ from student.auth import has_course_author_access ...@@ -30,11 +30,11 @@ from student.auth import has_course_author_access
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from models.settings.course_grading import CourseGradingModel from models.settings.course_grading import CourseGradingModel
__all__ = ['OPEN_ENDED_COMPONENT_TYPES', __all__ = [
'ADVANCED_COMPONENT_POLICY_KEY', 'ADVANCED_COMPONENT_POLICY_KEY',
'container_handler', 'container_handler',
'component_handler' 'component_handler'
] ]
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
...@@ -43,7 +43,6 @@ COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video'] ...@@ -43,7 +43,6 @@ COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video']
# Constants for determining if these components should be enabled for this course # Constants for determining if these components should be enabled for this course
SPLIT_TEST_COMPONENT_TYPE = 'split_test' SPLIT_TEST_COMPONENT_TYPE = 'split_test'
OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"]
NOTE_COMPONENT_TYPES = ['notes'] NOTE_COMPONENT_TYPES = ['notes']
if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'): if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'):
......
...@@ -10,6 +10,7 @@ import pytz ...@@ -10,6 +10,7 @@ import pytz
from django.conf import settings from django.conf import settings
from django.core.exceptions import PermissionDenied from django.core.exceptions import PermissionDenied
from django.test.utils import override_settings
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
...@@ -440,6 +441,7 @@ class TestCourseOutline(CourseTestCase): ...@@ -440,6 +441,7 @@ class TestCourseOutline(CourseTestCase):
info['block_types_enabled'], info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types) any(component in advanced_modules for component in deprecated_block_types)
) )
self.assertItemsEqual(info['blocks'], expected_blocks) self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual( self.assertEqual(
info['advance_settings_url'], info['advance_settings_url'],
...@@ -455,27 +457,29 @@ class TestCourseOutline(CourseTestCase): ...@@ -455,27 +457,29 @@ class TestCourseOutline(CourseTestCase):
""" """
Verify deprecated warning info for single deprecated feature. Verify deprecated warning info for single deprecated feature.
""" """
block_types = settings.DEPRECATED_BLOCK_TYPES block_types = ['notes']
course_module = modulestore().get_item(self.course.location) with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish) course_module = modulestore().get_item(self.course.location)
info = _deprecated_blocks_info(course_module, block_types) self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
self._verify_deprecated_info( info = _deprecated_blocks_info(course_module, block_types)
course_module.id, self._verify_deprecated_info(
course_module.advanced_modules, course_module.id,
info, course_module.advanced_modules,
block_types info,
) block_types
)
def test_verify_deprecated_warning_message_with_multiple_features(self): def test_verify_deprecated_warning_message_with_multiple_features(self):
""" """
Verify deprecated warning info for multiple deprecated features. Verify deprecated warning info for multiple deprecated features.
""" """
block_types = ['peergrading', 'combinedopenended', 'openassessment'] block_types = ['notes', 'lti']
course_module = modulestore().get_item(self.course.location) with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
self._create_test_data(course_module, create_blocks=True, block_types=block_types) course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types)
info = _deprecated_blocks_info(course_module, block_types) info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types) self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data( @ddt.data(
{'delete_vertical': True}, {'delete_vertical': True},
...@@ -492,7 +496,7 @@ class TestCourseOutline(CourseTestCase): ...@@ -492,7 +496,7 @@ class TestCourseOutline(CourseTestCase):
un-published block(s). This behavior should be same if we delete un-published block(s). This behavior should be same if we delete
unpublished vertical or problem. unpublished vertical or problem.
""" """
block_types = ['peergrading'] block_types = ['notes']
course_module = modulestore().get_item(self.course.location) course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create( vertical1 = ItemFactory.create(
...@@ -500,8 +504,8 @@ class TestCourseOutline(CourseTestCase): ...@@ -500,8 +504,8 @@ class TestCourseOutline(CourseTestCase):
) )
problem1 = ItemFactory.create( problem1 = ItemFactory.create(
parent_location=vertical1.location, parent_location=vertical1.location,
category='peergrading', category='notes',
display_name='peergrading problem in vert1', display_name='notes problem in vert1',
publish_item=False publish_item=False
) )
...@@ -515,8 +519,8 @@ class TestCourseOutline(CourseTestCase): ...@@ -515,8 +519,8 @@ class TestCourseOutline(CourseTestCase):
) )
ItemFactory.create( ItemFactory.create(
parent_location=vertical2.location, parent_location=vertical2.location,
category='peergrading', category='notes',
display_name='peergrading problem in vert2', display_name='notes problem in vert2',
pubish_item=True pubish_item=True
) )
# At this point CourseStructure will contain both the above # At this point CourseStructure will contain both the above
...@@ -526,8 +530,8 @@ class TestCourseOutline(CourseTestCase): ...@@ -526,8 +530,8 @@ class TestCourseOutline(CourseTestCase):
self.assertItemsEqual( self.assertItemsEqual(
info['blocks'], info['blocks'],
[ [
[reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'], [reverse_usage_url('container_handler', vertical1.location), 'notes problem in vert1'],
[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2'] [reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']
] ]
) )
...@@ -542,7 +546,7 @@ class TestCourseOutline(CourseTestCase): ...@@ -542,7 +546,7 @@ class TestCourseOutline(CourseTestCase):
# There shouldn't be any info present about un-published vertical1 # There shouldn't be any info present about un-published vertical1
self.assertEqual( self.assertEqual(
info['blocks'], info['blocks'],
[[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']] [[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']]
) )
......
...@@ -1388,28 +1388,28 @@ class TestComponentTemplates(CourseTestCase): ...@@ -1388,28 +1388,28 @@ class TestComponentTemplates(CourseTestCase):
Test the handling of advanced problem templates. Test the handling of advanced problem templates.
""" """
problem_templates = self.get_templates_of_type('problem') problem_templates = self.get_templates_of_type('problem')
ora_template = self.get_template(problem_templates, u'Peer Assessment') circuit_template = self.get_template(problem_templates, u'Circuit Schematic Builder')
self.assertIsNotNone(ora_template) self.assertIsNotNone(circuit_template)
self.assertEqual(ora_template.get('category'), 'openassessment') self.assertEqual(circuit_template.get('category'), 'problem')
self.assertIsNone(ora_template.get('boilerplate_name', None)) self.assertEqual(circuit_template.get('boilerplate_name'), 'circuitschematic.yaml')
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"]) @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
def test_ora1_no_advance_component_button(self): def test_deprecated_no_advance_component_button(self):
""" """
Test that there will be no `Advanced` button on unit page if `combinedopenended` and `peergrading` are Test that there will be no `Advanced` button on unit page if units are
deprecated provided that there are only 'combinedopenended', 'peergrading' modules in `Advanced Module List` deprecated provided that they are the only modules in `Advanced Module List`
""" """
self.course.advanced_modules.extend(['combinedopenended', 'peergrading']) self.course.advanced_modules.extend(['poll', 'survey'])
templates = get_component_templates(self.course) templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates] button_names = [template['display_name'] for template in templates]
self.assertNotIn('Advanced', button_names) self.assertNotIn('Advanced', button_names)
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"]) @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
def test_cannot_create_ora1_problems(self): def test_cannot_create_deprecated_problems(self):
""" """
Test that we can't create ORA1 problems if `combinedopenended` and `peergrading` are deprecated Test that we can't create problems if they are deprecated
""" """
self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading']) self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course) templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates] button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names) self.assertIn('Advanced', button_names)
...@@ -1418,17 +1418,17 @@ class TestComponentTemplates(CourseTestCase): ...@@ -1418,17 +1418,17 @@ class TestComponentTemplates(CourseTestCase):
self.assertEqual(template_display_names, ['Annotation']) self.assertEqual(template_display_names, ['Annotation'])
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', []) @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', [])
def test_create_ora1_problems(self): def test_create_non_deprecated_problems(self):
""" """
Test that we can create ORA1 problems if `combinedopenended` and `peergrading` are not deprecated Test that we can create problems if they are not deprecated
""" """
self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading']) self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course) templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates] button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names) self.assertIn('Advanced', button_names)
self.assertEqual(len(templates[0]['templates']), 3) self.assertEqual(len(templates[0]['templates']), 3)
template_display_names = [template['display_name'] for template in templates[0]['templates']] template_display_names = [template['display_name'] for template in templates[0]['templates']]
self.assertEqual(template_display_names, ['Annotation', 'Open Response Assessment', 'Peer Grading Interface']) self.assertEqual(template_display_names, ['Annotation', 'Poll', 'Survey'])
@ddt.ddt @ddt.ddt
......
...@@ -81,14 +81,6 @@ ...@@ -81,14 +81,6 @@
} }
} }
}, },
"OPEN_ENDED_GRADING_INTERFACE": {
"grading_controller": "grading_controller",
"password": "password",
"peer_grading": "peer_grading",
"staff_grading": "staff_grading",
"url": "http://localhost:18060/",
"username": "lms"
},
"DJFS": { "DJFS": {
"type": "s3fs", "type": "s3fs",
"bucket": "test", "bucket": "test",
......
...@@ -104,5 +104,9 @@ ...@@ -104,5 +104,9 @@
"THEME_NAME": "", "THEME_NAME": "",
"TIME_ZONE": "America/New_York", "TIME_ZONE": "America/New_York",
"WIKI_ENABLED": true, "WIKI_ENABLED": true,
"OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2" "OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2",
"DEPRECATED_BLOCK_TYPES": [
"poll",
"survey"
]
} }
...@@ -96,6 +96,9 @@ FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings ...@@ -96,6 +96,9 @@ FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings
# Enable partner support link in Studio footer # Enable partner support link in Studio footer
FEATURES['PARTNER_SUPPORT_EMAIL'] = 'partner-support@example.com' FEATURES['PARTNER_SUPPORT_EMAIL'] = 'partner-support@example.com'
# Disable some block types to test block deprecation logic
DEPRECATED_BLOCK_TYPES = ['poll', 'survey']
########################### Entrance Exams ################################# ########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True FEATURES['ENTRANCE_EXAMS'] = True
......
...@@ -1016,8 +1016,6 @@ ADVANCED_COMPONENT_TYPES = [ ...@@ -1016,8 +1016,6 @@ ADVANCED_COMPONENT_TYPES = [
'rate', # Allows up-down voting of course content. See https://github.com/pmitros/RateXBlock 'rate', # Allows up-down voting of course content. See https://github.com/pmitros/RateXBlock
'split_test', 'split_test',
'combinedopenended',
'peergrading',
'notes', 'notes',
'schoolyourself_review', 'schoolyourself_review',
'schoolyourself_lesson', 'schoolyourself_lesson',
......
<div class="wrapper-comp-editor" id="editor-tab">
<section class="combinedopenended-editor editor">
<div class="row">
%if enable_markdown:
<div class="editor-bar">
<ul class="format-buttons">
<li><a href="#" class="prompt-button" data-tooltip="Prompt"><span
class="combinedopenended-editor-icon fa fa-quote-left"></span></a></li>
<li><a href="#" class="rubric-button" data-tooltip="Rubric"><span
class="combinedopenended-editor-icon fa fa-table"></span></a></li>
<li><a href="#" class="tasks-button" data-tooltip="Tasks"><span
class="combinedopenended-editor-icon fa fa-sitemap"></span></a></li>
</ul>
<ul class="editor-tabs">
<li><a href="#" class="xml-tab advanced-toggle" data-tab="xml">Advanced Editor</a></li>
<li><a href="#" class="cheatsheet-toggle" data-tooltip="Toggle Cheatsheet">?</a></li>
</ul>
</div>
<textarea class="markdown-box">${markdown | h}</textarea>
%endif
<textarea class="xml-box" rows="8" cols="40">${data | h}</textarea>
</div>
</section>
<script type="text/template" id="open-ended-template">
<openended %min_max_string%>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "%grading_config%", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</script>
<script type="text/template" id="simple-editor-open-ended-cheatsheet">
<article class="simple-editor-open-ended-cheatsheet">
<div class="cheatsheet-wrapper">
<div class="row">
<h6>Prompt</h6>
<div class="col prompt">
</div>
<div class="col">
<pre><code>
[prompt]
Why is the sky blue?
[prompt]
</code></pre>
</div>
<div class="col">
<p>The student will respond to the prompt. The prompt can contain any html tags, such as paragraph tags and header tags.</p>
</div>
</div>
<div class="row">
<h6>Rubric</h6>
<div class="col sample rubric"><!DOCTYPE html>
</div>
<div class="col">
<pre><code>
[rubric]
+ Color Identification
- Incorrect
- Correct
+ Grammar
- Poor
- Acceptable
- Superb
[rubric]
</code></pre>
</div>
<div class="col">
<p>The rubric is used for feedback and self-assessment. The rubric can have as many categories (+) and options (-) as desired. </p>
<p>The total score for the problem will be the sum of all the points possible on the rubric. The options will be numbered sequentially from zero in each category, so each category will be worth as many points as its number of options minus one. </p>
</div>
</div>
<div class="row">
<h6>Tasks</h6>
<div class="col sample tasks">
</div>
<div class="col">
<pre><code>
[tasks]
(Self), ({1-3}AI), ({2-3}Peer)
[tasks]
</code></pre>
</div>
<div class="col">
<p>The tasks define what feedback the student will get from the problem.</p>
<p>Each task is defined with parentheses around it. Brackets (ie {2-3} above), specify the minimum and maximum score needed to attempt the given task.</p>
<p>In the example above, the student will first be asked to self-assess. If they give themselves greater than or equal to a 1/3 and less than or equal to a 3/3 on the problem, then they will be moved to AI assessment. If they score themselves a 2/3 or 3/3 on AI assessment, they will move to peer assessment.</p>
<p>Students will be given feedback from each task, and their final score for a given attempt of the problem will be their score last task that is completed.</p>
</div>
</div>
</div>
</article>
</script>
</div>
<%include file="metadata-edit.html" />
...@@ -7,7 +7,6 @@ import logging ...@@ -7,7 +7,6 @@ import logging
from .comments import StubCommentsService from .comments import StubCommentsService
from .xqueue import StubXQueueService from .xqueue import StubXQueueService
from .youtube import StubYouTubeService from .youtube import StubYouTubeService
from .ora import StubOraService
from .lti import StubLtiService from .lti import StubLtiService
from .video_source import VideoSourceHttpService from .video_source import VideoSourceHttpService
from .edxnotes import StubEdxNotesService from .edxnotes import StubEdxNotesService
...@@ -19,7 +18,6 @@ USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_V ...@@ -19,7 +18,6 @@ USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_V
SERVICES = { SERVICES = {
'xqueue': StubXQueueService, 'xqueue': StubXQueueService,
'youtube': StubYouTubeService, 'youtube': StubYouTubeService,
'ora': StubOraService,
'comments': StubCommentsService, 'comments': StubCommentsService,
'lti': StubLtiService, 'lti': StubLtiService,
'video': VideoSourceHttpService, 'video': VideoSourceHttpService,
......
...@@ -115,19 +115,6 @@ class StubXQueueServiceTest(unittest.TestCase): ...@@ -115,19 +115,6 @@ class StubXQueueServiceTest(unittest.TestCase):
self.assertFalse(self.post.called) self.assertFalse(self.post.called)
self.assertTrue(logger.error.called) self.assertTrue(logger.error.called)
def test_register_submission_url(self):
# Configure the XQueue stub to notify another service
# when it receives a submission.
register_url = 'http://127.0.0.1:8000/register_submission'
self.server.config['register_submission_url'] = register_url
callback_url = 'http://127.0.0.1:8000/test_callback'
submission = json.dumps({'grader_payload': 'test payload'})
self._post_submission(callback_url, 'test_queuekey', 'test_queue', submission)
# Check that a notification was sent
self.post.assert_any_call(register_url, data={'grader_payload': u'test payload'})
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body): def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
""" """
Post a submission to the stub XQueue implementation. Post a submission to the stub XQueue implementation.
......
...@@ -39,7 +39,8 @@ class StubXQueueHandler(StubHttpRequestHandler): ...@@ -39,7 +39,8 @@ class StubXQueueHandler(StubHttpRequestHandler):
if self._is_grade_request(): if self._is_grade_request():
# If configured, send the grader payload to other services. # If configured, send the grader payload to other services.
self._register_submission(self.post_dict['xqueue_body']) # TODO TNL-3906
# self._register_submission(self.post_dict['xqueue_body'])
try: try:
xqueue_header = json.loads(self.post_dict['xqueue_header']) xqueue_header = json.loads(self.post_dict['xqueue_header'])
......
...@@ -3,7 +3,6 @@ from setuptools import setup, find_packages ...@@ -3,7 +3,6 @@ from setuptools import setup, find_packages
XMODULES = [ XMODULES = [
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor", "book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor", "chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor", "conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor", "course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor", "customtag = xmodule.template_module:CustomTagDescriptor",
...@@ -12,7 +11,6 @@ XMODULES = [ ...@@ -12,7 +11,6 @@ XMODULES = [
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"library_content = xmodule.library_content_module:LibraryContentDescriptor", "library_content = xmodule.library_content_module:LibraryContentDescriptor",
"error = xmodule.error_module:ErrorDescriptor", "error = xmodule.error_module:ErrorDescriptor",
"peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor", "poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor", "problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor",
......
.editor-bar {
.editor-tabs {
.advanced-toggle {
@include white-button;
height: auto;
margin-top: -1px;
padding: 3px 9px;
font-size: 12px;
&.current {
border: 1px solid $lightGrey !important;
border-radius: 3px !important;
background: $lightGrey !important;
color: $darkGrey !important;
pointer-events: none;
cursor: none;
&:hover, &:focus {
box-shadow: 0 0 0 0 !important;
}
}
}
.cheatsheet-toggle {
width: 21px;
height: 21px;
padding: 0;
margin: 0 ($baseline/4) 0 ($baseline*0.75);
border-radius: 22px;
border: 1px solid #a5aaaf;
background: #e5ecf3;
font-size: 13px;
font-weight: 700;
color: #565d64;
text-align: center;
}
}
}
.simple-editor-open-ended-cheatsheet {
position: absolute;
top: 0;
left: 100%;
width: 0;
border-radius: 0 3px 3px 0;
@include linear-gradient(left, $shadow-l1, $transparent 4px);
background-color: $white;
overflow: hidden;
@include transition(width .3s linear 0s);
&.shown {
width: 20%;
height: 100%;
overflow-y: scroll;
}
.cheatsheet-wrapper {
padding: 10%;
}
h6 {
margin-bottom: 7px;
font-size: 15px;
font-weight: 700;
}
.row {
@include clearfix();
padding-bottom: 5px !important;
margin-bottom: 10px !important;
border-bottom: 1px solid #ddd !important;
&:last-child {
border-bottom: none !important;
margin-bottom: 0 !important;
}
}
.col {
float: left;
&.sample {
width: 60px;
margin-right: 30px;
}
}
pre {
font-size: 12px;
line-height: 18px;
}
code {
padding: 0;
background: none;
}
}
.combinedopenended-editor-icon {
display: inline-block;
vertical-align: middle;
color: #333;
}
<section class="course-content">
<section class="xblock xblock-student_view xmodule_display xmodule_CombinedOpenEndedModule" data-type="CombinedOpenEnded">
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE" data-allow_reset="False" data-state="assessing" data-task-count="2" data-task-number="1">
<h2>Problem 1</h2>
<div class="status-container">
<h4>Status</h4>
<div class="status-elements">
<section id="combined-open-ended-status" class="combined-open-ended-status">
<div class="statusitem" data-status-number="0">
Step 1 (Problem complete) : 1 / 1
<span class="correct" id="status"></span>
</div>
<div class="statusitem statusitem-current" data-status-number="1">
Step 2 (Being scored) : None / 1
<span class="grading" id="status"></span>
</div>
</section>
</div>
</div>
<div class="item-container">
<h4>Problem</h4>
<div class="problem-container">
<div class="item">
<section id="openended_open_ended" class="open-ended-child" data-state="assessing" data-child-type="openended">
<div class="error">
</div>
<div class="prompt">
Some prompt.
</div>
<textarea rows="30" cols="80" name="answer" class="answer short-form-response" id="input_open_ended" disabled="disabled">
Test submission. Yaaaaaaay!
</textarea>
<div class="message-wrapper"></div>
<div class="grader-status">
<span class="grading" id="status_open_ended">Submitted for grading.</span>
</div>
<input type="button" value="Submit assessment" class="submit-button" name="show" style="display: none;">
<input name="skip" class="skip-button" type="button" value="Skip Post-Assessment" style="display: none;">
<div class="open-ended-action"></div>
<span id="answer_open_ended"></span>
</section>
</div>
</div>
<div class="oe-tools response-tools">
<span class="oe-tools-label"></span>
<input type="button" value="Reset" class="reset-button" name="reset" style="display: none;">
</div>
<input type="button" value="Next Step" class="next-step-button" name="reset" style="display: none;">
</div>
<a name="results">
<div class="result-container">
</div>
</a>
</section>
<a name="results">
</a>
</section>
<a name="results">
</a>
<div>
<a name="results">
</a>
<a href="https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml">
Edit
</a> /
<a href="#i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa-modal" onclick="javascript:getlog('i4x_MITx_6_002x_combinedopenended_CombinedOE', {
'location': 'i4x://MITx/6.002x/combinedopenended/CombinedOE',
'xqa_key': 'KUBrWtK3RAaBALLbccHrXeD3RHOpmZ2A',
'category': 'CombinedOpenEndedModule',
'user': 'blah'
})" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_log">QA</a>
</div>
<div>
<a href="#i4x_MITx_6_002x_combinedopenended_CombinedOE_debug" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_trig">
Staff Debug Info
</a>
</div>
<section id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa-modal" class="modal xqa-modal" style="width:80%; left:20%; height:80%; overflow:auto">
<div class="inner-wrapper">
<header>
<h2>edX Content Quality Assessment</h2>
</header>
<form id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_form" class="xqa_form">
<label>Comment</label>
<input id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_entry" type="text" placeholder="comment">
<label>Tag</label>
<span style="color:black;vertical-align: -10pt">Optional tag (eg "done" or "broken"):&nbsp; </span>
<input id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_tag" type="text" placeholder="tag" style="width:80px;display:inline">
<div class="submit">
<button name="submit" type="submit">Add comment</button>
</div>
<hr>
<div id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_log_data"></div>
</form>
</div>
</section>
<section class="modal staff-modal" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_debug" style="width:80%; left:20%; height:80%; overflow:auto;">
<div class="inner-wrapper" style="color:black">
<header>
<h2>Staff Debug</h2>
</header>
<div class="staff_info" style="display:block">
is_released = <font color="red">Yes!</font>
location = i4x://MITx/6.002x/combinedopenended/CombinedOE
github = <a href="https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml">https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml</a>
definition = <pre>None</pre>
metadata = {
"showanswer": "attempted",
"display_name": "Problem 1",
"graceperiod": "1 day 12 hours 59 minutes 59 seconds",
"xqa_key": "KUBrWtK3RAaBALLbccHrXeD3RHOpmZ2A",
"rerandomize": "never",
"start": "2012-09-05T12:00",
"attempts": "10000",
"data_dir": "content-mit-6002x",
"max_score": "1"
}
category = CombinedOpenEndedModule
</div>
</div>
</section>
<div id="i4x_MITx_6_002x_combinedopenended_CombinedOE_setup"></div>
</section>
<section class="combinedopenended-editor editor">
<div class="row">
<textarea class="markdown-box">markdown</textarea>
<textarea class="xml-box" rows="8" cols="40">xml</textarea>
</div>
</section>
<section class="combinedopenended-editor editor">
<div class="row">
<textarea class="xml-box" rows="8" cols="40">xml only</textarea>
</div>
</section>
# Jasmine specs for the ORA1 Rubric client class.
# Relies on the 'rubric.html' fixture, which contains a rendered
# .combined-open-ended element carrying a data-location attribute.
describe 'Rubric', ->
  beforeEach ->
    # silence event logging during the specs
    spyOn Logger, 'log'
    # load up some fixtures
    loadFixtures 'rubric.html'
    jasmine.Clock.useMock()
    @element = $('.combined-open-ended')
    @location = @element.data('location')

  describe 'constructor', ->
    beforeEach ->
      @rub = new Rubric @element

    it 'rubric should properly grab the element', ->
      expect(@rub.el).toEqual @element

  describe 'initialize', ->
    beforeEach ->
      @rub = new Rubric @element
      @rub.initialize @location

    it 'rubric correctly sets location', ->
      expect($(@rub.rubric_sel).data('location')).toEqual @location

    it 'rubric correctly read', ->
      # the fixture rubric defines 5 categories
      expect(@rub.categories.length).toEqual 5
# Jasmine specs for the CombinedOpenEnded client module: construction,
# queue polling, state-dependent rebinding of the UI, and advancing to
# the next problem. Uses the 'combined-open-ended.html' fixture.
describe 'CombinedOpenEnded', ->
  beforeEach ->
    spyOn Logger, 'log'
    # load up some fixtures
    loadFixtures 'combined-open-ended.html'
    jasmine.Clock.useMock()
    @element = $('.course-content')

  describe 'constructor', ->
    beforeEach ->
      spyOn(Collapsible, 'setCollapsibles')
      @combined = new CombinedOpenEnded @element

    it 'set the element', ->
      expect(@combined.el).toEqual @element

    it 'get the correct values from data fields', ->
      # these values come from data-* attributes in the fixture
      expect(@combined.ajax_url).toEqual '/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE'
      expect(@combined.state).toEqual 'assessing'
      expect(@combined.task_count).toEqual 2
      expect(@combined.task_number).toEqual 1

    it 'subelements are made collapsible', ->
      expect(Collapsible.setCollapsibles).toHaveBeenCalled()

  describe 'poll', ->
    # We will store default window.setTimeout() function here.
    oldSetTimeout = null

    beforeEach =>
      # setup the spies
      @combined = new CombinedOpenEnded @element
      spyOn(@combined, 'reload').andCallFake -> return 0
      # Store original window.setTimeout() function. If we do not do this, then
      # all other tests that rely on code which uses window.setTimeout()
      # function might (and probably will) fail.
      oldSetTimeout = window.setTimeout
      # Redefine window.setTimeout() function as a spy.
      window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5

    afterEach =>
      # Reset the default window.setTimeout() function. If we do not do this,
      # then all other tests that rely on code which uses window.setTimeout()
      # function might (and probably will) fail.
      window.setTimeout = oldSetTimeout

    it 'polls at the correct intervals', =>
      fakeResponseContinue = state: 'not done'
      spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseContinue)
      @combined.poll()
      # while the queue is not done, polling reschedules itself every 10s
      expect(window.setTimeout).toHaveBeenCalledWith(@combined.poll, 10000)
      expect(window.queuePollerID).toBe(5)

    # NOTE(review): disabled spec (xit) — polling teardown behavior; kept for reference
    xit 'polling stops properly', =>
      fakeResponseDone = state: "done"
      spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseDone)
      @combined.poll()
      expect(window.queuePollerID).toBeUndefined()
      expect(window.setTimeout).not.toHaveBeenCalled()

  describe 'rebind', ->
    # We will store default window.setTimeout() function here.
    oldSetTimeout = null

    beforeEach ->
      @combined = new CombinedOpenEnded @element
      spyOn(@combined, 'queueing').andCallFake -> return 0
      spyOn(@combined, 'skip_post_assessment').andCallFake -> return 0
      # Store original window.setTimeout() function. If we do not do this, then
      # all other tests that rely on code which uses window.setTimeout()
      # function might (and probably will) fail.
      oldSetTimeout = window.setTimeout
      # Redefine window.setTimeout() function as a spy.
      window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5

    afterEach =>
      # Reset the default window.setTimeout() function. If we do not do this,
      # then all other tests that rely on code which uses window.setTimeout()
      # function might (and probably will) fail.
      window.setTimeout = oldSetTimeout

    it 'when our child is in an assessing state', ->
      @combined.child_state = 'assessing'
      @combined.rebind()
      expect(@combined.answer_area.attr("disabled")).toBe("disabled")
      expect(@combined.submit_button.val()).toBe("Submit assessment")
      expect(@combined.queueing).toHaveBeenCalled()

    it 'when our child state is initial', ->
      @combined.child_state = 'initial'
      @combined.rebind()
      expect(@combined.answer_area.attr("disabled")).toBeUndefined()
      expect(@combined.submit_button.val()).toBe("Submit")

    it 'when our child state is post_assessment', ->
      @combined.child_state = 'post_assessment'
      @combined.rebind()
      expect(@combined.answer_area.attr("disabled")).toBe("disabled")
      expect(@combined.submit_button.val()).toBe("Submit post-assessment")

    it 'when our child state is done', ->
      spyOn(@combined, 'next_problem').andCallFake ->
      @combined.child_state = 'done'
      @combined.rebind()
      expect(@combined.answer_area.attr("disabled")).toBe("disabled")
      expect(@combined.next_problem_button).toBe(":visible")

  describe 'next_problem', ->
    beforeEach ->
      @combined = new CombinedOpenEnded @element
      @combined.child_state = 'done'

    it 'handling a successful call', ->
      fakeResponse =
        success: true
        html: "dummy html"
        allow_reset: false
      spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
      spyOn(@combined, 'reinitialize')
      spyOn(@combined, 'rebind')
      @combined.next_problem()
      # success resets the widget back to its initial state
      expect($.postWithPrefix).toHaveBeenCalled()
      expect(@combined.reinitialize).toHaveBeenCalledWith(@combined.element)
      expect(@combined.rebind).toHaveBeenCalled()
      expect(@combined.answer_area.val()).toBe('')
      expect(@combined.child_state).toBe('initial')

    it 'handling an unsuccessful call', ->
      fakeResponse =
        success: false
        error: 'This is an error'
      spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
      @combined.next_problem()
      # failure surfaces the server-provided error message
      expect(@combined.errors_area.html()).toBe(fakeResponse.error)
# Jasmine specs for the Studio markdown editor of combinedopenended problems:
# saving from the markdown vs. raw XML editor, the template-insertion helpers,
# and the markdown -> XML conversion.
describe 'OpenEndedMarkdownEditingDescriptor', ->
  describe 'save stores the correct data', ->
    it 'saves markdown from markdown editor', ->
      loadFixtures 'combinedopenended-with-markdown.html'
      @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
      saveResult = @descriptor.save()
      expect(saveResult.metadata.markdown).toEqual('markdown')
      expect(saveResult.data).toEqual('<combinedopenended>\nmarkdown\n</combinedopenended>')

    it 'clears markdown when xml editor is selected', ->
      loadFixtures 'combinedopenended-with-markdown.html'
      @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
      @descriptor.createXMLEditor('replace with markdown')
      saveResult = @descriptor.save()
      # switching to the XML editor nulls out the stored markdown metadata
      expect(saveResult.nullout).toEqual(['markdown'])
      expect(saveResult.data).toEqual('replace with markdown')

    it 'saves xml from the xml editor', ->
      loadFixtures 'combinedopenended-without-markdown.html'
      @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
      saveResult = @descriptor.save()
      expect(saveResult.nullout).toEqual(['markdown'])
      expect(saveResult.data).toEqual('xml only')

  describe 'advanced editor opens correctly', ->
    it 'click on advanced editor should work', ->
      loadFixtures 'combinedopenended-with-markdown.html'
      @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
      spyOn(@descriptor, 'confirmConversionToXml').andReturn(true)
      expect(@descriptor.confirmConversionToXml).not.toHaveBeenCalled()
      e = jasmine.createSpyObj('e', [ 'preventDefault' ])
      @descriptor.onShowXMLButton(e)
      expect(e.preventDefault).toHaveBeenCalled()
      expect(@descriptor.confirmConversionToXml).toHaveBeenCalled()
      # the markdown editor bar is removed once the XML editor is shown
      expect($('.editor-bar').length).toEqual(0)

  describe 'insertPrompt', ->
    it 'inserts the template if selection is empty', ->
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('')
      expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.promptTemplate)

    it 'recognizes html in the prompt', ->
      # an existing [prompt] block is returned unchanged
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('[prompt]<h1>Hello</h1>[prompt]')
      expect(revisedSelection).toEqual('[prompt]<h1>Hello</h1>[prompt]')

  describe 'insertRubric', ->
    it 'inserts the template if selection is empty', ->
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('')
      expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.rubricTemplate)

    it 'recognizes a proper rubric', ->
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('[rubric]\n+1\n-1\n-2\n[rubric]')
      expect(revisedSelection).toEqual('[rubric]\n+1\n-1\n-2\n[rubric]')

  describe 'insertTasks', ->
    it 'inserts the template if selection is empty', ->
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('')
      expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.tasksTemplate)

    it 'recognizes a proper task string', ->
      revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('[tasks](Self)[tasks]')
      expect(revisedSelection).toEqual('[tasks](Self)[tasks]')

  describe 'markdownToXml', ->
    # test default templates
    # (whitespace is stripped from both sides before comparison, so only
    # the element structure is asserted)
    it 'converts prompt to xml', ->
      data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[prompt]
        <h1>Prompt!</h1>
        This is my super awesome prompt.
        [prompt]
        """)
      data = data.replace(/[\t\n\s]/gmi,'')
      expect(data).toEqual("""
        <combinedopenended>
        <prompt>
        <h1>Prompt!</h1>
        This is my super awesome prompt.
        </prompt>
        </combinedopenended>
        """.replace(/[\t\n\s]/gmi,''))

    it 'converts rubric to xml', ->
      data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[rubric]
        + 1
        -1
        -2
        + 2
        -1
        -2
        +3
        -1
        -2
        -3
        [rubric]
        """)
      data = data.replace(/[\t\n\s]/gmi,'')
      expect(data).toEqual("""
        <combinedopenended>
        <rubric>
        <rubric>
        <category>
        <description>1</description>
        <option>1</option>
        <option>2</option>
        </category>
        <category>
        <description>2</description>
        <option>1</option>
        <option>2</option>
        </category>
        <category>
        <description>3</description>
        <option>1</option>
        <option>2</option>
        <option>3</option>
        </category>
        </rubric>
        </rubric>
        </combinedopenended>
        """.replace(/[\t\n\s]/gmi,''))

    it 'converts tasks to xml', ->
      data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[tasks]
        (Self), ({1-2}AI), ({1-4}AI), ({1-2}Peer
        [tasks]
        """)
      data = data.replace(/[\t\n\s]/gmi,'')
      equality_list = """
        <combinedopenended>
        <task>
        <selfassessment/>
        </task>
        <task>
        <openended min_score_to_attempt="1" max_score_to_attempt="2">ml_grading.conf</openended>
        </task>
        <task>
        <openended min_score_to_attempt="1" max_score_to_attempt="4">ml_grading.conf</openended>
        </task>
        <task>
        <openended min_score_to_attempt="1" max_score_to_attempt="2">peer_grading.conf</openended>
        </task>
        </combinedopenended>
        """
      expect(data).toEqual(equality_list.replace(/[\t\n\s]/gmi,''))
# This is a simple class that just hides the error container
# and message container when they are empty
# Can (and should be) expanded upon when our problem list
# becomes more sophisticated
class @PeerGrading
  # CSS selectors for the parts of the peer-grading panel this class drives.
  peer_grading_sel: '.peer-grading'
  peer_grading_container_sel: '.peer-grading-container'
  error_container_sel: '.error-container'
  message_container_sel: '.message-container'
  problem_button_sel: '.problem-button'
  problem_list_sel: '.problem-list'
  progress_bar_sel: '.progress-bar'

  constructor: (element) ->
    @el = element
    @peer_grading_container = @$(@peer_grading_sel)
    # data attribute comes back as a string ("true"/"false"), hence the
    # toLowerCase() comparison below.
    @use_single_location = @peer_grading_container.data('use-single-location')
    @peer_grading_outer_container = @$(@peer_grading_container_sel)
    @ajax_url = @peer_grading_container.data('ajax-url')
    if @use_single_location.toLowerCase() == "true"
      #If the peer grading element is linked to a single location, then activate the backend for that location
      @activate_problem()
    else
      #Otherwise, activate the panel view.
      # Hide the error/message containers when the server rendered them empty.
      @error_container = @$(@error_container_sel)
      @error_container.toggle(not @error_container.is(':empty'))
      @message_container = @$(@message_container_sel)
      @message_container.toggle(not @message_container.is(':empty'))
      @problem_button = @$(@problem_button_sel)
      @problem_button.click @show_results
      @problem_list = @$(@problem_list_sel)
      @construct_progress_bar()

  # locally scoped jquery.
  $: (selector) ->
    $(selector, @el)

  construct_progress_bar: () =>
    # Each problem row carries data-graded / data-required counts; render a
    # jQuery UI progressbar per row (skipping the header row via .next()).
    problems = @problem_list.find('tr').next()
    problems.each( (index, element) =>
      problem = $(element)
      progress_bar = problem.find(@progress_bar_sel)
      bar_value = parseInt(problem.data('graded'))
      bar_max = parseInt(problem.data('required')) + bar_value
      progress_bar.progressbar({value: bar_value, max: bar_max})
    )

  show_results: (event) =>
    # Fetch the grading view for the clicked problem and swap it in place of
    # the problem-list panel.
    location_to_fetch = $(event.target).data('location')
    data = {'location' : location_to_fetch}
    $.postWithPrefix "#{@ajax_url}problem", data, (response) =>
      if response.success
        @peer_grading_outer_container.after(response.html).remove()
        backend = new PeerGradingProblemBackend(@ajax_url, false)
        new PeerGradingProblem(backend, @el)
      else
        @gentle_alert response.error

  activate_problem: () =>
    # Single-location mode: wire up the grading UI directly, no panel.
    backend = new PeerGradingProblemBackend(@ajax_url, false)
    new PeerGradingProblem(backend, @el)
...@@ -210,13 +210,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ...@@ -210,13 +210,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
'''Make sure the course objects loaded properly''' '''Make sure the course objects loaded properly'''
courses = self.draft_store.get_courses() courses = self.draft_store.get_courses()
# note, the number of courses expected is really assert_equals(len(courses), 6)
# 6, but due to a lack of cache flushing between
# test case runs, we will get back 7.
# When we fix the caching issue, we should reduce this
# to 6 and remove the 'treexport_peer_component' course_id
# from the list below
assert_equals(len(courses), 7)
course_ids = [course.id for course in courses] course_ids = [course.id for course in courses]
for course_key in [ for course_key in [
...@@ -229,9 +223,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ...@@ -229,9 +223,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
['edX', 'test_unicode', '2012_Fall'], ['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'], ['edX', 'toy', '2012_Fall'],
['guestx', 'foo', 'bar'], ['guestx', 'foo', 'bar'],
# This course below is due to a caching issue in the modulestore
# which is not cleared between test runs. This means
['edX', 'treeexport_peer_component', 'export_peer_component'],
] ]
]: ]:
assert_in(course_key, course_ids) assert_in(course_key, course_ids)
...@@ -263,13 +254,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ...@@ -263,13 +254,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
assert_in(course_key, course_ids) assert_in(course_key, course_ids)
courses = self.draft_store.get_courses(org='edX') courses = self.draft_store.get_courses(org='edX')
# note, the number of courses expected is really assert_equals(len(courses), 5)
# 5, but due to a lack of cache flushing between
# test case runs, we will get back 6.
# When we fix the caching issue, we should reduce this
# to 6 and remove the 'treexport_peer_component' course_id
# from the list below
assert_equals(len(courses), 6)
course_ids = [course.id for course in courses] course_ids = [course.id for course in courses]
for course_key in [ for course_key in [
...@@ -280,9 +265,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ...@@ -280,9 +265,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
['edX', 'test_import_course', '2012_Fall'], ['edX', 'test_import_course', '2012_Fall'],
['edX', 'test_unicode', '2012_Fall'], ['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'], ['edX', 'toy', '2012_Fall'],
# This course below is due to a caching issue in the modulestore
# which is not cleared between test runs. This means
['edX', 'treeexport_peer_component', 'export_peer_component'],
] ]
]: ]:
assert_in(course_key, course_ids) assert_in(course_key, course_ids)
...@@ -678,57 +660,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ...@@ -678,57 +660,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
self.assertEqual(component.published_on, published_date) self.assertEqual(component.published_on, published_date)
self.assertEqual(component.published_by, published_by) self.assertEqual(component.published_by, published_by)
def test_export_course_with_peer_component(self):
"""
Test export course when link_to_location is given in peer grading interface settings.
"""
name = "export_peer_component"
locations = self._create_test_tree(name)
# Insert the test block directly into the module store
problem_location = Location('edX', 'tree{}'.format(name), name, 'combinedopenended', 'test_peer_problem')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
problem_location.block_type,
block_id=problem_location.block_id
)
interface_location = Location('edX', 'tree{}'.format(name), name, 'peergrading', 'test_peer_interface')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
interface_location.block_type,
block_id=interface_location.block_id
)
self.draft_store._update_single_item(
as_draft(interface_location),
{
'definition.data': {},
'metadata': {
'link_to_location': unicode(problem_location),
'use_for_single_location': True,
},
},
)
component = self.draft_store.get_item(interface_location)
self.assertEqual(unicode(component.link_to_location), unicode(problem_location))
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
# export_course_to_xml should work.
export_course_to_xml(
self.draft_store, self.content_store, interface_location.course_key,
root_dir, 'test_export'
)
def test_draft_modulestore_create_child_with_position(self): def test_draft_modulestore_create_child_with_position(self):
""" """
This test is designed to hit a specific set of use cases having to do with This test is designed to hit a specific set of use cases having to do with
......
...@@ -95,27 +95,27 @@ class CountMongoCallsCourseTraversal(TestCase): ...@@ -95,27 +95,27 @@ class CountMongoCallsCourseTraversal(TestCase):
# These two lines show the way this traversal *should* be done # These two lines show the way this traversal *should* be done
# (if you'll eventually access all the fields and load all the definitions anyway). # (if you'll eventually access all the fields and load all the definitions anyway).
# 'lazy' does not matter in old Mongo. # 'lazy' does not matter in old Mongo.
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 189), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 189), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 387), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 359),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 387), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 359),
# As shown in these two lines: whether or not the XBlock fields are accessed, # As shown in these two lines: whether or not the XBlock fields are accessed,
# the same number of mongo calls are made in old Mongo for depth=None. # the same number of mongo calls are made in old Mongo for depth=None.
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 189), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 189), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 387), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 359),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 387), (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 359),
# The line below shows the way this traversal *should* be done # The line below shows the way this traversal *should* be done
# (if you'll eventually access all the fields and load all the definitions anyway). # (if you'll eventually access all the fields and load all the definitions anyway).
(MIXED_SPLIT_MODULESTORE_BUILDER, None, False, True, 4), (MIXED_SPLIT_MODULESTORE_BUILDER, None, False, True, 4),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 41), (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 38),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 143), (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 131),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 41), (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 38),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, False, False, 4), (MIXED_SPLIT_MODULESTORE_BUILDER, None, False, False, 4),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, True, False, 4), (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, False, 4),
# TODO: The call count below seems like a bug - should be 4? # TODO: The call count below seems like a bug - should be 4?
# Seems to be related to using self.lazy in CachingDescriptorSystem.get_module_data(). # Seems to be related to using self.lazy in CachingDescriptorSystem.get_module_data().
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 143), (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 131),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, False, 4) (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, False, 4)
) )
@ddt.unpack @ddt.unpack
......
import dogstats_wrapper as dog_stats_api
import logging
from .grading_service_module import GradingService
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
    """
    Interface to controller query backend.

    Thin HTTP wrapper around the external ORA grading controller: each public
    method issues a single GET/POST (via the base class, which handles login
    retries) and records the outcome to datadog through ``_record_result``.
    """
    # Prefix for all datadog metrics emitted by this service.
    METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service'

    def __init__(self, config, render_template):
        # The base class pulls ``render_template`` out of ``config``, so
        # inject it before delegating.
        config['render_template'] = render_template
        super(ControllerQueryService, self).__init__(config)
        # Base URL of the grading controller plus one endpoint per operation.
        self.url = config['url'] + config['grading_controller']
        self.login_url = self.url + '/login/'
        self.check_eta_url = self.url + '/get_submission_eta/'
        self.combined_notifications_url = self.url + '/combined_notifications/'
        self.grading_status_list_url = self.url + '/get_grading_status_list/'
        self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/'
        self.take_action_on_flags_url = self.url + '/take_action_on_flags/'

    def check_for_eta(self, location):
        """
        Fetch the grading ETA for the submission at ``location``.

        Returns the decoded json dict from the controller; also reports the
        ``eta`` value (0 if absent) as a datadog histogram.
        """
        params = {
            'location': location,
        }
        data = self.get(self.check_eta_url, params)
        self._record_result('check_for_eta', data)
        dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0))
        return data

    def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed):
        """
        Ask the controller which grading notifications exist for this
        student/course since ``last_time_viewed``.

        Returns the decoded json dict; every response key except
        success/version/error is attached to the metric as a tag.
        """
        params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
            'user_is_staff': user_is_staff,
            'last_time_viewed': last_time_viewed,
        }
        log.debug(self.combined_notifications_url)
        data = self.get(self.combined_notifications_url, params)
        tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'user_is_staff:{}'.format(user_is_staff)]
        tags.extend(
            u'{}:{}'.format(key, value)
            for key, value in data.items()
            if key not in ('success', 'version', 'error')
        )
        self._record_result('check_combined_notifications', data, tags)
        return data

    def get_grading_status_list(self, course_id, student_id):
        """
        Fetch the list of open-ended problems and their grading states for a
        student. The list length is reported as a datadog histogram.
        """
        params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
        }
        data = self.get(self.grading_status_list_url, params)
        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_grading_status_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_grading_status_list.length'),
            len(data.get('problem_list', [])),
            tags=tags
        )
        return data

    def get_flagged_problem_list(self, course_id):
        """
        Fetch submissions flagged for moderation in ``course_id``. The number
        of flagged submissions is reported as a datadog histogram.
        """
        params = {
            'course_id': course_id.to_deprecated_string(),
        }
        data = self.get(self.flagged_problem_list_url, params)
        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_flagged_problem_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_flagged_problem_list.length'),
            len(data.get('flagged_submissions', []))
        )
        return data

    def take_action_on_flags(self, course_id, student_id, submission_id, action_type):
        """
        POST a moderation decision (``action_type``) for a flagged submission.
        Returns the decoded json dict from the controller.
        """
        params = {
            'course_id': course_id.to_deprecated_string(),
            'student_id': student_id,
            'submission_id': submission_id,
            'action_type': action_type
        }
        data = self.post(self.take_action_on_flags_url, params)
        tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'action_type:{}'.format(action_type)]
        self._record_result('take_action_on_flags', data, tags)
        return data
class MockControllerQueryService(object):
    """
    Mock controller query service for testing.

    Mirrors the public interface of ``ControllerQueryService`` but performs no
    network I/O: queries return canned payloads, actions return ``None``.
    """

    def __init__(self, config, render_template):
        # Signature kept for drop-in compatibility; nothing to configure.
        pass

    def check_for_eta(self, *args, **kwargs):
        """
        Mock later if needed. Stub function for now.
        @param params:
        @return:
        """
        return None

    def check_combined_notifications(self, *args, **kwargs):
        """Return a canned notifications payload."""
        return {
            "flagged_submissions_exist": False,
            "version": 1,
            "new_student_grading_to_view": False,
            "success": True,
            "staff_needs_to_grade": False,
            "student_needs_to_peer_grade": True,
            "overall_need_to_check": True
        }

    def get_grading_status_list(self, *args, **kwargs):
        """Return a canned grading-status payload with two pending problems."""
        pending = {
            "grader_type": "NA",
            "eta_available": True,
            "state": "Waiting to be Graded",
            "eta": 259200,
        }
        ml_problem = dict(
            pending,
            problem_name="Science Question -- Machine Assessed",
            location="i4x://MITx/oe101x/combinedopenended/Science_SA_ML",
        )
        peer_problem = dict(
            pending,
            problem_name="Humanities Question -- Peer Assessed",
            location="i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer",
        )
        return {
            "version": 1,
            "problem_list": [ml_problem, peer_problem],
            "success": True
        }

    def get_flagged_problem_list(self, *args, **kwargs):
        """Return a canned failure payload (no flagged submissions)."""
        return {
            "version": 1,
            "success": False,
            "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"
        }

    def take_action_on_flags(self, *args, **kwargs):
        """
        Mock later if needed. Stub function for now.
        @param params:
        @return:
        """
        return None
def convert_seconds_to_human_readable(seconds):
    """
    Format a duration in seconds as a rough human-readable string.

    Returns "<n> seconds" below a minute, then "<n> minutes" / "<n> hours" /
    "<n> days", each rounded to one decimal place.

    Bug fix: the divisors are now floats. Under Python 2 (this codebase),
    ``seconds / 60`` was integer division, so e.g. 119 seconds rendered as
    "1 minutes" instead of "2.0 minutes". Float division matches the Python 3
    behavior on both interpreters.
    """
    if seconds < 60:
        human_string = "{0} seconds".format(seconds)
    elif seconds < 60 * 60:
        human_string = "{0} minutes".format(round(seconds / 60.0, 1))
    elif seconds < (24 * 60 * 60):
        human_string = "{0} hours".format(round(seconds / (60.0 * 60), 1))
    else:
        human_string = "{0} days".format(round(seconds / (60.0 * 60 * 24), 1))
    return human_string
# This class gives a common interface for logging into the grading controller
import logging
import requests
import dogstats_wrapper as dog_stats_api
from lxml import etree
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
    """
    Raised when the Open Response Assessment grading backend cannot be
    reached or returns a response that cannot be parsed.
    """
class GradingService(object):
    """
    Interface to staff grading backend.

    Base class for the ORA HTTP services: holds a shared ``requests.Session``
    (so the login cookie persists across calls) and provides ``get``/``post``
    helpers that transparently re-login and retry once when the backend
    responds with a ``login_required`` error.

    NOTE: ``self.login_url`` and ``METRIC_NAME`` are defined by subclasses
    (see ControllerQueryService / PeerGradingService).
    """

    def __init__(self, config):
        # Credentials for the external grading controller.
        self.username = config['username']
        self.password = config['password']
        # One session reused for all requests, keeping the auth cookie alive.
        self.session = requests.Session()
        # Callable used by _render_rubric to turn rubric XML into HTML.
        self.render_template = config['render_template']

    def _login(self):
        """
        Log into the staff grading service.

        Raises requests.exceptions.HTTPError if something goes wrong.

        Returns the decoded json dict of the response.
        """
        response = self.session.post(self.login_url,
                                     {'username': self.username,
                                      'password': self.password, })
        response.raise_for_status()
        return response.json()

    def _metric_name(self, suffix):
        """
        Return a metric name for datadog, using `self.METRIC_NAME` as
        a prefix, and `suffix` as the suffix.

        Arguments:
            suffix (str): The metric suffix to use.
        """
        return '{}.{}'.format(self.METRIC_NAME, suffix)

    def _record_result(self, action, data, tags=None):
        """
        Log results from an API call to an ORA service to datadog.

        Arguments:
            action (str): The ORA action being recorded.
            data (dict): The data returned from the ORA service. Should contain the key 'success'.
            tags (list): A list of tags to attach to the logged metric.
        """
        if tags is None:
            tags = []

        # Mutates the caller-supplied tags list in place (callers here only
        # build it immediately before calling, so this is harmless).
        tags.append(u'result:{}'.format(data.get('success', False)))
        tags.append(u'action:{}'.format(action))
        dog_stats_api.increment(self._metric_name('request.count'), tags=tags)

    def post(self, url, data, allow_redirects=False):
        """
        Make a post request to the grading controller. Returns the parsed json results of that request.

        Raises GradingServiceError on any transport or JSON-decoding failure.
        """
        try:
            op = lambda: self.session.post(url, data=data,
                                           allow_redirects=allow_redirects)
            response_json = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError, ValueError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            #This is a dev_facing_error
            error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
            log.error(error_string)
            raise GradingServiceError(error_string)

        return response_json

    def get(self, url, params, allow_redirects=False):
        """
        Make a get request to the grading controller. Returns the parsed json results of that request.

        Raises GradingServiceError on any transport or JSON-decoding failure.
        """
        op = lambda: self.session.get(url,
                                      allow_redirects=allow_redirects,
                                      params=params)
        try:
            response_json = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError, ValueError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            #This is a dev_facing_error
            error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
            log.error(error_string)
            raise GradingServiceError(error_string)

        return response_json

    def _try_with_login(self, operation):
        """
        Call operation(), which should return a requests response object.  If
        the request fails with a 'login_required' error, call _login() and try
        the operation again.

        Returns the result of operation(). Does not catch exceptions.
        """
        response = operation()
        resp_json = response.json()

        if (resp_json
                and resp_json.get('success') is False
                and resp_json.get('error') == 'login_required'):
            # apparently we aren't logged in.  Try to fix that.
            r = self._login()
            if r and not r.get('success'):
                log.warning("Couldn't log into ORA backend. Response: %s",
                            r)
            # try again
            response = operation()
            response.raise_for_status()
            resp_json = response.json()

        return resp_json

    def _render_rubric(self, response, view_only=False):
        """
        Given an HTTP Response json with the key 'rubric', render out the html
        required to display the rubric and put it back into the response

        returns the updated response as a dictionary that can be serialized later
        """
        try:
            if 'rubric' in response:
                rubric = response['rubric']
                rubric_renderer = CombinedOpenEndedRubric(self.render_template, view_only)
                rubric_dict = rubric_renderer.render_rubric(rubric)
                success = rubric_dict['success']
                rubric_html = rubric_dict['html']
                response['rubric'] = rubric_html
            # Responses without a 'rubric' key pass through untouched.
            return response
        # if we can't parse the rubric into HTML,
        except (etree.XMLSyntaxError, RubricParsingError):
            #This is a dev_facing_error
            log.exception("Cannot parse rubric string. Raw string: {0}".format(response['rubric']))
            return {'success': False,
                    'error': 'Error displaying submission'}
        except ValueError:
            #This is a dev_facing_error
            log.exception("Error parsing response: {0}".format(response))
            return {'success': False,
                    'error': "Error displaying submission"}
import logging
import dogstats_wrapper as dog_stats_api
from .grading_service_module import GradingService
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
class PeerGradingService(GradingService):
    """
    Interface with the grading controller for peer grading
    """
    # Prefix for all datadog metrics emitted by this service.
    METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service'

    def __init__(self, config, render_template):
        # The base class pulls ``render_template`` out of ``config``.
        config['render_template'] = render_template
        super(PeerGradingService, self).__init__(config)
        # Base URL of the peer-grading backend plus one endpoint per operation.
        self.url = config['url'] + config['peer_grading']
        self.login_url = self.url + '/login/'
        self.get_next_submission_url = self.url + '/get_next_submission/'
        self.save_grade_url = self.url + '/save_grade/'
        self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
        self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
        self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
        self.get_problem_list_url = self.url + '/get_problem_list/'
        self.get_notifications_url = self.url + '/get_notifications/'
        self.get_data_for_location_url = self.url + '/get_data_for_location/'
def get_data_for_location(self, problem_location, student_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'location': problem_location, 'student_id': student_id}
result = self.get(self.get_data_for_location_url, params)
self._record_result('get_data_for_location', result)
for key in result.keys():
if key in ('success', 'error', 'version'):
continue
dog_stats_api.histogram(
self._metric_name('get_data_for_location.{}'.format(key)),
result[key],
)
return result
def get_next_submission(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
result = self._render_rubric(self.get(
self.get_next_submission_url,
{
'location': problem_location,
'grader_id': grader_id
}
))
self._record_result('get_next_submission', result)
return result
def save_grade(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_grade_url, data)
self._record_result('save_grade', result)
return result
def is_student_calibrated(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self.get(self.is_student_calibrated_url, params)
self._record_result(
'is_student_calibrated',
result,
tags=['calibrated:{}'.format(result.get('calibrated'))]
)
return result
def show_calibration_essay(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self._render_rubric(self.get(self.show_calibration_essay_url, params))
self._record_result('show_calibration_essay', result)
return result
def save_calibration_essay(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_calibration_essay_url, data)
self._record_result('show_calibration_essay', result)
return result
def get_problem_list(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_problem_list_url, params)
if 'problem_list' in result:
for problem in result['problem_list']:
problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location'])
self._record_result('get_problem_list', result)
dog_stats_api.histogram(
self._metric_name('get_problem_list.result.length'),
len(result.get('problem_list', [])),
)
return result
def get_notifications(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_notifications_url, params)
self._record_result(
'get_notifications',
result,
tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))]
)
return result
class MockPeerGradingService(object):
    """
    Stand-in for ``PeerGradingService`` used in unit tests: returns canned
    responses instead of making service calls to the grading controller.
    """

    @staticmethod
    def _canned_submission():
        """Common payload returned for submission/calibration fetches."""
        return {
            'success': True,
            'submission_id': 1,
            'submission_key': '',
            'student_response': 'Sample student response.',
            'prompt': 'Sample submission prompt.',
            'rubric': 'Placeholder text for the full rubric.',
            'max_score': 4,
        }

    def get_next_submission(self, problem_location, grader_id):
        return self._canned_submission()

    def save_grade(self, **kwargs):
        return {'success': True}

    def is_student_calibrated(self, problem_location, grader_id):
        return {'success': True, 'calibrated': True}

    def show_calibration_essay(self, problem_location, grader_id):
        return self._canned_submission()

    def save_calibration_essay(self, **kwargs):
        return {'success': True, 'actual_score': 2}

    def get_problem_list(self, course_id, grader_id):
        return {'success': True, 'problem_list': []}

    def get_data_for_location(self, problem_location, student_id):
        return {
            'version': 1,
            'count_graded': 3,
            'count_required': 3,
            'success': True,
            'student_sub_count': 1,
            'submissions_available': 0,
        }
...@@ -41,16 +41,6 @@ MODULE_DIR = path(__file__).dirname() ...@@ -41,16 +41,6 @@ MODULE_DIR = path(__file__).dirname()
DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data" DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data"
open_ended_grading_interface = {
'url': 'blah/',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller',
}
class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method
""" """
ModuleSystem for testing ModuleSystem for testing
...@@ -150,7 +140,6 @@ def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')): ...@@ -150,7 +140,6 @@ def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')):
}, },
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
anonymous_student_id='student', anonymous_student_id='student',
open_ended_grading_interface=open_ended_grading_interface,
course_id=course_id, course_id=course_id,
error_descriptor_class=ErrorDescriptor, error_descriptor_class=ErrorDescriptor,
get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False), get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False),
......
"""
Test cases covering workflows and behaviors of the Self Assessment feature
"""
from datetime import datetime
import json
import unittest
from mock import Mock, MagicMock
from webob.multidict import MultiDict
from pytz import UTC
from xblock.fields import ScopeIds
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from opaque_keys.edx.locations import Location
from lxml import etree
from . import get_test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
    """
    Test cases covering workflows and behaviors of the Self Assessment feature
    """
    # NOTE: the doubled <rubric><rubric> nesting is intentional — the ORA1
    # rubric format wraps the rubric body in an outer <rubric> element.
    rubric = '''<rubric><rubric>
        <category>
        <description>Response Quality</description>
        <option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
        </category>
         </rubric></rubric>'''
    prompt = etree.XML("<prompt>This is sample prompt text.</prompt>")
    # Raw problem definition handed to SelfAssessmentModule in setUp.
    definition = {
        'rubric': rubric,
        'prompt': prompt,
        'submitmessage': 'Shall we submit now?',
        'hintprompt': 'Consider this...',
    }
    location = Location("edX", "sa_test", "run", "selfassessment", "SampleQuestion", None)
    descriptor = Mock()

    def setUp(self):
        """Build a SelfAssessmentModule wired to the test module system."""
        super(SelfAssessmentTest, self).setUp()
        self.static_data = {
            'max_attempts': 10,
            'rubric': etree.XML(self.rubric),
            'prompt': self.prompt,
            'max_score': 1,
            'display_name': "Name",
            'accept_file_upload': False,
            'close_date': None,
            's3_interface': test_util_open_ended.S3_INTERFACE,
            'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
            'skip_basic_checks': False,
            'control': {
                'required_peer_grading': 1,
                'peer_grader_count': 1,
                'min_to_calibrate': 3,
                'max_to_calibrate': 6,
                'peer_grade_finished_submissions_when_none_pending': False,
            }
        }
        system = get_test_system()
        usage_key = system.course_id.make_usage_key('combinedopenended', 'test_loc')
        scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
        # The module system normally carries the live xmodule; a Mock with
        # real scope_ids is sufficient for these tests.
        system.xmodule_instance = Mock(scope_ids=scope_ids)
        self.module = SelfAssessmentModule(
            system,
            self.location,
            self.definition,
            self.descriptor,
            self.static_data
        )

    def test_get_html(self):
        """The rendered HTML should include the prompt text."""
        html = self.module.get_html(self.module.system)
        self.assertIn("This is sample prompt text", html)

    def test_self_assessment_flow(self):
        """Walk the full INITIAL -> ASSESSING -> DONE -> reset workflow."""
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}

        def get_fake_item(name):
            """
            Returns the specified key from the parent workflow container
            """
            return responses[name]

        def get_data_for_location(self, location, student):
            """
            Returns a dictionary of keys having zero values
            """
            return {
                'count_graded': 0,
                'count_required': 0,
                'student_sub_count': 0,
            }

        # Fake the AJAX POST dict: both item access and getall read `responses`.
        mock_query_dict = MagicMock()
        mock_query_dict.__getitem__.side_effect = get_fake_item
        mock_query_dict.getall = get_fake_item

        # Patch the peer grading service so no network calls are made.
        self.module.peer_gs.get_data_for_location = get_data_for_location

        self.assertEqual(self.module.get_score()['score'], 0)

        self.module.save_answer({'student_answer': "I am an answer"},
                                self.module.system)
        self.assertEqual(self.module.child_state, self.module.ASSESSING)

        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.child_state, self.module.DONE)

        d = self.module.reset({})
        self.assertTrue(d['success'])
        self.assertEqual(self.module.child_state, self.module.INITIAL)

        # if we now assess as right, skip the REQUEST_HINT state
        self.module.save_answer({'student_answer': 'answer 4'},
                                self.module.system)
        responses['assessment'] = '1'
        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.child_state, self.module.DONE)

    def test_self_assessment_display(self):
        """
        Test storing an answer with the self assessment module.
        """
        # Create a module with no state yet. Important that this start off as a blank slate.
        test_module = SelfAssessmentModule(
            get_test_system(),
            self.location,
            self.definition,
            self.descriptor,
            self.static_data
        )

        saved_response = "Saved response."
        submitted_response = "Submitted response."

        # Initially, there will be no stored answer.
        self.assertEqual(test_module.stored_answer, None)
        # And the initial answer to display will be an empty string.
        self.assertEqual(test_module.get_display_answer(), "")

        # Now, store an answer in the module.
        test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
        # The stored answer should now equal our response.
        self.assertEqual(test_module.stored_answer, saved_response)
        self.assertEqual(test_module.get_display_answer(), saved_response)

        # Submit a student response to the question.
        test_module.handle_ajax("save_answer", {"student_answer": submitted_response}, get_test_system())

        # Submitting an answer should clear the stored answer.
        self.assertEqual(test_module.stored_answer, None)

        # Confirm that the answer is stored properly.
        self.assertEqual(test_module.latest_answer(), submitted_response)

        # Mock saving an assessment.
        assessment_dict = MultiDict({'assessment': 0, 'score_list[]': 0})
        data = test_module.handle_ajax("save_assessment", assessment_dict, get_test_system())
        self.assertTrue(json.loads(data)['success'])

        # Reset the module so the student can try again.
        test_module.reset(get_test_system())

        # Confirm that the right response is loaded.
        self.assertEqual(test_module.get_display_answer(), submitted_response)

    def test_save_assessment_after_closing(self):
        """
        Test storing assessment when close date is passed.
        """
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}
        self.module.save_answer({'student_answer': "I am an answer"}, self.module.system)
        self.assertEqual(self.module.child_state, self.module.ASSESSING)

        #Set close date to current datetime.
        self.module.close_date = datetime.now(UTC)

        #Save assessment when close date is passed.
        # The module should refuse the assessment, so the state must not
        # advance to DONE.
        self.module.save_assessment(responses, self.module.system)
        self.assertNotEqual(self.module.child_state, self.module.DONE)
...@@ -31,11 +31,9 @@ from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, Descripto ...@@ -31,11 +31,9 @@ from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, Descripto
from xmodule.annotatable_module import AnnotatableDescriptor from xmodule.annotatable_module import AnnotatableDescriptor
from xmodule.capa_module import CapaDescriptor from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_module import CombinedOpenEndedDescriptor
from xmodule.discussion_module import DiscussionDescriptor from xmodule.discussion_module import DiscussionDescriptor
from xmodule.gst_module import GraphicalSliderToolDescriptor from xmodule.gst_module import GraphicalSliderToolDescriptor
from xmodule.html_module import HtmlDescriptor from xmodule.html_module import HtmlDescriptor
from xmodule.peer_grading_module import PeerGradingDescriptor
from xmodule.poll_module import PollDescriptor from xmodule.poll_module import PollDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.crowdsource_hinter import CrowdsourceHinterDescriptor from xmodule.crowdsource_hinter import CrowdsourceHinterDescriptor
...@@ -54,11 +52,9 @@ from xmodule.tests import get_test_descriptor_system, get_test_system ...@@ -54,11 +52,9 @@ from xmodule.tests import get_test_descriptor_system, get_test_system
LEAF_XMODULES = { LEAF_XMODULES = {
AnnotatableDescriptor: [{}], AnnotatableDescriptor: [{}],
CapaDescriptor: [{}], CapaDescriptor: [{}],
CombinedOpenEndedDescriptor: [{}],
DiscussionDescriptor: [{}], DiscussionDescriptor: [{}],
GraphicalSliderToolDescriptor: [{}], GraphicalSliderToolDescriptor: [{}],
HtmlDescriptor: [{}], HtmlDescriptor: [{}],
PeerGradingDescriptor: [{}],
PollDescriptor: [{'display_name': 'Poll Display Name'}], PollDescriptor: [{'display_name': 'Poll Display Name'}],
WordCloudDescriptor: [{}], WordCloudDescriptor: [{}],
# This is being excluded because it has dependencies on django # This is being excluded because it has dependencies on django
......
...@@ -1583,7 +1583,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime): ...@@ -1583,7 +1583,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
replace_urls, descriptor_runtime, user=None, filestore=None, replace_urls, descriptor_runtime, user=None, filestore=None,
debug=False, hostname="", xqueue=None, publish=None, node_path="", debug=False, hostname="", xqueue=None, publish=None, node_path="",
anonymous_student_id='', course_id=None, anonymous_student_id='', course_id=None,
open_ended_grading_interface=None, s3_interface=None,
cache=None, can_execute_unsafe_code=None, replace_course_urls=None, cache=None, can_execute_unsafe_code=None, replace_course_urls=None,
replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None, replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None,
field_data=None, get_user_role=None, rebind_noauth_module_to_user=None, field_data=None, get_user_role=None, rebind_noauth_module_to_user=None,
...@@ -1678,9 +1677,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime): ...@@ -1678,9 +1677,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
if publish: if publish:
self.publish = publish self.publish = publish
self.open_ended_grading_interface = open_ended_grading_interface
self.s3_interface = s3_interface
self.cache = cache or DoNothingCache() self.cache = cache or DoNothingCache()
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False) self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
self.get_python_lib_zip = get_python_lib_zip or (lambda: None) self.get_python_lib_zip = get_python_lib_zip or (lambda: None)
......
"""
Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from .rubric import RubricPage
class OpenResponsePage(PageObject):
    """
    Open-ended response in the courseware.
    """

    url = None

    def is_browser_on_page(self):
        """The page is loaded once the combined open-ended module is present."""
        return self.q(css='div.xmodule_CombinedOpenEndedModule').present

    @property
    def assessment_type(self):
        """
        Return the type of assessment currently active.
        Options are "self", "ai", or "peer"
        """
        labels = self.q(css='section#combined-open-ended-status>div.statusitem-current').text

        if not labels:
            self.warning("Could not find assessment type label")

        # Provide some tolerance to UI changes
        normalized = labels[0].lower().strip()
        for candidate in ('self', 'ai', 'peer'):
            if candidate in normalized:
                return candidate
        raise ValueError("Unexpected assessment type: '{0}'".format(normalized))

    @property
    def prompt(self):
        """
        Return an HTML string representing the essay prompt.
        """
        prompt_css = "section.open-ended-child>div.prompt"
        found = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results

        if not found:
            self.warning("Could not find essay prompt on page.")
            return ""
        if len(found) > 1:
            self.warning("Multiple essay prompts found on page; using the first one.")
        return found[0]

    @property
    def rubric(self):
        """
        Return a `RubricPage` for a self-assessment problem.
        If no rubric is available, raises a `BrokenPromise` exception.
        """
        page = RubricPage(self.browser)
        page.wait_for_page()
        return page

    @property
    def written_feedback(self):
        """
        Return the written feedback from the grader (if any).
        If no feedback available, returns None.
        """
        messages = self.q(css='div.written-feedback').text
        return messages[0] if messages else None

    @property
    def alert_message(self):
        """
        Alert message displayed to the user.
        """
        alerts = self.q(css="div.open-ended-alert").text
        return alerts[0] if alerts else ""

    @property
    def grader_status(self):
        """
        Status message from the grader.
        If not present, return an empty string.
        """
        statuses = self.q(css='div.grader-status').text

        if not statuses:
            self.warning("No grader status found")
            return ""
        if len(statuses) > 1:
            self.warning("Multiple grader statuses found; returning the first one")
        return statuses[0]

    def set_response(self, response_str):
        """
        Input a response to the prompt.
        """
        self.q(css="textarea.short-form-response").fill(response_str)

    def save_response(self):
        """
        Save the response for later submission.
        """
        self.q(css='input.save-button').first.click()
        # Wait until the alert confirms the save.
        EmptyPromise(
            lambda: 'save' in self.alert_message.lower(),
            "Status message saved"
        ).fulfill()

    def submit_response(self):
        """
        Submit a response for grading.
        """
        self.q(css='input.submit-button').first.click()

        # Confirm via the modal dialog.
        self.q(css='button.ok-button').first.click()

        # Ensure that the submission completes
        self._wait_for_submitted(self.assessment_type)

    def _wait_for_submitted(self, assessment_type):
        """
        Wait for the submission to complete.
        `assessment_type` is either 'self', 'ai', or 'peer'
        """
        if assessment_type == 'self':
            RubricPage(self.browser).wait_for_page()
        elif assessment_type in ('ai', 'peer'):
            EmptyPromise(
                lambda: self.grader_status != 'Unanswered',
                "Problem status is no longer 'unanswered'"
            ).fulfill()
        else:
            self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
            EmptyPromise(lambda: True, "Unrecognized assessment type").fulfill()
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
class ScoreMismatchError(Exception):
    """
    Raised when the scores a caller supplies do not line up with the
    rubric categories (or score options) rendered on the page.
    """
class RubricPage(PageObject):
    """
    Rubric for open-ended response problems, including calibration and peer-grading.
    """

    url = None

    def is_browser_on_page(self):
        """
        Return a boolean indicating whether the rubric is available.
        """
        return self.q(css='div.rubric').present

    @property
    def categories(self):
        """
        Return a list of categories available in the essay rubric.

        Example:
            ["Writing Applications", "Language Conventions"]

        The rubric is not always visible; if it's not available,
        this will return an empty list.
        """
        return self.q(css='span.rubric-category').text

    def set_scores(self, scores):
        """
        Set the rubric scores.  `scores` is a list of integers
        indicating the number of points in each category.

        For example, `scores` might be [0, 2, 1] if the student scored
        0 points in the first category, 2 points in the second category,
        and 1 point in the third category.

        If the number of scores does not match the number of categories,
        a `ScoreMismatchError` is raised.
        """
        categories = self.categories
        if len(scores) != len(categories):
            # Bug fix: the message previously interpolated the category
            # *list* instead of its length.
            raise ScoreMismatchError(
                "Received {0} scores but there are {1} rubric categories".format(
                    len(scores), len(categories)))

        # Set the score for each category
        for score_index, score in enumerate(scores):

            # Check that we have enough radio buttons in this category
            category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1)
            num_options = len(self.q(css=category_css + ' input.score-selection').results)
            if score > num_options:
                # Bug fix: report the requested score and the number of
                # available options (previously reported the loop index
                # and the number of scores).
                raise ScoreMismatchError(
                    "Tried to select score {0} but there are only {1} options".format(
                        score, num_options))

            # Check the radio button at the correct index
            input_css = (
                category_css +
                ">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(score + 1)
            )
            # Bind `input_css` as a default argument so the promise closure
            # does not pick up a later iteration's value (late-binding pitfall).
            EmptyPromise(
                lambda css=input_css: self._select_score_radio_button(css),
                "Score selection failed."
            ).fulfill()

    def _select_score_radio_button(self, radio_button_css):
        """Click the given radio button and report whether it is now selected."""
        self.q(css=radio_button_css).first.click()
        return self.q(css=radio_button_css).selected

    @property
    def feedback(self):
        """
        Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment).
        Example: ['correct', 'incorrect']

        If no feedback is available, returns an empty list.
        If feedback could not be interpreted (unexpected CSS class),
        the list will contain a `None` item.
        """
        # Get the green checkmark / red x labels
        # We need to filter out the similar-looking CSS classes
        # for the rubric items that are NOT marked correct/incorrect
        feedback_css = 'div.rubric-label>label'
        labels = [
            el_class for el_class in
            self.q(css=feedback_css).attrs('class')
            if el_class != 'rubric-elements-info'
        ]

        def map_feedback(css_class):
            """
            Map CSS classes on the labels to correct/incorrect
            """
            if 'choicegroup_incorrect' in css_class:
                return 'incorrect'
            elif 'choicegroup_correct' in css_class:
                return 'correct'
            else:
                return None

        # List comprehension (instead of py2 `map`) so the return type
        # is a list on both Python 2 and Python 3.
        return [map_feedback(label) for label in labels]

    def submit(self, promise_check_type=None):
        """
        Submit the rubric.
        `promise_check_type` is either 'self', or 'peer'. If promise check is not required then don't pass any value.
        """
        # Wait for the button to become enabled
        button_css = 'input.submit-button'
        EmptyPromise(
            lambda: all(self.q(css=button_css).map(lambda el: not el.get_attribute('disabled')).results),
            "Submit button not enabled"
        ).fulfill()

        # Submit the assessment
        self.q(css=button_css).first.click()

        if promise_check_type == 'self':
            # Check if submitted rubric is available
            EmptyPromise(
                lambda: self.q(css='div.rubric-label>label').present, 'Submitted Rubric not available!'
            ).fulfill()
        elif promise_check_type == 'peer':
            # Check if we are ready for peer grading
            EmptyPromise(
                lambda: self.q(css='input.calibration-feedback-button').present, 'Not ready for peer grading!'
            ).fulfill()
<combinedopenended max_score="2" markdown="null" max_attempts="1000">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your vies on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
<combinedopenended max_score="1" accept_file_upload="False" markdown="null" max_attempts="10000" skip_spelling_checks="False" version="1">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>
<combinedopenended max_score="1" accept_file_upload="False" markdown="null" max_attempts="1000" skip_spelling_checks="False" version="1">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>
<poll url_name="markdown" question="## This is a test&#10;&#10;&lt;h1&gt;This is only a &amp;gt;&amp;lt;test&lt;/h1&gt;&#10;&#10;* One&#10;* Two&#10;* Three&#10;&#10;1. First&#10;2. Second&#10;3. Third&#10;&#10;We shall find out if markdown is respected.&#10;&#10;&gt; &quot;I have not yet begun to code.&quot;"
feedback="### This is some feedback&#10;&#10;[This is a link](http://www.example.com)&#10;&#10;&lt;a href=&quot;http://www.example.com&quot; target=&quot;_blank&quot;&gt;This is also a link.&lt;/a&gt;&#10;&#10;This is a paragraph with *emphasized* and **bold** text, and **_both_**."
answers='[["long", {"label": "I *feel* like this test will **pass**&lt;code&gt;test&lt;/code&gt;.", "img": null}]]'/>
...@@ -1628,9 +1628,9 @@ class DeprecationWarningMessageTest(CourseOutlineTest): ...@@ -1628,9 +1628,9 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
self.course_fixture.create_xblock( self.course_fixture.create_xblock(
parent_vertical.locator, parent_vertical.locator,
XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml')) XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml'))
) )
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer')) self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey'))
def _verify_deprecation_warning_info( def _verify_deprecation_warning_info(
self, self,
...@@ -1663,56 +1663,56 @@ class DeprecationWarningMessageTest(CourseOutlineTest): ...@@ -1663,56 +1663,56 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
def test_no_deprecation_warning_message_present(self): def test_no_deprecation_warning_message_present(self):
""" """
Scenario: Verify that deprecation warning message is not shown if ORA1 Scenario: Verify that deprecation warning message is not shown if no deprecated
advance modules are not present and also no ORA1 component exist in advance modules are not present and also no deprecated component exist in
course outline. course outline.
When I goto course outline When I goto course outline
Then I don't see ORA1 deprecated warning Then I don't see any deprecation warning
""" """
self.course_outline_page.visit() self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible) self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self): def test_deprecation_warning_message_present(self):
""" """
Scenario: Verify deprecation warning message if ORA1 advance modules Scenario: Verify deprecation warning message if deprecated modules
and ORA1 components are present. and components are present.
Given I have ORA1 advance modules present in `Advanced Module List` Given I have "poll" advance modules present in `Advanced Module List`
And I have created 2 ORA1 components And I have created 2 poll components
When I go to course outline When I go to course outline
Then I see ORA1 deprecated warning Then I see poll deprecated warning
And I see correct ORA1 deprecated warning heading text And I see correct poll deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text And I see correct poll deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names And I see list of poll components with correct display names
""" """
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended']) self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
self._create_deprecated_components() self._create_deprecated_components()
self.course_outline_page.visit() self.course_outline_page.visit()
self._verify_deprecation_warning_info( self._verify_deprecation_warning_info(
deprecated_blocks_present=True, deprecated_blocks_present=True,
components_present=True, components_present=True,
components_display_name_list=['Open', 'Peer'], components_display_name_list=['Poll', 'Survey'],
deprecated_modules_list=['peergrading', 'combinedopenended'] deprecated_modules_list=['poll', 'survey']
) )
def test_deprecation_warning_with_no_displayname(self): def test_deprecation_warning_with_no_displayname(self):
""" """
Scenario: Verify deprecation warning message if ORA1 components are present. Scenario: Verify deprecation warning message if poll components are present.
Given I have created 1 ORA1 deprecated component Given I have created 1 poll deprecated component
When I go to course outline When I go to course outline
Then I see ORA1 deprecated warning Then I see poll deprecated warning
And I see correct ORA1 deprecated warning heading text And I see correct poll deprecated warning heading text
And I see list of ORA1 components with correct message And I see list of poll components with correct message
""" """
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0] parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
# Create a deprecated ORA1 component with display_name to be empty and make sure # Create a deprecated component with display_name to be empty and make sure
# the deprecation warning is displayed with # the deprecation warning is displayed with
self.course_fixture.create_xblock( self.course_fixture.create_xblock(
parent_vertical.locator, parent_vertical.locator,
XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml')) XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml'))
) )
self.course_outline_page.visit() self.course_outline_page.visit()
...@@ -1722,44 +1722,44 @@ class DeprecationWarningMessageTest(CourseOutlineTest): ...@@ -1722,44 +1722,44 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
components_display_name_list=[self.DEFAULT_DISPLAYNAME], components_display_name_list=[self.DEFAULT_DISPLAYNAME],
) )
def test_warning_with_ora1_advance_modules_only(self): def test_warning_with_poll_advance_modules_only(self):
""" """
Scenario: Verify that deprecation warning message is shown if only Scenario: Verify that deprecation warning message is shown if only
ORA1 advance modules are present and no ORA1 component exist. poll advance modules are present and no poll component exist.
Given I have ORA1 advance modules present in `Advanced Module List` Given I have poll advance modules present in `Advanced Module List`
When I go to course outline When I go to course outline
Then I see ORA1 deprecated warning Then I see poll deprecated warning
And I see correct ORA1 deprecated warning heading text And I see correct poll deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text And I see correct poll deprecated warning advance modules remove text
And I don't see list of ORA1 components And I don't see list of poll components
""" """
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended']) self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
self.course_outline_page.visit() self.course_outline_page.visit()
self._verify_deprecation_warning_info( self._verify_deprecation_warning_info(
deprecated_blocks_present=True, deprecated_blocks_present=True,
components_present=False, components_present=False,
deprecated_modules_list=['peergrading', 'combinedopenended'] deprecated_modules_list=['poll', 'survey']
) )
def test_warning_with_ora1_components_only(self): def test_warning_with_poll_components_only(self):
""" """
Scenario: Verify that deprecation warning message is shown if only Scenario: Verify that deprecation warning message is shown if only
ORA1 component exist and no ORA1 advance modules are present. poll component exist and no poll advance modules are present.
Given I have created two ORA1 components Given I have created two poll components
When I go to course outline When I go to course outline
Then I see ORA1 deprecated warning Then I see poll deprecated warning
And I see correct ORA1 deprecated warning heading text And I see correct poll deprecated warning heading text
And I don't see ORA1 deprecated warning advance modules remove text And I don't see poll deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names And I see list of poll components with correct display names
""" """
self._create_deprecated_components() self._create_deprecated_components()
self.course_outline_page.visit() self.course_outline_page.visit()
self._verify_deprecation_warning_info( self._verify_deprecation_warning_info(
deprecated_blocks_present=False, deprecated_blocks_present=False,
components_present=True, components_present=True,
components_display_name_list=['Open', 'Peer'] components_display_name_list=['Poll', 'Survey']
) )
......
"""
Acceptance tests for Studio related to edit/save peer grading interface.
"""
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.import_export import ExportCoursePage
from ...pages.studio.component_editor import ComponentEditorView
from ...pages.studio.overview import CourseOutlinePage
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
class ORAComponentTest(StudioCourseTest):
    """
    Acceptance tests for the legacy ORA1 (combinedopenended / peergrading)
    components in Studio.

    Covers two behaviors:
    * edit/save round-trips the ``link_to_location`` setting on the peer
      grading interface component, and
    * ORA1 components render a deprecation validation error on the unit page.
    """

    def setUp(self):
        super(ORAComponentTest, self).setUp()
        # Both pages are addressed by the same (org, number, run) triple.
        course_key_parts = (
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )
        self.course_outline_page = CourseOutlinePage(self.browser, *course_key_parts)
        self.export_page = ExportCoursePage(self.browser, *course_key_parts)

    def populate_course_fixture(self, course_fixture):
        """
        Build a course containing one ORA1 peer problem and one peer
        grading interface component inside a single unit.
        """
        peer_problem = XBlockFixtureDesc(
            'combinedopenended',
            "Peer Problem",
            data=load_data_str('ora_peer_problem.xml'),
            metadata={
                'graded': True,
            },
        )
        grading_interface = XBlockFixtureDesc('peergrading', 'Peer Module')
        unit = XBlockFixtureDesc('vertical', 'Test Unit').add_children(
            peer_problem,
            grading_interface,
        )
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(unit)
            )
        )

    def _go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
        """
        Open the course outline, expand the named subsection, and navigate
        to the named unit. Returns the unit page object.
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(section_name)
        subsection = section.subsection(subsection_name).expand_subsection()
        return subsection.unit(unit_name).go_to()

    def test_edit_save_and_export(self):
        """
        Ensure that edit/save is working correctly with link_to_location
        in peer interface settings.
        """
        self.course_outline_page.visit()
        unit_page = self._go_to_unit_page()

        # The problem's locator should contain "combinedopenended".
        problem_locator = unit_page.xblocks[1].locator
        self.assertIn("combinedopenended", problem_locator)

        # The grading interface component should be named "Peer Module".
        interface_block = unit_page.xblocks[2]
        self.assertEqual(interface_block.name, "Peer Module")

        interface_block.edit()
        editor = ComponentEditorView(self.browser, interface_block.locator)
        editor.set_field_value_and_save('Link to Problem Location', problem_locator)

        # Re-open the editor: the saved link_to_location must still be there.
        interface_block.edit()
        saved_field = editor.get_setting_element("Link to Problem Location")
        self.assertEqual(saved_field.get_attribute('value'), problem_locator)

    def test_verify_ora1_deprecation_message(self):
        """
        Scenario: Verifies the ora1 deprecation message on ora components.
        Given I have a course with ora 1 components
        When I go to the unit page
        Then I see a deprecation error message in ora 1 components.
        """
        self.course_outline_page.visit()
        unit_page = self._go_to_unit_page()
        for block in unit_page.xblocks:
            self.assertTrue(block.has_validation_error)
            self.assertEqual(
                block.validation_error_text,
                "ORA1 is no longer supported. To use this assessment, "
                "replace this ORA1 component with an ORA2 component."
            )
<chapter display_name="New Section 2 - Open Ended">
<sequential url_name="b7ebe0f048e9466e9ef32e7815fb5a93"/>
<sequential url_name="5c33f2c2b3aa45f5bfbf7bf7f9bcb2ff"/>
<sequential url_name="f58fd90cbd794cad881692d3b6e5cdbf"/>
<sequential url_name="345d618ca88944668d86586f83bff338"/>
<sequential url_name="4eadf76912cd436b9d698c8759784d8d"/>
</chapter>
<combinedopenended accept_file_upload="true" markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>
<combinedopenended markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Peer)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
<combinedopenended markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>
<course advanced_modules="[&quot;annotatable&quot;, &quot;combinedopenended&quot;, &quot;peergrading&quot;, &quot;lti&quot;, &quot;word_cloud&quot;]" display_name="Manual Smoke Test Course 1" lti_passports="[&quot;ims:12345:secret&quot;]" pdf_textbooks="[{&quot;tab_title&quot;: &quot;An Example Paper&quot;, &quot;id&quot;: &quot;0An_Example_Paper&quot;, &quot;chapters&quot;: [{&quot;url&quot;: &quot;/static/1.pdf&quot;, &quot;title&quot;: &quot;Introduction &quot;}]}]" show_calculator="true" show_chat="true" start="2014-06-26T00:00:00Z"> <course advanced_modules="[&quot;annotatable&quot;, &quot;lti&quot;, &quot;word_cloud&quot;]" display_name="Manual Smoke Test Course 1" lti_passports="[&quot;ims:12345:secret&quot;]" pdf_textbooks="[{&quot;tab_title&quot;: &quot;An Example Paper&quot;, &quot;id&quot;: &quot;0An_Example_Paper&quot;, &quot;chapters&quot;: [{&quot;url&quot;: &quot;/static/1.pdf&quot;, &quot;title&quot;: &quot;Introduction &quot;}]}]" show_calculator="true" show_chat="true" start="2014-06-26T00:00:00Z">
<chapter url_name="a64a6f63f75d430aa71e6ce113c5b4d2"/> <chapter url_name="a64a6f63f75d430aa71e6ce113c5b4d2"/>
<chapter url_name="d68c2861c10a4c9d92a679b4cfc0f924"/>
<chapter url_name="ab97a6dbfafd48868c36bed4c8c5391d"/> <chapter url_name="ab97a6dbfafd48868c36bed4c8c5391d"/>
<chapter url_name="5bb7a5ab824f460580a756a4f347377c"/> <chapter url_name="5bb7a5ab824f460580a756a4f347377c"/>
<chapter url_name="ce2fd991d84b4a5ca75350eb8e350627"/> <chapter url_name="ce2fd991d84b4a5ca75350eb8e350627"/>
......
<combinedopenended accept_file_upload="true" markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. 
Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>
<vertical display_name="Self Assessment" parent_sequential_url="i4x://ManTestX/ManTest1/sequential/b7ebe0f048e9466e9ef32e7815fb5a93" index_in_children_list="0">
<combinedopenended url_name="ecfe4fa774ff48d089ae84daa1f6cc75"/>
</vertical>
{"course/2014": {"advanced_modules": ["annotatable", "combinedopenended", "peergrading", "lti", "word_cloud"], "show_calculator": true, "display_name": "Manual Smoke Test Course 1", "tabs": [{"type": "courseware", "name": "Courseware"}, {"type": "course_info", "name": "Course Info"}, {"type": "textbooks", "name": "Textbooks"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}, {"type": "pdf_textbooks", "name": "Textbooks"}, {"type": "open_ended", "name": "Open Ended Panel"}], "discussion_topics": {"General": {"id": "i4x-ManTestX-ManTest1-course-2014"}}, "start": "2014-06-26T00:00:00Z", "pdf_textbooks": [{"tab_title": "An Example Paper", "id": "0An_Example_Paper", "chapters": [{"url": "/static/1.pdf", "title": "Introduction "}]}], "lti_passports": ["ims:12345:secret"]}} {"course/2014": {"advanced_modules": ["annotatable", "lti", "word_cloud"], "show_calculator": true, "display_name": "Manual Smoke Test Course 1", "tabs": [{"type": "courseware", "name": "Courseware"}, {"type": "course_info", "name": "Course Info"}, {"type": "textbooks", "name": "Textbooks"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}, {"type": "pdf_textbooks", "name": "Textbooks"}], "discussion_topics": {"General": {"id": "i4x-ManTestX-ManTest1-course-2014"}}, "start": "2014-06-26T00:00:00Z", "pdf_textbooks": [{"tab_title": "An Example Paper", "id": "0An_Example_Paper", "chapters": [{"url": "/static/1.pdf", "title": "Introduction "}]}], "lti_passports": ["ims:12345:secret"]}}
<sequential display_name="New Subsection 2.5">
<vertical url_name="4502126328484ed58c87e7ba3b0fa21d"/>
</sequential>
<sequential display_name="New Subsection 2.2">
<vertical url_name="e34798bf546a4178ab76afe3a5f729af"/>
</sequential>
<sequential display_name="New Subsection 2.1">
<vertical url_name="5887a034ad17480393c5ebca4b8fd1d4"/>
</sequential>
<vertical display_name="File Uploads">
<combinedopenended url_name="3b04d935c8d945c3900708279fb24892"/>
</vertical>
<vertical display_name="Self Assessment">
<combinedopenended url_name="ecfe4fa774ff48d089ae84daa1f6cc75"/>
</vertical>
<vertical display_name="Peer Assessment">
<combinedopenended url_name="b3aa2db471a9412fbc96302f2e5ea983"/>
</vertical>
This is a very very simple course, useful for debugging open ended grading code.
<combinedopenended attempts="10000" display_name = "Humanities Question -- Machine Assessed">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
<task>
<openended min_score_to_attempt="2" max_score_to_attempt="3">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
\ No newline at end of file
<combinedopenended attempts="1" display_name = "Humanities Question -- Machine Assessed">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>
\ No newline at end of file
<combinedopenended attempts="1" display_name = "Humanities Question -- Machine Assessed" accept_file_upload="True">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>
\ No newline at end of file
<course org="edX" course="open_ended" url_name="2012_Fall"/>
<course>
<chapter url_name="Overview">
<combinedopenended url_name="SampleQuestion"/>
<combinedopenended url_name="SampleQuestion1Attempt"/>
<combinedopenended url_name="SampleQuestionImageUpload"/>
<peergrading url_name="PeerGradingSample"/>
<peergrading url_name="PeerGradingScored"/>
<peergrading url_name="PeerGradingLinked"/>
</chapter>
</course>
<peergrading is_graded="True" max_grade="1" use_for_single_location="True" link_to_location="i4x://edX/open_ended/combinedopenended/SampleQuestion"/>
\ No newline at end of file
<peergrading is_graded="True" max_grade="1" use_for_single_location="False" link_to_location="i4x://edX/open_ended/combinedopenended/SampleQuestion"/>
\ No newline at end of file
{
"course/2012_Fall": {
"graceperiod": "2 days 5 hours 59 minutes 59 seconds",
"start": "2015-07-17T12:00",
"display_name": "Self Assessment Test",
"graded": "true"
},
"chapter/Overview": {
"display_name": "Overview"
},
"combinedopenended/SampleQuestion": {
"display_name": "Sample Question"
},
"peergrading/PeerGradingSample": {
"display_name": "Sample Question"
}
}
<course org="edX" course="sa_test" url_name="2012_Fall"/>
This is a very very simple course, useful for debugging open ended grading code. This is specifically for testing if a peer grading module with no path to it in the course will be handled properly.
<course org="edX" course="open_ended_nopath" url_name="2012_Fall"/>
<course>
<chapter url_name="Overview">
</chapter>
</course>
{
"course/2012_Fall": {
"graceperiod": "2 days 5 hours 59 minutes 59 seconds",
"start": "2015-07-17T12:00",
"display_name": "Self Assessment Test",
"graded": "true"
},
"chapter/Overview": {
"display_name": "Overview"
}
}
<peergrading display_name = "Peer Grading" use_for_single_location="False" is_graded="False"/>
Feature: LMS.Open ended grading
As a student in an edX course
In order to complete the courseware questions
I want the machine learning grading to be functional
# Commenting these all out right now until we can
# make a reference implementation for a course with
# an open ended grading problem that is always available
#
# Scenario: An answer that is too short is rejected
# Given I navigate to an openended question
# And I enter the answer "z"
# When I press the "Check" button
# And I wait for "8" seconds
# And I see the grader status "Submitted for grading"
# And I press the "Recheck for Feedback" button
# Then I see the red X
# And I see the grader score "0"
# Scenario: An answer with too many spelling errors is rejected
# Given I navigate to an openended question
# And I enter the answer "az"
# When I press the "Check" button
# And I wait for "8" seconds
# And I see the grader status "Submitted for grading"
# And I press the "Recheck for Feedback" button
# Then I see the red X
# And I see the grader score "0"
# When I click the link for full output
# Then I see the spelling grading message "More spelling errors than average."
# Scenario: An answer makes its way to the instructor dashboard
# Given I navigate to an openended question as staff
# When I submit the answer "I love Chemistry."
# And I wait for "8" seconds
# And I visit the staff grading page
# Then my answer is queued for instructor grading
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
from nose.tools import assert_equals, assert_in # pylint: disable=no-name-in-module
from logging import getLogger
logger = getLogger(__name__)
def _navigate_to_openended_problem(step, is_staff=False):
    """
    Register for the 3.091x test course, log in, and open the openended problem.

    Shared by the student and staff navigation steps below; the only
    difference between them is the staff flag passed to registration.
    """
    if is_staff:
        world.register_by_course_key('MITx/3.091x/2012_Fall', True)
    else:
        world.register_by_course_key('MITx/3.091x/2012_Fall')
    world.log_in(email='robot@edx.org', password='test')
    problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/'
    world.browser.visit(django_url(problem))
    # The openended problem is the fifth element of the sequence nav bar.
    tab_css = 'ol#sequence-list > li > a[data-element="5"]'
    world.css_click(tab_css)


@step('I navigate to an openended question$')
def navigate_to_an_openended_question(step):
    """Open the openended problem as an ordinary student."""
    _navigate_to_openended_problem(step)


@step('I navigate to an openended question as staff$')
def navigate_to_an_openended_question_as_staff(step):
    """Open the openended problem as a staff user."""
    _navigate_to_openended_problem(step, is_staff=True)
@step(u'I enter the answer "([^"]*)"$')
def enter_the_answer_text(step, text):
    """Type *text* into the essay textarea without submitting it."""
    answer_field = 'textarea'
    world.css_fill(answer_field, text)
@step(u'I submit the answer "([^"]*)"$')
def i_submit_the_answer_text(step, text):
    """Type *text* into the essay textarea and press the Check button."""
    answer_field = 'textarea'
    check_button = 'input.check'
    world.css_fill(answer_field, text)
    world.css_click(check_button)
@step('I click the link for full output$')
def click_full_output_link(step):
    """Expand the full grader-feedback output."""
    full_output_link = 'a.full'
    world.css_click(full_output_link)
@step(u'I visit the staff grading page$')
def i_visit_the_staff_grading_page(step):
    """Navigate from the courseware to the instructor dashboard's staff grading view."""
    for link_text in ('Instructor', 'Staff grading'):
        world.click_link(link_text)
@step(u'I see the grader message "([^"]*)"$')
def see_grader_message(step, msg):
    """Assert that *msg* appears within the external grader message area."""
    displayed = world.css_text('div.external-grader-message')
    assert_in(msg, displayed)
@step(u'I see the grader status "([^"]*)"$')
def see_the_grader_status(step, status):
    """Assert the grader status text equals *status* exactly."""
    displayed = world.css_text('div.grader-status')
    assert_equals(status, displayed)
@step('I see the red X$')
def see_the_red_x(step):
    """Assert the 'incorrect' marker is shown inside the grader status area."""
    incorrect_marker = 'div.grader-status > span.incorrect'
    assert world.is_css_present(incorrect_marker)
@step(u'I see the grader score "([^"]*)"$')
def see_the_grader_score(step, score):
    """Assert the result output reads exactly 'Score: <score>'."""
    displayed = world.css_text('div.result-output > p')
    assert_equals(displayed, 'Score: %s' % score)
@step('I see the link for full output$')
def see_full_output_link(step):
    """Assert the link that expands the full grader output is present."""
    full_output_link = 'a.full'
    assert world.is_css_present(full_output_link)
@step('I see the spelling grading message "([^"]*)"$')
def see_spelling_msg(step, msg):
    """Assert the spelling feedback area reads exactly 'Spelling: <msg>'."""
    displayed = world.css_text('div.spelling')
    assert_equals('Spelling: %s' % msg, displayed)
@step(u'my answer is queued for instructor grading$')
def answer_is_queued_for_instructor_grading(step):
    """Assert the staff grading problem list shows exactly one pending submission."""
    displayed = world.css_text('ul.problem-list > li > a')
    assert_in("(0 graded, 1 pending)", displayed)
...@@ -24,11 +24,10 @@ from xmodule.modulestore.tests.factories import CourseFactory ...@@ -24,11 +24,10 @@ from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml from xmodule.modulestore.xml_importer import import_course_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT DATA_DIR = settings.COMMON_TEST_DATA_ROOT
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended'] XML_COURSE_DIRS = ['toy', 'simple']
MAPPINGS = { MAPPINGS = {
'edX/toy/2012_Fall': 'xml', 'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml', 'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
} }
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config( TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
...@@ -92,7 +91,7 @@ class CommandsTestBase(ModuleStoreTestCase): ...@@ -92,7 +91,7 @@ class CommandsTestBase(ModuleStoreTestCase):
self.assertEqual(course_ids, dumped_ids) self.assertEqual(course_ids, dumped_ids)
def test_correct_course_structure_metadata(self): def test_correct_course_structure_metadata(self):
course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall')) course_id = unicode(modulestore().make_course_key('edX', 'simple', '2012_Fall'))
args = [course_id] args = [course_id]
kwargs = {'modulestore': 'default'} kwargs = {'modulestore': 'default'}
......
...@@ -412,31 +412,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to ...@@ -412,31 +412,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS 'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
} }
# This is a hacky way to pass settings to the combined open ended xmodule
# It needs an S3 interface to upload images to S3
# It needs the open ended grading interface in order to get peer grading to be done
# this first checks to see if the descriptor is the correct one, and only sends settings if it is
# Get descriptor metadata fields indicating needs for various settings
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
# Initialize interfaces to None
open_ended_grading_interface = None
s3_interface = None
# Create interfaces if needed
if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
open_ended_grading_interface['mock_staff_grading'] = settings.MOCK_STAFF_GRADING
if needs_s3_interface:
s3_interface = {
'access_key': getattr(settings, 'AWS_ACCESS_KEY_ID', ''),
'secret_access_key': getattr(settings, 'AWS_SECRET_ACCESS_KEY', ''),
'storage_bucket_name': getattr(settings, 'AWS_STORAGE_BUCKET_NAME', 'openended')
}
def inner_get_module(descriptor): def inner_get_module(descriptor):
""" """
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set. Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
...@@ -725,8 +700,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to ...@@ -725,8 +700,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to
publish=publish, publish=publish,
anonymous_student_id=anonymous_student_id, anonymous_student_id=anonymous_student_id,
course_id=course_id, course_id=course_id,
open_ended_grading_interface=open_ended_grading_interface,
s3_interface=s3_interface,
cache=cache, cache=cache,
can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)), can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)), get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),
......
...@@ -2055,13 +2055,13 @@ class TestDisabledXBlockTypes(ModuleStoreTestCase): ...@@ -2055,13 +2055,13 @@ class TestDisabledXBlockTypes(ModuleStoreTestCase):
super(TestDisabledXBlockTypes, self).setUp() super(TestDisabledXBlockTypes, self).setUp()
for store in self.store.modulestores: for store in self.store.modulestores:
store.disabled_xblock_types = ('combinedopenended', 'peergrading', 'video') store.disabled_xblock_types = ('video',)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split) @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_item(self, default_ms): def test_get_item(self, default_ms):
with self.store.default_store(default_ms): with self.store.default_store(default_ms):
course = CourseFactory() course = CourseFactory()
for block_type in ('peergrading', 'combinedopenended', 'video'): for block_type in ('video',):
item = ItemFactory(category=block_type, parent=course) item = ItemFactory(category=block_type, parent=course)
item = self.store.get_item(item.scope_ids.usage_id) item = self.store.get_item(item.scope_ids.usage_id)
self.assertEqual(item.__class__.__name__, 'RawDescriptorWithMixins') self.assertEqual(item.__class__.__name__, 'RawDescriptorWithMixins')
...@@ -67,8 +67,6 @@ from .entrance_exams import ( ...@@ -67,8 +67,6 @@ from .entrance_exams import (
from courseware.user_state_client import DjangoXBlockUserStateClient from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous from util.cache import cache, cache_if_anonymous
...@@ -1126,25 +1124,6 @@ def submission_history(request, course_id, student_username, location): ...@@ -1126,25 +1124,6 @@ def submission_history(request, course_id, student_username, location):
return render_to_response('courseware/submission_history.html', context) return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.name in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.name](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab): def get_static_tab_contents(request, course, tab):
""" """
Returns the contents for the given static tab Returns the contents for the given static tab
......
"""
Command to manually re-post open ended submissions to the grader.
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from courseware.courses import get_course
from instructor.utils import get_module_for_student
class Command(BaseCommand):
    """
    Command to manually re-post open ended submissions to the grader.

    Reads a list of User.ids from a text file and, for each student, calls
    post_submission_for_student() to re-send their stored answer when their
    task state allows it.
    """
    help = ("Usage: openended_post <course_id> <problem_location> <student_ids.txt> <hostname> --dry-run --task-number=<task_number>\n"
            "The text file should contain a User.id in each line.")

    option_list = BaseCommand.option_list + (
        make_option('-n', '--dry-run',
                    action='store_true', dest='dry_run', default=False,
                    help="Do everything except send the submission to the grader. "),
        make_option('--task-number',
                    type='int', default=0,
                    help="Task number that needs to be submitted."),
    )

    def handle(self, *args, **options):
        """Validate arguments, then re-post each listed student's submission."""
        dry_run = options['dry_run']
        task_number = options['task_number']
        # Exactly four positional arguments are required; otherwise show usage.
        if len(args) == 4:
            course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
            location = course_id.make_usage_key_from_deprecated_string(args[1])
            # The supplied file holds one User.id per line.
            students_ids = [line.strip() for line in open(args[2])]
            hostname = args[3]
        else:
            print self.help
            return
        try:
            course = get_course(course_id)
        except ValueError as err:
            print err
            return
        # Fail early if the problem location is not part of the course.
        descriptor = modulestore().get_item(location, depth=0)
        if descriptor is None:
            print "Location not found in course"
            return
        if dry_run:
            print "Doing a dry run."
        students = User.objects.filter(id__in=students_ids).order_by('username')
        print "Number of students: {0}".format(students.count())
        for student in students:
            post_submission_for_student(student, course, location, task_number, dry_run=dry_run, hostname=hostname)
def post_submission_for_student(student, course, location, task_number, dry_run=True, hostname=None):
    """
    If the student's task child_state is ASSESSING post submission to grader.

    Arguments:
        student: User whose stored answer should be re-posted.
        course: course object containing the problem.
        location: usage key of the combinedopenended problem.
        task_number: index of the task within the problem to inspect.
        dry_run: when True, only report what would be sent.
        hostname: host placed on the dummy request -- presumably used when
            building callback URLs for the grader; confirm against
            get_module_for_student.

    Returns True only when a submission was (or, on a dry run, would have
    been) sent to the grader; False for every other outcome.
    """
    print "{0}:{1}".format(student.id, student.username)
    # Module lookup needs a request object; fake the minimum required.
    request = DummyRequest()
    request.user = student
    request.host = hostname
    try:
        module = get_module_for_student(student, location, request=request, course=course)
        if module is None:
            print " WARNING: No state found."
            return False
        latest_task = module.child_module.get_task_number(task_number)
        if latest_task is None:
            print " WARNING: No task state found."
            return False
        if not isinstance(latest_task, OpenEndedModule):
            print " ERROR: Not an OpenEndedModule task."
            return False
        latest_task_state = latest_task.child_state
        # Only the ASSESSING state has an ungraded submission worth re-posting.
        if latest_task_state == OpenEndedChild.INITIAL:
            print " WARNING: No submission."
        elif latest_task_state == OpenEndedChild.POST_ASSESSMENT or latest_task_state == OpenEndedChild.DONE:
            print " WARNING: Submission already graded."
        elif latest_task_state == OpenEndedChild.ASSESSING:
            latest_answer = latest_task.latest_answer()
            if dry_run:
                # Only the first 100 characters are echoed, to keep output readable.
                print " Skipped sending submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
            else:
                latest_task.send_to_grader(latest_answer, latest_task.system)
                print " Sent submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
            return True
        else:
            print "WARNING: Invalid task_state: {0}".format(latest_task_state)
    except Exception as err:  # pylint: disable=broad-except
        # Best-effort batch tool: report the error and move to the next student.
        print err
    return False
class DummyRequest(object):
    """
    Minimal stand-in for a Django HttpRequest.

    Carries only the attributes read when posting submissions from a
    management command, where no real request exists.
    """
    # Some consumers look at request.META; keep it an empty mapping.
    META = {}

    def __init__(self):
        # Callers are expected to fill in user and host after construction.
        self.user = None
        self.host = None
        self.session = {}
        self.secure = True

    def get_host(self):
        """Return the host name configured by the caller (None by default)."""
        return self.host

    def is_secure(self):
        """Report whether the request should be treated as HTTPS (defaults to True)."""
        return self.secure
"""
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
    """
    Command to get statistics about open ended problems.

    Prints per-state counts for one task of a combinedopenended problem,
    aggregated over every student enrolled in the course.
    """
    help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"

    option_list = BaseCommand.option_list + (
        make_option('--task-number',
                    type='int', default=0,
                    help="Task number to get statistics about."),
    )

    def handle(self, *args, **options):
        """Handler for command."""
        task_number = options['task_number']
        # Exactly two positional arguments are required; otherwise show usage.
        if len(args) == 2:
            course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
            usage_key = course_id.make_usage_key_from_deprecated_string(args[1])
        else:
            print self.help
            return
        try:
            course = get_course(course_id)
        except ValueError as err:
            print err
            return
        # Fail early if the problem location is not part of the course.
        descriptor = modulestore().get_item(usage_key, depth=0)
        if descriptor is None:
            print "Location {0} not found in course".format(usage_key)
            return
        try:
            enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
            print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
            # Allow a long-running scan to be stopped cleanly with Ctrl-C.
            calculate_task_statistics(enrolled_students, course, usage_key, task_number)
        except KeyboardInterrupt:
            print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
    """
    Print stats of students.

    Walks every StudentModule for *location* belonging to *students*,
    tallies the child_state of the given task, prints a summary, and
    (optionally) writes the graded/ungraded student lists to a CSV file.

    Returns the dict of per-state counts keyed by OpenEndedChild state.
    """
    # Per-state counters for the inspected task.
    stats = {
        OpenEndedChild.INITIAL: 0,
        OpenEndedChild.ASSESSING: 0,
        OpenEndedChild.POST_ASSESSMENT: 0,
        OpenEndedChild.DONE: 0
    }
    students_with_saved_answers = []
    students_with_ungraded_submissions = []  # pylint: disable=invalid-name
    students_with_graded_submissions = []  # pylint: disable=invalid-name
    students_with_no_state = []
    student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
    print "Total student modules: {0}".format(student_modules.count())
    for index, student_module in enumerate(student_modules):
        # Progress marker for long-running scans.
        if index % 100 == 0:
            print "--- {0} students processed ---".format(index)
        student = student_module.student
        print "{0}:{1}".format(student.id, student.username)
        module = get_module_for_student(student, location, course=course)
        if module is None:
            print " WARNING: No state found"
            students_with_no_state.append(student)
            continue
        latest_task = module.child_module.get_task_number(task_number)
        if latest_task is None:
            print " No task state found"
            students_with_no_state.append(student)
            continue
        task_state = latest_task.child_state
        stats[task_state] += 1
        print " State: {0}".format(task_state)
        # Bucket the student by what their task state implies about grading.
        if task_state == OpenEndedChild.INITIAL:
            # INITIAL with a stored_answer means a draft was saved but never submitted.
            if latest_task.stored_answer is not None:
                students_with_saved_answers.append(student)
        elif task_state == OpenEndedChild.ASSESSING:
            students_with_ungraded_submissions.append(student)
        elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
            students_with_graded_submissions.append(student)
    print "----------------------------------"
    print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
    print "Course: {0}".format(course.id)
    print "Location: {0}".format(location)
    print "No state: {0}".format(len(students_with_no_state))
    # Saved-draft students are excluded from the plain "Initial State" count.
    print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
    print "Saved answers: {0}".format(len(students_with_saved_answers))
    print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
    print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
    print "----------------------------------"
    if write_to_file:
        # Timestamped, space-delimited CSV of ungraded then graded students.
        filename = "stats.{0}.{1}".format(location.course, location.name)
        time_stamp = time.strftime("%Y%m%d-%H%M%S")
        with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
            writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
            for student in students_with_ungraded_submissions:
                writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, None), student.username))
            for student in students_with_graded_submissions:
                writer.writerow(("graded", student.id, anonymous_id_for_user(student, None), student.username))
    return stats
"""Test the openended_post management command."""
from datetime import datetime
import json
from mock import patch
from pytz import UTC
from django.conf import settings
from opaque_keys.edx.locations import Location
import capa.xqueue_interface as xqueue_interface
from courseware.courses import get_course_with_access
from courseware.tests.factories import StudentModuleFactory, UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.tests.test_util_open_ended import (
STATE_INITIAL, STATE_ACCESSING, STATE_POST_ASSESSMENT
)
from student.models import anonymous_id_for_user
from instructor.management.commands.openended_post import post_submission_for_student
from instructor.management.commands.openended_stats import calculate_task_statistics
from instructor.utils import get_module_for_student
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class OpenEndedPostTest(ModuleStoreTestCase):
    """Test the openended_post management command."""

    def setUp(self):
        super(OpenEndedPostTest, self).setUp()
        self.user = UserFactory()
        store = modulestore()
        # Import the fixture course that contains a combinedopenended problem.
        imported_courses = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended'])  # pylint: disable=maybe-no-member
        self.course = imported_courses[0]
        self.course_id = self.course.id
        self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
        self.self_assessment_task_number = 0
        self.open_ended_task_number = 1

        self.student_on_initial = UserFactory()
        self.student_on_accessing = UserFactory()
        self.student_on_post_assessment = UserFactory()

        # One StudentModule per student, each frozen at a different task state.
        state_fixtures = (
            (self.student_on_initial, STATE_INITIAL),
            (self.student_on_accessing, STATE_ACCESSING),
            (self.student_on_post_assessment, STATE_POST_ASSESSMENT),
        )
        for fixture_student, fixture_state in state_fixtures:
            StudentModuleFactory.create(
                course_id=self.course_id,
                module_state_key=self.problem_location,
                student=fixture_student,
                grade=0,
                max_grade=1,
                state=fixture_state,
            )

    def test_post_submission_for_student_on_initial(self):
        # A student still on the initial state has no submission to repost,
        # whether or not this is a dry run.
        course = get_course_with_access(self.student_on_initial, 'load', self.course_id)
        for dry_run in (True, False):
            posted = post_submission_for_student(
                self.student_on_initial, course, self.problem_location,
                self.open_ended_task_number, dry_run=dry_run
            )
            self.assertFalse(posted)

    def test_post_submission_for_student_on_accessing(self):
        # A student stuck in the ASSESSING state should have their stored
        # answer re-sent to the grading queue (unless it is a dry run).
        course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
        dry_posted = post_submission_for_student(
            self.student_on_accessing, course, self.problem_location,
            self.open_ended_task_number, dry_run=True
        )
        self.assertFalse(dry_posted)
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")

            module = get_module_for_student(self.student_on_accessing, self.problem_location)
            module.child_module.get_task_number(self.open_ended_task_number)
            student_response = "Here is an answer."
            student_anonymous_id = anonymous_id_for_user(self.student_on_accessing, None)
            submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)

            posted = post_submission_for_student(
                self.student_on_accessing, course, self.problem_location,
                self.open_ended_task_number, dry_run=False
            )
            self.assertTrue(posted)

            # Inspect the payload handed to the xqueue mock.
            queued_body = json.loads(mock_send_to_queue.call_args[1]['body'])
            self.assertEqual(queued_body['max_score'], 2)
            self.assertEqual(queued_body['student_response'], student_response)
            queued_student_info = json.loads(queued_body['student_info'])
            self.assertEqual(queued_student_info['anonymous_student_id'], student_anonymous_id)
            self.assertGreaterEqual(queued_student_info['submission_time'], submission_time)

    def test_post_submission_for_student_on_post_assessment(self):
        # An already-graded submission must not be re-queued, dry run or not.
        course = get_course_with_access(self.student_on_post_assessment, 'load', self.course_id)
        for dry_run in (True, False):
            posted = post_submission_for_student(
                self.student_on_post_assessment, course, self.problem_location,
                self.open_ended_task_number, dry_run=dry_run
            )
            self.assertFalse(posted)

    def test_post_submission_for_student_invalid_task(self):
        # Self-assessment tasks and out-of-range task numbers are rejected.
        course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
        out_of_bounds_task_number = 3
        for task_number in (self.self_assessment_task_number, out_of_bounds_task_number):
            posted = post_submission_for_student(
                self.student_on_accessing, course, self.problem_location,
                task_number, dry_run=False
            )
            self.assertFalse(posted)
class OpenEndedStatsTest(ModuleStoreTestCase):
    """Test the openended_stats management command."""

    def setUp(self):
        super(OpenEndedStatsTest, self).setUp()
        self.user = UserFactory()
        store = modulestore()
        # Import the fixture course that contains a combinedopenended problem.
        imported_courses = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended'])  # pylint: disable=maybe-no-member
        self.course = imported_courses[0]
        self.course_id = self.course.id
        self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
        self.task_number = 1
        self.invalid_task_number = 3

        self.student_on_initial = UserFactory()
        self.student_on_accessing = UserFactory()
        self.student_on_post_assessment = UserFactory()

        # One StudentModule per student, each frozen at a different task state.
        state_fixtures = (
            (self.student_on_initial, STATE_INITIAL),
            (self.student_on_accessing, STATE_ACCESSING),
            (self.student_on_post_assessment, STATE_POST_ASSESSMENT),
        )
        for fixture_student, fixture_state in state_fixtures:
            StudentModuleFactory.create(
                course_id=self.course_id,
                module_state_key=self.problem_location,
                student=fixture_student,
                grade=0,
                max_grade=1,
                state=fixture_state,
            )
        self.students = [self.student_on_initial, self.student_on_accessing, self.student_on_post_assessment]

    def test_calculate_task_statistics(self):
        course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)

        # Valid task: exactly one student sits in each populated state.
        expected_counts = (
            (OpenEndedChild.INITIAL, 1),
            (OpenEndedChild.ASSESSING, 1),
            (OpenEndedChild.POST_ASSESSMENT, 1),
            (OpenEndedChild.DONE, 0),
        )
        stats = calculate_task_statistics(self.students, course, self.problem_location, self.task_number, write_to_file=False)
        for state, count in expected_counts:
            self.assertEqual(stats[state], count)

        # Invalid task number: no state is found for anyone.
        stats = calculate_task_statistics(self.students, course, self.problem_location, self.invalid_task_number, write_to_file=False)
        for state, _ in expected_counts:
            self.assertEqual(stats[state], 0)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment