Commit a1b68b6e by Hasnain, committed by Bill Filler

Unit test added for backward compatibility.

parent 7e7abbb6
<openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}"> <openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}" prompts_type="text">
<title>Open Assessment Test</title> <title>Open Assessment Test</title>
<prompts> <prompts>
<prompt> <prompt>
......
<!--
  Test fixture: an <openassessment> block with prompts_type="html".
  It declares two prompts, a three-criterion rubric (Concise, Clear-headed,
  Form), and peer-assessment + self-assessment steps.
-->
<openassessment text_response="required" file_upload_response="" group_access="{&quot;381451918&quot;: [1179773159]}" prompts_type="html">
<title>Open Assessment Test</title>
<prompts>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat poverty? Please answer in a short essay of 200-300 words.</description>
</prompt>
<prompt>
<description>Given the state of the world today, what do you think should be done to combat pollution?</description>
</prompt>
</prompts>
<rubric>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
<!--
  Test fixture: an <openassessment> block with prompts_type="text" and no
  <prompts> element (only a rubric-level <prompt>), used to exercise the
  HTML/Text prompt-type backward-compatibility path.
-->
<openassessment prompts_type="text">
<title>Test the HTML/Text prompt type compatibility</title>
<rubric>
<prompt>Simple text</prompt>
<criterion>
<name>Concise</name>
<prompt>How concise is it?</prompt>
<option points="0">
<name>Neal Stephenson (late)</name>
<explanation>Neal Stephenson explanation</explanation>
</option>
<option points="1">
<name>HP Lovecraft</name>
<explanation>HP Lovecraft explanation</explanation>
</option>
<option points="3">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="4">
<name>Neal Stephenson (early)</name>
<explanation>Neal Stephenson (early) explanation</explanation>
</option>
<option points="5">
<name>Earnest Hemingway</name>
<explanation>Earnest Hemingway</explanation>
</option>
</criterion>
<criterion>
<name>Clear-headed</name>
<prompt>How clear is the thinking?</prompt>
<option points="0">
<name>Yogi Berra</name>
<explanation>Yogi Berra explanation</explanation>
</option>
<option points="1">
<name>Hunter S. Thompson</name>
<explanation>Hunter S. Thompson explanation</explanation>
</option>
<option points="2">
<name>Robert Heinlein</name>
<explanation>Robert Heinlein explanation</explanation>
</option>
<option points="3">
<name>Isaac Asimov</name>
<explanation>Isaac Asimov explanation</explanation>
</option>
<option points="10">
<name>Spock</name>
<explanation>Spock explanation</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0">
<name>lolcats</name>
<explanation>lolcats explanation</explanation>
</option>
<option points="1">
<name>Facebook</name>
<explanation>Facebook explanation</explanation>
</option>
<option points="2">
<name>Reddit</name>
<explanation>Reddit explanation</explanation>
</option>
<option points="3">
<name>metafilter</name>
<explanation>metafilter explanation</explanation>
</option>
<option points="4">
<name>Usenet, 1996</name>
<explanation>Usenet, 1996 explanation</explanation>
</option>
<option points="5">
<name>The Elements of Style</name>
<explanation>The Elements of Style explanation</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
...@@ -23,6 +23,7 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -23,6 +23,7 @@ class StudioViewTest(XBlockHandlerTestCase):
"text_response": "required", "text_response": "required",
"file_upload_response": None, "file_upload_response": None,
"prompts": [{"description": "Test prompt"}], "prompts": [{"description": "Test prompt"}],
"prompts_type": "html",
"feedback_prompt": "Test feedback prompt", "feedback_prompt": "Test feedback prompt",
"feedback_default_text": "Test feedback default text", "feedback_default_text": "Test feedback default text",
"submission_start": "4014-02-10T09:46", "submission_start": "4014-02-10T09:46",
...@@ -176,6 +177,16 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -176,6 +177,16 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
self.assertIn(expected_error, resp['msg'].lower()) self.assertIn(expected_error, resp['msg'].lower())
@scenario('data/basic_scenario_html_prompts_type.xml')
def test_update_context_with_prompts_type(self, xblock):
    """Updating the editor context can switch prompts_type from 'html' to 'text'."""
    editor_data = copy.deepcopy(self.UPDATE_EDITOR_DATA)
    editor_data['prompts_type'] = 'text'
    # Pretend the block has never been published so the update is allowed.
    mock_store = MagicMock()
    mock_store.has_published_version.return_value = False
    xblock.runtime.modulestore = mock_store
    payload = json.dumps(editor_data)
    resp = self.request(xblock, 'update_editor_context', payload, response_format='json')
    self.assertTrue(resp['success'], msg=resp.get('msg'))
@file_data('data/invalid_rubric.json') @file_data('data/invalid_rubric.json')
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_update_rubric_invalid(self, xblock, data): def test_update_rubric_invalid(self, xblock, data):
......
...@@ -130,11 +130,26 @@ class SubmissionTest(XBlockHandlerTestCase): ...@@ -130,11 +130,26 @@ class SubmissionTest(XBlockHandlerTestCase):
self.assertIn(expected_prompt, resp) self.assertIn(expected_prompt, resp)
@scenario('data/prompt_html.xml')
def test_prompt_html_to_text(self, xblock):
    """An HTML prompt renders as raw markup; after switching prompts_type
    to 'text', the same prompt renders with the markup HTML-escaped.
    """
    # With the scenario's HTML prompts_type, the markup is emitted verbatim.
    resp = self.request(xblock, 'render_submission', json.dumps(dict()))
    expected_prompt = u"<code><strong>Question 123</strong></code>"
    self.assertIn(expected_prompt, resp)
    # Flip to text: the identical prompt content must now appear escaped.
    xblock.prompts_type = "text"
    resp = self.request(xblock, 'render_submission', json.dumps(dict()))
    expected_prompt = "&lt;code&gt;&lt;strong&gt;Question 123&lt;/strong&gt;&lt;/code&gt;"
    self.assertIn(expected_prompt, resp)
@scenario('data/prompt_text.xml')
def test_prompt_text_to_html(self, xblock):
    """A plain-text prompt renders identically under both prompts_type values."""
    expected_prompt = "Simple text"
    # First render uses the scenario's text prompts_type.
    rendered = self.request(xblock, 'render_submission', json.dumps(dict()))
    self.assertIn(expected_prompt, rendered)
    # Re-render as HTML: plain text has nothing to escape, so it still appears.
    xblock.prompts_type = "html"
    rendered = self.request(xblock, 'render_submission', json.dumps(dict()))
    self.assertIn(expected_prompt, rendered)
@mock_s3 @mock_s3
@override_settings( @override_settings(
AWS_ACCESS_KEY_ID='foobar', AWS_ACCESS_KEY_ID='foobar',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment