Commit 0a2e5efe by David Ormsbee

Fix a bunch of tests, make the rubric display real again for doing evaluations

parent 95e6a40e
+# coding=utf-8
import datetime
from django.db import DatabaseError
from django.test import TestCase
import pytz
from ddt import ddt, file_data
from mock import patch
from nose.tools import raises
-from openassessment.peer import api
+from openassessment.peer import api as peer_api
from openassessment.peer.models import Assessment
from submissions import api as sub_api
from submissions.models import Submission
@@ -88,7 +90,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
    def test_create_evaluation(self):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        evaluation = api.create_assessment(
+        evaluation = peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
@@ -101,7 +103,7 @@ class TestApi(TestCase):
    @file_data('test_valid_evaluations.json')
    def test_get_evaluations(self, assessment_dict):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_assessment(
+        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
@@ -109,14 +111,14 @@ class TestApi(TestCase):
            assessment_dict,
            RUBRIC_DICT,
        )
-        evaluations = api.get_assessments(submission["uuid"])
+        evaluations = peer_api.get_assessments(submission["uuid"])
        self.assertEqual(1, len(evaluations))
        self._assert_evaluation(evaluations[0], **assessment_dict)

    @file_data('test_valid_evaluations.json')
    def test_get_evaluations_with_date(self, assessment_dict):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_assessment(
+        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
@@ -125,7 +127,7 @@ class TestApi(TestCase):
            RUBRIC_DICT,
            MONDAY
        )
-        evaluations = api.get_assessments(submission["uuid"])
+        evaluations = peer_api.get_assessments(submission["uuid"])
        self.assertEqual(1, len(evaluations))
        self._assert_evaluation(evaluations[0], **assessment_dict)
        self.assertEqual(evaluations[0]["scored_at"], MONDAY)
@@ -143,40 +145,40 @@ class TestApi(TestCase):
        scores = sub_api.get_score(STUDENT_ITEM)
        self.assertFalse(scores)
-        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_assessment(
+        peer_api.create_assessment(
            bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        api.create_assessment(
+        peer_api.create_assessment(
            sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_assessment(
+        peer_api.create_assessment(
            jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        peer_api.create_assessment(
            buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        peer_api.create_assessment(
            xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+        self.assertTrue(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

        # Tim should not have a score, because his submission does not have
        # enough evaluations.
        scores = sub_api.get_score(STUDENT_ITEM)
        self.assertFalse(scores)
-        api.create_assessment(
+        peer_api.create_assessment(
            tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        api.create_assessment(
+        peer_api.create_assessment(
            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
-        api.create_assessment(
+        peer_api.create_assessment(
            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
        )
@@ -187,9 +189,9 @@ class TestApi(TestCase):
        self.assertEqual(12, scores[0]["points_possible"])

-    @raises(api.PeerAssessmentRequestError)
+    @raises(peer_api.PeerAssessmentRequestError)
    def test_bad_configuration(self):
-        api.has_finished_required_evaluating("Tim", -1)
+        peer_api.has_finished_required_evaluating("Tim", -1)

    def test_get_submission_to_evaluate(self):
        self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@@ -199,27 +201,27 @@ class TestApi(TestCase):
        )
        self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
-        submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
+        submission = peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
        self.assertIsNotNone(submission)
        self.assertEqual(submission["answer"], u"Bob's answer")
        self.assertEqual(submission["student_item"], 2)
        self.assertEqual(submission["attempt_number"], 1)

-    @raises(api.PeerAssessmentWorkflowError)
+    @raises(peer_api.PeerAssessmentWorkflowError)
    def test_no_submissions_to_evaluate_for_tim(self):
        self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        api.get_submission_to_assess(STUDENT_ITEM, 3)
+        peer_api.get_submission_to_assess(STUDENT_ITEM, 3)

    """
    Some Error Checking Tests against DB failures.
    """

    @patch.object(Submission.objects, 'get')
-    @raises(api.PeerAssessmentInternalError)
+    @raises(peer_api.PeerAssessmentInternalError)
    def test_error_on_evaluation_creation(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_assessment(
+        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
@@ -233,7 +235,7 @@ class TestApi(TestCase):
    @raises(sub_api.SubmissionInternalError)
    def test_error_on_get_evaluation(self, mock_filter):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_assessment(
+        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
@@ -243,17 +245,17 @@ class TestApi(TestCase):
            MONDAY
        )
        mock_filter.side_effect = DatabaseError("Bad things happened")
-        api.get_assessments(submission["uuid"])
+        peer_api.get_assessments(submission["uuid"])

    def test_choose_score(self):
-        self.assertEqual(0, api._calculate_final_score([]))
-        self.assertEqual(5, api._calculate_final_score([5]))
+        self.assertEqual(0, peer_api._calculate_final_score([]))
+        self.assertEqual(5, peer_api._calculate_final_score([5]))
        # average of 5, 6, rounded down.
-        self.assertEqual(6, api._calculate_final_score([5, 6]))
-        self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
-        self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
-        self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
-        self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
+        self.assertEqual(6, peer_api._calculate_final_score([5, 6]))
+        self.assertEqual(14, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53]))
+        self.assertEqual(14, peer_api._calculate_final_score([6, 5, 12, 53, 16, 22]))
+        self.assertEqual(16, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
+        self.assertEqual(16, peer_api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
    @staticmethod
......
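Note: the test_choose_score assertions above pin down the scoring rule without showing _calculate_final_score itself. For orientation, the following is a minimal sketch of a median-style implementation that satisfies exactly those assertions; it is an illustration consistent with the tests, not the code from this commit.

import math


def _calculate_final_score(scores):
    """Median of peer scores, consistent with test_choose_score above.

    An empty list scores 0; an odd-length list returns the middle value;
    an even-length list averages the two middle values and rounds up
    (e.g. [5, 6] -> 6, [5, 6, 12, 16, 22, 53] -> 14).
    """
    if not scores:
        return 0
    ordered = sorted(scores)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return int(math.ceil((ordered[mid - 1] + ordered[mid]) / 2.0))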
@@ -7,12 +7,18 @@
            "secret": "yes",
            "ⓢⓐⓕⓔ": "no",
            "giveup": "reluctant",
-            "singing": "no",
+            "singing": "no"
        }
    },
    "basic_evaluation": {
        "points_earned": [1, 0, 3, 2],
        "points_possible": 12,
-        "feedback": "Your submission was thrilling."
+        "feedback": "Your submission was thrilling.",
+        "options_selected": {
+            "secret": "yes",
+            "ⓢⓐⓕⓔ": "no",
+            "giveup": "reluctant",
+            "singing": "no"
+        }
    }
}
\ No newline at end of file
@@ -12,7 +12,10 @@ class AssessmentMixin(object):
        configured modules, and ask for its rendered HTML.
        """
-        if not context_dict: context_dict = {}
+        if not context_dict:
+            context_dict = {}
+
+        # TODO: these shouldn't overwrite
        context_dict["xblock_trace"] = self.get_xblock_trace()
        context_dict["rubric_instructions"] = self.rubric_instructions
        context_dict["rubric_criteria"] = self.rubric_criteria
@@ -20,3 +23,4 @@ class AssessmentMixin(object):
        template = get_template(path)
        context = Context(context_dict)
        return Response(template.render(context), content_type='application/html', charset='UTF-8')
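The new TODO above notes that these context keys should not overwrite values a caller already supplied. One possible follow-up, not part of this commit, is dict.setdefault; a tiny self-contained sketch of the behavior it would give:

# Hypothetical follow-up for the TODO above (not in this commit):
# setdefault only fills a key when the caller has not already supplied it.
context_dict = {"rubric_criteria": ["caller-provided value"]}
context_dict.setdefault("xblock_trace", ("hypothetical", "trace"))
context_dict.setdefault("rubric_criteria", ["default value"])  # keeps caller's value
assert context_dict["rubric_criteria"] == ["caller-provided value"]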
@@ -9,6 +9,7 @@ import pkg_resources
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment
+from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
@@ -207,7 +208,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
    def student_view(self, context=None):
        """The main view of OpenAssessmentBlock, displayed when viewing courses.
        """
        trace = self.get_xblock_trace()
        student_item_dict = self.get_student_item_dict()
@@ -223,21 +223,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
            "grade_state": grade_state,
        }

-        rubric_dict = {
-            'criteria': self.rubric_criteria
-        }
-        assessment = peer_api.create_assessment(
-            data["submission_uuid"],
-            student_item_dict["student_id"],
-            int(peer_eval["must_grade"]),
-            int(peer_eval["must_be_graded_by"]),
-            assessment_dict,
-            rubric_dict,
-        )
-        # Temp kludge until we fix JSON serialization for datetime
-        assessment["scored_at"] = str(assessment["scored_at"])
        template = get_template("static/html/oa_base.html")
        context = Context(context_dict)
        frag = Fragment(template.render(context))
......
@@ -10,25 +10,31 @@ class PeerAssessmentMixin(AssessmentMixin):
    def assess(self, data, suffix=''):
        """Place an assessment into OpenAssessment system
        """
-        with self.get_assessment_module('peer-assessment') as assessment:
-            assessment_dict = {
-                "points_earned": map(int, data["points_earned"]),
-                "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
-                "feedback": "Not yet implemented.",
-            }
-            assessment = peer_api.create_assessment(
-                data["submission_uuid"],
-                self.get_student_item_dict()["student_id"],
-                int(assessment.must_grade),
-                int(assessment.must_be_graded_by),
-                assessment_dict
-            )
+        # with self.get_assessment_module('peer-assessment') as assessment:
+        assessment_ui_model = self.get_assessment_module('peer-assessment')
+        rubric_dict = {
+            'criteria': self.rubric_criteria
+        }
+        assessment_dict = {
+            "points_earned": map(int, data["points_earned"]),
+            "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
+            "feedback": "Not yet implemented.",
+            "options_selected": {},  # Placeholder
+        }
+        assessment = peer_api.create_assessment(
+            data["submission_uuid"],
+            self.get_student_item_dict()["student_id"],
+            int(assessment_ui_model.must_grade),
+            int(assessment_ui_model.must_be_graded_by),
+            assessment_dict,
+            rubric_dict,
+        )

        # Temp kludge until we fix JSON serialization for datetime
        assessment["scored_at"] = str(assessment["scored_at"])
-        return assessment, "Success"
+        return assessment_ui_model, "Success"

    @XBlock.handler
    def render_peer_assessment(self, data, suffix=''):
@@ -63,4 +69,4 @@ class PeerAssessmentMixin(AssessmentMixin):
        """
        for assessment in self.rubric_assessments:
            if assessment.name == mixin_name:
                return assessment
\ No newline at end of file
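For orientation, a short self-contained sketch of how the assess() handler above derives its numbers from the posted form data and the block's rubric criteria, and of the rubric_dict it now forwards to peer_api.create_assessment. The literal values are illustrative assumptions, not the repo's fixtures. (Python 2, matching the map() usage in the diff.)

# Sketch only: illustrative criteria and form data, not real fixtures.
rubric_criteria = [
    {"name": "concise", "total_value": 5},
    {"name": "clearheaded", "total_value": 10},
    {"name": "form", "total_value": 5},
]
data = {"points_earned": ["3", "10", "4"]}  # radio values arrive as strings

points_earned = map(int, data["points_earned"])                    # [3, 10, 4]
points_possible = sum(c["total_value"] for c in rubric_criteria)   # 20
rubric_dict = {"criteria": rubric_criteria}                        # sixth argument to create_assessment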
@@ -61,18 +61,24 @@
        <!-- individual rubric question (radio-based choice) -->
        <li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
            <h4 class="question__title">
-                {{ criterion.instructions }}
+                {{ criterion.prompt }}
                <span class="label--required">* <span class="sr">(Required)</span></span>
            </h4>
            <ol class="question__answers">
-                {% for value, text in criterion.options %}
+                {% for option in criterion.options %}
                <li class="answer">
                    <div class="wrapper--input">
-                        <input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
-                        <label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
+                        <input type="radio"
+                               name="assessment__rubric__question--{{ criterion.name }}"
+                               id="assessment__rubric__question--{{ criterion.name }}--01"
+                               class="answer__value"
+                               value="answer--001__option--01 - Very Well" />
+                        ({{option.points}})
+                        <label for="assessment__rubric__question--001__option--01"
+                               class="answer__label"
+                               >{{ option.name }}</label>
                    </div>
-                    <span class="answer__tip">TODO: Criterion Instructions</span>
+                    <span class="answer__tip">{{ option.explanation }}</span>
                </li>
                {% endfor %}
            </ol>
......
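The template now reads criterion.prompt, option.points, option.name, and option.explanation instead of the old (value, text) tuples, which is what makes the rubric display "real" again. A minimal sketch of the rubric_criteria context shape those lookups imply; the field values are illustrative, loosely following the "concise" criterion in the rubric definition below, and the real data comes from the parsed rubric.

# Shape implied by the template variables above; values are illustrative only.
rubric_criteria = [
    {
        "name": "concise",
        "prompt": "How concise is it?",
        "options": [
            {
                "points": 0,
                "name": "Neal Stephenson (late)",
                "explanation": "Multiple pages about breakfast cereal.",
            },
            {
                "points": 5,
                "name": "Earnest Hemingway",
                "explanation": "Removing a single word would make you sneer.",
            },
        ],
    },
]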
@@ -13,30 +13,30 @@
            Read for conciseness, clarity of thought, and form.
            <criterion name="concise">
                How concise is it?
-                <option val="0">(0) Neal Stephenson (late)
+                <option val="0">Neal Stephenson (late)
                    <explain>
                        In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
                        While hilarious, in recent years his work has been anything but 'concise'.
                    </explain>
                </option>
-                <option val="1">(1) HP Lovecraft
+                <option val="1">HP Lovecraft
                    <explain>
                        If the author wrote something cyclopean that staggers the mind, score it thus.
                    </explain>
                </option>
-                <option val="3">(3) Robert Heinlein
+                <option val="3">Robert Heinlein
                    <explain>
                        Tight prose that conveys a wealth of information about the world in relatively
                        few words. Example, "The door irised open and he stepped inside."
                    </explain>
                </option>
-                <option val="4">(4) Neal Stephenson (early)
+                <option val="4">Neal Stephenson (early)
                    <explain>
                        When Stephenson still had an editor, his prose was dense, with anecdotes about
                        nitrox abuse implying main characters' whole life stories.
                    </explain>
                </option>
-                <option val="5">(5) Earnest Hemingway
+                <option val="5">Earnest Hemingway
                    <explain>
                        Score the work this way if it makes you weep, and the removal of a single
                        word would make you sneer.
@@ -45,11 +45,11 @@
            </criterion>
            <criterion name="clearheaded">
                How clear is the thinking?
-                <option val="0">(0) Yogi Berra</option>
+                <option val="0">Yogi Berra</option>
-                <option val="1">(1) Hunter S. Thompson</option>
+                <option val="1">Hunter S. Thompson</option>
-                <option val="2">(2) Robert Heinlein</option>
+                <option val="2">Robert Heinlein</option>
-                <option val="3">(3) Isaac Asimov</option>
+                <option val="3">Isaac Asimov</option>
-                <option val="10">(10) Spock
+                <option val="10">Spock
                    <explain>
                        Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
                        and unemotional examination of the facts. This is the only item explained in this category,
@@ -59,12 +59,12 @@
            </criterion>
            <criterion name="form">
                Lastly, how is it's form? Punctuation, grammar, and spelling all count.
-                <option val="0">(0) lolcats</option>
+                <option val="0">lolcats</option>
-                <option val="1">(1) Facebook</option>
+                <option val="1">Facebook</option>
-                <option val="2">(2) Reddit</option>
+                <option val="2">Reddit</option>
-                <option val="3">(3) metafilter</option>
+                <option val="3">metafilter</option>
-                <option val="4">(4) Usenet, 1996</option>
+                <option val="4">Usenet, 1996</option>
-                <option val="5">(5) The Elements of Style</option>
+                <option val="5">The Elements of Style</option>
            </criterion>
        </rubric>
        <assessments>
......
""" """
Tests the Open Assessment XBlock functionality. Tests the Open Assessment XBlock functionality.
""" """
import json import json
import webob
from django.test import TestCase from django.test import TestCase
from mock import patch from mock import patch
from openassessment.xblock.submission_mixin import SubmissionMixin from workbench.runtime import WorkbenchRuntime
import webob
from submissions import api from openassessment.xblock.submission_mixin import SubmissionMixin
from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError from submissions.api import SubmissionRequestError, SubmissionInternalError
from workbench.runtime import WorkbenchRuntime
RUBRIC_CONFIG = """ RUBRIC_CONFIG = """
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00"> <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
@@ -87,7 +86,7 @@ class TestOpenAssessment(TestCase):
        result = json.loads(resp.body)
        self.assertTrue(result[0])

-    @patch.object(api, 'create_submission')
+    @patch.object(sub_api, 'create_submission')
    def test_submission_general_failure(self, mock_submit):
        """Internal errors return some code for submission failure."""
        mock_submit.side_effect = SubmissionInternalError("Cat on fire.")
@@ -100,7 +99,7 @@ class TestOpenAssessment(TestCase):
        self.assertEqual(result[1], "EUNKNOWN")
        self.assertEqual(result[2], SubmissionMixin().submit_errors["EUNKNOWN"])

-    @patch.object(api, 'create_submission')
+    @patch.object(sub_api, 'create_submission')
    def test_submission_API_failure(self, mock_submit):
        """API usage errors return code and meaningful message."""
        mock_submit.side_effect = SubmissionRequestError("Cat on fire.")
......