Commit 0a2e5efe by David Ormsbee

Fix a bunch of tests, make the rubric display real again for doing evaluations

parent 95e6a40e
# coding=utf-8
import datetime

from ddt import ddt, file_data
from django.db import DatabaseError
from django.test import TestCase
from mock import patch
from nose.tools import raises
import pytz

- from openassessment.peer import api
+ from openassessment.peer import api as peer_api
from openassessment.peer.models import Assessment
from submissions import api as sub_api
from submissions.models import Submission
@@ -88,7 +90,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
def test_create_evaluation(self):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
- evaluation = api.create_assessment(
+ evaluation = peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -101,7 +103,7 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
- api.create_assessment(
+ peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -109,14 +111,14 @@ class TestApi(TestCase):
assessment_dict,
RUBRIC_DICT,
)
- evaluations = api.get_assessments(submission["uuid"])
+ evaluations = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
- api.create_assessment(
+ peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -125,7 +127,7 @@ class TestApi(TestCase):
RUBRIC_DICT,
MONDAY
)
- evaluations = api.get_assessments(submission["uuid"])
+ evaluations = peer_api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
@@ -143,40 +145,40 @@ class TestApi(TestCase):
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
- self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
- api.create_assessment(
+ self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+ peer_api.create_assessment(
bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- api.create_assessment(
+ peer_api.create_assessment(
sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+ self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
- api.create_assessment(
+ peer_api.create_assessment(
jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
- api.create_evaluation(
+ self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+ peer_api.create_assessment(
buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
- api.create_evaluation(
+ self.assertFalse(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+ peer_api.create_assessment(
xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
+ self.assertTrue(peer_api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
# Tim should not have a score, because his submission does not have
# enough evaluations.
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
- api.create_assessment(
+ peer_api.create_assessment(
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- api.create_assessment(
+ peer_api.create_assessment(
tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
- api.create_assessment(
+ peer_api.create_assessment(
tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT, RUBRIC_DICT
)
@@ -187,9 +189,9 @@ class TestApi(TestCase):
self.assertEqual(12, scores[0]["points_possible"])
- @raises(api.PeerAssessmentRequestError)
+ @raises(peer_api.PeerAssessmentRequestError)
def test_bad_configuration(self):
api.has_finished_required_evaluating("Tim", -1)
peer_api.has_finished_required_evaluating("Tim", -1)
def test_get_submission_to_evaluate(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@@ -199,27 +201,27 @@ class TestApi(TestCase):
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
- submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
+ submission = peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
- @raises(api.PeerAssessmentWorkflowError)
+ @raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
- api.get_submission_to_assess(STUDENT_ITEM, 3)
+ peer_api.get_submission_to_assess(STUDENT_ITEM, 3)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
- @raises(api.PeerAssessmentInternalError)
+ @raises(peer_api.PeerAssessmentInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
- api.create_assessment(
+ peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -233,7 +235,7 @@ class TestApi(TestCase):
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
- api.create_assessment(
+ peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -243,17 +245,17 @@ class TestApi(TestCase):
MONDAY
)
mock_filter.side_effect = DatabaseError("Bad things happened")
api.get_assessments(submission["uuid"])
peer_api.get_assessments(submission["uuid"])
def test_choose_score(self):
- self.assertEqual(0, api._calculate_final_score([]))
- self.assertEqual(5, api._calculate_final_score([5]))
+ self.assertEqual(0, peer_api._calculate_final_score([]))
+ self.assertEqual(5, peer_api._calculate_final_score([5]))
# average of 5, 6, rounded down.
- self.assertEqual(6, api._calculate_final_score([5, 6]))
- self.assertEqual(14, api._calculate_final_score([5, 6, 12, 16, 22, 53]))
- self.assertEqual(14, api._calculate_final_score([6, 5, 12, 53, 16, 22]))
- self.assertEqual(16, api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
- self.assertEqual(16, api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
+ self.assertEqual(6, peer_api._calculate_final_score([5, 6]))
+ self.assertEqual(14, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53]))
+ self.assertEqual(14, peer_api._calculate_final_score([6, 5, 12, 53, 16, 22]))
+ self.assertEqual(16, peer_api._calculate_final_score([5, 6, 12, 16, 22, 53, 102]))
+ self.assertEqual(16, peer_api._calculate_final_score([16, 6, 12, 102, 22, 53, 5]))
@staticmethod
......
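The expectations in test_choose_score above are consistent with a median-style calculation: for an even number of scores the two middle values are averaged and rounded up, which is why [5, 6] yields 6. A minimal sketch of such a helper, assuming a plain list of integer scores and not claiming to be the module's actual implementation:

    import math

    def calculate_final_score(scores):
        """Median of the peer scores; mean of the two middle values, rounded
        up, when the count is even (matches test_choose_score above)."""
        if not scores:
            return 0
        ordered = sorted(scores)
        middle = len(ordered) // 2
        if len(ordered) % 2 == 1:
            return ordered[middle]
        return int(math.ceil(sum(ordered[middle - 1:middle + 1]) / 2.0))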
@@ -7,12 +7,18 @@
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no",
"singing": "no"
}
},
"basic_evaluation": {
"points_earned": [1, 0, 3, 2],
"points_possible": 12,
"feedback": "Your submission was thrilling."
"feedback": "Your submission was thrilling.",
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no"
}
}
}
\ No newline at end of file
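For reference, ddt's @file_data decorator (as used by the tests above) hands each top-level entry of this JSON file to the test method as a single dict, which is why the new options_selected key becomes visible to every data-driven test. A small illustrative sketch, assuming the JSON file sits next to the test module:

    from ddt import ddt, file_data
    from django.test import TestCase

    @ddt
    class ExampleDataDrivenTest(TestCase):

        @file_data('test_valid_evaluations.json')
        def test_entry_shape(self, assessment_dict):
            # Each entry now carries options_selected alongside the point data.
            self.assertIn("feedback", assessment_dict)
            self.assertIn("options_selected", assessment_dict)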
@@ -12,7 +12,10 @@ class AssessmentMixin(object):
configured modules, and ask for its rendered HTML.
"""
- if not context_dict: context_dict = {}
+ if not context_dict:
+ context_dict = {}
+ # TODO: these shouldn't overwrite
context_dict["xblock_trace"] = self.get_xblock_trace()
context_dict["rubric_instructions"] = self.rubric_instructions
context_dict["rubric_criteria"] = self.rubric_criteria
@@ -20,3 +23,4 @@ class AssessmentMixin(object):
template = get_template(path)
context = Context(context_dict)
return Response(template.render(context), content_type='application/html', charset='UTF-8')
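Read together, the two hunks above amount to a render helper along these lines; this is a sketch assembled from the visible lines, and the method name and full signature are assumptions since the def line sits outside the hunk:

    from django.template import Context
    from django.template.loader import get_template
    from webob import Response

    class AssessmentMixin(object):

        def render(self, path, context_dict=None):
            """Render one of the configured modules and return its HTML."""
            if not context_dict:
                context_dict = {}

            # TODO: these shouldn't overwrite keys the caller already supplied.
            context_dict["xblock_trace"] = self.get_xblock_trace()
            context_dict["rubric_instructions"] = self.rubric_instructions
            context_dict["rubric_criteria"] = self.rubric_criteria

            template = get_template(path)
            context = Context(context_dict)
            return Response(template.render(context),
                            content_type='application/html', charset='UTF-8')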
@@ -9,6 +9,7 @@ import pkg_resources
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
@@ -207,7 +208,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
def student_view(self, context=None):
"""The main view of OpenAssessmentBlock, displayed when viewing courses.
"""
- trace = self.get_xblock_trace()
student_item_dict = self.get_student_item_dict()
@@ -223,21 +223,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
"grade_state": grade_state,
}
- rubric_dict = {
- 'criteria': self.rubric_criteria
- }
- assessment = peer_api.create_assessment(
- data["submission_uuid"],
- student_item_dict["student_id"],
- int(peer_eval["must_grade"]),
- int(peer_eval["must_be_graded_by"]),
- assessment_dict,
- rubric_dict,
- )
- # Temp kludge until we fix JSON serialization for datetime
- assessment["scored_at"] = str(assessment["scored_at"])
template = get_template("static/html/oa_base.html")
context = Context(context_dict)
frag = Fragment(template.render(context))
......
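With the stray peer_api.create_assessment() call removed, student_view is left doing roughly what the remaining context lines show: build a context dict and render static/html/oa_base.html into a Fragment. A self-contained illustration of that render pattern (the function name here is illustrative, not the block's code):

    from django.template import Context
    from django.template.loader import get_template
    from xblock.fragment import Fragment

    def render_base_fragment(context_dict):
        """Render the base open assessment template into an XBlock Fragment."""
        template = get_template("static/html/oa_base.html")
        context = Context(context_dict)
        return Fragment(template.render(context))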
@@ -10,25 +10,31 @@ class PeerAssessmentMixin(AssessmentMixin):
def assess(self, data, suffix=''):
"""Place an assessment into OpenAssessment system
"""
- with self.get_assessment_module('peer-assessment') as assessment:
+ # with self.get_assessment_module('peer-assessment') as assessment:
- assessment_dict = {
- "points_earned": map(int, data["points_earned"]),
- "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
- "feedback": "Not yet implemented.",
- }
- assessment = peer_api.create_assessment(
- data["submission_uuid"],
- self.get_student_item_dict()["student_id"],
- int(assessment.must_grade),
- int(assessment.must_be_graded_by),
- assessment_dict
- )
+ assessment_ui_model = self.get_assessment_module('peer-assessment')
+ rubric_dict = {
+ 'criteria': self.rubric_criteria
+ }
+ assessment_dict = {
+ "points_earned": map(int, data["points_earned"]),
+ "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
+ "feedback": "Not yet implemented.",
+ "options_selected": {},  # Placeholder
+ }
+ assessment = peer_api.create_assessment(
+ data["submission_uuid"],
+ self.get_student_item_dict()["student_id"],
+ int(assessment_ui_model.must_grade),
+ int(assessment_ui_model.must_be_graded_by),
+ assessment_dict,
+ rubric_dict,
+ )
- # Temp kludge until we fix JSON serialization for datetime
- assessment["scored_at"] = str(assessment["scored_at"])
+ # Temp kludge until we fix JSON serialization for datetime
+ assessment["scored_at"] = str(assessment["scored_at"])
- return assessment, "Success"
+ return assessment_ui_model, "Success"
@XBlock.handler
def render_peer_assessment(self, data, suffix=''):
@@ -63,4 +69,4 @@ class PeerAssessmentMixin(AssessmentMixin):
"""
for assessment in self.rubric_assessments:
if assessment.name == mixin_name:
- return assessment
\ No newline at end of file
+ return assessment
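The "Temp kludge until we fix JSON serialization for datetime" comment refers to scored_at being a datetime that json.dumps cannot handle directly. One conventional way that kludge could eventually be retired is a custom encoder; this is only a sketch, not part of the commit:

    import datetime
    import json

    class DateTimeEncoder(json.JSONEncoder):
        """JSON encoder that serializes datetimes as ISO 8601 strings."""

        def default(self, obj):
            if isinstance(obj, datetime.datetime):
                return obj.isoformat()
            return super(DateTimeEncoder, self).default(obj)

    # json.dumps(assessment, cls=DateTimeEncoder) would then handle scored_at
    # without the str() workaround.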
@@ -61,18 +61,24 @@
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
- {{ criterion.instructions }}
+ {{ criterion.prompt }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
- {% for value, text in criterion.options %}
+ {% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio"
name="assessment__rubric__question--{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.name }}--01"
class="answer__value"
value="answer--001__option--01 - Very Well" />
({{option.points}})
<label for="assessment__rubric__question--001__option--01"
class="answer__label"
>{{ option.name }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
<span class="answer__tip">{{ option.explanation }}</span>
</li>
{% endfor %}
</ol>
......
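The reworked template reads criterion.prompt plus option.name, option.points, and option.explanation instead of the old (value, text) tuples. A context entry consistent with that, using illustrative values drawn from the example rubric below rather than the block's actual parsing output, would look like:

    criterion = {
        "name": "concise",
        "prompt": "How concise is it?",
        "options": [
            {"points": 0, "name": "Neal Stephenson (late)",
             "explanation": "Multiple pages about breakfast cereal."},
            {"points": 5, "name": "Earnest Hemingway",
             "explanation": "Removing a single word would make you sneer."},
        ],
    }

Django's template dot lookup resolves dict keys, so either dicts like this or simple objects with matching attributes would satisfy the template.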
@@ -13,30 +13,30 @@
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
- <option val="0">(0) Neal Stephenson (late)
+ <option val="0">Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<option val="1">HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<option val="3">Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<option val="4">Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<option val="5">Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
@@ -45,11 +45,11 @@
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<option val="0">Yogi Berra</option>
<option val="1">Hunter S. Thompson</option>
<option val="2">Robert Heinlein</option>
<option val="3">Isaac Asimov</option>
<option val="10">Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
@@ -59,12 +59,12 @@
</criterion>
<criterion name="form">
Lastly, how is it's form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
<option val="0">lolcats</option>
<option val="1">Facebook</option>
<option val="2">Reddit</option>
<option val="3">metafilter</option>
<option val="4">Usenet, 1996</option>
<option val="5">The Elements of Style</option>
</criterion>
</rubric>
<assessments>
......
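Since the visible "(0)", "(1)" prefixes are now dropped from the option text, the points shown in the UI have to come from the val attribute instead. A hedged sketch of how these <criterion>/<option> elements could be folded into the dicts the template expects; this is an illustrative parser only, the block's real XML handling is not shown in this commit:

    import xml.etree.ElementTree as ET

    def parse_criterion(criterion_el):
        """Turn a <criterion> element into a dict with prompt and option data."""
        options = []
        for option_el in criterion_el.findall('option'):
            explain_el = option_el.find('explain')
            options.append({
                "points": int(option_el.get('val')),
                "name": (option_el.text or '').strip(),
                "explanation": explain_el.text.strip() if explain_el is not None else '',
            })
        return {
            "name": criterion_el.get('name'),
            "prompt": (criterion_el.text or '').strip(),
            "options": options,
        }

    # Example:
    # parse_criterion(ET.fromstring(
    #     '<criterion name="concise">How concise is it?'
    #     '<option val="0">Neal Stephenson (late)</option></criterion>'))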
"""
Tests the Open Assessment XBlock functionality.
"""
import json

import webob
from django.test import TestCase
from mock import patch

from openassessment.xblock.submission_mixin import SubmissionMixin
- from submissions import api
+ from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError
from workbench.runtime import WorkbenchRuntime
RUBRIC_CONFIG = """
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
@@ -87,7 +86,7 @@ class TestOpenAssessment(TestCase):
result = json.loads(resp.body)
self.assertTrue(result[0])
- @patch.object(api, 'create_submission')
+ @patch.object(sub_api, 'create_submission')
def test_submission_general_failure(self, mock_submit):
"""Internal errors return some code for submission failure."""
mock_submit.side_effect = SubmissionInternalError("Cat on fire.")
@@ -100,7 +99,7 @@
self.assertEqual(result[1], "EUNKNOWN")
self.assertEqual(result[2], SubmissionMixin().submit_errors["EUNKNOWN"])
- @patch.object(api, 'create_submission')
+ @patch.object(sub_api, 'create_submission')
def test_submission_API_failure(self, mock_submit):
"""API usage errors return code and meaningful message."""
mock_submit.side_effect = SubmissionRequestError("Cat on fire.")
......
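The renamed alias keeps these patches aimed at the submissions API module object. A minimal, self-contained illustration of the pattern (scaffolding assumed; the real tests drive the XBlock through the workbench runtime rather than calling the API directly):

    from django.test import TestCase
    from mock import patch

    from submissions import api as sub_api
    from submissions.api import SubmissionInternalError

    class PatchPatternExample(TestCase):

        @patch.object(sub_api, 'create_submission')
        def test_internal_error_is_raised(self, mock_submit):
            # Force the patched API call to fail the way the tests above expect.
            mock_submit.side_effect = SubmissionInternalError("Cat on fire.")
            with self.assertRaises(SubmissionInternalError):
                sub_api.create_submission({"student_id": "Bob"}, "Bob's answer")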