Commit 9adbf2a5 by Stephen Sanchez

Merge branch 'master' into sanchez/TIM-171-Submission-UI

parents 6a74d0be 435f6d5d
......@@ -42,3 +42,6 @@ nosetests.xml
# some mac thing
.DS_Store
# PyCharm
.idea
from django.contrib import admin
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption
admin.site.register(Assessment)
admin.site.register(AssessmentPart)
admin.site.register(Rubric)
admin.site.register(Criterion)
admin.site.register(CriterionOption)
admin.site.register(PeerEvaluation)
......@@ -6,12 +6,14 @@ the workflow for a given submission.
"""
import copy
import logging
import math
from django.db import DatabaseError
import math
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerAssessmentSerializer
from openassessment.peer.models import Assessment
from openassessment.peer.serializers import (
AssessmentSerializer, RubricSerializer, rubric_from_dict
)
from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
......@@ -70,6 +72,7 @@ def create_assessment(
required_assessments_for_student,
required_assessments_for_submission,
assessment_dict,
rubric_dict,
scored_at=None):
"""Creates an assessment on the given submission.
......@@ -122,18 +125,23 @@ def create_assessment(
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(assessment_dict["options_selected"])
peer_assessment = {
"rubric": rubric.id,
"scorer_id": scorer_id,
"submission": submission.pk,
"points_earned": sum(assessment_dict["points_earned"]),
"points_possible": assessment_dict["points_possible"],
#"points_earned": sum(assessment_dict["points_earned"]),
#"points_possible": assessment_dict["points_possible"],
"score_type": PEER_TYPE,
"feedback": assessment_dict["feedback"],
"parts": [{"option": option_id} for option_id in option_ids]
}
if scored_at:
peer_assessment["scored_at"] = scored_at
peer_serializer = PeerAssessmentSerializer(data=peer_assessment)
peer_serializer = AssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid():
raise PeerAssessmentRequestError(peer_serializer.errors)
peer_serializer.save()
......@@ -195,7 +203,7 @@ def _score_if_finished(student_item,
student_item.student_id,
required_assessments_for_student
)
assessments = PeerEvaluation.objects.filter(submission=submission)
assessments = Assessment.objects.filter(submission=submission)
submission_finished = assessments.count() >= required_assessments_for_submission
scores = []
for assessment in assessments:
......@@ -260,7 +268,7 @@ def has_finished_required_evaluating(student_id, required_assessments):
if required_assessments < 0:
raise PeerAssessmentRequestError(
"Required Assessment count must be a positive integer.")
return PeerEvaluation.objects.filter(
return Assessment.objects.filter(
scorer_id=student_id
).count() >= required_assessments
......@@ -308,8 +316,8 @@ def get_assessments(submission_id):
"""
try:
submission = Submission.objects.get(uuid=submission_id)
assessments = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerAssessmentSerializer(assessments, many=True)
assessments = Assessment.objects.filter(submission=submission)
serializer = AssessmentSerializer(assessments, many=True)
return serializer.data
except DatabaseError:
error_message = (
......@@ -391,7 +399,7 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_
"-attempt_number"
)
for submission in submissions:
assessments = PeerEvaluation.objects.filter(submission=submission)
assessments = Assessment.objects.filter(submission=submission)
if assessments.count() < required_num_assessments:
already_evaluated = False
for assessment in assessments:
......
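To illustrate the updated peer API above, here is a hedged usage sketch of create_assessment with the new rubric_dict argument; the identifiers and rubric content below are illustrative, not taken from this commit, and points are now derived from the selected options rather than passed in.

from openassessment.peer import api as peer_api   # import path assumed from this diff's layout

# Illustrative rubric definition, shaped like the sample shown in rubric_from_dict().
rubric_dict = {
    "criteria": [
        {
            "name": "realistic",
            "prompt": "Is the deadline realistic?",
            "options": [
                {"name": "No", "points": 0, "explanation": "We need more time!"},
                {"name": "Yes", "points": 2, "explanation": "We got this."},
            ],
        },
    ]
}

# Only feedback and the selected option names matter now; points_earned and
# points_possible are no longer read here (the API above comments those fields out).
assessment_dict = {
    "feedback": "Looks like a solid plan.",
    "options_selected": {"realistic": "Yes"},
}

assessment = peer_api.create_assessment(
    submission_uuid,      # assumed to come from an earlier submissions API call
    "Bob",                # scorer_id (illustrative)
    3,                    # required_assessments_for_student
    3,                    # required_assessments_for_submission
    assessment_dict,
    rubric_dict,
)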
# coding=utf-8
"""
This would hold models related to the peer response workflow. There's going to
be a lot here, like rubrics and such.
These models have to capture not only the state of assessments made for certain
submissions, but also the state of the rubrics at the time those assessments
were made.
"""
from copy import deepcopy
from hashlib import sha1
import json
from django.db import models
from django.utils.timezone import now
from submissions.models import Submission
class PeerEvaluation(models.Model):
class Rubric(models.Model):
"""A Rubric contains the guidelines on how to assess a submission.
Rubrics are composed of :class:`Criterion` objects which are in turn
composed of :class:`CriterionOption` objects.
This model is a bit unusual in that it is the representation of the rubric
that an assessment was made with *at the time of assessment*. The source
rubric data lives in the problem definition, which is in the
:class:`OpenAssessmentBlock`. When an assessment is made, the XBlock passes
that rubric information along as well. When this Django app records the
:class:`Assessment`, we do a lookup to see if the Rubric model object
already exists (using hashing). If the Rubric is not found, we create a new
one with the information OpenAssessmentBlock has passed in.
.. warning::
Never change Rubric model data after it's written!
The little tree of objects that compose a Rubric is meant to be immutable —
once created, they're never updated. When the problem changes, we end up
creating a new Rubric instead. This makes it easy to cache and do hash-based
lookups.
"""
# SHA1 hash
content_hash = models.CharField(max_length=40, unique=True, db_index=True)
@property
def points_possible(self):
"""The total number of points that could be earned in this Rubric."""
criteria_points = [crit.points_possible for crit in self.criteria.all()]
return sum(criteria_points) if criteria_points else 0
@staticmethod
def content_hash_from_dict(rubric_dict):
"""Given a dict of rubric information, return a unique hash.
This is a static method because we want to provide the `content_hash`
when we create the rubric -- i.e. before the Rubric object could know or
access its child criteria or options. In Django, when you add child
elements to a model object using a foreign key relation, it will
immediately persist to the database. But in order to persist to the
database, the child object needs to have the ID of the parent, meaning
that Rubric would have to have already been created and persisted.
"""
rubric_dict = deepcopy(rubric_dict)
# Neither "id" nor "content_hash" would count towards calculating the
# content_hash.
rubric_dict.pop("id", None)
rubric_dict.pop("content_hash", None)
canonical_form = json.dumps(rubric_dict, sort_keys=True)
return sha1(canonical_form).hexdigest()
def options_ids(self, options_selected):
"""Given a mapping of selected options, return the option IDs.
We use this to map user selection during assessment to the
:class:`CriterionOption` IDs that are in our database. These IDs are
never shown to the user.
Args:
options_selected (dict): Mapping of criteria names to the names of
the option that was selected for that criterion.
Examples:
>>> options_selected = {"secret": "yes", "safe": "no"}
>>> rubric.options_ids(options_selected)
[10, 12]
"""
# TODO: cache this
crit_to_all_opts = {
crit.name : {
option.name: option.id for option in crit.options.all()
}
for crit in self.criteria.all()
}
return [
crit_to_all_opts[crit][opt]
for crit, opt in options_selected.items()
]
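As a rough sketch of the hash-based lookup and option mapping described in the Rubric docstring above (the input dict and the resulting IDs are illustrative; the creation path lives in rubric_from_dict in the serializers module):

# Sketch only: look up an existing Rubric by its content hash.
content_hash = Rubric.content_hash_from_dict(rubric_dict)
rubric = Rubric.objects.get(content_hash=content_hash)   # rubric_from_dict() creates it on DoesNotExist

# Map a learner's selections (criterion name -> option name) to the option IDs
# that AssessmentPart rows will store.
option_ids = rubric.options_ids({"secret": "yes", "safe": "no"})   # e.g. [10, 12]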
class Criterion(models.Model):
"""A single aspect of a submission that needs assessment.
As an example, an essay might be assessed separately for accuracy, brevity,
and clarity. Each of those would be separate criteria.
All Rubrics have at least one Criterion.
"""
rubric = models.ForeignKey(Rubric, related_name="criteria")
name = models.CharField(max_length=100, blank=False)
# 0-based order in the Rubric
order_num = models.PositiveIntegerField()
# What are we asking the reviewer to evaluate in this Criterion?
prompt = models.TextField(max_length=10000)
class Meta:
ordering = ["rubric", "order_num"]
@property
def points_possible(self):
"""The total number of points that could be earned in this Criterion."""
return max(option.points for option in self.options.all())
class CriterionOption(models.Model):
"""What an assessor chooses when assessing against a Criteria.
CriterionOptions have a name, point value, and explanation associated with
them. When you have to select between "Excellent", "Good", "Fair", "Bad" --
those are options.
Note that this is the representation of the choice itself, *not* a
representation of a particular assessor's choice for a particular
Assessment. That state is stored in :class:`AssessmentPart`.
"""
# All Criteria must have at least one CriterionOption.
criterion = models.ForeignKey(Criterion, related_name="options")
# 0-based order in Criterion
order_num = models.PositiveIntegerField()
# How many points this option is worth. 0 is allowed.
points = models.PositiveIntegerField()
# Short name of the option. This is visible to the user.
# Examples: "Excellent", "Good", "Fair", "Poor"
name = models.CharField(max_length=100)
# Longer text describing this option and why you should choose it.
# Example: "The response makes 3-5 Monty Python references and at least one
# original Star Wars trilogy reference. Do not select this option
# if the author made any references to the second trilogy."
explanation = models.TextField(max_length=10000, blank=True)
class Meta:
ordering = ["criterion", "order_num"]
def __repr__(self):
return (
"CriterionOption(order_num={0.order_num}, points={0.points}, "
"name={0.name!r}, explanation={0.explanation!r})"
).format(self)
def __unicode__(self):
return repr(self)
class Assessment(models.Model):
"""An evaluation made against a particular Submission and Rubric.
This is student state information and is created when a student completes
an assessment of some submission. It is composed of :class:`AssessmentPart`
objects that map to each :class:`Criterion` in the :class:`Rubric` we're
assessing against.
"""
submission = models.ForeignKey(Submission)
points_earned = models.PositiveIntegerField(default=0)
points_possible = models.PositiveIntegerField(default=0)
rubric = models.ForeignKey(Rubric)
scored_at = models.DateTimeField(default=now, db_index=True)
scorer_id = models.CharField(max_length=255, db_index=True)
scorer_id = models.CharField(max_length=40, db_index=True)
score_type = models.CharField(max_length=2)
feedback = models.TextField(max_length=10000, default="")
def __repr__(self):
return repr(dict(
submission=self.submission,
points_earned=self.points_earned,
points_possible=self.points_possible,
scored_at=self.scored_at,
scorer_id=self.scorer_id,
score_type=self.score_type,
feedback=self.feedback,
))
# TODO: move this to its own model
feedback = models.TextField(max_length=10000, default="")
class Meta:
ordering = ["-scored_at"]
@property
def points_earned(self):
parts = [part.points_earned for part in self.parts.all()]
return sum(parts) if parts else 0
@property
def points_possible(self):
return self.rubric.points_possible
@property
def submission_uuid(self):
return self.submission.uuid
def __unicode__(self):
return u"Assessment {}".format(self.id)
class AssessmentPart(models.Model):
"""Part of an Assessment corresponding to a particular Criterion.
    This is student state -- `AssessmentPart` represents the option the student
    chose for a given `Criterion` when assessing a submission. For example::
5 pts: "Excellent"
It's implemented as a foreign key to the `CriterionOption` that was chosen
by this assessor for this `Criterion`. So basically, think of this class
as :class:`CriterionOption` + student state.
"""
assessment = models.ForeignKey(Assessment, related_name='parts')
# criterion = models.ForeignKey(Criterion) ?
option = models.ForeignKey(CriterionOption) # TODO: no reverse
@property
def points_earned(self):
return self.option.points
@property
def points_possible(self):
return self.option.criterion.points_possible
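A brief hedged sketch of how points now roll up through these models; the lookup value is illustrative.

# Points are derived, not stored: Assessment -> AssessmentPart -> CriterionOption.
assessment = Assessment.objects.get(id=1)          # illustrative lookup
earned = sum(part.option.points for part in assessment.parts.all())
assert earned == assessment.points_earned          # points_earned property above
assert assessment.points_possible == assessment.rubric.points_possible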
# coding=utf-8
"""
Serializers are created to ensure models do not have to be accessed outside the
scope of the Tim APIs.
"""
from copy import deepcopy
from hashlib import sha1
import json
from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
)
class InvalidRubric(Exception):
"""This can be raised during the deserialization process."""
def __init__(self, errors):
Exception.__init__(self, repr(errors))
self.errors = deepcopy(errors)
class NestedModelSerializer(serializers.ModelSerializer):
"""Model Serializer that supports deserialization with arbitrary nesting.
The Django REST Framework does not currently support deserialization more
than one level deep (so a parent and children). We want to be able to
create a :class:`Rubric` → :class:`Criterion` → :class:`CriterionOption`
hierarchy.
    Much of the base logic already "just works" and serialization of arbitrary
depth is supported. So we just override the save_object method to
recursively link foreign key relations instead of doing it one level deep.
We don't touch many-to-many relationships because we don't need to for our
purposes, so those still only work one level deep.
"""
def recursively_link_related(self, obj, **kwargs):
if getattr(obj, '_related_data', None):
for accessor_name, related in obj._related_data.items():
setattr(obj, accessor_name, related)
for related_obj in related:
self.recursively_link_related(related_obj, **kwargs)
del(obj._related_data)
def save_object(self, obj, **kwargs):
obj.save(**kwargs)
# The code for many-to-many relationships is just copy-pasted from the
# Django REST Framework ModelSerializer
if getattr(obj, '_m2m_data', None):
for accessor_name, object_list in obj._m2m_data.items():
setattr(obj, accessor_name, object_list)
del(obj._m2m_data)
# This is our only real change from ModelSerializer
self.recursively_link_related(obj, **kwargs)
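A minimal sketch of the nested deserialization this enables, using the RubricSerializer defined below (field values are illustrative):

serializer = RubricSerializer(data={
    "content_hash": "0" * 40,        # illustrative; normally Rubric.content_hash_from_dict()
    "criteria": [{
        "order_num": 0, "name": "clarity", "prompt": "Is it clear?",
        "options": [
            {"order_num": 0, "points": 0, "name": "No", "explanation": ""},
            {"order_num": 1, "points": 1, "name": "Yes", "explanation": ""},
        ],
    }],
})
if serializer.is_valid():
    # One save() persists the Rubric and recursively links its Criterion
    # and CriterionOption children.
    rubric = serializer.save()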
class CriterionOptionSerializer(NestedModelSerializer):
"""Serializer for :class:`CriterionOption`"""
class Meta:
model = CriterionOption
fields = ('order_num', 'points', 'name', 'explanation')
class CriterionSerializer(NestedModelSerializer):
"""Serializer for :class:`Criterion`"""
options = CriterionOptionSerializer(required=True, many=True)
class Meta:
model = Criterion
fields = ('order_num', 'name', 'prompt', 'options')
def validate_options(self, attrs, source):
"""Make sure we have at least one CriterionOption in a Criterion."""
options = attrs[source]
if not options:
raise serializers.ValidationError(
"Criterion must have at least one option."
)
return attrs
class RubricSerializer(NestedModelSerializer):
"""Serializer for :class:`Rubric`."""
criteria = CriterionSerializer(required=True, many=True)
points_possible = serializers.Field(source='points_possible')
class Meta:
model = Rubric
fields = ('id', 'content_hash', 'criteria', 'points_possible')
class PeerAssessmentSerializer(serializers.ModelSerializer):
def validate_criteria(self, attrs, source):
"""Make sure we have at least one Criterion in the Rubric."""
criteria = attrs[source]
if not criteria:
raise serializers.ValidationError("Must have at least one criterion")
return attrs
class AssessmentPartSerializer(serializers.ModelSerializer):
"""Serializer for :class:`AssessmentPart`."""
class Meta:
model = AssessmentPart
fields = ('option',) # TODO: Direct link to Criterion?
class AssessmentSerializer(serializers.ModelSerializer):
"""Serializer for :class:`Assessment`."""
submission_uuid = serializers.Field(source='submission_uuid')
parts = AssessmentPartSerializer(required=True, many=True)
points_earned = serializers.Field(source='points_earned')
points_possible = serializers.Field(source='points_possible')
class Meta:
model = PeerEvaluation
model = Assessment
fields = (
'submission',
'points_earned',
'points_possible',
'submission', # will go away shortly
'rubric',
'scored_at',
'scorer_id',
'score_type',
'feedback',
# Foreign Key
'parts',
# Computed, not part of the model
'submission_uuid',
'points_earned',
'points_possible',
)
def rubric_from_dict(rubric_dict):
"""Given a dict of rubric information, return the corresponding Rubric
This will create the Rubric and its children if it does not exist already.
Sample data (one criterion, two options)::
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "We need more time!"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "We got this."
},
]
}
]
}
"""
rubric_dict = deepcopy(rubric_dict)
# Calculate the hash based on the rubric content...
content_hash = Rubric.content_hash_from_dict(rubric_dict)
try:
rubric = Rubric.objects.get(content_hash=content_hash)
except Rubric.DoesNotExist:
rubric_dict["content_hash"] = content_hash
for crit_idx, criterion in enumerate(rubric_dict.get("criteria", {})):
if "order_num" not in criterion:
criterion["order_num"] = crit_idx
for opt_idx, option in enumerate(criterion.get("options", {})):
if "order_num" not in option:
option["order_num"] = opt_idx
rubric_serializer = RubricSerializer(data=rubric_dict)
if not rubric_serializer.is_valid():
raise InvalidRubric(rubric_serializer.errors)
rubric = rubric_serializer.save()
return rubric
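A short usage sketch, assuming sample_rubric_dict is bound to the sample dict shown in the docstring above: the second call should find the existing Rubric by content hash instead of creating new rows.

rubric_one = rubric_from_dict(sample_rubric_dict)   # creates the Rubric and its children
rubric_two = rubric_from_dict(sample_rubric_dict)   # found again via content_hash
assert rubric_one.id == rubric_two.id
assert rubric_one.points_possible == 2   # highest option (2 points) in the single criterion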
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
]
}
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": ""
},
{
"order_num": 1,
"points": 2,
"name": "Maybe",
"explanation": ""
},
{
"order_num": 2,
"points": 4,
"name": "Yes",
"explanation": ""
}
]
},
{
"order_num": 1,
"name": "architecture",
"prompt": "Describe the architecture.",
"options": [
]
}
]
}
{
"prompt": "Create a plan to deliver edx-tim!"
}
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?"
},
{
"order_num": 1,
"name": "architecture",
"prompt": "Describe the architecture.",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Crazy",
"explanation": ""
},
{
"order_num": 1,
"points": 1,
"name": "Plausible",
"explanation": ""
},
{
"order_num": 2,
"points": 2,
"name": "Solid",
"explanation": ""
}
]
}
]
}
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
{
"order_num": 0,
"prompt": "Is the deadline realistic?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": ""
},
{
"order_num": 1,
"points": 0,
"name": "Maybe",
"explanation": ""
},
{
"order_num": 2,
"points": 0,
"name": "Yes",
"explanation": ""
}
]
},
{
"order_num": 1,
"prompt": "Describe the architecture.",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Crazy",
"explanation": ""
},
{
"order_num": 1,
"points": 0,
"name": "Plausible",
"explanation": ""
},
{
"order_num": 2,
"points": 0,
"name": "Solid",
"explanation": ""
}
]
}
]
}
{
"prompt": "Create a plan to deliver edx-tim!",
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": ""
},
{
"order_num": 1,
"points": 2,
"name": "Maybe",
"explanation": ""
},
{
"order_num": 2,
"points": 4,
"name": "Yes",
"explanation": ""
}
]
},
{
"order_num": 1,
"name": "architecture",
"prompt": "Describe the architecture.",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Crazy",
"explanation": ""
},
{
"order_num": 1,
"points": 1,
"name": "Plausible",
"explanation": ""
},
{
"order_num": 2,
"points": 2,
"name": "Solid",
"explanation": ""
}
]
}
]
}
\ No newline at end of file
import json
import os.path
from ddt import ddt, file_data
from django.test import TestCase
from openassessment.peer.models import Criterion, CriterionOption, Rubric
from openassessment.peer.serializers import (
InvalidRubric, RubricSerializer, rubric_from_dict
)
def json_data(filename):
curr_dir = os.path.dirname(__file__)
with open(os.path.join(curr_dir, filename), "rb") as json_file:
return json.load(json_file)
class TestRubricDeserialization(TestCase):
def test_rubric_only_created_once(self):
# Make sure sending the same Rubric data twice only creates one Rubric,
# and returns a reference to it the next time.
rubric_data = json_data('rubric_data/project_plan_rubric.json')
r1 = rubric_from_dict(rubric_data)
with self.assertNumQueries(1):
# Just the select -- shouldn't need the create queries
r2 = rubric_from_dict(rubric_data)
self.assertEqual(r1.id, r2.id)
r1.delete()
def test_rubric_requires_positive_score(self):
with self.assertRaises(InvalidRubric):
rubric_from_dict(json_data('rubric_data/no_points.json'))
class TestCriterionDeserialization(TestCase):
def test_empty_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/empty_criteria.json'))
self.assertEqual(
cm.exception.errors,
{'criteria': [u'Must have at least one criterion']}
)
def test_missing_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/missing_criteria.json'))
self.assertEqual(
cm.exception.errors,
{'criteria': [u'This field is required.']}
)
class TestCriterionOptionDeserialization(TestCase):
def test_empty_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/empty_options.json'))
self.assertEqual(
cm.exception.errors,
{
'criteria': [
{}, # There are no errors in the first criterion
{'options': [u'Criterion must have at least one option.']}
]
}
)
def test_missing_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/missing_options.json'))
self.assertEqual(
cm.exception.errors,
{
'criteria': [
{'options': [u'This field is required.']},
{} # No errors in second criterion
]
}
)
{
"unicode_evaluation": {
"points_earned": [10, 0, 24, 36],
"points_possible": 12,
"feedback": "这是中国"
},
"basic_evaluation": {
"points_earned": [1, 0, 3, 2],
"points_possible": 12,
"feedback": "Your submission was thrilling."
}
}
\ No newline at end of file
{
"unicode_evaluation": {
"feedback": "这是中国",
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no"
}
},
"basic_evaluation": {
"feedback": "Your submission was thrilling.",
"options_selected": {
"secret": "yes",
"ⓢⓐⓕⓔ": "no",
"giveup": "reluctant",
"singing": "no"
}
}
}
\ No newline at end of file
......@@ -10,6 +10,7 @@ from webob import Response
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
......@@ -123,7 +124,6 @@ DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
]
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
......@@ -226,7 +226,6 @@ class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAsse
(Fragment): The HTML Fragment for this XBlock, which determines the
general frame of the Open Ended Assessment Question.
"""
trace = self.get_xblock_trace()
grade_state = self.get_grade_state()
......
......@@ -34,25 +34,29 @@ class PeerAssessmentMixin(object):
newly created assessment, and a "Success" string.
"""
assessment = self.get_assessment_module('peer-assessment')
if assessment:
assessment_ui_model = self.get_assessment_module('peer-assessment')
if assessment_ui_model:
rubric_dict = {
'criteria': self.rubric_criteria
}
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.",
"options_selected": data["options_selected"],
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment.must_grade),
int(assessment.must_be_graded_by),
assessment_dict
int(assessment_ui_model.must_grade),
int(assessment_ui_model.must_be_graded_by),
assessment_dict,
rubric_dict,
)
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return assessment, "Success"
return {}
@XBlock.handler
def render_peer_assessment(self, data, suffix=''):
......
......@@ -48,20 +48,27 @@ class ScenarioParser(object):
</rubric>"""
rubric_criteria = []
for criterion in e:
crit = {'name': criterion.attrib.get('name', ''),
'instructions': criterion.text.strip(),
'total_value': 0,
'options': [],
}
crit = {
'name': criterion.attrib.get('name', ''),
'prompt': criterion.text.strip(),
'options': [],
}
for option in criterion:
explanations = option.getchildren()
if explanations and len(explanations) == 1 and explanations[0].tag == 'explain':
explanation = explanations[0].text.strip()
else:
explanation = ''
crit['options'].append((option.attrib['val'], option.text.strip(), explanation))
crit['total_value'] = max(int(x[0]) for x in crit['options'])
crit['options'].append(
{
'name': option.text.strip(),
'points': int(option.attrib['val']),
'explanation': explanation,
}
)
rubric_criteria.append(crit)
return (e.text.strip(), rubric_criteria)
def get_assessments(self, assessments):
......
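For reference, a hedged sketch of the criterion dict the updated parser now builds, using the "concise" criterion from the example scenario below (explanations abbreviated):

crit = {
    "name": "concise",
    "prompt": "How concise is it?",
    "options": [
        # One dict per <option>, in document order; the old 'total_value' key is gone.
        {"name": "Neal Stephenson (late)", "points": 0, "explanation": "In \"Cryptonomicon\"..."},
        {"name": "HP Lovecraft", "points": 1, "explanation": "If the author wrote..."},
        # ...remaining options follow the same shape
    ],
}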
......@@ -61,18 +61,24 @@
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
{{ criterion.prompt }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
{% for value, text in criterion.options %}
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.name }}"
class="answer__value"
value="{{ option.name }}" />
({{option.points}})
<label for="assessment__rubric__question--001__option--01"
class="answer__label"
>{{ option.name }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
<span class="answer__tip">{{ option.explanation }}</span>
</li>
{% endfor %}
</ol>
......
......@@ -45,17 +45,24 @@ function OpenAssessmentBlock(runtime, element) {
function prepare_assessment_post(element) {
var selector = $("input[type=radio]:checked", element);
var criteriaChoices = {};
var values = [];
for (var i=0; i<selector.length; i++) {
values[i] = selector[i].value;
criteriaChoices[selector[i].name] = selector[i].value
}
return {"submission_uuid":$("div#peer_submission_uuid")[0].innerText, "points_earned":values};
return {
"submission_uuid":$("div#peer_submission_uuid")[0].innerText,
"points_earned":values,
"options_selected":criteriaChoices
};
}
$('#peer-assessment--001__assessment__submit', element).click(function(eventObject) {
eventObject.preventDefault();
$.ajax({
type: "POST",
url: handlerUrl,
url: runtime.handlerUrl(element, 'assess'),
/* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
data: JSON.stringify(prepare_assessment_post(element)),
success: function(data) {
......
......@@ -13,30 +13,30 @@
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<option val="0">Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<option val="1">HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<option val="3">Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<option val="4">Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<option val="5">Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
......@@ -45,11 +45,11 @@
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<option val="0">Yogi Berra</option>
<option val="1">Hunter S. Thompson</option>
<option val="2">Robert Heinlein</option>
<option val="3">Isaac Asimov</option>
<option val="10">Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
......@@ -59,12 +59,12 @@
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
<option val="0">lolcats</option>
<option val="1">Facebook</option>
<option val="2">Reddit</option>
<option val="3">metafilter</option>
<option val="4">Usenet, 1996</option>
<option val="5">The Elements of Style</option>
</criterion>
</rubric>
<assessments>
......
"""
Tests the Open Assessment XBlock functionality.
"""
import json
import webob
from django.test import TestCase
from mock import patch
from openassessment.xblock.submission_mixin import SubmissionMixin
from workbench.runtime import WorkbenchRuntime
import webob
from submissions import api
from openassessment.xblock.submission_mixin import SubmissionMixin
from submissions import api as sub_api
from submissions.api import SubmissionRequestError, SubmissionInternalError
from workbench.runtime import WorkbenchRuntime
RUBRIC_CONFIG = """
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
......@@ -105,7 +104,7 @@ class TestOpenAssessment(TestCase):
self.assertEqual(result[1], "ENOMULTI")
self.assertEqual(result[2], self.assessment.submit_errors["ENOMULTI"])
@patch.object(api, 'create_submission')
@patch.object(sub_api, 'create_submission')
def test_submission_general_failure(self, mock_submit):
"""Internal errors return some code for submission failure."""
mock_submit.side_effect = SubmissionInternalError("Cat on fire.")
......@@ -118,7 +117,7 @@ class TestOpenAssessment(TestCase):
self.assertEqual(result[1], "EUNKNOWN")
self.assertEqual(result[2], SubmissionMixin().submit_errors["EUNKNOWN"])
@patch.object(api, 'create_submission')
@patch.object(sub_api, 'create_submission')
def test_submission_API_failure(self, mock_submit):
"""API usage errors return code and meaningful message."""
mock_submit.side_effect = SubmissionRequestError("Cat on fire.")
......
......@@ -10,7 +10,7 @@ from openassessment.xblock.scenario_parser import ScenarioParser
class TestScenarioParser(TestCase):
"""Test the ScenarioParser XML parsing class, which turns xml into filled XBlocks.
This does the simplest possible set of tests, just calling the parser utility
This does the simplest possible set of tests, just calling the parser utility
methods and confirming that their return results are correct, have good types, etc."""
def setUp(self):
......@@ -24,8 +24,8 @@ class TestScenarioParser(TestCase):
def test_get_rubric(self):
"""Given a <rubric> tree, return a instructions and a list of criteria"""
rubric_instructions_text = "This text is general instructions relating to this rubric. There should only be one set of instructions for the rubric."
criterion_instructions_text = "This text is instructions for this criterion. There can be multiple criteria, but each one should only have one set of instructions."
rubric_prompt_text = "This text is general instructions relating to this rubric. There should only be one set of instructions for the rubric."
criterion_prompt_text = "This text is instructions for this criterion. There can be multiple criteria, but each one should only have one set of instructions."
criterion_option_explain_text = "And this explains what the label for this option means. There can be only one explanation per label."
rubric_text = """<rubric>
{rit}
......@@ -39,27 +39,26 @@ class TestScenarioParser(TestCase):
</explain>
</option>
</criterion>
</rubric>""".format(rit=rubric_instructions_text,
cit=criterion_instructions_text,
</rubric>""".format(rit=rubric_prompt_text,
cit=criterion_prompt_text,
coet=criterion_option_explain_text)
rubric_xml = etree.fromstring(rubric_text)
rubric_instructions, rubric_criteria = self.test_parser.get_rubric(rubric_xml)
rubric_prompt, rubric_criteria = self.test_parser.get_rubric(rubric_xml)
# Basic shape of the rubric: instructions and criteria
self.assertEqual(rubric_instructions, rubric_instructions_text)
# Basic shape of the rubric: prompt and criteria
self.assertEqual(rubric_prompt, rubric_prompt_text)
self.assertEqual(len(rubric_criteria), 1)
# Look inside the criterion to make sure it's shaped correctly
criterion = rubric_criteria[0]
self.assertEqual(criterion['name'], 'myCrit')
self.assertEqual(criterion['instructions'], criterion_instructions_text)
self.assertEqual(criterion['total_value'], 99)
self.assertEqual(criterion['prompt'], criterion_prompt_text)
self.assertEqual(len(criterion['options']), 1)
# And within the criterion, check that options appear to come out well-formed
criterion_option_value, criterion_option, criterion_explanation = criterion['options'][0]
self.assertEqual(int(criterion_option_value), 99)
self.assertEqual(criterion_explanation, criterion_option_explain_text)
option = criterion['options'][0]
self.assertEqual(option['points'], 99)
self.assertEqual(option['explanation'], criterion_option_explain_text)
def test_get_assessments(self):
"""Given an <assessments> list, return a list of assessment modules."""
......
......@@ -37,7 +37,7 @@ class StudentItem(models.Model):
))
def __unicode__(self):
return "({0.student_id}, {0.course_id}, {0.item_type}, {0.item_id})".format(self)
return u"({0.student_id}, {0.course_id}, {0.item_type}, {0.item_id})".format(self)
class Meta:
unique_together = (
......
......@@ -48,3 +48,6 @@ Models
++++++
.. automodule:: submissions.models
:members:
.. automodule:: openassessment.peer.models
:members: