Commit 90b04a1c by Stephen Sanchez

Merge branch 'master' into sanchez/xblock_lms_integration

Conflicts:
	apps/openassessment/xblock/openassessmentblock.py
parents 73b4f8c6 3d1ef58d
...@@ -3,19 +3,20 @@ ...@@ -3,19 +3,20 @@
import pkg_resources import pkg_resources
from mako.template import Template from mako.template import Template
from openassessment.peer.api import PeerEvaluationWorkflowError
from submissions import api
from openassessment.peer import api as peer_api
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fields import List, Scope, String from xblock.fields import List, Scope, String
from xblock.fragment import Fragment from xblock.fragment import Fragment
from submissions.api import SubmissionRequestError from submissions.api import SubmissionRequestError
from submissions import api
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerEvaluationWorkflowError
from scenario_parser import ScenarioParser
mako_default_filters = ['unicode', 'h', 'trim'] mako_default_filters = ['unicode', 'h', 'trim']
EXAMPLE_POVERTY_RUBRIC = ( EXAMPLE_POVERTY_RUBRIC = (
"OpenAssessmentBlock Poverty Rubric", "OpenAssessmentBlock Poverty Rubric",
""" """
...@@ -30,28 +31,58 @@ EXAMPLE_POVERTY_RUBRIC = ( ...@@ -30,28 +31,58 @@ EXAMPLE_POVERTY_RUBRIC = (
Read for conciseness, clarity of thought, and form. Read for conciseness, clarity of thought, and form.
<criterion name="concise"> <criterion name="concise">
How concise is it? How concise is it?
<option val="0">Neal Stephenson (late)</option> <option val="0">(0) Neal Stephenson (late)
<option val="1">HP Lovecraft</option> <explain>
<option val="3">Robert Heinlein</option> In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
<option val="4">Neal Stephenson (early)</option> While hilarious, in recent years his work has been anything but 'concise'.
<option val="5">Earnest Hemingway</option> </explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion> </criterion>
<criterion name="clearheaded"> <criterion name="clearheaded">
How clear is the thinking? How clear is the thinking?
<option val="0">Yogi Berra</option> <option val="0">(0) Yogi Berra</option>
<option val="1">Hunter S. Thompson</option> <option val="1">(1) Hunter S. Thompson</option>
<option val="2">Robert Heinlein</option> <option val="2">(2) Robert Heinlein</option>
<option val="3">Isaac Asimov</option> <option val="3">(3) Isaac Asimov</option>
<option val="10">Spock</option> <option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion> </criterion>
<criterion name="form"> <criterion name="form">
Lastly, how is it's form? Punctuation, grammar, and spelling all count. Lastly, how is it's form? Punctuation, grammar, and spelling all count.
<option val="0">lolcats</option> <option val="0">(0) lolcats</option>
<option val="1">Facebook</option> <option val="1">(1) Facebook</option>
<option val="2">Reddit</option> <option val="2">(2) Reddit</option>
<option val="3">metafilter</option> <option val="3">(3) metafilter</option>
<option val="4">Usenet, 1996</option> <option val="4">(4) Usenet, 1996</option>
<option val="5">The Elements of Style</option> <option val="5">(5) The Elements of Style</option>
</criterion> </criterion>
</rubric> </rubric>
<evals> <evals>
...@@ -110,6 +141,7 @@ EXAMPLE_CENSORSHIP_RUBRIC = ( ...@@ -110,6 +141,7 @@ EXAMPLE_CENSORSHIP_RUBRIC = (
""" """
) )
class OpenAssessmentBlock(XBlock): class OpenAssessmentBlock(XBlock):
"""Displays a question and gives an area where students can compose a response.""" """Displays a question and gives an area where students can compose a response."""
...@@ -216,17 +248,11 @@ class OpenAssessmentBlock(XBlock): ...@@ -216,17 +248,11 @@ class OpenAssessmentBlock(XBlock):
# HACK: Replace with proper workflow. # HACK: Replace with proper workflow.
peer_eval = self._hack_get_peer_eval() peer_eval = self._hack_get_peer_eval()
"""Place an assessment into Openassessment system""" """Place an assessment into Openassessment system"""
# TODO: We're not doing points possible in a good way, need to refactor
# the rubric criteria type, Joe has thoughts on this.
student_item_dict = self._get_student_item_dict() student_item_dict = self._get_student_item_dict()
points_possible = sum(
max(int(val) for val in criteria if val.isdigit())
for criteria in self.rubric_criteria
)
assessment_dict = { assessment_dict = {
"points_earned": map(int, data["points_earned"]), "points_earned": map(int, data["points_earned"]),
"points_possible": points_possible, "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.", "feedback": "Not yet implemented.",
} }
evaluation = peer_api.create_evaluation( evaluation = peer_api.create_evaluation(
...@@ -268,46 +294,22 @@ class OpenAssessmentBlock(XBlock): ...@@ -268,46 +294,22 @@ class OpenAssessmentBlock(XBlock):
status_text = status_text if status_text else self.submit_errors[status_tag] status_text = status_text if status_text else self.submit_errors[status_tag]
return (status, status_tag, status_text) return (status, status_tag, status_text)
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
    """Build an xblock instance from its runtime XML definition.

    Walks the children of *node*: <prompt> fills the prompt text,
    <rubric> fills the rubric instructions and criteria, <evals> fills
    the evaluation list, and anything else is handed back to the
    runtime to be attached as a child block.
    """
    block = runtime.construct_xblock_from_class(cls, keys)
    for element in node:
        tag = element.tag
        if tag == 'prompt':
            block.prompt = element.text.strip()
        elif tag == 'rubric':
            block.rubric_instructions = element.text.strip()
            criteria = []
            for criterion_node in element:
                criterion = {
                    'name': criterion_node.attrib.get('name', ''),
                    'instructions': criterion_node.text.strip(),
                }
                # Each <option val="..."> maps its point value to its label text.
                for option_node in criterion_node:
                    criterion[option_node.attrib['val']] = option_node.text.strip()
                criteria.append(criterion)
            block.rubric_criteria = criteria
        elif tag == 'evals':
            evaluations = []
            for eval_node in element:
                evaluations.append({
                    'type': eval_node.tag,
                    'name': eval_node.attrib.get('name', ''),
                    'start_datetime': eval_node.attrib.get('start', None),
                    'due_datetime': eval_node.attrib.get('due', None),
                    # These attrs are accepted for self, ai evals, but ignored:
                    'must_grade': eval_node.attrib.get('must_grade', 1),
                    'must_be_graded_by': eval_node.attrib.get('must_be_graded_by', 0),
                })
            block.rubric_evals = evaluations
        else:
            # XXX: jrbl thinks this lets you embed other blocks inside this (?)
            block.runtime.add_node_as_child(block, element, id_generator)
    return block
# Arbitrary attributes can be defined on the
@staticmethod @staticmethod
def workbench_scenarios(): def workbench_scenarios():
"""A canned scenario for display in the workbench.""" """A canned scenario for display in the workbench."""
return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC, ] return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC,]
def studio_view(self, context=None): @staticmethod
def studio_view(context=None):
return Fragment(u"<div>Edit the XBlock.</div>") return Fragment(u"<div>Edit the XBlock.</div>")
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
    """Instantiate xblock object from runtime XML definition.

    The actual tree walk is delegated to ScenarioParser; any node the
    parser does not recognize is re-attached to the block as a child
    xblock via the handler below.
    """
    def embed_unknown(block, child):
        """Recursively embed xblocks for nodes we don't recognize"""
        block.runtime.add_node_as_child(block, child, id_generator)

    xblock = runtime.construct_xblock_from_class(cls, keys)
    return ScenarioParser(xblock, node, embed_unknown).parse()
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
class ScenarioParser(object):
    """Utility class to capture parsing of xml from runtime scenarios."""

    def __init__(self, xblock, node, unknown_handler=lambda x, y: (x, y)):
        """Save the little bit of state we expect to re-use.

        Args:
            xblock (XBlock): The xblock instance whose fields we fill out.
            node (lxml.etree): The root of the xml hierarchy we are to walk.
            unknown_handler (function): A closure over some environmental data
                from our caller, which is used when we encounter an unexpected
                child node.
        """
        self.xblock = xblock
        self.root = node
        self.unknown_handler = unknown_handler

    def get_prompt(self, e):
        """<prompt>This tells you what you should write about. There should be only one prompt.</prompt>"""
        return e.text.strip()

    def get_rubric(self, e):
        """Return (instructions, criteria) parsed from a <rubric> tree.

        <rubric>
            This text is general instructions relating to this rubric.
            There should only be one set of instructions for the rubric.
            <criterion name="myCrit">
                This text is instructions for this criterion. There can be multiple criteria,
                but each one should only have one set of instructions.
                <option val=99>
                    This is some text labeling the criterion option worth 99 points
                    There can be multiple options per criterion.
                    <explain>
                        And this explains what the label for this option means. There can be only
                        one explanation per label.
                    </explain>
                </option>
            </criterion>
        </rubric>

        Each criterion becomes a dict with keys 'name', 'instructions',
        'total_value' (the highest option value), and 'options' (a list of
        (value, label, explanation) tuples).
        """
        rubric_criteria = []
        for criterion in e:
            crit = {'name': criterion.attrib.get('name', ''),
                    'instructions': criterion.text.strip(),
                    'total_value': 0,
                    'options': [],
                    }
            for option in criterion:
                # list(option) rather than option.getchildren(): getchildren()
                # was deprecated and is removed from xml.etree in Python 3.9.
                explanations = list(option)
                if explanations and len(explanations) == 1 and explanations[0].tag == 'explain':
                    explanation = explanations[0].text.strip()
                else:
                    explanation = ''
                crit['options'].append((option.attrib['val'], option.text.strip(), explanation))
            # Guard against a criterion with no options: max() over an empty
            # sequence raises ValueError; such a criterion is worth 0 points.
            if crit['options']:
                crit['total_value'] = max(int(x[0]) for x in crit['options'])
            rubric_criteria.append(crit)
        return (e.text.strip(), rubric_criteria)

    def get_evals(self, evaluations):
        """Return a list of evaluation dicts parsed from an <evals> tree.

        <evals>
            <!-- There can be multiple types of assessments given in any
                 arbitrary order, like this self assessment followed by a
                 peer assessment -->
            <self />
            <peereval start="2014-12-20T19:00-7:00"
                      due="2014-12-21T22:22-7:00"
                      must_grade="5"
                      must_be_graded_by="3" />
        </evals>
        """
        return [{'type': ev.tag,
                 'name': ev.attrib.get('name', ''),
                 'start_datetime': ev.attrib.get('start', None),
                 'due_datetime': ev.attrib.get('due', None),
                 # These attrs are accepted for self, ai evals, but ignored:
                 'must_grade': int(ev.attrib.get('must_grade', 1) if ev.tag == 'peereval' else 1),
                 'must_be_graded_by': int(ev.attrib.get('must_be_graded_by', 0) if ev.tag == 'peereval' else 0),
                 } for ev in evaluations]

    def parse(self):
        """Instantiate xblock object from runtime XML definition."""
        for child in self.root:
            if child.tag == 'prompt':
                self.xblock.prompt = self.get_prompt(child)
            elif child.tag == 'rubric':
                (self.xblock.rubric_instructions,
                 self.xblock.rubric_criteria) = self.get_rubric(child)
            elif child.tag == 'evals':
                self.xblock.rubric_evals = self.get_evals(child)
            else:
                # Anything we don't recognize is delegated to the caller.
                self.unknown_handler(self.xblock, child)
        return self.xblock
...@@ -4,16 +4,18 @@ ...@@ -4,16 +4,18 @@
<p>${peer_submission["answer"]}</p> <p>${peer_submission["answer"]}</p>
<p class="openassessment_prompt" <p class="openassessment_prompt"
id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p> id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p>
% for criterion in rubric_criteria: % for crit in rubric_criteria:
<div> <div>
<p class="openassessment_prompt">${criterion["instructions"]}</p> <p class="openassessment_prompt">${crit["instructions"]}</p>
% for value in sorted([k for k in criterion.keys() if k != 'name' and k != 'instructions']): % for o in crit['options']:
<input name="${criterion['name']}" type="radio" value="${value}">${criterion[value]}</input> <div>
<input name="${crit['name']}" type="radio" value="${o[0]}">${o[1]}: ${o[2]}</input>
</div>
% endfor % endfor
</div> </div>
% endfor % endfor
<input type="button" <input type="button"
class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" /> class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
</div> </div>
<div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}"> <div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}">
This message should be invisible; please upgrade your browser. This message should be invisible; please upgrade your browser.
......
...@@ -8,9 +8,9 @@ import webob ...@@ -8,9 +8,9 @@ import webob
from django.test import TestCase from django.test import TestCase
from mock import patch from mock import patch
from workbench.runtime import WorkbenchRuntime
from submissions import api from submissions import api
from submissions.api import SubmissionRequestError, SubmissionInternalError from submissions.api import SubmissionRequestError, SubmissionInternalError
from workbench.runtime import WorkbenchRuntime
RUBRIC_CONFIG = """ RUBRIC_CONFIG = """
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00"> <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
...@@ -120,3 +120,4 @@ class TestOpenAssessment(TestCase): ...@@ -120,3 +120,4 @@ class TestOpenAssessment(TestCase):
""" """
xblock_fragment = self.runtime.render(self.assessment, "student_view") xblock_fragment = self.runtime.render(self.assessment, "student_view")
self.assertTrue(xblock_fragment.body_html().find("Openassessmentblock")) self.assertTrue(xblock_fragment.body_html().find("Openassessmentblock"))
"""Tests the Workbench Scenario Parser functionality."""
from lxml import etree
from django.test import TestCase
from openassessment.xblock.scenario_parser import ScenarioParser
class TestScenarioParser(TestCase):
    """Test the ScenarioParser XML parsing class, which turns xml into filled XBlocks.

    These are the simplest possible checks: each parser utility method is
    called directly and its return value is inspected for correct shape,
    content, and types."""

    def setUp(self):
        self.test_parser = ScenarioParser("Dummy XBlock", "Dummy XML")

    def test_get_prompt(self):
        """A <prompt> node parses to exactly its text content."""
        prompt_text = "5de0ef7cc2c7469383b58febd2fdac29"
        prompt_xml = etree.fromstring("<prompt>{words}</prompt>".format(words=prompt_text))
        parsed = self.test_parser.get_prompt(prompt_xml)
        self.assertEqual(parsed, prompt_text)

    def test_get_rubric(self):
        """A <rubric> tree parses to instructions plus a list of criteria."""
        rubric_instructions_text = "This text is general instructions relating to this rubric. There should only be one set of instructions for the rubric."
        criterion_instructions_text = "This text is instructions for this criterion. There can be multiple criteria, but each one should only have one set of instructions."
        criterion_option_explain_text = "And this explains what the label for this option means. There can be only one explanation per label."
        rubric_xml = etree.fromstring("""<rubric>
        {rit}
        <criterion name="myCrit">
        {cit}
        <option val="99">
        This is some text labeling the criterion option worth 99 points
        Three can be multiple options per criterion.
        <explain>
        {coet}
        </explain>
        </option>
        </criterion>
        </rubric>""".format(rit=rubric_instructions_text,
                            cit=criterion_instructions_text,
                            coet=criterion_option_explain_text))
        instructions, criteria = self.test_parser.get_rubric(rubric_xml)

        # Basic shape of the rubric: instructions and criteria
        self.assertEqual(instructions, rubric_instructions_text)
        self.assertEqual(len(criteria), 1)

        # Look inside the criterion to make sure it's shaped correctly
        crit = criteria[0]
        self.assertEqual(crit['name'], 'myCrit')
        self.assertEqual(crit['instructions'], criterion_instructions_text)
        self.assertEqual(crit['total_value'], 99)
        self.assertEqual(len(crit['options']), 1)

        # And within the criterion, check that options appear to come out well-formed
        option_value, option_label, option_explanation = crit['options'][0]
        self.assertEqual(int(option_value), 99)
        self.assertEqual(option_explanation, criterion_option_explain_text)

    def test_get_evals(self):
        """An <evals> tree parses to a list of evaluation dicts."""
        evals_xml = etree.fromstring("""<evals>
        <selfeval name='0382e03c808e4f2bb12dfdd2d45d5c4b'
        must_grade="999"
        must_be_graded_by="73" />
        <peereval start="2014-12-20T19:00-7:00"
        due="2014-12-21T22:22-7:00"
        must_grade="5"
        must_be_graded_by="3" />
        <selfeval />
        </evals>""")
        parsed_list = self.test_parser.get_evals(evals_xml)

        # Self evaluations take all the parameters, but mostly ignore them.
        self.assertEqual(parsed_list[0]['type'], 'selfeval')
        self.assertEqual(parsed_list[0]['name'], '0382e03c808e4f2bb12dfdd2d45d5c4b')
        self.assertEqual(parsed_list[0]['must_grade'], 1)
        self.assertEqual(parsed_list[0]['must_be_graded_by'], 0)

        # Peer evaluations are more interesting
        self.assertEqual(parsed_list[1]['type'], 'peereval')
        self.assertEqual(parsed_list[1]['name'], '')
        self.assertEqual(parsed_list[1]['must_grade'], 5)
        self.assertEqual(parsed_list[1]['must_be_graded_by'], 3)

        # We can parse arbitrary workflow descriptions as a list of evaluations.
        # Whether or not the workflow system can use them is another matter
        self.assertEqual(parsed_list[2]['type'], 'selfeval')
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment