Commit bee1251e by Stephen Sanchez

Renamed all Evaluation to Assessment (except models). Some refactoring to begin the UI work

parent 716a0e6e
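For quick reference, the public renames this commit applies to the peer API and its callers, summarized from the diff below (the database models keep their Evaluation names). A sketch, not an exhaustive list:

# Old name (before this commit)        -> New name (after this commit)
PEER_API_RENAMES = {
    "create_evaluation": "create_assessment",
    "get_evaluations": "get_assessments",
    "get_submission_to_evaluate": "get_submission_to_assess",
    "PeerEvaluationSerializer": "PeerAssessmentSerializer",
    "PeerEvaluationRequestError": "PeerAssessmentRequestError",
    "PeerEvaluationWorkflowError": "PeerAssessmentWorkflowError",
    "PeerEvaluationInternalError": "PeerAssessmentInternalError",
}
# On the XBlock side, the <evals> scenario tag becomes <assessments>, and the old
# EvaluationModule/PeerEvaluation/SelfEvaluation helpers are replaced by the new
# Assessment/PeerAssessment/SelfAssessment classes shown further down.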
......@@ -6,7 +6,7 @@ from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation
class PeerEvaluationSerializer(serializers.ModelSerializer):
class PeerAssessmentSerializer(serializers.ModelSerializer):
class Meta:
model = PeerEvaluation
fields = (
......
......@@ -32,7 +32,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
def test_create_evaluation(self):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = api.create_evaluation(
evaluation = api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -44,21 +44,21 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
assessment_dict
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -66,7 +66,7 @@ class TestApi(TestCase):
assessment_dict,
MONDAY
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
......@@ -85,22 +85,22 @@ class TestApi(TestCase):
self.assertFalse(scores)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
......@@ -110,13 +110,13 @@ class TestApi(TestCase):
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
......@@ -127,7 +127,7 @@ class TestApi(TestCase):
self.assertEqual(12, scores[0]["points_possible"])
@raises(api.PeerEvaluationRequestError)
@raises(api.PeerAssessmentRequestError)
def test_bad_configuration(self):
api.has_finished_required_evaluating("Tim", -1)
......@@ -139,27 +139,27 @@ class TestApi(TestCase):
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
@raises(api.PeerEvaluationWorkflowError)
@raises(api.PeerAssessmentWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
api.get_submission_to_evaluate(STUDENT_ITEM, 3)
api.get_submission_to_assess(STUDENT_ITEM, 3)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(api.PeerEvaluationInternalError)
@raises(api.PeerAssessmentInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -172,7 +172,7 @@ class TestApi(TestCase):
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -181,7 +181,7 @@ class TestApi(TestCase):
MONDAY
)
mock_filter.side_effect = DatabaseError("Bad things happened")
api.get_evaluations(submission["uuid"])
api.get_assessments(submission["uuid"])
def test_choose_score(self):
self.assertEqual(0, api._calculate_final_score([]))
......
......@@ -2,7 +2,7 @@ import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from openassessment.peer.api import get_evaluations
from openassessment.peer.api import get_assessments
from submissions.api import SubmissionRequestError, get_submissions
log = logging.getLogger(__name__)
......@@ -38,7 +38,7 @@ def get_evaluations_for_student_item(request, course_id, student_id, item_id):
submissions = get_submissions(student_item_dict)
evaluations = []
for submission in submissions:
submission_evaluations = get_evaluations(submission["uuid"])
submission_evaluations = get_assessments(submission["uuid"])
for evaluation in submission_evaluations:
evaluation["submission_uuid"] = submission["uuid"]
evaluations.append(evaluation)
......
class Assessment(object):
assessment_type = None
name = ''
start_datetime = None
due_datetime = None
must_grade = 1
must_be_graded_by = 0
navigation_text = ""
path = ""
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"start_datetime": self.start_datetime,
"due_datetime": self.due_datetime,
"must_grade": self.must_grade,
"must_be_graded_by": self.must_be_graded_by,
"navigation_text": self.navigation_text,
"path": self.path
}
\ No newline at end of file
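To make the base class concrete, here is a minimal sketch of what create_ui_model() returns for the PeerAssessment subclass defined in the next file; the must_grade / must_be_graded_by values are illustrative:

from openassessment.xblock.peer_assessment import PeerAssessment

peer = PeerAssessment()
peer.must_grade = 5           # illustrative values, matching the demo scenarios below
peer.must_be_graded_by = 3
ui_model = peer.create_ui_model()
# ui_model == {
#     "assessment_type": "peer-assessment",
#     "name": "",
#     "start_datetime": None,
#     "due_datetime": None,
#     "must_grade": 5,
#     "must_be_graded_by": 3,
#     "navigation_text": "Your assessment(s) of peer responses",
#     "path": "static/html/oa_peer_assessment.html",
# }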
__author__ = 'stephensanchez'
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerAssessmentWorkflowError
from openassessment.xblock.assessment import Assessment
class PeerAssessment(Assessment):
assessment_type = "peer-assessment"
navigation_text = "Your assessment(s) of peer responses"
path = "static/html/oa_peer_assessment.html"
@classmethod
def assess(cls, student_item_dict, rubric_criteria, data):
"""Place an assessment into Openassessment system
"""
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in rubric_criteria),
"feedback": "Not yet implemented.",
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
student_item_dict["student_id"],
int(cls.must_grade),
int(cls.must_be_graded_by),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return assessment, "Success"
def get_peer_submission(self, student_item_dict):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
student_item_dict, self.must_be_graded_by
)
except PeerAssessmentWorkflowError:
# No submission is currently available for this student to assess (TODO: log this case?)
pass
return peer_submission
\ No newline at end of file
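A rough sketch of how a caller (for example an XBlock handler) might drive assess(); the payload keys mirror what the method reads above, while the concrete values, the rubric, and the handler context are assumptions:

from openassessment.xblock.peer_assessment import PeerAssessment

student_item_dict = {"student_id": "Bob"}                     # assess() only reads student_id here
rubric_criteria = [{"total_value": 5}, {"total_value": 7}]    # points_possible sums to 12
data = {
    "submission_uuid": "submission-uuid-to-assess",           # placeholder; normally peer_submission["uuid"] from get_peer_submission()
    "points_earned": ["3", "7"],                              # raw form values; assess() casts them with map(int, ...)
}
assessment, message = PeerAssessment.assess(student_item_dict, rubric_criteria, data)
# message == "Success"; assessment["scored_at"] has already been stringified for JSON serialization.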
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
from openassessment.xblock.peer_assessment import PeerAssessment
from openassessment.xblock.self_assessment import SelfAssessment
class ScenarioParser(object):
......@@ -63,35 +65,35 @@ class ScenarioParser(object):
rubric_criteria.append(crit)
return (e.text.strip(), rubric_criteria)
def get_evals(self, evaluations):
"""<evals>
def get_assessments(self, assessments):
"""<assessments>
<!-- There can be multiple types of assessments given in any
arbitrary order, like this self assessment followed by a
peer assessment -->
<self />
<peereval start="2014-12-20T19:00-7:00"
<self-assessment />
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</evals>"""
evaluation_list = []
for ev in evaluations:
evaluation = None
type = ev.tag
if 'peer-evaluation' == type:
evaluation = PeerEvaluation()
elif 'self-evaluation' == type:
evaluation = SelfEvaluation()
if evaluation:
evaluation.name = ev.attrib.get('name', '')
evaluation.start_datetime = ev.attrib.get('start', None)
evaluation.due_datetime = ev.attrib.get('due', None)
evaluation.must_grade = int(ev.attrib.get('must_grade', 1))
evaluation.must_be_graded_by = int(ev.attrib.get('must_be_graded_by', 0))
evaluation_list.append(evaluation)
return evaluation_list
</peer-assessment>"""
assessment_list = []
for asmnt in assessments:
assessment = None
assessment_type = asmnt.tag
if 'peer-assessment' == assessment_type:
assessment = PeerAssessment()
assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
elif 'self-assessment' == assessment_type:
assessment = SelfAssessment()
if assessment:
assessment.name = asmnt.attrib.get('name', '')
assessment.start_datetime = asmnt.attrib.get('start', None)
assessment.due_datetime = asmnt.attrib.get('due', None)
assessment_list.append(assessment)
return assessment_list
def parse(self):
"""Instantiate xblock object from runtime XML definition."""
......@@ -103,32 +105,8 @@ class ScenarioParser(object):
self.xblock.rubric_criteria) = self.get_rubric(child)
elif child.tag == 'title':
self.xblock.title = self.get_title(child)
elif child.tag == 'evals':
self.xblock.rubric_evals = self.get_evals(child)
elif child.tag == 'assessments':
self.xblock.rubric_assessments = self.get_assessments(child)
else:
self.unknown_handler(self.xblock, child)
return self.xblock
class EvaluationModule():
eval_type = None
name = ''
start_datetime = None
due_datetime = None
must_grade = 1
must_be_graded_by = 0
class PeerEvaluation(EvaluationModule):
eval_type = "peer-evaluation"
navigation_text = "Your evaluation(s) of peer responses"
url = "static/html/oa_peer_evaluation.html"
class SelfEvaluation(EvaluationModule):
eval_type = "self-evaluation"
navigation_text = "Your evaluation of your response"
url = "static/html/oa_self_evaluation.html"
\ No newline at end of file
from openassessment.xblock.assessment import Assessment
class SelfAssessment(Assessment):
assessment_type = "self-assessment"
navigation_text = "Your assessment of your response"
path = "static/html/oa_self_assessment.html"
\ No newline at end of file
......@@ -20,9 +20,9 @@
<span class="openassessment__title--sub">
<span class="problem-type problem-type--open-ended-response">Open Ended Response</span>
{% for eval in rubric_evals %}
{% for assessment in rubric_assessments %}
+
<span class="problem-type problem-type--{{ eval.type }}">{{ eval.name }}</span>
<span class="problem-type problem-type--{{ assessment.type }}">{{ assessment.name }}</span>
{% endfor %}
</span>
</h1>
......@@ -35,9 +35,9 @@
<li class="list--nav__item">
<a class="action" href="#openassessment__response">Your response to this problem</a>
</li>
{% for eval in rubric_evals %}
{% for assessment in rubric_assessments %}
<li class="list--nav__item">
<a class="action" href="#openassessment__{{ eval.type }}">{{ eval.navigation_text }}</a>
<a class="action" href="#openassessment__{{ assessment.type }}">{{ assessment.navigation_text }}</a>
</li>
{% endfor %}
<li class="list--nav__item">
......@@ -73,8 +73,8 @@
<!-- STEP: response -->
{% include "static/html/oa_response.html" %}
{% for eval in rubric_evals %}
{% include eval.url %}
{% for assessment in rubric_assessments %}
{% include assessment.path %}
{% endfor %}
</ol>
......
......@@ -11,7 +11,7 @@
<!-- CASE: default/not started -->
<li id="openassessment__peer-evaluation" class="openassessment__steps__step step--peer-evaluation">
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
{#<header class="step__header">#}
<h2 class="step__title">
<span class="step__label">Evaluate Peers' Responses</span>
......@@ -32,34 +32,34 @@
</div>
<div class="step__content">
<ul class="list--peer-evaluations">
<li class="list--peer-evaluations__item">
<article class="peer-evaluation" id="peer-evaluation--001">
<header class="peer-evaluation__header">
<h3 class="peer-evaluation__title">Evaluation #
<span class="peer-evaluation__number--current">1</span> of
<span class="peer-evaluation__number--required">3</span>
<ul class="list--peer-assessments">
<li class="list--peer-assessments__item">
<article class="peer-assessment" id="peer-assessment--001">
<header class="peer-assessment__header">
<h3 class="peer-assessment__title">Assessment #
<span class="peer-assessment__number--current">1</span> of
<span class="peer-assessment__number--required">3</span>
</h3>
<span class="peer-evaluation__expected-time">
<span class="peer-assessment__expected-time">
<span class="label">Expected Time Spent:</span>
<span class="value">20 Minutes</span>
</span>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="peer-evaluation__response">
<div class="peer-assessment__response">
{{ peer_submission.answer }}
</div>
<form id="peer-evaluation--001__evaluation" class="peer-evaluation__evaluation" method="post">
<fieldset class="evaluation__fields">
<legend class="evaluation__instruction">{{ rubric_instructions }}</legend>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields evaluation__rubric">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required evaluation__rubric__question" id="evaluation__rubric__question--{{ criterion.name }}">
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
......@@ -69,8 +69,8 @@
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="evaluation__rubric__question--{{ criterion.name }}" id="evaluation__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="evaluation__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
......@@ -80,16 +80,16 @@
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea evaluation__rubric__question" id="evaluation__rubric__question--004">
<label for="evaluation__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="evaluation__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="peer-evaluation--001__evaluation__submit" class="action action--submit">Submit your evaluation &amp; move to response #2</button>
<button type="submit" id="peer-assessment--001__assessment__submit" class="action action--submit">Submit your assessment &amp; move to response #2</button>
</li>
</ul>
</form>
......
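For orientation, a sketch of the template context this peer-assessment step expects; the variable names (peer_submission, rubric_instructions, rubric_criteria and the per-criterion name/instructions/options fields) come from the template above, and the sample values are borrowed from the demo scenarios further down:

context = {
    "peer_submission": {"answer": "Bob's answer"},
    "rubric_instructions": "Read for conciseness, clarity of thought, and form.",
    "rubric_criteria": [
        {
            "name": "concise",
            "instructions": "How concise is it?",
            "options": [(0, "The Bible"), (1, "Ernest Hemingway"), (3, "Matsuo Basho")],
        },
        {
            "name": "form",
            "instructions": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
            "options": [(0, "IRC"), (1, "Real Email"), (2, "Old-timey letters")],
        },
    ],
}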
......@@ -4,14 +4,14 @@
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .self-evaluation item needs a unique id attribute formatted as #self-evaluation--###
* each .self-assessment item needs a unique id attribute formatted as #self-assessment--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__self-evaluation" class="openassessment__steps__step step--self-evaluation">
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{# <header class="step__header">#}
<h2 class="step__title">
<span class="step__title__label">Evaluate Your Response</span>
......@@ -25,24 +25,24 @@
{# </header>#}
<div class="step--content">
<article class="self-evaluation" id="self-evaluation">
<header class="self-evaluation__header">
<h3 class="self-evaluation__title">Your Submitted Response</h3>
<article class="self-assessment" id="self-assessment">
<header class="self-assessment__header">
<h3 class="self-assessment__title">Your Submitted Response</h3>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="self-evaluation__response">
<div class="self-assessment__response">
{{ self_submission.answer }}
</div>
<form id="self-evaluation--001__evaluation" class="self-evaluation__evaluation" method="post">
<fieldset class="evaluation__fields">
<legend class="evaluation__instruction">{{ rubric_instructions }}</legend>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields evaluation__rubric">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required evaluation__rubric__question" id="evaluation__rubric__question--{{ criterion.name }}">
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
......@@ -52,8 +52,8 @@
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="evaluation__rubric__question--{{ criterion.name }}" id="evaluation__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="evaluation__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
......@@ -63,16 +63,16 @@
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea evaluation__rubric__question" id="evaluation__rubric__question--004">
<label for="evaluation__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="evaluation__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="self-evaluation--001__evaluation__submit" class="action action--submit">Submit your evaluation</button>
<button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">Submit your assessment</button>
</li>
</ul>
</form>
......
......@@ -8,8 +8,8 @@ function OpenAssessmentBlock(runtime, element) {
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function prepare_assessment_post(element) {
selector = $("input[type=radio]:checked", element);
values = [];
var selector = $("input[type=radio]:checked", element);
var values = [];
for (var i = 0; i < selector.length; i++) {
values[i] = selector[i].value;
}
......@@ -17,8 +17,8 @@ function OpenAssessmentBlock(runtime, element) {
}
function displayStatus(result) {
status = result[0]
error_msg = result[1]
var status = result[0];
var error_msg = result[1];
if (status) {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else {
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<assessments>
<self-assessment name="self-assessment" />
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
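For this scenario, get_assessments() in the ScenarioParser above should produce objects equivalent to the following sketch (start/due are kept as raw strings; must_grade and must_be_graded_by are cast to int, with 1 and 0 as the defaults):

from openassessment.xblock.peer_assessment import PeerAssessment
from openassessment.xblock.self_assessment import SelfAssessment

expected_self = SelfAssessment()
expected_self.name = "self-assessment"                 # must_grade/must_be_graded_by stay at the class defaults (1 and 0)

expected_peer = PeerAssessment()
expected_peer.name = "peer-assessment"
expected_peer.start_datetime = "2014-12-20T19:00-7:00"
expected_peer.due_datetime = "2014-12-21T22:22-7:00"
expected_peer.must_grade = 5
expected_peer.must_be_graded_by = 3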
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example: "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<assessments>
<peer-assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
__author__ = 'stephensanchez'
......@@ -46,13 +46,14 @@ RUBRIC_CONFIG = """
<option val="5">The Elements of Style</option>
</criterion>
</rubric>
<evals>
<peer-evaluation start="2014-12-20T19:00-7:00"
<assessments>
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-evaluation/>
</evals>
<self-assessment/>
</assessments>
</openassessment>
"""
......
......@@ -61,34 +61,34 @@ class TestScenarioParser(TestCase):
self.assertEqual(int(criterion_option_value), 99)
self.assertEqual(criterion_explanation, criterion_option_explain_text)
def test_get_evals(self):
"""Given an <evals> list, return a list of evaluations."""
evals = """<evals>
<selfeval name='0382e03c808e4f2bb12dfdd2d45d5c4b'
def test_get_assessments(self):
"""Given an <assessments> list, return a list of assessment modules."""
assessments = """<assessments>
<self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
must_grade="999"
must_be_graded_by="73" />
<peereval start="2014-12-20T19:00-7:00"
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<selfeval />
</evals>"""
evals_xml = etree.fromstring(evals)
parsed_list = self.test_parser.get_evals(evals_xml)
# Self evaluations take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0]['type'], 'selfeval')
self.assertEqual(parsed_list[0]['name'], '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[0]['must_grade'], 1)
self.assertEqual(parsed_list[0]['must_be_graded_by'], 0)
# Peer evaluations are more interesting
self.assertEqual(parsed_list[1]['type'], 'peereval')
self.assertEqual(parsed_list[1]['name'], '')
self.assertEqual(parsed_list[1]['must_grade'], 5)
self.assertEqual(parsed_list[1]['must_be_graded_by'], 3)
# We can parse arbitrary workflow descriptions as a list of evaluations.
<self-assessment />
</assessments>"""
assessments_xml = etree.fromstring(assessments)
parsed_list = self.test_parser.get_assessments(assessments_xml)
# Self assessments take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0].assessment_type, 'self-assessment')
self.assertEqual(parsed_list[0].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[0].must_grade, 1)
self.assertEqual(parsed_list[0].must_be_graded_by, 0)
# Peer assessments are more interesting
self.assertEqual(parsed_list[1].assessment_type, 'peer-assessment')
self.assertEqual(parsed_list[1].name, '')
self.assertEqual(parsed_list[1].must_grade, 5)
self.assertEqual(parsed_list[1].must_be_graded_by, 3)
# We can parse arbitrary workflow descriptions as a list of assessments.
# Whether or not the workflow system can use them is another matter
self.assertEqual(parsed_list[2]['type'], 'selfeval')
self.assertEqual(parsed_list[2].assessment_type, 'self-assessment')