Commit bee1251e by Stephen Sanchez

Renamed all Evaluation references to Assessment (except models). Some refactoring to begin the UI work.

parent 716a0e6e
"""Public interface managing the workflow for peer assessments.
The Peer Evaluation Workflow API exposes all public actions required to complete
The Peer Assessment Workflow API exposes all public actions required to complete
the workflow for a given submission.
"""
......@@ -11,7 +11,7 @@ from django.db import DatabaseError
import math
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer
from openassessment.peer.serializers import PeerAssessmentSerializer
from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
......@@ -21,17 +21,17 @@ logger = logging.getLogger(__name__)
PEER_TYPE = "PE"
class PeerEvaluationError(Exception):
"""Generic Peer Evaluation Error
class PeerAssessmentError(Exception):
"""Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the
Peer Evaluation Workflow.
Peer Assessment Workflow.
"""
pass
class PeerEvaluationRequestError(PeerEvaluationError):
class PeerAssessmentRequestError(PeerAssessmentError):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or incorrect
......@@ -44,7 +44,7 @@ class PeerEvaluationRequestError(PeerEvaluationError):
self.field_errors = copy.deepcopy(field_errors)
class PeerEvaluationWorkflowError(PeerEvaluationError):
class PeerAssessmentWorkflowError(PeerAssessmentError):
"""Error indicating a step in the workflow cannot be completed,
Raised when the action taken cannot be completed in the workflow. This can
......@@ -54,7 +54,7 @@ class PeerEvaluationWorkflowError(PeerEvaluationError):
pass
class PeerEvaluationInternalError(PeerEvaluationError):
class PeerAssessmentInternalError(PeerAssessmentError):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
......@@ -64,16 +64,16 @@ class PeerEvaluationInternalError(PeerEvaluationError):
pass
def create_evaluation(
def create_assessment(
submission_uuid,
scorer_id,
required_evaluations_for_student,
required_evaluations_for_submission,
required_assessments_for_student,
required_assessments_for_submission,
assessment_dict,
scored_at=None):
"""Creates an evaluation on the given submission.
"""Creates an assessment on the given submission.
Evaluations are created based on feedback associated with a particular
Assessments are created based on feedback associated with a particular
rubric.
Args:
......@@ -82,27 +82,27 @@ def create_evaluation(
Submission model.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
required_evaluations_for_student (int): The number of evaluations
required_assessments_for_student (int): The number of assessments
required for the student to receive a score for their submission.
required_evaluations_for_submission (int): The number of evaluations
required_assessments_for_submission (int): The number of assessments
required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
scored_at (datetime): Optional argument to override the time in which
the evaluation took place. If not specified, scored_at is set to
the assessment took place. If not specified, scored_at is set to
now.
Returns:
dict: The dictionary representing the evaluation. This includes the
dict: The dictionary representing the assessment. This includes the
points earned, points possible, time scored, scorer id, score type,
and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid, or
PeerAssessmentRequestError: Raised when the submission_id is invalid, or
the assessment_dict does not contain the required values to create
an assessment.
PeerEvaluationInternalError: Raised when there is an internal error
while creating a new evaluation.
PeerAssessmentInternalError: Raised when there is an internal error
while creating a new assessment.
Examples:
>>> assessment_dict = dict(
......@@ -110,7 +110,7 @@ def create_evaluation(
>>> points_possible=12,
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_evaluation("1", "Tim", assessment_dict)
>>> create_assessment("1", "Tim", assessment_dict)
{
'points_earned': 6,
'points_possible': 12,
......@@ -122,7 +122,7 @@ def create_evaluation(
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
peer_evaluation = {
peer_assessment = {
"scorer_id": scorer_id,
"submission": submission.pk,
"points_earned": sum(assessment_dict["points_earned"]),
......@@ -131,11 +131,11 @@ def create_evaluation(
"feedback": assessment_dict["feedback"],
}
if scored_at:
peer_evaluation["scored_at"] = scored_at
peer_assessment["scored_at"] = scored_at
peer_serializer = PeerEvaluationSerializer(data=peer_evaluation)
peer_serializer = PeerAssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid():
raise PeerEvaluationRequestError(peer_serializer.errors)
raise PeerAssessmentRequestError(peer_serializer.errors)
peer_serializer.save()
# Check if the submission is finished and its Author has graded enough.
......@@ -143,11 +143,11 @@ def create_evaluation(
_check_if_finished_and_create_score(
student_item,
submission,
required_evaluations_for_student,
required_evaluations_for_submission
required_assessments_for_student,
required_assessments_for_submission
)
# Check if the grader is finished and has enough evaluations
# Check if the grader is finished and has enough assessments
scorer_item = StudentItem.objects.get(
student_id=scorer_id,
item_id=student_item.item_id,
......@@ -162,28 +162,28 @@ def create_evaluation(
_check_if_finished_and_create_score(
scorer_item,
scorer_submissions[0],
required_evaluations_for_student,
required_evaluations_for_submission
required_assessments_for_student,
required_assessments_for_submission
)
return peer_serializer.data
except DatabaseError:
error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
error_message = u"An error occurred while creating assessment {} for submission: {} by: {}".format(
assessment_dict,
submission_uuid,
scorer_id
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
raise PeerAssessmentInternalError(error_message)
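
A minimal usage sketch of the renamed entry point; the IDs, thresholds, and rubric values below are illustrative only (points_earned is a list because create_assessment sums it):

# Hypothetical call; values are made up for illustration.
assessment_dict = {
    "points_earned": [6],
    "points_possible": 12,
    "feedback": "Your submission was thrilling.",
}
new_assessment = create_assessment(
    submission_uuid="1",
    scorer_id="Tim",
    required_assessments_for_student=5,
    required_assessments_for_submission=3,
    assessment_dict=assessment_dict,
)
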
def _check_if_finished_and_create_score(student_item,
submission,
required_evaluations_for_student,
required_evaluations_for_submission):
required_assessments_for_student,
required_assessments_for_submission):
"""Basic function for checking if a student is finished with peer workflow.
Checks if the student is finished with the peer evaluation workflow. If the
Checks if the student is finished with the peer assessment workflow. If the
student already has a final grade calculated, there is no need to proceed.
If they do not yet have a grade, a final grade is calculated for them.
......@@ -193,19 +193,19 @@ def _check_if_finished_and_create_score(student_item,
finished_evaluating = has_finished_required_evaluating(
student_item.student_id,
required_evaluations_for_student
required_assessments_for_student
)
evaluations = PeerEvaluation.objects.filter(submission=submission)
submission_finished = evaluations.count() >= required_evaluations_for_submission
assessments = PeerEvaluation.objects.filter(submission=submission)
submission_finished = assessments.count() >= required_assessments_for_submission
scores = []
for evaluation in evaluations:
scores.append(evaluation.points_earned)
for assessment in assessments:
scores.append(assessment.points_earned)
if finished_evaluating and submission_finished:
submission_api.set_score(
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
_calculate_final_score(scores),
evaluations[0].points_possible
assessments[0].points_possible
)
......@@ -228,7 +228,7 @@ def _calculate_final_score(scores):
return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
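
Only the even-count branch of _calculate_final_score is visible in this hunk. A hedged reconstruction of the whole helper, assuming a conventional median over the sorted scores (the empty and odd-count branches are assumptions, consistent with test_choose_score further down):

import math

def _calculate_final_score_sketch(scores):
    """Hedged reconstruction: median of peer scores, rounding up between the middle pair."""
    if not scores:
        return 0  # matches api._calculate_final_score([]) == 0 in test_choose_score
    scores = sorted(scores)
    median = len(scores) // 2
    if len(scores) % 2:
        return scores[median]  # odd count: take the middle score
    # even count: average the two middle scores, rounding up (the line shown above)
    return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))
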
def has_finished_required_evaluating(student_id, required_evaluations):
def has_finished_required_evaluating(student_id, required_assessments):
"""Check if a student still needs to evaluate more submissions
Per the contract of the peer assessment workflow, a student must evaluate a
......@@ -237,7 +237,7 @@ def has_finished_required_evaluating(student_id, required_evaluations):
Args:
student_id (str): The student in the peer grading workflow to check for
peer workflow criteria. This argument is required.
required_evaluations (int): The number of evaluations a student has to
required_assessments (int): The number of assessments a student has to
submit before receiving the feedback on their submission. This is a
required argument.
......@@ -247,9 +247,9 @@ def has_finished_required_evaluating(student_id, required_evaluations):
evaluate more peer submissions.
Raises:
PeerEvaluationRequestError: Raised when the student_id is invalid, or
the required_evaluations is not a positive integer.
PeerEvaluationInternalError: Raised when there is an internal error
PeerAssessmentRequestError: Raised when the student_id is invalid, or
the required_assessments is not a positive integer.
PeerAssessmentInternalError: Raised when there is an internal error
while evaluating this workflow rule.
Examples:
......@@ -257,37 +257,37 @@ def has_finished_required_evaluating(student_id, required_evaluations):
True
"""
if required_evaluations < 0:
raise PeerEvaluationRequestError(
"Required Evaluation count must be a positive integer.")
if required_assessments < 0:
raise PeerAssessmentRequestError(
"Required Assessment count must be a positive integer.")
return PeerEvaluation.objects.filter(
scorer_id=student_id
).count() >= required_evaluations
).count() >= required_assessments
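
An illustrative check using the renamed parameter; the threshold of 3 is an assumption for the example:

# Hypothetical gating check before releasing a grade to "Tim".
if not has_finished_required_evaluating("Tim", required_assessments=3):
    pass  # "Tim" still needs to assess more peer submissions
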
def get_evaluations(submission_id):
"""Retrieve the evaluations for a submission.
def get_assessments(submission_id):
"""Retrieve the assessments for a submission.
Retrieves all the evaluations for a submissions. This API returns related
Retrieves all the assessments for a submission. This API returns related
feedback without making any assumptions about grading. Any outstanding
evaluations associated with this submission will not be returned.
assessments associated with this submission will not be returned.
Args:
submission_id (str): The submission all the requested evaluations are
submission_id (str): The submission all the requested assessments are
associated with. Required.
Returns:
list(dict): A list of dictionaries, where each dictionary represents a
separate evaluation. Each evaluation contains points earned, points
separate assessment. Each assessment contains points earned, points
possible, time scored, scorer id, score type, and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid.
PeerEvaluationInternalError: Raised when there is an internal error
while retrieving the evaluations associated with this submission.
PeerAssessmentRequestError: Raised when the submission_id is invalid.
PeerAssessmentInternalError: Raised when there is an internal error
while retrieving the assessments associated with this submission.
Examples:
>>> get_evaluations("1")
>>> get_assessments("1")
[
{
'points_earned': 6,
......@@ -308,45 +308,45 @@ def get_evaluations(submission_id):
"""
try:
submission = Submission.objects.get(uuid=submission_id)
evaluations = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerEvaluationSerializer(evaluations, many=True)
assessments = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerAssessmentSerializer(assessments, many=True)
return serializer.data
except DatabaseError:
error_message = (
u"Error getting evaluations for submission {}".format(submission_id)
u"Error getting assessments for submission {}".format(submission_id)
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
raise PeerAssessmentInternalError(error_message)
def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
def get_submission_to_assess(student_item_dict, required_num_assessments):
"""Get a submission to peer evaluate.
Retrieves a submission for evaluation for the given student_item. This will
Retrieves a submission for assessment for the given student_item. This will
not return a submission submitted by the requesting scorer. The submission
returned (TODO: will be) is based on a priority queue. Submissions with the
fewest evaluations and the most active students will be prioritized over
submissions from students who are not as active in the evaluation process.
fewest assessments and the most active students will be prioritized over
submissions from students who are not as active in the assessment process.
Args:
student_item_dict (dict): The student item information from the student
requesting a submission for evaluation. The dict contains an
requesting a submission for assessment. The dict contains an
item_id, course_id, and item_type, used to identify the unique
question for the review, while the student_id is used to explicitly
avoid giving the student their own submission.
required_num_evaluations (int): The number of evaluations a submission
requires before it has completed the peer evaluation process.
required_num_assessments (int): The number of assessments a submission
requires before it has completed the peer assessment process.
Returns:
dict: A peer submission for evaluation. This contains a 'student_item',
dict: A peer submission for assessment. This contains a 'student_item',
'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
used for evaluation.
used for assessment.
Raises:
PeerEvaluationRequestError: Raised when the request parameters are
PeerAssessmentRequestError: Raised when the request parameters are
invalid for the request.
PeerEvaluationInternalError:
PeerEvaluationWorkflowError:
PeerAssessmentInternalError:
PeerAssessmentWorkflowError:
Examples:
>>> student_item_dict = dict(
......@@ -355,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> get_submission_to_evaluate(student_item_dict, 3)
>>> get_submission_to_assess(student_item_dict, 3)
{
'student_item': 2,
'attempt_number': 1,
......@@ -375,26 +375,26 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
submission = _get_first_submission_not_evaluated(
student_items,
student_item_dict["student_id"],
required_num_evaluations
required_num_assessments
)
if not submission:
raise PeerEvaluationWorkflowError(
"There are no submissions available for evaluation."
raise PeerAssessmentWorkflowError(
"There are no submissions available for assessment."
)
return SubmissionSerializer(submission).data
def _get_first_submission_not_evaluated(student_items, student_id, required_num_evaluations):
def _get_first_submission_not_evaluated(student_items, student_id, required_num_assessments):
# TODO: We need a priority queue.
submissions = Submission.objects.filter(student_item__in=student_items).order_by(
"submitted_at",
"-attempt_number"
)
for submission in submissions:
evaluations = PeerEvaluation.objects.filter(submission=submission)
if evaluations.count() < required_num_evaluations:
assessments = PeerEvaluation.objects.filter(submission=submission)
if assessments.count() < required_num_assessments:
already_evaluated = False
for evaluation in evaluations:
already_evaluated = already_evaluated or evaluation.scorer_id == student_id
for assessment in assessments:
already_evaluated = already_evaluated or assessment.scorer_id == student_id
if not already_evaluated:
return submission
\ No newline at end of file
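
The loop above tracks an already_evaluated flag by hand. An equivalent sketch, assuming only the standard Django queryset API, can short-circuit with exists() while the priority queue in the TODO is still pending:

def _get_first_submission_not_evaluated_sketch(student_items, student_id, required_num_assessments):
    # Hypothetical rewrite of the helper above; same linear scan, same ordering.
    submissions = Submission.objects.filter(student_item__in=student_items).order_by(
        "submitted_at",
        "-attempt_number"
    )
    for submission in submissions:
        assessments = PeerEvaluation.objects.filter(submission=submission)
        if assessments.count() < required_num_assessments:
            if not assessments.filter(scorer_id=student_id).exists():
                return submission
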
......@@ -6,7 +6,7 @@ from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation
class PeerEvaluationSerializer(serializers.ModelSerializer):
class PeerAssessmentSerializer(serializers.ModelSerializer):
class Meta:
model = PeerEvaluation
fields = (
......
......@@ -32,7 +32,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
def test_create_evaluation(self):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = api.create_evaluation(
evaluation = api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -44,21 +44,21 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
assessment_dict
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -66,7 +66,7 @@ class TestApi(TestCase):
assessment_dict,
MONDAY
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
......@@ -85,22 +85,22 @@ class TestApi(TestCase):
self.assertFalse(scores)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
......@@ -110,13 +110,13 @@ class TestApi(TestCase):
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
......@@ -127,7 +127,7 @@ class TestApi(TestCase):
self.assertEqual(12, scores[0]["points_possible"])
@raises(api.PeerEvaluationRequestError)
@raises(api.PeerAssessmentRequestError)
def test_bad_configuration(self):
api.has_finished_required_evaluating("Tim", -1)
......@@ -139,27 +139,27 @@ class TestApi(TestCase):
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
@raises(api.PeerEvaluationWorkflowError)
@raises(api.PeerAssessmentWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
api.get_submission_to_evaluate(STUDENT_ITEM, 3)
api.get_submission_to_assess(STUDENT_ITEM, 3)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(api.PeerEvaluationInternalError)
@raises(api.PeerAssessmentInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -172,7 +172,7 @@ class TestApi(TestCase):
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
......@@ -181,7 +181,7 @@ class TestApi(TestCase):
MONDAY
)
mock_filter.side_effect = DatabaseError("Bad things happened")
api.get_evaluations(submission["uuid"])
api.get_assessments(submission["uuid"])
def test_choose_score(self):
self.assertEqual(0, api._calculate_final_score([]))
......
......@@ -2,7 +2,7 @@ import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from openassessment.peer.api import get_evaluations
from openassessment.peer.api import get_assessments
from submissions.api import SubmissionRequestError, get_submissions
log = logging.getLogger(__name__)
......@@ -38,7 +38,7 @@ def get_evaluations_for_student_item(request, course_id, student_id, item_id):
submissions = get_submissions(student_item_dict)
evaluations = []
for submission in submissions:
submission_evaluations = get_evaluations(submission["uuid"])
submission_evaluations = get_assessments(submission["uuid"])
for evaluation in submission_evaluations:
evaluation["submission_uuid"] = submission["uuid"]
evaluations.append(evaluation)
......
class Assessment(object):
assessment_type = None
name = ''
start_datetime = None
due_datetime = None
must_grade = 1
must_be_graded_by = 0
navigation_text = ""
path = ""
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"start_datetime": self.start_datetime,
"due_datetime": self.due_datetime,
"must_grade": self.must_grade,
"must_be_graded_by": self.must_be_graded_by,
"navigation_text": self.navigation_text,
"path": self.path
}
\ No newline at end of file
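
A usage sketch of the new base class, mirroring how DEFAULT_PEER_ASSESSMENT is configured later in this commit:

from openassessment.xblock.peer_assessment import PeerAssessment

peer = PeerAssessment()  # assessment_type is "peer-assessment"
peer.name = "peer-assessment"
peer.must_grade = 5
peer.must_be_graded_by = 3

ui_model = peer.create_ui_model()
# ui_model["assessment_type"] == "peer-assessment"
# ui_model["must_grade"] == 5
# ui_model["path"] == "static/html/oa_peer_assessment.html"
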
__author__ = 'stephensanchez'
......@@ -4,16 +4,15 @@ from django.template.context import Context
import pkg_resources
from django.template.loader import get_template
from openassessment.peer.api import PeerEvaluationWorkflowError
import datetime
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment
from openassessment.xblock.peer_assessment import PeerAssessment
from submissions.api import SubmissionRequestError
from submissions import api
from openassessment.peer import api as peer_api
from scenario_parser import ScenarioParser
......@@ -41,20 +40,31 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Determine if there is a unifying theme or main idea.",
'total_value': 5,
'options': [
(0, "Poor", "Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.",),
(3, "Fair", "Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.",),
(5, "Good", "Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.",),
(0, "Poor", """Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus.""",),
(3, "Fair", """Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task.""",),
(5, "Good", """Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task.""",),
],
},
{
'name': "Content",
'instructions': "Evaluate the content of the submission",
'instructions': "Assess the content of the submission",
'total_value': 5,
'options': [
(0, "Poor", "Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.",),
(1, "Fair", "Includes little information and few or no details. Explores only one or two facets of the topic.",),
(3, "Good", "Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.",),
(5, "Excellent", "Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.",),
(0, "Poor", """Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic.""",),
(1, "Fair", """Includes little information and few or no details.
Explores only one or two facets of the topic.""",),
(3, "Good", """Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic.""",),
(5, "Excellent", """Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic.""",),
],
},
{
......@@ -62,9 +72,13 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Determine if the submission is well organized.",
'total_value': 2,
'options': [
(0, "Poor", "Ideas organized illogically, transitions weak, and response difficult to follow.",),
(1, "Fair", "Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.",),
(2, "Good", "Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.",),
(0, "Poor", """Ideas organized illogically, transitions weak, and
response difficult to follow.""",),
(1, "Fair", """Attempts to logically organize ideas. Attempts to
progress in an order that enhances meaning, and demonstrates use
of transitions.""",),
(2, "Good", """Ideas organized logically. Progresses in an order
that enhances meaning. Includes smooth transitions.""",),
],
},
{
......@@ -72,9 +86,15 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Read for style.",
'total_value': 2,
'options': [
(0, "Poor", "Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.",),
(1, "Fair", "Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).",),
(2, "Good", "Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.",),
(0, "Poor", """Contains limited vocabulary, with many words used
incorrectly. Demonstrates problems with sentence patterns.""",),
(1, "Fair", """Contains basic vocabulary, with words that are
predictable and common. Contains mostly simple sentences
(although there may be an attempt at more varied sentence
patterns).""",),
(2, "Good", """Includes vocabulary to make explanations detailed and
precise. Includes varied sentence patterns, including complex
sentences.""",),
],
},
{
......@@ -82,170 +102,82 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Read for style.",
'total_value': 2,
'options': [
(0, "Poor", "Demonstrates language and tone that may be inappropriate to task and reader.",),
(1, "Fair", "Demonstrates an attempt to adjust language and tone to task and reader.",),
(2, "Good", "Demonstrates effective adjustment of language and tone to task and reader.",),
(0, "Poor", """Demonstrates language and tone that may be
inappropriate to task and reader.""",),
(1, "Fair", """Demonstrates an attempt to adjust language and tone
to task and reader.""",),
(2, "Good", """Demonstrates effective adjustment of language and
tone to task and reader.""",),
],
}
]
DEFAULT_EVAL_MODULES = [
{
'type': "peereval",
'name': "peereval",
'start_datetime': datetime.datetime.now,
'due_datetime': None,
'must_grade': 5,
'must_be_graded_by': 3,
},
DEFAULT_PEER_ASSESSMENT = PeerAssessment()
DEFAULT_PEER_ASSESSMENT.name = "peer-assessment"
DEFAULT_PEER_ASSESSMENT.start_datetime = datetime.datetime.now().isoformat()
DEFAULT_PEER_ASSESSMENT.must_grade = 5
DEFAULT_PEER_ASSESSMENT.must_be_graded_by = 3
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
]
EXAMPLE_POVERTY_RUBRIC = (
"OpenAssessmentBlock Poverty Rubric",
"""
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is it's form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<evals>
<peer-evaluation start="2014-12-20T19:00-7:00"
name="Peer Evaluation"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-evaluation name="Self Evaluation" />
</evals>
</openassessment>
</vertical_demo>
"""
)
EXAMPLE_CENSORSHIP_RUBRIC = (
"OpenAssessmentBlock Censorship Rubric",
"""
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is it's form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<evals>
<self-evaluation name="Self Evaluation" />
<peer-evaluation name="Peer Evaluation"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</evals>
</openassessment>
</vertical_demo>
"""
)
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
class OpenAssessmentBlock(XBlock):
"""Displays a question and gives an area where students can compose a response."""
start_datetime = String(default=datetime.datetime.now().isoformat(), scope=Scope.content, help="ISO-8601 formatted string representing the start date of this assignment.")
due_datetime = String(default=None, scope=Scope.content, help="ISO-8601 formatted string representing the end date of this assignment.")
title = String(default="", scope=Scope.content, help="A title to display to a student (plain text).")
prompt = String( default=DEFAULT_PROMPT, scope=Scope.content, help="A prompt to display to a student (plain text).")
rubric = List( default=[], scope=Scope.content, help="Instructions and criteria for students giving feedback.")
rubric_instructions = String( default=DEFAULT_RUBRIC_INSTRUCTIONS, scope=Scope.content, help="Instructions for self and peer assessment.")
rubric_criteria = List(default=DEFAULT_RUBRIC_CRITERIA, scope=Scope.content, help="The different parts of grading for students giving feedback.")
rubric_evals = List(default=DEFAULT_EVAL_MODULES, scope=Scope.content, help="The requested set of evaluations and the order in which to apply them.")
course_id = String( default=u"TestCourse", scope=Scope.content, help="The course_id associated with this prompt (until we can get it from runtime).",)
start_datetime = String(
default=datetime.datetime.now().isoformat(),
scope=Scope.content,
help="ISO-8601 formatted string representing the start date of this assignment."
)
due_datetime = String(
default=None,
scope=Scope.content,
help="ISO-8601 formatted string representing the end date of this assignment."
)
title = String(
default="",
scope=Scope.content,
help="A title to display to a student (plain text)."
)
prompt = String(
default=DEFAULT_PROMPT,
scope=Scope.content,
help="A prompt to display to a student (plain text)."
)
rubric = List(
default=[],
scope=Scope.content,
help="Instructions and criteria for students giving feedback."
)
rubric_instructions = String(
default=DEFAULT_RUBRIC_INSTRUCTIONS,
scope=Scope.content,
help="Instructions for self and peer assessment."
)
rubric_criteria = List(
default=DEFAULT_RUBRIC_CRITERIA,
scope=Scope.content,
help="The different parts of grading for students giving feedback."
)
rubric_assessments = List(
default=DEFAULT_ASSESSMENT_MODULES,
scope=Scope.content,
help="The requested set of assessments and the order in which to apply them."
)
course_id = String(
default=u"TestCourse",
scope=Scope.content,
help="The course_id associated with this prompt (until we can get it from runtime).",
)
submit_errors = { # Reported to user sometimes, and useful in tests
'ENOSUB': 'API submission is unrequested',
......@@ -261,10 +193,10 @@ class OpenAssessmentBlock(XBlock):
important contextual information. Per @nedbat, the usage_id attribute
uniquely identifies this block in this course, and the user_id uniquely
identifies this student. With the two of them, we can trace all the
interactions emenating from this interaction.
interactions emanating from this interaction.
Useful for logging, debugging, and uniqueification."""
return (self.scope_ids.usage_id, self.scope_ids.user_id)
return self.scope_ids.usage_id, self.scope_ids.user_id
def _get_student_item_dict(self):
"""Create a student_item_dict from our surrounding context.
......@@ -283,15 +215,11 @@ class OpenAssessmentBlock(XBlock):
def student_view(self, context=None):
"""The main view of OpenAssessmentBlock, displayed when viewing courses.
"""
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
trace = self._get_xblock_trace()
student_item_dict = self._get_student_item_dict()
grade_state = self._get_grade_state(student_item_dict)
grade_state = self._get_grade_state()
# All data we intend to pass to the front end.
context_dict = {
"xblock_trace": trace,
......@@ -299,7 +227,7 @@ class OpenAssessmentBlock(XBlock):
"question": self.prompt,
"rubric_instructions": self.rubric_instructions,
"rubric_criteria": self.rubric_criteria,
"rubric_evals": self.rubric_evals,
"rubric_assessments": [assessment.create_ui_model() for assessment in self.rubric_assessments],
"grade_state": grade_state,
}
......@@ -308,26 +236,9 @@ class OpenAssessmentBlock(XBlock):
except SubmissionRequestError:
previous_submissions = []
peer_submission = False
try:
# HACK: Replace with proper workflow.
peer_eval = self._hack_get_peer_eval()
peer_submission = peer_api.get_submission_to_evaluate(
student_item_dict, peer_eval.must_be_graded_by
)
context_dict["peer_submission"] = peer_submission
if peer_eval:
peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
except PeerEvaluationWorkflowError:
# Additional HACK: Without proper workflow, there may not be the
# correct information to complete the request for a peer submission.
# This error should be handled properly once we have a workflow API.
pass
if previous_submissions and peer_submission: # XXX: until workflow better, move on w/ prev submit
peer_module = self._get_assessment_module('peer-assessment')
peer_assessment = peer_module.get_peer_submission(student_item_dict)
if previous_submissions and peer_assessment: # XXX: until workflow better, move on w/ prev submit
template = get_template("static/html/oa_base.html")
context = Context(context_dict)
frag = Fragment(template.render(context))
......@@ -345,36 +256,23 @@ class OpenAssessmentBlock(XBlock):
frag.initialize_js('OpenAssessmentBlock')
return frag
def _hack_get_peer_eval(self):
# HACK: Forcing Peer Eval, we'll get the Eval config.
for next_eval in self.rubric_evals:
if next_eval.eval_type == "peer-evaluation":
return next_eval
@XBlock.json_handler
def assess(self, data, suffix=''):
# HACK: Replace with proper workflow.
peer_eval = self._hack_get_peer_eval()
"""Place an assessment into Openassessment system"""
student_item_dict = self._get_student_item_dict()
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.",
}
evaluation = peer_api.create_evaluation(
data["submission_uuid"],
student_item_dict["student_id"],
int(peer_eval.must_grade),
int(peer_eval.must_be_graded_by),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
evaluation["scored_at"] = str(evaluation["scored_at"])
# TODO Pass name through the handler.
assessment = self._get_assessment_module('peer-assessment')
if assessment:
assessment.assess(
self._get_student_item_dict(),
self.rubric_criteria,
data
)
return evaluation, "Success"
def _get_assessment_module(self, name):
"""Get a configured assessment module by name.
"""
for assessment in self.rubric_assessments:
if assessment.name == name:
return assessment
@XBlock.json_handler
def submit(self, data, suffix=''):
......@@ -405,7 +303,16 @@ class OpenAssessmentBlock(XBlock):
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC,]
return [
(
"OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml')
),
(
"OpenAssessmentBlock Censorship Rubric",
load('static/xml/censorship_rubric_example.xml')
),
]
@staticmethod
def studio_view(context=None):
......@@ -423,7 +330,7 @@ class OpenAssessmentBlock(XBlock):
block = sparser.parse()
return block
def _get_grade_state(self, student_item):
def _get_grade_state(self):
# TODO: Determine if we want to build out grade state right now.
grade_state = {
......
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerAssessmentWorkflowError
from openassessment.xblock.assessment import Assessment
class PeerAssessment(Assessment):
assessment_type = "peer-assessment"
navigation_text = "Your assessment(s) of peer responses"
path = "static/html/oa_peer_assessment.html"
@classmethod
def assess(cls, student_item_dict, rubric_criteria, data):
"""Place an assessment into Openassessment system
"""
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in rubric_criteria),
"feedback": "Not yet implemented.",
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
student_item_dict["student_id"],
int(cls.must_grade),
int(cls.must_be_graded_by),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return assessment, "Success"
def get_peer_submission(self, student_item_dict):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
student_item_dict,
self.must_be_graded_by
)
except PeerAssessmentWorkflowError:
# TODO: Log?
pass
return peer_submission
\ No newline at end of file
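
For context, a hedged sketch of driving the new per-module assess() hook the way OpenAssessmentBlock.assess() does above; the student item and form data are illustrative only:

# Hypothetical invocation; class-level defaults (must_grade=1, must_be_graded_by=0)
# apply unless a configured instance is used instead.
student_item_dict = {
    "student_id": "Bob",
    "course_id": "TestCourse",
    "item_id": "item_one",
    "item_type": "openassessment",
}
data = {"submission_uuid": "1", "points_earned": ["3", "5"]}

assessment, status = PeerAssessment.assess(
    student_item_dict,
    DEFAULT_RUBRIC_CRITERIA,  # the rubric criteria defined in the XBlock module
    data,
)
# status == "Success" when the underlying peer_api.create_assessment call succeeds
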
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
from openassessment.xblock.peer_assessment import PeerAssessment
from openassessment.xblock.self_assessment import SelfAssessment
class ScenarioParser(object):
......@@ -63,35 +65,35 @@ class ScenarioParser(object):
rubric_criteria.append(crit)
return (e.text.strip(), rubric_criteria)
def get_evals(self, evaluations):
"""<evals>
def get_assessments(self, assessments):
"""<assessments>
<!-- There can be multiple types of assessments given in any
arbitrary order, like this self assessment followed by a
peer assessment -->
<self />
<peereval start="2014-12-20T19:00-7:00"
<self-assessment />
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</evals>"""
evaluation_list = []
for ev in evaluations:
evaluation = None
type = ev.tag
if 'peer-evaluation' == type:
evaluation = PeerEvaluation()
elif 'self-evaluation' == type:
evaluation = SelfEvaluation()
if evaluation:
evaluation.name = ev.attrib.get('name', '')
evaluation.start_datetime = ev.attrib.get('start', None)
evaluation.due_datetime = ev.attrib.get('due', None)
evaluation.must_grade = int(ev.attrib.get('must_grade', 1))
evaluation.must_be_graded_by = int(ev.attrib.get('must_be_graded_by', 0))
evaluation_list.append(evaluation)
return evaluation_list
</peer-assessment>"""
assessment_list = []
for asmnt in assessments:
assessment = None
assessment_type = asmnt.tag
if 'peer-assessment' == assessment_type:
assessment = PeerAssessment()
assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
elif 'self-assessment' == assessment_type:
assessment = SelfAssessment()
if assessment:
assessment.name = asmnt.attrib.get('name', '')
assessment.start_datetime = asmnt.attrib.get('start', None)
assessment.due_datetime = asmnt.attrib.get('due', None)
assessment_list.append(assessment)
return assessment_list
def parse(self):
"""Instantiate xblock object from runtime XML definition."""
......@@ -103,32 +105,8 @@ class ScenarioParser(object):
self.xblock.rubric_criteria) = self.get_rubric(child)
elif child.tag == 'title':
self.xblock.title = self.get_title(child)
elif child.tag == 'evals':
self.xblock.rubric_evals = self.get_evals(child)
elif child.tag == 'assessments':
self.xblock.rubric_assessments = self.get_assessments(child)
else:
self.unknown_handler(self.xblock, child)
return self.xblock
class EvaluationModule():
eval_type = None
name = ''
start_datetime = None
due_datetime = None
must_grade = 1
must_be_graded_by = 0
class PeerEvaluation(EvaluationModule):
eval_type = "peer-evaluation"
navigation_text = "Your evaluation(s) of peer responses"
url = "static/html/oa_peer_evaluation.html"
class SelfEvaluation(EvaluationModule):
eval_type = "self-evaluation"
navigation_text = "Your evaluation of your response"
url = "static/html/oa_self_evaluation.html"
\ No newline at end of file
from openassessment.xblock.assessment import Assessment
class SelfAssessment(Assessment):
assessment_type = "self-assessment"
navigation_text = "Your assessment of your response"
path = "static/html/oa_self_assessment.html"
\ No newline at end of file
......@@ -20,9 +20,9 @@
<span class="openassessment__title--sub">
<span class="problem-type problem-type--open-ended-response">Open Ended Response</span>
{% for eval in rubric_evals %}
{% for assessment in rubric_assessments %}
+
<span class="problem-type problem-type--{{ eval.type }}">{{ eval.name }}</span>
<span class="problem-type problem-type--{{ assessment.type }}">{{ assessment.name }}</span>
{% endfor %}
</span>
</h1>
......@@ -35,9 +35,9 @@
<li class="list--nav__item">
<a class="action" href="#openassessment__response">Your response to this problem</a>
</li>
{% for eval in rubric_evals %}
{% for assessment in rubric_assessments %}
<li class="list--nav__item">
<a class="action" href="#openassessment__{{ eval.type }}">{{ eval.navigation_text }}</a>
<a class="action" href="#openassessment__{{ assessment.type }}">{{ assessment.navigation_text }}</a>
</li>
{% endfor %}
<li class="list--nav__item">
......@@ -73,8 +73,8 @@
<!-- STEP: response -->
{% include "static/html/oa_response.html" %}
{% for eval in rubric_evals %}
{% include eval.url %}
{% for assessment in rubric_assessments %}
{% include assessment.path %}
{% endfor %}
</ol>
......
......@@ -11,7 +11,7 @@
<!-- CASE: default/not started -->
<li id="openassessment__peer-evaluation" class="openassessment__steps__step step--peer-evaluation">
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
{#<header class="step__header">#}
<h2 class="step__title">
<span class="step__label">Evaluate Peers' Responses</span>
......@@ -32,34 +32,34 @@
</div>
<div class="step__content">
<ul class="list--peer-evaluations">
<li class="list--peer-evaluations__item">
<article class="peer-evaluation" id="peer-evaluation--001">
<header class="peer-evaluation__header">
<h3 class="peer-evaluation__title">Evaluation #
<span class="peer-evaluation__number--current">1</span> of
<span class="peer-evaluation__number--required">3</span>
<ul class="list--peer-assessments">
<li class="list--peer-assessments__item">
<article class="peer-assessment" id="peer-assessment--001">
<header class="peer-assessment__header">
<h3 class="peer-assessment__title">Assessment #
<span class="peer-assessment__number--current">1</span> of
<span class="peer-assessment__number--required">3</span>
</h3>
<span class="peer-evaluation__expected-time">
<span class="peer-assessment__expected-time">
<span class="label">Expected Time Spent:</span>
<span class="value">20 Minutes</span>
</span>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="peer-evaluation__response">
<div class="peer-assessment__response">
{{ peer_submission.answer }}
</div>
<form id="peer-evaluation--001__evaluation" class="peer-evaluation__evaluation" method="post">
<fieldset class="evaluation__fields">
<legend class="evaluation__instruction">{{ rubric_instructions }}</legend>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields evaluation__rubric">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required evaluation__rubric__question" id="evaluation__rubric__question--{{ criterion.name }}">
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
......@@ -69,8 +69,8 @@
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="evaluation__rubric__question--{{ criterion.name }}" id="evaluation__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="evaluation__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
......@@ -80,16 +80,16 @@
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea evaluation__rubric__question" id="evaluation__rubric__question--004">
<label for="evaluation__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="evaluation__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="peer-evaluation--001__evaluation__submit" class="action action--submit">Submit your evaluation &amp; move to response #2</button>
<button type="submit" id="peer-assessment--001__assessment__submit" class="action action--submit">Submit your assessment &amp; move to response #2</button>
</li>
</ul>
</form>
......
......@@ -4,14 +4,14 @@
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .self-evaluation item needs a unique id attribute formatted as #self-evaluation--###
* each .self-assessment item needs a unique id attribute formatted as #self-assessment--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__self-evaluation" class="openassessment__steps__step step--self-evaluation">
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{# <header class="step__header">#}
<h2 class="step__title">
<span class="step__title__label">Evaluate Your Response</span>
......@@ -25,24 +25,24 @@
{# </header>#}
<div class="step--content">
<article class="self-evaluation" id="self-evaluation">
<header class="self-evaluation__header">
<h3 class="self-evaluation__title">Your Submitted Response</h3>
<article class="self-assessment" id="self-assessment">
<header class="self-assessment__header">
<h3 class="self-assessment__title">Your Submitted Response</h3>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="self-evaluation__response">
<div class="self-assessment__response">
{{ self_submission.answer }}
</div>
<form id="self-evaluation--001__evaluation" class="self-evaluation__evaluation" method="post">
<fieldset class="evaluation__fields">
<legend class="evaluation__instruction">{{ rubric_instructions }}</legend>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields evaluation__rubric">
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required evaluation__rubric__question" id="evaluation__rubric__question--{{ criterion.name }}">
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
......@@ -52,8 +52,8 @@
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="evaluation__rubric__question--{{ criterion.name }}" id="evaluation__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="evaluation__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
......@@ -63,16 +63,16 @@
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea evaluation__rubric__question" id="evaluation__rubric__question--004">
<label for="evaluation__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="evaluation__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="self-evaluation--001__evaluation__submit" class="action action--submit">Submit your evaluation</button>
<button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">Submit your assessment</button>
</li>
</ul>
</form>
......
......@@ -8,8 +8,8 @@ function OpenAssessmentBlock(runtime, element) {
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function prepare_assessment_post(element) {
selector = $("input[type=radio]:checked", element);
values = [];
var selector = $("input[type=radio]:checked", element);
var values = [];
for (i=0; i<selector.length; i++) {
values[i] = selector[i].value;
}
......@@ -17,8 +17,8 @@ function OpenAssessmentBlock(runtime, element) {
}
function displayStatus(result) {
status = result[0]
error_msg = result[1]
var status = result[0];
var error_msg = result[1];
if (status) {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else {
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<assessments>
<self-assessment name="self-assessment" />
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<assessments>
<peer-assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
__author__ = 'stephensanchez'
......@@ -46,13 +46,14 @@ RUBRIC_CONFIG = """
<option val="5">The Elements of Style</option>
</criterion>
</rubric>
<evals>
<peer-evaluation start="2014-12-20T19:00-7:00"
<assessments>
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-evaluation/>
</evals>
<self-assessment/>
</assessments>
</openassessment>
"""
......
......@@ -61,34 +61,34 @@ class TestScenarioParser(TestCase):
self.assertEqual(int(criterion_option_value), 99)
self.assertEqual(criterion_explanation, criterion_option_explain_text)
def test_get_evals(self):
"""Given an <evals> list, return a list of evaluations."""
evals = """<evals>
<selfeval name='0382e03c808e4f2bb12dfdd2d45d5c4b'
def test_get_assessments(self):
"""Given an <assessments> list, return a list of assessment modules."""
assessments = """<assessments>
<self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
must_grade="999"
must_be_graded_by="73" />
<peereval start="2014-12-20T19:00-7:00"
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<selfeval />
</evals>"""
evals_xml = etree.fromstring(evals)
parsed_list = self.test_parser.get_evals(evals_xml)
# Self evaluations take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0]['type'], 'selfeval')
self.assertEqual(parsed_list[0]['name'], '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[0]['must_grade'], 1)
self.assertEqual(parsed_list[0]['must_be_graded_by'], 0)
# Peer evaluations are more interesting
self.assertEqual(parsed_list[1]['type'], 'peereval')
self.assertEqual(parsed_list[1]['name'], '')
self.assertEqual(parsed_list[1]['must_grade'], 5)
self.assertEqual(parsed_list[1]['must_be_graded_by'], 3)
# We can parse arbitrary workflow descriptions as a list of evaluations.
<self-assessment />
</assessments>"""
assessments_xml = etree.fromstring(assessments)
parsed_list = self.test_parser.get_assessments(assessments_xml)
# Self assessments take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0].assessment_type, 'self-assessment')
self.assertEqual(parsed_list[0].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[0].must_grade, 1)
self.assertEqual(parsed_list[0].must_be_graded_by, 0)
# Peer assessments are more interesting
self.assertEqual(parsed_list[1].assessment_type, 'peer-assessment')
self.assertEqual(parsed_list[1].name, '')
self.assertEqual(parsed_list[1].must_grade, 5)
self.assertEqual(parsed_list[1].must_be_graded_by, 3)
# We can parse arbitrary workflow descriptions as a list of assessments.
# Whether or not the workflow system can use them is another matter
self.assertEqual(parsed_list[2]['type'], 'selfeval')
self.assertEqual(parsed_list[2].assessment_type, 'self-assessment')