Commit bee1251e by Stephen Sanchez

Renamed all Evaluation to Assessment (except models). Some refactoring to begin the UI work

parent 716a0e6e
"""Public interface managing the workflow for peer assessments. """Public interface managing the workflow for peer assessments.
The Peer Evaluation Workflow API exposes all public actions required to complete The Peer Assessment Workflow API exposes all public actions required to complete
the workflow for a given submission. the workflow for a given submission.
""" """
...@@ -11,7 +11,7 @@ from django.db import DatabaseError ...@@ -11,7 +11,7 @@ from django.db import DatabaseError
import math import math
from openassessment.peer.models import PeerEvaluation from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer from openassessment.peer.serializers import PeerAssessmentSerializer
from submissions import api as submission_api from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score from submissions.models import Submission, StudentItem, Score
from submissions.serializers import SubmissionSerializer, StudentItemSerializer from submissions.serializers import SubmissionSerializer, StudentItemSerializer
...@@ -21,17 +21,17 @@ logger = logging.getLogger(__name__) ...@@ -21,17 +21,17 @@ logger = logging.getLogger(__name__)
PEER_TYPE = "PE" PEER_TYPE = "PE"
class PeerEvaluationError(Exception): class PeerAssessmentError(Exception):
"""Generic Peer Evaluation Error """Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the Raised when an error occurs while processing a request related to the
Peer Evaluation Workflow. Peer Assessment Workflow.
""" """
pass pass
class PeerEvaluationRequestError(PeerEvaluationError): class PeerAssessmentRequestError(PeerAssessmentError):
"""Error indicating insufficient or incorrect parameters in the request. """Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or incorrect Raised when the request does not contain enough information, or incorrect
...@@ -44,7 +44,7 @@ class PeerEvaluationRequestError(PeerEvaluationError): ...@@ -44,7 +44,7 @@ class PeerEvaluationRequestError(PeerEvaluationError):
self.field_errors = copy.deepcopy(field_errors) self.field_errors = copy.deepcopy(field_errors)
class PeerEvaluationWorkflowError(PeerEvaluationError): class PeerAssessmentWorkflowError(PeerAssessmentError):
"""Error indicating a step in the workflow cannot be completed, """Error indicating a step in the workflow cannot be completed,
Raised when the action taken cannot be completed in the workflow. This can Raised when the action taken cannot be completed in the workflow. This can
...@@ -54,7 +54,7 @@ class PeerEvaluationWorkflowError(PeerEvaluationError): ...@@ -54,7 +54,7 @@ class PeerEvaluationWorkflowError(PeerEvaluationError):
pass pass
class PeerEvaluationInternalError(PeerEvaluationError): class PeerAssessmentInternalError(PeerAssessmentError):
"""Error indicating an internal problem independent of API use. """Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of Raised when an internal error has occurred. This should be independent of
...@@ -64,16 +64,16 @@ class PeerEvaluationInternalError(PeerEvaluationError): ...@@ -64,16 +64,16 @@ class PeerEvaluationInternalError(PeerEvaluationError):
pass pass
def create_evaluation( def create_assessment(
submission_uuid, submission_uuid,
scorer_id, scorer_id,
required_evaluations_for_student, required_assessments_for_student,
required_evaluations_for_submission, required_assessments_for_submission,
assessment_dict, assessment_dict,
scored_at=None): scored_at=None):
"""Creates an evaluation on the given submission. """Creates an assessment on the given submission.
Evaluations are created based on feedback associated with a particular Assessments are created based on feedback associated with a particular
rubric. rubric.
Args: Args:
...@@ -82,27 +82,27 @@ def create_evaluation( ...@@ -82,27 +82,27 @@ def create_evaluation(
Submission model. Submission model.
scorer_id (str): The user ID for the user giving this assessment. This scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission. is required to create an assessment on a submission.
required_evaluations_for_student (int): The number of evaluations required_assessments_for_student (int): The number of assessments
required for the student to receive a score for their submission. required for the student to receive a score for their submission.
required_evaluations_for_submission (int): The number of evaluations required_assessments_for_submission (int): The number of assessments
required on the submission for it to be scored. required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback. assessment contains points_earned, points_possible, and feedback.
scored_at (datetime): Optional argument to override the time in which scored_at (datetime): Optional argument to override the time in which
the evaluation took place. If not specified, scored_at is set to the assessment took place. If not specified, scored_at is set to
now. now.
Returns: Returns:
dict: The dictionary representing the evaluation. This includes the dict: The dictionary representing the assessment. This includes the
points earned, points possible, time scored, scorer id, score type, points earned, points possible, time scored, scorer id, score type,
and feedback. and feedback.
Raises: Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid, or PeerAssessmentRequestError: Raised when the submission_id is invalid, or
the assessment_dict does not contain the required values to create the assessment_dict does not contain the required values to create
an assessment. an assessment.
PeerEvaluationInternalError: Raised when there is an internal error PeerAssessmentInternalError: Raised when there is an internal error
while creating a new evaluation. while creating a new assessment.
Examples: Examples:
>>> assessment_dict = dict( >>> assessment_dict = dict(
...@@ -110,7 +110,7 @@ def create_evaluation( ...@@ -110,7 +110,7 @@ def create_evaluation(
>>> points_possible=12, >>> points_possible=12,
>>> feedback="Your submission was thrilling.", >>> feedback="Your submission was thrilling.",
>>> ) >>> )
>>> create_evaluation("1", "Tim", assessment_dict) >>> create_assessment("1", "Tim", assessment_dict)
{ {
'points_earned': 6, 'points_earned': 6,
'points_possible': 12, 'points_possible': 12,
...@@ -122,7 +122,7 @@ def create_evaluation( ...@@ -122,7 +122,7 @@ def create_evaluation(
""" """
try: try:
submission = Submission.objects.get(uuid=submission_uuid) submission = Submission.objects.get(uuid=submission_uuid)
peer_evaluation = { peer_assessment = {
"scorer_id": scorer_id, "scorer_id": scorer_id,
"submission": submission.pk, "submission": submission.pk,
"points_earned": sum(assessment_dict["points_earned"]), "points_earned": sum(assessment_dict["points_earned"]),
...@@ -131,11 +131,11 @@ def create_evaluation( ...@@ -131,11 +131,11 @@ def create_evaluation(
"feedback": assessment_dict["feedback"], "feedback": assessment_dict["feedback"],
} }
if scored_at: if scored_at:
peer_evaluation["scored_at"] = scored_at peer_assessment["scored_at"] = scored_at
peer_serializer = PeerEvaluationSerializer(data=peer_evaluation) peer_serializer = PeerAssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid(): if not peer_serializer.is_valid():
raise PeerEvaluationRequestError(peer_serializer.errors) raise PeerAssessmentRequestError(peer_serializer.errors)
peer_serializer.save() peer_serializer.save()
# Check if the submission is finished and its Author has graded enough. # Check if the submission is finished and its Author has graded enough.
...@@ -143,11 +143,11 @@ def create_evaluation( ...@@ -143,11 +143,11 @@ def create_evaluation(
_check_if_finished_and_create_score( _check_if_finished_and_create_score(
student_item, student_item,
submission, submission,
required_evaluations_for_student, required_assessments_for_student,
required_evaluations_for_submission required_assessments_for_submission
) )
# Check if the grader is finished and has enough evaluations # Check if the grader is finished and has enough assessments
scorer_item = StudentItem.objects.get( scorer_item = StudentItem.objects.get(
student_id=scorer_id, student_id=scorer_id,
item_id=student_item.item_id, item_id=student_item.item_id,
...@@ -162,28 +162,28 @@ def create_evaluation( ...@@ -162,28 +162,28 @@ def create_evaluation(
_check_if_finished_and_create_score( _check_if_finished_and_create_score(
scorer_item, scorer_item,
scorer_submissions[0], scorer_submissions[0],
required_evaluations_for_student, required_assessments_for_student,
required_evaluations_for_submission required_assessments_for_submission
) )
return peer_serializer.data return peer_serializer.data
except DatabaseError: except DatabaseError:
error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format( error_message = u"An error occurred while creating assessment {} for submission: {} by: {}".format(
assessment_dict, assessment_dict,
submission_uuid, submission_uuid,
scorer_id scorer_id
) )
logger.exception(error_message) logger.exception(error_message)
raise PeerEvaluationInternalError(error_message) raise PeerAssessmentInternalError(error_message)
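For reference, a call against the renamed API now looks like the sketch below. It assumes a configured Django environment with the submissions app installed and an existing submission; the UUID, user IDs, and point values are placeholders.

    # Hedged usage sketch; submission UUID and user IDs are hypothetical.
    from openassessment.peer import api as peer_api

    assessment_dict = {
        "points_earned": [6],   # one entry per rubric criterion; the API sums these
        "points_possible": 12,
        "feedback": "Clear thesis; the second paragraph wanders.",
    }
    assessment = peer_api.create_assessment(
        "some-submission-uuid",  # uuid of the submission being assessed
        "Tim",                   # scorer_id
        3,                       # required_assessments_for_student
        3,                       # required_assessments_for_submission
        assessment_dict,
    )
    assert assessment["points_earned"] == 6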
def _check_if_finished_and_create_score(student_item,
                                        submission,
-                                        required_evaluations_for_student,
-                                        required_evaluations_for_submission):
+                                        required_assessments_for_student,
+                                        required_assessments_for_submission):
    """Basic function for checking if a student is finished with peer workflow.

-    Checks if the student is finished with the peer evaluation workflow. If the
+    Checks if the student is finished with the peer assessment workflow. If the
    student already has a final grade calculated, there is no need to proceed.
    If they do not have a grade, the student has a final grade calculated.

@@ -193,19 +193,19 @@ def _check_if_finished_and_create_score(student_item,
    finished_evaluating = has_finished_required_evaluating(
        student_item.student_id,
-        required_evaluations_for_student
+        required_assessments_for_student
    )
-    evaluations = PeerEvaluation.objects.filter(submission=submission)
-    submission_finished = evaluations.count() >= required_evaluations_for_submission
+    assessments = PeerEvaluation.objects.filter(submission=submission)
+    submission_finished = assessments.count() >= required_assessments_for_submission
    scores = []
-    for evaluation in evaluations:
-        scores.append(evaluation.points_earned)
+    for assessment in assessments:
+        scores.append(assessment.points_earned)
    if finished_evaluating and submission_finished:
        submission_api.set_score(
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            _calculate_final_score(scores),
-            evaluations[0].points_possible
+            assessments[0].points_possible
        )

@@ -228,7 +228,7 @@ def _calculate_final_score(scores):
        return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
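Only the last line of _calculate_final_score is visible in this hunk: the even-count branch, which averages the two middle scores and rounds up. A standalone, runnable sketch of the whole median rule, assuming the elided top of the function sorts the scores, returns 0 for an empty list (as the test_choose_score test below asserts), and computes median as ceil(len(scores) / 2):

    import math

    def median_score(scores):
        # Sketch: middle score for an odd count; rounded-up mean of the two
        # middle scores for an even count; 0 for no scores.
        scores = sorted(scores)
        if not scores:
            return 0
        median = int(math.ceil(len(scores) / float(2)))
        if len(scores) % 2:
            return scores[median - 1]
        return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))

    assert median_score([]) == 0
    assert median_score([5, 6, 12]) == 6      # odd count: the middle score
    assert median_score([5, 6, 12, 16]) == 9  # even count: ceil((6 + 12) / 2)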
-def has_finished_required_evaluating(student_id, required_evaluations):
+def has_finished_required_evaluating(student_id, required_assessments):
    """Check if a student still needs to evaluate more submissions

    Per the contract of the peer assessment workflow, a student must evaluate a

@@ -237,7 +237,7 @@ def has_finished_required_evaluating(student_id, required_evaluations):
    Args:
        student_id (str): The student in the peer grading workflow to check for
            peer workflow criteria. This argument is required.
-        required_evaluations (int): The number of evaluations a student has to
+        required_assessments (int): The number of assessments a student has to
            submit before receiving the feedback on their submission. This is a
            required argument.

@@ -247,9 +247,9 @@ def has_finished_required_evaluating(student_id, required_evaluations):
            evaluate more peer submissions.

    Raises:
-        PeerEvaluationRequestError: Raised when the student_id is invalid, or
-            the required_evaluations is not a positive integer.
-        PeerEvaluationInternalError: Raised when there is an internal error
+        PeerAssessmentRequestError: Raised when the student_id is invalid, or
+            the required_assessments is not a positive integer.
+        PeerAssessmentInternalError: Raised when there is an internal error
            while evaluating this workflow rule.

    Examples:

@@ -257,37 +257,37 @@ def has_finished_required_evaluating(student_id, required_evaluations):
        True

    """
-    if required_evaluations < 0:
-        raise PeerEvaluationRequestError(
-            "Required Evaluation count must be a positive integer.")
+    if required_assessments < 0:
+        raise PeerAssessmentRequestError(
+            "Required Assessment count must be a positive integer.")
    return PeerEvaluation.objects.filter(
        scorer_id=student_id
-    ).count() >= required_evaluations
+    ).count() >= required_assessments
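The rename leaves the calling convention intact; a brief sketch (configured Django environment assumed, student ID hypothetical):

    from openassessment.peer import api as peer_api

    # True once "Tim" has scored at least three peer submissions.
    if peer_api.has_finished_required_evaluating("Tim", 3):
        pass  # e.g. release Tim's own score

    # Negative counts are rejected up front.
    try:
        peer_api.has_finished_required_evaluating("Tim", -1)
    except peer_api.PeerAssessmentRequestError:
        pass  # "Required Assessment count must be a positive integer."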
-def get_evaluations(submission_id):
-    """Retrieve the evaluations for a submission.
+def get_assessments(submission_id):
+    """Retrieve the assessments for a submission.

-    Retrieves all the evaluations for a submission. This API returns related
+    Retrieves all the assessments for a submission. This API returns related
    feedback without making any assumptions about grading. Any outstanding
-    evaluations associated with this submission will not be returned.
+    assessments associated with this submission will not be returned.

    Args:
-        submission_id (str): The submission all the requested evaluations are
+        submission_id (str): The submission all the requested assessments are
            associated with. Required.

    Returns:
        list(dict): A list of dictionaries, where each dictionary represents a
-            separate evaluation. Each evaluation contains points earned, points
+            separate assessment. Each assessment contains points earned, points
            possible, time scored, scorer id, score type, and feedback.

    Raises:
-        PeerEvaluationRequestError: Raised when the submission_id is invalid.
-        PeerEvaluationInternalError: Raised when there is an internal error
-            while retrieving the evaluations associated with this submission.
+        PeerAssessmentRequestError: Raised when the submission_id is invalid.
+        PeerAssessmentInternalError: Raised when there is an internal error
+            while retrieving the assessments associated with this submission.

    Examples:
-        >>> get_evaluations("1")
+        >>> get_assessments("1")
        [
            {
                'points_earned': 6,

@@ -308,45 +308,45 @@ def get_evaluations(submission_id):
    """
    try:
        submission = Submission.objects.get(uuid=submission_id)
-        evaluations = PeerEvaluation.objects.filter(submission=submission)
-        serializer = PeerEvaluationSerializer(evaluations, many=True)
+        assessments = PeerEvaluation.objects.filter(submission=submission)
+        serializer = PeerAssessmentSerializer(assessments, many=True)
        return serializer.data
    except DatabaseError:
        error_message = (
-            u"Error getting evaluations for submission {}".format(submission_id)
+            u"Error getting assessments for submission {}".format(submission_id)
        )
        logger.exception(error_message)
-        raise PeerEvaluationInternalError(error_message)
-def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
+def get_submission_to_assess(student_item_dict, required_num_assessments):
    """Get a submission to peer evaluate.

-    Retrieves a submission for evaluation for the given student_item. This will
+    Retrieves a submission for assessment for the given student_item. This will
    not return a submission submitted by the requesting scorer. The submission
    returned (TODO: will be) is based on a priority queue. Submissions with the
-    fewest evaluations and the most active students will be prioritized over
-    submissions from students who are not as active in the evaluation process.
+    fewest assessments and the most active students will be prioritized over
+    submissions from students who are not as active in the assessment process.

    Args:
        student_item_dict (dict): The student item information from the student
-            requesting a submission for evaluation. The dict contains an
+            requesting a submission for assessment. The dict contains an
            item_id, course_id, and item_type, used to identify the unique
            question for the review, while the student_id is used to explicitly
            avoid giving the student their own submission.
-        required_num_evaluations (int): The number of evaluations a submission
-            requires before it has completed the peer evaluation process.
+        required_num_assessments (int): The number of assessments a submission
+            requires before it has completed the peer assessment process.

    Returns:
-        dict: A peer submission for evaluation. This contains a 'student_item',
+        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
-            used for evaluation.
+            used for assessment.

    Raises:
-        PeerEvaluationRequestError: Raised when the request parameters are
+        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
-        PeerEvaluationInternalError:
-        PeerEvaluationWorkflowError:
+        PeerAssessmentInternalError:
+        PeerAssessmentWorkflowError:

    Examples:
        >>> student_item_dict = dict(

@@ -355,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
        >>>     item_type="type_one",
        >>>     student_id="Bob",
        >>> )
-        >>> get_submission_to_evaluate(student_item_dict, 3)
+        >>> get_submission_to_assess(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,

@@ -375,26 +375,26 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
    submission = _get_first_submission_not_evaluated(
        student_items,
        student_item_dict["student_id"],
-        required_num_evaluations
+        required_num_assessments
    )
    if not submission:
-        raise PeerEvaluationWorkflowError(
-            "There are no submissions available for evaluation."
+        raise PeerAssessmentWorkflowError(
+            "There are no submissions available for assessment."
        )
    return SubmissionSerializer(submission).data
-def _get_first_submission_not_evaluated(student_items, student_id, required_num_evaluations):
+def _get_first_submission_not_evaluated(student_items, student_id, required_num_assessments):
    # TODO: We need a priority queue.
    submissions = Submission.objects.filter(student_item__in=student_items).order_by(
        "submitted_at",
        "-attempt_number"
    )
    for submission in submissions:
-        evaluations = PeerEvaluation.objects.filter(submission=submission)
-        if evaluations.count() < required_num_evaluations:
+        assessments = PeerEvaluation.objects.filter(submission=submission)
+        if assessments.count() < required_num_assessments:
            already_evaluated = False
-            for evaluation in evaluations:
-                already_evaluated = already_evaluated or evaluation.scorer_id == student_id
+            for assessment in assessments:
+                already_evaluated = already_evaluated or assessment.scorer_id == student_id
            if not already_evaluated:
                return submission
\ No newline at end of file
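This linear scan is the stop-gap for the TODO'd priority queue: take the oldest submission that still needs assessments and that this scorer has not already assessed. The same rule with plain data structures, runnable on its own (names hypothetical):

    def first_submission_not_evaluated(submissions, scorer_id, required_num):
        # Plain-data sketch of the selection rule above: submissions is an
        # iterable of (submission_id, [scorer_ids]) pairs, oldest first.
        for submission_id, scorers in submissions:
            if len(scorers) < required_num and scorer_id not in scorers:
                return submission_id
        return None

    queue = [("sub-1", ["Tim", "Sally", "Jim"]), ("sub-2", ["Tim"]), ("sub-3", [])]
    assert first_submission_not_evaluated(queue, "Tim", 3) == "sub-3"
    assert first_submission_not_evaluated(queue, "Sally", 3) == "sub-2"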
@@ -6,7 +6,7 @@ from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation


-class PeerEvaluationSerializer(serializers.ModelSerializer):
+class PeerAssessmentSerializer(serializers.ModelSerializer):
    class Meta:
        model = PeerEvaluation
        fields = (
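The serializer still points at the PeerEvaluation model, since models are deliberately excluded from this rename. Validation follows the standard REST framework pattern that create_assessment above uses; the payload below is hypothetical, and the field list is truncated in this hunk:

    # Sketch of the validate-then-save pattern; payload values are hypothetical.
    serializer = PeerAssessmentSerializer(data={
        "scorer_id": "Tim",
        "submission": 1,          # Submission primary key
        "points_earned": 6,
        "points_possible": 12,
        "score_type": "PE",
        "feedback": "Well argued.",
    })
    if not serializer.is_valid():
        raise PeerAssessmentRequestError(serializer.errors)
    assessment = serializer.save()  # persists and returns a PeerEvaluation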
......
@@ -32,7 +32,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
    def test_create_evaluation(self):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        evaluation = api.create_evaluation(
+        evaluation = api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,

@@ -44,21 +44,21 @@ class TestApi(TestCase):
    @file_data('test_valid_evaluations.json')
    def test_get_evaluations(self, assessment_dict):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,
            REQUIRED_GRADED_BY,
            assessment_dict
        )
-        evaluations = api.get_evaluations(submission["uuid"])
+        evaluations = api.get_assessments(submission["uuid"])
        self.assertEqual(1, len(evaluations))
        self._assert_evaluation(evaluations[0], **assessment_dict)

    @file_data('test_valid_evaluations.json')
    def test_get_evaluations_with_date(self, assessment_dict):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,

@@ -66,7 +66,7 @@ class TestApi(TestCase):
            assessment_dict,
            MONDAY
        )
-        evaluations = api.get_evaluations(submission["uuid"])
+        evaluations = api.get_assessments(submission["uuid"])
        self.assertEqual(1, len(evaluations))
        self._assert_evaluation(evaluations[0], **assessment_dict)
        self.assertEqual(evaluations[0]["scored_at"], MONDAY)

@@ -85,22 +85,22 @@ class TestApi(TestCase):
        self.assertFalse(scores)
        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

-        api.create_evaluation(
+        api.create_assessment(
            bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
-        api.create_evaluation(
+        api.create_assessment(
            sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

-        api.create_evaluation(
+        api.create_assessment(
            jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

-        api.create_evaluation(
+        api.create_assessment(
            buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
        self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

-        api.create_evaluation(
+        api.create_assessment(
            xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
        self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))

@@ -110,13 +110,13 @@ class TestApi(TestCase):
        scores = sub_api.get_score(STUDENT_ITEM)
        self.assertFalse(scores)

-        api.create_evaluation(
+        api.create_assessment(
            tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
-        api.create_evaluation(
+        api.create_assessment(
            tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )
-        api.create_evaluation(
+        api.create_assessment(
            tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
        )

@@ -127,7 +127,7 @@ class TestApi(TestCase):
        self.assertEqual(12, scores[0]["points_possible"])

-    @raises(api.PeerEvaluationRequestError)
+    @raises(api.PeerAssessmentRequestError)
    def test_bad_configuration(self):
        api.has_finished_required_evaluating("Tim", -1)

@@ -139,27 +139,27 @@ class TestApi(TestCase):
        )
        self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)

-        submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
+        submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
        self.assertIsNotNone(submission)
        self.assertEqual(submission["answer"], u"Bob's answer")
        self.assertEqual(submission["student_item"], 2)
        self.assertEqual(submission["attempt_number"], 1)

-    @raises(api.PeerEvaluationWorkflowError)
+    @raises(api.PeerAssessmentWorkflowError)
    def test_no_submissions_to_evaluate_for_tim(self):
        self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        api.get_submission_to_evaluate(STUDENT_ITEM, 3)
+        api.get_submission_to_assess(STUDENT_ITEM, 3)

    """
    Some Error Checking Tests against DB failures.
    """

    @patch.object(Submission.objects, 'get')
-    @raises(api.PeerEvaluationInternalError)
+    @raises(api.PeerAssessmentInternalError)
    def test_error_on_evaluation_creation(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,

@@ -172,7 +172,7 @@ class TestApi(TestCase):
    @raises(sub_api.SubmissionInternalError)
    def test_error_on_get_evaluation(self, mock_filter):
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            REQUIRED_GRADED,

@@ -181,7 +181,7 @@ class TestApi(TestCase):
            MONDAY
        )
        mock_filter.side_effect = DatabaseError("Bad things happened")
-        api.get_evaluations(submission["uuid"])
+        api.get_assessments(submission["uuid"])

    def test_choose_score(self):
        self.assertEqual(0, api._calculate_final_score([]))
......
@@ -2,7 +2,7 @@ import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response

-from openassessment.peer.api import get_evaluations
+from openassessment.peer.api import get_assessments
from submissions.api import SubmissionRequestError, get_submissions

log = logging.getLogger(__name__)

@@ -38,7 +38,7 @@ def get_evaluations_for_student_item(request, course_id, student_id, item_id):
        submissions = get_submissions(student_item_dict)
        evaluations = []
        for submission in submissions:
-            submission_evaluations = get_evaluations(submission["uuid"])
+            submission_evaluations = get_assessments(submission["uuid"])
            for evaluation in submission_evaluations:
                evaluation["submission_uuid"] = submission["uuid"]
                evaluations.append(evaluation)
......
+class Assessment(object):
+    assessment_type = None
+    name = ''
+    start_datetime = None
+    due_datetime = None
+    must_grade = 1
+    must_be_graded_by = 0
+    navigation_text = ""
+    path = ""
+
+    def create_ui_model(self):
+        return {
+            "assessment_type": self.assessment_type,
+            "name": self.name,
+            "start_datetime": self.start_datetime,
+            "due_datetime": self.due_datetime,
+            "must_grade": self.must_grade,
+            "must_be_graded_by": self.must_be_graded_by,
+            "navigation_text": self.navigation_text,
+            "path": self.path
+        }
\ No newline at end of file

__author__ = 'stephensanchez'
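The new Assessment base class is a plain value object whose create_ui_model flattens its attributes into the dict the template context expects. A runnable illustration against the class above:

    assessment = Assessment()
    assessment.assessment_type = "peer-assessment"
    assessment.name = "peer-assessment"
    assessment.must_grade = 5
    assessment.must_be_graded_by = 3

    ui_model = assessment.create_ui_model()
    assert ui_model["assessment_type"] == "peer-assessment"
    assert ui_model["must_grade"] == 5
    assert ui_model["path"] == ""  # class default; subclasses override this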
@@ -4,16 +4,15 @@ from django.template.context import Context
import pkg_resources

from django.template.loader import get_template
-from openassessment.peer.api import PeerEvaluationWorkflowError

import datetime
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment

+from openassessment.xblock.peer_assessment import PeerAssessment
from submissions.api import SubmissionRequestError
from submissions import api
-from openassessment.peer import api as peer_api

from scenario_parser import ScenarioParser
@@ -41,20 +40,31 @@ DEFAULT_RUBRIC_CRITERIA = [
        'instructions': "Determine if there is a unifying theme or main idea.",
        'total_value': 5,
        'options': [
-            (0, "Poor", "Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.",),
-            (3, "Fair", "Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.",),
-            (5, "Good", "Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.",),
+            (0, "Poor", """Difficult for the reader to discern the main idea.
+                Too brief or too repetitive to establish or maintain a focus.""",),
+            (3, "Fair", """Presents a unifying theme or main idea, but may
+                include minor tangents. Stays somewhat focused on topic and
+                task.""",),
+            (5, "Good", """Presents a unifying theme or main idea without going
+                off on tangents. Stays completely focused on topic and task.""",),
        ],
    },
    {
        'name': "Content",
-        'instructions': "Evaluate the content of the submission",
+        'instructions': "Assess the content of the submission",
        'total_value': 5,
        'options': [
-            (0, "Poor", "Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.",),
-            (1, "Fair", "Includes little information and few or no details. Explores only one or two facets of the topic.",),
-            (3, "Good", "Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.",),
-            (5, "Excellent", "Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.",),
+            (0, "Poor", """Includes little information with few or no details or
+                unrelated details. Unsuccessful in attempts to explore any
+                facets of the topic.""",),
+            (1, "Fair", """Includes little information and few or no details.
+                Explores only one or two facets of the topic.""",),
+            (3, "Good", """Includes sufficient information and supporting
+                details. (Details may not be fully developed; ideas may be
+                listed.) Explores some facets of the topic.""",),
+            (5, "Excellent", """Includes in-depth information and exceptional
+                supporting details that are fully developed. Explores all
+                facets of the topic.""",),
        ],
    },
    {

@@ -62,9 +72,13 @@ DEFAULT_RUBRIC_CRITERIA = [
        'instructions': "Determine if the submission is well organized.",
        'total_value': 2,
        'options': [
-            (0, "Poor", "Ideas organized illogically, transitions weak, and response difficult to follow.",),
-            (1, "Fair", "Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.",),
-            (2, "Good", "Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.",),
+            (0, "Poor", """Ideas organized illogically, transitions weak, and
+                response difficult to follow.""",),
+            (1, "Fair", """Attempts to logically organize ideas. Attempts to
+                progress in an order that enhances meaning, and demonstrates use
+                of transitions.""",),
+            (2, "Good", """Ideas organized logically. Progresses in an order
+                that enhances meaning. Includes smooth transitions.""",),
        ],
    },
    {

@@ -72,9 +86,15 @@ DEFAULT_RUBRIC_CRITERIA = [
        'instructions': "Read for style.",
        'total_value': 2,
        'options': [
-            (0, "Poor", "Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.",),
-            (1, "Fair", "Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).",),
-            (2, "Good", "Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.",),
+            (0, "Poor", """Contains limited vocabulary, with many words used
+                incorrectly. Demonstrates problems with sentence patterns.""",),
+            (1, "Fair", """Contains basic vocabulary, with words that are
+                predictable and common. Contains mostly simple sentences
+                (although there may be an attempt at more varied sentence
+                patterns).""",),
+            (2, "Good", """Includes vocabulary to make explanations detailed and
+                precise. Includes varied sentence patterns, including complex
+                sentences.""",),
        ],
    },
    {

@@ -82,170 +102,82 @@ DEFAULT_RUBRIC_CRITERIA = [
        'instructions': "Read for style.",
        'total_value': 2,
        'options': [
-            (0, "Poor", "Demonstrates language and tone that may be inappropriate to task and reader.",),
-            (1, "Fair", "Demonstrates an attempt to adjust language and tone to task and reader.",),
-            (2, "Good", "Demonstrates effective adjustment of language and tone to task and reader.",),
+            (0, "Poor", """Demonstrates language and tone that may be
+                inappropriate to task and reader.""",),
+            (1, "Fair", """Demonstrates an attempt to adjust language and tone
+                to task and reader.""",),
+            (2, "Good", """Demonstrates effective adjustment of language and
+                tone to task and reader.""",),
        ],
    }
]

-DEFAULT_EVAL_MODULES = [
-    {
-        'type': "peereval",
-        'name': "peereval",
-        'start_datetime': datetime.datetime.now,
-        'due_datetime': None,
-        'must_grade': 5,
-        'must_be_graded_by': 3,
-    },
-]
+DEFAULT_PEER_ASSESSMENT = PeerAssessment()
+DEFAULT_PEER_ASSESSMENT.name = "peer-assessment"
+DEFAULT_PEER_ASSESSMENT.start_datetime = datetime.datetime.now().isoformat()
+DEFAULT_PEER_ASSESSMENT.must_grade = 5
+DEFAULT_PEER_ASSESSMENT.must_be_graded_by = 3

+DEFAULT_ASSESSMENT_MODULES = [
+    DEFAULT_PEER_ASSESSMENT,
+]
-EXAMPLE_POVERTY_RUBRIC = (
-    "OpenAssessmentBlock Poverty Rubric",
-    """
-    <vertical_demo>
-    <openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
-        <title>
-            Global Poverty
-        </title>
-        <prompt>
-            Given the state of the world today, what do you think should be done to
-            combat poverty?
-        </prompt>
-        <rubric>
-            Read for conciseness, clarity of thought, and form.
-            <criterion name="concise">
-                How concise is it?
-                <option val="0">(0) Neal Stephenson (late)
-                    <explain>
-                        In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
-                        While hilarious, in recent years his work has been anything but 'concise'.
-                    </explain>
-                </option>
-                <option val="1">(1) HP Lovecraft
-                    <explain>
-                        If the author wrote something cyclopean that staggers the mind, score it thus.
-                    </explain>
-                </option>
-                <option val="3">(3) Robert Heinlein
-                    <explain>
-                        Tight prose that conveys a wealth of information about the world in relatively
-                        few words. Example, "The door irised open and he stepped inside."
-                    </explain>
-                </option>
-                <option val="4">(4) Neal Stephenson (early)
-                    <explain>
-                        When Stephenson still had an editor, his prose was dense, with anecdotes about
-                        nitrox abuse implying main characters' whole life stories.
-                    </explain>
-                </option>
-                <option val="5">(5) Earnest Hemingway
-                    <explain>
-                        Score the work this way if it makes you weep, and the removal of a single
-                        word would make you sneer.
-                    </explain>
-                </option>
-            </criterion>
-            <criterion name="clearheaded">
-                How clear is the thinking?
-                <option val="0">(0) Yogi Berra</option>
-                <option val="1">(1) Hunter S. Thompson</option>
-                <option val="2">(2) Robert Heinlein</option>
-                <option val="3">(3) Isaac Asimov</option>
-                <option val="10">(10) Spock
-                    <explain>
-                        Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
-                        and unemotional examination of the facts. This is the only item explained in this category,
-                        to show that explained and unexplained items can be mixed.
-                    </explain>
-                </option>
-            </criterion>
-            <criterion name="form">
-                Lastly, how is it's form? Punctuation, grammar, and spelling all count.
-                <option val="0">(0) lolcats</option>
-                <option val="1">(1) Facebook</option>
-                <option val="2">(2) Reddit</option>
-                <option val="3">(3) metafilter</option>
-                <option val="4">(4) Usenet, 1996</option>
-                <option val="5">(5) The Elements of Style</option>
-            </criterion>
-        </rubric>
-        <evals>
-            <peer-evaluation start="2014-12-20T19:00-7:00"
-                name="Peer Evaluation"
-                due="2014-12-21T22:22-7:00"
-                must_grade="5"
-                must_be_graded_by="3" />
-            <self-evaluation name="Self Evaluation" />
-        </evals>
-    </openassessment>
-    </vertical_demo>
-    """
-)
-
-EXAMPLE_CENSORSHIP_RUBRIC = (
-    "OpenAssessmentBlock Censorship Rubric",
-    """
-    <vertical_demo>
-    <openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
-        <title>
-            Censorship in Public Libraries
-        </title>
-        <prompt>
-            What do you think about censorship in libraries? I think it's pretty great.
-        </prompt>
-        <rubric>
-            Read for conciseness, clarity of thought, and form.
-            <criterion name="concise">
-                How concise is it?
-                <option val="0">The Bible</option>
-                <option val="1">Earnest Hemingway</option>
-                <option val="3">Matsuo Basho</option>
-            </criterion>
-            <criterion name="clearheaded">
-                How clear is the thinking?
-                <option val="0">Eric</option>
-                <option val="1">John</option>
-                <option val="2">Ian</option>
-            </criterion>
-            <criterion name="form">
-                Lastly, how is it's form? Punctuation, grammar, and spelling all count.
-                <option val="0">IRC</option>
-                <option val="1">Real Email</option>
-                <option val="2">Old-timey letters</option>
-            </criterion>
-        </rubric>
-        <evals>
-            <self-evaluation name="Self Evaluation" />
-            <peer-evaluation name="Peer Evaluation"
-                start="2014-12-20T19:00-7:00"
-                due="2014-12-21T22:22-7:00"
-                must_grade="5"
-                must_be_graded_by="3" />
-        </evals>
-    </openassessment>
-    </vertical_demo>
-    """
-)
+def load(path):
+    """Handy helper for getting resources from our kit."""
+    data = pkg_resources.resource_string(__name__, path)
+    return data.decode("utf8")
class OpenAssessmentBlock(XBlock):
    """Displays a question and gives an area where students can compose a response."""

-    start_datetime = String(default=datetime.datetime.now().isoformat(), scope=Scope.content, help="ISO-8601 formatted string representing the start date of this assignment.")
-    due_datetime = String(default=None, scope=Scope.content, help="ISO-8601 formatted string representing the end date of this assignment.")
-    title = String(default="", scope=Scope.content, help="A title to display to a student (plain text).")
-    prompt = String(default=DEFAULT_PROMPT, scope=Scope.content, help="A prompt to display to a student (plain text).")
-    rubric = List(default=[], scope=Scope.content, help="Instructions and criteria for students giving feedback.")
-    rubric_instructions = String(default=DEFAULT_RUBRIC_INSTRUCTIONS, scope=Scope.content, help="Instructions for self and peer assessment.")
-    rubric_criteria = List(default=DEFAULT_RUBRIC_CRITERIA, scope=Scope.content, help="The different parts of grading for students giving feedback.")
-    rubric_evals = List(default=DEFAULT_EVAL_MODULES, scope=Scope.content, help="The requested set of evaluations and the order in which to apply them.")
-    course_id = String(default=u"TestCourse", scope=Scope.content, help="The course_id associated with this prompt (until we can get it from runtime).",)
+    start_datetime = String(
+        default=datetime.datetime.now().isoformat(),
+        scope=Scope.content,
+        help="ISO-8601 formatted string representing the start date of this assignment."
+    )
+    due_datetime = String(
+        default=None,
+        scope=Scope.content,
+        help="ISO-8601 formatted string representing the end date of this assignment."
+    )
+    title = String(
+        default="",
+        scope=Scope.content,
+        help="A title to display to a student (plain text)."
+    )
+    prompt = String(
+        default=DEFAULT_PROMPT,
+        scope=Scope.content,
+        help="A prompt to display to a student (plain text)."
+    )
+    rubric = List(
+        default=[],
+        scope=Scope.content,
+        help="Instructions and criteria for students giving feedback."
+    )
+    rubric_instructions = String(
+        default=DEFAULT_RUBRIC_INSTRUCTIONS,
+        scope=Scope.content,
+        help="Instructions for self and peer assessment."
+    )
+    rubric_criteria = List(
+        default=DEFAULT_RUBRIC_CRITERIA,
+        scope=Scope.content,
+        help="The different parts of grading for students giving feedback."
+    )
+    rubric_assessments = List(
+        default=DEFAULT_ASSESSMENT_MODULES,
+        scope=Scope.content,
+        help="The requested set of assessments and the order in which to apply them."
+    )
+    course_id = String(
+        default=u"TestCourse",
+        scope=Scope.content,
+        help="The course_id associated with this prompt (until we can get it from runtime).",
+    )

    submit_errors = {  # Reported to user sometimes, and useful in tests
        'ENOSUB': 'API submission is unrequested',

@@ -261,10 +193,10 @@ class OpenAssessmentBlock(XBlock):
        important contextual information. Per @nedbat, the usage_id attribute
        uniquely identifies this block in this course, and the user_id uniquely
        identifies this student. With the two of them, we can trace all the
-        interactions emenating from this interaction.
+        interactions emanating from this interaction.

        Useful for logging, debugging, and uniqueification."""
-        return (self.scope_ids.usage_id, self.scope_ids.user_id)
+        return self.scope_ids.usage_id, self.scope_ids.user_id
    def _get_student_item_dict(self):
        """Create a student_item_dict from our surrounding context.

@@ -283,15 +215,11 @@ class OpenAssessmentBlock(XBlock):
    def student_view(self, context=None):
        """The main view of OpenAssessmentBlock, displayed when viewing courses.
        """
-        def load(path):
-            """Handy helper for getting resources from our kit."""
-            data = pkg_resources.resource_string(__name__, path)
-            return data.decode("utf8")

        trace = self._get_xblock_trace()
        student_item_dict = self._get_student_item_dict()
-        grade_state = self._get_grade_state(student_item_dict)
+        grade_state = self._get_grade_state()

        # All data we intend to pass to the front end.
        context_dict = {
            "xblock_trace": trace,

@@ -299,7 +227,7 @@ class OpenAssessmentBlock(XBlock):
            "question": self.prompt,
            "rubric_instructions": self.rubric_instructions,
            "rubric_criteria": self.rubric_criteria,
-            "rubric_evals": self.rubric_evals,
+            "rubric_assessments": [assessment.create_ui_model() for assessment in self.rubric_assessments],
            "grade_state": grade_state,
        }
@@ -308,26 +236,9 @@ class OpenAssessmentBlock(XBlock):
        except SubmissionRequestError:
            previous_submissions = []

-        peer_submission = False
-        try:
-            # HACK: Replace with proper workflow.
-            peer_eval = self._hack_get_peer_eval()
-            peer_submission = peer_api.get_submission_to_evaluate(
-                student_item_dict, peer_eval.must_be_graded_by
-            )
-            context_dict["peer_submission"] = peer_submission
-            if peer_eval:
-                peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
-        except PeerEvaluationWorkflowError:
-            # Additional HACK: Without proper workflow, there may not be the
-            # correct information to complete the request for a peer submission.
-            # This error should be handled properly once we have a workflow API.
-            pass
-
-        if previous_submissions and peer_submission:  # XXX: until workflow better, move on w/ prev submit
+        peer_module = self._get_assessment_module('peer-assessment')
+        peer_assessment = peer_module.get_peer_submission(student_item_dict)
+        if previous_submissions and peer_assessment:  # XXX: until workflow better, move on w/ prev submit
            template = get_template("static/html/oa_base.html")
            context = Context(context_dict)
            frag = Fragment(template.render(context))
@@ -345,36 +256,23 @@ class OpenAssessmentBlock(XBlock):
        frag.initialize_js('OpenAssessmentBlock')
        return frag

-    def _hack_get_peer_eval(self):
-        # HACK: Forcing Peer Eval, we'll get the Eval config.
-        for next_eval in self.rubric_evals:
-            if next_eval.eval_type == "peer-evaluation":
-                return next_eval
-
    @XBlock.json_handler
    def assess(self, data, suffix=''):
-        # HACK: Replace with proper workflow.
-        peer_eval = self._hack_get_peer_eval()
-        """Place an assessment into Openassessment system"""
-        student_item_dict = self._get_student_item_dict()
-
-        assessment_dict = {
-            "points_earned": map(int, data["points_earned"]),
-            "points_possible": sum(c['total_value'] for c in self.rubric_criteria),
-            "feedback": "Not yet implemented.",
-        }
-
-        evaluation = peer_api.create_evaluation(
-            data["submission_uuid"],
-            student_item_dict["student_id"],
-            int(peer_eval.must_grade),
-            int(peer_eval.must_be_graded_by),
-            assessment_dict
-        )
-
-        # Temp kludge until we fix JSON serialization for datetime
-        evaluation["scored_at"] = str(evaluation["scored_at"])
-
-        return evaluation, "Success"
+        # TODO Pass name through the handler.
+        assessment = self._get_assessment_module('peer-assessment')
+        if assessment:
+            assessment.assess(
+                self._get_student_item_dict(),
+                self.rubric_criteria,
+                data
+            )
+
+    def _get_assessment_module(self, name):
+        """Get a configured assessment module by name.
+        """
+        for assessment in self.rubric_assessments:
+            if assessment.name == name:
+                return assessment
    @XBlock.json_handler
    def submit(self, data, suffix=''):

@@ -405,7 +303,16 @@ class OpenAssessmentBlock(XBlock):
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
-        return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC,]
+        return [
+            (
+                "OpenAssessmentBlock Poverty Rubric",
+                load('static/xml/poverty_rubric_example.xml')
+            ),
+            (
+                "OpenAssessmentBlock Censorship Rubric",
+                load('static/xml/censorship_rubric_example.xml')
+            ),
+        ]
    @staticmethod
    def studio_view(context=None):

@@ -423,7 +330,7 @@ class OpenAssessmentBlock(XBlock):
        block = sparser.parse()
        return block

-    def _get_grade_state(self, student_item):
+    def _get_grade_state(self):
        # TODO: Determine if we want to build out grade state right now.
        grade_state = {
......
+from openassessment.peer import api as peer_api
+from openassessment.peer.api import PeerAssessmentWorkflowError
+from openassessment.xblock.assessment import Assessment
+
+
+class PeerAssessment(Assessment):
+    assessment_type = "peer-assessment"
+    navigation_text = "Your assessment(s) of peer responses"
+    path = "static/html/oa_peer_assessment.html"
+
+    @classmethod
+    def assess(cls, student_item_dict, rubric_criteria, data):
+        """Place an assessment into Openassessment system
+        """
+        assessment_dict = {
+            "points_earned": map(int, data["points_earned"]),
+            "points_possible": sum(c['total_value'] for c in rubric_criteria),
+            "feedback": "Not yet implemented.",
+        }
+        assessment = peer_api.create_assessment(
+            data["submission_uuid"],
+            student_item_dict["student_id"],
+            int(cls.must_grade),
+            int(cls.must_be_graded_by),
+            assessment_dict
+        )
+
+        # Temp kludge until we fix JSON serialization for datetime
+        assessment["scored_at"] = str(assessment["scored_at"])
+
+        return assessment, "Success"
+
+    def get_peer_submission(self, student_item_dict):
+        peer_submission = False
+        try:
+            peer_submission = peer_api.get_submission_to_assess(
+                student_item_dict, self.must_be_graded_by
+            )
+            # context_dict["peer_submission"] = peer_submission
+            peer_submission = peer_api.get_submission_to_assess(
+                student_item_dict,
+                self.must_be_graded_by
+            )
+        except PeerAssessmentWorkflowError:
+            # TODO: Log?
+            pass
+        return peer_submission
\ No newline at end of file
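student_view wires these two methods together: get_peer_submission fetches a peer's work to grade, and the assess handler records a grade against it. A condensed sketch of that flow, assuming a configured Django environment; the student_item fields and the payload shape are hypothetical. Note that assess() is a classmethod, so it reads must_grade and must_be_graded_by from the class rather than from any instance.

    peer = PeerAssessment()
    student_item = {"student_id": "Tim", "item_id": "item_one",
                    "course_id": "TestCourse", "item_type": "openassessment"}

    submission = peer.get_peer_submission(student_item)  # False when none available
    if submission:
        payload = {
            "submission_uuid": submission["uuid"],       # field name assumed here
            "points_earned": ["0", "3", "1", "1", "2"],  # one string per criterion
        }
        # DEFAULT_RUBRIC_CRITERIA is the default list defined in the XBlock module above.
        assessment, status = PeerAssessment.assess(
            student_item, DEFAULT_RUBRIC_CRITERIA, payload
        )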
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
+from openassessment.xblock.peer_assessment import PeerAssessment
+from openassessment.xblock.self_assessment import SelfAssessment


class ScenarioParser(object):
@@ -63,35 +65,35 @@ class ScenarioParser(object):
            rubric_criteria.append(crit)
        return (e.text.strip(), rubric_criteria)

-    def get_evals(self, evaluations):
-        """<evals>
+    def get_assessments(self, assessments):
+        """<assessments>
        <!-- There can be multiple types of assessments given in any
             arbitrary order, like this self assessment followed by a
             peer assessment -->
-        <self />
-        <peereval start="2014-12-20T19:00-7:00"
+        <self-assessment />
+        <peer-assessment start="2014-12-20T19:00-7:00"
                  due="2014-12-21T22:22-7:00"
                  must_grade="5"
                  must_be_graded_by="3" />
-        </evals>"""
-        evaluation_list = []
-        for ev in evaluations:
-            evaluation = None
-            type = ev.tag
-            if 'peer-evaluation' == type:
-                evaluation = PeerEvaluation()
-            elif 'self-evaluation' == type:
-                evaluation = SelfEvaluation()
-
-            if evaluation:
-                evaluation.name = ev.attrib.get('name', '')
-                evaluation.start_datetime = ev.attrib.get('start', None)
-                evaluation.due_datetime = ev.attrib.get('due', None)
-                evaluation.must_grade = int(ev.attrib.get('must_grade', 1))
-                evaluation.must_be_graded_by = int(ev.attrib.get('must_be_graded_by', 0))
-            evaluation_list.append(evaluation)
-        return evaluation_list
+        </peer-assessment>"""
+        assessment_list = []
+        for asmnt in assessments:
+            assessment = None
+            assessment_type = asmnt.tag
+            if 'peer-assessment' == assessment_type:
+                assessment = PeerAssessment()
+                assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
+                assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
+            elif 'self-assessment' == assessment_type:
+                assessment = SelfAssessment()
+
+            if assessment:
+                assessment.name = asmnt.attrib.get('name', '')
+                assessment.start_datetime = asmnt.attrib.get('start', None)
+                assessment.due_datetime = asmnt.attrib.get('due', None)
+            assessment_list.append(assessment)
+        return assessment_list
def parse(self): def parse(self):
"""Instantiate xblock object from runtime XML definition.""" """Instantiate xblock object from runtime XML definition."""
...@@ -103,32 +105,8 @@ class ScenarioParser(object): ...@@ -103,32 +105,8 @@ class ScenarioParser(object):
self.xblock.rubric_criteria) = self.get_rubric(child) self.xblock.rubric_criteria) = self.get_rubric(child)
elif child.tag == 'title': elif child.tag == 'title':
self.xblock.title = self.get_title(child) self.xblock.title = self.get_title(child)
elif child.tag == 'evals': elif child.tag == 'assessments':
self.xblock.rubric_evals = self.get_evals(child) self.xblock.rubric_assessments = self.get_assessments(child)
else: else:
self.unknown_handler(self.xblock, child) self.unknown_handler(self.xblock, child)
return self.xblock return self.xblock
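A quick usage sketch of the new parser hook, mirroring the unit test further below (construction of the ScenarioParser instance is elided in this diff, so `parser` here stands in for an already-built instance):

from xml.etree import ElementTree as etree

assessments_xml = etree.fromstring(
    """<assessments>
           <peer-assessment must_grade="5" must_be_graded_by="3" />
           <self-assessment />
       </assessments>"""
)
parsed = parser.get_assessments(assessments_xml)  # parser: a ScenarioParser instance
assert parsed[0].assessment_type == "peer-assessment"
assert parsed[0].must_grade == 5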
class EvaluationModule():
eval_type = None
name = ''
start_datetime = None
due_datetime = None
must_grade = 1
must_be_graded_by = 0
class PeerEvaluation(EvaluationModule):
eval_type = "peer-evaluation"
navigation_text = "Your evaluation(s) of peer responses"
url = "static/html/oa_peer_evaluation.html"
class SelfEvaluation(EvaluationModule):
eval_type = "self-evaluation"
navigation_text = "Your evaluation of your response"
url = "static/html/oa_self_evaluation.html"
\ No newline at end of file
from openassessment.xblock.assessment import Assessment


class SelfAssessment(Assessment):
    assessment_type = "self-assessment"
    navigation_text = "Your assessment of your response"
    path = "static/html/oa_self_assessment.html"
\ No newline at end of file
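The shared Assessment base class imported by PeerAssessment and SelfAssessment is not shown in this diff. Judging from the attributes the parser and templates rely on, and from the removed EvaluationModule above, it is presumably something close to this sketch:

class Assessment(object):
    assessment_type = None
    name = ''
    navigation_text = ''
    path = ''
    start_datetime = None
    due_datetime = None
    must_grade = 1
    must_be_graded_by = 0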
@@ -20,9 +20,9 @@
<span class="openassessment__title--sub">
  <span class="problem-type problem-type--open-ended-response">Open Ended Response</span>
  {% for assessment in rubric_assessments %}
  +
  <span class="problem-type problem-type--{{ assessment.assessment_type }}">{{ assessment.name }}</span>
  {% endfor %}
</span>
</h1>

@@ -35,9 +35,9 @@
<li class="list--nav__item">
  <a class="action" href="#openassessment__response">Your response to this problem</a>
</li>
{% for assessment in rubric_assessments %}
<li class="list--nav__item">
  <a class="action" href="#openassessment__{{ assessment.assessment_type }}">{{ assessment.navigation_text }}</a>
</li>
{% endfor %}
<li class="list--nav__item">

@@ -73,8 +73,8 @@
<!-- STEP: response -->
{% include "static/html/oa_response.html" %}
{% for assessment in rubric_assessments %}
{% include assessment.path %}
{% endfor %}
</ol>
...
@@ -11,7 +11,7 @@
<!-- CASE: default/not started -->
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
  {#<header class="step__header">#}
  <h2 class="step__title">
    <span class="step__label">Evaluate Peers' Responses</span>

@@ -32,34 +32,34 @@
  </div>
  <div class="step__content">
    <ul class="list--peer-assessments">
      <li class="list--peer-assessments__item">
        <article class="peer-assessment" id="peer-assessment--001">
          <header class="peer-assessment__header">
            <h3 class="peer-assessment__title">Assessment #
              <span class="peer-assessment__number--current">1</span> of
              <span class="peer-assessment__number--required">3</span>
            </h3>
            <span class="peer-assessment__expected-time">
              <span class="label">Expected Time Spent:</span>
              <span class="value">20 Minutes</span>
            </span>
          </header>

          <!-- ?: markup validating/copy cleaning upon submission -->
          <div class="peer-assessment__response">
            {{ peer_submission.answer }}
          </div>

          <form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
            <fieldset class="assessment__fields">
              <legend class="assessment__instruction">{{ rubric_instructions }}</legend>
              <ol class="list list--fields assessment__rubric">
                {% for criterion in rubric_criteria %}
                <!-- individual rubric question (radio-based choice) -->
                <li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
                  <h4 class="question__title">
                    {{ criterion.instructions }}
                    <span class="label--required">* <span class="sr">(Required)</span></span>

@@ -69,8 +69,8 @@
                    {% for value, text in criterion.options %}
                    <li class="answer">
                      <div class="wrapper--input">
                        <input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
                        <label for="assessment__rubric__question--{{ criterion.name }}--01" class="answer__label">({{ value }}) {{ text }}</label>
                      </div>
                      <span class="answer__tip">TODO: Criterion Instructions</span>
                    </li>

@@ -80,16 +80,16 @@
                    {% endfor %}

                <!-- individual rubric question (text) -->
                <li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
                  <label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
                  <textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
                </li>
              </ol>
            </fieldset>

            <ul class="list list--actions">
              <li class="list--actions__item">
                <button type="submit" id="peer-assessment--001__assessment__submit" class="action action--submit">Submit your assessment &amp; move to response #2</button>
              </li>
            </ul>
          </form>
...
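The peer- and self-assessment templates above assume a render context supplying the rubric and the submission being reviewed. A hypothetical minimal context, with names inferred from the template variables and the assess() classmethod rather than stated in this commit:

context = {
    "rubric_instructions": "Read for conciseness, clarity of thought, and form.",
    "rubric_criteria": [
        {
            "name": "concise",
            "instructions": "How concise is it?",
            "total_value": 5,  # assess() sums total_value into points_possible
            "options": [(0, "IRC"), (5, "The Elements of Style")],
        },
    ],
    "peer_submission": {"answer": "..."},  # the peer response being assessed
}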
@@ -4,14 +4,14 @@
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .self-assessment item needs a unique id attribute formatted as #self-assessment--###
* individual rubric questions' answers need specific id attributes in several places
-->

<!-- CASE: default/not started -->
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
  {# <header class="step__header">#}
  <h2 class="step__title">
    <span class="step__title__label">Evaluate Your Response</span>

@@ -25,24 +25,24 @@
  {# </header>#}
  <div class="step--content">
    <article class="self-assessment" id="self-assessment">
      <header class="self-assessment__header">
        <h3 class="self-assessment__title">Your Submitted Response</h3>
      </header>

      <!-- ?: markup validating/copy cleaning upon submission -->
      <div class="self-assessment__response">
        {{ self_submission.answer }}
      </div>

      <form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
        <fieldset class="assessment__fields">
          <legend class="assessment__instruction">{{ rubric_instructions }}</legend>
          <ol class="list list--fields assessment__rubric">
            {% for criterion in rubric_criteria %}
            <!-- individual rubric question (radio-based choice) -->
            <li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
              <h4 class="question__title">
                {{ criterion.instructions }}
                <span class="label--required">* <span class="sr">(Required)</span></span>

@@ -52,8 +52,8 @@
                {% for value, text in criterion.options %}
                <li class="answer">
                  <div class="wrapper--input">
                    <input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
                    <label for="assessment__rubric__question--{{ criterion.name }}--01" class="answer__label">({{ value }}) {{ text }}</label>
                  </div>
                  <span class="answer__tip">TODO: Criterion Instructions</span>
                </li>

@@ -63,16 +63,16 @@
                {% endfor %}

            <!-- individual rubric question (text) -->
            <li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
              <label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
              <textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
            </li>
          </ol>
        </fieldset>

        <ul class="list list--actions">
          <li class="list--actions__item">
            <button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">Submit your assessment</button>
          </li>
        </ul>
      </form>
...
@@ -8,8 +8,8 @@ function OpenAssessmentBlock(runtime, element) {
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */

function prepare_assessment_post(element) {
    var selector = $("input[type=radio]:checked", element);
    var values = [];
    for (var i = 0; i < selector.length; i++) {
        values[i] = selector[i].value;
    }

@@ -17,8 +17,8 @@ function OpenAssessmentBlock(runtime, element) {
}

function displayStatus(result) {
    var status = result[0];
    var error_msg = result[1];
    if (status) {
        $('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
    } else {
...
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Ernest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<assessments>
<self-assessment name="self-assessment" />
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Ernest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<assessments>
<peer-assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
__author__ = 'stephensanchez'
@@ -46,13 +46,14 @@ RUBRIC_CONFIG = """
        <option val="5">The Elements of Style</option>
    </criterion>
</rubric>
<assessments>
    <peer-assessment name="peer-assessment"
                     start="2014-12-20T19:00-7:00"
                     due="2014-12-21T22:22-7:00"
                     must_grade="5"
                     must_be_graded_by="3" />
    <self-assessment/>
</assessments>
</openassessment>
"""
...
@@ -61,34 +61,34 @@ class TestScenarioParser(TestCase):
        self.assertEqual(int(criterion_option_value), 99)
        self.assertEqual(criterion_explanation, criterion_option_explain_text)

    def test_get_assessments(self):
        """Given an <assessments> list, return a list of assessment modules."""
        assessments = """<assessments>
            <self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
                             must_grade="999"
                             must_be_graded_by="73" />
            <peer-assessment start="2014-12-20T19:00-7:00"
                             due="2014-12-21T22:22-7:00"
                             must_grade="5"
                             must_be_graded_by="3" />
            <self-assessment />
        </assessments>"""
        assessments_xml = etree.fromstring(assessments)
        parsed_list = self.test_parser.get_assessments(assessments_xml)

        # Self assessments take all the parameters, but mostly ignore them.
        self.assertEqual(parsed_list[0].assessment_type, 'self-assessment')
        self.assertEqual(parsed_list[0].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
        self.assertEqual(parsed_list[0].must_grade, 1)
        self.assertEqual(parsed_list[0].must_be_graded_by, 0)

        # Peer assessments are more interesting.
        self.assertEqual(parsed_list[1].assessment_type, 'peer-assessment')
        self.assertEqual(parsed_list[1].name, '')
        self.assertEqual(parsed_list[1].must_grade, 5)
        self.assertEqual(parsed_list[1].must_be_graded_by, 3)

        # We can parse arbitrary workflow descriptions as a list of assessments.
        # Whether or not the workflow system can use them is another matter.
        self.assertEqual(parsed_list[2].assessment_type, 'self-assessment')