Commit b491ad87 by Stephen Sanchez

Merge pull request #38 from edx/sanchez/template-first-crack

WIP: UI Architecture for Module UI Components
parents 4053e448 6ac00039
"""Public interface managing the workflow for peer assessments.
The Peer Evaluation Workflow API exposes all public actions required to complete
The Peer Assessment Workflow API exposes all public actions required to complete
the workflow for a given submission.
"""
@@ -11,7 +11,7 @@ from django.db import DatabaseError
import math
from openassessment.peer.models import PeerEvaluation
from openassessment.peer.serializers import PeerEvaluationSerializer
from openassessment.peer.serializers import PeerAssessmentSerializer
from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score
from submissions.serializers import SubmissionSerializer, StudentItemSerializer
@@ -21,17 +21,17 @@ logger = logging.getLogger(__name__)
PEER_TYPE = "PE"
class PeerEvaluationError(Exception):
"""Generic Peer Evaluation Error
class PeerAssessmentError(Exception):
"""Generic Peer Assessment Error
Raised when an error occurs while processing a request related to the
Peer Evaluation Workflow.
Peer Assessment Workflow.
"""
pass
class PeerEvaluationRequestError(PeerEvaluationError):
class PeerAssessmentRequestError(PeerAssessmentError):
"""Error indicating insufficient or incorrect parameters in the request.
Raised when the request does not contain enough information, or incorrect
@@ -44,7 +44,7 @@ class PeerEvaluationRequestError(PeerEvaluationError):
self.field_errors = copy.deepcopy(field_errors)
class PeerEvaluationWorkflowError(PeerEvaluationError):
class PeerAssessmentWorkflowError(PeerAssessmentError):
"""Error indicating a step in the workflow cannot be completed,
Raised when the action taken cannot be completed in the workflow. This can
@@ -54,7 +54,7 @@ class PeerEvaluationWorkflowError(PeerEvaluationError):
pass
class PeerEvaluationInternalError(PeerEvaluationError):
class PeerAssessmentInternalError(PeerAssessmentError):
"""Error indicating an internal problem independent of API use.
Raised when an internal error has occurred. This should be independent of
@@ -64,16 +64,16 @@ class PeerEvaluationInternalError(PeerEvaluationError):
pass
def create_evaluation(
def create_assessment(
submission_uuid,
scorer_id,
required_evaluations_for_student,
required_evaluations_for_submission,
required_assessments_for_student,
required_assessments_for_submission,
assessment_dict,
scored_at=None):
"""Creates an evaluation on the given submission.
"""Creates an assessment on the given submission.
Evaluations are created based on feedback associated with a particular
Assessments are created based on feedback associated with a particular
rubric.
Args:
@@ -82,27 +82,27 @@ def create_evaluation(
Submission model.
scorer_id (str): The user ID for the user giving this assessment. This
is required to create an assessment on a submission.
required_evaluations_for_student (int): The number of evaluations
required_assessments_for_student (int): The number of assessments
required for the student to receive a score for their submission.
required_evaluations_for_submission (int): The number of evaluations
required_assessments_for_submission (int): The number of assessments
required on the submission for it to be scored.
assessment_dict (dict): All related information for the assessment. An
assessment contains points_earned, points_possible, and feedback.
scored_at (datetime): Optional argument to override the time in which
the evaluation took place. If not specified, scored_at is set to
the assessment took place. If not specified, scored_at is set to
now.
Returns:
dict: The dictionary representing the evaluation. This includes the
dict: The dictionary representing the assessment. This includes the
points earned, points possible, time scored, scorer id, score type,
and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid, or
PeerAssessmentRequestError: Raised when the submission_id is invalid, or
the assessment_dict does not contain the required values to create
an assessment.
PeerEvaluationInternalError: Raised when there is an internal error
while creating a new evaluation.
PeerAssessmentInternalError: Raised when there is an internal error
while creating a new assessment.
Examples:
>>> assessment_dict = dict(
@@ -110,7 +110,7 @@ def create_evaluation(
>>> points_possible=12,
>>> feedback="Your submission was thrilling.",
>>> )
>>> create_evaluation("1", "Tim", assessment_dict)
>>> create_assessment("1", "Tim", assessment_dict)
{
'points_earned': 6,
'points_possible': 12,
@@ -122,7 +122,7 @@ def create_evaluation(
"""
try:
submission = Submission.objects.get(uuid=submission_uuid)
peer_evaluation = {
peer_assessment = {
"scorer_id": scorer_id,
"submission": submission.pk,
"points_earned": sum(assessment_dict["points_earned"]),
@@ -131,11 +131,11 @@ def create_evaluation(
"feedback": assessment_dict["feedback"],
}
if scored_at:
peer_evaluation["scored_at"] = scored_at
peer_assessment["scored_at"] = scored_at
peer_serializer = PeerEvaluationSerializer(data=peer_evaluation)
peer_serializer = PeerAssessmentSerializer(data=peer_assessment)
if not peer_serializer.is_valid():
raise PeerEvaluationRequestError(peer_serializer.errors)
raise PeerAssessmentRequestError(peer_serializer.errors)
peer_serializer.save()
# Check if the submission is finished and its Author has graded enough.
@@ -143,11 +143,11 @@ def create_evaluation(
_score_if_finished(
student_item,
submission,
required_evaluations_for_student,
required_evaluations_for_submission
required_assessments_for_student,
required_assessments_for_submission
)
# Check if the grader is finished and has enough evaluations
# Check if the grader is finished and has enough assessments
scorer_item = StudentItem.objects.get(
student_id=scorer_id,
item_id=student_item.item_id,
@@ -162,28 +162,28 @@ def create_evaluation(
_score_if_finished(
scorer_item,
scorer_submissions[0],
required_evaluations_for_student,
required_evaluations_for_submission
required_assessments_for_student,
required_assessments_for_submission
)
return peer_serializer.data
except DatabaseError:
error_message = u"An error occurred while creating evaluation {} for submission: {} by: {}".format(
error_message = u"An error occurred while creating assessment {} for submission: {} by: {}".format(
assessment_dict,
submission_uuid,
scorer_id
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
raise PeerAssessmentInternalError(error_message)
def _score_if_finished(student_item,
submission,
required_evaluations_for_student,
required_evaluations_for_submission):
required_assessments_for_student,
required_assessments_for_submission):
"""Calculate final grade iff peer evaluation flow is satisfied.
Checks if the student is finished with the peer evaluation workflow. If the
Checks if the student is finished with the peer assessment workflow. If the
student already has a final grade calculated, there is no need to proceed.
If they do not yet have a grade, a final grade is calculated for them.
@@ -193,19 +193,19 @@ def _score_if_finished(student_item,
finished_evaluating = has_finished_required_evaluating(
student_item.student_id,
required_evaluations_for_student
required_assessments_for_student
)
evaluations = PeerEvaluation.objects.filter(submission=submission)
submission_finished = evaluations.count() >= required_evaluations_for_submission
assessments = PeerEvaluation.objects.filter(submission=submission)
submission_finished = assessments.count() >= required_assessments_for_submission
scores = []
for evaluation in evaluations:
scores.append(evaluation.points_earned)
for assessment in assessments:
scores.append(assessment.points_earned)
if finished_evaluating and submission_finished:
submission_api.set_score(
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
_calculate_final_score(scores),
evaluations[0].points_possible
assessments[0].points_possible
)
@@ -228,7 +228,7 @@ def _calculate_final_score(scores):
return int(math.ceil(sum(scores[median-1:median+1])/float(2)))
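# Sketch of the full scoring rule (the hunk above shows only the final line;
# how `median` is derived is elided, so this assumes it indexes the upper
# middle of the sorted score list):
def _calculate_final_score_sketch(scores):
    if not scores:
        return 0
    scores = sorted(scores)
    median = int(math.ceil(len(scores) / float(2)))
    if len(scores) % 2:
        return scores[median - 1]
    return int(math.ceil(sum(scores[median - 1:median + 1]) / float(2)))
# e.g. _calculate_final_score_sketch([2, 5, 11, 12]) == 8, the two middle
# scores averaged and rounded up; an empty list scores 0, as the tests expect.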
def has_finished_required_evaluating(student_id, required_evaluations):
def has_finished_required_evaluating(student_id, required_assessments):
"""Check if a student still needs to evaluate more submissions
Per the contract of the peer assessment workflow, a student must evaluate a
@@ -237,7 +237,7 @@ def has_finished_required_evaluating(student_id, required_evaluations):
Args:
student_id (str): The student in the peer grading workflow to check for
peer workflow criteria. This argument is required.
required_evaluations (int): The number of evaluations a student has to
required_assessments (int): The number of assessments a student has to
submit before receiving the feedback on their submission. This is a
required argument.
@@ -247,9 +247,9 @@ def has_finished_required_evaluating(student_id, required_evaluations):
evaluate more peer submissions.
Raises:
PeerEvaluationRequestError: Raised when the student_id is invalid, or
the required_evaluations is not a positive integer.
PeerEvaluationInternalError: Raised when there is an internal error
PeerAssessmentRequestError: Raised when the student_id is invalid, or
the required_assessments is not a positive integer.
PeerAssessmentInternalError: Raised when there is an internal error
while evaluating this workflow rule.
Examples:
@@ -257,37 +257,37 @@ def has_finished_required_evaluating(student_id, required_evaluations):
True
"""
if required_evaluations < 0:
raise PeerEvaluationRequestError(
"Required Evaluation count must be a positive integer.")
if required_assessments < 0:
raise PeerAssessmentRequestError(
"Required Assessment count must be a positive integer.")
return PeerEvaluation.objects.filter(
scorer_id=student_id
).count() >= required_evaluations
).count() >= required_assessments
def get_evaluations(submission_id):
"""Retrieve the evaluations for a submission.
def get_assessments(submission_id):
"""Retrieve the assessments for a submission.
Retrieves all the evaluations for a submission. This API returns related
Retrieves all the assessments for a submission. This API returns related
feedback without making any assumptions about grading. Any outstanding
evaluations associated with this submission will not be returned.
assessments associated with this submission will not be returned.
Args:
submission_id (str): The submission all the requested evaluations are
submission_id (str): The submission all the requested assessments are
associated with. Required.
Returns:
list(dict): A list of dictionaries, where each dictionary represents a
separate evaluation. Each evaluation contains points earned, points
separate assessment. Each assessment contains points earned, points
possible, time scored, scorer id, score type, and feedback.
Raises:
PeerEvaluationRequestError: Raised when the submission_id is invalid.
PeerEvaluationInternalError: Raised when there is an internal error
while retrieving the evaluations associated with this submission.
PeerAssessmentRequestError: Raised when the submission_id is invalid.
PeerAssessmentInternalError: Raised when there is an internal error
while retrieving the assessments associated with this submission.
Examples:
>>> get_evaluations("1")
>>> get_assessments("1")
[
{
'points_earned': 6,
@@ -308,45 +308,45 @@ def get_evaluations(submission_id):
"""
try:
submission = Submission.objects.get(uuid=submission_id)
evaluations = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerEvaluationSerializer(evaluations, many=True)
assessments = PeerEvaluation.objects.filter(submission=submission)
serializer = PeerAssessmentSerializer(assessments, many=True)
return serializer.data
except DatabaseError:
error_message = (
u"Error getting evaluations for submission {}".format(submission_id)
u"Error getting assessments for submission {}".format(submission_id)
)
logger.exception(error_message)
raise PeerEvaluationInternalError(error_message)
raise PeerAssessmentInternalError(error_message)
def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
def get_submission_to_assess(student_item_dict, required_num_assessments):
"""Get a submission to peer evaluate.
Retrieves a submission for evaluation for the given student_item. This will
Retrieves a submission for assessment for the given student_item. This will
not return a submission submitted by the requesting scorer. The submission
returned (TODO: will be) is based on a priority queue. Submissions with the
fewest evaluations and the most active students will be prioritized over
submissions from students who are not as active in the evaluation process.
fewest assessments and the most active students will be prioritized over
submissions from students who are not as active in the assessment process.
Args:
student_item_dict (dict): The student item information from the student
requesting a submission for evaluation. The dict contains an
requesting a submission for assessment. The dict contains an
item_id, course_id, and item_type, used to identify the unique
question for the review, while the student_id is used to explicitly
avoid giving the student their own submission.
required_num_evaluations (int): The number of evaluations a submission
requires before it has completed the peer evaluation process.
required_num_assessments (int): The number of assessments a submission
requires before it has completed the peer assessment process.
Returns:
dict: A peer submission for evaluation. This contains a 'student_item',
dict: A peer submission for assessment. This contains a 'student_item',
'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
used for evaluation.
used for assessment.
Raises:
PeerEvaluationRequestError: Raised when the request parameters are
PeerAssessmentRequestError: Raised when the request parameters are
invalid for the request.
PeerEvaluationInternalError:
PeerEvaluationWorkflowError:
PeerAssessmentInternalError:
PeerAssessmentWorkflowError:
Examples:
>>> student_item_dict = dict(
@@ -355,7 +355,7 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
>>> item_type="type_one",
>>> student_id="Bob",
>>> )
>>> get_submission_to_evaluate(student_item_dict, 3)
>>> get_submission_to_assess(student_item_dict, 3)
{
'student_item': 2,
'attempt_number': 1,
@@ -375,26 +375,26 @@ def get_submission_to_evaluate(student_item_dict, required_num_evaluations):
submission = _get_first_submission_not_evaluated(
student_items,
student_item_dict["student_id"],
required_num_evaluations
required_num_assessments
)
if not submission:
raise PeerEvaluationWorkflowError(
"There are no submissions available for evaluation."
raise PeerAssessmentWorkflowError(
"There are no submissions available for assessment."
)
return SubmissionSerializer(submission).data
def _get_first_submission_not_evaluated(student_items, student_id, required_num_evaluations):
def _get_first_submission_not_evaluated(student_items, student_id, required_num_assessments):
# TODO: We need a priority queue.
submissions = Submission.objects.filter(student_item__in=student_items).order_by(
"submitted_at",
"-attempt_number"
)
for submission in submissions:
evaluations = PeerEvaluation.objects.filter(submission=submission)
if evaluations.count() < required_num_evaluations:
assessments = PeerEvaluation.objects.filter(submission=submission)
if assessments.count() < required_num_assessments:
already_evaluated = False
for evaluation in evaluations:
already_evaluated = already_evaluated or evaluation.scorer_id == student_id
for assessment in assessments:
already_evaluated = already_evaluated or assessment.scorer_id == student_id
if not already_evaluated:
return submission
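# The selection rule above, restated as a sketch for readability (same models
# and ordering; an illustration of the behaviour, not a drop-in replacement):
def _first_open_submission_sketch(student_items, student_id, required_num_assessments):
    candidates = Submission.objects.filter(
        student_item__in=student_items
    ).order_by("submitted_at", "-attempt_number")
    for candidate in candidates:
        has_room = PeerEvaluation.objects.filter(
            submission=candidate
        ).count() < required_num_assessments
        already_scored = PeerEvaluation.objects.filter(
            submission=candidate, scorer_id=student_id
        ).exists()
        if has_room and not already_scored:
            return candidate
    return None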
@@ -6,7 +6,7 @@ from rest_framework import serializers
from openassessment.peer.models import PeerEvaluation
class PeerEvaluationSerializer(serializers.ModelSerializer):
class PeerAssessmentSerializer(serializers.ModelSerializer):
class Meta:
model = PeerEvaluation
fields = (
......
@@ -32,7 +32,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
class TestApi(TestCase):
def test_create_evaluation(self):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
evaluation = api.create_evaluation(
evaluation = api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -44,21 +44,21 @@ class TestApi(TestCase):
@file_data('test_valid_evaluations.json')
def test_get_evaluations(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
REQUIRED_GRADED_BY,
assessment_dict
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
@file_data('test_valid_evaluations.json')
def test_get_evaluations_with_date(self, assessment_dict):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -66,7 +66,7 @@ class TestApi(TestCase):
assessment_dict,
MONDAY
)
evaluations = api.get_evaluations(submission["uuid"])
evaluations = api.get_assessments(submission["uuid"])
self.assertEqual(1, len(evaluations))
self._assert_evaluation(evaluations[0], **assessment_dict)
self.assertEqual(evaluations[0]["scored_at"], MONDAY)
@@ -85,22 +85,22 @@ class TestApi(TestCase):
self.assertFalse(scores)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
api.create_evaluation(
api.create_assessment(
xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
@@ -110,13 +110,13 @@ class TestApi(TestCase):
scores = sub_api.get_score(STUDENT_ITEM)
self.assertFalse(scores)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
api.create_evaluation(
api.create_assessment(
tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
)
@@ -127,7 +127,7 @@ class TestApi(TestCase):
self.assertEqual(12, scores[0]["points_possible"])
@raises(api.PeerEvaluationRequestError)
@raises(api.PeerAssessmentRequestError)
def test_bad_configuration(self):
api.has_finished_required_evaluating("Tim", -1)
@@ -139,27 +139,27 @@ class TestApi(TestCase):
)
self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
self.assertIsNotNone(submission)
self.assertEqual(submission["answer"], u"Bob's answer")
self.assertEqual(submission["student_item"], 2)
self.assertEqual(submission["attempt_number"], 1)
@raises(api.PeerEvaluationWorkflowError)
@raises(api.PeerAssessmentWorkflowError)
def test_no_submissions_to_evaluate_for_tim(self):
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
api.get_submission_to_evaluate(STUDENT_ITEM, 3)
api.get_submission_to_assess(STUDENT_ITEM, 3)
"""
Some Error Checking Tests against DB failures.
"""
@patch.object(Submission.objects, 'get')
@raises(api.PeerEvaluationInternalError)
@raises(api.PeerAssessmentInternalError)
def test_error_on_evaluation_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -172,7 +172,7 @@ class TestApi(TestCase):
@raises(sub_api.SubmissionInternalError)
def test_error_on_get_evaluation(self, mock_filter):
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_evaluation(
api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
REQUIRED_GRADED,
@@ -181,7 +181,7 @@ class TestApi(TestCase):
MONDAY
)
mock_filter.side_effect = DatabaseError("Bad things happened")
api.get_evaluations(submission["uuid"])
api.get_assessments(submission["uuid"])
def test_choose_score(self):
self.assertEqual(0, api._calculate_final_score([]))
......
@@ -2,7 +2,7 @@ import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from openassessment.peer.api import get_evaluations
from openassessment.peer.api import get_assessments
from submissions.api import SubmissionRequestError, get_submissions
log = logging.getLogger(__name__)
@@ -38,7 +38,7 @@ def get_evaluations_for_student_item(request, course_id, student_id, item_id):
submissions = get_submissions(student_item_dict)
evaluations = []
for submission in submissions:
submission_evaluations = get_evaluations(submission["uuid"])
submission_evaluations = get_assessments(submission["uuid"])
for evaluation in submission_evaluations:
evaluation["submission_uuid"] = submission["uuid"]
evaluations.append(evaluation)
......
"""An XBlock where students can read a question and compose their response"""
import datetime
import pkg_resources
from mako.template import Template
from django.template.context import Context
from django.template.loader import get_template
from webob import Response
from xblock.core import XBlock
from xblock.fields import List, Scope, String
from xblock.fragment import Fragment
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.ui_models import PeerAssessmentUIModel
from submissions import api as submissions_api
from openassessment.peer import api as peer_api
from scenario_parser import ScenarioParser
mako_default_filters = ['unicode', 'h', 'trim']
DEFAULT_PROMPT = """
Censorship in the Libraries
@@ -40,20 +41,31 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Determine if there is a unifying theme or main idea.",
'total_value': 5,
'options': [
(0, "Poor", "Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.",),
(3, "Fair", "Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.",),
(5, "Good", "Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.",),
(0, "Poor", """Difficult for the reader to discern the main idea.
Too brief or too repetitive to establish or maintain a focus.""",),
(3, "Fair", """Presents a unifying theme or main idea, but may
include minor tangents. Stays somewhat focused on topic and
task.""",),
(5, "Good", """Presents a unifying theme or main idea without going
off on tangents. Stays completely focused on topic and task.""",),
],
},
{
'name': "Content",
'instructions': "Evaluate the content of the submission",
'instructions': "Assess the content of the submission",
'total_value': 5,
'options': [
(0, "Poor", "Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.",),
(1, "Fair", "Includes little information and few or no details. Explores only one or two facets of the topic.",),
(3, "Good", "Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.",),
(5, "Excellent", "Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.",),
(0, "Poor", """Includes little information with few or no details or
unrelated details. Unsuccessful in attempts to explore any
facets of the topic.""",),
(1, "Fair", """Includes little information and few or no details.
Explores only one or two facets of the topic.""",),
(3, "Good", """Includes sufficient information and supporting
details. (Details may not be fully developed; ideas may be
listed.) Explores some facets of the topic.""",),
(5, "Excellent", """Includes in-depth information and exceptional
supporting details that are fully developed. Explores all
facets of the topic.""",),
],
},
{
@@ -61,9 +73,13 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Determine if the submission is well organized.",
'total_value': 2,
'options': [
(0, "Poor", "Ideas organized illogically, transitions weak, and response difficult to follow.",),
(1, "Fair", "Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.",),
(2, "Good", "Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.",),
(0, "Poor", """Ideas organized illogically, transitions weak, and
response difficult to follow.""",),
(1, "Fair", """Attempts to logically organize ideas. Attempts to
progress in an order that enhances meaning, and demonstrates use
of transitions.""",),
(2, "Good", """Ideas organized logically. Progresses in an order
that enhances meaning. Includes smooth transitions.""",),
],
},
{
@@ -71,9 +87,15 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Read for style.",
'total_value': 2,
'options': [
(0, "Poor", "Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.",),
(1, "Fair", "Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).",),
(2, "Good", "Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.",),
(0, "Poor", """Contains limited vocabulary, with many words used
incorrectly. Demonstrates problems with sentence patterns.""",),
(1, "Fair", """Contains basic vocabulary, with words that are
predictable and common. Contains mostly simple sentences
(although there may be an attempt at more varied sentence
patterns).""",),
(2, "Good", """Includes vocabulary to make explanations detailed and
precise. Includes varied sentence patterns, including complex
sentences.""",),
],
},
{
@@ -81,187 +103,101 @@ DEFAULT_RUBRIC_CRITERIA = [
'instructions': "Read for style.",
'total_value': 2,
'options': [
(0, "Poor", "Demonstrates language and tone that may be inappropriate to task and reader.",),
(1, "Fair", "Demonstrates an attempt to adjust language and tone to task and reader.",),
(2, "Good", "Demonstrates effective adjustment of language and tone to task and reader.",),
(0, "Poor", """Demonstrates language and tone that may be
inappropriate to task and reader.""",),
(1, "Fair", """Demonstrates an attempt to adjust language and tone
to task and reader.""",),
(2, "Good", """Demonstrates effective adjustment of language and
tone to task and reader.""",),
],
}
]
DEFAULT_EVAL_MODULES = [
{
'type': "peereval",
'name': "peereval",
'start_datetime': datetime.datetime.now,
'due_datetime': None,
'must_grade': 5,
'must_be_graded_by': 3,
},
DEFAULT_PEER_ASSESSMENT = PeerAssessmentUIModel()
DEFAULT_PEER_ASSESSMENT.name = "peer-assessment"
DEFAULT_PEER_ASSESSMENT.start_datetime = datetime.datetime.now().isoformat()
DEFAULT_PEER_ASSESSMENT.must_grade = 5
DEFAULT_PEER_ASSESSMENT.must_be_graded_by = 3
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_PEER_ASSESSMENT,
]
EXAMPLE_POVERTY_RUBRIC = (
"OpenAssessmentBlock Poverty Rubric",
"""
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. Example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<evals>
<peereval start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<selfeval/>
</evals>
</openassessment>
</vertical_demo>
"""
)
EXAMPLE_CENSORSHIP_RUBRIC = (
"OpenAssessmentBlock Censorship Rubric",
"""
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<evals>
<selfeval/>
<peereval start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</evals>
</openassessment>
</vertical_demo>
"""
)
class OpenAssessmentBlock(XBlock):
"""Displays a question and gives an area where students can compose a response."""
start_datetime = String(default=datetime.datetime.now().isoformat(), scope=Scope.content, help="ISO-8601 formatted string representing the start date of this assignment.")
due_datetime = String(default=None, scope=Scope.content, help="ISO-8601 formatted string representing the end date of this assignment.")
prompt = String( default=DEFAULT_PROMPT, scope=Scope.content, help="A prompt to display to a student (plain text).")
rubric = List( default=[], scope=Scope.content, help="Instructions and criteria for students giving feedback.")
rubric_instructions = String( default=DEFAULT_RUBRIC_INSTRUCTIONS, scope=Scope.content, help="Instructions for self and peer assessment.")
rubric_criteria = List(default=DEFAULT_RUBRIC_CRITERIA, scope=Scope.content, help="The different parts of grading for students giving feedback.")
rubric_evals = List(default=DEFAULT_EVAL_MODULES, scope=Scope.content, help="The requested set of evaluations and the order in which to apply them.")
course_id = String( default=u"TestCourse", scope=Scope.content, help="The course_id associated with this prompt (until we can get it from runtime).",)
submit_errors = { # Reported to user sometimes, and useful in tests
'ENOSUB': 'API submission is unrequested',
'ENODATA': 'API returned an empty response',
'EBADFORM': 'API Submission Request Error',
'EUNKNOWN': 'API returned unclassified exception',
'ENOMULTI': 'Multiple submissions are not allowed for this item',
}
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def _get_xblock_trace(self):
class OpenAssessmentBlock(XBlock, SubmissionMixin, PeerAssessmentMixin, SelfAssessmentMixin):
"""Displays a question and gives an area where students can compose a response."""
start_datetime = String(
default=datetime.datetime.now().isoformat(),
scope=Scope.content,
help="ISO-8601 formatted string representing the start date of this assignment."
)
due_datetime = String(
default=None,
scope=Scope.content,
help="ISO-8601 formatted string representing the end date of this assignment."
)
title = String(
default="",
scope=Scope.content,
help="A title to display to a student (plain text)."
)
prompt = String(
default=DEFAULT_PROMPT,
scope=Scope.content,
help="A prompt to display to a student (plain text)."
)
rubric = List(
default=[],
scope=Scope.content,
help="Instructions and criteria for students giving feedback."
)
rubric_instructions = String(
default=DEFAULT_RUBRIC_INSTRUCTIONS,
scope=Scope.content,
help="Instructions for self and peer assessment."
)
rubric_criteria = List(
default=DEFAULT_RUBRIC_CRITERIA,
scope=Scope.content,
help="The different parts of grading for students giving feedback."
)
rubric_assessments = List(
default=DEFAULT_ASSESSMENT_MODULES,
scope=Scope.content,
help="The requested set of assessments and the order in which to apply them."
)
course_id = String(
default=u"TestCourse",
scope=Scope.content,
help="The course_id associated with this prompt (until we can get it from runtime).",
)
def get_xblock_trace(self):
"""Uniquely identify this xblock by context.
Every XBlock has a scope_ids, which is a NamedTuple describing
important contextual information. Per @nedbat, the usage_id attribute
uniquely identifies this block in this course, and the user_id uniquely
identifies this student. With the two of them, we can trace all the
interactions emenating from this interaction.
interactions emanating from this interaction.
Useful for logging, debugging, and uniqueification."""
return (self.scope_ids.usage_id, self.scope_ids.user_id)
return self.scope_ids.usage_id, self.scope_ids.user_id
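# For example, get_xblock_trace() might return
#     ("i4x://edX/Demo/openassessment/xyz", "student_12345")
# (values invented for illustration); the templates below build DOM ids from
# the first element, e.g. id="openassessment_submit_${xblock_trace[0]}".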
def _get_student_item_dict(self):
def get_student_item_dict(self):
"""Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
"""
item_id, student_id = self._get_xblock_trace()
item_id, student_id = self.get_xblock_trace()
student_item_dict = dict(
student_id=student_id,
item_id=item_id,
@@ -271,183 +207,45 @@ class OpenAssessmentBlock(XBlock):
return student_item_dict
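# The resulting dict has roughly this shape (course_id comes from the field
# default above; the item_type value is an assumption, since the hunk elides it):
#     {
#         "student_id": "<user_id from get_xblock_trace()>",
#         "item_id": "<usage_id from get_xblock_trace()>",
#         "course_id": "TestCourse",
#         "item_type": "openassessment",
#     }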
def student_view(self, context=None):
"""The main view of OpenAssessmentBlock, displayed when viewing courses."""
# XXX: For the moment, the initializations and if/else tree below
# embody a rough-and-ready workflow, which will be replaced in the
# beatific future of a proper workflow module. Probably. HACK
trace = self._get_xblock_trace()
student_item_dict = self._get_student_item_dict()
# This user's most recent previous submission
user_submission = self._get_user_submission(student_item_dict)
# This score for this user's user_submission
user_score = self._get_submission_score(student_item_dict, user_submission)
peer_eval = self._hack_get_eval() # HACK: Replace with proper workflow.
peer_submission = self._get_peer_submission(student_item_dict, peer_eval)
if user_submission and peer_submission:
# We've submitted, but not finished assessing. Do assessments.
return self.show_assessment_html(peer_submission, trace)
elif user_submission:
# We've submitted, but there's no assessing to do yet.
return self.show_check_back_html(user_score)
else:
# We haven't submitted, so do that first.
# XXX: In future, we'll support multiple submission and this will be wrong
return self.show_submission_html(trace)
@staticmethod
def _get_user_submission(student_item_dict):
"""Return the most recent submission, if any, by user in student_item_dict"""
submissions = []
try:
submissions = submissions_api.get_submissions(student_item_dict)
except submissions_api.SubmissionRequestError:
# This error is actually ok.
pass
return submissions[0] if submissions else None
@staticmethod
def _get_submission_score(student_item_dict, submission=False):
"""Return the most recent score, if any, for student item"""
scores = False
if submission:
scores = submissions_api.get_score(student_item_dict)
return scores[0] if scores else None
"""The main view of OpenAssessmentBlock, displayed when viewing courses.
"""
@staticmethod
def _get_peer_submission(student_item_dict, peer_eval):
"""Return a peer submission, if any, for user to assess"""
peer_submission = None
try:
peer_submission = peer_api.get_submission_to_evaluate(student_item_dict, peer_eval["must_be_graded_by"])
except peer_api.PeerEvaluationWorkflowError:
# Additional HACK: Without proper workflow, there may not be the
# correct information to complete the request for a peer submission.
# This error should be handled properly once we have a workflow API.
pass
return peer_submission
def show_check_back_html(self, user_score=None):
"""Return HTML saying no peer work to assess, check back later."""
# This looks awful on purpose; XXX: should fix as shiny lands
html = ""
if user_score:
html = u"<div>You've received the following score: %s/%s.</div" % (user_score['points_earned'],
user_score['points_possible'])
html += u"<div>There are no submissions to review. Check back soon.</div>"
return Fragment(html)
def show_assessment_html(self, peer_submission, user_score=None, trace=None):
"""Return HTML for rubric display and assessment solicitation."""
# Submits to assess handler
load = self._load
html = Template(load("static/html/oa_rubric.html"),
default_filters=mako_default_filters,
input_encoding='utf-8',
)
frag = Fragment(html.render_unicode(
xblock_trace=trace,
peer_submission=peer_submission,
rubric_instructions=self.rubric_instructions,
rubric_criteria=self.rubric_criteria,
user_score=user_score
))
frag.add_css(load("static/css/openassessment.css"))
frag.add_javascript(load("static/js/src/oa_assessment.js"))
frag.initialize_js('OpenAssessmentBlock')
return frag
trace = self.get_xblock_trace()
student_item_dict = self.get_student_item_dict()
grade_state = self.get_grade_state()
# All data we intend to pass to the front end.
context_dict = {
"xblock_trace": trace,
"title": self.title,
"question": self.prompt,
"rubric_instructions": self.rubric_instructions,
"rubric_criteria": self.rubric_criteria,
"rubric_assessments": [assessment.create_ui_model() for assessment in self.rubric_assessments],
"grade_state": grade_state,
}
def show_submission_html(self, trace=None):
"""Return HTML for the page prompting the user and soliciting submissions."""
# Submits to submission handler
load = self._load
html = Template(load("static/html/oa_submission.html"),
default_filters=mako_default_filters,
input_encoding='utf-8',
)
frag = Fragment(html.render_unicode(xblock_trace=trace, question=self.prompt))
template = get_template("static/html/oa_base.html")
context = Context(context_dict)
frag = Fragment(template.render(context))
frag.add_css(load("static/css/openassessment.css"))
frag.add_javascript(load("static/js/src/oa_submission.js"))
frag.add_javascript(load("static/js/src/oa_base.js"))
frag.initialize_js('OpenAssessmentBlock')
return frag
@staticmethod
def _load(path):
"""Help get resources from our package kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def _hack_get_eval(self):
# HACK: Forcing Peer Eval, we'll get the Eval config.
# TODO: When this is smarter, remove 'hack' from name
for next_eval in self.rubric_evals:
if next_eval["type"] == "peereval":
return next_eval
@XBlock.json_handler
def assess(self, data, suffix=''):
# HACK: Replace with proper workflow.
peer_eval = self._hack_get_eval()
"""Place an assessment into Openassessment system"""
student_item_dict = self._get_student_item_dict()
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.",
}
evaluation = peer_api.create_evaluation(
data["submission_uuid"],
student_item_dict["student_id"],
int(peer_eval["must_grade"]),
int(peer_eval["must_be_graded_by"]),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
evaluation["scored_at"] = str(evaluation["scored_at"])
return evaluation, "Success"
@XBlock.json_handler
def submit(self, data, suffix=''):
"""
Place the submission text into Openassessment system
"""
status = False
status_tag = 'ENOSUB'
status_text = None
student_sub = data['submission']
student_item_dict = self._get_student_item_dict()
prev_sub = self._get_user_submission(student_item_dict)
if prev_sub:
# It is an error to submit multiple times for the same item
status_tag = 'ENOMULTI'
else:
status_tag = 'ENODATA'
try:
response = submissions_api.create_submission(student_item_dict, student_sub)
except submissions_api.SubmissionRequestError, e:
status_tag = 'EBADFORM'
status_text = unicode(e.field_errors)
except submissions_api.SubmissionError:
status_tag = 'EUNKNOWN'
else:
status = True
status_tag = response.get('student_item')
status_text = response.get('attempt_number')
# relies on success being orthogonal to errors
status_text = status_text if status_text else self.submit_errors[status_tag]
return (status, status_tag, status_text)
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [EXAMPLE_POVERTY_RUBRIC, EXAMPLE_CENSORSHIP_RUBRIC,]
return [
(
"OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml')
),
(
"OpenAssessmentBlock Censorship Rubric",
load('static/xml/censorship_rubric_example.xml')
),
]
@staticmethod
def studio_view(context=None):
@@ -460,6 +258,36 @@ class OpenAssessmentBlock(XBlock):
"""Recursively embed xblocks for nodes we don't recognize"""
block.runtime.add_node_as_child(block, child, id_generator)
block = runtime.construct_xblock_from_class(cls, keys)
sparser = ScenarioParser(block, node, unknown_handler)
block = sparser.parse()
return block
def get_grade_state(self):
# TODO: Determine if we want to build out grade state right now.
grade_state = {
"style_class": "is--incomplete",
"value": "Incomplete",
"title": "Your Grade:",
"message": "You have not started this problem",
}
return grade_state
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
Given the name of an assessment module, find it in the list of
configured modules, and ask for its rendered HTML.
"""
if not context_dict:
context_dict = {}
context_dict["xblock_trace"] = self.get_xblock_trace()
context_dict["rubric_instructions"] = self.rubric_instructions
context_dict["rubric_criteria"] = self.rubric_criteria
template = get_template(path)
context = Context(context_dict)
return Response(template.render(context), content_type='application/html', charset='UTF-8')
from xblock.core import XBlock
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerAssessmentWorkflowError
class PeerAssessmentMixin(object):
@XBlock.json_handler
def assess(self, data, suffix=''):
"""Place an assessment into OpenAssessment system
"""
assessment = self.get_assessment_module('peer-assessment')
if assessment:
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.",
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment.must_grade),
int(assessment.must_be_graded_by),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return assessment, "Success"
@XBlock.handler
def render_peer_assessment(self, data, suffix=''):
assessment = self.get_assessment_module('peer-assessment')
if assessment:
peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
context_dict = {"peer_submission": peer_sub}
return self.render_assessment('static/html/oa_peer_assessment.html', context_dict)
def get_peer_submission(self, student_item_dict, assessment):
peer_submission = False
try:
peer_submission = peer_api.get_submission_to_assess(
student_item_dict, assessment.must_be_graded_by
)
peer_submission = peer_api.get_submission_to_assess(
student_item_dict,
assessment.must_be_graded_by
)
except PeerAssessmentWorkflowError:
# TODO: Log?
pass
return peer_submission
def get_assessment_module(self, mixin_name):
"""Get a configured assessment module by name.
"""
for assessment in self.rubric_assessments:
if assessment.name == mixin_name:
return assessment
# -*- coding: utf-8 -*-
"""XBlock scenario parsing routines"""
from openassessment.xblock.ui_models import PeerAssessmentUIModel, SelfAssessmentUIModel, SubmissionUIModel
class ScenarioParser(object):
"""Utility class to capture parsing of xml from runtime scenarios."""
@@ -22,6 +24,11 @@ class ScenarioParser(object):
"""<prompt>This tells you what you should write about. There should be only one prompt.</prompt>"""
return e.text.strip()
def get_title(self, e):
"""<title>The title of this block</title>
"""
return e.text.strip()
def get_rubric(self, e):
"""<rubric>
This text is general instructions relating to this rubric.
@@ -57,25 +64,35 @@ class ScenarioParser(object):
rubric_criteria.append(crit)
return (e.text.strip(), rubric_criteria)
def get_evals(self, evaluations):
"""<evals>
def get_assessments(self, assessments):
"""<assessments>
<!-- There can be multiple types of assessments given in any
arbitrary order, like this self assessment followed by a
peer assessment -->
<self />
<peereval start="2014-12-20T19:00-7:00"
<self-assessment />
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</evals>"""
return [{'type': ev.tag,
'name': ev.attrib.get('name', ''),
'start_datetime': ev.attrib.get('start', None),
'due_datetime': ev.attrib.get('due', None),
# These attrs are accepted for self, ai evals, but ignored:
'must_grade': int(ev.attrib.get('must_grade', 1) if ev.tag == 'peereval' else 1),
'must_be_graded_by': int(ev.attrib.get('must_be_graded_by', 0) if ev.tag == 'peereval' else 0),
} for ev in evaluations]
</peer-assessment>"""
assessment_list = [SubmissionUIModel()]
for asmnt in assessments:
assessment = None
assessment_type = asmnt.tag
if 'peer-assessment' == assessment_type:
assessment = PeerAssessmentUIModel()
assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
elif 'self-assessment' == assessment_type:
assessment = SelfAssessmentUIModel()
if assessment:
assessment.name = asmnt.attrib.get('name', '')
assessment.start_datetime = asmnt.attrib.get('start', None)
assessment.due_datetime = asmnt.attrib.get('due', None)
assessment_list.append(assessment)
return assessment_list
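# Sketch of what the docstring's example XML parses into: the submission step
# is always prepended, then one UI model per recognized tag (attribute values
# taken from the XML above; unrecognized tags are simply skipped):
def _example_parse_result():
    peer = PeerAssessmentUIModel()
    peer.must_grade = 5
    peer.must_be_graded_by = 3
    peer.start_datetime = "2014-12-20T19:00-7:00"
    peer.due_datetime = "2014-12-21T22:22-7:00"
    return [SubmissionUIModel(), SelfAssessmentUIModel(), peer]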
def parse(self):
"""Instantiate xblock object from runtime XML definition."""
@@ -85,9 +102,10 @@ class ScenarioParser(object):
elif child.tag == 'rubric':
(self.xblock.rubric_instructions,
self.xblock.rubric_criteria) = self.get_rubric(child)
elif child.tag == 'evals':
self.xblock.rubric_evals = self.get_evals(child)
elif child.tag == 'title':
self.xblock.title = self.get_title(child)
elif child.tag == 'assessments':
self.xblock.rubric_assessments = self.get_assessments(child)
else:
self.unknown_handler(self.xblock, child)
return self.xblock
from xblock.core import XBlock
class SelfAssessmentMixin(object):
@XBlock.handler
def render_self_assessment(self, data, suffix=''):
return self.render_assessment('static/html/oa_self_assessment.html')
<!-- TEMPLATE: status messages -->
<!-- NOTES:
* class of message--warning is added when the message is a warning/notification to the user
* class of message--confirmation is added when the message is a confirmation to the user
* class of has--actions is added when a message has actions associated with it for user to interact with
-->
<div class="wrapper wrapper--openassessment theme--basic">
<div class="openassessment" id="openassessment">
<h1 class="openassessment__title">
<span class="openassessment__title--super">
{{ title }}
</span>
<span class="openassessment__title--sub">
<span class="problem-type problem-type--open-ended-response">Open Ended Response</span>
{% for assessment in rubric_assessments %}
+
<span class="problem-type problem-type--{{ assessment.type }}">{{ assessment.name }}</span>
{% endfor %}
</span>
</h1>
<!--?: may have trouble with async -->
<nav class="nav--contents">
<h2 class="title">Skip to a part of this problem:</h2>
<ol class="list list--nav">
{% for assessment in rubric_assessments %}
<li class="list--nav__item">
<a class="action" href="#openassessment__{{ assessment.type }}">{{ assessment.navigation_text }}</a>
</li>
{% endfor %}
<li class="list--nav__item">
<a class="action" href="#openassessment__grade">Your grade for this problem</a>
</li>
</ol>
</nav>
<!-- STATUS: system messages -->
<!-- SEE t-messages.html for all cases -->
<!-- question -->
<div class="wrapper--openassessment__prompt">
<article class="openassessment__prompt ui-toggle-visibility">
<h2 class="openassessment__prompt__title">Open Assessment Problem</h2>
<div class="openassessment__prompt__copy ui-toggle-visibility__content">
<p>{{ question }}</p>
</div>
<ul class="list list--controls">
<li class="list--controls__item">
<a href="" class="action action--toggle ui-toggle-visibility__control">Collapse/Expand This</a>
</li>
</ul>
</article>
</div>
<!-- steps -->
<ol class="openassessment__steps" id="openassessment__steps">
<!-- STEP: response -->
{% for assessment in rubric_assessments %}
<li id="{{ assessment.name }}">{{ assessment.title }}</li>
{% endfor %}
</ol>
<!-- STATUS: problem grade -->
{% include "static/html/oa_grade.html" %}
</div>
</div>
<!-- TEMPLATE: grade status -->
<!-- NOTES:
* class of is--incomplete is added when the problem is in an incomplete state
* class of is--complete is added when problem is successfully completed by user
* class of has--grade is added when a grade (failing or passing) is calculated for user
* classes of needs--* are added when user action is needed
* classes of awaiting--* are added when a user's flow is dependent upon system readiness
-->
<!-- CASE: default/unstarted -->
<div id="openassessment__grade" class="openassessment__grade {{ grade_state.style_class }}">
<h2 class="openassessment__grade__title">{{ grade_state.title }}</h2>
<div class="openassessment__grade__content">
<span class="grade__value">{{ grade_state.value }}</span>
<p>{{ grade_state.message }}</p>
</div>
</div>
<!-- TEMPLATE: peer evaluation -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .peer-evaluation item needs a unique id attribute formatted as #peer-evaluation--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
<div id="peer_submission_uuid" hidden="true">{{ peer_submission.uuid }}</div>
<h2 class="step__title">
<span class="step__label">Evaluate Peers' Responses</span>
<span class="step__deadline">due <span class="date">January 30, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<span class="step__status__value--completed">0</span> of
<span class="step__status__value--required">5</span> completed
</span>
</span>
{#</header>#}
<div class="step__instruction">
<p>Please read and evaluate the following response from one of your peers in the course.</p>
</div>
<div class="step__content">
<ul class="list--peer-assessments">
<li class="list--peer-assessments__item">
<article class="peer-assessment" id="peer-assessment--001">
<header class="peer-assessment__header">
<h3 class="peer-assessment__title">Assessment #
<span class="peer-assessment__number--current">1</span> of
<span class="peer-assessment__number--required">3</span>
</h3>
<span class="peer-assessment__expected-time">
<span class="label">Expected Time Spent:</span>
<span class="value">20 Minutes</span>
</span>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="peer-assessment__response">
{{ peer_submission.answer }}
</div>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
{% endfor %}
</ol>
</li>
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="peer-assessment--001__assessment__submit" class="action action--submit">Submit your assessment &amp; move to response #2</button>
</li>
</ul>
</form>
</article>
</li>
</ul>
</div>
</li>
<!-- TEMPLATE: student response -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* class of is--saved is added when response is saved by system
* class of is--submitted is added when user has formally submitted response
* class of is--graded is added when user's response has proper amount of completed peer evaluations
* class of is--updated is added to the step__status element when a response is saved
-->
<!-- CASE: default/unanswered -->
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility">
<!--header class="step__header ui-toggle-visibility__control"-->
<h2 class="step__title">
<span class="step__label">Your Response</span>
<span class="step__deadline">due <span class="date">January 24, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">Incomplete</span>
</span>
<!--/header-->
<div class="step__instruction">
<p>Please provide your response to the following question. You may save your progress and return to complete your response at any time before the due date of <span class="step__deadline"><span class="date">January 24, 2014</span></span>. <strong class="emphasis--beta">Once you submit, you may not edit your response</strong>.</p>
</div>
<div class="step__content">
<form id="response__submission" class="response__submission">
<ol class="list list--fields">
<li class="field field--textarea submission__answer" id="submission__answer">
<label for="submission__answer__value">Please provide your response to the above question</label>
<textarea id="submission__answer__value" placeholder=""></textarea>
</li>
</ol>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="submission__submit" class="action action--submit submission__submit">Save Your Progress</button>
<span class="tip">you may continue to work on your response until you submit</span>
</li>
</ul>
</form>
</div>
<div class="step__actions">
<ul class="list list--actions">
<li class="list--actions__item">
<a aria-role="button" href="#" id="step--response__submit" class="action action--submit step--response__submit">Submit your response &amp; move forward</a>
</li>
</ul>
</div>
</li>
<!-- START OpenAssessmentBlock HTML -->
% if user_score:
<div>You've received the following score: ${user_score['points_earned']}/${user_score['points_possible']}.</div>
% endif
<div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
<div id="peer_submission_uuid" hidden="true">${peer_submission["uuid"]}</div>
<p>${peer_submission["answer"]}</p>
<p class="openassessment_prompt"
id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p>
% for crit in rubric_criteria:
<div>
<p class="openassessment_prompt">${crit["instructions"]}</p>
% for o in crit['options']:
<div>
<input name="${crit['name']}" type="radio" value="${o[0]}" /> ${o[1]}: ${o[2]}
</div>
% endfor
</div>
% endfor
<input type="button"
class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
</div>
<div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}">
This message should be invisible; please upgrade your browser.
</div>
<!-- END OpenAssessmentBlock HTML -->
<!-- TEMPLATE: self evaluation -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .self-assessment item needs a unique id attribute formatted as #self-assessment--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{# <header class="step__header">#}
<h2 class="step__title">
<span class="step__title__label">Evaluate Your Response</span>
<span class="step__title__deadline">due <span class="date">January 31, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step_status_value">Incomplete</span>
</span>
{# </header>#}
<div class="step--content">
<article class="self-assessment" id="self-assessment">
<header class="self-assessment__header">
<h3 class="self-assessment__title">Your Submitted Response</h3>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="self-assessment__response">
{{ self_submission.answer }}
</div>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
{% endfor %}
</ol>
</li>
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">Submit your assessment</button>
</li>
</ul>
</form>
</article>
</li>
</ul>
</div>
</li>
<!-- START OpenAssessmentBlock HTML -->
<div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
<p class="openassessment_prompt" id="openassessment_question_${xblock_trace[0]}">${question}</p>
<textarea class="openassessment_submission" id="openassessment_submission_${xblock_trace[0]}">Compose your response here</textarea>
<input type="button" class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
</div>
<div class="openassessment_response_status_block" id=openassessment_response_status_block_${xblock_trace[0]}">
This message should be invisible; please upgrade your browser.
</div>
<!-- END OpenAssessmentBlock HTML -->
......@@ -8,8 +8,8 @@ function OpenAssessmentBlock(runtime, element) {
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function prepare_assessment_post(element) {
selector = $("input[type=radio]:checked", element);
values = [];
var selector = $("input[type=radio]:checked", element);
var values = [];
for (i=0; i<selector.length; i++) {
values[i] = selector[i].value;
}
......@@ -17,8 +17,8 @@ function OpenAssessmentBlock(runtime, element) {
}
function displayStatus(result) {
status = result[0]
error_msg = result[1]
var status = result[0];
var error_msg = result[1];
if (status) {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else {
......
/* START Javascript for OpenAssessmentXBlock. */
function OpenAssessmentBlock(runtime, element) {
var handlerUrl = runtime.handlerUrl(element, 'submit');
var renderSubmissionUrl = runtime.handlerUrl(element, 'render_submission');
var renderPeerUrl = runtime.handlerUrl(element, 'render_peer_assessment');
var renderSelfUrl = runtime.handlerUrl(element, 'render_self_assessment');
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
/*
* Submission Functions
*/
function render_submissions(data) {
$('#submission', element).replaceWith(data);
$('#step--response__submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
data: JSON.stringify({"submission": $('#submission__answer__value', element).val()}),
success: function(data) {
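/* On a successful submission POST, re-render the peer-assessment step and then
   re-render the response step so the page reflects the submitted state. */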
$.ajax({
type: "POST",
url: renderPeerUrl,
success: function(data) {
render_peer_assessment(data);
}
});
$.ajax({
type: "POST",
url: renderSubmissionUrl,
success: function(data) {
render_submissions(data);
}
});
}
});
});
}
/*
* Peer Assessment Functions
*/
function render_peer_assessment(data) {
$('#peer-assessment', element).replaceWith(data);
function prepare_assessment_post(element) {
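/* Collect the value of each checked rubric radio button and pair the scores with
   the UUID of the peer submission being assessed. */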
var selector = $("input[type=radio]:checked", element);
var values = [];
for (var i=0; i<selector.length; i++) {
values[i] = selector[i].value;
}
return {"submission_uuid":$("div#peer_submission_uuid")[0].innerText, "points_earned":values};
}
$('#peer-assessment--001__assessment__submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
/* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
data: JSON.stringify(prepare_assessment_post(element)),
success: function(data) {
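/* After the assessment is posted, fetch and render the self-assessment step, then
   re-request the peer-assessment step from the server. */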
$.ajax({
type: "POST",
url: renderSelfUrl,
success: function(data) {
$('#self-assessment', element).replaceWith(data);
}
});
$.ajax({
type: "POST",
url: renderPeerUrl,
success: function(data) {
render_peer_assessment(data);
}
});
}
});
});
}
$(function ($) {
/* Here's where you'd do things on page load. */
$.ajax({
type: "POST",
url: renderSubmissionUrl,
success: function(data) {
render_submissions(data);
}
});
});
}
/* END Javascript for OpenAssessmentXBlock. */
/* START Javascript for OpenassessmentComposeXBlock. */
function OpenAssessmentBlock(runtime, element) {
var handlerUrl = runtime.handlerUrl(element, 'submit');
var success_msg = '<p class="success">Your submission has been received, thank you!</p>';
var failure_msg = '<p class="failure">An error occurred with your submission</p>';
var click_msg = '<p class="clickhere">(click here to dismiss this message)</p>';
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function displayStatus(result) {
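/* result is expected to be the (status, status_tag, status_text) tuple returned
   by the submit handler. */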
var status = result[0];
var error_msg = result[2];
if (status) {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else {
$('.openassessment_response_status_block', element).html(failure_msg.concat(error_msg).concat(click_msg));
}
$('.openassessment_response_status_block', element).css('display', 'block');
}
$('.openassessment_response_status_block', element).click(function(eventObject) {
$('.openassessment_response_status_block', element).css('display', 'none');
});
$('.openassessment_submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}),
success: displayStatus
});
});
$(function ($) {
/* Here's where you'd do things on page load. */
$(element).css('background-color', 'LightBlue');
});
}
/* END Javascript for OpenassessmentComposeXBlock. */
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<assessments>
<self-assessment name="self-assessment" />
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. For example: "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<assessments>
<peer-assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
from xblock.core import XBlock
from submissions import api
class SubmissionMixin(object):
submit_errors = {
# Reported to user sometimes, and useful in tests
'ENOSUB': 'API submission is unrequested',
'ENODATA': 'API returned an empty response',
'EBADFORM': 'API Submission Request Error',
'EUNKNOWN': 'API returned unclassified exception',
'ENOMULTI': 'Multiple submissions are not allowed for this item',
}
@XBlock.json_handler
def submit(self, data, suffix=''):
"""
Place the submission text into the Open Assessment system.
"""
status = False
status_tag = 'ENOSUB'
status_text = None
student_sub = data['submission']
student_item_dict = self.get_student_item_dict()
prev_sub = self._get_user_submission(student_item_dict)
if prev_sub:
# It is an error to submit multiple times for the same item
status_tag = 'ENOMULTI'
else:
status_tag = 'ENODATA'
try:
response = api.create_submission(student_item_dict, student_sub)
except api.SubmissionRequestError, e:
status_tag = 'EBADFORM'
status_text = unicode(e.field_errors)
except api.SubmissionError:
status_tag = 'EUNKNOWN'
else:
status = True
status_tag = response.get('student_item')
status_text = response.get('attempt_number')
# relies on success being orthogonal to errors
status_text = status_text if status_text else self.submit_errors[status_tag]
return status, status_tag, status_text
@staticmethod
def _get_submission_score(student_item_dict, submission=False):
"""Return the most recent score, if any, for student item"""
scores = False
if submission:
scores = api.get_score(student_item_dict)
return scores[0] if scores else None
@staticmethod
def _get_user_submission(student_item_dict):
"""Return the most recent submission, if any, by user in student_item_dict"""
submissions = []
try:
submissions = api.get_submissions(student_item_dict)
except api.SubmissionRequestError:
# This error is actually ok.
pass
return submissions[0] if submissions else None
@XBlock.handler
def render_submission(self, data, suffix=''):
return self.render_assessment('static/html/oa_response.html')
......@@ -7,6 +7,7 @@ import webob
from django.test import TestCase
from mock import patch
from openassessment.xblock.submission_mixin import SubmissionMixin
from submissions import api
from submissions.api import SubmissionRequestError, SubmissionInternalError
......@@ -46,13 +47,14 @@ RUBRIC_CONFIG = """
<option val="5">The Elements of Style</option>
</criterion>
</rubric>
<evals>
<peereval start="2014-12-20T19:00-7:00"
<assessments>
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<selfeval/>
</evals>
<self-assessment/>
</assessments>
</openassessment>
"""
......@@ -114,7 +116,7 @@ class TestOpenAssessment(TestCase):
result = json.loads(resp.body)
self.assertFalse(result[0])
self.assertEqual(result[1], "EUNKNOWN")
self.assertEqual(result[2], self.assessment.submit_errors["EUNKNOWN"])
self.assertEqual(result[2], SubmissionMixin().submit_errors["EUNKNOWN"])
@patch.object(api, 'create_submission')
def test_submission_API_failure(self, mock_submit):
......
......@@ -61,34 +61,37 @@ class TestScenarioParser(TestCase):
self.assertEqual(int(criterion_option_value), 99)
self.assertEqual(criterion_explanation, criterion_option_explain_text)
def test_get_evals(self):
"""Given an <evals> list, return a list of evaluations."""
evals = """<evals>
<selfeval name='0382e03c808e4f2bb12dfdd2d45d5c4b'
def test_get_assessments(self):
"""Given an <assessments> list, return a list of assessment modules."""
assessments = """<assessments>
<self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
must_grade="999"
must_be_graded_by="73" />
<peereval start="2014-12-20T19:00-7:00"
<peer-assessment start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<selfeval />
</evals>"""
evals_xml = etree.fromstring(evals)
parsed_list = self.test_parser.get_evals(evals_xml)
# Self evaluations take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[0]['type'], 'selfeval')
self.assertEqual(parsed_list[0]['name'], '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[0]['must_grade'], 1)
self.assertEqual(parsed_list[0]['must_be_graded_by'], 0)
# Peer evaluations are more interesting
self.assertEqual(parsed_list[1]['type'], 'peereval')
self.assertEqual(parsed_list[1]['name'], '')
self.assertEqual(parsed_list[1]['must_grade'], 5)
self.assertEqual(parsed_list[1]['must_be_graded_by'], 3)
# We can parse arbitrary workflow descriptions as a list of evaluations.
<self-assessment />
</assessments>"""
assessments_xml = etree.fromstring(assessments)
parsed_list = self.test_parser.get_assessments(assessments_xml)
# The parser prepends a submission step, so capture it in the tests as well.
self.assertEqual(parsed_list[0].assessment_type, 'submission')
# Self assessments take all the parameters, but mostly ignore them.
self.assertEqual(parsed_list[1].assessment_type, 'self-assessment')
self.assertEqual(parsed_list[1].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
self.assertEqual(parsed_list[1].must_grade, 1)
self.assertEqual(parsed_list[1].must_be_graded_by, 0)
# Peer assessments are more interesting
self.assertEqual(parsed_list[2].assessment_type, 'peer-assessment')
self.assertEqual(parsed_list[2].name, '')
self.assertEqual(parsed_list[2].must_grade, 5)
self.assertEqual(parsed_list[2].must_be_graded_by, 3)
# We can parse arbitrary workflow descriptions as a list of assessments.
# Whether or not the workflow system can use them is another matter
self.assertEqual(parsed_list[2]['type'], 'selfeval')
self.assertEqual(parsed_list[3].assessment_type, 'self-assessment')
class SubmissionUIModel(object):
def __init__(self):
self.assessment_type = "submission"
self.name = "submission"
self.navigation_text = "Your response to this problem"
self.title = "Your Response"
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"navigation_text": self.navigation_text,
"title": self.title
}
class AssessmentUIModel(object):
def __init__(self):
self.assessment_type = None
self.name = ''
self.start_datetime = None
self.due_datetime = None
self.must_grade = 1
self.must_be_graded_by = 0
self.navigation_text = ""
self.title = ""
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"start_datetime": self.start_datetime,
"due_datetime": self.due_datetime,
"must_grade": self.must_grade,
"must_be_graded_by": self.must_be_graded_by,
"navigation_text": self.navigation_text,
"title": self.title
}
class PeerAssessmentUIModel(AssessmentUIModel):
def __init__(self):
super(PeerAssessmentUIModel, self).__init__()
self.assessment_type = "peer-assessment"
self.title = "Assess Peers' Responses"
self.navigation_text = "Your assessment(s) of peer responses"
class SelfAssessmentUIModel(AssessmentUIModel):
def __init__(self):
super(SelfAssessmentUIModel, self).__init__()
self.assessment_type = "self-assessment"
self.navigation_text = "Your assessment of your response"
self.title = "Assess Your Response"
\ No newline at end of file
......@@ -59,15 +59,15 @@ class SubmissionRequestError(SubmissionError):
def create_submission(student_item_dict, answer, submitted_at=None,
attempt_number=None):
"""Creates a submission for evaluation.
"""Creates a submission for assessment.
Generic means by which to submit an answer for evaluation.
Generic means by which to submit an answer for assessment.
Args:
student_item_dict (dict): The student_item this
submission is associated with. This is used to determine which
course, student, and location this submission belongs to.
answer (str): The answer given by the student to be evaluated.
answer (str): The answer given by the student to be assessed.
submitted_at (datetime): The date in which this submission was submitted.
If not specified, defaults to the current date.
attempt_number (int): A student may be able to submit multiple attempts
......
......@@ -116,6 +116,8 @@ ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
"apps/submissions/templates",
"apps/openassessment/peer/templates",
"apps/openassessment/xblock",
)
INSTALLED_APPS = (
......