Commit e5d1d4bd by Will Daly

Merge pull request #463 from edx/will/0-option-criterion

Feedback-only criteria
parents 14bc13f3 8b54300f
@@ -91,8 +91,8 @@ class AssessmentAdmin(admin.ModelAdmin):
                 u"{}/{} - {}: {} - {}".format(
                     part.points_earned,
                     part.points_possible,
-                    part.option.criterion.name,
-                    part.option.name,
+                    part.criterion.name,
+                    part.option.name if part.option else "None",
                     part.feedback,
                 )
             )
......
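Because an assessment part may now belong to a feedback-only criterion, it can have no selected option, and the admin summary falls back to the string "None". A minimal standalone sketch of that null-safe formatting, using hypothetical plain-dict data rather than the Django models:

# Standalone sketch with hypothetical dict data (not the edx-ora2 models):
# a feedback-only part has no selected option, so fall back to "None".
part = {
    "points_earned": 0,
    "points_possible": 0,
    "criterion": "feedback only",
    "option": None,
    "feedback": "Nice work",
}
label = u"{}/{} - {}: {} - {}".format(
    part["points_earned"],
    part["points_possible"],
    part["criterion"],
    part["option"] if part["option"] else "None",
    part["feedback"],
)
assert label == u"0/0 - feedback only: None - Nice work"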
@@ -14,7 +14,7 @@ from openassessment.assessment.errors import (
 )
 from openassessment.assessment.models import (
     Assessment, AITrainingWorkflow, AIGradingWorkflow,
-    InvalidOptionSelection, NoTrainingExamples,
+    InvalidRubricSelection, NoTrainingExamples,
     AI_ASSESSMENT_TYPE, AIClassifierSet
 )
 from openassessment.assessment.worker import training as training_tasks
@@ -268,7 +268,7 @@ def train_classifiers(rubric_dict, examples, course_id, item_id, algorithm_id):
     # Get or create the rubric and training examples
     try:
         examples = deserialize_training_examples(examples, rubric_dict)
-    except (InvalidRubric, InvalidTrainingExample, InvalidOptionSelection) as ex:
+    except (InvalidRubric, InvalidTrainingExample, InvalidRubricSelection) as ex:
         msg = u"Could not parse rubric and/or training examples: {ex}".format(ex=ex)
         raise AITrainingRequestError(msg)
......
@@ -8,7 +8,8 @@ from dogapi import dog_stats_api
 from openassessment.assessment.models import (
     AITrainingWorkflow, AIGradingWorkflow,
     ClassifierUploadError, ClassifierSerializeError,
-    IncompleteClassifierSet, NoTrainingExamples
+    IncompleteClassifierSet, NoTrainingExamples,
+    InvalidRubricSelection
 )
 from openassessment.assessment.errors import (
     AITrainingRequestError, AITrainingInternalError,
@@ -274,7 +275,7 @@ def create_classifiers(training_workflow_uuid, classifier_set):
     except NoTrainingExamples as ex:
         logger.exception(ex)
         raise AITrainingInternalError(ex)
-    except IncompleteClassifierSet as ex:
+    except (IncompleteClassifierSet, InvalidRubricSelection) as ex:
         msg = (
             u"An error occurred while creating the classifier set "
             u"for the training workflow with UUID {uuid}: {ex}"
......
@@ -11,11 +11,12 @@ from dogapi import dog_stats_api
 from openassessment.assessment.models import (
     Assessment, AssessmentFeedback, AssessmentPart,
-    InvalidOptionSelection, PeerWorkflow, PeerWorkflowItem,
+    InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem,
 )
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, AssessmentFeedbackSerializer, RubricSerializer,
+    AssessmentFeedbackSerializer, RubricSerializer,
     full_assessment_dict, rubric_from_dict, serialize_assessments,
+    InvalidRubric
 )
 from openassessment.assessment.errors import (
     PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
@@ -192,14 +193,15 @@ def get_score(submission_uuid, requirements):
 def create_assessment(
         scorer_submission_uuid,
         scorer_id,
         options_selected,
         criterion_feedback,
         overall_feedback,
         rubric_dict,
         num_required_grades,
-        scored_at=None):
+        scored_at=None
+):
     """Creates an assessment on the given submission.
     Assessments are created based on feedback associated with a particular
@@ -244,24 +246,9 @@ def create_assessment(
     >>> feedback = "Your submission was thrilling."
     >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
     """
-    # Ensure that this variables is declared so if an error occurs
-    # we don't get an error when trying to log it!
-    assessment_dict = None
     try:
-        rubric = rubric_from_dict(rubric_dict)
-        # Validate that the selected options matched the rubric
-        # and raise an error if this is not the case
-        try:
-            option_ids = rubric.options_ids(options_selected)
-        except InvalidOptionSelection:
-            msg = "Selected options do not match the rubric"
-            logger.warning(msg, exc_info=True)
-            raise PeerAssessmentRequestError(msg)
+        # Retrieve workflow information
         scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
         peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
         if peer_workflow_item is None:
             message = (
@@ -270,55 +257,50 @@ def create_assessment(
             ).format(scorer_submission_uuid)
             logger.warning(message)
             raise PeerAssessmentWorkflowError(message)
         peer_submission_uuid = peer_workflow_item.author.submission_uuid
-        peer_assessment = {
-            "rubric": rubric.id,
-            "scorer_id": scorer_id,
-            "submission_uuid": peer_submission_uuid,
-            "score_type": PEER_TYPE,
-            "feedback": overall_feedback[0:Assessment.MAXSIZE],
-        }
-        if scored_at is not None:
-            peer_assessment["scored_at"] = scored_at
-        peer_serializer = AssessmentSerializer(data=peer_assessment)
-        if not peer_serializer.is_valid():
-            msg = (
-                u"An error occurred while serializing "
-                u"the peer assessment associated with "
-                u"the scorer's submission UUID {}."
-            ).format(scorer_submission_uuid)
-            raise PeerAssessmentRequestError(msg)
-        assessment = peer_serializer.save()
-        # We do this to do a run around django-rest-framework serializer
-        # validation, which would otherwise require two DB queries per
-        # option to do validation. We already validated these options above.
-        AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)
+        # Get or create the rubric
+        rubric = rubric_from_dict(rubric_dict)
+        # Create the peer assessment
+        assessment = Assessment.create(
+            rubric,
+            scorer_id,
+            peer_submission_uuid,
+            PEER_TYPE,
+            scored_at=scored_at,
+            feedback=overall_feedback
+        )
+        # Create assessment parts for each criterion in the rubric
+        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
+        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
         # Close the active assessment
         scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
         _log_assessment(assessment, scorer_workflow)
-        return assessment_dict
+        return full_assessment_dict(assessment)
-    except DatabaseError:
-        error_message = (
-            u"An error occurred while creating assessment {} by: {}"
-        ).format(assessment_dict, scorer_id)
-        logger.exception(error_message)
-        raise PeerAssessmentInternalError(error_message)
     except PeerWorkflow.DoesNotExist:
         message = (
             u"There is no Peer Workflow associated with the given "
             u"submission UUID {}."
         ).format(scorer_submission_uuid)
-        logger.error(message)
+        logger.exception(message)
         raise PeerAssessmentWorkflowError(message)
+    except InvalidRubric:
+        msg = u"Rubric definition was not valid"
+        logger.exception(msg)
+        raise PeerAssessmentRequestError(msg)
+    except InvalidRubricSelection:
+        msg = u"Invalid options selected in the rubric"
+        logger.warning(msg, exc_info=True)
+        raise PeerAssessmentRequestError(msg)
+    except DatabaseError:
+        error_message = (
+            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
+        ).format(scorer_id)
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message)

 def get_rubric_max_scores(submission_uuid):
......
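The peer API now delegates option matching to `AssessmentPart.create_from_option_names`, which raises `InvalidRubricSelection` when the selection does not fit the rubric. A rough standalone mirror of that rule (plain dicts and a hypothetical `check_selection` helper, not the edx-ora2 models), based on the behaviour the tests later in this commit exercise: criteria with options need a selected option, while feedback-only criteria need written feedback instead.

# Standalone sketch (hypothetical helper, not the edx-ora2 implementation).
class InvalidRubricSelection(Exception):
    pass

def check_selection(rubric_criteria, options_selected, criterion_feedback):
    # Every criterion with options must have a selection; feedback-only
    # criteria must instead receive written feedback.
    for criterion in rubric_criteria:
        name = criterion["name"]
        if criterion["options"]:
            if name not in options_selected:
                raise InvalidRubricSelection("No option selected for " + name)
        elif name not in criterion_feedback:
            raise InvalidRubricSelection(
                "No feedback provided for feedback-only criterion " + name
            )

criteria = [
    {"name": "vocabulary", "options": ["poor", "good"]},
    {"name": "feedback only", "options": []},
]
check_selection(criteria, {"vocabulary": "good"}, {"feedback only": "Nice work"})  # passes
try:
    check_selection(criteria, {"vocabulary": "good"}, {})  # feedback-only criterion left empty
except InvalidRubricSelection as err:
    print(err)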
@@ -7,11 +7,10 @@ from dogapi import dog_stats_api
 from submissions.api import get_submission_and_student, SubmissionNotFoundError
 from openassessment.assessment.serializers import (
-    AssessmentSerializer, InvalidRubric,
-    full_assessment_dict, rubric_from_dict, serialize_assessments
+    InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
 )
 from openassessment.assessment.models import (
-    Assessment, AssessmentPart, InvalidOptionSelection
+    Assessment, AssessmentPart, InvalidRubricSelection
 )
 from openassessment.assessment.errors import (
     SelfAssessmentRequestError, SelfAssessmentInternalError
@@ -139,50 +138,25 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
         ).format(uuid=submission_uuid)
         raise SelfAssessmentRequestError()
-    # Get or create the rubric
     try:
+        # Get or create the rubric
         rubric = rubric_from_dict(rubric_dict)
-        option_ids = rubric.options_ids(options_selected)
+        # Create the self assessment
+        assessment = Assessment.create(rubric, user_id, submission_uuid, SELF_TYPE, scored_at=scored_at)
+        AssessmentPart.create_from_option_names(assessment, options_selected)
+        _log_assessment(assessment, submission)
     except InvalidRubric:
         msg = "Invalid rubric definition"
         logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
-    except InvalidOptionSelection:
+    except InvalidRubricSelection:
         msg = "Selected options do not match the rubric"
         logger.warning(msg, exc_info=True)
         raise SelfAssessmentRequestError(msg)
-    # Create the assessment
-    # Since we have already retrieved the submission, we can assume that
-    # the user who created the submission exists.
-    self_assessment = {
-        "rubric": rubric.id,
-        "scorer_id": user_id,
-        "submission_uuid": submission_uuid,
-        "score_type": SELF_TYPE,
-        "feedback": u"",
-    }
-    if scored_at is not None:
-        self_assessment['scored_at'] = scored_at
-    # Serialize the assessment
-    serializer = AssessmentSerializer(data=self_assessment)
-    if not serializer.is_valid():
-        msg = "Could not create self assessment: {errors}".format(errors=serializer.errors)
-        raise SelfAssessmentRequestError(msg)
-    assessment = serializer.save()
-    # We do this to do a run around django-rest-framework serializer
-    # validation, which would otherwise require two DB queries per
-    # option to do validation. We already validated these options above.
-    AssessmentPart.add_to_assessment(assessment, option_ids)
-    assessment_dict = full_assessment_dict(assessment)
-    _log_assessment(assessment, submission)
     # Return the serialized assessment
-    return assessment_dict
+    return full_assessment_dict(assessment)

 def get_assessment(submission_uuid):
......
@@ -10,7 +10,7 @@ import logging
 from django.utils.translation import ugettext as _
 from django.db import DatabaseError
 from submissions import api as sub_api
-from openassessment.assessment.models import StudentTrainingWorkflow
+from openassessment.assessment.models import StudentTrainingWorkflow, InvalidRubricSelection
 from openassessment.assessment.serializers import (
     deserialize_training_examples, serialize_training_example,
     validate_training_example_format,
@@ -179,6 +179,21 @@ def validate_training_examples(rubric, examples):
         logger.warning("Could not parse serialized rubric", exc_info=True)
         return [_(u"Could not parse serialized rubric")]
+    # Check that at least one criterion in the rubric has options
+    # If this is not the case (that is, if all rubric criteria are written feedback only),
+    # then it doesn't make sense to do student training.
+    criteria_without_options = [
+        criterion_name
+        for criterion_name, criterion_option_list in criteria_options.iteritems()
+        if len(criterion_option_list) == 0
+    ]
+    if len(set(criteria_options) - set(criteria_without_options)) == 0:
+        return [_(
+            u"When you include a student training assessment, "
+            u"the rubric for the assessment must contain at least one criterion, "
+            u"and each criterion must contain at least two options."
+        )]
     # Check each example
     for order_num, example_dict in enumerate(examples, start=1):
@@ -219,7 +234,9 @@ def validate_training_examples(rubric, examples):
             errors.append(msg)
         # Check for missing criteria
-        for missing_criterion in set(criteria_options.keys()) - set(options_selected.keys()):
+        # Ignore options
+        all_example_criteria = set(options_selected.keys() + criteria_without_options)
+        for missing_criterion in set(criteria_options.keys()) - all_example_criteria:
             msg = _(
                 u"Example {example_number} is missing an option "
                 u"for \"{criterion_name}\""
@@ -353,7 +370,7 @@ def get_training_example(submission_uuid, rubric, examples):
         # If the student already started a training example, then return that instead.
         next_example = workflow.next_training_example(examples)
         return None if next_example is None else serialize_training_example(next_example)
-    except (InvalidRubric, InvalidTrainingExample) as ex:
+    except (InvalidRubric, InvalidRubricSelection, InvalidTrainingExample) as ex:
         logger.exception(
             "Could not deserialize training examples for submission UUID {}".format(submission_uuid)
         )
......
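The new guard in `validate_training_examples` rejects rubrics whose criteria are all feedback-only, since there would be nothing for students to practise selecting. A standalone sketch of that check with plain dicts (not the edx-ora2 code, which uses the serialized rubric and `iteritems()` under Python 2):

# Standalone sketch (plain dicts, not the edx-ora2 implementation).
criteria_options = {
    "vocabulary": ["poor", "good"],
    "feedback only": [],
}
criteria_without_options = [
    name for name, options in criteria_options.items() if len(options) == 0
]
if len(set(criteria_options) - set(criteria_without_options)) == 0:
    print("Error: every criterion is feedback-only; student training needs "
          "at least one criterion with options.")
else:
    print("OK, criteria with options:",
          sorted(set(criteria_options) - set(criteria_without_options)))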
@@ -12,7 +12,6 @@ from django.utils.timezone import now
 from django_extensions.db.fields import UUIDField
 from dogapi import dog_stats_api
 from submissions import api as sub_api
-from openassessment.assessment.serializers import rubric_from_dict
 from .base import Rubric, Criterion, Assessment, AssessmentPart
 from .training import TrainingExample
@@ -45,16 +44,14 @@ class IncompleteClassifierSet(Exception):
     """
     The classifier set is missing a classifier for a criterion in the rubric.
     """
-    def __init__(self, expected_criteria, actual_criteria):
+    def __init__(self, missing_criteria):
         """
         Construct an error message that explains which criteria were missing.
         Args:
-            expected_criteria (iterable of unicode): The criteria in the rubric.
-            actual_criteria (iterable of unicode): The criteria specified by the classifier set.
+            missing_criteria (list): The list of criteria names that were missing.
         """
-        missing_criteria = set(expected_criteria) - set(actual_criteria)
         msg = (
             u"Missing classifiers for the following "
             u"criteria: {missing}"
@@ -136,6 +133,7 @@ class AIClassifierSet(models.Model):
         Raises:
             ClassifierSerializeError
             ClassifierUploadError
+            InvalidRubricSelection
             DatabaseError
         """
@@ -146,12 +144,8 @@ class AIClassifierSet(models.Model):
         # Retrieve the criteria for this rubric,
         # then organize them by criterion name
         try:
-            criteria = {
-                criterion.name: criterion
-                for criterion in Criterion.objects.filter(rubric=rubric)
-            }
+            rubric_index = rubric.index
         except DatabaseError as ex:
             msg = (
                 u"An unexpected error occurred while retrieving rubric criteria with the"
@@ -161,15 +155,22 @@ class AIClassifierSet(models.Model):
             raise
         # Check that we have classifiers for all criteria in the rubric
-        if set(criteria.keys()) != set(classifiers_dict.keys()):
-            raise IncompleteClassifierSet(criteria.keys(), classifiers_dict.keys())
+        # Ignore criteria that have no options: since these have only written feedback,
+        # we can't assign them a score.
+        all_criteria = set(classifiers_dict.keys())
+        all_criteria |= set(
+            criterion.name for criterion in
+            rubric_index.find_criteria_without_options()
+        )
+        missing_criteria = rubric_index.find_missing_criteria(all_criteria)
+        if missing_criteria:
+            raise IncompleteClassifierSet(missing_criteria)
         # Create classifiers for each criterion
         for criterion_name, classifier_data in classifiers_dict.iteritems():
-            criterion = criteria.get(criterion_name)
             classifier = AIClassifier.objects.create(
                 classifier_set=classifier_set,
-                criterion=criterion
+                criterion=rubric_index.find_criterion(criterion_name)
             )
             # Serialize the classifier data and upload
@@ -279,7 +280,6 @@ class AIClassifierSet(models.Model):
         Returns:
             dict: keys are criteria names, values are JSON-serializable classifier data
-            If there are no classifiers in the set, returns None
         Raises:
             ValueError
@@ -328,7 +328,7 @@ class AIClassifierSet(models.Model):
             ).format(key=cache_key)
             logger.info(msg)
-        return classifiers_dict if classifiers_dict else None
+        return classifiers_dict
     @property
     def valid_scores_by_criterion(self):
@@ -698,6 +698,7 @@ class AITrainingWorkflow(AIWorkflow):
             IncompleteClassifierSet
             ClassifierSerializeError
             ClassifierUploadError
+            InvalidRubricSelection
             DatabaseError
         """
         self.classifier_set = AIClassifierSet.create_classifier_set(
@@ -788,6 +789,7 @@ class AIGradingWorkflow(AIWorkflow):
         submission = sub_api.get_submission_and_student(submission_uuid)
         # Get or create the rubric
+        from openassessment.assessment.serializers import rubric_from_dict
         rubric = rubric_from_dict(rubric_dict)
         # Retrieve the submission text
@@ -828,18 +830,12 @@ class AIGradingWorkflow(AIWorkflow):
             criterion_scores (dict): Dictionary mapping criteria names to integer scores.
         Raises:
+            InvalidRubricSelection
             DatabaseError
         """
-        assessment = Assessment.objects.create(
-            submission_uuid=self.submission_uuid,
-            rubric=self.rubric,
-            scorer_id=self.algorithm_id,
-            score_type=AI_ASSESSMENT_TYPE
+        self.assessment = Assessment.create(
+            self.rubric, self.algorithm_id, self.submission_uuid, AI_ASSESSMENT_TYPE
         )
-        option_ids = self.rubric.options_ids_for_points(criterion_scores)
-        AssessmentPart.add_to_assessment(assessment, option_ids)
-        self.assessment = assessment
+        AssessmentPart.create_from_option_points(self.assessment, criterion_scores)
         self.mark_complete_and_save()
@@ -39,6 +39,9 @@ class TrainingExample(models.Model):
         Returns:
             TrainingExample
+        Raises:
+            InvalidRubricSelection
         """
         content_hash = cls.calculate_hash(answer, options_selected, rubric)
         example = TrainingExample.objects.create(
@@ -46,11 +49,12 @@
             raw_answer=json.dumps(answer),
             rubric=rubric
         )
-        options_ids = rubric.options_ids(options_selected)
-        for option in CriterionOption.objects.filter(pk__in=list(options_ids)):
+        # This will raise `InvalidRubricSelection` if the selected options
+        # do not match the rubric.
+        for criterion_name, option_name in options_selected.iteritems():
+            option = rubric.index.find_option(criterion_name, option_name)
             example.options_selected.add(option)
         return example
     @property
......
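The classifier-set check now ignores feedback-only criteria: they carry no options, so no classifier can score them, and they should not count as "missing". A standalone sketch of that set arithmetic with plain data (not the Django models or the `rubric.index` helper):

# Standalone sketch (hypothetical data, not the edx-ora2 models).
rubric_criteria = {
    "vocabulary": ["poor", "good"],
    "grammar": ["poor", "good"],
    "feedback only": [],          # no options: written feedback only
}
classifiers_dict = {"vocabulary": {"weights": "..."}}

criteria_without_options = {
    name for name, options in rubric_criteria.items() if not options
}
# Criteria covered either by a classifier or by being feedback-only
covered = set(classifiers_dict) | criteria_without_options
missing_criteria = set(rubric_criteria) - covered
print(sorted(missing_criteria))  # ['grammar'] -> IncompleteClassifierSet would be raised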
@@ -75,15 +75,6 @@ class CriterionSerializer(NestedModelSerializer):
         model = Criterion
         fields = ('order_num', 'name', 'prompt', 'options', 'points_possible')
-    def validate_options(self, attrs, source):
-        """Make sure we have at least one CriterionOption in a Criterion."""
-        options = attrs[source]
-        if not options:
-            raise serializers.ValidationError(
-                "Criterion must have at least one option."
-            )
-        return attrs

 class RubricSerializer(NestedModelSerializer):
     """Serializer for :class:`Rubric`."""
@@ -150,7 +141,7 @@ class AssessmentPartSerializer(serializers.ModelSerializer):
     class Meta:
         model = AssessmentPart
-        fields = ('option', 'feedback')
+        fields = ('option', 'criterion', 'feedback')

 class AssessmentSerializer(serializers.ModelSerializer):
@@ -219,12 +210,15 @@ def full_assessment_dict(assessment, rubric_dict=None):
     # `CriterionOption` again, we simply index into the places we expect them to
     # be from the big, saved `Rubric` serialization.
     parts = []
-    for part in assessment.parts.all().select_related("option__criterion"):
-        criterion_dict = rubric_dict["criteria"][part.option.criterion.order_num]
-        options_dict = criterion_dict["options"][part.option.order_num]
-        options_dict["criterion"] = criterion_dict
+    for part in assessment.parts.all().select_related("criterion", "option"):
+        criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
+        options_dict = None
+        if part.option is not None:
+            options_dict = criterion_dict["options"][part.option.order_num]
+            options_dict["criterion"] = criterion_dict
         parts.append({
             "option": options_dict,
+            "criterion": criterion_dict,
             "feedback": part.feedback
         })
@@ -232,7 +226,9 @@ def full_assessment_dict(assessment, rubric_dict=None):
     # `Assessment` so we can again avoid DB calls.
     assessment_dict["parts"] = parts
     assessment_dict["points_earned"] = sum(
-        part_dict["option"]["points"] for part_dict in parts
+        part_dict["option"]["points"]
+        if part_dict["option"] is not None else 0
+        for part_dict in parts
     )
     assessment_dict["points_possible"] = rubric_dict["points_possible"]
......
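With feedback-only criteria, a serialized assessment part can have `"option": None`, so the point total treats such parts as zero. A standalone sketch of the same summation over plain dicts (not the serializer itself):

# Standalone sketch (plain dicts, hypothetical data).
parts = [
    {"criterion": {"name": "vocabulary"}, "option": {"points": 2}, "feedback": ""},
    {"criterion": {"name": "feedback only"}, "option": None, "feedback": "Nice work"},
]
points_earned = sum(
    part["option"]["points"] if part["option"] is not None else 0
    for part in parts
)
assert points_earned == 2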
@@ -80,6 +80,7 @@ def deserialize_training_examples(examples, rubric_dict):
     Raises:
         InvalidRubric
+        InvalidRubricSelection
         InvalidTrainingExample
     Example usage:
......
@@ -474,5 +474,110 @@
            "Example 3 has a validation error: Training example must contain an \"answer\" field.",
            "Example 3 has a validation error: Training example must contain an \"options_selected\" field."
        ]
},
"feedback_only_criterion": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": { "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭" }
}
],
"errors": []
},
"feedback_only_criterion_extra_score": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 0,
"name": "vøȼȺƀᵾłȺɍɏ",
"prompt": "Ħøw vȺɍɨɇđ ɨs ŧħɇ vøȼȺƀᵾłȺɍɏ?",
"options": [
{
"order_num": 0,
"name": "𝒑𝒐𝒐𝒓",
"explanation": "𝕻𝖔𝖔𝖗 𝖏𝖔𝖇!",
"points": 0
},
{
"order_num": 1,
"name": "𝓰𝓸𝓸𝓭",
"explanation": "ﻭѻѻɗ ﻝѻ๒!",
"points": 1
}
]
},
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {
"vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
"feedback only": "𝓰𝓸𝓸𝓭"
}
}
],
"errors": ["Example 1 has an invalid option for \"feedback only\": \"𝓰𝓸𝓸𝓭\""]
},
"feedback_only_all_criteria": {
"rubric": {
"prompt": "𝓣𝓮𝓼𝓽 𝓹𝓻𝓸𝓶𝓹𝓽",
"criteria": [
{
"order_num": 1,
"name": "feedback only",
"prompt": "feedback only",
"options": []
}
]
},
"examples": [
{
"answer": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"options_selected": {}
}
],
"errors": ["When you include a student training assessment, the rubric for the assessment must contain at least one criterion, and each criterion must contain at least two options."]
    }
}
# coding=utf-8
"""
Tests for the assessment Django models.
"""
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
class AssessmentTest(CacheResetTest):
"""
Tests for the `Assessment` and `AssessmentPart` models.
"""
def test_create_with_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# We can't select an option for the last criterion, but we do
# provide written feedback.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
def test_create_with_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, each of which are feedback-only (no points)
selected = {}
feedback = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(
assessment, selected, feedback=feedback
)
# Check the score (should be 0, since we haven't selected any points)
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_create_from_option_points_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts by providing scores for options
# but NO feedback. This simulates how an example-based AI
# assessment is created.
selected = {
u"vøȼȺƀᵾłȺɍɏ": 2,
u"ﻭɼค๓๓คɼ": 1,
}
AssessmentPart.create_from_option_points(assessment, selected)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text (should default to an empty string)
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_create_from_option_points_all_feedback_only_criteria(self):
rubric = self._rubric_with_all_feedback_only_criteria()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Since there are no criteria with options, and we're not
# providing written feedback, pass in an empty selection.
selected = {}
AssessmentPart.create_from_option_points(assessment, selected)
# Score should be zero, since none of the criteria have options
self.assertEqual(assessment.points_earned, 0)
self.assertEqual(assessment.points_possible, 0)
def test_default_feedback_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts, but do NOT provide any feedback
# This simulates how non-peer assessments are created
# Note that this is different from providing an empty feedback dict;
# here, we're not providing the `feedback` kwarg at all.
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
AssessmentPart.create_from_option_names(assessment, selected)
# Check the score (the feedback-only assessment should count for 0 points)
self.assertEqual(assessment.points_earned, 3)
self.assertEqual(assessment.points_possible, 4)
# Check the feedback text, which should default to an empty string
feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
self.assertEqual(feedback_only.feedback, u"")
def test_no_feedback_provided_for_feedback_only_criterion(self):
rubric = self._rubric_with_one_feedback_only_criterion()
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
# Create assessment parts
# Do NOT provide feedback for the feedback-only criterion
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {}
# Expect an error when we try to create the assessment parts
with self.assertRaises(InvalidRubricSelection):
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
def _rubric_with_one_feedback_only_criterion(self):
"""Create a rubric with one feedback-only criterion."""
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
"order_num": 2,
"name": u"feedback",
"prompt": u"only feedback, no points",
"options": []
})
return rubric_from_dict(rubric_dict)
def _rubric_with_all_feedback_only_criteria(self):
"""Create a rubric with all feedback-only criteria."""
rubric_dict = copy.deepcopy(RUBRIC)
for criterion in rubric_dict['criteria']:
criterion['options'] = []
return rubric_from_dict(rubric_dict)
# -*- coding: utf-8 -*-
"""
Tests for assessment models.
"""
from openassessment.test_utils import CacheResetTest
from submissions import api as sub_api
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection,
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
class TestRubricOptionIds(CacheResetTest):
"""
Test selection of options from a rubric.
"""
NUM_CRITERIA = 4
NUM_OPTIONS = 3
def setUp(self):
"""
Create a rubric in the database.
"""
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
rubric=self.rubric,
name="test criterion {num}".format(num=num),
order_num=num,
) for num in range(self.NUM_CRITERIA)
]
self.options = dict()
for criterion in self.criteria:
self.options[criterion.name] = [
CriterionOption.objects.create(
criterion=criterion,
name="test option {num}".format(num=num),
order_num=num,
points=num
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
class AssessmentFeedbackTest(CacheResetTest):
"""
Tests for assessment feedback.
This is feedback that students give in response to the peer assessments they receive.
"""
def setUp(self):
self.feedback = AssessmentFeedback.objects.create(
submission_uuid='test_submission',
feedback_text='test feedback',
)
def test_default_options(self):
self.assertEqual(self.feedback.options.count(), 0)
def test_add_options_all_new(self):
# We haven't created any feedback options yet, so these should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_some_new(self):
# Create one feedback option in the database
AssessmentFeedbackOption.objects.create(text='I liked my assessment')
# Add feedback options. The one that's new should be created.
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Check the feedback options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
def test_add_options_empty(self):
# No options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 0)
# Add an option
self.feedback.add_options(['test'])
self.assertEqual(len(self.feedback.options.all()), 1)
# Add an empty list of options
self.feedback.add_options([])
self.assertEqual(len(self.feedback.options.all()), 1)
def test_add_options_duplicates(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, one of which is a duplicate
self.feedback.add_options(['I liked my assessment', 'I disliked my assessment'])
# There should be three options
options = self.feedback.options.all()
self.assertEqual(len(options), 3)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
self.assertEqual(options[2].text, 'I disliked my assessment')
# There should be only three options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 3)
def test_add_options_all_old(self):
# Add some options, which will be created
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# Add some more options, all of which are duplicates
self.feedback.add_options(['I liked my assessment', 'I thought my assessment was unfair'])
# There should be two options
options = self.feedback.options.all()
self.assertEqual(len(options), 2)
self.assertEqual(options[0].text, 'I liked my assessment')
self.assertEqual(options[1].text, 'I thought my assessment was unfair')
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
def test_unicode(self):
# Create options with unicode
self.feedback.add_options([u'𝓘 𝓵𝓲𝓴𝓮𝓭 𝓶𝔂 𝓪𝓼𝓼𝓮𝓼𝓼𝓶𝓮𝓷𝓽', u'ノ イんougんイ ᄊリ ム丂丂乇丂丂ᄊ乇刀イ wム丂 u刀キムノ尺'])
# There should be two options in the database
self.assertEqual(AssessmentFeedbackOption.objects.count(), 2)
class PeerWorkflowTest(CacheResetTest):
"""
Tests for the peer workflow model.
"""
STUDENT_ITEM = {
'student_id': 'test_student',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
OTHER_STUDENT = {
'student_id': 'test_student_2',
'course_id': 'test_course',
'item_type': 'openassessment',
'item_id': 'test_item'
}
def test_create_item_multiple_available(self):
# Bugfix TIM-572
submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
submitter_workflow = PeerWorkflow.objects.create(
student_id=self.STUDENT_ITEM['student_id'],
item_id=self.STUDENT_ITEM['item_id'],
course_id=self.STUDENT_ITEM['course_id'],
submission_uuid=submitter_sub['uuid']
)
scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, 'test answer 2')
scorer_workflow = PeerWorkflow.objects.create(
student_id=self.OTHER_STUDENT['student_id'],
item_id=self.OTHER_STUDENT['item_id'],
course_id=self.OTHER_STUDENT['course_id'],
submission_uuid=scorer_sub['uuid']
)
for _ in range(2):
PeerWorkflowItem.objects.create(
scorer=scorer_workflow,
author=submitter_workflow,
submission_uuid=submitter_sub['uuid']
)
# This used to cause an error when `get_or_create` returned multiple workflow items
PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
 # coding=utf-8
 import datetime
 import pytz
+import copy
 from django.db import DatabaseError, IntegrityError
 from django.utils import timezone
@@ -117,7 +118,7 @@ ASSESSMENT_DICT_PASS = {
 # Answers are against RUBRIC_DICT -- this is worth 12 points
 # Feedback text is one character over the limit.
-LONG_FEEDBACK_TEXT = u"是" * Assessment.MAXSIZE + "."
+LONG_FEEDBACK_TEXT = u"是" * Assessment.MAX_FEEDBACK_SIZE + "."
 ASSESSMENT_DICT_HUGE = {
     'overall_feedback': LONG_FEEDBACK_TEXT,
     'criterion_feedback': {
@@ -150,7 +151,7 @@ class TestPeerApi(CacheResetTest):
     Tests for the peer assessment API functions.
     """
-    CREATE_ASSESSMENT_NUM_QUERIES = 61
+    CREATE_ASSESSMENT_NUM_QUERIES = 59
     def test_create_assessment_points(self):
         self._create_student_and_submission("Tim", "Tim's answer")
@@ -173,9 +174,7 @@ class TestPeerApi(CacheResetTest):
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
-        # Creating feedback per criterion should need one additional query to update
-        # for each criterion that has feedback.
-        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
+        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
             assessment = peer_api.create_assessment(
                 bob_sub["uuid"],
                 bob["student_id"],
@@ -196,26 +195,62 @@ class TestPeerApi(CacheResetTest):
             expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
             self.assertEqual(part['feedback'], expected_feedback)
-    def test_create_assessment_unknown_criterion_feedback(self):
+    def test_create_assessment_criterion_with_zero_options(self):
         self._create_student_and_submission("Tim", "Tim's answer")
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
         peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
-        # Create an assessment where the criterion feedback uses
-        # a criterion name that isn't in the rubric.
+        # Modify the rubric to include a criterion with no options,
+        # only written feedback.
+        rubric = copy.deepcopy(RUBRIC_DICT)
+        rubric["criteria"].append({
+            "name": "feedback only",
+            "prompt": "feedback only",
+            "options": []
+        })
+        # Provide written feedback for the feedback-only criterion
+        feedback = {
+            "feedback only": u"This is some feedback"
+        }
         assessment = peer_api.create_assessment(
             bob_sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT['options_selected'],
-            {'unknown': 'Unknown criterion has feedback!'},
-            ASSESSMENT_DICT['overall_feedback'],
-            RUBRIC_DICT,
+            feedback, "",
+            rubric,
             REQUIRED_GRADED_BY,
         )
-        # The criterion feedback should be ignored
-        for part_num in range(3):
-            self.assertEqual(assessment["parts"][part_num]["feedback"], "")
+        # Verify that the point values are the same
+        # (the feedback-only criterion isn't worth any points)
+        self.assertEqual(assessment["points_earned"], 6)
+        self.assertEqual(assessment["points_possible"], 14)
+        # Verify the feedback-only criterion assessment part
+        self.assertEqual(assessment["parts"][4]["criterion"]["name"], "feedback only")
+        self.assertIs(assessment["parts"][4]["option"], None)
+        self.assertEqual(assessment["parts"][4]["feedback"], u"This is some feedback")
+    def test_create_assessment_unknown_criterion_feedback(self):
+        self._create_student_and_submission("Tim", "Tim's answer")
+        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
+        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
+        # Create an assessment where the criterion feedback uses
+        # a criterion name that isn't in the rubric.
+        # An exception should be raised, since this will be interpreted
+        # as adding an extra criterion with no options, just feedback.
+        with self.assertRaises(peer_api.PeerAssessmentRequestError):
+            peer_api.create_assessment(
+                bob_sub["uuid"],
+                bob["student_id"],
+                ASSESSMENT_DICT['options_selected'],
+                {'unknown': 'Unknown criterion has feedback!'},
+                ASSESSMENT_DICT['overall_feedback'],
+                RUBRIC_DICT,
+                REQUIRED_GRADED_BY,
+            )
     def test_create_huge_overall_feedback_error(self):
         self._create_student_and_submission("Tim", "Tim's answer")
@@ -234,12 +269,12 @@ class TestPeerApi(CacheResetTest):
         )
         # The assessment feedback text should be truncated
-        self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)
+        self.assertEqual(len(assessment_dict['feedback']), Assessment.MAX_FEEDBACK_SIZE)
         # The length of the feedback text in the database should
         # equal what we got from the API.
         assessment = Assessment.objects.get()
-        self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
+        self.assertEqual(len(assessment.feedback), Assessment.MAX_FEEDBACK_SIZE)
     def test_create_huge_per_criterion_feedback_error(self):
         self._create_student_and_submission("Tim", "Tim's answer")
@@ -259,11 +294,11 @@ class TestPeerApi(CacheResetTest):
         # Verify that the feedback has been truncated
         for part in assessment['parts']:
-            self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)
+            self.assertEqual(len(part['feedback']), Assessment.MAX_FEEDBACK_SIZE)
         # Verify that the feedback in the database matches what we got back from the API
         for part in AssessmentPart.objects.all():
-            self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
+            self.assertEqual(len(part.feedback), Assessment.MAX_FEEDBACK_SIZE)
     @file_data('data/valid_assessments.json')
     def test_get_assessments(self, assessment_dict):
@@ -1022,7 +1057,7 @@ class TestPeerApi(CacheResetTest):
         peer_api.set_assessment_feedback(
             {
                 'submission_uuid': tim_answer['uuid'],
-                'feedback_text': 'Boo'*AssessmentFeedback.MAXSIZE,
+                'feedback_text': 'Boo' * AssessmentFeedback.MAXSIZE,
             }
         )
@@ -1265,6 +1300,39 @@ class TestPeerApi(CacheResetTest):
         self.assertEqual(len(scored_assessments), 1)
         self.assertEqual(scored_assessments[0]['scorer_id'], tim['student_id'])
@raises(peer_api.PeerAssessmentInternalError)
def test_create_assessment_database_error(self):
self._create_student_and_submission("Bob", "Bob's answer")
submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1)
with patch.object(PeerWorkflow.objects, 'get') as mock_call:
mock_call.side_effect = DatabaseError("Kaboom!")
peer_api.create_assessment(
submission['uuid'],
student['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
REQUIRED_GRADED_BY
)
@raises(peer_api.PeerAssessmentRequestError)
def test_create_assessment_invalid_rubric_error(self):
self._create_student_and_submission("Bob", "Bob's answer")
submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1)
peer_api.create_assessment(
submission['uuid'],
student['student_id'],
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
{"invalid_rubric!": "is invalid"},
REQUIRED_GRADED_BY
)
    @staticmethod
    def _create_student_and_submission(student, answer, date=None):
        new_student_item = STUDENT_ITEM.copy()
......
...@@ -6,12 +6,12 @@ Tests for assessment models. ...@@ -6,12 +6,12 @@ Tests for assessment models.
import copy import copy
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import ( from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection Rubric, Criterion, CriterionOption, InvalidRubricSelection
) )
from openassessment.assessment.test.constants import RUBRIC from openassessment.assessment.test.constants import RUBRIC
class TestRubricOptionIds(CacheResetTest): class RubricIndexTest(CacheResetTest):
""" """
Test selection of options from a rubric. Test selection of options from a rubric.
""" """
...@@ -23,6 +23,8 @@ class TestRubricOptionIds(CacheResetTest): ...@@ -23,6 +23,8 @@ class TestRubricOptionIds(CacheResetTest):
""" """
Create a rubric in the database. Create a rubric in the database.
""" """
super(RubricIndexTest, self).setUp()
self.rubric = Rubric.objects.create() self.rubric = Rubric.objects.create()
self.criteria = [ self.criteria = [
Criterion.objects.create( Criterion.objects.create(
...@@ -43,104 +45,73 @@ class TestRubricOptionIds(CacheResetTest): ...@@ -43,104 +45,73 @@ class TestRubricOptionIds(CacheResetTest):
) for num in range(self.NUM_OPTIONS) ) for num in range(self.NUM_OPTIONS)
] ]
def test_option_ids(self): def test_find_option(self):
options_ids = self.rubric.options_ids({ self.assertEqual(
"test criterion 0": "test option 0", self.rubric.index.find_option("test criterion 0", "test option 0"),
"test criterion 1": "test option 1", self.options["test criterion 0"][0]
"test criterion 2": "test option 2", )
"test criterion 3": "test option 0", self.assertEqual(
}) self.rubric.index.find_option("test criterion 1", "test option 1"),
self.assertEqual(options_ids, set([ self.options["test criterion 1"][1]
self.options['test criterion 0'][0].id, )
self.options['test criterion 1'][1].id, self.assertEqual(
self.options['test criterion 2'][2].id, self.rubric.index.find_option("test criterion 2", "test option 2"),
self.options['test criterion 3'][0].id self.options["test criterion 2"][2]
])) )
self.assertEqual(
def test_option_ids_different_order(self): self.rubric.index.find_option("test criterion 3", "test option 0"),
options_ids = self.rubric.options_ids({ self.options["test criterion 3"][0]
"test criterion 0": "test option 0", )
"test criterion 1": "test option 1",
"test criterion 2": "test option 2", def test_find_missing_criteria(self):
"test criterion 3": "test option 0", missing = self.rubric.index.find_missing_criteria([
}) 'test criterion 0', 'test criterion 1', 'test criterion 3'
self.assertEqual(options_ids, set([ ])
self.options['test criterion 0'][0].id, expected_missing = set(['test criterion 2'])
self.options['test criterion 1'][1].id, self.assertEqual(missing, expected_missing)
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id def test_invalid_option(self):
])) with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option("test criterion 0", "invalid")
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection): def test_valid_option_wrong_criterion(self):
self.rubric.options_ids({ # Add another option to the first criterion
"test criterion 0": "test option 0", new_option = CriterionOption.objects.create(
"test criterion 1": "test option 1", criterion=self.criteria[0],
"test criterion 3": "test option 2", name="extra option",
}) order_num=(self.NUM_OPTIONS + 1),
points=4
def test_option_ids_extra_criteria(self): )
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({ # We should be able to find it in the first criterion
"test criterion 0": "test option 0", self.assertEqual(
"test criterion 1": "test option 1", new_option,
"test criterion 2": "test option 2", self.rubric.index.find_option("test criterion 0", "extra option")
"test criterion 3": "test option 1", )
"extra criterion": "test",
}) # ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
def test_option_ids_mutated_criterion_name(self): self.rubric.index.find_option("test criterion 1", "extra option")
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({ def test_find_option_for_points(self):
"test mutated criterion": "test option 1", self.assertEqual(
"test criterion 1": "test option 1", self.rubric.index.find_option_for_points("test criterion 0", 0),
"test criterion 2": "test option 2", self.options["test criterion 0"][0]
"test criterion 3": "test option 1", )
}) self.assertEqual(
self.rubric.index.find_option_for_points("test criterion 1", 1),
def test_option_ids_mutated_option_name(self): self.options["test criterion 1"][1]
with self.assertRaises(InvalidOptionSelection): )
self.rubric.options_ids({ self.assertEqual(
"test criterion 0": "test option 1", self.rubric.index.find_option_for_points("test criterion 2", 2),
"test criterion 1": "test mutated option", self.options["test criterion 2"][2]
"test criterion 2": "test option 2", )
"test criterion 3": "test option 1", self.assertEqual(
}) self.rubric.index.find_option_for_points("test criterion 3", 1),
self.options["test criterion 3"][1]
def test_options_ids_points(self): )
options_ids = self.rubric.options_ids_for_points({
'test criterion 0': 0, def test_find_option_for_points_first_of_duplicate_points(self):
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][1].id
]))
def test_options_ids_points_caching(self):
# First call: the dict is not cached
with self.assertNumQueries(1):
self.rubric.options_ids_for_points({
'test criterion 0': 0,
'test criterion 1': 1,
'test criterion 2': 2,
'test criterion 3': 1
})
# Second call: the dict is cached, so no queries are needed
with self.assertNumQueries(0):
self.rubric.options_ids_for_points({
'test criterion 0': 1,
'test criterion 1': 2,
'test criterion 2': 1,
'test criterion 3': 0
})
def test_options_ids_first_of_duplicate_points(self):
# Change the first criterion options so that the second and third # Change the first criterion options so that the second and third
# option have the same point value # option have the same point value
self.options['test criterion 0'][1].points = 5 self.options['test criterion 0'][1].points = 5
...@@ -149,23 +120,42 @@ class TestRubricOptionIds(CacheResetTest): ...@@ -149,23 +120,42 @@ class TestRubricOptionIds(CacheResetTest):
self.options['test criterion 0'][2].save() self.options['test criterion 0'][2].save()
# Should get the first option back # Should get the first option back
options_ids = self.rubric.options_ids_for_points({ option = self.rubric.index.find_option_for_points("test criterion 0", 5)
'test criterion 0': 5, self.assertEqual(option, self.options['test criterion 0'][1])
'test criterion 1': 1,
'test criterion 2': 2, def test_find_option_for_points_invalid_selection(self):
'test criterion 3': 1 # No such point value
}) with self.assertRaises(InvalidRubricSelection):
self.assertIn(self.options['test criterion 0'][1].id, options_ids) self.rubric.index.find_option_for_points("test criterion 0", 10)
def test_options_ids_points_invalid_selection(self): # No such criterion
with self.assertRaises(InvalidOptionSelection): with self.assertRaises(InvalidRubricSelection):
self.rubric.options_ids_for_points({ self.rubric.index.find_option_for_points("no such criterion", 0)
'test criterion 0': self.NUM_OPTIONS + 1,
'test criterion 1': 2, def test_valid_points_wrong_criterion(self):
'test criterion 2': 1, # Add another option to the first criterion
'test criterion 3': 0 new_option = CriterionOption.objects.create(
}) criterion=self.criteria[0],
name="extra option",
order_num=(self.NUM_OPTIONS + 1),
points=10
)
# We should be able to find it in the first criterion
self.assertEqual(
new_option,
self.rubric.index.find_option_for_points("test criterion 0", 10)
)
# ... but not from another criterion
with self.assertRaises(InvalidRubricSelection):
self.rubric.index.find_option_for_points("test criterion 1", 10)
class RubricHashTest(CacheResetTest):
"""
Tests of the rubric content and structure hash.
"""
def test_structure_hash_identical(self): def test_structure_hash_identical(self):
first_hash = Rubric.structure_hash_from_dict(RUBRIC) first_hash = Rubric.structure_hash_from_dict(RUBRIC)
......
...@@ -91,7 +91,7 @@ class TestSelfApi(CacheResetTest): ...@@ -91,7 +91,7 @@ class TestSelfApi(CacheResetTest):
create_assessment( create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗', 'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC, self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
def test_create_assessment_wrong_user(self): def test_create_assessment_wrong_user(self):
...@@ -103,7 +103,7 @@ class TestSelfApi(CacheResetTest): ...@@ -103,7 +103,7 @@ class TestSelfApi(CacheResetTest):
create_assessment( create_assessment(
'invalid_submission_uuid', u'another user', 'invalid_submission_uuid', u'another user',
self.OPTIONS_SELECTED, self.RUBRIC, self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
def test_create_assessment_invalid_criterion(self): def test_create_assessment_invalid_criterion(self):
...@@ -119,7 +119,7 @@ class TestSelfApi(CacheResetTest): ...@@ -119,7 +119,7 @@ class TestSelfApi(CacheResetTest):
create_assessment( create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗', submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC, options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
def test_create_assessment_invalid_option(self): def test_create_assessment_invalid_option(self):
...@@ -135,7 +135,7 @@ class TestSelfApi(CacheResetTest): ...@@ -135,7 +135,7 @@ class TestSelfApi(CacheResetTest):
create_assessment( create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗', submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC, options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
def test_create_assessment_missing_criterion(self): def test_create_assessment_missing_criterion(self):
...@@ -151,7 +151,7 @@ class TestSelfApi(CacheResetTest): ...@@ -151,7 +151,7 @@ class TestSelfApi(CacheResetTest):
create_assessment( create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗', submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC, options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
def test_create_assessment_timestamp(self): def test_create_assessment_timestamp(self):
...@@ -200,3 +200,51 @@ class TestSelfApi(CacheResetTest): ...@@ -200,3 +200,51 @@ class TestSelfApi(CacheResetTest):
def test_is_complete_no_submission(self): def test_is_complete_no_submission(self):
# This submission uuid does not exist # This submission uuid does not exist
self.assertFalse(submitter_is_finished('abc1234', {})) self.assertFalse(submitter_is_finished('abc1234', {}))
def test_create_assessment_criterion_with_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Modify the rubric to include a criterion with no options (only written feedback)
rubric = copy.deepcopy(self.RUBRIC)
rubric['criteria'].append({
"name": "feedback only",
"prompt": "feedback only",
"options": []
})
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# the criterion with no options to an empty string
self.assertEqual(assessment["parts"][2]["option"], None)
self.assertEqual(assessment["parts"][2]["feedback"], u"")
def test_create_assessment_all_criteria_have_zero_options(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Use a rubric with only criteria with no options (only written feedback)
rubric = copy.deepcopy(self.RUBRIC)
for criterion in rubric["criteria"]:
criterion["options"] = []
# Create a self-assessment for the submission
# We don't select any options, since none of the criteria have options
options_selected = {}
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options_selected, rubric,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# The self-assessment should have set the feedback for
# all criteria to an empty string.
for part in assessment["parts"]:
self.assertEqual(part["option"], None)
self.assertEqual(part["feedback"], u"")
# coding=utf-8
"""
Tests for assessment serializers.
"""
import json import json
import os.path import os.path
import copy
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback
)
from openassessment.assessment.serializers import ( from openassessment.assessment.serializers import (
InvalidRubric, RubricSerializer, rubric_from_dict, rubric_from_dict, full_assessment_dict,
AssessmentFeedbackSerializer AssessmentFeedbackSerializer, InvalidRubric
) )
from .constants import RUBRIC
def json_data(filename): def json_data(filename):
curr_dir = os.path.dirname(__file__) curr_dir = os.path.dirname(__file__)
...@@ -14,7 +24,7 @@ def json_data(filename): ...@@ -14,7 +24,7 @@ def json_data(filename):
return json.load(json_file) return json.load(json_file)
class TestRubricDeserialization(CacheResetTest): class RubricDeserializationTest(CacheResetTest):
def test_rubric_only_created_once(self): def test_rubric_only_created_once(self):
# Make sure sending the same Rubric data twice only creates one Rubric, # Make sure sending the same Rubric data twice only creates one Rubric,
...@@ -35,7 +45,7 @@ class TestRubricDeserialization(CacheResetTest): ...@@ -35,7 +45,7 @@ class TestRubricDeserialization(CacheResetTest):
rubric_from_dict(json_data('data/rubric/no_points.json')) rubric_from_dict(json_data('data/rubric/no_points.json'))
class TestCriterionDeserialization(CacheResetTest): class CriterionDeserializationTest(CacheResetTest):
def test_empty_criteria(self): def test_empty_criteria(self):
with self.assertRaises(InvalidRubric) as cm: with self.assertRaises(InvalidRubric) as cm:
...@@ -54,20 +64,11 @@ class TestCriterionDeserialization(CacheResetTest): ...@@ -54,20 +64,11 @@ class TestCriterionDeserialization(CacheResetTest):
) )
class TestCriterionOptionDeserialization(CacheResetTest): class CriterionOptionDeserializationTest(CacheResetTest):
def test_empty_options(self): def test_empty_options(self):
with self.assertRaises(InvalidRubric) as cm: rubric = rubric_from_dict(json_data('data/rubric/empty_options.json'))
rubric_from_dict(json_data('data/rubric/empty_options.json')) self.assertEqual(rubric.criteria.count(), 2)
self.assertEqual(
cm.exception.errors,
{
'criteria': [
{}, # There are no errors in the first criterion
{'options': [u'Criterion must have at least one option.']}
]
}
)
def test_missing_options(self): def test_missing_options(self):
with self.assertRaises(InvalidRubric) as cm: with self.assertRaises(InvalidRubric) as cm:
...@@ -83,7 +84,7 @@ class TestCriterionOptionDeserialization(CacheResetTest): ...@@ -83,7 +84,7 @@ class TestCriterionOptionDeserialization(CacheResetTest):
) )
class TestAssessmentFeedbackSerializer(CacheResetTest): class AssessmentFeedbackSerializerTest(CacheResetTest):
def test_serialize(self): def test_serialize(self):
feedback = AssessmentFeedback.objects.create( feedback = AssessmentFeedback.objects.create(
...@@ -114,3 +115,41 @@ class TestAssessmentFeedbackSerializer(CacheResetTest): ...@@ -114,3 +115,41 @@ class TestAssessmentFeedbackSerializer(CacheResetTest):
'options': [], 'options': [],
'assessments': [], 'assessments': [],
}) })
class AssessmentSerializerTest(CacheResetTest):
def test_full_assessment_dict_criteria_no_options(self):
# Create a rubric with a criterion that has no options (just feedback)
rubric_dict = copy.deepcopy(RUBRIC)
rubric_dict['criteria'].append({
'order_num': 2,
'name': 'feedback only',
'prompt': 'feedback only',
'options': []
})
rubric = rubric_from_dict(rubric_dict)
# Create an assessment for the rubric
assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
selected = {
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
feedback = {
u"feedback only": u"enjoy the feedback!"
}
AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
# Serialize the assessment
serialized = full_assessment_dict(assessment)
# Verify that the assessment dict correctly serialized the criterion with options.
self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")
# Verify that the assessment dict correctly serialized the criterion with no options.
self.assertIs(serialized['parts'][2]['option'], None)
self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
...@@ -221,9 +221,9 @@ class CsvWriter(object): ...@@ -221,9 +221,9 @@ class CsvWriter(object):
for part in assessment_parts: for part in assessment_parts:
self._write_unicode('assessment_part', [ self._write_unicode('assessment_part', [
part.assessment.id, part.assessment.id,
part.option.points, part.points_earned,
part.option.criterion.name, part.criterion.name,
part.option.name, part.option.name if part.option is not None else u"",
part.feedback part.feedback
]) ])
......
...@@ -4,12 +4,16 @@ ...@@ -4,12 +4,16 @@
<header class="step__header ui-toggle-visibility__control"> <header class="step__header ui-toggle-visibility__control">
<h2 class="step__title"> <h2 class="step__title">
<span class="wrapper--copy"> <span class="wrapper--copy">
{% if score %}
<span class="step__label">{% trans "Your Grade" %}: </span> <span class="step__label">{% trans "Your Grade" %}: </span>
<span class="grade__value"> <span class="grade__value">
<span class="grade__value__title"> <span class="grade__value__title">
{% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %} {% blocktrans with points_earned=score.points_earned points_possible=score.points_possible%}<span class="grade__value__earned">{{ points_earned }}</span> out of <span class="grade__value__potential">{{ points_possible }}</span>{% endblocktrans %}
</span> </span>
</span> </span>
{% else %}
<span class="step__label">{% trans "Your Grade" %}</span>
{% endif %}
</span> </span>
</h2> </h2>
</header> </header>
...@@ -133,13 +137,15 @@ ...@@ -133,13 +137,15 @@
{% endfor %} {% endfor %}
{% if criterion.feedback %} {% if criterion.feedback %}
<li class="answer--feedback ui-toggle-visibility is--collapsed"> <li class="answer--feedback ui-toggle-visibility {% if criterion.options %}is--collapsed{% endif %}">
{% if criterion.options %}
<h5 class="answer--feedback__title ui-toggle-visibility__control"> <h5 class="answer--feedback__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i> <i class="ico icon-caret-right"></i>
<span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span> <span class="answer--feedback__title__copy">{% trans "Additional Comments" %} ({{ criterion.feedback|length }})</span>
</h5> </h5>
{% endif %}
<ul class="answer--feedback__content ui-toggle-visibility__content"> <ul class="answer--feedback__content {% if criterion.options %}ui-toggle-visibility__content{% endif %}">
{% for feedback in criterion.feedback %} {% for feedback in criterion.feedback %}
<li class="feedback feedback--{{ forloop.counter }}"> <li class="feedback feedback--{{ forloop.counter }}">
<h6 class="feedback__source"> <h6 class="feedback__source">
......
...@@ -60,7 +60,10 @@ ...@@ -60,7 +60,10 @@
<ol class="list list--fields assessment__rubric"> <ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %} {% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}"> <li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control"> <h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i> <i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span> <span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
...@@ -88,7 +91,7 @@ ...@@ -88,7 +91,7 @@
</li> </li>
{% endfor %} {% endfor %}
{% if criterion.feedback == 'optional' %} {% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback"> <li class="answer--feedback">
<div class="wrapper--input"> <div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label> <label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
...@@ -98,6 +101,7 @@ ...@@ -98,6 +101,7 @@
value="{{ criterion.name }}" value="{{ criterion.name }}"
name="{{ criterion.name }}" name="{{ criterion.name }}"
maxlength="300" maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
> >
</textarea> </textarea>
</div> </div>
......
...@@ -52,7 +52,10 @@ ...@@ -52,7 +52,10 @@
<ol class="list list--fields assessment__rubric"> <ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %} {% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}"> <li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility {% if criterion.options %}has--options{% endif %}"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control"> <h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i> <i class="ico icon-caret-right"></i>
<span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span> <span class="ui-toggle-visibility__control__copy question__title__copy">{{ criterion.prompt }}</span>
...@@ -80,7 +83,7 @@ ...@@ -80,7 +83,7 @@
</li> </li>
{% endfor %} {% endfor %}
{% if criterion.feedback == 'optional' %} {% if criterion.feedback == 'optional' or criterion.feedback == 'required' %}
<li class="answer--feedback"> <li class="answer--feedback">
<div class="wrapper--input"> <div class="wrapper--input">
<label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label> <label for="assessment__rubric__question--{{ criterion.order_num }}__feedback" class="answer__label">{% trans "Comments" %}</label>
...@@ -90,6 +93,7 @@ ...@@ -90,6 +93,7 @@
value="{{ criterion.name }}" value="{{ criterion.name }}"
name="{{ criterion.name }}" name="{{ criterion.name }}"
maxlength="300" maxlength="300"
{% if criterion.feedback == 'required' %}required{% endif %}
> >
</textarea> </textarea>
</div> </div>
......
...@@ -50,7 +50,11 @@ ...@@ -50,7 +50,11 @@
<fieldset class="assessment__fields"> <fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric"> <ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %} {% for criterion in rubric_criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}"> {% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control"> <h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i> <i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span> <span class="question__title__copy">{{ criterion.prompt }}</span>
...@@ -79,6 +83,7 @@ ...@@ -79,6 +83,7 @@
</ol> </ol>
</div> </div>
</li> </li>
{% endif %}
{% endfor %} {% endfor %}
</ol> </ol>
</fieldset> </fieldset>
......
...@@ -73,7 +73,11 @@ ...@@ -73,7 +73,11 @@
<fieldset class="assessment__fields"> <fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric"> <ol class="list list--fields assessment__rubric">
{% for criterion in training_rubric.criteria %} {% for criterion in training_rubric.criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}"> {% if criterion.options %}
<li
class="field field--radio is--required assessment__rubric__question ui-toggle-visibility has--options"
id="assessment__rubric__question--{{ criterion.order_num }}"
>
<h4 class="question__title ui-toggle-visibility__control"> <h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i> <i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span> <span class="question__title__copy">{{ criterion.prompt }}</span>
...@@ -115,6 +119,7 @@ ...@@ -115,6 +119,7 @@
</ol> </ol>
</div> </div>
</li> </li>
{% endif %}
{% endfor %} {% endfor %}
</ol> </ol>
</fieldset> </fieldset>
......
...@@ -506,6 +506,7 @@ ...@@ -506,6 +506,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 5,
"option": 25, "option": 25,
"feedback": "" "feedback": ""
} }
...@@ -515,6 +516,7 @@ ...@@ -515,6 +516,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 4,
"option": 19, "option": 19,
"feedback": "Elit nonumy m\u00eal ut, nam \u00e9sse fabul\u00e1s n\u00f3" "feedback": "Elit nonumy m\u00eal ut, nam \u00e9sse fabul\u00e1s n\u00f3"
} }
...@@ -524,6 +526,7 @@ ...@@ -524,6 +526,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 6,
"option": 30, "option": 30,
"feedback": "Per in\u00e2n\u00ed dol\u00f3re an, \u00fat s\u00e9a t\u00f4ta qu\u00e0eque d\u00edssenti\u00fant" "feedback": "Per in\u00e2n\u00ed dol\u00f3re an, \u00fat s\u00e9a t\u00f4ta qu\u00e0eque d\u00edssenti\u00fant"
} }
...@@ -533,6 +536,7 @@ ...@@ -533,6 +536,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 5,
"option": 25, "option": 25,
"feedback": "" "feedback": ""
} }
...@@ -542,6 +546,7 @@ ...@@ -542,6 +546,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 4,
"option": 20, "option": 20,
"feedback": "" "feedback": ""
} }
...@@ -551,6 +556,7 @@ ...@@ -551,6 +556,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 6,
"option": 30, "option": 30,
"feedback": "" "feedback": ""
} }
...@@ -560,6 +566,7 @@ ...@@ -560,6 +566,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 5,
"option": 24, "option": 24,
"feedback": "" "feedback": ""
} }
...@@ -569,6 +576,7 @@ ...@@ -569,6 +576,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 4,
"option": 19, "option": 19,
"feedback": "" "feedback": ""
} }
...@@ -578,6 +586,7 @@ ...@@ -578,6 +586,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 6,
"option": 31, "option": 31,
"feedback": "" "feedback": ""
} }
...@@ -587,6 +596,7 @@ ...@@ -587,6 +596,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 4,
"option": 18, "option": 18,
"feedback": "" "feedback": ""
} }
...@@ -596,6 +606,7 @@ ...@@ -596,6 +606,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 6,
"option": 30, "option": 30,
"feedback": "" "feedback": ""
} }
...@@ -605,6 +616,7 @@ ...@@ -605,6 +616,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 5,
"option": 23, "option": 23,
"feedback": "" "feedback": ""
} }
......
...@@ -656,6 +656,7 @@ ...@@ -656,6 +656,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 8,
"option": 32, "option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna." "feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
} }
...@@ -665,6 +666,7 @@ ...@@ -665,6 +666,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 10,
"option": 44, "option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus" "feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
} }
...@@ -674,6 +676,7 @@ ...@@ -674,6 +676,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 9,
"option": 37, "option": 37,
"feedback": "" "feedback": ""
} }
......
...@@ -692,6 +692,7 @@ ...@@ -692,6 +692,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 4,
"option": 32, "option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna." "feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
} }
...@@ -701,6 +702,7 @@ ...@@ -701,6 +702,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 10,
"option": 44, "option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus" "feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
} }
...@@ -710,6 +712,7 @@ ...@@ -710,6 +712,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 9,
"option": 37, "option": 37,
"feedback": "" "feedback": ""
} }
...@@ -719,6 +722,7 @@ ...@@ -719,6 +722,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 8,
"option": 33, "option": 33,
"feedback": "" "feedback": ""
} }
...@@ -728,6 +732,7 @@ ...@@ -728,6 +732,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 10,
"option": 44, "option": 44,
"feedback": "" "feedback": ""
} }
...@@ -737,6 +742,7 @@ ...@@ -737,6 +742,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 9,
"option": 38, "option": 38,
"feedback": "" "feedback": ""
} }
...@@ -746,6 +752,7 @@ ...@@ -746,6 +752,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 8,
"option": 32, "option": 32,
"feedback": "Aenean vehicula nunc quis semper porttitor. " "feedback": "Aenean vehicula nunc quis semper porttitor. "
} }
...@@ -755,6 +762,7 @@ ...@@ -755,6 +762,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 10,
"option": 42, "option": 42,
"feedback": "Etiam vitae facilisis ante, in tristique lacus." "feedback": "Etiam vitae facilisis ante, in tristique lacus."
} }
...@@ -764,6 +772,7 @@ ...@@ -764,6 +772,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 9,
"option": 38, "option": 38,
"feedback": "" "feedback": ""
} }
...@@ -773,6 +782,7 @@ ...@@ -773,6 +782,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 10,
"option": 43, "option": 43,
"feedback": "" "feedback": ""
} }
...@@ -782,6 +792,7 @@ ...@@ -782,6 +792,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 9,
"option": 38, "option": 38,
"feedback": "" "feedback": ""
} }
...@@ -791,6 +802,7 @@ ...@@ -791,6 +802,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 8,
"option": 31, "option": 31,
"feedback": "" "feedback": ""
} }
......
...@@ -668,6 +668,7 @@ ...@@ -668,6 +668,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 8,
"option": 32, "option": 32,
"feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna." "feedback": "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."
} }
...@@ -677,6 +678,7 @@ ...@@ -677,6 +678,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 10,
"option": 44, "option": 44,
"feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus" "feedback": "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"
} }
...@@ -686,6 +688,7 @@ ...@@ -686,6 +688,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 9,
"option": 37, "option": 37,
"feedback": "" "feedback": ""
} }
...@@ -695,6 +698,7 @@ ...@@ -695,6 +698,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 8,
"option": 33, "option": 33,
"feedback": "" "feedback": ""
} }
...@@ -704,6 +708,7 @@ ...@@ -704,6 +708,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 10,
"option": 44, "option": 44,
"feedback": "" "feedback": ""
} }
...@@ -713,6 +718,7 @@ ...@@ -713,6 +718,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 9,
"option": 38, "option": 38,
"feedback": "" "feedback": ""
} }
......
...@@ -238,6 +238,7 @@ ...@@ -238,6 +238,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 3,
"option": 9, "option": 9,
"feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed" "feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed"
} }
...@@ -247,6 +248,7 @@ ...@@ -247,6 +248,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 1, "assessment": 1,
"criterion": 4,
"option": 11, "option": 11,
"feedback": "" "feedback": ""
} }
...@@ -256,6 +258,7 @@ ...@@ -256,6 +258,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 4,
"option": 12, "option": 12,
"feedback": "" "feedback": ""
} }
...@@ -265,6 +268,7 @@ ...@@ -265,6 +268,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 2, "assessment": 2,
"criterion": 3,
"option": 7, "option": 7,
"feedback": "" "feedback": ""
} }
...@@ -274,6 +278,7 @@ ...@@ -274,6 +278,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 3,
"option": 9, "option": 9,
"feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed" "feedback": "\u0547\ufec9\u0e23\u0547 \u0e23\u0547\u027c\u0671\u0e01\ufeed"
} }
...@@ -283,6 +288,7 @@ ...@@ -283,6 +288,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 3, "assessment": 3,
"criterion": 4,
"option": 12, "option": 12,
"feedback": "" "feedback": ""
} }
...@@ -292,6 +298,7 @@ ...@@ -292,6 +298,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 4,
"option": 12, "option": 12,
"feedback": "" "feedback": ""
} }
...@@ -301,6 +308,7 @@ ...@@ -301,6 +308,7 @@
"model": "assessment.assessmentpart", "model": "assessment.assessmentpart",
"fields": { "fields": {
"assessment": 4, "assessment": 4,
"criterion": 3,
"option": 7, "option": 7,
"feedback": "" "feedback": ""
} }
......
...@@ -151,5 +151,33 @@ ...@@ -151,5 +151,33 @@
["2", "I disagree with one or more of the peer assessments of my response."] ["2", "I disagree with one or more of the peer assessments of my response."]
] ]
} }
},
"feedback_only_criterion": {
"fixture": "db_fixtures/feedback_only_criterion.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment": [
[
"id", "submission_uuid", "scored_at", "scorer_id", "score_type",
"points_possible", "feedback"
],
[
"1", "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"2014-04-30 21:06:35.019000+00:00",
"other",
"PE",
"20",
"Donec consequat vitae ante in pellentesque."
]
],
"assessment_part": [
["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
["1", "3", "clear-headed", "Isaac Asimov", ""],
["1", "0", "feedback only", "", "Feedback!"]
]
}
} }
} }
...@@ -116,6 +116,8 @@ class GradeMixin(object): ...@@ -116,6 +116,8 @@ class GradeMixin(object):
# the score for our current submission UUID. # the score for our current submission UUID.
# We look up the score by submission UUID instead of student item # We look up the score by submission UUID instead of student item
# to ensure that the score always matches the rubric. # to ensure that the score always matches the rubric.
# It's possible for the score to be `None` even if the workflow status is "done"
# when all the criteria in the rubric are feedback-only (no options).
score = workflow['score'] score = workflow['score']
context = { context = {
...@@ -246,7 +248,7 @@ class GradeMixin(object): ...@@ -246,7 +248,7 @@ class GradeMixin(object):
for assessment in peer_assessments: for assessment in peer_assessments:
for part in assessment['parts']: for part in assessment['parts']:
if part['feedback']: if part['feedback']:
part_criterion_name = part['option']['criterion']['name'] part_criterion_name = part['criterion']['name']
criteria_feedback[part_criterion_name].append(part['feedback']) criteria_feedback[part_criterion_name].append(part['feedback'])
for criterion in criteria: for criterion in criteria:
......
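Because a workflow can now finish with no score at all (every criterion feedback-only), the grade view has to tolerate `score` being `None`. A minimal, hypothetical sketch of that guard (not part of this changeset); the `points_earned` / `points_possible` keys match the ones the grade template reads:

# Hypothetical helper: formatting a workflow score that may be None because
# every criterion in the rubric is feedback-only (no options, no points).
def format_score_label(score):
    if score is None:
        # Nothing to report numerically; the template falls back to "Your Grade".
        return "Your Grade"
    return "{earned} out of {possible}".format(
        earned=score["points_earned"],
        possible=score["points_possible"],
    )

print(format_score_label(None))                                         # Your Grade
print(format_score_label({"points_earned": 0, "points_possible": 20}))  # 0 out of 20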
...@@ -574,6 +574,57 @@ class OpenAssessmentBlock( ...@@ -574,6 +574,57 @@ class OpenAssessmentBlock(
if assessment["name"] == mixin_name: if assessment["name"] == mixin_name:
return assessment return assessment
def publish_assessment_event(self, event_name, assessment):
"""
Emit an analytics event for the assessment.
Args:
event_name (str): An identifier for this event type.
assessment (dict): The serialized assessment model.
Returns:
None
"""
parts_list = []
for part in assessment["parts"]:
# Some assessment parts do not include point values,
# only written feedback. In this case, the assessment
# part won't have an associated option.
option_dict = None
if part["option"] is not None:
option_dict = {
"name": part["option"]["name"],
"points": part["option"]["points"],
}
# All assessment parts are associated with criteria
criterion_dict = {
"name": part["criterion"]["name"],
"points_possible": part["criterion"]["points_possible"]
}
parts_list.append({
"option": option_dict,
"criterion": criterion_dict,
"feedback": part["feedback"]
})
self.runtime.publish(
self, event_name,
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": parts_list
}
)
def _serialize_opaque_key(self, key): def _serialize_opaque_key(self, key):
""" """
Gracefully handle opaque keys, both before and after the transition. Gracefully handle opaque keys, both before and after the transition.
......
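For reference, a hypothetical example of the payload that publish_assessment_event builds for a rubric with one scored criterion and one feedback-only criterion. The field names mirror the code above; the concrete values are invented, and the feedback-only part carries option=None along with its written feedback.

# Illustrative payload only -- structure follows publish_assessment_event,
# values are made up for the example.
example_event = {
    "feedback": "Overall feedback on the submission.",
    "rubric": {"content_hash": "abc123"},    # hypothetical hash
    "scorer_id": "student_1",                # hypothetical scorer
    "score_type": "PE",
    "scored_at": "2014-04-01T00:00:00+00:00",
    "submission_uuid": "submission UUID",
    "parts": [
        {
            "criterion": {"name": "vocabulary", "points_possible": 1},
            "option": {"name": "good", "points": 1},
            "feedback": "",
        },
        {
            "criterion": {"name": "feedback only", "points_possible": 0},
            "option": None,
            "feedback": "enjoy the feedback!",
        },
    ],
}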
...@@ -80,7 +80,8 @@ class PeerAssessmentMixin(object): ...@@ -80,7 +80,8 @@ class PeerAssessmentMixin(object):
) )
# Emit analytics event... # Emit analytics event...
self._publish_peer_assessment_event(assessment) self.publish_assessment_event("openassessmentblock.peer_assess", assessment)
except (PeerAssessmentRequestError, PeerAssessmentWorkflowError): except (PeerAssessmentRequestError, PeerAssessmentWorkflowError):
logger.warning( logger.warning(
u"Peer API error for submission UUID {}".format(self.submission_uuid), u"Peer API error for submission UUID {}".format(self.submission_uuid),
...@@ -260,42 +261,6 @@ class PeerAssessmentMixin(object): ...@@ -260,42 +261,6 @@ class PeerAssessmentMixin(object):
return peer_submission return peer_submission
def _publish_peer_assessment_event(self, assessment):
"""
Emit an analytics event for the peer assessment.
Args:
assessment (dict): The serialized assessment model.
Returns:
None
"""
self.runtime.publish(
self,
"openassessmentblock.peer_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"],
},
"feedback": part["feedback"],
}
for part in assessment["parts"]
]
}
)
def _clean_criterion_feedback(self, criterion_feedback): def _clean_criterion_feedback(self, criterion_feedback):
""" """
Remove per-criterion feedback for criteria with feedback disabled Remove per-criterion feedback for criteria with feedback disabled
...@@ -312,5 +277,5 @@ class PeerAssessmentMixin(object): ...@@ -312,5 +277,5 @@ class PeerAssessmentMixin(object):
criterion['name']: criterion_feedback[criterion['name']] criterion['name']: criterion_feedback[criterion['name']]
for criterion in self.rubric_criteria for criterion in self.rubric_criteria
if criterion['name'] in criterion_feedback if criterion['name'] in criterion_feedback
and criterion.get('feedback', 'disabled') == 'optional' and criterion.get('feedback', 'disabled') in ['optional', 'required']
} }
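To make the widened feedback check concrete, here is a small self-contained sketch (invented data, same shape of dict comprehension as _clean_criterion_feedback above): per-criterion feedback survives only when the criterion's feedback setting is "optional" or "required".

# Invented example data; the comprehension mirrors _clean_criterion_feedback.
rubric_criteria = [
    {"name": "vocabulary", "feedback": "optional"},
    {"name": "grammar"},  # no "feedback" key -> treated as "disabled"
    {"name": "feedback only", "feedback": "required"},
]
criterion_feedback = {
    "vocabulary": "Nice word choice.",
    "grammar": "Watch the commas.",
    "feedback only": "Feedback!",
}
cleaned = {
    criterion["name"]: criterion_feedback[criterion["name"]]
    for criterion in rubric_criteria
    if criterion["name"] in criterion_feedback
    and criterion.get("feedback", "disabled") in ["optional", "required"]
}
assert cleaned == {"vocabulary": "Nice word choice.", "feedback only": "Feedback!"}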
...@@ -117,29 +117,8 @@ class SelfAssessmentMixin(object): ...@@ -117,29 +117,8 @@ class SelfAssessmentMixin(object):
data['options_selected'], data['options_selected'],
{"criteria": self.rubric_criteria} {"criteria": self.rubric_criteria}
) )
self.runtime.publish( self.publish_assessment_event("openassessmentblock.self_assess", assessment)
self,
"openassessmentblock.self_assess",
{
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": [
{
"option": {
"name": part["option"]["name"],
"points": part["option"]["points"]
}
}
for part in assessment["parts"]
]
}
)
# After we've created the self-assessment, we need to update the workflow. # After we've created the self-assessment, we need to update the workflow.
self.update_workflow_status() self.update_workflow_status()
except (self_api.SelfAssessmentRequestError, workflow_api.AssessmentWorkflowRequestError): except (self_api.SelfAssessmentRequestError, workflow_api.AssessmentWorkflowRequestError):
......
...@@ -418,5 +418,55 @@ ...@@ -418,5 +418,55 @@
] ]
}, },
"output": "oa_staff_info.html" "output": "oa_staff_info.html"
},
{
"template": "openassessmentblock/peer/oa_peer_assessment.html",
"context": {
"rubric_criteria": [
{
"name": "vocabulary",
"prompt": "vocabulary",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "grammar",
"prompt": "grammar",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Bad"
},
{
"order_num": 1,
"points": 1,
"name": "Good"
}
]
},
{
"name": "feedback_only",
"prompt": "Feedback only, no options!",
"order_num": 2,
"feedback": "required",
"options": []
}
]
},
"output": "oa_rubric.html"
} }
] ]
/**
Tests for an Open Assessment rubric.
**/
describe("OpenAssessment.Rubric", function() {
var rubric = null;
beforeEach(function() {
jasmine.getFixtures().fixturesPath = 'base/fixtures';
loadFixtures('oa_rubric.html');
var el = $("#peer-assessment--001__assessment").get(0);
rubric = new OpenAssessment.Rubric(el);
});
it("enables the submit button only when all options and required feedback have been provided", function() {
// Initially, the submit button should be disabled
expect(rubric.canSubmit()).toBe(false);
// Select some, but not all, options
rubric.optionsSelected({vocabulary: 'Good'});
expect(rubric.canSubmit()).toBe(false);
// Select all options, but do not provide required feedback
rubric.optionsSelected({
vocabulary: 'Good',
grammar: 'Bad'
});
expect(rubric.canSubmit()).toBe(false);
// Provide required feedback, but do not provide all options
rubric.optionsSelected({vocabulary: 'Good'});
rubric.criterionFeedback({
feedback_only: 'This is some feedback.'
});
expect(rubric.canSubmit()).toBe(false);
// Provide all options AND required feedback
rubric.optionsSelected({
vocabulary: 'Good',
grammar: 'Bad'
});
rubric.criterionFeedback({
feedback_only: 'This is some feedback.'
});
expect(rubric.canSubmit()).toBe(true);
});
});
...@@ -98,17 +98,40 @@ OpenAssessment.Rubric.prototype = { ...@@ -98,17 +98,40 @@ OpenAssessment.Rubric.prototype = {
**/ **/
canSubmitCallback: function(callback) { canSubmitCallback: function(callback) {
$(this.element).change( var rubric = this;
function() {
var numChecked = $('input[type=radio]:checked', this).length; // Set the initial state
var numAvailable = $('.field--radio.assessment__rubric__question', this).length; callback(rubric.canSubmit());
var canSubmit = numChecked == numAvailable;
callback(canSubmit); // Install a handler to update on change
} $(this.element).on('change keyup drop paste',
function() { callback(rubric.canSubmit()); }
); );
}, },
/** /**
Check whether the user has filled in all the required fields
to be able to submit an assessment.
Returns:
boolean
**/
canSubmit: function() {
var numChecked = $('input[type=radio]:checked', this.element).length;
var numAvailable = $('.field--radio.assessment__rubric__question.has--options', this.element).length;
var completedRequiredComments = true;
$('textarea[required]', this.element).each(function() {
var trimmedText = $.trim($(this).val());
if (trimmedText === "") {
completedRequiredComments = false;
}
});
return (numChecked == numAvailable && completedRequiredComments);
},
/**
Updates the rubric to display positive and negative messages on each Updates the rubric to display positive and negative messages on each
criterion. For each correction provided, the associated criterion will have criterion. For each correction provided, the associated criterion will have
an appropriate message displayed. an appropriate message displayed.
......
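The Jasmine spec and canSubmit above encode the submit-enable rule: every criterion that has options needs a selection, and every "required" feedback field needs non-empty text. As a cross-check, a rough Python sketch of the same rule (invented helper, not part of this PR), driven by the criteria from the template fixture:

# Hypothetical re-statement of the client-side rule for illustration only.
def can_submit(criteria, options_selected, criterion_feedback):
    """Return True when the assessment form is complete enough to submit."""
    for criterion in criteria:
        name = criterion["name"]
        if criterion.get("options") and name not in options_selected:
            return False  # a criterion with options has no selection yet
        if criterion.get("feedback") == "required" and not criterion_feedback.get(name, "").strip():
            return False  # required written feedback is still empty
    return True

# Mirrors the spec: complete only when 'vocabulary' and 'grammar' are selected
# AND 'feedback_only' has written feedback.
assert can_submit(
    [
        {"name": "vocabulary", "options": ["Bad", "Good"], "feedback": "optional"},
        {"name": "grammar", "options": ["Bad", "Good"]},
        {"name": "feedback_only", "options": [], "feedback": "required"},
    ],
    {"vocabulary": "Good", "grammar": "Bad"},
    {"feedback_only": "This is some feedback."},
)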
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
Read for conciseness, clarity of thought, and form. Read for conciseness, clarity of thought, and form.
</prompt> </prompt>
<criterion feedback='optional'> <criterion feedback="optional">
<name>concise</name> <name>concise</name>
<prompt>How concise is it?</prompt> <prompt>How concise is it?</prompt>
<option points="0"> <option points="0">
...@@ -74,7 +74,7 @@ ...@@ -74,7 +74,7 @@
</explanation> </explanation>
</option> </option>
</criterion> </criterion>
<criterion feedback='optional'> <criterion feedback="optional">
<name>form</name> <name>form</name>
<prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt> <prompt>Lastly, how is its form? Punctuation, grammar, and spelling all count.</prompt>
<option points="0"> <option points="0">
...@@ -102,6 +102,10 @@ ...@@ -102,6 +102,10 @@
<explanation></explanation> <explanation></explanation>
</option> </option>
</criterion> </criterion>
<criterion feedback="required">
<name>Feedback only</name>
<prompt>This criterion has only written feedback, no options</prompt>
</criterion>
</rubric> </rubric>
<assessments> <assessments>
<assessment name="peer-assessment" <assessment name="peer-assessment"
......
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>This is my answer.</answer>
<select criterion="vocabulary" option="good" />
</example>
<example>
<answer>тєѕт αηѕωєя</answer>
<select criterion="vocabulary" option="bad" />
</example>
</assessment>
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2"/>
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Feedback only criterion</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>vocabulary</name>
<prompt>How good is the vocabulary?</prompt>
<option points="0">
<name>bad</name>
<explanation>bad</explanation>
</option>
<option points="1">
<name>good</name>
<explanation>good</explanation>
</option>
</criterion>
<criterion feedback="required">
<name>𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞</name>
<prompt>This criterion accepts only written feedback, so it has no options</prompt>
</criterion>
</rubric>
<assessments>
<assessment name="student-training">
<example>
<answer>This is my answer.</answer>
<select criterion="vocabulary" option="good" />
</example>
<example>
<answer>тєѕт αηѕωєя</answer>
<select criterion="vocabulary" option="bad" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
</assessments>
</openassessment>
...@@ -40,28 +40,6 @@ ...@@ -40,28 +40,6 @@
</criterion> </criterion>
</rubric> </rubric>
<assessments> <assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Good" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" /> <assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" /> <assessment name="self-assessment" />
</assessments> </assessments>
......
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Good" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
...@@ -9,7 +9,15 @@ ...@@ -9,7 +9,15 @@
<criterion> <criterion>
<name>Test criterion</name> <name>Test criterion</name>
<prompt>Test criterion prompt</prompt> <prompt>Test criterion prompt</prompt>
<!-- no options --> <!-- duplicate option names -->
<option points="0">
<name>DUPLICATE</name>
<explanation></explanation>
</option>
<option points="1">
<name>DUPLICATE</name>
<explanation></explanation>
</option>
</criterion> </criterion>
</rubric> </rubric>
</openassessment> </openassessment>
...@@ -6,20 +6,6 @@ ...@@ -6,20 +6,6 @@
} }
}, },
"zero_options": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": []
}
]
}
},
"negative_points": { "negative_points": {
"rubric": { "rubric": {
"prompt": "Test Prompt", "prompt": "Test Prompt",
...@@ -544,5 +530,49 @@ ...@@ -544,5 +530,49 @@
} }
] ]
} }
},
"zero_options_feedback_optional": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "optional"
}
]
}
},
"zero_options_feedback_disabled": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "disabled"
}
]
}
},
"zero_options_no_feedback": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": []
}
]
}
} }
} }
...@@ -706,6 +706,71 @@ ...@@ -706,6 +706,71 @@
] ]
}, },
"criterion_feedback_required": {
"title": "Foo",
"prompt": "Test prompt",
"rubric_feedback_prompt": "Test Feedback Prompt",
"start": null,
"due": null,
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"feedback": "required",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": null,
"due": null,
"must_grade": 5,
"must_be_graded_by": 3
},
{
"name": "self-assessment",
"start": null,
"due": null
}
],
"expected_xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion feedback=\"required\">",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"<feedbackprompt>Test Feedback Prompt</feedbackprompt>",
"</rubric>",
"</openassessment>"
]
},
"student_training_no_examples": { "student_training_no_examples": {
"title": "Foo", "title": "Foo",
"prompt": "Test prompt", "prompt": "Test prompt",
......
...@@ -478,28 +478,28 @@ ...@@ -478,28 +478,28 @@
"criterion_feedback_optional": { "criterion_feedback_optional": {
"xml": [ "xml": [
"<openassessment>", "<openassessment>",
"<title>Foo</title>", "<title>foo</title>",
"<assessments>", "<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />", "<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>", "</assessments>",
"<rubric>", "<rubric>",
"<prompt>Test prompt</prompt>", "<prompt>test prompt</prompt>",
"<criterion>", "<criterion>",
"<name>Test criterion</name>", "<name>test criterion</name>",
"<prompt>Test criterion prompt</prompt>", "<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>", "<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>", "<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>", "</criterion>",
"<criterion feedback=\"optional\">", "<criterion feedback=\"optional\">",
"<name>Second criterion</name>", "<name>second criterion</name>",
"<prompt>Second criterion prompt</prompt>", "<prompt>second criterion prompt</prompt>",
"<option points=\"1\"><name>Maybe</name><explanation>Maybe explanation</explanation></option>", "<option points=\"1\"><name>maybe</name><explanation>maybe explanation</explanation></option>",
"</criterion>", "</criterion>",
"</rubric>", "</rubric>",
"</openassessment>" "</openassessment>"
], ],
"title": "Foo", "title": "foo",
"prompt": "Test prompt", "prompt": "test prompt",
"start": "2000-01-01T00:00:00", "start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00", "due": "3000-01-01T00:00:00",
"submission_start": null, "submission_start": null,
...@@ -507,35 +507,111 @@ ...@@ -507,35 +507,111 @@
"criteria": [ "criteria": [
{ {
"order_num": 0, "order_num": 0,
"name": "Test criterion", "name": "test criterion",
"prompt": "Test criterion prompt", "prompt": "test criterion prompt",
"feedback": "disabled", "feedback": "disabled",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 0, "points": 0,
"name": "No", "name": "no",
"explanation": "No explanation" "explanation": "no explanation"
}, },
{ {
"order_num": 1, "order_num": 1,
"points": 2, "points": 2,
"name": "Yes", "name": "yes",
"explanation": "Yes explanation" "explanation": "yes explanation"
} }
] ]
}, },
{ {
"order_num": 1, "order_num": 1,
"name": "Second criterion", "name": "second criterion",
"prompt": "Second criterion prompt", "prompt": "second criterion prompt",
"feedback": "optional", "feedback": "optional",
"options": [ "options": [
{ {
"order_num": 0, "order_num": 0,
"points": 1, "points": 1,
"name": "Maybe", "name": "maybe",
"explanation": "Maybe explanation" "explanation": "maybe explanation"
}
]
}
],
"assessments": [
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
"must_grade": 5,
"must_be_graded_by": 3
}
]
},
"criterion_feedback_required": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"<criterion feedback=\"required\">",
"<name>second criterion</name>",
"<prompt>second criterion prompt</prompt>",
"<option points=\"1\"><name>maybe</name><explanation>maybe explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"title": "foo",
"prompt": "test prompt",
"start": "2000-01-01T00:00:00",
"due": "3000-01-01T00:00:00",
"submission_start": null,
"submission_due": null,
"criteria": [
{
"order_num": 0,
"name": "test criterion",
"prompt": "test criterion prompt",
"feedback": "disabled",
"options": [
{
"order_num": 0,
"points": 0,
"name": "no",
"explanation": "no explanation"
},
{
"order_num": 1,
"points": 2,
"name": "yes",
"explanation": "yes explanation"
}
]
},
{
"order_num": 1,
"name": "second criterion",
"prompt": "second criterion prompt",
"feedback": "required",
"options": [
{
"order_num": 0,
"points": 1,
"name": "maybe",
"explanation": "maybe explanation"
} }
] ]
} }
......
...@@ -596,5 +596,20 @@ ...@@ -596,5 +596,20 @@
] ]
}, },
"is_released": true "is_released": true
},
"zero_options": {
"rubric": {
"prompt": "Test Prompt",
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [],
"feedback": "required"
}
]
}
} }
} }
...@@ -36,3 +36,19 @@ class AIAssessmentIntegrationTest(XBlockHandlerTestCase): ...@@ -36,3 +36,19 @@ class AIAssessmentIntegrationTest(XBlockHandlerTestCase):
score = sub_api.get_score(xblock.get_student_item_dict()) score = sub_api.get_score(xblock.get_student_item_dict())
self.assertIsNot(score, None) self.assertIsNot(score, None)
self.assertEqual(score['submission_uuid'], xblock.submission_uuid) self.assertEqual(score['submission_uuid'], xblock.submission_uuid)
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/feedback_only_criterion_ai.xml', user_id='Bob')
def test_feedback_only_criterion(self, xblock, mock_is_admin):
# Test that example-based (AI) grading, which creates assessments asynchronously,
# still works when the rubric includes a feedback-only criterion and
# updates the workflow so students can receive a score.
mock_is_admin.return_value = True
# Train classifiers for the problem and submit a response
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
self.request(xblock, 'submit', self.SUBMISSION, response_format='json')
# Render the grade page
resp = self.request(xblock, 'render_grade', json.dumps({}))
self.assertIn('example-based', resp.lower())
...@@ -603,6 +603,26 @@ class TestPeerAssessHandler(XBlockHandlerTestCase): ...@@ -603,6 +603,26 @@ class TestPeerAssessHandler(XBlockHandlerTestCase):
for part in assessment['parts']: for part in assessment['parts']:
self.assertEqual(part['feedback'], '') self.assertEqual(part['feedback'], '')
@scenario('data/feedback_only_criterion_peer.xml', user_id='Bob')
def test_peer_assess_feedback_only_criterion(self, xblock):
# Submit a peer assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'criterion_feedback': {u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ'},
'overall_feedback': u''
}
_, assessment = self._submit_peer_assessment(xblock, 'Sally', 'Bob', assessment_dict)
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'Ṫḧïṡ ïṡ ṡöṁë ḟëëḋḅäċḳ')
@scenario('data/peer_assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_submission_uuid_input_regression(self, xblock): def test_submission_uuid_input_regression(self, xblock):
# Submit a peer assessment # Submit a peer assessment
......
...@@ -78,6 +78,32 @@ class TestSelfAssessment(XBlockHandlerTestCase): ...@@ -78,6 +78,32 @@ class TestSelfAssessment(XBlockHandlerTestCase):
} }
mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs) mock_api.update_from_assessments.assert_called_once_with(submission['uuid'], expected_reqs)
@scenario('data/feedback_only_criterion_self.xml', user_id='Bob')
def test_self_assess_feedback_only_criterion(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = xblock.create_submission(student_item, self.SUBMISSION)
# Submit a self assessment for a rubric with a feedback-only criterion
assessment_dict = {
'options_selected': {u'vocabulary': u'good'},
'overall_feedback': u''
}
resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
self.assertTrue(resp['success'])
assessment = self_api.get_assessment(submission["uuid"])
# Check the assessment for the criterion that has options
self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
self.assertEqual(assessment['parts'][0]['option']['points'], 1)
# Check the feedback-only criterion score/feedback
# The written feedback should default to an empty string
self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
self.assertIs(assessment['parts'][1]['option'], None)
self.assertEqual(assessment['parts'][1]['feedback'], u'')
@scenario('data/self_assessment_scenario.xml', user_id='Bob') @scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_workflow_error(self, xblock): def test_self_assess_workflow_error(self, xblock):
# Create a submission for the student # Create a submission for the student
......
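Note: both the peer and self assessment tests above rely on feedback-only parts serializing with option set to None, so any code that totals points has to skip them. A minimal sketch under that assumption — the summarize_parts helper and the exact dict keys are illustrative, mirroring the assessment['parts'] shape used in these tests rather than any ORA2 API:

def summarize_parts(parts):
    # Sum earned points over parts that actually selected an option;
    # feedback-only parts (option is None) contribute nothing.
    earned = sum(
        part['option']['points'] for part in parts
        if part['option'] is not None
    )
    # Possible points come from the highest-valued option per criterion;
    # criteria with no options add zero.
    possible = 0
    for part in parts:
        option_points = [opt['points'] for opt in part['criterion'].get('options', [])]
        if option_points:
            possible += max(option_points)
    return earned, possible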
...@@ -13,20 +13,55 @@ from openassessment.workflow import api as workflow_api ...@@ -13,20 +13,55 @@ from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class StudentTrainingAssessTest(XBlockHandlerTestCase): class StudentTrainingTest(XBlockHandlerTestCase):
""" """
Tests for student training assessment. Base class for student training tests.
""" """
SUBMISSION = { SUBMISSION = {
'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.' 'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.'
} }
def assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
@ddt.ddt
class StudentTrainingAssessTest(StudentTrainingTest):
"""
Tests for student training assessment.
"""
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json') @ddt.file_data('data/student_training_mixin.json')
def test_correct(self, xblock, data): def test_correct(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"]) self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment # Agree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
...@@ -46,7 +81,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -46,7 +81,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
@ddt.file_data('data/student_training_mixin.json') @ddt.file_data('data/student_training_mixin.json')
def test_correct_with_error(self, xblock, data): def test_correct_with_error(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"]) self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment # Agree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
...@@ -69,7 +104,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -69,7 +104,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
@ddt.file_data('data/student_training_mixin.json') @ddt.file_data('data/student_training_mixin.json')
def test_incorrect(self, xblock, data): def test_incorrect(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"]) self.assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Disagree with the course author's assessment # Disagree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
...@@ -91,7 +126,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -91,7 +126,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
expected_context = data["expected_context"].copy() expected_context = data["expected_context"].copy()
expected_template = data["expected_template"] expected_template = data["expected_template"]
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
# Agree with the course author's assessment # Agree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
...@@ -119,7 +154,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -119,7 +154,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
expected_context["training_num_completed"] = 1 expected_context["training_num_completed"] = 1
expected_context["training_num_current"] = 2 expected_context["training_num_current"] = 2
expected_context["training_essay"] = u"тєѕт αηѕωєя" expected_context["training_essay"] = u"тєѕт αηѕωєя"
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json') resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
# Expect that we were correct # Expect that we were correct
...@@ -127,7 +162,27 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -127,7 +162,27 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
self.assertFalse(resp['corrections']) self.assertFalse(resp['corrections'])
expected_context = {} expected_context = {}
expected_template = "openassessmentblock/student_training/student_training_complete.html" expected_template = "openassessmentblock/student_training/student_training_complete.html"
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/feedback_only_criterion_student_training.xml', user_id='Bob')
def test_feedback_only_criterion(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self.request(xblock, 'render_student_training', json.dumps({}))
# Agree with the course author's assessment
# (as defined in the scenario XML)
# We do NOT pass in an option for the feedback-only criterion,
# because it doesn't have any options.
data = {
'options_selected': {
'vocabulary': 'good',
}
}
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['corrections'])
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json') @ddt.file_data('data/student_training_mixin.json')
...@@ -135,7 +190,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -135,7 +190,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy() expected_context = data["expected_context"].copy()
expected_template = data["expected_template"] expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps({}), response_format='json') resp = self.request(xblock, 'training_assess', json.dumps({}), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg')) self.assertFalse(resp['success'], msg=resp.get('msg'))
...@@ -151,7 +206,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -151,7 +206,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy() expected_context = data["expected_context"].copy()
expected_template = data["expected_template"] expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
selected_data = { selected_data = {
'options_selected': { 'options_selected': {
...@@ -174,36 +229,8 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -174,36 +229,8 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
resp = self.request(xblock, 'training_assess', json.dumps(selected_data)) resp = self.request(xblock, 'training_assess', json.dumps(selected_data))
self.assertIn("Your scores could not be checked", resp.decode('utf-8')) self.assertIn("Your scores could not be checked", resp.decode('utf-8'))
def _assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
class StudentTrainingRenderTest(StudentTrainingAssessTest): class StudentTrainingRenderTest(StudentTrainingTest):
""" """
Tests for student training step rendering. Tests for student training step rendering.
""" """
...@@ -230,7 +257,7 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest): ...@@ -230,7 +257,7 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest):
expected_context = { expected_context = {
'training_due': "2000-01-01T00:00:00+00:00" 'training_due': "2000-01-01T00:00:00+00:00"
} }
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
@patch.object(StudentTrainingWorkflow, "get_workflow") @patch.object(StudentTrainingWorkflow, "get_workflow")
...@@ -247,4 +274,4 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest): ...@@ -247,4 +274,4 @@ class StudentTrainingRenderTest(StudentTrainingAssessTest):
expected_context = { expected_context = {
'training_start': datetime.datetime(3000, 1, 1).replace(tzinfo=pytz.utc) 'training_start': datetime.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
} }
self._assert_path_and_context(xblock, expected_template, expected_context) self.assert_path_and_context(xblock, expected_template, expected_context)
...@@ -102,7 +102,7 @@ class StudioViewTest(XBlockHandlerTestCase): ...@@ -102,7 +102,7 @@ class StudioViewTest(XBlockHandlerTestCase):
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
self.assertIn("for this assignment", resp['msg'].lower()) self.assertIn("for this assignment", resp['msg'].lower())
@data(('data/invalid_rubric.xml', 'rubric'), ('data/invalid_assessment.xml', 'assessment')) @data(('data/invalid_rubric.xml', 'duplicate'), ('data/invalid_assessment.xml', 'assessment'))
@scenario('data/basic_scenario.xml') @scenario('data/basic_scenario.xml')
def test_update_xml_invalid(self, xblock, data): def test_update_xml_invalid(self, xblock, data):
xml_path = data[0] xml_path = data[0]
......
...@@ -106,7 +106,7 @@ def validate_assessments(assessments, current_assessments, is_released): ...@@ -106,7 +106,7 @@ def validate_assessments(assessments, current_assessments, is_released):
if len(assessments) == 0: if len(assessments) == 0:
return (False, _("This problem must include at least one assessment.")) return (False, _("This problem must include at least one assessment."))
# Right now, there are two allowed scenarios: (peer -> self) and (self) # Ensure that we support this sequence of assessments.
if not _is_valid_assessment_sequence(assessments): if not _is_valid_assessment_sequence(assessments):
msg = _( msg = _(
"For this assignment, you can set a peer assessment only, a self " "For this assignment, you can set a peer assessment only, a self "
...@@ -131,7 +131,7 @@ def validate_assessments(assessments, current_assessments, is_released): ...@@ -131,7 +131,7 @@ def validate_assessments(assessments, current_assessments, is_released):
if must_grade < must_be_graded_by: if must_grade < must_be_graded_by:
return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.')) return (False, _('The "must_grade" value must be greater than or equal to the "must_be_graded_by" value.'))
# Example-based assessment MUST specify 'ease' as the algorithm ID, # Example-based assessment MUST specify 'ease' or 'fake' as the algorithm ID,
# at least for now. Later, we may make this more flexible. # at least for now. Later, we may make this more flexible.
if assessment_dict.get('name') == 'example-based-assessment': if assessment_dict.get('name') == 'example-based-assessment':
if assessment_dict.get('algorithm_id') not in ['ease', 'fake']: if assessment_dict.get('algorithm_id') not in ['ease', 'fake']:
...@@ -177,8 +177,8 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based): ...@@ -177,8 +177,8 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
) )
return (False, msg) return (False, msg)
# No duplicate option names within a criterion
for criterion in rubric_dict['criteria']: for criterion in rubric_dict['criteria']:
# No duplicate option names within a criterion
duplicates = _duplicates([option['name'] for option in criterion['options']]) duplicates = _duplicates([option['name'] for option in criterion['options']])
if len(duplicates) > 0: if len(duplicates) > 0:
msg = _(u"Options in '{criterion}' have duplicate name(s): {duplicates}").format( msg = _(u"Options in '{criterion}' have duplicate name(s): {duplicates}").format(
...@@ -186,6 +186,12 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based): ...@@ -186,6 +186,12 @@ def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based):
) )
return (False, msg) return (False, msg)
# Some criteria may have no options, just written feedback.
# In this case, written feedback must be required (not optional or disabled).
if len(criterion['options']) == 0 and criterion.get('feedback', 'disabled') != 'required':
msg = _(u'Criteria with no options must require written feedback.')
return (False, msg)
# Example-based assessments impose the additional restriction # Example-based assessments impose the additional restriction
# that the point values for options must be unique within # that the point values for options must be unique within
# a particular rubric criterion. # a particular rubric criterion.
......
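Note: the new validation rule above accepts a criterion with zero options only when its written feedback is required; otherwise assessors would have no way to respond to the criterion at all. A standalone sketch of the rule, using the plain criterion dicts from the fixtures in this diff (the function name is hypothetical):

def _validate_feedback_only_criterion(criterion):
    # A criterion with no options carries no points, so it can only
    # contribute written feedback -- which must therefore be required.
    has_options = len(criterion.get('options', [])) > 0
    feedback = criterion.get('feedback', 'disabled')
    if not has_options and feedback != 'required':
        return (False, u'Criteria with no options must require written feedback.')
    return (True, u'')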
...@@ -113,10 +113,10 @@ def _serialize_criteria(criteria_root, criteria_list): ...@@ -113,10 +113,10 @@ def _serialize_criteria(criteria_root, criteria_list):
criterion_prompt = etree.SubElement(criterion_el, 'prompt') criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u'')) criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion feedback disabled or optional # Criterion feedback disabled, optional, or required
# If disabled, do not set the attribute. # If disabled, do not set the attribute.
if criterion.get('feedback') == "optional": if criterion.get('feedback') in ["optional", "required"]:
criterion_el.set('feedback', 'optional') criterion_el.set('feedback', criterion['feedback'])
# Criterion options # Criterion options
options_list = criterion.get('options', None) options_list = criterion.get('options', None)
...@@ -266,12 +266,12 @@ def _parse_criteria_xml(criteria_root): ...@@ -266,12 +266,12 @@ def _parse_criteria_xml(criteria_root):
else: else:
raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.')) raise UpdateFromXmlError(_('Every "criterion" element must contain a "prompt" element.'))
# Criterion feedback (disabled or optional) # Criterion feedback (disabled, optional, or required)
criterion_feedback = criterion.get('feedback', 'disabled') criterion_feedback = criterion.get('feedback', 'disabled')
if criterion_feedback in ['optional', 'disabled']: if criterion_feedback in ['optional', 'disabled', 'required']:
criterion_dict['feedback'] = criterion_feedback criterion_dict['feedback'] = criterion_feedback
else: else:
raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set to "optional"')) raise UpdateFromXmlError(_('Invalid value for "feedback" attribute: if specified, it must be set to "optional" or "required".'))
# Criterion options # Criterion options
criterion_dict['options'] = _parse_options_xml(criterion) criterion_dict['options'] = _parse_options_xml(criterion)
......
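Note: the xml.py changes above extend the criterion "feedback" attribute so that "required" round-trips through serialization and parsing alongside "optional", with "disabled" remaining the implicit default when the attribute is omitted. A minimal round-trip sketch using lxml.etree (assumed to match the module's etree import); the two helper functions here are illustrative, not the module's API:

from lxml import etree

VALID_FEEDBACK = ['optional', 'required']

def write_feedback_attr(criterion_el, criterion):
    # Only emit the attribute when feedback is enabled; omitting it means "disabled".
    if criterion.get('feedback') in VALID_FEEDBACK:
        criterion_el.set('feedback', criterion['feedback'])

def read_feedback_attr(criterion_el):
    feedback = criterion_el.get('feedback', 'disabled')
    if feedback not in VALID_FEEDBACK + ['disabled']:
        raise ValueError('"feedback" must be "optional" or "required" if specified.')
    return feedback

criterion_el = etree.Element('criterion')
write_feedback_attr(criterion_el, {'options': [], 'feedback': 'required'})
assert read_feedback_attr(criterion_el) == 'required'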
...@@ -18,6 +18,7 @@ django-celery==3.0.17 ...@@ -18,6 +18,7 @@ django-celery==3.0.17
django-extensions==1.2.5 django-extensions==1.2.5
django-model-utils==1.4.0 django-model-utils==1.4.0
djangorestframework==2.3.5 djangorestframework==2.3.5
lazy==1.1
loremipsum==1.0.2 loremipsum==1.0.2
python-dateutil==2.1 python-dateutil==2.1
pytz==2012h pytz==2012h
......