Commit fda2118b by Will Daly

Rename peer Django app to assessments

Implement self assessment api
parent 8c47b91e
from django.contrib import admin

from openassessment.assessment.models import Assessment, AssessmentPart, Rubric, Criterion, CriterionOption

admin.site.register(Assessment)
admin.site.register(AssessmentPart)
......
...@@ -11,11 +11,19 @@ import json
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext as _

import math

from submissions.models import Submission
class InvalidOptionSelection(Exception):
"""
The user selected options that do not match the rubric.
"""
pass
class Rubric(models.Model):
    """A Rubric contains the guidelines on how to assess a submission.

...@@ -82,28 +90,50 @@ class Rubric(models.Model):
                the option that was selected for that criterion.

        Returns:
            set of option ids

        Examples:
            >>> options_selected = {"secret": "yes", "safe": "no"}
            >>> rubric.options_ids(options_selected)
            [10, 12]

        Raises:
            InvalidOptionSelection: the selected options do not match the rubric.
        """
        # Select all criteria and options for this rubric
        # We use `select_related()` to minimize the number of database queries
        rubric_options = CriterionOption.objects.filter(criterion__rubric=self).select_related()

        # Create a dict of dicts that maps:
        # criterion names --> option names --> option ids
        rubric_criteria_dict = defaultdict(dict)
# Construct dictionaries for each option in the rubric
for option in rubric_options:
rubric_criteria_dict[option.criterion.name][option.name] = option.id
# Validate: are options selected for each criterion in the rubric?
if len(options_selected) != len(rubric_criteria_dict):
msg = _("Incorrect number of options for this rubric ({actual} instead of {expected}").format(
actual=len(options_selected), expected=len(rubric_criteria_dict))
raise InvalidOptionSelection(msg)
# Look up each selected option
option_id_set = set()
for criterion_name, option_name in options_selected.iteritems():
if (criterion_name in rubric_criteria_dict and
option_name in rubric_criteria_dict[criterion_name]
):
option_id = rubric_criteria_dict[criterion_name][option_name]
option_id_set.add(option_id)
else:
msg = _("{criterion}: {option} not found in rubric").format(
criterion=criterion_name, option=option_name
)
raise InvalidOptionSelection(msg)
        return option_id_set
class Criterion(models.Model):

...@@ -191,7 +221,7 @@ class Assessment(models.Model):
    scorer_id = models.CharField(max_length=40, db_index=True)
    score_type = models.CharField(max_length=2)
    feedback = models.TextField(max_length=10000, default="", blank=True)

    class Meta:
        ordering = ["-scored_at"]
......
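
For context on the new options_ids contract, a minimal caller-side sketch (not part of the commit; the criterion and option names are illustrative):

    # Illustrative sketch: translate a learner's selections into option ids,
    # converting InvalidOptionSelection into whatever error the caller uses.
    from openassessment.assessment.models import InvalidOptionSelection

    def selected_option_ids_or_none(rubric, options_selected):
        # e.g. options_selected = {"secret": "yes", "safe": "no"}
        try:
            return rubric.options_ids(options_selected)
        except InvalidOptionSelection:
            # peer_api raises PeerAssessmentRequestError here;
            # self_api raises SelfAssessmentRequestError.
            return None
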
...@@ -10,8 +10,8 @@ import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError

from openassessment.assessment.models import Assessment, InvalidOptionSelection
from openassessment.assessment.serializers import (
    AssessmentSerializer, rubric_from_dict, get_assessment_review)
from submissions import api as submission_api
from submissions.models import Submission, StudentItem, Score

...@@ -73,6 +73,7 @@ def is_complete(submission_uuid, requirements):
    )
    return finished_evaluating


def get_score(submission_uuid, requirements):
    # User hasn't completed their own submission yet
    if not is_complete(submission_uuid, requirements):

...@@ -117,9 +118,10 @@ def create_assessment(
            required for the student to receive a score for their submission.
        must_be_graded_by (int): The number of assessments
            required on the submission for it to be scored.
        assessment_dict (dict): All related information for the assessment. An
            assessment contains points_earned, points_possible, and feedback.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

...@@ -145,12 +147,14 @@ def create_assessment(
        submission = Submission.objects.get(uuid=submission_uuid)
        student_item = submission.student_item
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(assessment_dict["options_selected"])
        except InvalidOptionSelection as ex:
            msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
            raise PeerAssessmentRequestError(msg)

        # Check if the grader has even submitted an answer themselves...
        try:

...@@ -195,6 +199,36 @@ def create_assessment(
        raise PeerAssessmentInternalError(error_message)
def _score_if_finished(student_item,
submission,
required_assessments_for_student,
must_be_graded_by):
"""Calculate final grade iff peer evaluation flow is satisfied.
Checks if the student is finished with the peer assessment workflow. If the
    student already has a final grade calculated, there is no need to proceed.
    If they do not, a final grade is calculated and recorded for them.
"""
if Score.objects.filter(student_item=student_item):
return
finished_evaluating = has_finished_required_evaluating(
StudentItemSerializer(student_item).data,
required_assessments_for_student
)
assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
submission_finished = assessments.count() >= must_be_graded_by
if finished_evaluating and submission_finished:
submission_api.set_score(
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
sum(get_assessment_median_scores(submission.uuid, must_be_graded_by).values()),
assessments[0].points_possible
)
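
The score set above is the sum of the per-criterion medians, measured against the points possible of a single assessment. A standalone sketch of that arithmetic (illustrative; the exact median/rounding rule lives in get_assessment_median_scores, whose body is not shown in this diff):

    # Illustrative only: the shape of the median-then-sum calculation.
    def median_total(scores_by_criterion):
        # scores_by_criterion maps criterion name -> points earned across
        # the peer assessments, e.g. {"clarity": [1, 3, 3], "accuracy": [5, 3, 1]}
        def median(values):
            ordered = sorted(values)
            mid = len(ordered) // 2
            if len(ordered) % 2 == 1:
                return ordered[mid]
            return (ordered[mid - 1] + ordered[mid]) / 2.0
        return sum(median(points) for points in scores_by_criterion.values())

    # median_total({"clarity": [1, 3, 3], "accuracy": [5, 3, 1]}) == 6
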
def get_assessment_median_scores(submission_id, must_be_graded_by):
    """Get the median score for each rubric criterion

...@@ -289,7 +323,8 @@ def has_finished_required_evaluating(student_item_dict, required_assessments):
    )
    count = Assessment.objects.filter(
        submission__in=submissions,
        scorer_id=student_item_dict["student_id"],
        score_type=PEER_TYPE
    ).count()
    return count >= required_assessments, count

...@@ -418,7 +453,7 @@ def _get_first_submission_not_evaluated(student_items, student_id, required_num_
        "-attempt_number"
    )
    for submission in submissions:
        assessments = Assessment.objects.filter(submission=submission, score_type=PEER_TYPE)
        if assessments.count() < required_num_assessments:
            already_evaluated = False
            for assessment in assessments:
......
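
A small sketch of the counting helper after this change; only peer-type assessments are counted now, so self-assessments no longer inflate the total (the student item values below are placeholders):

    from openassessment.assessment import peer_api

    student_item = {
        "student_id": "Bob",            # placeholder values
        "item_id": "item_one",
        "course_id": "test_course",
        "item_type": "openassessment",
    }

    # Returns a (finished, count) tuple; `count` includes only assessments
    # whose score_type is PEER_TYPE.
    finished, count = peer_api.has_finished_required_evaluating(student_item, 3)
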
"""
Public interface for self-assessment.
"""
from django.utils.translation import ugettext as _
from submissions.api import (
get_submission_by_uuid, get_submissions,
SubmissionNotFoundError, SubmissionRequestError
)
from openassessment.assessment.serializers import (
rubric_from_dict, AssessmentSerializer, full_assessment_dict, InvalidRubric
)
from openassessment.assessment.models import Assessment, InvalidOptionSelection
# TODO -- remove once Dave's changes land
from submissions.models import Submission
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
class SelfAssessmentRequestError(Exception):
"""
There was a problem with the request for a self-assessment.
"""
pass
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
"""
Create a self-assessment for a submission.
Args:
submission_uuid (str): The unique identifier for the submission being assessed.
user_id (str): The ID of the user creating the assessment. This must match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option values selected.
rubric_dict (dict): Serialized Rubric model.
Kwargs:
scored_at (datetime): The timestamp of the assessment; defaults to the current time.
Returns:
dict: serialized Assessment model
Raises:
SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
"""
# Check that there are not any assessments for this submission
# TODO -- change key lookup for submission UUID once Dave's changes land
if Assessment.objects.filter(submission__uuid=submission_uuid, score_type=SELF_TYPE).exists():
raise SelfAssessmentRequestError(_("Self assessment already exists for this submission"))
# Check that the student is allowed to assess this submission
try:
submission = get_submission_by_uuid(submission_uuid)
if submission is None or submission['student_item']['student_id'] != user_id:
raise SelfAssessmentRequestError(_("Cannot self-assess this submission"))
except SubmissionNotFoundError:
raise SelfAssessmentRequestError(_("Could not retrieve the submission."))
# Get or create the rubric
try:
rubric = rubric_from_dict(rubric_dict)
option_ids = rubric.options_ids(options_selected)
except InvalidRubric as ex:
msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
raise SelfAssessmentRequestError(msg)
except InvalidOptionSelection:
msg = _("Selected options do not match the rubric")
raise SelfAssessmentRequestError(msg)
# Create the assessment
# Since we have already retrieved the submission, we can assume that
# the user who created the submission exists.
self_assessment = {
"rubric": rubric.id,
"scorer_id": user_id,
# TODO -- replace once Dave adds submission_uuid as a field on the assessment
"submission": Submission.objects.get(uuid=submission_uuid).pk,
"score_type": SELF_TYPE,
"feedback": u"",
"parts": [{"option": option_id} for option_id in option_ids],
}
if scored_at is not None:
self_assessment['scored_at'] = scored_at
# Serialize the assessment
serializer = AssessmentSerializer(data=self_assessment)
if not serializer.is_valid():
msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
raise SelfAssessmentRequestError(msg)
serializer.save()
# Return the serialized assessment
return serializer.data
def get_submission_and_assessment(student_item_dict):
"""
Retrieve a submission and self-assessment for a student item.
Args:
student_item_dict (dict): serialized StudentItem model
Returns:
A tuple `(submission, assessment)` where:
submission (dict) is a serialized Submission model, or None (if the user has not yet made a submission)
assessment (dict) is a serialized Assessment model, or None (if the user has not yet self-assessed)
If multiple submissions or self-assessments are found, returns the most recent one.
Raises:
SelfAssessmentRequestError: Student item dict was invalid.
"""
# Look up the most recent submission from the student item
try:
submissions = get_submissions(student_item_dict, limit=1)
if not submissions:
return (None, None)
except SubmissionNotFoundError:
return (None, None)
except SubmissionRequestError as ex:
raise SelfAssessmentRequestError(_('Could not retrieve submission'))
submission_uuid = submissions[0]['uuid']
# Retrieve assessments for the submission
# We weakly enforce that number of self-assessments per submission is <= 1,
# but not at the database level. Someone could take advantage of the race condition
# between checking the number of self-assessments and creating a new self-assessment.
    # To be safe, we retrieve just the most recent self-assessment.
assessments = Assessment.objects.filter(
score_type=SELF_TYPE, submission__uuid=submission_uuid
).order_by('-scored_at')
if assessments.exists():
# TODO -- remove once Dave's changes land
assessment_dict = full_assessment_dict(assessments[0])
assessment_dict['submission_uuid'] = submission_uuid
return (submissions[0], assessment_dict)
else:
return (submissions[0], None)
# TODO: fill in this stub
def is_complete(submission_uuid):
return True
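
An end-to-end sketch of the new self-assessment API (illustrative; the student item, rubric, and answer below are stand-ins shaped like the ones used in the tests later in this commit):

    from submissions.api import create_submission
    from openassessment.assessment import self_api

    student_item = {
        "student_id": "alice",          # placeholder values
        "item_id": "essay_1",
        "course_id": "demo_course",
        "item_type": "openassessment",
    }
    rubric = {
        "criteria": [
            {
                "name": "clarity",
                "prompt": "How clear was it?",
                "options": [
                    {"name": "clear", "points": 3, "explanation": ""},
                    {"name": "unclear", "points": 1, "explanation": ""},
                ]
            },
        ]
    }

    submission = create_submission(student_item, "my answer")
    try:
        assessment = self_api.create_assessment(
            submission["uuid"], student_item["student_id"],
            {"clarity": "clear"}, rubric,
        )
    except self_api.SelfAssessmentRequestError:
        assessment = None  # e.g. a duplicate self-assessment or mismatched options

    submission, assessment = self_api.get_submission_and_assessment(student_item)
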
...@@ -8,7 +8,7 @@ from copy import deepcopy
import dateutil.parser
from django.utils.translation import ugettext as _
from rest_framework import serializers

from openassessment.assessment.models import (
    Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
)

...@@ -123,7 +123,7 @@ class AssessmentSerializer(serializers.ModelSerializer):
            'scored_at',
            'scorer_id',
            'score_type',
            'feedback',

            # Foreign Key
            'parts',

...@@ -153,7 +153,7 @@ def get_assessment_review(submission):
    Examples:
        >>> get_assessment_review(submission)
        [{
            'submission': 1,
            'rubric': {
                'id': 1,

...@@ -183,26 +183,38 @@ def get_assessment_review(submission):
            'submission_uuid': u'0a600160-be7f-429d-a853-1283d49205e7',
            'points_earned': 9,
            'points_possible': 20,
        }]

    """
    return [
        full_assessment_dict(assessment)
        for assessment in Assessment.objects.filter(submission=submission)
    ]


def full_assessment_dict(assessment):
    """
    Return a dict representation of the Assessment model,
    including nested assessment parts.

    Args:
        assessment (Assessment): The Assessment model to serialize

    Returns:
        dict with keys 'rubric' (serialized Rubric model) and 'parts' (serialized assessment parts)
    """
    assessment_dict = AssessmentSerializer(assessment).data
    rubric_dict = RubricSerializer(assessment.rubric).data
    assessment_dict["rubric"] = rubric_dict

    parts = []
    for part in assessment.parts.all():
        part_dict = AssessmentPartSerializer(part).data
        options_dict = CriterionOptionSerializer(part.option).data
        criterion_dict = CriterionSerializer(part.option.criterion).data
        options_dict["criterion"] = criterion_dict
        part_dict["option"] = options_dict
        parts.append(part_dict)
    assessment_dict["parts"] = parts

    return assessment_dict
def rubric_from_dict(rubric_dict):

...@@ -276,18 +288,27 @@ def validate_assessment_dict(assessment_dict):
    if not assessment_dict.get('name') in ['peer-assessment', 'self-assessment']:
        return (False, _("Assessment type is not supported"))

    # Peer assessments need to specify must_grade and must_be_graded_by
    if assessment_dict.get('name') == 'peer-assessment':

        if 'must_grade' not in assessment_dict:
            return (False, _(u'Attribute "must_grade" is missing from peer assessment.'))

        if 'must_be_graded_by' not in assessment_dict:
            return (False, _(u'Attribute "must_be_graded_by" is missing from peer assessment.'))

        # Number you need to grade is >= the number of people that need to grade you
        must_grade = assessment_dict.get('must_grade')
        must_be_graded_by = assessment_dict.get('must_be_graded_by')

        if must_grade is None or must_grade < 1:
            return (False, _('"must_grade" must be a positive integer'))

        if must_be_graded_by is None or must_be_graded_by < 1:
            return (False, _('"must_be_graded_by" must be a positive integer'))

        if must_grade < must_be_graded_by:
            return (False, _('"must_grade" should be greater than or equal to "must_be_graded_by"'))

    return (True, u'')
......
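
To illustrate the tightened validation, a short sketch of how validate_assessment_dict now treats the two assessment types (assuming any earlier checks outside this hunk pass; the dicts are illustrative):

    from openassessment.assessment.serializers import validate_assessment_dict

    # Peer assessments must carry both grading requirements...
    valid, msg = validate_assessment_dict(
        {"name": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3}
    )   # valid is True

    # ...and are rejected when one is missing.
    valid, msg = validate_assessment_dict({"name": "peer-assessment", "must_grade": 5})
    # valid is False; msg says "must_be_graded_by" is missing

    # Self-assessments no longer need either attribute.
    valid, msg = validate_assessment_dict({"name": "self-assessment"})
    # valid is True
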
...@@ -50,14 +50,14 @@
    },
    "must_grade_zero": {
        "assessment": {
            "name": "peer-assessment",
            "must_grade": 0,
            "must_be_graded_by": 0
        }
    },
    "must_be_graded_by_zero": {
        "assessment": {
            "name": "peer-assessment",
            "must_grade": 1,
            "must_be_graded_by": 0
        }
......
"""
Tests for assessment models.
"""
from django.test import TestCase
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidOptionSelection
)
class TestRubricOptionIds(TestCase):
"""
Test selection of options from a rubric.
"""
NUM_CRITERIA = 4
NUM_OPTIONS = 3
def setUp(self):
"""
Create a rubric in the database.
"""
self.rubric = Rubric.objects.create()
self.criteria = [
Criterion.objects.create(
rubric=self.rubric,
name="test criterion {num}".format(num=num),
order_num=num,
) for num in range(self.NUM_CRITERIA)
]
self.options = dict()
for criterion in self.criteria:
self.options[criterion.name] = [
CriterionOption.objects.create(
criterion=criterion,
name="test option {num}".format(num=num),
order_num=num,
points=num
) for num in range(self.NUM_OPTIONS)
]
def test_option_ids(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_different_order(self):
options_ids = self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 0",
})
self.assertEqual(options_ids, set([
self.options['test criterion 0'][0].id,
self.options['test criterion 1'][1].id,
self.options['test criterion 2'][2].id,
self.options['test criterion 3'][0].id
]))
def test_option_ids_missing_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 3": "test option 2",
})
def test_option_ids_extra_criteria(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 0",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
"extra criterion": "test",
})
def test_option_ids_mutated_criterion_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test mutated criterion": "test option 1",
"test criterion 1": "test option 1",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
def test_option_ids_mutated_option_name(self):
with self.assertRaises(InvalidOptionSelection):
self.rubric.options_ids({
"test criterion 0": "test option 1",
"test criterion 1": "test mutated option",
"test criterion 2": "test option 2",
"test criterion 3": "test option 1",
})
...@@ -9,8 +9,8 @@ from ddt import ddt, file_data
from mock import patch
from nose.tools import raises

from openassessment.assessment import peer_api
from openassessment.assessment.models import Assessment
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from submissions.models import Submission
......
# -*- coding: utf-8 -*-
"""
Tests for self-assessment API.
"""
import copy
import datetime
import pytz
from django.test import TestCase
from submissions.api import create_submission
from openassessment.assessment.self_api import (
create_assessment, get_submission_and_assessment,
SelfAssessmentRequestError
)
class TestSelfApi(TestCase):
STUDENT_ITEM = {
'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
'item_id': 'test_item',
'course_id': 'test_course',
'item_type': 'test_type'
}
RUBRIC = {
"criteria": [
{
"name": "clarity",
"prompt": "How clear was it?",
"options": [
{"name": "somewhat clear", "points": 1, "explanation": ""},
{"name": "clear", "points": 3, "explanation": ""},
{"name": "very clear", "points": 5, "explanation": ""},
]
},
{
"name": "accuracy",
"prompt": "How accurate was the content?",
"options": [
{"name": "somewhat accurate", "points": 1, "explanation": ""},
{"name": "accurate", "points": 3, "explanation": ""},
{"name": "very accurate", "points": 5, "explanation": ""},
]
},
]
}
OPTIONS_SELECTED = {
"clarity": "clear",
"accuracy": "very accurate",
}
def test_create_assessment(self):
# Initially, there should be no submission or self assessment
self.assertEqual(get_submission_and_assessment(self.STUDENT_ITEM), (None, None))
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Now there should be a submission, but no self-assessment
received_submission, assessment = get_submission_and_assessment(self.STUDENT_ITEM)
self.assertItemsEqual(received_submission, submission)
self.assertIs(assessment, None)
# Create a self-assessment for the submission
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
# Retrieve the self-assessment
received_submission, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
self.assertItemsEqual(received_submission, submission)
# Check that the assessment we created matches the assessment we retrieved
# and that both have the correct values
self.assertItemsEqual(assessment, retrieved)
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 8)
self.assertEqual(assessment['points_possible'], 10)
self.assertEqual(assessment['feedback'], u'')
self.assertEqual(assessment['score_type'], u'SE')
def test_create_assessment_no_submission(self):
# Attempt to create a self-assessment for a submission that doesn't exist
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_wrong_user(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Attempt to create a self-assessment for the submission from a different user
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
                submission['uuid'], u'another user',
self.OPTIONS_SELECTED, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_invalid_criterion(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Mutate the selected option criterion so it does not match a criterion in the rubric
options = copy.deepcopy(self.OPTIONS_SELECTED)
options['invalid criterion'] = 'very clear'
# Attempt to create a self-assessment with options that do not match the rubric
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_invalid_option(self):
# Create a submission
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Mutate the selected option so the value does not match an available option
options = copy.deepcopy(self.OPTIONS_SELECTED)
options['clarity'] = 'invalid option'
# Attempt to create a self-assessment with options that do not match the rubric
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
)
    def test_create_assessment_missing_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Delete one of the criteria that is present in the rubric
options = copy.deepcopy(self.OPTIONS_SELECTED)
del options['clarity']
# Attempt to create a self-assessment with options that do not match the rubric
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
options, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1)
)
def test_create_assessment_timestamp(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Record the current system clock time
before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
# Create a self-assessment for the submission
# Do not override the scored_at timestamp, so it should be set to the current time
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
)
# Retrieve the self-assessment
_, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
# Expect that both the created and retrieved assessments have the same
# timestamp, and it's >= our recorded time.
self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
self.assertGreaterEqual(assessment['scored_at'], before)
def test_create_multiple_self_assessments(self):
# Create a submission to self-assess
submission = create_submission(self.STUDENT_ITEM, "Test answer")
# Self assess once
assessment = create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
)
# Attempt to self-assess again, which should raise an exception
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.RUBRIC,
)
# Expect that we still have the original assessment
_, retrieved = get_submission_and_assessment(self.STUDENT_ITEM)
self.assertItemsEqual(assessment, retrieved)
...@@ -4,8 +4,8 @@ import os.path
from ddt import ddt, file_data
from django.test import TestCase

from openassessment.assessment.models import Criterion, CriterionOption, Rubric
from openassessment.assessment.serializers import (
    InvalidRubric, RubricSerializer, rubric_from_dict
)
......
...@@ -4,7 +4,7 @@ Test validation of serialized models.
import ddt
from django.test import TestCase

from openassessment.assessment.serializers import validate_assessment_dict, validate_rubric_dict


@ddt.ddt
......
from django.conf.urls import patterns, url


urlpatterns = patterns(
    'openassessment.assessment.views',
    url(
        r'^(?P<student_id>[^/]+)/(?P<course_id>[^/]+)/(?P<item_id>[^/]+)$',
        'get_evaluations_for_student_item'
......
...@@ -2,7 +2,7 @@ import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response

from openassessment.assessment.peer_api import get_assessments
from submissions.api import SubmissionRequestError, get_submissions

log = logging.getLogger(__name__)
......
<!-- TODO: this is a placeholder -->
<div>{{ error_msg }}</div>
{% load i18n %}
{% block list_item %}
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment is--expanded">
{% endblock %}
<span class="system__element" id="self_submission_uuid">
{{ self_submission.uuid }}
</span>
<header class="step__header ui-toggle-visibility__control"> <header class="step__header ui-toggle-visibility__control">
<h2 class="step__title"> <h2 class="step__title">
...@@ -69,13 +74,6 @@ ...@@ -69,13 +74,6 @@
</li> </li>
{% endfor %} {% endfor %}
<li class="wrapper--input field field--textarea assessment__rubric__question" id="assessment__rubric__question--feedback">
<label for="assessment__rubric__question--feedback__value">
<i class="ico fa fa-caret-right"></i>
<span class="question__title__copy">Please provide any other feedback you have around this response</span>
</label>
<textarea id="assessment__rubric__question--feedback__value" placeholder="I felt this response was..."></textarea>
</li>
</ol> </ol>
</fieldset> </fieldset>
...@@ -83,7 +81,7 @@ ...@@ -83,7 +81,7 @@
<ul class="list list--actions"> <ul class="list list--actions">
<li class="list--actions__item"> <li class="list--actions__item">
<button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit"> <button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">
<span class="copy">{{ submit_button_text }}</span> <span class="copy">{% trans "Submit Your Assessment" %}</span>
<i class="ico fa fa-arrow-right"></i> <i class="ico fa fa-arrow-right"></i>
</button> </button>
</li> </li>
......
...@@ -9,10 +9,10 @@ from django_extensions.db.fields import UUIDField
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel

from openassessment.assessment import peer_api, self_api
from submissions import api as sub_api


class AssessmentWorkflow(TimeStampedModel, StatusModel):
    """Tracks the open-ended assessment status of a student submission.
......
from xblock.core import XBlock

from openassessment.assessment.peer_api import get_assessments
from openassessment.workflow import api as workflow_api

...@@ -25,7 +25,7 @@ class GradeMixin(object):
                "score": workflow["score"],
                "assessments": [
                    assessment
                    for assessment in get_assessments(self.submission_uuid)
                ],
            }
        elif workflow.get('status') == "waiting":
......
...@@ -360,6 +360,20 @@ class OpenAssessmentBlock(
        context = Context(context_dict)
        return Response(template.render(context), content_type='application/html', charset='UTF-8')
def render_error(self, error_msg):
"""
Render an error message.
Args:
error_msg (unicode): The error message to display.
Returns:
Response: A response object with an HTML body.
"""
context = Context({'error_msg': error_msg})
template = get_template('openassessmentblock/oa_error.html')
return Response(template.render(context), content_type='application/html', charset='UTF-8')
    def is_open(self):
        """Checks if the question is open.
......
import logging
from django.utils.translation import ugettext as _
from xblock.core import XBlock

from openassessment.assessment import peer_api
from openassessment.assessment.peer_api import (
    PeerAssessmentWorkflowError, PeerAssessmentRequestError,
    PeerAssessmentInternalError
)

...@@ -25,8 +25,8 @@ class PeerAssessmentMixin(object):
    """

    @XBlock.json_handler
    def peer_assess(self, data, suffix=''):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission. Performs basic workflow validation to ensure
        that an assessment can be performed at this time.
......
import logging
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment import self_api
logger = logging.getLogger(__name__)
class SelfAssessmentMixin(object):
    """The Self Assessment Mixin for all Self Assessment Functionality.

    Abstracts all functionality and handlers associated with Self Assessment.
    All Self Assessment API calls should be contained within this Mixin as
    well.

    SelfAssessmentMixin is a Mixin for the OpenAssessmentBlock. Functions in
    the SelfAssessmentMixin call into the OpenAssessmentBlock functions and
    will not work outside of OpenAssessmentBlock.
    """

    @XBlock.handler
    def render_self_assessment(self, data, suffix=''):
student = self.get_student_item_dict()
        path = 'openassessmentblock/self/oa_self_closed.html'
        context = {"step_status": "Incomplete"}

        # If we are not logged in (as in Studio preview mode),
        # we cannot interact with the self-assessment API.
        if student['student_id'] is not None:

            # Retrieve the self-assessment, if there is one
            try:
                submission, assessment = self_api.get_submission_and_assessment(student)
            except self_api.SelfAssessmentRequestError:
                logger.exception(u"Could not retrieve self assessment for {student_item}".format(student_item=student))
                return self.render_error(_(u"An unexpected error occurred."))
# If we haven't submitted yet, we cannot self-assess
if submission is None:
path = 'openassessmentblock/self/oa_self_closed.html'
context = {"step_status": "Incomplete"}
# If we have already submitted, then we're complete
elif assessment is not None:
path = 'openassessmentblock/self/oa_self_complete.html'
context = {"step_status": "Complete"}
# Otherwise, we can submit a self-assessment
else:
path = 'openassessmentblock/self/oa_self_assessment.html'
context = {
"rubric_criteria": self.rubric_criteria,
"estimated_time": "20 minutes", # TODO: Need to configure this.
"self_submission": submission,
"step_status": "Grading"
}
return self.render_assessment(path, context)
@XBlock.json_handler
def self_assess(self, data, suffix=''):
"""
Create a self-assessment for a submission.
Args:
data (dict): Must have the following keys:
submission_uuid (string): The unique identifier of the submission being assessed.
options_selected (dict): Dictionary mapping criterion names to option values.
Returns:
Dict with keys "success" (bool) indicating success/failure
and "msg" (unicode) containing additional information if an error occurs.
"""
if 'submission_uuid' not in data:
return {'success': False, 'msg': _(u"Missing submission_uuid key in request")}
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
try:
self_api.create_assessment(
data['submission_uuid'],
self.get_student_item_dict()['student_id'],
data['options_selected'],
{"criteria": self.rubric_criteria}
)
except self_api.SelfAssessmentRequestError as ex:
msg = _(u"Could not create self assessment: {error}").format(error=ex.message)
return {'success': False, 'msg': msg}
else:
return {'success': True, 'msg': u""}
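
For reference, the request and response shapes the new self_assess handler works with, written out as a sketch (the UUID and option names are placeholders):

    # What the client-side selfAssess() call POSTs to the handler:
    request_payload = {
        "submission_uuid": "0a600160-be7f-429d-a853-1283d49205e7",   # placeholder
        "options_selected": {"clarity": "clear", "accuracy": "very accurate"},
    }

    # What the handler returns on success and on a rejected request:
    success_response = {"success": True, "msg": u""}
    error_response = {"success": False, "msg": u"Could not create self assessment: ..."}
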
<div id="openassessment-base">
<div id="openassessment__response"></div>
<div id="openassessment__peer-assessment"></div>
<div id="openassessment__self-assessment"></div>
<div id="openassessment__self-assessment"></div>
<div id="openassessment__grade"></div>
</div>
<span id="peer_submission_uuid">abc1234</span>
<div id="peer-assessment--001__assessment">
<input name="option 1" type="radio" />
<input name="option 2" type="radio" />
<input name="option 3" type="radio" />
</div>
<div id="assessment__rubric__question--feedback__value">test feedback</div>
<span id="self_submission_uuid">abc1234</span>
<div id="self-assessment--001__assessment">
<input name="option 1" type="radio" />
<input name="option 2" type="radio" />
<input name="option 3" type="radio" />
</div>
/**
Tests for OA student-facing views.
**/
describe("OpenAssessment.BaseUI", function() {
// Stub server that returns dummy data
var StubServer = function() {
// Dummy fragments to return from the render func
this.fragments = {
submission: "Test submission",
self_assessment: readFixtures("self_assessment_frag.html"),
peer_assessment: readFixtures("peer_assessment_frag.html"),
grade: "Test fragment"
};
this.submit = function(submission) {
return $.Deferred(function(defer) {
defer.resolveWith(this, ['student', 0]);
}).promise();
};
this.peerAssess = function(submissionId, optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
this.selfAssess = function(submissionId, optionsSelected) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
};
this.render = function(component) {
var server = this;
return $.Deferred(function(defer) {
defer.resolveWith(this, [server.fragments[component]]);
}).promise();
};
}
// Stub runtime
var runtime = {};
var server = null;
var ui = null;
/**
Wait for subviews to load before executing callback.
Args:
callback (function): Function that takes no arguments.
**/
var loadSubviews = function(callback) {
runs(function() {
ui.load();
});
waitsFor(function() {
var subviewHasHtml = $("#openassessment-base").children().map(
function(index, el) { return el.innerHTML != ''; }
);
return Array(subviewHasHtml).every(function(hasHtml) { return hasHtml; });
});
runs(function() {
return callback();
});
}
beforeEach(function() {
// Load the DOM fixture
jasmine.getFixtures().fixturesPath = 'base/fixtures'
loadFixtures('oa_base.html');
// Create a new stub server
server = new StubServer();
// Create the object under test
var el = $("#openassessment-base").get(0);
ui = new OpenAssessment.BaseUI(runtime, el, server);
});
it("Sends a submission to the server", function() {
loadSubviews(function() {
spyOn(server, 'submit').andCallThrough();
ui.submit();
expect(server.submit).toHaveBeenCalled();
});
});
it("Sends a peer assessment to the server", function() {
loadSubviews(function() {
spyOn(server, 'peerAssess').andCallThrough();
ui.peerAssess();
expect(server.peerAssess).toHaveBeenCalled()
});
});
it("Sends a self assessment to the server", function() {
loadSubviews(function() {
spyOn(server, 'selfAssess').andCallThrough();
ui.selfAssess();
expect(server.selfAssess).toHaveBeenCalled();
});
});
});
...@@ -90,13 +90,13 @@ describe("OpenAssessment.Server", function() {
        var success = false;
        var options = {clarity: "Very clear", precision: "Somewhat precise"};

        server.peerAssess("abc1234", options, "Excellent job!").done(function() {
            success = true;
        });

        expect(success).toBe(true);
        expect($.ajax).toHaveBeenCalledWith({
            url: '/peer_assess',
            type: "POST",
            data: JSON.stringify({
                submission_uuid: "abc1234",

...@@ -231,24 +231,24 @@ describe("OpenAssessment.Server", function() {
        expect(receivedMsg).toEqual("Test error");
    });

    it("informs the caller of a server error when sending a peer assessment", function() {
        stubAjax(true, {success:false, msg:'Test error!'});

        var receivedMsg = null;
        var options = {clarity: "Very clear", precision: "Somewhat precise"};
        server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
            receivedMsg = msg;
        });

        expect(receivedMsg).toEqual("Test error!");
    });

    it("informs the caller of an AJAX error when sending a peer assessment", function() {
        stubAjax(false, null);

        var receivedMsg = null;
        var options = {clarity: "Very clear", precision: "Somewhat precise"};
        server.peerAssess("abc1234", options, "Excellent job!").fail(function(msg) {
            receivedMsg = msg;
        });
......
...@@ -111,7 +111,7 @@ OpenAssessment.BaseUI.prototype = {
                eventObject.preventDefault();

                // Handle the click
                ui.peerAssess();
            }
        );
    }

...@@ -133,6 +133,17 @@ OpenAssessment.BaseUI.prototype = {
            function(html) {
                $('#openassessment__self-assessment', ui.element).replaceWith(html);
                ui.setExpanded('self-assessment', expand);
// Install a click handler for the submit button
$('#self-assessment--001__assessment__submit', ui.element).click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
ui.selfAssess();
}
);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user

...@@ -198,11 +209,11 @@ OpenAssessment.BaseUI.prototype = {

    /**
    Send an assessment to the server and update the UI.
    **/
    peerAssess: function() {
        // Retrieve assessment info from the DOM
        var submissionId = $("span#peer_submission_uuid", this.element)[0].innerText;
        var optionsSelected = {};
        $("#peer-assessment--001__assessment input[type=radio]:checked", this.element).each(
            function(index, sel) {
                optionsSelected[sel.name] = sel.value;
            }

...@@ -211,17 +222,46 @@ OpenAssessment.BaseUI.prototype = {

        // Send the assessment to the server
        var ui = this;
        this.server.peerAssess(submissionId, optionsSelected, feedback).done(
            function() {
                // When we have successfully sent the assessment,
                // collapse the current step and expand the next step
                ui.renderPeerAssessmentStep(false);
                ui.renderSelfAssessmentStep(true);
                ui.renderGradeStep(false);
            }
        ).fail(function(errMsg) {
            // TODO: display to the user
            console.log(errMsg);
        });
},
/**
Send a self-assessment to the server and update the UI.
**/
selfAssess: function() {
// Retrieve self-assessment info from the DOM
var submissionId = $("span#self_submission_uuid", this.element)[0].innerText;
var optionsSelected = {};
$("#self-assessment--001__assessment input[type=radio]:checked", this.element).each(
function(index, sel) {
optionsSelected[sel.name] = sel.value;
}
);
// Send the assessment to the server
var ui = this;
this.server.selfAssess(submissionId, optionsSelected).done(
function() {
// When we have successfully sent the assessment,
// collapse the current step and expand the next step
ui.renderSelfAssessmentStep(false);
ui.renderGradeStep(true);
}
).fail(function(errMsg) {
// TODO: display to user
console.log(errMsg);
});
    }
};
......
...@@ -131,7 +131,7 @@ OpenAssessment.Server.prototype = {
    },

    /**
    Send a peer assessment to the XBlock.

    Args:
        submissionId (string): The UUID of the submission.

...@@ -146,14 +146,14 @@ OpenAssessment.Server.prototype = {

    Example:
        var options = { clarity: "Very clear", precision: "Somewhat precise" };
        var feedback = "Good job!";
        server.peerAssess("abc123", options, feedback).done(
            function() { console.log("Success!"); }
        ).fail(
            function(errorMsg) { console.log(errorMsg); }
        );
    **/
    peerAssess: function(submissionId, optionsSelected, feedback) {
        var url = this.url('peer_assess');
        var payload = JSON.stringify({
            submission_uuid: submissionId,
            options_selected: optionsSelected,

...@@ -176,6 +176,48 @@ OpenAssessment.Server.prototype = {
    },

    /**
Send a self-assessment to the XBlock.
Args:
submissionId (string): The UUID of the submission.
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
Returns:
A JQuery promise, which resolves with no args if successful
and fails with an error message otherwise.
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.selfAssess("abc123", options).done(
function() { console.log("Success!"); }
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
selfAssess: function(submissionId, optionsSelected) {
var url = this.url('self_assess');
var payload = JSON.stringify({
submission_uuid: submissionId,
options_selected: optionsSelected
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
function(data) {
if (data.success) {
defer.resolve();
}
else {
defer.rejectWith(this, [data.msg]);
}
}
).fail(function(data) {
defer.rejectWith(this, ['Could not contact server.']);
});
});
},
/**
Load the XBlock's XML definition from the server. Load the XBlock's XML definition from the server.
Returns: Returns:
......
...@@ -110,9 +110,6 @@
            due="2014-12-21T22:22-7:00"
            must_grade="5"
            must_be_graded_by="3" />
        <assessment name="self-assessment" due="2014-12-21T22:22-7:00" />
    </assessments>
</openassessment>
...@@ -13,7 +13,7 @@ from openassessment.xblock.xml import (
    UpdateFromXmlError, InvalidRubricError
)

from openassessment.assessment.serializers import validate_assessment_dict, validate_rubric_dict

logger = logging.getLogger(__name__)
......
from xblock.core import XBlock

from django.utils.translation import ugettext as _

from submissions import api
from openassessment.assessment import peer_api
from openassessment.workflow import api as workflow_api
......
...@@ -40,10 +40,6 @@
        </criterion>
    </rubric>
    <assessments>
        <assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
    </assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
...@@ -278,45 +278,5 @@
            "</rubric>",
            "</openassessment>"
        ]
},
"missing_must_grade": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_be_graded_by=\"3\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"missing_must_be_graded_by": {
"xml": [
"<openassessment>",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
} }
} }
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
<title>Foo</title> <title>Foo</title>
<assessments> <assessments>
<assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" /> <assessment name="peer-assessment" start="2014-02-27T09:46:28" due="2014-03-01T00:00:00" must_grade="5" must_be_graded_by="3" />
<assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" must_grade="2" must_be_graded_by="1" /> <assessment name="self-assessment" start="2014-04-01T00:00:00" due="2014-06-01T00:00:00" />
</assessments> </assessments>
<rubric> <rubric>
<prompt>Test prompt</prompt> <prompt>Test prompt</prompt>
......
...@@ -4,7 +4,7 @@ Tests for grade handlers in Open Assessment XBlock. ...@@ -4,7 +4,7 @@ Tests for grade handlers in Open Assessment XBlock.
""" """
import copy import copy
import json import json
from openassessment.peer import api as peer_api from openassessment.assessment import peer_api
from submissions import api as sub_api from submissions import api as sub_api
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
......
...@@ -5,7 +5,8 @@ Tests for peer assessment handlers in Open Assessment XBlock. ...@@ -5,7 +5,8 @@ Tests for peer assessment handlers in Open Assessment XBlock.
import copy import copy
import json import json
from openassessment.peer import api as peer_api from openassessment.assessment import peer_api
from submissions import api as submission_api
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
...@@ -19,7 +20,7 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -19,7 +20,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก' SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
@scenario('data/assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_assess_handler(self, xblock): def test_assess_handler(self, xblock):
# Create a submission for this problem from another user # Create a submission for this problem from another user
...@@ -35,7 +36,7 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -35,7 +36,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
# Submit an assessment and expect a successful response # Submit an assessment and expect a successful response
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid'] assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json') resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success']) self.assertTrue(resp['success'])
# Retrieve the assessment and check that it matches what we sent # Retrieve the assessment and check that it matches what we sent
...@@ -56,7 +57,7 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -56,7 +57,7 @@ class TestPeerAssessment(XBlockHandlerTestCase):
self.assertEqual(actual[0]['feedback'], assessment['feedback']) self.assertEqual(actual[0]['feedback'], assessment['feedback'])
@scenario('data/assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_assess_rubric_option_mismatch(self, xblock): def test_assess_rubric_option_mismatch(self, xblock):
# Create a submission for this problem from another user # Create a submission for this problem from another user
...@@ -73,16 +74,16 @@ class TestPeerAssessment(XBlockHandlerTestCase): ...@@ -73,16 +74,16 @@ class TestPeerAssessment(XBlockHandlerTestCase):
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid'] assessment['submission_uuid'] = submission['uuid']
assessment['options_selected']['invalid'] = 'not a part of the rubric!' assessment['options_selected']['invalid'] = 'not a part of the rubric!'
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json') resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
# Expect an error response # Expect an error response
self.assertFalse(resp['success']) self.assertFalse(resp['success'])
@scenario('data/assessment_scenario.xml', user_id='Bob') @scenario('data/peer_assessment_scenario.xml', user_id='Bob')
def test_missing_keys_in_request(self, xblock): def test_missing_keys_in_request(self, xblock):
for missing in ['feedback', 'submission_uuid', 'options_selected']: for missing in ['feedback', 'submission_uuid', 'options_selected']:
assessment = copy.deepcopy(self.ASSESSMENT) assessment = copy.deepcopy(self.ASSESSMENT)
del assessment[missing] del assessment[missing]
resp = self.request(xblock, 'assess', json.dumps(assessment), response_format='json') resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
self.assertEqual(resp['success'], False) self.assertEqual(resp['success'], False)
# -*- coding: utf-8 -*-
"""
Tests for self assessment handlers in Open Assessment XBlock.
"""
import copy
import json
import mock
from submissions import api as submission_api
from openassessment.assessment import self_api
from .base import XBlockHandlerTestCase, scenario
class TestSelfAssessment(XBlockHandlerTestCase):
maxDiff = None
SUBMISSION = u'ՇﻉรՇ รપ๒๓ٱรรٱѻก'
ASSESSMENT = {
'submission_uuid': None,
'options_selected': {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
}
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Expect that a self-assessment was created
_, assessment = self_api.get_submission_and_assessment(student_item)
self.assertEqual(assessment['submission_uuid'], submission['uuid'])
self.assertEqual(assessment['points_earned'], 5)
self.assertEqual(assessment['points_possible'], 6)
self.assertEqual(assessment['scorer_id'], 'Bob')
self.assertEqual(assessment['score_type'], 'SE')
self.assertEqual(assessment['feedback'], u'')
parts = sorted(assessment['parts'])
self.assertEqual(len(parts), 2)
self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
self.assertEqual(parts[0]['option']['name'], 'Fair')
self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_handler_missing_keys(self, xblock):
# Missing submission_uuid
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['submission_uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('submission_uuid', resp['msg'])
# Missing options_selected
assessment = copy.deepcopy(self.ASSESSMENT)
del assessment['options_selected']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
self.assertIn('options_selected', resp['msg'])
# No user specified, to simulate the Studio preview runtime
@scenario('data/self_assessment_scenario.xml')
def test_render_self_assessment_preview(self, xblock):
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Incomplete", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_complete(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Self-assess the submission
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])
# Expect that the self assessment shows that we've completed the step
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Complete", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_open(self, xblock):
student_item = xblock.get_student_item_dict()
# Create a submission for the student
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Expect that the self-assessment step is open
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Grading", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_no_submission(self, xblock):
# Without creating a submission, render the self-assessment step
# Expect that the step is closed
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("Incomplete", resp)
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_render_self_assessment_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
mock_api.get_submission_and_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertIn("error", resp.lower())
@scenario('data/self_assessment_scenario.xml', user_id='Bob')
def test_self_assess_api_error(self, xblock):
# Create a submission for the student
student_item = xblock.get_student_item_dict()
submission = submission_api.create_submission(student_item, self.SUBMISSION)
# Submit a self-assessment
assessment = copy.deepcopy(self.ASSESSMENT)
assessment['submission_uuid'] = submission['uuid']
# Simulate an error and expect a failure response
with mock.patch('openassessment.xblock.self_assessment_mixin.self_api') as mock_api:
mock_api.SelfAssessmentRequestError = self_api.SelfAssessmentRequestError
mock_api.create_assessment.side_effect = self_api.SelfAssessmentRequestError
resp = self.request(xblock, 'self_assess', json.dumps(assessment), response_format='json')
self.assertFalse(resp['success'])
...@@ -365,8 +365,6 @@ def _parse_assessments_xml(assessments_root, validator): ...@@ -365,8 +365,6 @@ def _parse_assessments_xml(assessments_root, validator):
assessment_dict['must_grade'] = int(assessment.get('must_grade')) assessment_dict['must_grade'] = int(assessment.get('must_grade'))
except ValueError: except ValueError:
raise UpdateFromXmlError(_('Assessment "must_grade" attribute must be an integer.')) raise UpdateFromXmlError(_('Assessment "must_grade" attribute must be an integer.'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "must_grade" attribute'))
# Assessment must_be_graded_by # Assessment must_be_graded_by
if 'must_be_graded_by' in assessment.attrib: if 'must_be_graded_by' in assessment.attrib:
...@@ -374,8 +372,6 @@ def _parse_assessments_xml(assessments_root, validator): ...@@ -374,8 +372,6 @@ def _parse_assessments_xml(assessments_root, validator):
assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by')) assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
except ValueError: except ValueError:
raise UpdateFromXmlError(_('Assessment "must_be_graded_by" attribute must be an integer.')) raise UpdateFromXmlError(_('Assessment "must_be_graded_by" attribute must be an integer.'))
else:
raise UpdateFromXmlError(_('XML assessment definition must have a "must_be_graded_by" attribute'))
# Validate the semantics of the assessment definition # Validate the semantics of the assessment definition
success, msg = validator(assessment_dict) success, msg = validator(assessment_dict)
......
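The two hunks above drop the hard requirement that every <assessment> element carry "must_grade" and "must_be_graded_by", so a bare <assessment name="self-assessment" /> now parses. A minimal sketch of the optional-attribute handling that remains, factored into a hypothetical helper that is not part of this commit (assumes an ElementTree element, as in the parser above):

from django.utils.translation import ugettext as _
from openassessment.xblock.xml import UpdateFromXmlError

def _parse_optional_int_attr(element, attr_name, target_dict):
    """Copy an optional integer attribute from an XML element into target_dict."""
    # Missing attributes are silently skipped; callers fall back to their defaults.
    if attr_name in element.attrib:
        try:
            target_dict[attr_name] = int(element.get(attr_name))
        except ValueError:
            raise UpdateFromXmlError(
                _('Assessment "{attr}" attribute must be an integer.').format(attr=attr_name)
            )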
...@@ -192,6 +192,29 @@ def get_submission(submission_uuid): ...@@ -192,6 +192,29 @@ def get_submission(submission_uuid):
return SubmissionSerializer(submission).data return SubmissionSerializer(submission).data
def get_submission_by_uuid(uuid):
"""
Retrieve a submission by its unique identifier.
Args:
uuid (str): the unique identifier of the submission.
Returns:
Serialized Submission model (dict) containing a serialized StudentItem model
If the submission does not exist, return None
"""
try:
submission = Submission.objects.get(uuid=uuid)
except Submission.DoesNotExist:
return None
# There is probably a more idiomatic way to do this using the Django REST framework
submission_dict = SubmissionSerializer(submission).data
submission_dict['student_item'] = StudentItemSerializer(submission.student_item).data
return submission_dict
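A brief usage sketch for the new lookup above; the describe_submission helper is illustrative and not part of this commit, but the None return and the nested 'student_item' key follow the docstring:

from submissions import api as sub_api

def describe_submission(submission_uuid):
    """Print the answer and owner of the submission with the given UUID (illustrative)."""
    submission = sub_api.get_submission_by_uuid(submission_uuid)
    if submission is None:
        # No submission matches this UUID; callers decide how to handle that.
        print u"No submission found with uuid {}".format(submission_uuid)
        return
    print submission['answer']
    print submission['student_item']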
def get_submissions(student_item_dict, limit=None): def get_submissions(student_item_dict, limit=None):
"""Retrieves the submissions for the specified student item, """Retrieves the submissions for the specified student item,
ordered by most recent submitted date. ordered by most recent submitted date.
......
...@@ -13,6 +13,7 @@ class StudentItemSerializer(serializers.ModelSerializer): ...@@ -13,6 +13,7 @@ class StudentItemSerializer(serializers.ModelSerializer):
class SubmissionSerializer(serializers.ModelSerializer): class SubmissionSerializer(serializers.ModelSerializer):
class Meta: class Meta:
model = Submission model = Submission
fields = ( fields = (
...@@ -21,7 +22,7 @@ class SubmissionSerializer(serializers.ModelSerializer): ...@@ -21,7 +22,7 @@ class SubmissionSerializer(serializers.ModelSerializer):
'attempt_number', 'attempt_number',
'submitted_at', 'submitted_at',
'created_at', 'created_at',
'answer' 'answer',
) )
......
...@@ -41,6 +41,17 @@ class TestSubmissionsApi(TestCase): ...@@ -41,6 +41,17 @@ class TestSubmissionsApi(TestCase):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE) submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
self._assert_submission(submission, ANSWER_ONE, 1, 1) self._assert_submission(submission, ANSWER_ONE, 1, 1)
def test_get_submission_by_uuid(self):
submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
# Retrieve the submission by its uuid
retrieved = api.get_submission_by_uuid(submission['uuid'])
self.assertItemsEqual(submission, retrieved)
# Should get None if we retrieve a submission that doesn't exist
retrieved = api.get_submission_by_uuid(u'no such uuid')
self.assertIs(retrieved, None)
def test_get_submissions(self): def test_get_submissions(self):
api.create_submission(STUDENT_ITEM, ANSWER_ONE) api.create_submission(STUDENT_ITEM, ANSWER_ONE)
api.create_submission(STUDENT_ITEM, ANSWER_TWO) api.create_submission(STUDENT_ITEM, ANSWER_TWO)
......
...@@ -140,8 +140,8 @@ INSTALLED_APPS = ( ...@@ -140,8 +140,8 @@ INSTALLED_APPS = (
# edx-tim apps # edx-tim apps
'submissions', 'submissions',
'openassessment', 'openassessment',
'openassessment.peer',
'openassessment.workflow', 'openassessment.workflow',
'openassessment.assessment',
) )
# A sample logging configuration. The only tangible logging # A sample logging configuration. The only tangible logging
......
...@@ -6,7 +6,7 @@ Test-specific Django settings. ...@@ -6,7 +6,7 @@ Test-specific Django settings.
from .base import * from .base import *
TEST_APPS = ( TEST_APPS = (
'openassessment.peer', 'openassessment.assessment',
'openassessment.workflow', 'openassessment.workflow',
'openassessment.xblock', 'openassessment.xblock',
'submissions', 'submissions',
......
...@@ -4,7 +4,7 @@ from setuptools import setup ...@@ -4,7 +4,7 @@ from setuptools import setup
PACKAGES = [ PACKAGES = [
'submissions', 'submissions',
'openassessment.peer', 'openassessment.assessment',
'openassessment.xblock' 'openassessment.xblock'
] ]
......
from django.conf.urls import include, patterns, url from django.conf.urls import include, patterns, url
from django.contrib import admin from django.contrib import admin
import openassessment.peer.urls import openassessment.assessment.urls
import submissions.urls import submissions.urls
import workbench.urls import workbench.urls
...@@ -19,5 +19,5 @@ urlpatterns = patterns( ...@@ -19,5 +19,5 @@ urlpatterns = patterns(
url(r'^submissions/', include(submissions.urls)), url(r'^submissions/', include(submissions.urls)),
# edx-tim apps # edx-tim apps
url(r'^peer/evaluations/', include(openassessment.peer.urls)), url(r'^peer/evaluations/', include(openassessment.assessment.urls)),
) )