Commit 3e13d27d by Eric Fischer

Modernize ORA, commit 2 of 2

These are the code changes needed to work with the dependencies that were
upgraded in the previous commit.
parent 00605095
......@@ -4,13 +4,10 @@ Django admin models for openassessment
import json
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils import html
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, PeerWorkflow, PeerWorkflowItem, Rubric,
AIGradingWorkflow, AITrainingWorkflow, AIClassifierSet, AIClassifier
)
from openassessment.assessment.models import Assessment, AssessmentFeedback, PeerWorkflow, PeerWorkflowItem, Rubric
from openassessment.assessment.serializers import RubricSerializer
......@@ -92,7 +89,7 @@ class AssessmentAdmin(admin.ModelAdmin):
"""
Returns the rubric link for this assessment.
"""
url = reverse(
url = reverse_lazy(
'admin:assessment_rubric_change',
args=[assessment_obj.rubric.id]
)
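
Note on the reverse -> reverse_lazy switch here and elsewhere in this commit: reverse() resolves a URL immediately, while reverse_lazy() returns a lazy object whose lookup is deferred until the value is actually used. A minimal sketch of the two calls, not part of the diff, assuming a configured Django project with this admin site registered (the rubric id is hypothetical):

    from django.core.urlresolvers import reverse, reverse_lazy  # moved to django.urls in Django 2.0+

    # Eager resolution: fails with NoReverseMatch right away if the URLconf is not ready.
    eager_url = reverse('admin:assessment_rubric_change', args=[42])   # 42: hypothetical rubric id

    # Lazy resolution: the lookup is deferred until the value is used,
    # e.g. str(lazy_url) or rendering the link in the admin change list.
    lazy_url = reverse_lazy('admin:assessment_rubric_change', args=[42])
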
......@@ -141,7 +138,7 @@ class AssessmentFeedbackAdmin(admin.ModelAdmin):
"""
links = [
u'<a href="{}">{}</a>'.format(
reverse('admin:assessment_assessment_change', args=[asmt.id]),
reverse_lazy('admin:assessment_assessment_change', args=[asmt.id]),
html.escape(asmt.scorer_id)
)
for asmt in assessment_feedback.assessments.all()
......@@ -150,44 +147,7 @@ class AssessmentFeedbackAdmin(admin.ModelAdmin):
assessments_by.allow_tags = True
class AIGradingWorkflowAdmin(admin.ModelAdmin):
"""
Django admin model for AIGradingWorkflows.
"""
list_display = ('uuid', 'submission_uuid')
search_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')
readonly_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')
class AITrainingWorkflowAdmin(admin.ModelAdmin):
"""
Django admin model for AITrainingWorkflows.
"""
list_display = ('uuid',)
search_fields = ('uuid', 'course_id', 'item_id',)
readonly_fields = ('uuid', 'course_id', 'item_id',)
class AIClassifierInline(admin.TabularInline):
"""
Django admin model for AIClassifiers.
"""
model = AIClassifier
class AIClassifierSetAdmin(admin.ModelAdmin):
"""
Django admin model for AICLassifierSets.
"""
list_display = ('id',)
search_fields = ('id',)
inlines = [AIClassifierInline]
admin.site.register(Rubric, RubricAdmin)
admin.site.register(PeerWorkflow, PeerWorkflowAdmin)
admin.site.register(Assessment, AssessmentAdmin)
admin.site.register(AssessmentFeedback, AssessmentFeedbackAdmin)
admin.site.register(AIGradingWorkflow, AIGradingWorkflowAdmin)
admin.site.register(AITrainingWorkflow, AITrainingWorkflowAdmin)
admin.site.register(AIClassifierSet, AIClassifierSetAdmin)
......@@ -5,22 +5,17 @@ the workflow for a given submission.
"""
import logging
from django.utils import timezone
from django.db import DatabaseError, IntegrityError, transaction
from dogapi import dog_stats_api
from django.utils import timezone
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem,
)
from openassessment.assessment.serializers import (
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
)
from dogapi import dog_stats_api
from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentWorkflowError)
from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentPart, InvalidRubricSelection,
PeerWorkflow, PeerWorkflowItem)
from openassessment.assessment.serializers import (AssessmentFeedbackSerializer, InvalidRubric, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments)
from submissions import api as sub_api
logger = logging.getLogger("openassessment.assessment.api.peer")
......
......@@ -2,20 +2,15 @@
Public interface for self-assessment.
"""
import logging
from django.db import DatabaseError, transaction
from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
from openassessment.assessment.serializers import (
InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
)
from openassessment.assessment.models import (
Assessment, AssessmentPart, InvalidRubricSelection
)
from openassessment.assessment.errors import (
SelfAssessmentRequestError, SelfAssessmentInternalError
)
from django.db import DatabaseError, transaction
from dogapi import dog_stats_api
from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from openassessment.assessment.serializers import (InvalidRubric, full_assessment_dict, rubric_from_dict,
serialize_assessments)
from submissions.api import SubmissionNotFoundError, get_submission_and_student
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
......
......@@ -2,25 +2,15 @@
Public interface for staff grading, used by students/course staff.
"""
import logging
from django.db import DatabaseError, transaction
from django.utils.timezone import now
from dogapi import dog_stats_api
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
from submissions import api as submissions_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidRubricSelection, StaffWorkflow,
)
from openassessment.assessment.serializers import (
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
logger = logging.getLogger("openassessment.assessment.api.staff")
STAFF_TYPE = "ST"
......
......@@ -7,19 +7,15 @@ Public interface for student training:
"""
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow, InvalidRubricSelection
from openassessment.assessment.serializers import (
deserialize_training_examples, serialize_training_example,
validate_training_example_format,
InvalidTrainingExample, InvalidRubric
)
from openassessment.assessment.errors import (
StudentTrainingRequestError, StudentTrainingInternalError
)
from django.utils.translation import ugettext as _
from openassessment.assessment.errors import StudentTrainingInternalError, StudentTrainingRequestError
from openassessment.assessment.models import InvalidRubricSelection, StudentTrainingWorkflow
from openassessment.assessment.serializers import (InvalidRubric, InvalidTrainingExample, deserialize_training_examples,
serialize_training_example, validate_training_example_format)
from submissions import api as sub_api
logger = logging.getLogger(__name__)
......
......@@ -2,7 +2,6 @@
Data Conversion utility methods for handling assessment data transformations.
"""
import json
def update_training_example_answer_format(answer):
......@@ -14,7 +13,7 @@ def update_training_example_answer_format(answer):
Returns:
dict
"""
if isinstance(answer, unicode) or isinstance(answer, str):
if isinstance(answer, (str, unicode)):
return {
'parts': [
{'text': answer}
......
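
Note on the isinstance change: isinstance(answer, (str, unicode)) is the single idiomatic check for both byte and text strings on Python 2. A minimal usage sketch for a plain-string answer (the value is a placeholder; the dict-shaped branch of the function sits outside this hunk):

    from openassessment.assessment.data_conversion import update_training_example_answer_format

    legacy_answer = u"Free-form text answer"
    converted = update_training_example_answer_format(legacy_answer)
    # Plain strings are wrapped into the newer parts-based answer format.
    assert converted == {'parts': [{'text': legacy_answer}]}
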
......@@ -8,4 +8,3 @@ from .peer import *
from .self import *
from .staff import *
from .student_training import *
from .ai import *
......@@ -2,7 +2,7 @@
# pylint: skip-file
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import migrations, models
import django.utils.timezone
......
......@@ -7,5 +7,4 @@ from .base import *
from .peer import *
from .training import *
from .student_training import *
from .ai import *
from .staff import *
# This file is empty, but we cannot delete it b/c historical migration records refer to it.
......@@ -12,18 +12,19 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import math
from collections import defaultdict
from copy import deepcopy
from hashlib import sha1
import json
import logging
import math
from lazy import lazy
from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
from lazy import lazy
import logging
logger = logging.getLogger("openassessment.assessment.models")
......
......@@ -7,16 +7,16 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import random
from datetime import timedelta
import logging
import random
from django.db import models, DatabaseError
from django.db import DatabaseError, models
from django.utils.timezone import now
from openassessment.assessment.errors import PeerAssessmentInternalError, PeerAssessmentWorkflowError
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
import logging
logger = logging.getLogger("openassessment.assessment.models")
......
......@@ -3,10 +3,9 @@ Models for managing staff assessments.
"""
from datetime import timedelta
from django.db import models, DatabaseError
from django.db import DatabaseError, models
from django.utils.timezone import now
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import StaffAssessmentInternalError
......
"""
Django models specific to the student training assessment type.
"""
from django.db import models, transaction, IntegrityError
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from submissions import api as sub_api
from .training import TrainingExample
......
"""
Django models for training (both student and AI).
"""
import json
from hashlib import sha1
import json
from django.core.cache import cache
from django.db import models
from .base import Rubric, CriterionOption
from .base import CriterionOption, Rubric
class TrainingExample(models.Model):
......
......@@ -5,13 +5,12 @@ Serializers common to all assessment types.
from copy import deepcopy
import logging
from django.core.cache import cache
from rest_framework import serializers
from rest_framework.fields import IntegerField, DateTimeField
from openassessment.assessment.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric,
)
from rest_framework.fields import DateTimeField, IntegerField
from django.core.cache import cache
from openassessment.assessment.models import Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
logger = logging.getLogger(__name__)
......@@ -211,8 +210,8 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `CriterionOption` again, we simply index into the places we expect them to
# be from the big, saved `Rubric` serialization.
parts = []
for part in assessment.parts.all().select_related("criterion", "option"):
criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
for part in assessment.parts.order_by('criterion__order_num').all().select_related("criterion", "option"):
criterion_dict = dict(rubric_dict["criteria"][part.criterion.order_num])
options_dict = None
if part.option is not None:
options_dict = criterion_dict["options"][part.option.order_num]
......
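
Note on the full_assessment_dict change: ordering parts by criterion__order_num keeps the serialized parts aligned with the cached rubric's criteria list, and dict(...) takes a shallow copy of the cached criterion entry so later additions (if any) do not leak back into the cached rubric serialization. A plain-Python illustration of the copy behavior (placeholder data, not the real rubric structure):

    cached_criterion = {"name": "clarity", "options": [{"points": 1}, {"points": 2}]}
    criterion_dict = dict(cached_criterion)    # shallow copy of the top-level mapping
    criterion_dict["selected"] = True          # hypothetical key added by downstream code
    assert "selected" not in cached_criterion  # the cached entry stays untouched
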
......@@ -2,11 +2,11 @@
Serializers specific to peer assessment.
"""
from rest_framework import serializers
from openassessment.assessment.models import (AssessmentFeedback, AssessmentFeedbackOption, PeerWorkflow,
PeerWorkflowItem)
from .base import AssessmentSerializer
from openassessment.assessment.models import (
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
class AssessmentFeedbackOptionSerializer(serializers.ModelSerializer):
......
......@@ -2,10 +2,12 @@
Serializers for the training assessment type.
"""
from django.core.cache import cache
from django.db import transaction, IntegrityError
from openassessment.assessment.models import TrainingExample
from django.db import IntegrityError, transaction
from openassessment.assessment.data_conversion import update_training_example_answer_format
from .base import rubric_from_dict, RubricSerializer
from openassessment.assessment.models import TrainingExample
from .base import RubricSerializer, rubric_from_dict
class InvalidTrainingExample(Exception):
......
......@@ -2,14 +2,19 @@
"""
Tests for the assessment Django models.
"""
import copy, ddt
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
import copy
import ddt
from openassessment.assessment.api.self import create_assessment
from submissions.api import create_submission
from openassessment.assessment.errors import SelfAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission
from .constants import RUBRIC
@ddt.ddt
class AssessmentTest(CacheResetTest):
......@@ -212,4 +217,4 @@ class AssessmentTest(CacheResetTest):
if has_feedback:
criterion_feedback['Quality'] = "This was an assignment of average quality."
return rubric, options_selected, criterion_feedback
\ No newline at end of file
return rubric, options_selected, criterion_feedback
# coding=utf-8
import datetime
import pytz
import copy
import datetime
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from ddt import ddt, file_data
from mock import patch
from nose.tools import raises
import pytz
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentFeedbackOption, AssessmentPart,
PeerWorkflow, PeerWorkflowItem)
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
......@@ -410,7 +409,7 @@ class TestPeerApi(CacheResetTest):
def test_peer_workflow_integrity_error(self):
tim_sub, __ = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
with patch("openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
# This should not raise an exception
peer_api.on_start(tim_sub["uuid"])
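
Note on the patch.object -> patch('dotted.path') switch used throughout these tests: both forms replace the same attribute, but the dotted-path form names the target where it lives instead of requiring the model class to be imported into the test module just to patch it. A minimal sketch of the two equivalent spellings, assuming a configured Django test environment:

    from django.db import IntegrityError
    from mock import patch

    from openassessment.assessment.models import PeerWorkflow

    # Object-based patching (the previous style in these tests):
    with patch.object(PeerWorkflow.objects, "get_or_create") as mocked:
        mocked.side_effect = IntegrityError("Oh no!")

    # Path-based patching (the new style), naming the target by import path:
    with patch("openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create") as mocked:
        mocked.side_effect = IntegrityError("Oh no!")
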
......@@ -1113,7 +1112,7 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission["uuid"])
self.assertIsNotNone(item.assessment)
@patch.object(PeerWorkflowItem.objects, "filter")
@patch("openassessment.assessment.models.peer.PeerWorkflowItem.objects.filter")
@raises(peer_api.PeerAssessmentInternalError)
def test_get_submitted_assessments_error(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -1123,7 +1122,7 @@ class TestPeerApi(CacheResetTest):
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
self.assertEqual(1, len(submitted_assessments))
@patch.object(PeerWorkflow.objects, 'raw')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.raw')
@raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter):
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
......@@ -1131,21 +1130,21 @@ class TestPeerApi(CacheResetTest):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_workflow.get_submission_for_review(3)
@patch.object(AssessmentFeedback.objects, 'get')
@patch('openassessment.assessment.models.AssessmentFeedback.objects.get')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.get_assessment_feedback(tim_answer['uuid'])
@patch.object(PeerWorkflowItem, 'get_scored_assessments')
@patch('openassessment.assessment.models.peer.PeerWorkflowItem.get_scored_assessments')
@raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']})
@patch.object(AssessmentFeedback, 'save')
@patch('openassessment.assessment.models.AssessmentFeedback.save')
@raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error_on_save(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
......@@ -1157,7 +1156,7 @@ class TestPeerApi(CacheResetTest):
}
)
@patch.object(AssessmentFeedback, 'save')
@patch('openassessment.assessment.models.AssessmentFeedback.save')
@raises(peer_api.PeerAssessmentRequestError)
def test_set_assessment_feedback_error_on_huge_save(self, mock_filter):
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
......@@ -1168,20 +1167,20 @@ class TestPeerApi(CacheResetTest):
}
)
@patch.object(PeerWorkflow.objects, 'get')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get')
@raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
PeerWorkflow.get_by_submission_uuid(tim_answer['uuid'])
@patch.object(PeerWorkflow.objects, 'get_or_create')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@patch.object(PeerWorkflow.objects, 'get_or_create')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_item_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.")
......@@ -1240,25 +1239,25 @@ class TestPeerApi(CacheResetTest):
@raises(peer_api.PeerAssessmentInternalError)
def test_max_score_db_error(self):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(Assessment.objects, 'filter') as mock_filter:
with patch('openassessment.assessment.models.Assessment.objects.filter') as mock_filter:
mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_rubric_max_scores(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get')
@raises(peer_api.PeerAssessmentInternalError)
def test_median_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"])
@patch.object(Assessment.objects, 'filter')
@patch('openassessment.assessment.models.Assessment.objects.filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_get_assessments_db_error(self, mock_filter):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_assessments(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get_or_create')
@patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError)
def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
......@@ -1274,7 +1273,7 @@ class TestPeerApi(CacheResetTest):
MONDAY,
)
@patch.object(Assessment.objects, 'filter')
@patch('openassessment.assessment.models.Assessment.objects.filter')
@raises(peer_api.PeerAssessmentInternalError)
def test_error_on_get_assessment(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer")
......@@ -1529,7 +1528,7 @@ class TestPeerApi(CacheResetTest):
submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1)
with patch.object(PeerWorkflow.objects, 'get') as mock_call:
with patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get') as mock_call:
mock_call.side_effect = DatabaseError("Kaboom!")
peer_api.create_assessment(
submission['uuid'],
......
......@@ -4,11 +4,10 @@ Tests for assessment models.
"""
import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
Rubric, Criterion, CriterionOption, InvalidRubricSelection
)
from openassessment.assessment.models import Criterion, CriterionOption, InvalidRubricSelection, Rubric
from openassessment.assessment.test.constants import RUBRIC
from openassessment.test_utils import CacheResetTest
class RubricIndexTest(CacheResetTest):
......
......@@ -5,14 +5,13 @@ Tests for self-assessment API.
import copy
import datetime
from mock import patch
import pytz
from django.db import DatabaseError
from mock import patch
from openassessment.assessment.api.self import (
create_assessment, submitter_is_finished, get_assessment
)
from openassessment.assessment.api.self import create_assessment, get_assessment, submitter_is_finished
from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError
from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission
......@@ -103,7 +102,7 @@ class TestSelfApi(CacheResetTest):
# Attempt to create a self-assessment for a submission that doesn't exist
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
'deadbeef-1234-5678-9100-1234deadbeef', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
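
Note on replacing 'invalid_submission_uuid' with a well-formed UUID string: the test still targets a submission that does not exist, but the identifier now parses as a UUID, presumably so the upgraded submissions layer rejects it as missing rather than as malformed. A tiny sketch for generating such a value (illustration only):

    import uuid

    # A syntactically valid but nonexistent submission identifier.
    missing_submission_uuid = str(uuid.uuid4())
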
......@@ -115,7 +114,7 @@ class TestSelfApi(CacheResetTest):
# Attempt to create a self-assessment for the submission from a different user
with self.assertRaises(SelfAssessmentRequestError):
create_assessment(
'invalid_submission_uuid', u'another user',
'deadbeef-1234-5678-9100-1234deadbeef', u'another user',
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
)
......
......@@ -3,18 +3,15 @@
Tests for assessment serializers.
"""
import copy
import json
import os.path
import copy
from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart
from openassessment.assessment.serializers import (AssessmentFeedbackSerializer, InvalidRubric, full_assessment_dict,
rubric_from_dict)
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback
)
from openassessment.assessment.serializers import (
rubric_from_dict, full_assessment_dict,
AssessmentFeedbackSerializer, InvalidRubric
)
from .constants import RUBRIC
......
......@@ -3,30 +3,26 @@
Tests for staff assessments.
"""
import copy
import mock
from datetime import timedelta
from ddt import data, ddt, unpack
import mock
from django.db import DatabaseError
from django.test.utils import override_settings
from django.utils.timezone import now
from ddt import ddt, data, unpack
from .constants import OPTIONS_SELECTED_DICT, RUBRIC, RUBRIC_OPTIONS, RUBRIC_POSSIBLE_POINTS, STUDENT_ITEM
from openassessment.assessment.test.test_ai import (
ALGORITHM_ID,
AI_ALGORITHMS,
AIGradingTest,
train_classifiers
)
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.api import staff as staff_api, ai as ai_api, peer as peer_api
from openassessment.assessment.api.self import create_assessment as self_assess
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import staff as staff_api
from openassessment.assessment.api.peer import create_assessment as peer_assess
from openassessment.assessment.models import Assessment, StaffWorkflow
from openassessment.assessment.errors import StaffAssessmentRequestError, StaffAssessmentInternalError
from openassessment.assessment.api.self import create_assessment as self_assess
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import StaffWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from .constants import OPTIONS_SELECTED_DICT, RUBRIC, RUBRIC_OPTIONS, RUBRIC_POSSIBLE_POINTS, STUDENT_ITEM
@ddt
class TestStaffAssessment(CacheResetTest):
......@@ -41,17 +37,6 @@ class TestStaffAssessment(CacheResetTest):
ASSESSMENT_SCORES_DDT = [key for key in OPTIONS_SELECTED_DICT]
@staticmethod
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def _ai_assess(sub):
"""
Helper to fulfill ai assessment requirements.
"""
# Note that CLASSIFIER_SCORE_OVERRIDES matches OPTIONS_SELECTED_DICT['most'] scores
train_classifiers(RUBRIC, AIGradingTest.CLASSIFIER_SCORE_OVERRIDES)
ai_api.on_init(sub, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
return ai_api.get_latest_assessment(sub)
@staticmethod
def _peer_assess(scores):
"""
Helper to fulfill peer assessment requirements.
......@@ -67,7 +52,6 @@ class TestStaffAssessment(CacheResetTest):
'staff',
lambda sub, scorer_id, scores: staff_api.create_assessment(sub, scorer_id, scores, dict(), "", RUBRIC)
),
('ai', lambda sub, scorer_id, scores: TestStaffAssessment._ai_assess(sub))
]
def _verify_done_state(self, uuid, requirements, expect_done=True):
......@@ -377,7 +361,7 @@ class TestStaffAssessment(CacheResetTest):
)
self.assertEqual(str(context_manager.exception), u"Invalid options were selected in the rubric.")
@mock.patch.object(Assessment.objects, 'filter')
@mock.patch('openassessment.assessment.models.Assessment.objects.filter')
def test_database_filter_error_handling(self, mock_filter):
# Create a submission
tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
......@@ -403,7 +387,7 @@ class TestStaffAssessment(CacheResetTest):
u"Error getting staff assessment scores for {}".format(tim_sub["uuid"])
)
@mock.patch.object(Assessment, 'create')
@mock.patch('openassessment.assessment.models.Assessment.create')
def test_database_create_error_handling(self, mock_create):
mock_create.side_effect = DatabaseError("KABOOM!")
......@@ -531,7 +515,5 @@ class TestStaffAssessment(CacheResetTest):
steps = problem_steps
if 'peer' in steps:
peer_api.on_start(submission["uuid"])
if 'ai' in steps:
init_params['ai'] = {'rubric': RUBRIC, 'algorithm_id': ALGORITHM_ID}
workflow_api.create_workflow(submission["uuid"], steps, init_params)
return submission, new_student_item
......@@ -3,15 +3,18 @@
Tests for training assessment type.
"""
import copy
from django.db import DatabaseError
import ddt
from mock import patch
from django.db import DatabaseError
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingInternalError, StudentTrainingRequestError
from openassessment.test_utils import CacheResetTest
from .constants import STUDENT_ITEM, ANSWER, RUBRIC, EXAMPLES
from submissions import api as sub_api
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingRequestError, StudentTrainingInternalError
from openassessment.assessment.models import StudentTrainingWorkflow
from .constants import ANSWER, EXAMPLES, RUBRIC, STUDENT_ITEM
@ddt.ddt
......@@ -210,13 +213,13 @@ class StudentTrainingAssessmentTest(CacheResetTest):
with self.assertRaises(StudentTrainingRequestError):
training_api.get_training_example("no_such_submission", RUBRIC, EXAMPLES)
@patch.object(StudentTrainingWorkflow.objects, 'get')
@patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
def test_get_num_completed_database_error(self, mock_db):
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.get_num_completed(self.submission_uuid)
@patch.object(StudentTrainingWorkflow.objects, 'get')
@patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
def test_get_training_example_database_error(self, mock_db):
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
......@@ -224,7 +227,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
def test_assess_training_example_database_error(self):
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
with patch.object(StudentTrainingWorkflow.objects, 'get') as mock_db:
with patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get') as mock_db:
mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError):
training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
......
......@@ -2,13 +2,14 @@
Tests for student training models.
"""
import mock
from django.db import IntegrityError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow, StudentTrainingWorkflowItem
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
StudentTrainingWorkflow, StudentTrainingWorkflowItem
)
from .constants import STUDENT_ITEM, ANSWER, EXAMPLES
from submissions import api as sub_api
from .constants import ANSWER, EXAMPLES, STUDENT_ITEM
class StudentTrainingWorkflowTest(CacheResetTest):
......@@ -16,8 +17,8 @@ class StudentTrainingWorkflowTest(CacheResetTest):
Tests for the student training workflow model.
"""
@mock.patch.object(StudentTrainingWorkflow.objects, 'get')
@mock.patch.object(StudentTrainingWorkflow.objects, 'get_or_create')
@mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
@mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get_or_create')
def test_create_workflow_integrity_error(self, mock_create, mock_get):
# Simulate a race condition in which someone creates a workflow
# after we check if it exists. This will violate the database uniqueness
......@@ -37,7 +38,7 @@ class StudentTrainingWorkflowTest(CacheResetTest):
workflow = StudentTrainingWorkflow.get_workflow(submission['uuid'])
self.assertEqual(workflow, mock_workflow)
@mock.patch.object(StudentTrainingWorkflowItem.objects, 'create')
@mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflowItem.objects.create')
def test_create_workflow_item_integrity_error(self, mock_create):
# Create a submission and workflow
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
......
......@@ -2,12 +2,16 @@
"""
Tests for training models and serializers (common to student and AI training).
"""
from collections import OrderedDict
import copy
import mock
from django.db import IntegrityError
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import TrainingExample
from openassessment.assessment.serializers import deserialize_training_examples, serialize_training_example
from openassessment.test_utils import CacheResetTest
class TrainingExampleSerializerTest(CacheResetTest):
......@@ -63,17 +67,17 @@ class TrainingExampleSerializerTest(CacheResetTest):
u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
),
'options_selected': {
'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
}
})
},
{
'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
'options_selected': {
'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
})
},
{
'answer': (
......@@ -82,10 +86,10 @@ class TrainingExampleSerializerTest(CacheResetTest):
u"azure..... Consider all this; and then turn to this green, gentle, and most docile earth; "
u"consider them both, the sea and the land; and do you not find a strange analogy to something in yourself?"
),
'options_selected': {
'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
}
})
},
]
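
Note on wrapping options_selected in OrderedDict: it pins the iteration order of the selected options so anything order-sensitive downstream (serialization, comparison output) stays reproducible. A small sketch; the list-of-pairs constructor shown here is the form that fixes the order explicitly (criterion and option names are placeholders):

    from collections import OrderedDict

    options_selected = OrderedDict([
        (u"vocabulary", u"good"),
        (u"grammar", u"poor"),
    ])
    assert list(options_selected.keys()) == [u"vocabulary", u"grammar"]
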
......@@ -156,8 +160,8 @@ class TrainingExampleSerializerTest(CacheResetTest):
for example in (first_examples + second_examples):
self.assertIn(example, db_examples)
@mock.patch.object(TrainingExample.objects, 'get')
@mock.patch.object(TrainingExample, 'create_example')
@mock.patch('openassessment.assessment.models.TrainingExample.objects.get')
@mock.patch('openassessment.assessment.models.TrainingExample.create_example')
def test_deserialize_integrity_error(self, mock_create, mock_get):
# Simulate an integrity error when creating the training example
# This can occur when using repeatable-read isolation mode.
......
from django.conf.urls import patterns, url
from django.conf.urls import url
urlpatterns = patterns(
'openassessment.assessment.views',
from openassessment.assessment import views
urlpatterns = [
url(
r'^(?P<student_id>[^/]+)/(?P<course_id>[^/]+)/(?P<item_id>[^/]+)$',
'get_evaluations_for_student_item'
views.get_evaluations_for_student_item
),
)
]
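
Note on the URLconf change: django.conf.urls.patterns() and string view references were deprecated in Django 1.8 and removed in 1.10, so urlpatterns becomes a plain list of url() entries pointing at view callables, as shown above. A standalone sketch of the same pattern with a hypothetical view and route name (not part of this repository):

    from django.conf.urls import url
    from django.http import HttpResponse

    def example_view(request, key):
        # Hypothetical view, used only to illustrate the list-based URLconf form.
        return HttpResponse(key)

    urlpatterns = [
        # View callables replace the old patterns('module.path', 'view_name') string lookups.
        url(r'^example/(?P<key>.+)/$', example_view, name='openassessment-example'),
    ]
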
import logging
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from openassessment.assessment.api.peer import get_assessments
from submissions.api import SubmissionRequestError, get_submissions
......
"""
Aggregate data for openassessment.
"""
from collections import defaultdict
import csv
import json
from collections import defaultdict
from django.conf import settings
from submissions import api as sub_api
from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.models import Assessment, AssessmentPart, AssessmentFeedback
from submissions import api as sub_api
class CsvWriter(object):
......
......@@ -2,8 +2,7 @@ import abc
from django.conf import settings
from ..exceptions import FileUploadInternalError
from ..exceptions import FileUploadRequestError
from ..exceptions import FileUploadInternalError, FileUploadRequestError
class Settings(object):
......
import os
from .base import BaseBackend
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse_lazy
from .base import BaseBackend
class Backend(BaseBackend):
......@@ -14,7 +15,7 @@ class Backend(BaseBackend):
"""
Return the URL pointing to the ORA2 django storage upload endpoint.
"""
return reverse("openassessment-django-storage", kwargs={'key': key})
return reverse_lazy("openassessment-django-storage", kwargs={'key': key})
def get_download_url(self, key):
"""
......
from .base import BaseBackend
from .. import exceptions
from django.conf import settings
import django.core.cache
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.encoding import smart_text
from .. import exceptions
from .base import BaseBackend
class Backend(BaseBackend):
"""
......@@ -47,7 +47,7 @@ class Backend(BaseBackend):
def _get_url(self, key):
key_name = self._get_key_name(key)
url = reverse("openassessment-filesystem-storage", kwargs={'key': key_name})
url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': key_name})
return url
......
import boto
import logging
import boto3
from django.conf import settings
from .base import BaseBackend
from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api")
......@@ -12,15 +15,16 @@ class Backend(BaseBackend):
def get_upload_url(self, key, content_type):
bucket_name, key_name = self._retrieve_parameters(key)
try:
conn = _connect_to_s3()
upload_url = conn.generate_url(
expires_in=self.UPLOAD_URL_TIMEOUT,
method='PUT',
bucket=bucket_name,
key=key_name,
headers={'Content-Length': '5242880', 'Content-Type': content_type}
client = _connect_to_s3()
return client.generate_presigned_url(
ExpiresIn=self.UPLOAD_URL_TIMEOUT,
ClientMethod='put_object',
Params={
'Bucket': bucket_name,
'Key': key_name
},
HttpMethod="PUT"
)
return upload_url
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating an upload URL."
......@@ -30,10 +34,16 @@ class Backend(BaseBackend):
def get_download_url(self, key):
bucket_name, key_name = self._retrieve_parameters(key)
try:
conn = _connect_to_s3()
bucket = conn.get_bucket(bucket_name)
s3_key = bucket.get_key(key_name)
return s3_key.generate_url(expires_in=self.DOWNLOAD_URL_TIMEOUT) if s3_key else ""
client = _connect_to_s3()
return client.generate_presigned_url(
ExpiresIn=self.DOWNLOAD_URL_TIMEOUT,
ClientMethod='get_object',
Params={
'Bucket': bucket_name,
'Key': key_name
},
HttpMethod="GET"
)
except Exception as ex:
logger.exception(
u"An internal exception occurred while generating a download URL."
......@@ -42,15 +52,16 @@ class Backend(BaseBackend):
def remove_file(self, key):
bucket_name, key_name = self._retrieve_parameters(key)
conn = _connect_to_s3()
bucket = conn.get_bucket(bucket_name)
s3_key = bucket.get_key(key_name)
if s3_key:
bucket.delete_key(s3_key)
client = _connect_to_s3()
resp = client.delete_objects(
Bucket=bucket_name,
Delete={
'Objects': [{'Key':key_name}]
}
)
if 'Deleted' in resp and any(key_name == deleted_dict['Key'] for deleted_dict in resp['Deleted']):
return True
else:
return False
return False
def _connect_to_s3():
......@@ -65,7 +76,8 @@ def _connect_to_s3():
aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
return boto.connect_s3(
return boto3.client(
's3',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
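
Note on the boto -> boto3 migration in this backend: the connection/bucket/key objects are replaced by a low-level client, presigned URLs come from client.generate_presigned_url, and deletion goes through client.delete_objects. A minimal end-to-end sketch of the new calls under moto (bucket name, key, credentials, and region are placeholders):

    import boto3
    from moto import mock_s3

    @mock_s3
    def s3_roundtrip():
        client = boto3.client(
            's3',
            region_name='us-east-1',
            aws_access_key_id='foo',
            aws_secret_access_key='bar',
        )
        client.create_bucket(Bucket='mybucket')
        client.put_object(Bucket='mybucket', Key='submissions_attachments/foo', Body=b'hi')
        url = client.generate_presigned_url(
            ClientMethod='get_object',
            Params={'Bucket': 'mybucket', 'Key': 'submissions_attachments/foo'},
            ExpiresIn=3600,
            HttpMethod='GET',
        )
        client.delete_objects(
            Bucket='mybucket',
            Delete={'Objects': [{'Key': 'submissions_attachments/foo'}]},
        )
        return url
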
......@@ -12,15 +12,21 @@ ORA2_SWIFT_KEY should correspond to Meta Temp-Url-Key configure in swift. Run
'''
import logging
from django.conf import settings
import swiftclient
import urlparse
import requests
import swiftclient
from django.conf import settings
from .base import BaseBackend
from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api")
# prefix paths with current version, in case we need to roll it at some point
SWIFT_BACKEND_VERSION = 1
class Backend(BaseBackend):
"""
......@@ -32,10 +38,11 @@ class Backend(BaseBackend):
key, url = get_settings()
try:
temp_url = swiftclient.utils.generate_temp_url(
path='%s/%s/%s' % (url.path, bucket_name, key_name),
path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
key=key,
method='PUT',
seconds=self.UPLOAD_URL_TIMEOUT)
seconds=self.UPLOAD_URL_TIMEOUT
)
return '%s://%s%s' % (url.scheme, url.netloc, temp_url)
except Exception as ex:
logger.exception(
......@@ -48,10 +55,11 @@ class Backend(BaseBackend):
key, url = get_settings()
try:
temp_url = swiftclient.utils.generate_temp_url(
path='%s/%s/%s' % (url.path, bucket_name, key_name),
path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
key=key,
method='GET',
seconds=self.DOWNLOAD_URL_TIMEOUT)
seconds=self.DOWNLOAD_URL_TIMEOUT
)
download_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
response = requests.get(download_url)
return download_url if response.status_code == 200 else ""
......
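
Note on the '/v%s%s' path change: the path handed to swiftclient.utils.generate_temp_url now carries an explicit version prefix (SWIFT_BACKEND_VERSION), giving it the documented /v1/<account path>/<bucket>/<key> shape and leaving room to roll the version later without touching callers. A minimal sketch of the call itself (all values are placeholders):

    import swiftclient.utils

    temp_url = swiftclient.utils.generate_temp_url(
        path='/v1/AUTH_test/mybucket/submissions_attachments/foo',
        key='secret-temp-url-key',
        method='GET',
        seconds=3600,
    )
    # temp_url is a path carrying temp_url_sig / temp_url_expires query parameters;
    # the backend prepends the scheme and host from settings before returning it.
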
# -*- coding: utf-8 -*-
import boto
from boto.s3.key import Key
import ddt
import json
from mock import patch, Mock
import os
import shutil
import tempfile
import urllib
from urlparse import urlparse
import boto3
import ddt
from mock import Mock, patch
from moto import mock_s3
from nose.tools import raises
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse_lazy
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from moto import mock_s3
from mock import patch
from nose.tools import raises
from openassessment.fileupload import api
from openassessment.fileupload import exceptions
from openassessment.fileupload import api, exceptions, urls
from openassessment.fileupload import views_filesystem as views
from openassessment.fileupload.backends.base import Settings as FileUploadSettings
from openassessment.fileupload.backends.filesystem import get_cache as get_filesystem_cache
......@@ -39,8 +35,8 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
def test_get_upload_url(self):
conn = boto.connect_s3()
conn.create_bucket('mybucket')
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='mybucket')
uploadUrl = api.get_upload_url("foo", "bar")
self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", uploadUrl)
......@@ -51,11 +47,9 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
def test_get_download_url(self):
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = "submissions_attachments/foo"
key.set_contents_from_string("How d'ya do?")
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='mybucket')
s3.Object('mybucket', 'submissions_attachments/foo').put(Body="How d'ya do?")
downloadUrl = api.get_download_url("foo")
self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", downloadUrl)
......@@ -66,11 +60,9 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
def test_remove_file(self):
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = "submissions_attachments/foo"
key.set_contents_from_string("Test")
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='mybucket')
s3.Object('mybucket', 'submissions_attachments/foo').put(Body="Test")
result = api.remove_file("foo")
self.assertTrue(result)
result = api.remove_file("foo")
......@@ -90,7 +82,7 @@ class TestFileUploadService(TestCase):
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
@patch.object(boto, 'connect_s3')
@patch.object(boto3, 'client')
@raises(exceptions.FileUploadInternalError)
def test_get_upload_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes")
......@@ -102,7 +94,7 @@ class TestFileUploadService(TestCase):
AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
)
@patch.object(boto, 'connect_s3')
@patch.object(boto3, 'client')
@raises(exceptions.FileUploadInternalError, mock_s3)
def test_get_download_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes")
......@@ -272,7 +264,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase):
self.assertEqual('application/octet-stream', download_response["Content-Type"])
def test_upload_with_unauthorized_key(self):
upload_url = reverse("openassessment-filesystem-storage", kwargs={'key': self.key_name})
upload_url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': self.key_name})
cache_before_request = get_filesystem_cache().get(self.key_name)
upload_response = self.client.put(upload_url, data=self.content.read(), content_type=self.content_type)
......@@ -282,7 +274,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase):
self.assertIsNone(cache_after_request)
def test_download_url_with_unauthorized_key(self):
download_url = reverse("openassessment-filesystem-storage", kwargs={'key': self.key_name})
download_url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': self.key_name})
views.save_to_file(self.key_name, "uploaded content")
download_response = self.client.get(download_url)
......@@ -327,7 +319,7 @@ class TestSwiftBackend(TestCase):
result = urlparse(url)
self.assertEqual(result.scheme, u'http')
self.assertEqual(result.netloc, u'www.example.com:12345')
self.assertEqual(result.path, u'/bucket_name/submissions_attachments/foo')
self.assertEqual(result.path, u'/v1/bucket_name/submissions_attachments/foo')
self.assertIn(result.params, 'temp_url_sig=')
self.assertIn(result.params, 'temp_url_expires=')
......
from django.conf.urls import patterns, url
from django.conf.urls import url
urlpatterns = patterns(
'openassessment.fileupload.views_django_storage',
url(r'^django/(?P<key>.+)/$', 'django_storage', name='openassessment-django-storage'),
)
from openassessment.fileupload import views_django_storage, views_filesystem
urlpatterns += patterns(
'openassessment.fileupload.views_filesystem',
url(r'^(?P<key>.+)/$', 'filesystem_storage', name='openassessment-filesystem-storage'),
)
urlpatterns = [
url(r'^django/(?P<key>.+)/$', views_django_storage.django_storage, name='openassessment-django-storage'),
url(r'^(?P<key>.+)/$', views_filesystem.filesystem_storage, name='openassessment-filesystem-storage'),
]
......@@ -3,13 +3,13 @@ import json
import os
from django.conf import settings
from django.shortcuts import HttpResponse, Http404
from django.shortcuts import Http404, HttpResponse
from django.utils import timezone
from django.views.decorators.http import require_http_methods
from . import exceptions
from .backends.filesystem import is_upload_url_available, is_download_url_available
from .backends.base import Settings
from .backends.filesystem import is_download_url_available, is_upload_url_available
@require_http_methods(["PUT", "GET"])
......
......@@ -7,7 +7,6 @@ This command differs from upload_oa_data in that it places all the data into one
Generates the same format as the instructor dashboard downloads.
"""
import csv
from optparse import make_option
import os
from django.core.management.base import BaseCommand, CommandError
......@@ -21,25 +20,34 @@ class Command(BaseCommand):
"""
help = ("Usage: collect_ora2_data <course_id> --output-dir=<output_dir>")
args = "<course_id>"
option_list = BaseCommand.option_list + (
make_option('-o', '--output-dir',
action='store', dest='output_dir', default=None,
help="Write output to a directory rather than stdout"),
make_option('-n', '--file-name',
action='store', dest='file_name', default=None,
help="Write CSV file to the given name"),
)
def add_arguments(self, parser):
parser.add_argument('course_id', nargs='+', type=unicode)
parser.add_argument(
'-o',
'--output-dir',
action='store',
dest='output_dir',
default=None,
help="Write output to a directory rather than stdout"
)
parser.add_argument(
'-n',
'--file-name',
action='store',
dest='file_name',
default=None,
help="Write CSV file to the given name"
)
def handle(self, *args, **options):
"""
Run the command.
"""
if not args:
if not options['course_id']:
raise CommandError("Course ID must be specified to fetch data")
course_id = args[0]
course_id = options['course_id']
if options['file_name']:
file_name = options['file_name']
......
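
Note on the optparse -> add_arguments change: management commands on newer Django declare both options and positional arguments through argparse in add_arguments(), and everything arrives in the options dict inside handle(). A minimal invocation sketch for the updated command (the course id and output path are placeholders):

    from django.core.management import call_command

    # 'course_id' is declared with nargs='+' above, so it lands in
    # options['course_id'] as a list inside handle().
    call_command(
        'collect_ora2_data',
        'course-v1:Org+Course+Run',
        output_dir='/tmp/ora2-data',
    )
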
"""
Create dummy submissions and assessments for testing.
"""
from uuid import uuid4
import copy
from django.core.management.base import BaseCommand, CommandError
from uuid import uuid4
import loremipsum
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from django.core.management.base import BaseCommand, CommandError
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
STEPS = ['peer', 'self']
......
"""
Generate CSV files for submission and assessment data, then upload to S3.
"""
import sys
import datetime
import os
import os.path
import datetime
import shutil
import tempfile
import sys
import tarfile
import boto
from boto.s3.key import Key
from django.core.management.base import BaseCommand, CommandError
import tempfile
import boto3
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from openassessment.data import CsvWriter
......@@ -135,16 +137,24 @@ class Command(BaseCommand):
# environment vars or configuration files instead.
aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
conn = boto.connect_s3(
client = boto3.client(
's3',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
bucket = client.create_bucket(Bucket=s3_bucket)
bucket = conn.get_bucket(s3_bucket)
key_name = os.path.join(course_id, os.path.split(file_path)[1])
key = Key(bucket=bucket, name=key_name)
key.set_contents_from_filename(file_path)
url = key.generate_url(self.URL_EXPIRATION_HOURS * 3600)
client.put_object(Bucket=s3_bucket, Key=key_name, Body=open(file_path, 'rb'))
url = client.generate_presigned_url(
ExpiresIn=self.URL_EXPIRATION_HOURS * 3600,
ClientMethod='get_object',
Params={
'Bucket': s3_bucket,
'Key': key_name
},
HttpMethod="GET"
)
# Store the key and url in the history
self._history.append({'key': key_name, 'url': url})
......
......@@ -2,11 +2,12 @@
Tests for the management command that creates dummy submissions.
"""
from submissions import api as sub_api
from django.test import TestCase
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.management.commands import create_oa_submissions
from django.test import TestCase
from submissions import api as sub_api
class CreateSubmissionsTest(TestCase):
......
......@@ -2,12 +2,13 @@
"""
Tests for management command that uploads submission/assessment data.
"""
from StringIO import StringIO
import tarfile
import boto
import boto3
import moto
from openassessment.test_utils import CacheResetTest
from openassessment.management.commands import upload_oa_data
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
......@@ -29,8 +30,8 @@ class UploadDataTest(CacheResetTest):
@moto.mock_s3
def test_upload(self):
# Create an S3 bucket using the fake S3 implementation
conn = boto.connect_s3()
conn.create_bucket(self.BUCKET_NAME)
s3 = boto3.resource('s3')
s3.create_bucket(Bucket=self.BUCKET_NAME)
# Create some submissions to ensure that we cover
# the progress indicator code.
......@@ -53,12 +54,10 @@ class UploadDataTest(CacheResetTest):
# Retrieve the uploaded file from the fake S3 implementation
self.assertEqual(len(cmd.history), 1)
bucket = conn.get_all_buckets()[0]
key = bucket.get_key(cmd.history[0]['key'])
contents = StringIO(key.get_contents_as_string())
s3.Object(self.BUCKET_NAME, cmd.history[0]['key']).download_file("tmp-test-file.tar.gz")
# Expect that the contents contain all the expected CSV files
with tarfile.open(mode="r:gz", fileobj=contents) as tar:
with tarfile.open("tmp-test-file.tar.gz", mode="r:gz") as tar:
file_sizes = {
member.name: member.size
for member in tar.getmembers()
......@@ -69,4 +68,4 @@ class UploadDataTest(CacheResetTest):
# Expect that we generated a URL for the bucket
url = cmd.history[0]['url']
self.assertIn("https://{}".format(self.BUCKET_NAME), url)
self.assertIn("https://s3.amazonaws.com/{}".format(self.BUCKET_NAME), url)
......@@ -181,11 +181,7 @@
</ul>
<p class="openassessment_description" id="openassessment_step_select_description">
{% if 'example_based_assessment' in editor_assessments_order %}
{% trans "In this assignment, you can include learner training, peer assessment, self assessment, example based assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include an example based assessment step, it must precede all other steps. If you include a learner training step, it must precede peer and self assessment steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
{% else %}
{% trans "In this assignment, you can include learner training, peer assessment, self assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include a learner training step, it must precede all other steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
{% endif %}
{% trans "In this assignment, you can include learner training, peer assessment, self assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include a learner training step, it must precede all other steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
</p>
<ol id="openassessment_assessment_module_settings_editors">
{% for assessment in editor_assessments_order %}
......
......@@ -33,51 +33,6 @@
<div class="openassessment__student-info staff-info__student__report"></div>
</div>
{% if display_schedule_training %}
<div class="staff-info__classifierset ui-staff__content__section">
{% if classifierset %}
<table class="staff-info__classifierset__table">
<caption class="title">{% trans "Classifier set" %}</caption>
<thead>
<th abbr="Field" scope="col">{% trans "Field" %}</th>
<th abbr="Value" scope="col">{% trans "Value" %}</th>
</thead>
<tbody>
<tr>
<td class="value">{% trans "Created at" %}</td>
<td class="value">{{ classifierset.created_at }}</td>
</tr>
<tr>
<td class="value">{% trans "Algorithm ID" %}</td>
<td class="value">{{ classifierset.algorithm_id }}</td>
</tr>
<tr>
<td class="value">{% trans "Course ID" %}</td>
<td class="value">{{ classifierset.course_id }}</td>
</tr>
<tr>
<td class="value">{% trans "Item ID" %}</td>
<td class="value">{{ classifierset.item_id }}</td>
</tr>
</tbody>
</table>
{% else %}
{% trans "No classifiers are available for this problem" %}
{% endif %}
</div>
<div class="staff-info__status ui-staff__content__section">
<button class="action--submit action--submit-training">{% trans "Schedule Example-Based Assessment Training" %}</button>
<div class="schedule_training_message"></div>
</div>
{% endif %}
{% if display_reschedule_unfinished_tasks %}
<div class="staff-info__status ui-staff__content__section">
<button class="action--submit action--submit-unfinished-tasks">{% trans "Reschedule All Unfinished Example-Based Assessment Grading Tasks" %}</button>
<div class="reschedule_unfinished_tasks_message"></div>
</div>
{% endif %}
</div>
</div>
</div>
......
......@@ -68,11 +68,6 @@
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="self" assessments=self_assessment %}
{% endif %}
{% if example_based_assessment %}
{% trans "Example-Based Assessment" as translated_title %}
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="example_based" assessments=example_based_assessment %}
{% endif %}
{% if staff_assessment %}
{% trans "Staff Assessment for This Learner" as translated_title %}
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="staff" assessments=staff_assessment %}
......
......@@ -3,16 +3,11 @@ Test utilities
"""
from django.core.cache import cache
from django.test import TestCase, TransactionTestCase
from openassessment.assessment.models.ai import (
CLASSIFIERS_CACHE_IN_MEM, CLASSIFIERS_CACHE_IN_FILE
)
def _clear_all_caches():
"""Clear the default cache and any custom caches."""
cache.clear()
CLASSIFIERS_CACHE_IN_MEM.clear()
CLASSIFIERS_CACHE_IN_FILE.clear()
class CacheResetTest(TestCase):
......
......@@ -4,9 +4,8 @@ Create factories for assessments and all of their related models.
import factory
from factory.django import DjangoModelFactory
from openassessment.assessment.models import (
Assessment, AssessmentPart, Rubric, Criterion, CriterionOption, AssessmentFeedbackOption, AssessmentFeedback
)
from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentFeedbackOption, AssessmentPart,
Criterion, CriterionOption, Rubric)
class RubricFactory(DjangoModelFactory):
......
......@@ -3,23 +3,24 @@
Tests for openassessment data aggregation.
"""
import os.path
from StringIO import StringIO
import csv
from django.core.management import call_command
import os.path
import ddt
from submissions import api as sub_api
from django.core.management import call_command
import openassessment.assessment.api.peer as peer_api
from openassessment.data import CsvWriter, OraAggregateData
from openassessment.test_utils import TransactionCacheResetTest
from openassessment.tests.factories import * # pylint: disable=wildcard-import
from openassessment.workflow import api as workflow_api
from openassessment.data import CsvWriter, OraAggregateData
import openassessment.assessment.api.peer as peer_api
from submissions import api as sub_api
COURSE_ID = "Test_Course"
STUDENT_ID = "Student"
STUDENT_ID = u"Student"
SCORER_ID = "Scorer"
......@@ -82,8 +83,8 @@ FEEDBACK_TEXT = u"𝓨𝓸𝓾 𝓼𝓱𝓸𝓾𝓵𝓭𝓷'𝓽 𝓰𝓲𝓿
FEEDBACK_OPTIONS = {
"feedback_text": FEEDBACK_TEXT,
"options": [
'I disliked this assessment',
'I felt this assessment was unfair',
u'I disliked this assessment',
u'I felt this assessment was unfair',
]
}
......@@ -363,6 +364,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
def setUp(self):
super(TestOraAggregateDataIntegration, self).setUp()
self.maxDiff = None
# Create submissions and assessments
self.submission = self._create_submission(STUDENT_ITEM)
self.scorer_submission = self._create_submission(SCORER_ITEM)
......@@ -370,6 +372,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
self.possible_points = 2
peer_api.get_submission_to_assess(self.scorer_submission['uuid'], 1)
self.assessment = self._create_assessment(self.scorer_submission['uuid'])
self.assertEqual(self.assessment['parts'][0]['criterion']['label'], "criterion_1")
sub_api.set_score(self.submission['uuid'], self.earned_points, self.possible_points)
self.score = sub_api.get_score(STUDENT_ITEM)
......@@ -470,15 +473,15 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
),
u"Assessment #{id}\n-- {label}: {option_label} ({points})\n".format(
id=self.assessment['id'],
label=self.assessment['parts'][1]['criterion']['label'],
option_label=self.assessment['parts'][1]['criterion']['options'][0]['label'],
points=self.assessment['parts'][1]['criterion']['options'][0]['points'],
label=self.assessment['parts'][0]['criterion']['label'],
option_label=self.assessment['parts'][0]['criterion']['options'][0]['label'],
points=self.assessment['parts'][0]['criterion']['options'][0]['points'],
) +
u"-- {label}: {option_label} ({points})\n-- feedback: {feedback}\n".format(
label=self.assessment['parts'][0]['criterion']['label'],
option_label=self.assessment['parts'][0]['criterion']['options'][1]['label'],
points=self.assessment['parts'][0]['criterion']['options'][1]['points'],
feedback=self.assessment['parts'][0]['feedback'],
label=self.assessment['parts'][1]['criterion']['label'],
option_label=self.assessment['parts'][1]['criterion']['options'][1]['label'],
points=self.assessment['parts'][1]['criterion']['options'][1]['points'],
feedback=self.assessment['parts'][1]['feedback'],
),
self.score['created_at'],
self.score['points_earned'],
......@@ -532,19 +535,19 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
self.assertIn(item_id2, data)
self.assertIn(item_id3, data)
for item in [ITEM_ID, item_id2, item_id3]:
self.assertEqual({'total', 'training', 'peer', 'self', 'staff', 'waiting', 'done', 'ai', 'cancelled'},
self.assertEqual({'total', 'training', 'peer', 'self', 'staff', 'waiting', 'done', 'cancelled'},
set(data[item].keys()))
self.assertEqual(data[ITEM_ID], {
'total': 2, 'training': 0, 'peer': 2, 'self': 0, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0
'done': 0, 'cancelled': 0
})
self.assertEqual(data[item_id2], {
'total': 2, 'training': 0, 'peer': 1, 'self': 1, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0
'done': 0, 'cancelled': 0
})
self.assertEqual(data[item_id3], {
'total': 3, 'training': 0, 'peer': 1, 'self': 2, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0
'done': 0, 'cancelled': 0
})
data = OraAggregateData.collect_ora2_responses(COURSE_ID, ['staff', 'peer'])
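For orientation, a hedged sketch of the aggregation call exercised in this test: the one-argument form and the expected keys are inferred from the assertions above, and the optional status filter mirrors the final call in the hunk.

from openassessment.data import OraAggregateData

# Per-item counts of workflow statuses across the course; the 'ai' bucket is gone after this commit.
data = OraAggregateData.collect_ora2_responses(COURSE_ID)
assert set(data[ITEM_ID]) == {
    'total', 'training', 'peer', 'self', 'staff', 'waiting', 'done', 'cancelled'
}

# Optionally restrict the tally to particular statuses, as the test above does.
filtered = OraAggregateData.collect_ora2_responses(COURSE_ID, ['staff', 'peer'])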
......
......@@ -8,12 +8,11 @@ from django.db import DatabaseError
from openassessment.assessment.errors import PeerAssessmentError, PeerAssessmentInternalError
from submissions import api as sub_api
from .errors import (AssessmentWorkflowError, AssessmentWorkflowInternalError, AssessmentWorkflowNotFoundError,
AssessmentWorkflowRequestError)
from .models import AssessmentWorkflow, AssessmentWorkflowCancellation
from .serializers import AssessmentWorkflowSerializer, AssessmentWorkflowCancellationSerializer
from .errors import (
AssessmentWorkflowError, AssessmentWorkflowInternalError,
AssessmentWorkflowRequestError, AssessmentWorkflowNotFoundError
)
from .serializers import AssessmentWorkflowCancellationSerializer, AssessmentWorkflowSerializer
logger = logging.getLogger(__name__)
......
......@@ -4,8 +4,8 @@ from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
import django_extensions.db.fields
class Migration(migrations.Migration):
......@@ -23,7 +23,7 @@ class Migration(migrations.Migration):
('status', model_utils.fields.StatusField(default=b'peer', max_length=100, verbose_name='status', no_check_for_status=True, choices=[(b'peer', b'peer'), (b'ai', b'ai'), (b'self', b'self'), (b'training', b'training'), (b'waiting', b'waiting'), (b'done', b'done'), (b'cancelled', b'cancelled')])),
('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, verbose_name='status changed', monitor='status')),
('submission_uuid', models.CharField(unique=True, max_length=36, db_index=True)),
('uuid', django_extensions.db.fields.UUIDField(db_index=True, unique=True, version=1, editable=False, blank=True)),
('uuid', models.UUIDField(db_index=True, unique=True, editable=False, blank=True)),
('course_id', models.CharField(max_length=255, db_index=True)),
('item_id', models.CharField(max_length=255, db_index=True)),
],
......
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflow', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='assessmentworkflow',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, unique=True, db_index=True),
),
]
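This replacement migration pairs with the model change later in this diff, where django_extensions' UUIDField (version=1, filled in at save time) gives way to Django's built-in models.UUIDField with a uuid4 default. A small illustrative sketch using a throwaway model name:

import uuid

from django.db import models


class ExampleWorkflow(models.Model):  # hypothetical model, for illustration only
    # default=uuid.uuid4 assigns a random UUID in Python when the instance is
    # created, replacing the time-based value django_extensions generated on save.
    uuid = models.UUIDField(db_index=True, unique=True, default=uuid.uuid4)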
......@@ -9,20 +9,23 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.workflow --auto
"""
import logging
import importlib
import logging
from uuid import uuid4
from django.conf import settings
from django.db import models, transaction, DatabaseError
from django.db import DatabaseError, models, transaction
from django.dispatch import receiver
from django_extensions.db.fields import UUIDField
from django.utils.timezone import now
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from submissions import api as sub_api
from openassessment.assessment.errors.base import AssessmentError
from openassessment.assessment.signals import assessment_complete_signal
from .errors import AssessmentApiLoadError, AssessmentWorkflowError, AssessmentWorkflowInternalError
from submissions import api as sub_api
from .errors import AssessmentApiLoadError, AssessmentWorkflowError, AssessmentWorkflowInternalError
logger = logging.getLogger('openassessment.workflow.models')
......@@ -36,7 +39,6 @@ DEFAULT_ASSESSMENT_API_DICT = {
'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self',
'training': 'openassessment.assessment.api.student_training',
'ai': 'openassessment.assessment.api.ai',
}
ASSESSMENT_API_DICT = getattr(
settings, 'ORA2_ASSESSMENTS',
......@@ -77,7 +79,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# We then use that score as the student's overall score.
# This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
# in descending priority order.
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
ASSESSMENT_SCORE_PRIORITY = getattr(
settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY',
DEFAULT_ASSESSMENT_SCORE_PRIORITY
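As a point of reference, a minimal sketch (assuming a standard Django settings module) of how a deployment could override the two settings read via getattr in this file; the setting names come from the code above, and the values restate the step-to-API mappings visible in this hunk once the 'ai' entries are dropped (the full default dict may contain steps not shown here).

# Hypothetical settings.py override; values mirror the post-commit defaults above.
ORA2_ASSESSMENTS = {
    'peer': 'openassessment.assessment.api.peer',
    'self': 'openassessment.assessment.api.self',
    'training': 'openassessment.assessment.api.student_training',
}

# Score-bearing steps, highest priority first.
ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']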
......@@ -86,7 +88,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
STAFF_ANNOTATION_TYPE = "staff_defined"
submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
uuid = UUIDField(version=1, db_index=True, unique=True)
uuid = models.UUIDField(db_index=True, unique=True, default=uuid4)
# These values are used to find workflows for a particular item
# in a course without needing to look up the submissions for that item.
......@@ -98,6 +100,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
class Meta:
ordering = ["-created"]
# TODO: In migration, need a non-unique index on (course_id, item_id, status)
app_label = "workflow"
def __init__(self, *args, **kwargs):
super(AssessmentWorkflow, self).__init__(*args, **kwargs)
......@@ -154,7 +157,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
item_id=submission_dict['student_item']['item_id']
)
workflow_steps = [
AssessmentWorkflowStep(
AssessmentWorkflowStep.objects.create(
workflow=workflow, name=step, order_num=i
)
for i, step in enumerate(step_names)
......@@ -396,12 +399,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
except AssessmentWorkflowStep.DoesNotExist:
for step in list(self.steps.all()):
step.order_num += 1
staff_step, _ = AssessmentWorkflowStep.objects.get_or_create(
name=self.STATUS.staff,
order_num=0,
assessment_completed_at=now(),
workflow=self,
)
self.steps.add(
AssessmentWorkflowStep(
name=self.STATUS.staff,
order_num=0,
assessment_completed_at=now(),
)
staff_step
)
# Do not return steps that are not recognized in the AssessmentWorkflow.
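A note on the hunk above: the old code attached an unsaved AssessmentWorkflowStep through self.steps.add(), which newer Django versions reject for reverse foreign-key relations, so the step row is now created (or fetched) first via get_or_create. A condensed sketch of the resulting pattern, mirroring the field values in the diff:

# Persist the staff step first, then attach it to the workflow's reverse FK.
staff_step, _ = AssessmentWorkflowStep.objects.get_or_create(
    name=self.STATUS.staff,
    order_num=0,
    assessment_completed_at=now(),
    workflow=self,
)
self.steps.add(staff_step)  # add() now receives a saved row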
......@@ -624,6 +629,7 @@ class AssessmentWorkflowStep(models.Model):
class Meta:
ordering = ["workflow", "order_num"]
app_label = "workflow"
def is_submitter_complete(self):
"""
......@@ -760,6 +766,7 @@ class AssessmentWorkflowCancellation(models.Model):
class Meta:
ordering = ["created_at", "id"]
app_label = "workflow"
def __repr__(self):
return (
......
......@@ -3,6 +3,7 @@ Serializers are created to ensure models do not have to be accessed outside the
scope of the ORA2 APIs.
"""
from rest_framework import serializers
from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowCancellation
......
......@@ -57,66 +57,5 @@
},
"self": {}
}
},
"ai": {
"steps": ["ai"],
"requirements": {
"ai": {}
}
},
"ai_peer": {
"steps": ["ai", "peer"],
"requirements": {
"ai": {},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"ai_training_peer": {
"steps": ["ai", "training", "peer"],
"requirements": {
"ai": {},
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"ai_self": {
"steps": ["ai", "self"],
"requirements": {
"ai": {},
"self": {}
}
},
"ai_peer_self": {
"steps": ["ai", "peer", "self"],
"requirements": {
"ai": {},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
},
"ai_training_peer_self": {
"steps": ["ai", "training", "peer", "self"],
"requirements": {
"ai": {},
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
}
}
\ No newline at end of file
}
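For orientation on how this fixture is consumed: each scenario pairs a list of step names with a requirements dict keyed by those steps. A hedged sketch of the corresponding workflow API usage; create_workflow appears elsewhere in this diff, while the update_from_assessments call and the submission_uuid variable are assumptions for illustration.

from openassessment.workflow import api as workflow_api

# Mirrors the peer/self scenarios retained in the fixture above.
steps = ["peer", "self"]
requirements = {
    "peer": {"must_grade": 5, "must_be_graded_by": 3},
    "self": {},
}

workflow = workflow_api.create_workflow(submission_uuid, steps)  # submission_uuid assumed to exist
workflow = workflow_api.update_from_assessments(submission_uuid, requirements)  # signature assumed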
"""
Tests for Django signals and receivers defined by the workflow API.
"""
import ddt
import mock
from django.db import DatabaseError
import ddt
from submissions import api as sub_api
from openassessment.assessment.signals import assessment_complete_signal
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.signals import assessment_complete_signal
from submissions import api as sub_api
@ddt.ddt
......@@ -54,7 +56,7 @@ class UpdateWorkflowSignalTest(CacheResetTest):
mock_update.assert_called_once_with(None)
@ddt.data(DatabaseError, IOError)
@mock.patch.object(AssessmentWorkflow.objects, 'get')
@mock.patch('openassessment.workflow.models.AssessmentWorkflow.objects.get')
def test_errors(self, error, mock_call):
# Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self'])
......
......@@ -2,21 +2,19 @@
Grade step in the OpenAssessment XBlock.
"""
import copy
from lazy import lazy
from xblock.core import XBlock
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment.api import ai as ai_api
from data_conversion import create_submission_dict
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
from submissions import api as sub_api
from data_conversion import create_submission_dict
class GradeMixin(object):
"""Grade Mixin introduces all handlers for displaying grades
......@@ -91,7 +89,6 @@ class GradeMixin(object):
submission_uuid = workflow['submission_uuid']
staff_assessment = None
example_based_assessment = None
self_assessment = None
feedback = None
peer_assessments = []
......@@ -111,11 +108,6 @@ class GradeMixin(object):
self_api.get_assessment(submission_uuid)
)
if "example-based-assessment" in assessment_steps:
example_based_assessment = self._assessment_grade_context(
ai_api.get_latest_assessment(submission_uuid)
)
raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
if raw_staff_assessment:
staff_assessment = self._assessment_grade_context(raw_staff_assessment)
......@@ -141,7 +133,6 @@ class GradeMixin(object):
submission_uuid,
peer_assessments=peer_assessments,
self_assessment=self_assessment,
example_based_assessment=example_based_assessment,
staff_assessment=staff_assessment,
),
'file_upload_type': self.file_upload_type,
......@@ -219,7 +210,7 @@ class GradeMixin(object):
return {'success': True, 'msg': self._(u"Feedback saved.")}
def grade_details(
self, submission_uuid, peer_assessments, self_assessment, example_based_assessment, staff_assessment,
self, submission_uuid, peer_assessments, self_assessment, staff_assessment,
is_staff=False
):
"""
......@@ -229,7 +220,6 @@ class GradeMixin(object):
submission_uuid (str): The id of the submission being graded.
peer_assessments (list of dict): Serialized assessment models from the peer API.
self_assessment (dict): Serialized assessment model from the self API
example_based_assessment (dict): Serialized assessment model from the example-based API
staff_assessment (dict): Serialized assessment model from the staff API
is_staff (bool): True if the grade details are being displayed to staff, else False.
Default value is False (meaning grade details are being shown to the learner).
......@@ -268,7 +258,10 @@ class GradeMixin(object):
Returns True if at least one assessment has feedback.
"""
return any(
assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', []))
(
assessment and
(assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])))
)
for assessment in assessments
)
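The extra truthiness check above guards against None entries (for example, a step whose assessment is missing) in the assessments list. A minimal standalone sketch of the same guard with hypothetical data:

def has_feedback(assessments):
    # 'assessment and ...' skips None entries before .get() is called on them.
    return any(
        assessment and (
            assessment.get('feedback') or
            has_feedback(assessment.get('individual_assessments', []))
        )
        for assessment in assessments
    )

assert has_feedback([None, {'feedback': u'Nice work'}])
assert not has_feedback([None, {'individual_assessments': [None]}])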
......@@ -279,8 +272,6 @@ class GradeMixin(object):
median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
elif "peer-assessment" in assessment_steps:
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
elif "example-based-assessment" in assessment_steps:
median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
......@@ -293,7 +284,6 @@ class GradeMixin(object):
assessment_steps,
staff_assessment,
peer_assessments,
example_based_assessment,
self_assessment,
is_staff=is_staff,
)
......@@ -322,7 +312,7 @@ class GradeMixin(object):
def _graded_assessments(
self, submission_uuid, criterion, assessment_steps, staff_assessment, peer_assessments,
example_based_assessment, self_assessment, is_staff=False
self_assessment, is_staff=False
):
"""
Returns an array of assessments with their associated grades.
......@@ -364,9 +354,6 @@ class GradeMixin(object):
}
else:
peer_assessment_part = None
example_based_assessment_part = _get_assessment_part(
_('Example-Based Grade'), _('Example-Based Comments'), criterion_name, example_based_assessment
)
self_assessment_part = _get_assessment_part(
_('Self Assessment Grade') if is_staff else _('Your Self Assessment'),
_('Your Comments'), # This is only used in the LMS student-facing view
......@@ -380,8 +367,6 @@ class GradeMixin(object):
assessments.append(staff_assessment_part)
if peer_assessment_part:
assessments.append(peer_assessment_part)
if example_based_assessment_part:
assessments.append(example_based_assessment_part)
if self_assessment_part:
assessments.append(self_assessment_part)
......@@ -389,7 +374,7 @@ class GradeMixin(object):
if len(assessments) > 0:
first_assessment = assessments[0]
option = first_assessment['option']
if option:
if option and option.get('points'):
first_assessment['points'] = option['points']
return assessments
......
"""
Leaderboard step in the OpenAssessment XBlock.
"""
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from submissions import api as sub_api
from django.utils.translation import ugettext as _
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.exceptions import FileUploadError
from openassessment.xblock.data_conversion import create_submission_dict
from submissions import api as sub_api
class LeaderboardMixin(object):
......
......@@ -2,7 +2,7 @@
Fields and methods used by the LMS and Studio.
"""
from xblock.fields import String, Float, Scope, DateTime
from xblock.fields import DateTime, Float, Scope, String
class LmsCompatibilityMixin(object):
......
......@@ -5,7 +5,6 @@ Message step in the OpenAssessment XBlock.
import datetime as dt
import pytz
from xblock.core import XBlock
......
......@@ -5,40 +5,37 @@ import datetime as dt
import json
import logging
import os
import pkg_resources
from lazy import lazy
import pkg_resources
import pytz
from django.conf import settings
from django.template.context import Context
from django.template.loader import get_template
from webob import Response
from lazy import lazy
from xblock.core import XBlock
from xblock.fields import List, Scope, String, Boolean, Integer
from xblock.fields import Boolean, Integer, List, Scope, String
from xblock.fragment import Fragment
from django.conf import settings
from django.template.loader import get_template
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.course_items_listing_mixin import CourseItemsListingMixin
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.grade_mixin import GradeMixin
from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.message_mixin import MessageMixin
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.resolve_dates import DISTANT_FUTURE, DISTANT_PAST, parse_date_value, resolve_dates
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_area_mixin import StaffAreaMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.staff_assessment_mixin import StaffAssessmentMixin
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, parse_date_value, DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
from openassessment.xblock.course_items_listing_mixin import CourseItemsListingMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
logger = logging.getLogger(__name__)
......@@ -83,7 +80,6 @@ UI_MODELS = {
VALID_ASSESSMENT_TYPES = [
"student-training",
"example-based-assessment",
"peer-assessment",
"self-assessment",
"staff-assessment"
......@@ -491,8 +487,7 @@ class OpenAssessmentBlock(MessageMixin,
Creates a fragment for display.
"""
context = Context(context_dict)
fragment = Fragment(template.render(context))
fragment = Fragment(template.render(context_dict))
if additional_css is None:
additional_css = []
......@@ -646,10 +641,6 @@ class OpenAssessmentBlock(MessageMixin,
load('static/xml/unicode.xml')
),
(
"OpenAssessmentBlock Example Based Rubric",
load('static/xml/example_based_example.xml')
),
(
"OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml')
),
......@@ -825,8 +816,7 @@ class OpenAssessmentBlock(MessageMixin,
context_dict = {}
template = get_template(path)
context = Context(context_dict)
return Response(template.render(context), content_type='application/html', charset='UTF-8')
return Response(template.render(context_dict), content_type='application/html', charset='UTF-8')
def add_xml_to_node(self, node):
"""
......@@ -844,7 +834,7 @@ class OpenAssessmentBlock(MessageMixin,
Returns:
Response: A response object with an HTML body.
"""
context = Context({'error_msg': error_msg})
context = {'error_msg': error_msg}
template = get_template('openassessmentblock/oa_error.html')
return Response(template.render(context), content_type='application/html', charset='UTF-8')
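The render() changes in this file track the Django 1.8+ template API: django.template.loader.get_template returns a backend template whose render() accepts a plain dict (and, optionally, a request) rather than a Context object. A small sketch of the modernized call; the template path is reused from the code above:

from django.template.loader import get_template

template = get_template('openassessmentblock/oa_error.html')
# Newer Django: pass a plain dict; wrapping it in django.template.Context is no longer supported.
html = template.render({'error_msg': u'Something went wrong'})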
......
......@@ -9,15 +9,15 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentWorkflowError)
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
from .data_conversion import create_rubric_dict
from .data_conversion import (clean_criterion_feedback, create_rubric_dict, create_submission_dict,
verify_assessment_parameters)
from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences
from .data_conversion import clean_criterion_feedback, create_submission_dict, verify_assessment_parameters
logger = logging.getLogger(__name__)
......
......@@ -2,8 +2,9 @@
Resolve unspecified dates and date strings to datetimes.
"""
import datetime as dt
import pytz
from dateutil.parser import parse as parse_date
import pytz
class InvalidDateFormat(Exception):
......
......@@ -4,7 +4,7 @@ Schema for validating and sanitizing data received from the JavaScript client.
import dateutil
from pytz import utc
from voluptuous import Schema, Required, All, Any, Range, In, Invalid
from voluptuous import All, Any, In, Invalid, Range, Required, Schema
def utf8_validator(value):
......@@ -66,7 +66,6 @@ NECESSITY_OPTIONS = [
VALID_ASSESSMENT_TYPES = [
u'peer-assessment',
u'self-assessment',
u'example-based-assessment',
u'student-training',
u'staff-assessment',
]
......
import logging
from xblock.core import XBlock
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as submission_api
from .data_conversion import (clean_criterion_feedback, create_rubric_dict, create_submission_dict,
verify_assessment_parameters)
from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences
from .data_conversion import (clean_criterion_feedback, create_submission_dict,
create_rubric_dict, verify_assessment_parameters)
logger = logging.getLogger(__name__)
......
......@@ -7,25 +7,18 @@ from functools import wraps
import logging
from xblock.core import XBlock
from openassessment.assessment.errors import (
PeerAssessmentInternalError,
)
from openassessment.workflow.errors import (
AssessmentWorkflowError, AssessmentWorkflowInternalError
)
from openassessment.assessment.errors.ai import AIError
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import (
create_rubric_dict, convert_training_examples_list_to_dict, create_submission_dict
)
from submissions import api as submission_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import staff as staff_api
from .user_data import get_user_preferences
from openassessment.assessment.errors import PeerAssessmentInternalError
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError, AssessmentWorkflowInternalError
from openassessment.xblock.data_conversion import create_submission_dict
from openassessment.xblock.resolve_dates import DISTANT_FUTURE, DISTANT_PAST
from submissions import api as submission_api
from .user_data import get_user_preferences
logger = logging.getLogger(__name__)
......@@ -127,24 +120,6 @@ class StaffAreaMixin(object):
context['status_counts'] = status_counts
context['num_submissions'] = num_submissions
# Show the schedule training button if example based assessment is
# configured, and the current user has admin privileges.
example_based_assessment = self.get_assessment_module('example-based-assessment')
display_ai_staff_info = (
self.is_admin and
bool(example_based_assessment) and
not self.in_studio_preview
)
context['display_schedule_training'] = display_ai_staff_info
context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
if display_ai_staff_info:
context['classifierset'] = ai_api.get_classifier_set_info(
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
example_based_assessment['algorithm_id'],
student_item['course_id'],
student_item['item_id']
)
# Include Latex setting
context['allow_latex'] = self.allow_latex
......@@ -152,9 +127,6 @@ class StaffAreaMixin(object):
context['step_dates'] = list()
for step in ['submission'] + self.assessment_steps:
if step == 'example-based-assessment':
continue
# Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
......@@ -187,42 +159,6 @@ class StaffAreaMixin(object):
'staff_assessment_in_progress': grading_stats['in-progress']
}
@XBlock.json_handler
@require_global_admin("SCHEDULE_TRAINING")
def schedule_training(self, data, suffix=''): # pylint: disable=W0613
"""
Schedule a new training task for example-based grading.
"""
assessment = self.get_assessment_module('example-based-assessment')
student_item_dict = self.get_student_item_dict()
if assessment:
examples = assessment["examples"]
try:
workflow_uuid = ai_api.train_classifiers(
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
convert_training_examples_list_to_dict(examples),
student_item_dict.get('course_id'),
student_item_dict.get('item_id'),
assessment["algorithm_id"]
)
return {
'success': True,
'workflow_uuid': workflow_uuid,
'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
}
except AIError as err:
return {
'success': False,
'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err))
}
else:
return {
'success': False,
'msg': self._(u"Example Based Assessment is not configured for this location.")
}
@XBlock.handler
@require_course_staff("STUDENT_INFO")
def render_student_info(self, data, suffix=''): # pylint: disable=W0613
......@@ -389,9 +325,6 @@ class StaffAreaMixin(object):
"""
assessment_steps = self.assessment_steps
example_based_assessment = None
example_based_assessment_grade_context = None
self_assessment = None
self_assessment_grade_context = None
......@@ -423,11 +356,6 @@ class StaffAreaMixin(object):
if grade_exists:
self_assessment_grade_context = self._assessment_grade_context(self_assessment)
if "example-based-assessment" in assessment_steps:
example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
if grade_exists:
example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)
if grade_exists:
if staff_assessment:
staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
......@@ -436,7 +364,6 @@ class StaffAreaMixin(object):
submission_uuid,
peer_assessments_grade_context,
self_assessment_grade_context,
example_based_assessment_grade_context,
staff_assessment_grade_context,
is_staff=True,
)
......@@ -444,7 +371,6 @@ class StaffAreaMixin(object):
workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)
context.update({
'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
'self_assessment': [self_assessment] if self_assessment else None,
'peer_assessments': peer_assessments,
'staff_assessment': [staff_assessment] if staff_assessment else None,
......@@ -455,50 +381,11 @@ class StaffAreaMixin(object):
'workflow_cancellation': workflow_cancellation,
})
if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
if peer_assessments or self_assessment or staff_assessment:
max_scores = peer_api.get_rubric_max_scores(submission_uuid)
for criterion in context["rubric_criteria"]:
criterion["total_value"] = max_scores[criterion["name"]]
@XBlock.json_handler
@require_global_admin("RESCHEDULE_TASKS")
def reschedule_unfinished_tasks(self, data, suffix=''): # pylint: disable=W0613
"""
Wrapper which invokes the API call for rescheduling grading tasks.
Checks that the requester is an administrator who is not in studio-preview mode,
and that the API call returns without error. If any exception is raised, an
appropriate JSON-serializable dictionary describing the failure is passed
back.
Args:
data (not used)
suffix (not used)
Return:
JSON serializable dict with the following elements:
'success': (bool) Indicates whether or not the tasks were rescheduled successfully
'msg': The response to the server (could be error message or success message)
"""
# Identifies the course and item that will need to be re-run
student_item_dict = self.get_student_item_dict()
course_id = student_item_dict.get('course_id')
item_id = student_item_dict.get('item_id')
try:
# Note that we only want to reschedule grading tasks, but maintain the potential functionality
# within the API to also reschedule training tasks.
ai_api.reschedule_unfinished_tasks(course_id=course_id, item_id=item_id, task_type=u"grade")
return {
'success': True,
'msg': self._(u"All AI tasks associated with this item have been rescheduled successfully.")
}
except AIError as ex:
return {
'success': False,
'msg': self._(u"An error occurred while rescheduling tasks: {}".format(ex))
}
def clear_student_state(self, user_id, course_id, item_id, requesting_user_id):
"""
This xblock method is called (from our LMS runtime, which defines this method signature) to clear student state
......
......@@ -3,17 +3,14 @@ A mixin for staff grading.
"""
import logging
from staff_area_mixin import require_course_staff
from xblock.core import XBlock
from openassessment.assessment.api import staff as staff_api
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.workflow import api as workflow_api
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
from staff_area_mixin import require_course_staff
from .data_conversion import create_rubric_dict
from .data_conversion import clean_criterion_feedback, verify_assessment_parameters
from .data_conversion import clean_criterion_feedback, create_rubric_dict, verify_assessment_parameters
logger = logging.getLogger(__name__)
......
Two source diffs could not be displayed here because they are too large.
......@@ -502,7 +502,6 @@
"student_training",
"peer_assessment",
"self_assessment",
"example_based_assessment",
"staff_assessment"
]
},
......@@ -668,7 +667,6 @@
"student_training",
"peer_assessment",
"self_assessment",
"example_based_assessment",
"staff_assessment"
]
},
......
This source diff could not be displayed because it is too large.
......@@ -82,7 +82,6 @@ describe("OpenAssessment.Server", function() {
"student_training",
"peer_assessment",
"self_assessment",
"example_based_assessment"
];
var TITLE = 'This is the title.';
......
......@@ -117,7 +117,6 @@ describe("OpenAssessment.StudioView", function() {
"student-training",
"peer-assessment",
"self-assessment",
"example-based-assessment",
"staff-assessment"
]
};
......
......@@ -238,19 +238,6 @@ describe("OpenAssessment edit assessment views", function() {
});
});
describe("OpenAssessment.EditExampleBasedAssessmentView", function() {
var view = null;
beforeEach(function() {
var element = $("#oa_ai_assessment_editor").get(0);
view = new OpenAssessment.EditExampleBasedAssessmentView(element);
});
it("Enables and disables", function() { testEnableAndDisable(view); });
it("Loads a description", function() { testLoadXMLExamples(view); });
it("shows an alert when disabled", function() { testAlertOnDisable(view); });
});
describe("OpenAssessment.EditStaffAssessmentView", function() {
var view = null;
......
......@@ -49,7 +49,6 @@ describe("OpenAssessment.EditSettingsView", function() {
// The Peer and Self Editor ID's
var PEER = "oa_peer_assessment_editor";
var SELF = "oa_self_assessment_editor";
var AI = "oa_ai_assessment_editor";
var TRAINING = "oa_student_training_editor";
var STAFF = "oa_staff_assessment_editor";
......@@ -61,7 +60,6 @@ describe("OpenAssessment.EditSettingsView", function() {
assessmentViews = {};
assessmentViews[SELF] = new StubView("self-assessment", "Self assessment description");
assessmentViews[PEER] = new StubView("peer-assessment", "Peer assessment description");
assessmentViews[AI] = new StubView("ai-assessment", "Example Based assessment description");
assessmentViews[TRAINING] = new StubView("student-training", "Student Training description");
assessmentViews[STAFF] = new StubView("staff-assessment", "Staff assessment description");
......@@ -131,7 +129,6 @@ describe("OpenAssessment.EditSettingsView", function() {
// Disable all assessments, and expect an empty description
assessmentViews[PEER].isEnabled(false);
assessmentViews[SELF].isEnabled(false);
assessmentViews[AI].isEnabled(false);
assessmentViews[TRAINING].isEnabled(false);
assessmentViews[STAFF].isEnabled(false);
expect(view.assessmentsDescription()).toEqual([]);
......@@ -139,7 +136,6 @@ describe("OpenAssessment.EditSettingsView", function() {
// Enable the first assessment only
assessmentViews[PEER].isEnabled(false);
assessmentViews[SELF].isEnabled(true);
assessmentViews[AI].isEnabled(false);
assessmentViews[TRAINING].isEnabled(false);
expect(view.assessmentsDescription()).toEqual([
{
......@@ -151,7 +147,6 @@ describe("OpenAssessment.EditSettingsView", function() {
// Enable the second assessment only
assessmentViews[PEER].isEnabled(true);
assessmentViews[SELF].isEnabled(false);
assessmentViews[AI].isEnabled(false);
assessmentViews[TRAINING].isEnabled(false);
expect(view.assessmentsDescription()).toEqual([
{
......@@ -163,7 +158,6 @@ describe("OpenAssessment.EditSettingsView", function() {
// Enable both assessments
assessmentViews[PEER].isEnabled(true);
assessmentViews[SELF].isEnabled(true);
assessmentViews[AI].isEnabled(false);
assessmentViews[TRAINING].isEnabled(false);
expect(view.assessmentsDescription()).toEqual([
{
......
......@@ -208,7 +208,7 @@ OpenAssessment.BaseView.prototype = {
// extract typed-in response and replace newline with br
var previewText = parentElement.find('textarea[data-preview="' + previewName + '"]').val();
var previewContainer = parentElement.find('.preview_content[data-preview="' + previewName + '"]');
previewContainer.html(previewText.replace(/\r\n|\r|\n/g,"<br />"));
previewContainer.html(previewText.replace(/\r\n|\r|\n/g, "<br />"));
// Render in mathjax
previewContainer.parent().parent().parent().show();
......
......@@ -50,15 +50,11 @@ OpenAssessment.StudioView = function(runtime, element, server, data) {
var selfAssessmentView = new OpenAssessment.EditSelfAssessmentView(
$("#oa_self_assessment_editor", this.element).get(0)
);
var exampleBasedAssessmentView = new OpenAssessment.EditExampleBasedAssessmentView(
$("#oa_ai_assessment_editor", this.element).get(0)
);
var assessmentLookupDictionary = {};
assessmentLookupDictionary[staffAssessmentView.getID()] = staffAssessmentView;
assessmentLookupDictionary[studentTrainingView.getID()] = studentTrainingView;
assessmentLookupDictionary[peerAssessmentView.getID()] = peerAssessmentView;
assessmentLookupDictionary[selfAssessmentView.getID()] = selfAssessmentView;
assessmentLookupDictionary[exampleBasedAssessmentView.getID()] = exampleBasedAssessmentView;
this.settingsView = new OpenAssessment.EditSettingsView(
$("#oa_basic_settings_editor", this.element).get(0), assessmentLookupDictionary, data
......
......@@ -526,103 +526,6 @@ OpenAssessment.EditStudentTrainingView.prototype = {
};
/**
Interface for editing example-based assessment settings.
Args:
element (DOM element): The DOM element representing this view.
Returns:
OpenAssessment.EditExampleBasedAssessmentView
**/
OpenAssessment.EditExampleBasedAssessmentView = function(element) {
this.element = element;
this.name = "example-based-assessment";
new OpenAssessment.ToggleControl(
$("#include_ai_assessment", this.element),
$("#ai_assessment_settings_editor", this.element),
$("#ai_assessment_description_closed", this.element),
new OpenAssessment.Notifier([
new OpenAssessment.AssessmentToggleListener()
])
).install();
};
OpenAssessment.EditExampleBasedAssessmentView.prototype = {
/**
Return a description of the assessment.
Returns:
object literal
Example usage:
>>> editTrainingView.description();
{
examples_xml: "XML DEFINITION HERE",
}
**/
description: function() {
return {
examples_xml: this.exampleDefinitions()
};
},
/**
Get or set whether the assessment is enabled.
Args:
isEnabled (boolean, optional): If provided, set the enabled state of the assessment.
Returns:
boolean
***/
isEnabled: function(isEnabled) {
var sel = $("#include_ai_assessment", this.element);
return OpenAssessment.Fields.booleanField(sel, isEnabled);
},
/**
Toggle whether the assessment is enabled or disabled.
This triggers the actual click event and is mainly useful for testing.
**/
toggleEnabled: function() {
$("#include_ai_assessment", this.element).click();
},
/**
Get or set the XML defining the training examples.
Args:
xml (string, optional): The XML of the training example definitions.
Returns:
string
**/
exampleDefinitions: function(xml) {
var sel = $("#ai_training_examples", this.element);
return OpenAssessment.Fields.stringField(sel, xml);
},
/**
Gets the ID of the assessment
Returns:
string (CSS ID of the Element object)
**/
getID: function() {
return $(this.element).attr('id');
},
validate: function() { return true; },
validationErrors: function() { return []; },
clearValidationErrors: function() {}
};
/**
* Interface for editing staff assessment settings.
*
* @param {Object} element - The DOM element representing this view.
......
......@@ -1086,7 +1086,7 @@
}
.action--save {
@extend %action-button
@extend %action-button !optional
}
.feedback {
......
......@@ -2,16 +2,17 @@
Student training step in the OpenAssessment XBlock.
"""
import logging
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict, create_submission_dict
from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences
logger = logging.getLogger(__name__)
......
......@@ -3,27 +3,24 @@ Studio editing view for OpenAssessment XBlock.
"""
import copy
import logging
import pkg_resources
from uuid import uuid4
from xml import UpdateFromXmlError
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _, ugettext_lazy
import pkg_resources
from voluptuous import MultipleInvalid
from xblock.core import XBlock
from xblock.fields import List, Scope
from xblock.fragment import Fragment
from django.conf import settings
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy
from openassessment.xblock.data_conversion import (create_rubric_dict, make_django_template_key,
update_assessments_format)
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.validation import validator
from openassessment.xblock.data_conversion import (
create_rubric_dict, make_django_template_key, update_assessments_format
)
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.xml import serialize_examples_to_xml_str, parse_examples_from_xml_str
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.validation import validator
logger = logging.getLogger(__name__)
......@@ -73,7 +70,7 @@ class StudioMixin(object):
"""
rendered_template = get_template(
'openassessmentblock/edit/oa_edit.html'
).render(Context(self.editor_context()))
).render(self.editor_context())
fragment = Fragment(rendered_template)
if settings.DEBUG:
self.add_javascript_files(fragment, "static/js/src/oa_shared.js")
......@@ -181,15 +178,12 @@ class StudioMixin(object):
logger.exception('Editor context is invalid')
return {'success': False, 'msg': self._('Error updating XBlock configuration')}
# Check that the editor assessment order contains all the assessments. We are more flexible on example-based.
given_without_example_based = set(data['editor_assessments_order']) - {'example-based-assessment'}
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != given_without_example_based:
# Check that the editor assessment order contains all the assessments.
current_order = set(data['editor_assessments_order'])
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
# Backwards compatibility: "staff-assessment" may not be present.
# If that is the only problem with this data, just add it manually and continue.
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == (
# Check the given set, minus example-based, plus staff
given_without_example_based | {'staff-assessment'}
):
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
data['editor_assessments_order'].append('staff-assessment')
logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
else:
......@@ -218,27 +212,6 @@ class StudioMixin(object):
if 'name' not in option:
option['name'] = uuid4().hex
# If example-based assessment is enabled, we replace its XML definition with the dictionary
# definition we expect for validation and storing.
for assessment in data['assessments']:
if assessment['name'] == 'example-based-assessment':
try:
assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
except UpdateFromXmlError:
return {'success': False, 'msg': self._(
u'Validation error: There was an error in the XML definition of the '
u'examples provided by the user. Please correct the XML definition before saving.')
}
except KeyError:
return {'success': False, 'msg': self._(
u'Validation error: No examples were provided for example based assessment.'
)}
# This is where we default to EASE for problems which are edited in the GUI
assessment['algorithm_id'] = 'ease'
if assessment['name'] == 'student-training':
for example in assessment['examples']:
example['answer'] = {'parts': [{'text': text} for text in example['answer']]}
xblock_validator = validator(self, self._)
success, msg = xblock_validator(
create_rubric_dict(data['prompts'], data['criteria']),
......@@ -356,13 +329,6 @@ class StudioMixin(object):
else:
assessments['training'] = {'examples': [student_training_template], 'template': student_training_template}
example_based_assessment = self.get_assessment_module('example-based-assessment')
if example_based_assessment:
assessments['example_based_assessment'] = {
'examples': serialize_examples_to_xml_str(example_based_assessment)
}
return assessments
def _editor_assessments_order_context(self):
......@@ -379,19 +345,13 @@ class StudioMixin(object):
# since the user last saved their ordering.
effective_order = copy.deepcopy(DEFAULT_EDITOR_ASSESSMENTS_ORDER)
# If the problem already contains example-based assessment
# then allow the editor to display example-based assessments,
# which is not included in the default
enabled_assessments = [asmnt['name'] for asmnt in self.valid_assessments]
if 'example-based-assessment' in enabled_assessments:
effective_order.insert(0, 'example-based-assessment')
# Account for changes the user has made to the default order
user_order = copy.deepcopy(self.editor_assessments_order)
effective_order = self._subset_in_relative_order(effective_order, user_order)
# Account for inconsistencies between the user's order and the assessments
# that are currently enabled in the problem (these cannot be changed)
enabled_assessments = [asmnt['name'] for asmnt in self.valid_assessments]
enabled_ordered_assessments = [
assessment for assessment in enabled_assessments if assessment in user_order
]
......
......@@ -3,17 +3,16 @@ import logging
from xblock.core import XBlock
from submissions import api
from data_conversion import create_submission_dict, prepare_submission_for_serialization
from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.exceptions import FileUploadError
from openassessment.workflow.errors import AssessmentWorkflowError
from submissions import api
from validation import validate_submission
from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences
from data_conversion import create_submission_dict, prepare_submission_for_serialization
from validation import validate_submission
logger = logging.getLogger(__name__)
......
......@@ -3,20 +3,19 @@
Base class for handler-level testing of the XBlock.
"""
import copy
import mock
import os.path
import json
from functools import wraps
import json
import os.path
from submissions import api as submissions_api
import mock
import webob
from workbench.runtime import WorkbenchRuntime
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.test_utils import CacheResetTest, TransactionCacheResetTest
from workbench.runtime import WorkbenchRuntime
import webob
from openassessment.workflow import api as workflow_api
from submissions import api as submissions_api
# Sample peer assessments
PEER_ASSESSMENTS = [
......
......@@ -43,28 +43,6 @@
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Poor" />
</example>
<example>
<answer>Example Answer Two</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Fair" />
</example>
<example>
<answer>Example Answer Three</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="Ġööḋ" />
<select criterion="Form" option="Good" />
</example>
<example>
<answer>Example Answer Four</answer>
<select criterion="𝓒𝓸𝓷𝓬𝓲𝓼𝓮" option="ﻉซƈﻉɭɭﻉกՇ" />
<select criterion="Form" option="Good" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="2" must_be_graded_by="2" />
<assessment name="self-assessment" />
</assessments>
......
......@@ -71,59 +71,5 @@
"must_be_graded_by": 3
}
]
},
"example_based_no_training_examples": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "how good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "poor",
"explanation": "poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "good",
"explanation": "good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "how good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "poor",
"explanation": "poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "good",
"explanation": "good job!"
}
]
}
]
},
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": []
}
]
}
}
......@@ -211,38 +211,5 @@
],
"current_assessments": null,
"is_released": false
},
"example_based_algorithm_id_is_not_ease": {
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "NOT_EASE",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
],
"current_assessments": null,
"is_released": false
}
}
......@@ -523,34 +523,6 @@
"is_released": true
},
"example_based_duplicate_option_points": {
"is_example_based": true,
"rubric": {
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
"criteria": [
{
"order_num": 0,
"name": "Test criterion",
"prompt": "Test criterion prompt",
"options": [
{
"order_num": 0,
"points": 2,
"name": "No",
"explanation": "No explanation"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "Yes explanation"
}
]
}
]
}
},
"zero_options_feedback_optional": {
"rubric": {
"prompts": [{"description": "Test Prompt 1."}, {"description": "Test Prompt 2."}],
......
......@@ -1225,7 +1225,7 @@
],
"editor_assessments_order": [
"student-training", "peer-assessment",
"self-assessment", "example-based-assessment", "staff-assessment",
"self-assessment", "staff-assessment",
"NOT A VALID ASSESSMENT"
]
},
......@@ -1278,7 +1278,7 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "example-based-assessment", "staff-assessment"],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
},
......@@ -1331,7 +1331,7 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "example-based-assessment", "staff-assessment"],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
},
......@@ -1406,7 +1406,7 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "example-based-assessment", "staff-assessment"],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
},
......@@ -1461,7 +1461,7 @@
"due": null
}
],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "example-based-assessment", "staff-assessment"],
"editor_assessments_order": ["student-training", "peer-assessment", "self-assessment", "staff-assessment"],
"submission_due": "2014-02-27T09:46",
"submission_start": "2014-02-10T09:46"
},
......
......@@ -1312,30 +1312,6 @@
],
"assessments": [
{
"name": "example-based-assessment",
"algorithm_id": "sample-algorithm-id",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
},
{
"name": "peer-assessment",
"start": "2014-02-27T09:46:28",
"due": "2014-03-01T00:00:00",
......@@ -1356,20 +1332,6 @@
"<openassessment text_response=\"required\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" algorithm_id=\"sample-algorithm-id\">",
"<example>",
"<answer>",
"<part>тєѕт αηѕωєя</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>",
"<part>тєѕт αηѕωєя TWO</part>",
"</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"<assessment name=\"staff-assessment\" required=\"True\" />",
......
......@@ -84,14 +84,6 @@
</criterion>
</rubric>
<assessments>
<assessment name="example-based-assessment" algorithm_id="fake">
<example>
<answer>Example Answer One</answer>
<select criterion="Form" option="Reddit" />
<select criterion="Clear-headed" option="Yogi Berra" />
<select criterion="Concise" option="HP Lovecraft" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" start="2015-01-02" due="2015-04-01"/>
<assessment name="self-assessment" start="2016-01-02" due="2016-04-01"/>
</assessments>
......
......@@ -567,120 +567,6 @@
]
},
"example_based_assessment": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" algorithm_id=\"ease\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"rubric_assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"example_based_default_algorithm_id": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
],
"rubric_assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
]
},
"file_upload_type_none": {
"xml": [
"<openassessment>",
......
......@@ -436,64 +436,6 @@
]
},
"example_based_start_date": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" start=\"2020-01-01\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"example_based_due_date": {
"xml": [
"<openassessment>",
"<title>foo</title>",
"<assessments>",
"<assessment name=\"example-based-assessment\" due=\"2020-01-01\">",
"<example>",
"<answer>тєѕт αηѕωєя</answer>",
"<select criterion=\"Test criterion\" option=\"No\" />",
"</example>",
"<example>",
"<answer>тєѕт αηѕωєя TWO</answer>",
"<select criterion=\"Test criterion\" option=\"Yes\" />",
"</example>",
"</assessment>",
"</assessments>",
"<rubric>",
"<prompt>test prompt</prompt>",
"<criterion>",
"<name>test criterion</name>",
"<prompt>test criterion prompt</prompt>",
"<option points=\"0\"><name>no</name><explanation>no explanation</explanation></option>",
"<option points=\"2\"><name>yes</name><explanation>yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"leaderboard_num_not_integer": {
"xml": [
"<openassessment leaderboard_show=\"not_an_int\">",
......
......@@ -71,73 +71,5 @@
"must_be_graded_by": 3
}
]
},
"example_based_assessment_matches_rubric": {
"rubric": {
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How good is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How good is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job!"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job!"
}
]
}
]
},
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "ẗëṡẗ äṅṡẅëṛ",
"options_selected": [
{
"criterion": "vocabulary",
"option": "Good"
},
{
"criterion": "grammar",
"option": "Poor"
}
]
}
]
}
]
}
}
......@@ -56,38 +56,5 @@
],
"current_assessments": null,
"is_released": false
},
"example_based_algorithm_id_is_ease": {
"assessments": [
{
"name": "example-based-assessment",
"start": null,
"due": null,
"algorithm_id": "ease",
"examples": [
{
"answer": "тєѕт αηѕωєя",
"options_selected": [
{
"criterion": "Test criterion",
"option": "No"
}
]
},
{
"answer": "тєѕт αηѕωєя TWO",
"options_selected": [
{
"criterion": "Test criterion",
"option": "Yes"
}
]
}
]
}
],
"current_assessments": null,
"is_released": false
}
}
{
"waiting_for_peer": {
"waiting_for_peer": true,
"waiting_for_ai": false,
"expected_response": "some assessments still need to be done on your response"
},
"waiting_for_ai": {
"waiting_for_peer": false,
"waiting_for_ai": true,
"expected_response": "some assessments still need to be done on your response"
},
"waiting_for_both": {
"waiting_for_peer": true,
"waiting_for_ai": true,
"expected_response": "some assessments still need to be done on your response"
},
"not_waiting": {
"waiting_for_peer": false,
"waiting_for_ai": false,
"expected_response": "your grade:"
}
}
......@@ -6,7 +6,8 @@ Tests for course items listing handlers.
import json
from mock import patch
from .base import scenario, XBlockHandlerTestCase, SubmitAssessmentsMixin
from .base import SubmitAssessmentsMixin, XBlockHandlerTestCase, scenario
class TestCourseItemsListingHandlers(XBlockHandlerTestCase, SubmitAssessmentsMixin):
......
......@@ -4,13 +4,11 @@ Test OpenAssessment XBlock data_conversion.
"""
import ddt
import mock
from django.test import TestCase
from openassessment.xblock.data_conversion import (
create_prompts_list, create_submission_dict, prepare_submission_for_serialization, update_assessments_format
)
from openassessment.xblock.data_conversion import (create_prompts_list, create_submission_dict,
prepare_submission_for_serialization, update_assessments_format)
@ddt.ddt
......
......@@ -2,8 +2,9 @@
Test that we can export a block from the runtime (to XML) and re-import it without error.
"""
import copy
from StringIO import StringIO
import copy
from .base import XBlockHandlerTestCase, scenario
......
......@@ -3,18 +3,14 @@
Tests for grade handlers in Open Assessment XBlock.
"""
import copy
import ddt
import json
import mock
from django.test.utils import override_settings
import ddt
from openassessment.assessment.api import peer as peer_api
from openassessment.xblock.openassessmentblock import OpenAssessmentBlock
from .base import (
scenario, SubmitAssessmentsMixin, XBlockHandlerTestCase,
PEER_ASSESSMENTS, SELF_ASSESSMENT, STAFF_GOOD_ASSESSMENT, STAFF_BAD_ASSESSMENT,
)
from .base import (PEER_ASSESSMENTS, SELF_ASSESSMENT, STAFF_BAD_ASSESSMENT, STAFF_GOOD_ASSESSMENT,
SubmitAssessmentsMixin, XBlockHandlerTestCase, scenario)
@ddt.ddt
......@@ -22,10 +18,6 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
"""
View-level tests for the XBlock grade handlers.
"""
AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm'
}
@scenario('data/grade_scenario.xml', user_id='Greggs')
def test_render_grade(self, xblock):
# Submit, assess, and render the grade view
......@@ -118,39 +110,6 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
self.assertIn(u'єאςєɭɭєภՇ ฬ๏гк!', resp.decode('utf-8'))
self.assertIn(u'Good job!', resp.decode('utf-8'))
@mock.patch.object(OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock)
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
@scenario('data/grade_scenario_ai_only.xml', user_id='Greggs')
def test_render_grade_ai_only(self, xblock, mock_is_admin):
# Train classifiers using the fake AI algorithm
mock_is_admin.return_value = True
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Submit, assess, and render the grade view
self.create_submission_and_assessments(
xblock, self.SUBMISSION, [], [], None, waiting_for_peer=True
)
resp = self.request(xblock, 'render_grade', json.dumps(dict()))
# Verify that feedback from each scorer appears in the view
self.assertNotIn(u'єאςєɭɭєภՇ', resp.decode('utf-8'))
self.assertIn(u'Poor', resp.decode('utf-8'))
# Verify that the submission and peer steps show that we're graded
# This isn't strictly speaking part of the grade step rendering,
# but we've already done all the setup to get to this point in the flow,
# so we might as well verify it here.
resp = self.request(xblock, 'render_submission', json.dumps(dict()))
self.assertIn('response', resp.lower())
self.assertIn('complete', resp.lower())
resp = self.request(xblock, 'render_peer_assessment', json.dumps(dict()))
self.assertNotIn('peer', resp.lower())
self.assertNotIn('complete', resp.lower())
resp = self.request(xblock, 'render_self_assessment', json.dumps(dict()))
self.assertNotIn('self', resp.lower())
self.assertNotIn('complete', resp.lower())
@scenario('data/feedback_per_criterion.xml', user_id='Bernard')
def test_render_grade_feedback(self, xblock):
# Submit, assess, and render the grade view
......@@ -382,14 +341,6 @@ class TestGrade(XBlockHandlerTestCase, SubmitAssessmentsMixin):
@ddt.file_data('data/waiting_scenarios.json')
@scenario('data/grade_waiting_scenario.xml', user_id='Omar')
def test_grade_waiting(self, xblock, data):
# If AI classifiers are not trained, then we should see a "waiting for AI" display
if not data["waiting_for_ai"]:
with mock.patch.object(
OpenAssessmentBlock, 'is_admin', new_callable=mock.PropertyMock
) as mock_is_admin:
mock_is_admin.return_value = True
self.request(xblock, 'schedule_training', json.dumps({}), response_format='json')
# Waiting to be assessed by a peer
self.create_submission_and_assessments(
xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT,
......