Commit 60f6ea36 by Eric Fischer, committed by GitHub

Merge pull request #1023 from edx/efischer/tox

Tox, for parallel test goodness
parents 43aa53ea 3e13d27d
addons:
apt:
packages:
- aspell
- aspell-en
- g++
- gcc
- git
- gfortran
- libblas3gf
- libblas-dev
- liblapack3gf
- liblapack-dev
- libatlas-base-dev
- libfontconfig1
- libmysqlclient-dev
- libxml2-dev
- libxslt1-dev
- nodejs
- python2.7
- python2.7-dev
- python-pip
- python-software-properties
language: python
sudo: false
python:
- "2.7"
env:
- TOXENV=django18
- TOXENV=django111
matrix:
include:
- python: 2.7
env: TOXENV=quality
- python: 2.7
env: TOXENV=js
before_install:
- "pip install -U pip"
cache:
directories:
- $HOME/.cache/pip
install:
- "pip install coveralls"
- "pip install tox"
- "make install"
before_script:
- "export DISPLAY=:99.0"
- "sh -e /etc/init.d/xvfb start"
script:
- "make verify-generated-files"
- tox
- "make test"
- "python manage.py makemessages -l eo"
branches:
only:
- master
- ora-staff-grading
after_success:
coveralls
include LICENSE
include AUTHORS
include README.rst
include setup.cfg
include openassessment/xblock/static/css/*.css
include openassessment/xblock/static/css/lib/backgrid/*.css
include openassessment/xblock/static/js/openassessment*.min.js
include openassessment/xblock/static/js/lib/backgrid/*.js
include requirements/*.txt
recursive-include openassessment/xblock/static/js/src *.js
recursive-include openassessment/templates *.html *.underscore
recursive-include openassessment/locale *.po
recursive-include openassessment/locale *.mo
global-exclude */test*
global-exclude */test?/*
all: install test
.PHONY: install test
##################
# Install commands
##################
# not used by travis
install-system:
sudo apt-get update -qq
sudo xargs -a apt-packages.txt apt-get install -qq --fix-missing
# not used by travis
install-node:
sudo apt-get install -qq nodejs
install-wheels:
./scripts/install-wheels.sh
install-python:
./scripts/install-python.sh
pip install -r requirements/django.txt
pip install -r requirements/base.txt --only-binary=lxml,libsass
install-js:
npm install
install-nltk-data:
./scripts/download-nltk-data.sh
install-test:
pip install -r requirements/test.txt
install: install-python install-js install-test javascript sass
##############################
# Generate js/css output files
##############################
STATIC_JS = openassessment/xblock/static/js
STATIC_CSS = openassessment/xblock/static/css
update-npm-requirements:
npm update --silent
cp ./node_modules/backgrid/lib/backgrid*.js $(STATIC_JS)/lib/backgrid/
cp ./node_modules/backgrid/lib/backgrid*.css $(STATIC_CSS)/lib/backgrid/
javascript: update-npm-requirements
node_modules/.bin/uglifyjs $(STATIC_JS)/src/oa_shared.js $(STATIC_JS)/src/*.js $(STATIC_JS)/src/lms/*.js $(STATIC_JS)/lib/backgrid/backgrid.min.js -c warnings=false > "$(STATIC_JS)/openassessment-lms.min.js"
node_modules/.bin/uglifyjs $(STATIC_JS)/src/oa_shared.js $(STATIC_JS)/src/*.js $(STATIC_JS)/src/studio/*.js $(STATIC_JS)/lib/backgrid/backgrid.min.js -c warnings=false > "$(STATIC_JS)/openassessment-studio.min.js"
@@ -38,27 +34,17 @@ sass:
verify-generated-files:
@git diff --quiet || (echo 'Modifications exist locally! Run `make javascript` or `make sass` to update bundled files.'; exit 1)
install-test:
pip install -q -r requirements/test.txt
################
#Tests and checks
################
install-sys-requirements: install-system install-node
npm config set loglevel warn
install-dev:
pip install -q -r requirements/dev.txt
install: install-wheels install-python install-js install-nltk-data install-test install-dev javascript sass
quality:
./node_modules/.bin/jshint $(STATIC_JS)/src -c .jshintrc --verbose
./node_modules/jscs/bin/jscs $(STATIC_JS)/src --verbose
./scripts/run-pep8.sh
./scripts/run-pylint.sh
test: quality test-python test-js
test-python:
./scripts/test-python.sh
coverage run manage.py test openassessment
render-templates:
./scripts/render-templates.sh
@@ -69,15 +55,13 @@ test-js: render-templates
test-js-debug: render-templates
./scripts/js-debugger.sh
test-sandbox: test-acceptance test-a11y
test: quality test-python test-js
# acceptance and a11y tests require a functioning sandbox, and do not run on travis
test-acceptance:
./scripts/test-acceptance.sh tests
test-a11y:
./scripts/test-acceptance.sh accessibility
test-sandbox: test-acceptance test-a11y
update-npm-requirements:
npm update --silent
cp ./node_modules/backgrid/lib/backgrid*.js $(STATIC_JS)/lib/backgrid/
cp ./node_modules/backgrid/lib/backgrid*.css $(STATIC_CSS)/lib/backgrid/
@@ -7,7 +7,7 @@ Open Response Assessment |build-status| |coverage-status|
Installation, Tests, and other Developer Tasks
==============================================
EdX engineers follow the `guides on our wiki <https://openedx.atlassian.net/wiki/display/EDUCATOR/ORA+FAQ>`_.
EdX engineers follow the `guides on our wiki <https://openedx.atlassian.net/wiki/spaces/EDUCATOR/pages/9765004/ORA+Developer+Guide>`_.
License
=======
...
aspell
g++
gcc
git
gfortran
libblas-dev
liblapack-dev
libatlas-base-dev
libfontconfig1
libmysqlclient-dev
libxml2-dev
libxslt1-dev
nodejs
npm
python2.7
python2.7-dev
python-mysqldb
python-pip
python-software-properties
rubygems
../openassessment/locale/
\ No newline at end of file
Log files:
apps_info.log = INFO level logging for all edx-ora2 apps and OpenAssessmentBlock
apps_debug.log = same as above, except DEBUG level
errors.log = all ERROR and CRITICAL logs, stack traces
events.log = Analytics events from the xblock-sdk workbench runtime's publish()
trace.log = The kitchen sink. Massive because of SQL debug logs from Django.
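The log layout above maps naturally onto a Django LOGGING (dictConfig) setup with one file handler per log and broad loggers feeding them. The settings module itself is not part of this excerpt, so the following is only a sketch under assumptions: handler names, file paths, and logger names are illustrative, and events.log is written by the xblock-sdk workbench runtime's publish() rather than by this config.

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {'format': '%(asctime)s %(levelname)s %(name)s %(message)s'},
    },
    'handlers': {
        # One file per purpose, mirroring the list above (paths are assumed).
        'apps_info': {'class': 'logging.FileHandler', 'filename': 'logs/apps_info.log',
                      'level': 'INFO', 'formatter': 'simple'},
        'apps_debug': {'class': 'logging.FileHandler', 'filename': 'logs/apps_debug.log',
                       'level': 'DEBUG', 'formatter': 'simple'},
        'errors': {'class': 'logging.FileHandler', 'filename': 'logs/errors.log',
                   'level': 'ERROR', 'formatter': 'simple'},
        'trace': {'class': 'logging.FileHandler', 'filename': 'logs/trace.log',
                  'level': 'DEBUG', 'formatter': 'simple'},
    },
    'loggers': {
        # edx-ora2 apps and OpenAssessmentBlock feed the INFO/DEBUG app logs.
        'openassessment': {'handlers': ['apps_info', 'apps_debug', 'errors'], 'level': 'DEBUG'},
        # The root logger catches everything else; Django's SQL debug output
        # is what makes trace.log so large.
        '': {'handlers': ['trace', 'errors'], 'level': 'DEBUG'},
    },
}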
#!/usr/bin/env python
import sys
import os
import sys
if __name__ == "__main__":
...
@@ -4,13 +4,10 @@ Django admin models for openassessment
import json
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils import html
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, PeerWorkflow, PeerWorkflowItem, Rubric,
AIGradingWorkflow, AITrainingWorkflow, AIClassifierSet, AIClassifier
)
from openassessment.assessment.models import Assessment, AssessmentFeedback, PeerWorkflow, PeerWorkflowItem, Rubric
from openassessment.assessment.serializers import RubricSerializer
@@ -92,7 +89,7 @@ class AssessmentAdmin(admin.ModelAdmin):
""" """
Returns the rubric link for this assessment. Returns the rubric link for this assessment.
""" """
url = reverse( url = reverse_lazy(
'admin:assessment_rubric_change', 'admin:assessment_rubric_change',
args=[assessment_obj.rubric.id] args=[assessment_obj.rubric.id]
) )
@@ -141,7 +138,7 @@ class AssessmentFeedbackAdmin(admin.ModelAdmin):
""" """
links = [ links = [
u'<a href="{}">{}</a>'.format( u'<a href="{}">{}</a>'.format(
reverse('admin:assessment_assessment_change', args=[asmt.id]), reverse_lazy('admin:assessment_assessment_change', args=[asmt.id]),
html.escape(asmt.scorer_id) html.escape(asmt.scorer_id)
) )
for asmt in assessment_feedback.assessments.all() for asmt in assessment_feedback.assessments.all()
...@@ -150,44 +147,7 @@ class AssessmentFeedbackAdmin(admin.ModelAdmin): ...@@ -150,44 +147,7 @@ class AssessmentFeedbackAdmin(admin.ModelAdmin):
assessments_by.allow_tags = True assessments_by.allow_tags = True
class AIGradingWorkflowAdmin(admin.ModelAdmin):
"""
Django admin model for AIGradingWorkflows.
"""
list_display = ('uuid', 'submission_uuid')
search_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')
readonly_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')
class AITrainingWorkflowAdmin(admin.ModelAdmin):
"""
Django admin model for AITrainingWorkflows.
"""
list_display = ('uuid',)
search_fields = ('uuid', 'course_id', 'item_id',)
readonly_fields = ('uuid', 'course_id', 'item_id',)
class AIClassifierInline(admin.TabularInline):
"""
Django admin model for AIClassifiers.
"""
model = AIClassifier
class AIClassifierSetAdmin(admin.ModelAdmin):
"""
Django admin model for AICLassifierSets.
"""
list_display = ('id',)
search_fields = ('id',)
inlines = [AIClassifierInline]
admin.site.register(Rubric, RubricAdmin)
admin.site.register(PeerWorkflow, PeerWorkflowAdmin)
admin.site.register(Assessment, AssessmentAdmin)
admin.site.register(AssessmentFeedback, AssessmentFeedbackAdmin)
admin.site.register(AIGradingWorkflow, AIGradingWorkflowAdmin)
admin.site.register(AITrainingWorkflow, AITrainingWorkflowAdmin)
admin.site.register(AIClassifierSet, AIClassifierSetAdmin)
@@ -5,22 +5,17 @@ the workflow for a given submission.
"""
import logging
from django.utils import timezone
from django.db import DatabaseError, IntegrityError, transaction
from dogapi import dog_stats_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem,
)
from openassessment.assessment.serializers import (
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentWorkflowError, PeerAssessmentInternalError
)
from django.utils import timezone
from dogapi import dog_stats_api
from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentWorkflowError)
from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentPart, InvalidRubricSelection,
PeerWorkflow, PeerWorkflowItem)
from openassessment.assessment.serializers import (AssessmentFeedbackSerializer, InvalidRubric, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments)
from submissions import api as sub_api
logger = logging.getLogger("openassessment.assessment.api.peer")
...
@@ -2,20 +2,15 @@
Public interface for self-assessment.
"""
import logging
from django.db import DatabaseError, transaction
from dogapi import dog_stats_api
from submissions.api import get_submission_and_student, SubmissionNotFoundError
from openassessment.assessment.serializers import (
InvalidRubric, full_assessment_dict, rubric_from_dict, serialize_assessments
)
from openassessment.assessment.models import (
Assessment, AssessmentPart, InvalidRubricSelection
)
from openassessment.assessment.errors import (
SelfAssessmentRequestError, SelfAssessmentInternalError
)
from django.db import DatabaseError, transaction
from dogapi import dog_stats_api
from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from openassessment.assessment.serializers import (InvalidRubric, full_assessment_dict, rubric_from_dict,
serialize_assessments)
from submissions.api import SubmissionNotFoundError, get_submission_and_student
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
...
@@ -2,25 +2,15 @@
Public interface for staff grading, used by students/course staff.
"""
import logging
from django.db import DatabaseError, transaction
from django.utils.timezone import now
from dogapi import dog_stats_api
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
from submissions import api as submissions_api
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
InvalidRubricSelection, StaffWorkflow,
)
from openassessment.assessment.serializers import (
AssessmentFeedbackSerializer, RubricSerializer,
full_assessment_dict, rubric_from_dict, serialize_assessments,
InvalidRubric
)
from openassessment.assessment.errors import (
StaffAssessmentRequestError, StaffAssessmentInternalError
)
logger = logging.getLogger("openassessment.assessment.api.staff")
STAFF_TYPE = "ST"
...
@@ -7,19 +7,15 @@ Public interface for student training:
"""
import logging
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from submissions import api as sub_api
from django.utils.translation import ugettext as _
from openassessment.assessment.models import StudentTrainingWorkflow, InvalidRubricSelection
from openassessment.assessment.serializers import (
deserialize_training_examples, serialize_training_example,
validate_training_example_format,
InvalidTrainingExample, InvalidRubric
)
from openassessment.assessment.errors import (
StudentTrainingRequestError, StudentTrainingInternalError
)
from openassessment.assessment.errors import StudentTrainingInternalError, StudentTrainingRequestError
from openassessment.assessment.models import InvalidRubricSelection, StudentTrainingWorkflow
from openassessment.assessment.serializers import (InvalidRubric, InvalidTrainingExample, deserialize_training_examples,
serialize_training_example, validate_training_example_format)
from submissions import api as sub_api
logger = logging.getLogger(__name__)
...
@@ -2,7 +2,6 @@
Data Conversion utility methods for handling assessment data transformations.
"""
import json
def update_training_example_answer_format(answer):
@@ -14,7 +13,7 @@ def update_training_example_answer_format(answer):
Returns:
dict
"""
if isinstance(answer, unicode) or isinstance(answer, str):
if isinstance(answer, (str, unicode)):
return {
'parts': [
{'text': answer}
...
@@ -8,4 +8,3 @@ from .peer import *
from .self import *
from .staff import *
from .student_training import *
from .ai import *
"""
Errors related to AI assessment.
"""
from celery.exceptions import InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound
from socket import error as socket_error
ANTICIPATED_CELERY_ERRORS = (InvalidTaskError, NotConfigured, NotRegistered, QueueNotFound, socket_error)
class AIError(Exception):
"""
A general error occurred while using the AI assessment API.
"""
pass
class AITrainingRequestError(AIError):
"""
There was a problem with the request sent to the AI assessment API.
"""
pass
class AITrainingInternalError(AIError):
"""
An unexpected error occurred while using the AI assessment API.
"""
pass
class AIGradingRequestError(AIError):
"""
There was a problem with the request sent to the AI assessment API.
"""
pass
class AIGradingInternalError(AIError):
"""
An unexpected error occurred while using the AI assessment API.
"""
pass
class AIReschedulingRequestError(AIError):
"""
There was a problem with the request sent to the AI assessment API.
"""
pass
class AIReschedulingInternalError(AIError):
"""
An unexpected error occurred while using the AI assessment API.
"""
pass
@@ -2,10 +2,8 @@
# pylint: skip-file
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import migrations, models
import django.utils.timezone
import django_extensions.db.fields
import openassessment.assessment.models.ai
class Migration(migrations.Migration):
@@ -15,54 +13,6 @@ class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
name='AIClassifier',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('classifier_data', models.FileField(upload_to=openassessment.assessment.models.ai.upload_to_path)),
],
),
migrations.CreateModel(
name='AIClassifierSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('algorithm_id', models.CharField(max_length=128, db_index=True)),
('course_id', models.CharField(max_length=40, db_index=True)),
('item_id', models.CharField(max_length=128, db_index=True)),
],
options={
'ordering': ['-created_at', '-id'],
},
),
migrations.CreateModel(
name='AIGradingWorkflow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', django_extensions.db.fields.UUIDField(db_index=True, unique=True, version=1, editable=False, blank=True)),
('course_id', models.CharField(max_length=40, db_index=True)),
('item_id', models.CharField(max_length=128, db_index=True)),
('scheduled_at', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('completed_at', models.DateTimeField(null=True, db_index=True)),
('algorithm_id', models.CharField(max_length=128, db_index=True)),
('submission_uuid', models.CharField(max_length=128, db_index=True)),
('essay_text', models.TextField(blank=True)),
('student_id', models.CharField(max_length=40, db_index=True)),
],
),
migrations.CreateModel(
name='AITrainingWorkflow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', django_extensions.db.fields.UUIDField(db_index=True, unique=True, version=1, editable=False, blank=True)),
('course_id', models.CharField(max_length=40, db_index=True)),
('item_id', models.CharField(max_length=128, db_index=True)),
('scheduled_at', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('completed_at', models.DateTimeField(null=True, db_index=True)),
('algorithm_id', models.CharField(max_length=128, db_index=True)),
('classifier_set', models.ForeignKey(related_name='+', default=None, to='assessment.AIClassifierSet', null=True)),
],
),
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
@@ -235,41 +185,6 @@ class Migration(migrations.Migration):
name='rubric',
field=models.ForeignKey(to='assessment.Rubric'),
),
migrations.AddField(
model_name='aitrainingworkflow',
name='training_examples',
field=models.ManyToManyField(related_name='+', to='assessment.TrainingExample'),
),
migrations.AddField(
model_name='aigradingworkflow',
name='assessment',
field=models.ForeignKey(related_name='+', default=None, to='assessment.Assessment', null=True),
),
migrations.AddField(
model_name='aigradingworkflow',
name='classifier_set',
field=models.ForeignKey(related_name='+', default=None, to='assessment.AIClassifierSet', null=True),
),
migrations.AddField(
model_name='aigradingworkflow',
name='rubric',
field=models.ForeignKey(related_name='+', to='assessment.Rubric'),
),
migrations.AddField(
model_name='aiclassifierset',
name='rubric',
field=models.ForeignKey(related_name='+', to='assessment.Rubric'),
),
migrations.AddField(
model_name='aiclassifier',
name='classifier_set',
field=models.ForeignKey(related_name='classifiers', to='assessment.AIClassifierSet'),
),
migrations.AddField(
model_name='aiclassifier',
name='criterion',
field=models.ForeignKey(related_name='+', to='assessment.Criterion'),
),
migrations.AlterUniqueTogether(
name='studenttrainingworkflowitem',
unique_together=set([('workflow', 'order_num')]),
...
@@ -13,21 +13,6 @@ class Migration(migrations.Migration):
operations = [
migrations.AlterField(
model_name='aiclassifierset',
name='course_id',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='aigradingworkflow',
name='course_id',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='aitrainingworkflow',
name='course_id',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='peerworkflow',
name='course_id',
field=models.CharField(max_length=255, db_index=True),
...
@@ -7,5 +7,4 @@ from .base import *
from .peer import *
from .training import *
from .student_training import *
from .ai import *
from .staff import *
@@ -12,18 +12,19 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import math
from collections import defaultdict
from copy import deepcopy
from hashlib import sha1
import json
import logging
import math
from lazy import lazy
from django.core.cache import cache
from django.db import models
from django.utils.timezone import now
from lazy import lazy
import logging
logger = logging.getLogger("openassessment.assessment.models")
...
@@ -7,16 +7,16 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import random
from datetime import timedelta
import logging
import random
from django.db import models, DatabaseError
from django.db import DatabaseError, models
from django.utils.timezone import now
from openassessment.assessment.errors import PeerAssessmentInternalError, PeerAssessmentWorkflowError
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
import logging
logger = logging.getLogger("openassessment.assessment.models")
...
@@ -3,10 +3,9 @@ Models for managing staff assessments.
"""
from datetime import timedelta
from django.db import models, DatabaseError
from django.db import DatabaseError, models
from django.utils.timezone import now
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import StaffAssessmentInternalError
...
"""
Django models specific to the student training assessment type.
"""
from django.db import models, transaction, IntegrityError
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from submissions import api as sub_api
from .training import TrainingExample
...
"""
Django models for training (both student and AI).
"""
import json
from hashlib import sha1
import json
from django.core.cache import cache
from django.db import models
from .base import Rubric, CriterionOption
from .base import CriterionOption, Rubric
class TrainingExample(models.Model):
...
@@ -5,13 +5,12 @@ Serializers common to all assessment types.
from copy import deepcopy
import logging
from django.core.cache import cache
from rest_framework import serializers
from rest_framework.fields import IntegerField, DateTimeField
from openassessment.assessment.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric,
)
from rest_framework.fields import DateTimeField, IntegerField
from django.core.cache import cache
from openassessment.assessment.models import Assessment, AssessmentPart, Criterion, CriterionOption, Rubric
logger = logging.getLogger(__name__)
@@ -211,8 +210,8 @@ def full_assessment_dict(assessment, rubric_dict=None):
# `CriterionOption` again, we simply index into the places we expect them to
# be from the big, saved `Rubric` serialization.
parts = []
for part in assessment.parts.all().select_related("criterion", "option"):
criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
for part in assessment.parts.order_by('criterion__order_num').all().select_related("criterion", "option"):
criterion_dict = dict(rubric_dict["criteria"][part.criterion.order_num])
options_dict = None
if part.option is not None:
options_dict = criterion_dict["options"][part.option.order_num]
...
@@ -2,11 +2,11 @@
Serializers specific to peer assessment.
"""
from rest_framework import serializers
from openassessment.assessment.models import (AssessmentFeedback, AssessmentFeedbackOption, PeerWorkflow,
PeerWorkflowItem)
from .base import AssessmentSerializer
from openassessment.assessment.models import (
AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
class AssessmentFeedbackOptionSerializer(serializers.ModelSerializer):
...
@@ -2,10 +2,12 @@
Serializers for the training assessment type.
"""
from django.core.cache import cache
from django.db import transaction, IntegrityError
from django.db import IntegrityError, transaction
from openassessment.assessment.models import TrainingExample
from openassessment.assessment.data_conversion import update_training_example_answer_format
from .base import rubric_from_dict, RubricSerializer
from openassessment.assessment.models import TrainingExample
from .base import RubricSerializer, rubric_from_dict
class InvalidTrainingExample(Exception):
...
"""
Celery looks for tasks in this module,
so import the tasks we want the workers to implement.
"""
# pylint:disable=W0611
from .worker.training import train_classifiers, reschedule_training_tasks
from .worker.grading import grade_essay, reschedule_grading_tasks
# coding=utf-8
"""
Tests for AI algorithm implementations.
"""
import unittest
import json
import mock
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.worker.algorithm import (
AIAlgorithm, FakeAIAlgorithm, EaseAIAlgorithm,
TrainingError, InvalidClassifier
)
EXAMPLES = [
AIAlgorithm.ExampleEssay(u"Mine's a tale that can't be told, my ƒяєє∂σм I hold dear.", 2),
AIAlgorithm.ExampleEssay(u"How years ago in days of old, when 𝒎𝒂𝒈𝒊𝒄 filled th air.", 1),
AIAlgorithm.ExampleEssay(u"Ṫ'ẅäṡ in the darkest depths of Ṁöṛḋöṛ, I met a girl so fair.", 1),
AIAlgorithm.ExampleEssay(u"But goレレuᄊ, and the evil one crept up and slipped away with her", 0),
AIAlgorithm.ExampleEssay(u"", 4),
AIAlgorithm.ExampleEssay(u".!?", 4),
AIAlgorithm.ExampleEssay(u"no punctuation", 4),
AIAlgorithm.ExampleEssay(u"one", 4),
]
INPUT_ESSAYS = [
u"Good times, 𝑩𝒂𝒅 𝑻𝒊𝒎𝒆𝒔, you know I had my share",
u"When my woman left home for a 𝒃𝒓𝒐𝒘𝒏 𝒆𝒚𝒆𝒅 𝒎𝒂𝒏",
u"Well, I still don't seem to 𝒄𝒂𝒓𝒆",
u"",
u".!?",
u"no punctuation",
u"one",
]
class AIAlgorithmTest(CacheResetTest):
"""
Base class for testing AI algorithm implementations.
"""
ALGORITHM_CLASS = None
def setUp(self):
self.algorithm = self.ALGORITHM_CLASS() # pylint:disable=E1102
def _scores(self, classifier, input_essays):
"""
Use the classifier to score multiple input essays.
Args:
input_essays (list of unicode): The essays to score.
Returns:
list of int: The scores
"""
cache = {}
return [
self.algorithm.score(input_essay, classifier, cache)
for input_essay in input_essays
]
class FakeAIAlgorithmTest(AIAlgorithmTest):
"""
Test for the fake AI algorithm implementation.
"""
ALGORITHM_CLASS = FakeAIAlgorithm
def test_train_and_score(self):
classifier = self.algorithm.train_classifier(EXAMPLES)
expected_scores = [2, 0, 0, 0, 4, 2, 4]
scores = self._scores(classifier, INPUT_ESSAYS)
self.assertEqual(scores, expected_scores)
def test_score_classifier_missing_key(self):
with self.assertRaises(InvalidClassifier):
self.algorithm.score(u"Test input", {}, {})
def test_score_classifier_no_scores(self):
with self.assertRaises(InvalidClassifier):
self.algorithm.score(u"Test input", {'scores': []}, {})
# Try to import EASE -- if we can't, then skip the tests that require it
try:
import ease # pylint: disable=F0401,W0611
EASE_INSTALLED = True
except ImportError:
EASE_INSTALLED = False
@unittest.skipUnless(EASE_INSTALLED, "EASE library required")
class EaseAIAlgorithmTest(AIAlgorithmTest):
"""
Test for the EASE AI library wrapper.
"""
ALGORITHM_CLASS = EaseAIAlgorithm
def test_train_and_score(self):
classifier = self.algorithm.train_classifier(EXAMPLES)
scores = self._scores(classifier, INPUT_ESSAYS)
# Check that we got scores in the correct range
valid_scores = set(example.score for example in EXAMPLES)
for score in scores:
self.assertIn(score, valid_scores)
# Check that the scores are consistent when we re-run the algorithm
repeat_scores = self._scores(classifier, INPUT_ESSAYS)
self.assertEqual(scores, repeat_scores)
def test_all_examples_have_same_score(self):
examples = [
AIAlgorithm.ExampleEssay(u"Test ëṡṡäÿ", 1),
AIAlgorithm.ExampleEssay(u"Another test ëṡṡäÿ", 1),
]
# No assertion -- just verifying that this does not raise an exception
classifier = self.algorithm.train_classifier(examples)
self._scores(classifier, INPUT_ESSAYS)
def test_most_examples_have_same_score(self):
# All training examples have the same score except for one
examples = [
AIAlgorithm.ExampleEssay(u"Test ëṡṡäÿ", 1),
AIAlgorithm.ExampleEssay(u"Another test ëṡṡäÿ", 1),
AIAlgorithm.ExampleEssay(u"Different score", 0),
]
classifier = self.algorithm.train_classifier(examples)
scores = self._scores(classifier, INPUT_ESSAYS)
# Check that we got scores back.
# This is not a very rigorous assertion -- we're mainly
# checking that we got this far without an exception.
self.assertEqual(len(scores), len(INPUT_ESSAYS))
def test_no_examples(self):
with self.assertRaises(TrainingError):
self.algorithm.train_classifier([])
def test_json_serializable(self):
classifier = self.algorithm.train_classifier(EXAMPLES)
serialized = json.dumps(classifier)
deserialized = json.loads(serialized)
# This should not raise an exception
scores = self._scores(deserialized, INPUT_ESSAYS)
self.assertEqual(len(scores), len(INPUT_ESSAYS))
@mock.patch('openassessment.assessment.worker.algorithm.pickle')
def test_pickle_serialize_error(self, mock_pickle):
mock_pickle.dumps.side_effect = Exception("Test error!")
with self.assertRaises(TrainingError):
self.algorithm.train_classifier(EXAMPLES)
def test_pickle_deserialize_error(self):
classifier = self.algorithm.train_classifier(EXAMPLES)
with mock.patch('openassessment.assessment.worker.algorithm.pickle.loads') as mock_call:
mock_call.side_effect = Exception("Test error!")
with self.assertRaises(InvalidClassifier):
self.algorithm.score(u"Test ëṡṡäÿ", classifier, {})
def test_serialized_classifier_not_a_dict(self):
with self.assertRaises(InvalidClassifier):
self.algorithm.score(u"Test ëṡṡäÿ", "not a dict", {})
# coding=utf-8
"""
Test AI Django models.
"""
import copy
import ddt
from django.test import TestCase
from django.test.utils import override_settings
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
AIClassifierSet, AIClassifier, AIGradingWorkflow, AI_CLASSIFIER_STORAGE,
CLASSIFIERS_CACHE_IN_MEM, essay_text_from_submission
)
from openassessment.assessment.serializers import rubric_from_dict
from .constants import RUBRIC
CLASSIFIERS_DICT = {
u"vøȼȺƀᵾłȺɍɏ": "test data",
u"ﻭɼค๓๓คɼ": "more test data"
}
COURSE_ID = u"†3߆ çøU®ß3"
ITEM_ID = u"fake_item_id"
@ddt.ddt
class DataConversionTest(TestCase):
@ddt.data(
(u'Answer', u'Answer'),
({'answer': {'text': u'Answer'}}, u'Answer'),
({'answer': {'parts': [{'text': u'Answer 1'}, {'text': u'Answer 2'}]}}, u'Answer 1\nAnswer 2')
)
@ddt.unpack
def test_essay_text_from_submission(self, input, output):
self.assertEqual(essay_text_from_submission(input), output)
class AIClassifierTest(CacheResetTest):
"""
Tests for the AIClassifier model.
"""
def test_upload_to_path_default(self):
# No path prefix provided in the settings
classifier = self._create_classifier()
components = classifier.classifier_data.name.split(u'/')
self.assertEqual(len(components), 2)
self.assertEqual(components[0], AI_CLASSIFIER_STORAGE)
self.assertGreater(len(components[1]), 0)
@override_settings(ORA2_FILE_PREFIX=u"ƒιℓє_ρяєƒιχ")
def test_upload_to_path_with_prefix(self):
classifier = self._create_classifier()
components = classifier.classifier_data.name.split(u'/')
self.assertEqual(len(components), 3)
self.assertEqual(components[0], u"ƒιℓє_ρяєƒιχ")
self.assertEqual(components[1], AI_CLASSIFIER_STORAGE)
self.assertGreater(len(components[2]), 0)
def _create_classifier(self):
"""
Create and return an AIClassifier.
"""
rubric = rubric_from_dict(RUBRIC)
classifier_set = AIClassifierSet.create_classifier_set(
CLASSIFIERS_DICT, rubric, "test_algorithm", COURSE_ID, ITEM_ID
)
return AIClassifier.objects.filter(classifier_set=classifier_set)[0]
class AIClassifierSetTest(CacheResetTest):
"""
Tests for the AIClassifierSet model.
"""
def setUp(self):
super(AIClassifierSetTest, self).setUp()
rubric = rubric_from_dict(RUBRIC)
self.classifier_set = AIClassifierSet.create_classifier_set(
CLASSIFIERS_DICT, rubric, "test_algorithm", COURSE_ID, ITEM_ID
)
def test_cache_downloads(self):
# Retrieve the classifier dict twice, which should hit the caching code.
# We can check that we're using the cache by asserting that
# the number of database queries decreases.
with self.assertNumQueries(1):
first = self.classifier_set.classifier_data_by_criterion
with self.assertNumQueries(0):
second = self.classifier_set.classifier_data_by_criterion
# Verify that we got the same value both times
self.assertEqual(first, second)
def test_file_cache_downloads(self):
# Retrieve the classifiers dict, which should be cached
# both in memory and on the file system
first = self.classifier_set.classifier_data_by_criterion
# Clear the in-memory cache
# This simulates what happens when a worker process dies
# after exceeding the maximum number of retries.
CLASSIFIERS_CACHE_IN_MEM.clear()
# We should still be able to retrieve the classifiers dict
# from the on-disk cache, even if memory has been cleared
with self.assertNumQueries(0):
second = self.classifier_set.classifier_data_by_criterion
# Verify that we got the correct classifiers dict back
self.assertEqual(first, second)
class AIGradingWorkflowTest(CacheResetTest):
"""
Tests for the AIGradingWorkflow model.
"""
CLASSIFIERS_DICT = {
u"vøȼȺƀᵾłȺɍɏ": "test data",
u"ﻭɼค๓๓คɼ": "more test data"
}
COURSE_ID = u"test"
ITEM_ID = u"test"
ALGORITHM_ID = "test"
def setUp(self):
"""
Create a new grading workflow.
"""
self.rubric = rubric_from_dict(RUBRIC)
self.workflow = AIGradingWorkflow.objects.create(
submission_uuid='test', essay_text='test',
rubric=self.rubric, algorithm_id=self.ALGORITHM_ID,
item_id=self.ITEM_ID, course_id=self.COURSE_ID
)
# Create a rubric with a similar structure, but different prompt
similar_rubric_dict = copy.deepcopy(RUBRIC)
similar_rubric_dict['prompts'] = [{"description": 'Different prompt!'}]
self.similar_rubric = rubric_from_dict(similar_rubric_dict)
def test_assign_most_recent_classifier_set(self):
# No classifier sets are available
found = self.workflow.assign_most_recent_classifier_set()
self.assertFalse(found)
self.assertIs(self.workflow.classifier_set, None)
# Same rubric (exact), but different course id
classifier_set = AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.rubric, self.ALGORITHM_ID,
"different course!", self.ITEM_ID
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
# Same rubric (exact) but different item id
classifier_set = AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.rubric, self.ALGORITHM_ID,
self.COURSE_ID, "different item!"
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
# Same rubric (exact), but different algorithm id
# Shouldn't change, since the algorithm ID doesn't match
AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.rubric, "different algorithm!",
self.COURSE_ID, self.ITEM_ID
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
# Same rubric *structure*, but in a different item
# Shouldn't change, since the rubric isn't an exact match.
AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.similar_rubric, self.ALGORITHM_ID,
self.COURSE_ID, "different item!"
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
# Same rubric *structure* AND in the same course/item
# This should replace our current classifier set
classifier_set = AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.similar_rubric, self.ALGORITHM_ID,
self.COURSE_ID, self.ITEM_ID
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
# Same rubric and same course/item
# This is the ideal, so we should always prefer it
classifier_set = AIClassifierSet.create_classifier_set(
self.CLASSIFIERS_DICT, self.rubric, self.ALGORITHM_ID,
self.COURSE_ID, self.ITEM_ID
)
found = self.workflow.assign_most_recent_classifier_set()
self.assertTrue(found)
self.assertEqual(classifier_set.pk, self.workflow.classifier_set.pk)
@@ -2,14 +2,19 @@
"""
Tests for the assessment Django models.
"""
import copy, ddt
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.serializers import rubric_from_dict
import copy
import ddt
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from .constants import RUBRIC
from openassessment.assessment.api.self import create_assessment
from submissions.api import create_submission
from openassessment.assessment.errors import SelfAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission
from .constants import RUBRIC
@ddt.ddt
class AssessmentTest(CacheResetTest):
@@ -212,4 +217,4 @@ class AssessmentTest(CacheResetTest):
if has_feedback:
criterion_feedback['Quality'] = "This was an assignment of average quality."
return rubric, options_selected, criterion_feedback
\ No newline at end of file
# coding=utf-8
import datetime
import pytz
import copy
import datetime
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from ddt import ddt, file_data
from mock import patch
from nose.tools import raises
import pytz
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback, AssessmentFeedbackOption,
PeerWorkflow, PeerWorkflowItem
)
from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentFeedbackOption, AssessmentPart,
PeerWorkflow, PeerWorkflowItem)
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
...@@ -410,7 +409,7 @@ class TestPeerApi(CacheResetTest): ...@@ -410,7 +409,7 @@ class TestPeerApi(CacheResetTest):
def test_peer_workflow_integrity_error(self): def test_peer_workflow_integrity_error(self):
tim_sub, __ = self._create_student_and_submission("Tim", "Tim's answer") tim_sub, __ = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer: with patch("openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!") mock_peer.side_effect = IntegrityError("Oh no!")
# This should not raise an exception # This should not raise an exception
peer_api.on_start(tim_sub["uuid"]) peer_api.on_start(tim_sub["uuid"])
...@@ -1113,7 +1112,7 @@ class TestPeerApi(CacheResetTest): ...@@ -1113,7 +1112,7 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission["uuid"]) self.assertEqual(xander_answer["uuid"], submission["uuid"])
self.assertIsNotNone(item.assessment) self.assertIsNotNone(item.assessment)
@patch.object(PeerWorkflowItem.objects, "filter") @patch("openassessment.assessment.models.peer.PeerWorkflowItem.objects.filter")
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_get_submitted_assessments_error(self, mock_filter): def test_get_submitted_assessments_error(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer") self._create_student_and_submission("Tim", "Tim's answer")
...@@ -1123,7 +1122,7 @@ class TestPeerApi(CacheResetTest): ...@@ -1123,7 +1122,7 @@ class TestPeerApi(CacheResetTest):
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"]) submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"])
self.assertEqual(1, len(submitted_assessments)) self.assertEqual(1, len(submitted_assessments))
@patch.object(PeerWorkflow.objects, 'raw') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.raw')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter): def test_failure_to_get_review_submission(self, mock_filter):
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
...@@ -1131,21 +1130,21 @@ class TestPeerApi(CacheResetTest): ...@@ -1131,21 +1130,21 @@ class TestPeerApi(CacheResetTest):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_workflow.get_submission_for_review(3) tim_workflow.get_submission_for_review(3)
@patch.object(AssessmentFeedback.objects, 'get') @patch('openassessment.assessment.models.AssessmentFeedback.objects.get')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_get_assessment_feedback_error(self, mock_filter): def test_get_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.get_assessment_feedback(tim_answer['uuid']) peer_api.get_assessment_feedback(tim_answer['uuid'])
@patch.object(PeerWorkflowItem, 'get_scored_assessments') @patch('openassessment.assessment.models.peer.PeerWorkflowItem.get_scored_assessments')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error(self, mock_filter): def test_set_assessment_feedback_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']}) peer_api.set_assessment_feedback({'submission_uuid': tim_answer['uuid']})
@patch.object(AssessmentFeedback, 'save') @patch('openassessment.assessment.models.AssessmentFeedback.save')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_set_assessment_feedback_error_on_save(self, mock_filter): def test_set_assessment_feedback_error_on_save(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
...@@ -1157,7 +1156,7 @@ class TestPeerApi(CacheResetTest): ...@@ -1157,7 +1156,7 @@ class TestPeerApi(CacheResetTest):
} }
) )
@patch.object(AssessmentFeedback, 'save') @patch('openassessment.assessment.models.AssessmentFeedback.save')
@raises(peer_api.PeerAssessmentRequestError) @raises(peer_api.PeerAssessmentRequestError)
def test_set_assessment_feedback_error_on_huge_save(self, mock_filter): def test_set_assessment_feedback_error_on_huge_save(self, mock_filter):
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
...@@ -1168,20 +1167,20 @@ class TestPeerApi(CacheResetTest): ...@@ -1168,20 +1167,20 @@ class TestPeerApi(CacheResetTest):
} }
) )
@patch.object(PeerWorkflow.objects, 'get') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get')
@raises(peer_api.PeerAssessmentWorkflowError) @raises(peer_api.PeerAssessmentWorkflowError)
def test_failure_to_get_latest_workflow(self, mock_filter): def test_failure_to_get_latest_workflow(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY) tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
PeerWorkflow.get_by_submission_uuid(tim_answer['uuid']) PeerWorkflow.get_by_submission_uuid(tim_answer['uuid'])
@patch.object(PeerWorkflow.objects, 'get_or_create') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_error(self, mock_filter): def test_create_workflow_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
self._create_student_and_submission("Tim", "Tim's answer", MONDAY) self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
@patch.object(PeerWorkflow.objects, 'get_or_create') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_create_workflow_item_error(self, mock_filter): def test_create_workflow_item_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Oh no.") mock_filter.side_effect = DatabaseError("Oh no.")
...@@ -1240,25 +1239,25 @@ class TestPeerApi(CacheResetTest): ...@@ -1240,25 +1239,25 @@ class TestPeerApi(CacheResetTest):
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_max_score_db_error(self): def test_max_score_db_error(self):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer") tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(Assessment.objects, 'filter') as mock_filter: with patch('openassessment.assessment.models.Assessment.objects.filter') as mock_filter:
mock_filter.side_effect = DatabaseError("Bad things happened") mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_rubric_max_scores(tim["uuid"]) peer_api.get_rubric_max_scores(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_median_score_db_error(self, mock_filter): def test_median_score_db_error(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened") mock_filter.side_effect = DatabaseError("Bad things happened")
tim, _ = self._create_student_and_submission("Tim", "Tim's answer") tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
peer_api.get_assessment_median_scores(tim["uuid"]) peer_api.get_assessment_median_scores(tim["uuid"])
@patch.object(Assessment.objects, 'filter') @patch('openassessment.assessment.models.Assessment.objects.filter')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_get_assessments_db_error(self, mock_filter): def test_get_assessments_db_error(self, mock_filter):
tim, _ = self._create_student_and_submission("Tim", "Tim's answer") tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
mock_filter.side_effect = DatabaseError("Bad things happened") mock_filter.side_effect = DatabaseError("Bad things happened")
peer_api.get_assessments(tim["uuid"]) peer_api.get_assessments(tim["uuid"])
@patch.object(PeerWorkflow.objects, 'get_or_create') @patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get_or_create')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_error_on_assessment_creation(self, mock_filter): def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened") mock_filter.side_effect = DatabaseError("Bad things happened")
...@@ -1274,7 +1273,7 @@ class TestPeerApi(CacheResetTest): ...@@ -1274,7 +1273,7 @@ class TestPeerApi(CacheResetTest):
MONDAY, MONDAY,
) )
@patch.object(Assessment.objects, 'filter') @patch('openassessment.assessment.models.Assessment.objects.filter')
@raises(peer_api.PeerAssessmentInternalError) @raises(peer_api.PeerAssessmentInternalError)
def test_error_on_get_assessment(self, mock_filter): def test_error_on_get_assessment(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer") self._create_student_and_submission("Tim", "Tim's answer")
...@@ -1529,7 +1528,7 @@ class TestPeerApi(CacheResetTest): ...@@ -1529,7 +1528,7 @@ class TestPeerApi(CacheResetTest):
submission, student = self._create_student_and_submission("Jim", "Jim's answer") submission, student = self._create_student_and_submission("Jim", "Jim's answer")
peer_api.get_submission_to_assess(submission['uuid'], 1) peer_api.get_submission_to_assess(submission['uuid'], 1)
with patch.object(PeerWorkflow.objects, 'get') as mock_call: with patch('openassessment.assessment.models.peer.PeerWorkflow.objects.get') as mock_call:
mock_call.side_effect = DatabaseError("Kaboom!") mock_call.side_effect = DatabaseError("Kaboom!")
peer_api.create_assessment( peer_api.create_assessment(
submission['uuid'], submission['uuid'],
......
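A minimal sketch of the patch-by-dotted-path style used in these tests, assuming an importable openassessment test environment; the helper name is made up for illustration:

from django.db import DatabaseError
from mock import patch

def simulate_assessment_filter_failure(submission_uuid):
    # Patch the manager method where it is defined, by import path, so the
    # patch applies no matter how callers imported the model.
    with patch('openassessment.assessment.models.Assessment.objects.filter') as mock_filter:
        mock_filter.side_effect = DatabaseError("Bad things happened")
        # Anything that calls Assessment.objects.filter(...) inside this block
        # now raises DatabaseError, e.g. peer_api.get_rubric_max_scores(submission_uuid).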
...@@ -4,11 +4,10 @@ Tests for assessment models. ...@@ -4,11 +4,10 @@ Tests for assessment models.
""" """
import copy import copy
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import ( from openassessment.assessment.models import Criterion, CriterionOption, InvalidRubricSelection, Rubric
Rubric, Criterion, CriterionOption, InvalidRubricSelection
)
from openassessment.assessment.test.constants import RUBRIC from openassessment.assessment.test.constants import RUBRIC
from openassessment.test_utils import CacheResetTest
class RubricIndexTest(CacheResetTest): class RubricIndexTest(CacheResetTest):
......
...@@ -5,14 +5,13 @@ Tests for self-assessment API. ...@@ -5,14 +5,13 @@ Tests for self-assessment API.
import copy import copy
import datetime import datetime
from mock import patch
import pytz import pytz
from django.db import DatabaseError from django.db import DatabaseError
from mock import patch
from openassessment.assessment.api.self import ( from openassessment.assessment.api.self import create_assessment, get_assessment, submitter_is_finished
create_assessment, submitter_is_finished, get_assessment
)
from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from submissions.api import create_submission from submissions.api import create_submission
...@@ -103,7 +102,7 @@ class TestSelfApi(CacheResetTest): ...@@ -103,7 +102,7 @@ class TestSelfApi(CacheResetTest):
# Attempt to create a self-assessment for a submission that doesn't exist # Attempt to create a self-assessment for a submission that doesn't exist
with self.assertRaises(SelfAssessmentRequestError): with self.assertRaises(SelfAssessmentRequestError):
create_assessment( create_assessment(
'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗', 'deadbeef-1234-5678-9100-1234deadbeef', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC, self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
...@@ -115,7 +114,7 @@ class TestSelfApi(CacheResetTest): ...@@ -115,7 +114,7 @@ class TestSelfApi(CacheResetTest):
# Attempt to create a self-assessment for the submission from a different user # Attempt to create a self-assessment for the submission from a different user
with self.assertRaises(SelfAssessmentRequestError): with self.assertRaises(SelfAssessmentRequestError):
create_assessment( create_assessment(
'invalid_submission_uuid', u'another user', 'deadbeef-1234-5678-9100-1234deadbeef', u'another user',
self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC, self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc) scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
) )
......
...@@ -3,18 +3,15 @@ ...@@ -3,18 +3,15 @@
Tests for assessment serializers. Tests for assessment serializers.
""" """
import copy
import json import json
import os.path import os.path
import copy
from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart
from openassessment.assessment.serializers import (AssessmentFeedbackSerializer, InvalidRubric, full_assessment_dict,
rubric_from_dict)
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import (
Assessment, AssessmentPart, AssessmentFeedback
)
from openassessment.assessment.serializers import (
rubric_from_dict, full_assessment_dict,
AssessmentFeedbackSerializer, InvalidRubric
)
from .constants import RUBRIC from .constants import RUBRIC
......
...@@ -3,30 +3,26 @@ ...@@ -3,30 +3,26 @@
Tests for staff assessments. Tests for staff assessments.
""" """
import copy import copy
import mock
from datetime import timedelta from datetime import timedelta
from ddt import data, ddt, unpack
import mock
from django.db import DatabaseError from django.db import DatabaseError
from django.test.utils import override_settings
from django.utils.timezone import now from django.utils.timezone import now
from ddt import ddt, data, unpack
from .constants import OPTIONS_SELECTED_DICT, RUBRIC, RUBRIC_OPTIONS, RUBRIC_POSSIBLE_POINTS, STUDENT_ITEM from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.test.test_ai import ( from openassessment.assessment.api import staff as staff_api
ALGORITHM_ID,
AI_ALGORITHMS,
AIGradingTest,
train_classifiers
)
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.api import staff as staff_api, ai as ai_api, peer as peer_api
from openassessment.assessment.api.self import create_assessment as self_assess
from openassessment.assessment.api.peer import create_assessment as peer_assess from openassessment.assessment.api.peer import create_assessment as peer_assess
from openassessment.assessment.models import Assessment, StaffWorkflow from openassessment.assessment.api.self import create_assessment as self_assess
from openassessment.assessment.errors import StaffAssessmentRequestError, StaffAssessmentInternalError from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import StaffWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from submissions import api as sub_api from submissions import api as sub_api
from .constants import OPTIONS_SELECTED_DICT, RUBRIC, RUBRIC_OPTIONS, RUBRIC_POSSIBLE_POINTS, STUDENT_ITEM
@ddt @ddt
class TestStaffAssessment(CacheResetTest): class TestStaffAssessment(CacheResetTest):
...@@ -41,17 +37,6 @@ class TestStaffAssessment(CacheResetTest): ...@@ -41,17 +37,6 @@ class TestStaffAssessment(CacheResetTest):
ASSESSMENT_SCORES_DDT = [key for key in OPTIONS_SELECTED_DICT] ASSESSMENT_SCORES_DDT = [key for key in OPTIONS_SELECTED_DICT]
@staticmethod @staticmethod
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def _ai_assess(sub):
"""
Helper to fulfill ai assessment requirements.
"""
# Note that CLASSIFIER_SCORE_OVERRIDES matches OPTIONS_SELECTED_DICT['most'] scores
train_classifiers(RUBRIC, AIGradingTest.CLASSIFIER_SCORE_OVERRIDES)
ai_api.on_init(sub, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
return ai_api.get_latest_assessment(sub)
@staticmethod
def _peer_assess(scores): def _peer_assess(scores):
""" """
Helper to fulfill peer assessment requirements. Helper to fulfill peer assessment requirements.
...@@ -67,7 +52,6 @@ class TestStaffAssessment(CacheResetTest): ...@@ -67,7 +52,6 @@ class TestStaffAssessment(CacheResetTest):
'staff', 'staff',
lambda sub, scorer_id, scores: staff_api.create_assessment(sub, scorer_id, scores, dict(), "", RUBRIC) lambda sub, scorer_id, scores: staff_api.create_assessment(sub, scorer_id, scores, dict(), "", RUBRIC)
), ),
('ai', lambda sub, scorer_id, scores: TestStaffAssessment._ai_assess(sub))
] ]
def _verify_done_state(self, uuid, requirements, expect_done=True): def _verify_done_state(self, uuid, requirements, expect_done=True):
...@@ -377,7 +361,7 @@ class TestStaffAssessment(CacheResetTest): ...@@ -377,7 +361,7 @@ class TestStaffAssessment(CacheResetTest):
) )
self.assertEqual(str(context_manager.exception), u"Invalid options were selected in the rubric.") self.assertEqual(str(context_manager.exception), u"Invalid options were selected in the rubric.")
@mock.patch.object(Assessment.objects, 'filter') @mock.patch('openassessment.assessment.models.Assessment.objects.filter')
def test_database_filter_error_handling(self, mock_filter): def test_database_filter_error_handling(self, mock_filter):
# Create a submission # Create a submission
tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer") tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
...@@ -403,7 +387,7 @@ class TestStaffAssessment(CacheResetTest): ...@@ -403,7 +387,7 @@ class TestStaffAssessment(CacheResetTest):
u"Error getting staff assessment scores for {}".format(tim_sub["uuid"]) u"Error getting staff assessment scores for {}".format(tim_sub["uuid"])
) )
@mock.patch.object(Assessment, 'create') @mock.patch('openassessment.assessment.models.Assessment.create')
def test_database_create_error_handling(self, mock_create): def test_database_create_error_handling(self, mock_create):
mock_create.side_effect = DatabaseError("KABOOM!") mock_create.side_effect = DatabaseError("KABOOM!")
...@@ -531,7 +515,5 @@ class TestStaffAssessment(CacheResetTest): ...@@ -531,7 +515,5 @@ class TestStaffAssessment(CacheResetTest):
steps = problem_steps steps = problem_steps
if 'peer' in steps: if 'peer' in steps:
peer_api.on_start(submission["uuid"]) peer_api.on_start(submission["uuid"])
if 'ai' in steps:
init_params['ai'] = {'rubric': RUBRIC, 'algorithm_id': ALGORITHM_ID}
workflow_api.create_workflow(submission["uuid"], steps, init_params) workflow_api.create_workflow(submission["uuid"], steps, init_params)
return submission, new_student_item return submission, new_student_item
...@@ -3,15 +3,18 @@ ...@@ -3,15 +3,18 @@
Tests for training assessment type. Tests for training assessment type.
""" """
import copy import copy
from django.db import DatabaseError
import ddt import ddt
from mock import patch from mock import patch
from django.db import DatabaseError
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingInternalError, StudentTrainingRequestError
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from .constants import STUDENT_ITEM, ANSWER, RUBRIC, EXAMPLES
from submissions import api as sub_api from submissions import api as sub_api
from openassessment.assessment.api import student_training as training_api
from openassessment.assessment.errors import StudentTrainingRequestError, StudentTrainingInternalError from .constants import ANSWER, EXAMPLES, RUBRIC, STUDENT_ITEM
from openassessment.assessment.models import StudentTrainingWorkflow
@ddt.ddt @ddt.ddt
...@@ -210,13 +213,13 @@ class StudentTrainingAssessmentTest(CacheResetTest): ...@@ -210,13 +213,13 @@ class StudentTrainingAssessmentTest(CacheResetTest):
with self.assertRaises(StudentTrainingRequestError): with self.assertRaises(StudentTrainingRequestError):
training_api.get_training_example("no_such_submission", RUBRIC, EXAMPLES) training_api.get_training_example("no_such_submission", RUBRIC, EXAMPLES)
@patch.object(StudentTrainingWorkflow.objects, 'get') @patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
def test_get_num_completed_database_error(self, mock_db): def test_get_num_completed_database_error(self, mock_db):
mock_db.side_effect = DatabaseError("Kaboom!") mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError): with self.assertRaises(StudentTrainingInternalError):
training_api.get_num_completed(self.submission_uuid) training_api.get_num_completed(self.submission_uuid)
@patch.object(StudentTrainingWorkflow.objects, 'get') @patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
def test_get_training_example_database_error(self, mock_db): def test_get_training_example_database_error(self, mock_db):
mock_db.side_effect = DatabaseError("Kaboom!") mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError): with self.assertRaises(StudentTrainingInternalError):
...@@ -224,7 +227,7 @@ class StudentTrainingAssessmentTest(CacheResetTest): ...@@ -224,7 +227,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
def test_assess_training_example_database_error(self): def test_assess_training_example_database_error(self):
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES) training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
with patch.object(StudentTrainingWorkflow.objects, 'get') as mock_db: with patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get') as mock_db:
mock_db.side_effect = DatabaseError("Kaboom!") mock_db.side_effect = DatabaseError("Kaboom!")
with self.assertRaises(StudentTrainingInternalError): with self.assertRaises(StudentTrainingInternalError):
training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected']) training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
......
...@@ -2,13 +2,14 @@ ...@@ -2,13 +2,14 @@
Tests for student training models. Tests for student training models.
""" """
import mock import mock
from django.db import IntegrityError from django.db import IntegrityError
from submissions import api as sub_api
from openassessment.assessment.models import StudentTrainingWorkflow, StudentTrainingWorkflowItem
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import ( from submissions import api as sub_api
StudentTrainingWorkflow, StudentTrainingWorkflowItem
) from .constants import ANSWER, EXAMPLES, STUDENT_ITEM
from .constants import STUDENT_ITEM, ANSWER, EXAMPLES
class StudentTrainingWorkflowTest(CacheResetTest): class StudentTrainingWorkflowTest(CacheResetTest):
...@@ -16,8 +17,8 @@ class StudentTrainingWorkflowTest(CacheResetTest): ...@@ -16,8 +17,8 @@ class StudentTrainingWorkflowTest(CacheResetTest):
Tests for the student training workflow model. Tests for the student training workflow model.
""" """
@mock.patch.object(StudentTrainingWorkflow.objects, 'get') @mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
@mock.patch.object(StudentTrainingWorkflow.objects, 'get_or_create') @mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get_or_create')
def test_create_workflow_integrity_error(self, mock_create, mock_get): def test_create_workflow_integrity_error(self, mock_create, mock_get):
# Simulate a race condition in which someone creates a workflow # Simulate a race condition in which someone creates a workflow
# after we check if it exists. This will violate the database uniqueness # after we check if it exists. This will violate the database uniqueness
...@@ -37,7 +38,7 @@ class StudentTrainingWorkflowTest(CacheResetTest): ...@@ -37,7 +38,7 @@ class StudentTrainingWorkflowTest(CacheResetTest):
workflow = StudentTrainingWorkflow.get_workflow(submission['uuid']) workflow = StudentTrainingWorkflow.get_workflow(submission['uuid'])
self.assertEqual(workflow, mock_workflow) self.assertEqual(workflow, mock_workflow)
@mock.patch.object(StudentTrainingWorkflowItem.objects, 'create') @mock.patch('openassessment.assessment.models.student_training.StudentTrainingWorkflowItem.objects.create')
def test_create_workflow_item_integrity_error(self, mock_create): def test_create_workflow_item_integrity_error(self, mock_create):
# Create a submission and workflow # Create a submission and workflow
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER) submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
......
...@@ -2,12 +2,16 @@ ...@@ -2,12 +2,16 @@
""" """
Tests for training models and serializers (common to student and AI training). Tests for training models and serializers (common to student and AI training).
""" """
from collections import OrderedDict
import copy import copy
import mock import mock
from django.db import IntegrityError from django.db import IntegrityError
from openassessment.test_utils import CacheResetTest
from openassessment.assessment.models import TrainingExample from openassessment.assessment.models import TrainingExample
from openassessment.assessment.serializers import deserialize_training_examples, serialize_training_example from openassessment.assessment.serializers import deserialize_training_examples, serialize_training_example
from openassessment.test_utils import CacheResetTest
class TrainingExampleSerializerTest(CacheResetTest): class TrainingExampleSerializerTest(CacheResetTest):
...@@ -63,17 +67,17 @@ class TrainingExampleSerializerTest(CacheResetTest): ...@@ -63,17 +67,17 @@ class TrainingExampleSerializerTest(CacheResetTest):
u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋" u" 𝖜𝖍𝖊𝖓 𝖆 𝖒𝖆𝖓 𝖙𝖆𝖐𝖊𝖘 𝖙𝖍𝖎𝖘 𝖜𝖍𝖔𝖑𝖊 𝖚𝖓𝖎𝖛𝖊𝖗𝖘𝖊 𝖋𝖔𝖗 𝖆 𝖛𝖆𝖘𝖙 𝖕𝖗𝖆𝖈𝖙𝖎𝖈𝖆𝖑 𝖏𝖔𝖐𝖊, 𝖙𝖍𝖔𝖚𝖌𝖍 𝖙𝖍𝖊 𝖜𝖎𝖙 𝖙𝖍𝖊𝖗𝖊𝖔𝖋"
u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓." u" 𝖍𝖊 𝖇𝖚𝖙 𝖉𝖎𝖒𝖑𝖞 𝖉𝖎𝖘𝖈𝖊𝖗𝖓𝖘, 𝖆𝖓𝖉 𝖒𝖔𝖗𝖊 𝖙𝖍𝖆𝖓 𝖘𝖚𝖘𝖕𝖊𝖈𝖙𝖘 𝖙𝖍𝖆𝖙 𝖙𝖍𝖊 𝖏𝖔𝖐𝖊 𝖎𝖘 𝖆𝖙 𝖓𝖔𝖇𝖔𝖉𝖞'𝖘 𝖊𝖝𝖕𝖊𝖓𝖘𝖊 𝖇𝖚𝖙 𝖍𝖎𝖘 𝖔𝖜𝖓."
), ),
'options_selected': { 'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭", u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓", u"ﻭɼค๓๓คɼ": u"𝒑𝒐𝒐𝒓",
} })
}, },
{ {
'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.", 'answer': u"Tőṕ-héávӳ ẃáś thé śhíṕ áś á díńńéŕĺéśś śtúdéńt ẃíth áĺĺ Áŕíśtőtĺé íń híś héád.",
'options_selected': { 'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓", u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт", u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
} })
}, },
{ {
'answer': ( 'answer': (
...@@ -82,10 +86,10 @@ class TrainingExampleSerializerTest(CacheResetTest): ...@@ -82,10 +86,10 @@ class TrainingExampleSerializerTest(CacheResetTest):
u"azure..... Consider all this; and then turn to this green, gentle, and most docile earth; " u"azure..... Consider all this; and then turn to this green, gentle, and most docile earth; "
u"consider them both, the sea and the land; and do you not find a strange analogy to something in yourself?" u"consider them both, the sea and the land; and do you not find a strange analogy to something in yourself?"
), ),
'options_selected': { 'options_selected': OrderedDict({
u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓", u"vøȼȺƀᵾłȺɍɏ": u"𝒑𝒐𝒐𝒓",
u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт", u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
} })
}, },
] ]
...@@ -156,8 +160,8 @@ class TrainingExampleSerializerTest(CacheResetTest): ...@@ -156,8 +160,8 @@ class TrainingExampleSerializerTest(CacheResetTest):
for example in (first_examples + second_examples): for example in (first_examples + second_examples):
self.assertIn(example, db_examples) self.assertIn(example, db_examples)
@mock.patch.object(TrainingExample.objects, 'get') @mock.patch('openassessment.assessment.models.TrainingExample.objects.get')
@mock.patch.object(TrainingExample, 'create_example') @mock.patch('openassessment.assessment.models.TrainingExample.create_example')
def test_deserialize_integrity_error(self, mock_create, mock_get): def test_deserialize_integrity_error(self, mock_create, mock_get):
# Simulate an integrity error when creating the training example # Simulate an integrity error when creating the training example
# This can occur when using repeatable-read isolation mode. # This can occur when using repeatable-read isolation mode.
......
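The OrderedDict wrappers above presumably pin the iteration order of options_selected for the serializer round-trip checks. A small sketch, not drawn from the diff, showing the order-preserving construction; note that an OrderedDict built from a plain dict literal only inherits whatever order that literal already had, whereas a list of pairs fixes it explicitly:

from collections import OrderedDict

# Explicit pairs pin the iteration order of the selected options.
options_selected = OrderedDict([
    (u"vocabulary", u"good"),
    (u"grammar", u"poor"),
])
assert options_selected.keys() == [u"vocabulary", u"grammar"]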
from django.conf.urls import patterns, url from django.conf.urls import url
urlpatterns = patterns( from openassessment.assessment import views
'openassessment.assessment.views',
urlpatterns = [
url( url(
r'^(?P<student_id>[^/]+)/(?P<course_id>[^/]+)/(?P<item_id>[^/]+)$', r'^(?P<student_id>[^/]+)/(?P<course_id>[^/]+)/(?P<item_id>[^/]+)$',
'get_evaluations_for_student_item' views.get_evaluations_for_student_item
), ),
) ]
import logging import logging
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response from django.shortcuts import render_to_response
from openassessment.assessment.api.peer import get_assessments from openassessment.assessment.api.peer import get_assessments
from submissions.api import SubmissionRequestError, get_submissions from submissions.api import SubmissionRequestError, get_submissions
......
""" """
Aggregate data for openassessment. Aggregate data for openassessment.
""" """
from collections import defaultdict
import csv import csv
import json import json
from collections import defaultdict
from django.conf import settings from django.conf import settings
from submissions import api as sub_api
from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart
from openassessment.workflow.models import AssessmentWorkflow from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.models import Assessment, AssessmentPart, AssessmentFeedback from submissions import api as sub_api
class CsvWriter(object): class CsvWriter(object):
......
...@@ -2,8 +2,7 @@ import abc ...@@ -2,8 +2,7 @@ import abc
from django.conf import settings from django.conf import settings
from ..exceptions import FileUploadInternalError from ..exceptions import FileUploadInternalError, FileUploadRequestError
from ..exceptions import FileUploadRequestError
class Settings(object): class Settings(object):
......
import os import os
from .base import BaseBackend
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse_lazy
from .base import BaseBackend
class Backend(BaseBackend): class Backend(BaseBackend):
...@@ -14,7 +15,7 @@ class Backend(BaseBackend): ...@@ -14,7 +15,7 @@ class Backend(BaseBackend):
""" """
Return the URL pointing to the ORA2 django storage upload endpoint. Return the URL pointing to the ORA2 django storage upload endpoint.
""" """
return reverse("openassessment-django-storage", kwargs={'key': key}) return reverse_lazy("openassessment-django-storage", kwargs={'key': key})
def get_download_url(self, key): def get_download_url(self, key):
""" """
......
from .base import BaseBackend
from .. import exceptions
from django.conf import settings from django.conf import settings
import django.core.cache import django.core.cache
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy
from django.utils.encoding import smart_text from django.utils.encoding import smart_text
from .. import exceptions
from .base import BaseBackend
class Backend(BaseBackend): class Backend(BaseBackend):
""" """
...@@ -47,7 +47,7 @@ class Backend(BaseBackend): ...@@ -47,7 +47,7 @@ class Backend(BaseBackend):
def _get_url(self, key): def _get_url(self, key):
key_name = self._get_key_name(key) key_name = self._get_key_name(key)
url = reverse("openassessment-filesystem-storage", kwargs={'key': key_name}) url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': key_name})
return url return url
......
import boto
import logging import logging
import boto3
from django.conf import settings from django.conf import settings
from .base import BaseBackend
from ..exceptions import FileUploadInternalError from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api") logger = logging.getLogger("openassessment.fileupload.api")
...@@ -12,15 +15,16 @@ class Backend(BaseBackend): ...@@ -12,15 +15,16 @@ class Backend(BaseBackend):
def get_upload_url(self, key, content_type): def get_upload_url(self, key, content_type):
bucket_name, key_name = self._retrieve_parameters(key) bucket_name, key_name = self._retrieve_parameters(key)
try: try:
conn = _connect_to_s3() client = _connect_to_s3()
upload_url = conn.generate_url( return client.generate_presigned_url(
expires_in=self.UPLOAD_URL_TIMEOUT, ExpiresIn=self.UPLOAD_URL_TIMEOUT,
method='PUT', ClientMethod='put_object',
bucket=bucket_name, Params={
key=key_name, 'Bucket': bucket_name,
headers={'Content-Length': '5242880', 'Content-Type': content_type} 'Key': key_name
},
HttpMethod="PUT"
) )
return upload_url
except Exception as ex: except Exception as ex:
logger.exception( logger.exception(
u"An internal exception occurred while generating an upload URL." u"An internal exception occurred while generating an upload URL."
...@@ -30,10 +34,16 @@ class Backend(BaseBackend): ...@@ -30,10 +34,16 @@ class Backend(BaseBackend):
def get_download_url(self, key): def get_download_url(self, key):
bucket_name, key_name = self._retrieve_parameters(key) bucket_name, key_name = self._retrieve_parameters(key)
try: try:
conn = _connect_to_s3() client = _connect_to_s3()
bucket = conn.get_bucket(bucket_name) return client.generate_presigned_url(
s3_key = bucket.get_key(key_name) ExpiresIn=self.DOWNLOAD_URL_TIMEOUT,
return s3_key.generate_url(expires_in=self.DOWNLOAD_URL_TIMEOUT) if s3_key else "" ClientMethod='get_object',
Params={
'Bucket': bucket_name,
'Key': key_name
},
HttpMethod="GET"
)
except Exception as ex: except Exception as ex:
logger.exception( logger.exception(
u"An internal exception occurred while generating a download URL." u"An internal exception occurred while generating a download URL."
...@@ -42,15 +52,16 @@ class Backend(BaseBackend): ...@@ -42,15 +52,16 @@ class Backend(BaseBackend):
def remove_file(self, key): def remove_file(self, key):
bucket_name, key_name = self._retrieve_parameters(key) bucket_name, key_name = self._retrieve_parameters(key)
conn = _connect_to_s3() client = _connect_to_s3()
bucket = conn.get_bucket(bucket_name) resp = client.delete_objects(
s3_key = bucket.get_key(key_name) Bucket=bucket_name,
Delete={
if s3_key: 'Objects': [{'Key':key_name}]
bucket.delete_key(s3_key) }
)
if 'Deleted' in resp and any(key_name == deleted_dict['Key'] for deleted_dict in resp['Deleted']):
return True return True
else: return False
return False
def _connect_to_s3(): def _connect_to_s3():
...@@ -65,7 +76,8 @@ def _connect_to_s3(): ...@@ -65,7 +76,8 @@ def _connect_to_s3():
aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None) aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None) aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
return boto.connect_s3( return boto3.client(
's3',
aws_access_key_id=aws_access_key_id, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key aws_secret_access_key=aws_secret_access_key
) )
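For reference, a standalone sketch of the boto3 presigned-URL calls the backend now relies on; the bucket and key names are placeholders, and credentials are assumed to come from the environment, as in _connect_to_s3 above:

import boto3

def example_presigned_urls(bucket_name="example-bucket", key_name="submissions_attachments/foo"):
    # boto3 clients pick up credentials from the environment or IAM role
    # when they are not passed explicitly.
    client = boto3.client('s3')

    # Pre-signed PUT URL, analogous to get_upload_url() above.
    upload_url = client.generate_presigned_url(
        ClientMethod='put_object',
        Params={'Bucket': bucket_name, 'Key': key_name},
        ExpiresIn=3600,
        HttpMethod='PUT',
    )

    # Pre-signed GET URL, analogous to get_download_url() above.
    download_url = client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': key_name},
        ExpiresIn=3600,
        HttpMethod='GET',
    )
    return upload_url, download_url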
...@@ -12,15 +12,21 @@ ORA2_SWIFT_KEY should correspond to Meta Temp-Url-Key configure in swift. Run ...@@ -12,15 +12,21 @@ ORA2_SWIFT_KEY should correspond to Meta Temp-Url-Key configure in swift. Run
''' '''
import logging import logging
from django.conf import settings
import swiftclient
import urlparse import urlparse
import requests import requests
import swiftclient
from django.conf import settings
from .base import BaseBackend
from ..exceptions import FileUploadInternalError from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api") logger = logging.getLogger("openassessment.fileupload.api")
# prefix paths with current version, in case we need to roll it at some point
SWIFT_BACKEND_VERSION = 1
class Backend(BaseBackend): class Backend(BaseBackend):
""" """
...@@ -32,10 +38,11 @@ class Backend(BaseBackend): ...@@ -32,10 +38,11 @@ class Backend(BaseBackend):
key, url = get_settings() key, url = get_settings()
try: try:
temp_url = swiftclient.utils.generate_temp_url( temp_url = swiftclient.utils.generate_temp_url(
path='%s/%s/%s' % (url.path, bucket_name, key_name), path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
key=key, key=key,
method='PUT', method='PUT',
seconds=self.UPLOAD_URL_TIMEOUT) seconds=self.UPLOAD_URL_TIMEOUT
)
return '%s://%s%s' % (url.scheme, url.netloc, temp_url) return '%s://%s%s' % (url.scheme, url.netloc, temp_url)
except Exception as ex: except Exception as ex:
logger.exception( logger.exception(
...@@ -48,10 +55,11 @@ class Backend(BaseBackend): ...@@ -48,10 +55,11 @@ class Backend(BaseBackend):
key, url = get_settings() key, url = get_settings()
try: try:
temp_url = swiftclient.utils.generate_temp_url( temp_url = swiftclient.utils.generate_temp_url(
path='%s/%s/%s' % (url.path, bucket_name, key_name), path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
key=key, key=key,
method='GET', method='GET',
seconds=self.DOWNLOAD_URL_TIMEOUT) seconds=self.DOWNLOAD_URL_TIMEOUT
)
download_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url) download_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
response = requests.get(download_url) response = requests.get(download_url)
return download_url if response.status_code == 200 else "" return download_url if response.status_code == 200 else ""
......
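A sketch of the versioned temp-URL construction added above, assuming python-swiftclient is installed; the endpoint, container, and key values here are placeholders:

import urlparse
import swiftclient.utils

SWIFT_BACKEND_VERSION = 1

def example_temp_url(key=u"secret-temp-url-key", endpoint=u"http://www.example.com:12345"):
    url = urlparse.urlparse(endpoint)
    # The version prefix lands in front of the account path, so the generated
    # path looks like /v1<account-path>/<bucket>/<key>.
    temp_url = swiftclient.utils.generate_temp_url(
        path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, u"bucket_name", u"submissions_attachments/foo"),
        key=key,
        method='GET',
        seconds=3600,
    )
    return '%s://%s%s' % (url.scheme, url.netloc, temp_url)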
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import boto
from boto.s3.key import Key
import ddt
import json import json
from mock import patch, Mock
import os import os
import shutil import shutil
import tempfile import tempfile
import urllib import urllib
from urlparse import urlparse from urlparse import urlparse
import boto3
import ddt
from mock import Mock, patch
from moto import mock_s3
from nose.tools import raises
from django.conf import settings from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse_lazy
from django.test import TestCase from django.test import TestCase
from django.test.utils import override_settings from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from moto import mock_s3
from mock import patch
from nose.tools import raises
from openassessment.fileupload import api from openassessment.fileupload import api, exceptions, urls
from openassessment.fileupload import exceptions
from openassessment.fileupload import views_filesystem as views from openassessment.fileupload import views_filesystem as views
from openassessment.fileupload.backends.base import Settings as FileUploadSettings from openassessment.fileupload.backends.base import Settings as FileUploadSettings
from openassessment.fileupload.backends.filesystem import get_cache as get_filesystem_cache from openassessment.fileupload.backends.filesystem import get_cache as get_filesystem_cache
...@@ -39,8 +35,8 @@ class TestFileUploadService(TestCase): ...@@ -39,8 +35,8 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
) )
def test_get_upload_url(self): def test_get_upload_url(self):
conn = boto.connect_s3() s3 = boto3.resource('s3')
conn.create_bucket('mybucket') s3.create_bucket(Bucket='mybucket')
uploadUrl = api.get_upload_url("foo", "bar") uploadUrl = api.get_upload_url("foo", "bar")
self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", uploadUrl) self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", uploadUrl)
...@@ -51,11 +47,9 @@ class TestFileUploadService(TestCase): ...@@ -51,11 +47,9 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
) )
def test_get_download_url(self): def test_get_download_url(self):
conn = boto.connect_s3() s3 = boto3.resource('s3')
bucket = conn.create_bucket('mybucket') s3.create_bucket(Bucket='mybucket')
key = Key(bucket) s3.Object('mybucket', 'submissions_attachments/foo').put(Body="How d'ya do?")
key.key = "submissions_attachments/foo"
key.set_contents_from_string("How d'ya do?")
downloadUrl = api.get_download_url("foo") downloadUrl = api.get_download_url("foo")
self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", downloadUrl) self.assertIn("https://mybucket.s3.amazonaws.com/submissions_attachments/foo", downloadUrl)
...@@ -66,11 +60,9 @@ class TestFileUploadService(TestCase): ...@@ -66,11 +60,9 @@ class TestFileUploadService(TestCase):
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
) )
def test_remove_file(self): def test_remove_file(self):
conn = boto.connect_s3() s3 = boto3.resource('s3')
bucket = conn.create_bucket('mybucket') s3.create_bucket(Bucket='mybucket')
key = Key(bucket) s3.Object('mybucket', 'submissions_attachments/foo').put(Body="Test")
key.key = "submissions_attachments/foo"
key.set_contents_from_string("Test")
result = api.remove_file("foo") result = api.remove_file("foo")
self.assertTrue(result) self.assertTrue(result)
result = api.remove_file("foo") result = api.remove_file("foo")
...@@ -90,7 +82,7 @@ class TestFileUploadService(TestCase): ...@@ -90,7 +82,7 @@ class TestFileUploadService(TestCase):
AWS_SECRET_ACCESS_KEY='bizbaz', AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
) )
@patch.object(boto, 'connect_s3') @patch.object(boto3, 'client')
@raises(exceptions.FileUploadInternalError) @raises(exceptions.FileUploadInternalError)
def test_get_upload_url_error(self, mock_s3): def test_get_upload_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes") mock_s3.side_effect = Exception("Oh noes")
...@@ -102,7 +94,7 @@ class TestFileUploadService(TestCase): ...@@ -102,7 +94,7 @@ class TestFileUploadService(TestCase):
AWS_SECRET_ACCESS_KEY='bizbaz', AWS_SECRET_ACCESS_KEY='bizbaz',
FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket" FILE_UPLOAD_STORAGE_BUCKET_NAME="mybucket"
) )
@patch.object(boto, 'connect_s3') @patch.object(boto3, 'client')
@raises(exceptions.FileUploadInternalError) @raises(exceptions.FileUploadInternalError)
def test_get_download_url_error(self, mock_s3): def test_get_download_url_error(self, mock_s3):
mock_s3.side_effect = Exception("Oh noes") mock_s3.side_effect = Exception("Oh noes")
...@@ -272,7 +264,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase): ...@@ -272,7 +264,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase):
self.assertEqual('application/octet-stream', download_response["Content-Type"]) self.assertEqual('application/octet-stream', download_response["Content-Type"])
def test_upload_with_unauthorized_key(self): def test_upload_with_unauthorized_key(self):
upload_url = reverse("openassessment-filesystem-storage", kwargs={'key': self.key_name}) upload_url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': self.key_name})
cache_before_request = get_filesystem_cache().get(self.key_name) cache_before_request = get_filesystem_cache().get(self.key_name)
upload_response = self.client.put(upload_url, data=self.content.read(), content_type=self.content_type) upload_response = self.client.put(upload_url, data=self.content.read(), content_type=self.content_type)
...@@ -282,7 +274,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase): ...@@ -282,7 +274,7 @@ class TestFileUploadServiceWithFilesystemBackend(TestCase):
self.assertIsNone(cache_after_request) self.assertIsNone(cache_after_request)
def test_download_url_with_unauthorized_key(self): def test_download_url_with_unauthorized_key(self):
download_url = reverse("openassessment-filesystem-storage", kwargs={'key': self.key_name}) download_url = reverse_lazy("openassessment-filesystem-storage", kwargs={'key': self.key_name})
views.save_to_file(self.key_name, "uploaded content") views.save_to_file(self.key_name, "uploaded content")
download_response = self.client.get(download_url) download_response = self.client.get(download_url)
...@@ -327,7 +319,7 @@ class TestSwiftBackend(TestCase): ...@@ -327,7 +319,7 @@ class TestSwiftBackend(TestCase):
result = urlparse(url) result = urlparse(url)
self.assertEqual(result.scheme, u'http') self.assertEqual(result.scheme, u'http')
self.assertEqual(result.netloc, u'www.example.com:12345') self.assertEqual(result.netloc, u'www.example.com:12345')
self.assertEqual(result.path, u'/bucket_name/submissions_attachments/foo') self.assertEqual(result.path, u'/v1/bucket_name/submissions_attachments/foo')
self.assertIn(result.params, 'temp_url_sig=') self.assertIn(result.params, 'temp_url_sig=')
self.assertIn(result.params, 'temp_url_expires=') self.assertIn(result.params, 'temp_url_expires=')
......
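A self-contained sketch of the moto/boto3 fixture pattern these tests use, with a hypothetical bucket and key; no real AWS access or credentials are involved:

import boto3
from moto import mock_s3

@mock_s3
def example_fake_s3_roundtrip():
    # moto intercepts the boto3 calls, so the bucket exists only in memory.
    s3 = boto3.resource('s3')
    s3.create_bucket(Bucket='mybucket')
    s3.Object('mybucket', 'submissions_attachments/foo').put(Body="How d'ya do?")
    body = s3.Object('mybucket', 'submissions_attachments/foo').get()['Body'].read()
    assert body == "How d'ya do?"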
from django.conf.urls import patterns, url from django.conf.urls import url
urlpatterns = patterns( from openassessment.fileupload import views_django_storage, views_filesystem
'openassessment.fileupload.views_django_storage',
url(r'^django/(?P<key>.+)/$', 'django_storage', name='openassessment-django-storage'),
)
urlpatterns += patterns( urlpatterns = [
'openassessment.fileupload.views_filesystem', url(r'^django/(?P<key>.+)/$', views_django_storage.django_storage, name='openassessment-django-storage'),
url(r'^(?P<key>.+)/$', 'filesystem_storage', name='openassessment-filesystem-storage'), url(r'^(?P<key>.+)/$', views_filesystem.filesystem_storage, name='openassessment-filesystem-storage'),
) ]
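django.conf.urls.patterns() was deprecated in Django 1.8 and removed in 1.10, which is why both urlconfs above become plain lists of url() entries referencing imported view callables. A minimal sketch with a hypothetical view and route name, showing how such a named route can still be resolved lazily (the reverse_lazy calls in the storage backends rely on this):

from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse

def example_view(request, key):
    # Hypothetical view, present only to give the route something to point at.
    return HttpResponse(key)

urlpatterns = [
    url(r'^example/(?P<key>.+)/$', example_view, name='example-storage'),
]

# reverse_lazy defers URL resolution until the value is actually used,
# so it is safe to build URLs like this at import time.
EXAMPLE_URL = reverse_lazy('example-storage', kwargs={'key': 'foo'})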
...@@ -3,13 +3,13 @@ import json ...@@ -3,13 +3,13 @@ import json
import os import os
from django.conf import settings from django.conf import settings
from django.shortcuts import HttpResponse, Http404 from django.shortcuts import Http404, HttpResponse
from django.utils import timezone from django.utils import timezone
from django.views.decorators.http import require_http_methods from django.views.decorators.http import require_http_methods
from . import exceptions from . import exceptions
from .backends.filesystem import is_upload_url_available, is_download_url_available
from .backends.base import Settings from .backends.base import Settings
from .backends.filesystem import is_download_url_available, is_upload_url_available
@require_http_methods(["PUT", "GET"]) @require_http_methods(["PUT", "GET"])
......
...@@ -7,7 +7,6 @@ This command differs from upload_oa_data in that it places all the data into one ...@@ -7,7 +7,6 @@ This command differs from upload_oa_data in that it places all the data into one
Generates the same format as the instructor dashboard downloads. Generates the same format as the instructor dashboard downloads.
""" """
import csv import csv
from optparse import make_option
import os import os
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand, CommandError
...@@ -21,25 +20,34 @@ class Command(BaseCommand): ...@@ -21,25 +20,34 @@ class Command(BaseCommand):
""" """
help = ("Usage: collect_ora2_data <course_id> --output-dir=<output_dir>") help = ("Usage: collect_ora2_data <course_id> --output-dir=<output_dir>")
args = "<course_id>"
option_list = BaseCommand.option_list + ( def add_arguments(self, parser):
make_option('-o', '--output-dir', parser.add_argument('course_id', nargs='+', type=unicode)
action='store', dest='output_dir', default=None, parser.add_argument(
help="Write output to a directory rather than stdout"), '-o',
make_option('-n', '--file-name', '--output-dir',
action='store', dest='file_name', default=None, action='store',
help="Write CSV file to the given name"), dest='output_dir',
) default=None,
help="Write output to a directory rather than stdout"
)
parser.add_argument(
'-n',
'--file-name',
action='store',
dest='file_name',
default=None,
help="Write CSV file to the given name"
)
def handle(self, *args, **options): def handle(self, *args, **options):
""" """
Run the command. Run the command.
""" """
if not args: if not options['course_id']:
raise CommandError("Course ID must be specified to fetch data") raise CommandError("Course ID must be specified to fetch data")
course_id = args[0] course_id = options['course_id']
if options['file_name']: if options['file_name']:
file_name = options['file_name'] file_name = options['file_name']
......
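A hypothetical invocation of the rewritten command via Django's call_command; the course ID is made up, and the keyword names follow the dest values declared in add_arguments above:

from django.core.management import call_command

call_command(
    'collect_ora2_data',
    u'course-v1:edX+DemoX+Demo_Course',   # positional course_id argument
    output_dir='/tmp/ora2-data',
    file_name='ora2-data.csv',
)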
""" """
Create dummy submissions and assessments for testing. Create dummy submissions and assessments for testing.
""" """
from uuid import uuid4
import copy import copy
from django.core.management.base import BaseCommand, CommandError from uuid import uuid4
import loremipsum import loremipsum
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api from django.core.management.base import BaseCommand, CommandError
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
STEPS = ['peer', 'self'] STEPS = ['peer', 'self']
......
"""
Gives the time taken by
find_active_assessments
get_submission_for_review
get_submission_for_over_grading
methods for a particular set of workflows.
"""
import random
import datetime
from django.core.management.base import BaseCommand
from openassessment.assessment.models import PeerWorkflow
class Command(BaseCommand):
"""
Note the time taken by queries.
"""
help = ("Test the performance for "
"find_active_assessments, "
"get_submission_for_review & "
"get_submission_for_over_grading"
"methods.")
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
"""
Execute the command.
Args:
None
"""
peer_workflow_count = PeerWorkflow.objects.filter(submission_uuid__isnull=False).count()
peer_workflow_ids = [random.randint(1, peer_workflow_count) for num in range(100)]
peer_workflows = list(PeerWorkflow.objects.filter(id__in=peer_workflow_ids))
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.find_active_assessments()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (find_active_assessments) method Is: %s " % time_taken
#### get_submission_for_review ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_review(2)
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_review) method Is: %s " % time_taken
#### get_submission_for_over_grading ####
pw_dt_before = datetime.datetime.now()
for peer_workflow in peer_workflows:
peer_workflow.get_submission_for_over_grading()
pw_dt_after = datetime.datetime.now()
time_taken = pw_dt_after - pw_dt_before
print "Time taken by (get_submission_for_over_grading) method Is: %s " % time_taken
# -*- coding: utf-8 -*-
"""
Simulate failure of the worker AI grading tasks.
When the workers fail to successfully complete AI grading,
the AI grading workflow in the database will never be marked complete.
To simulate the error condition, therefore, we create incomplete
AI grading workflows without scheduling a grading task.
To recover, a staff member can reschedule incomplete grading tasks.
"""
from django.core.management.base import BaseCommand, CommandError
from submissions import api as sub_api
from openassessment.assessment.models import AIGradingWorkflow, AIClassifierSet
from openassessment.assessment.serializers import rubric_from_dict
from openassessment.assessment.worker.algorithm import AIAlgorithm
class Command(BaseCommand):
"""
Create submissions and AI incomplete grading workflows.
"""
help = (
u"Simulate failure of the worker AI grading tasks "
u"by creating incomplete AI grading workflows in the database."
)
args = '<COURSE_ID> <PROBLEM_ID> <NUM_SUBMISSIONS> <ALGORITHM_ID>'
RUBRIC_OPTIONS = [
{
"order_num": 0,
"name": u"poor",
"explanation": u"Poor job!",
"points": 0,
},
{
"order_num": 1,
"name": u"good",
"explanation": u"Good job!",
"points": 1,
}
]
RUBRIC = {
'prompts': [{"description": u"Test prompt"}],
'criteria': [
{
"order_num": 0,
"name": u"vocabulary",
"prompt": u"Vocabulary",
"options": RUBRIC_OPTIONS
},
{
"order_num": 1,
"name": u"grammar",
"prompt": u"Grammar",
"options": RUBRIC_OPTIONS
}
]
}
EXAMPLES = {
"vocabulary": [
AIAlgorithm.ExampleEssay(
text=u"World Food Day is celebrated every year around the world on 16 October in honor "
u"of the date of the founding of the Food and Agriculture "
u"Organization of the United Nations in 1945.",
score=0
),
AIAlgorithm.ExampleEssay(
text=u"Since 1981, World Food Day has adopted a different theme each year "
u"in order to highlight areas needed for action and provide a common focus.",
score=1
),
],
"grammar": [
AIAlgorithm.ExampleEssay(
text=u"Most of the themes revolve around agriculture because only investment in agriculture ",
score=0
),
AIAlgorithm.ExampleEssay(
text=u"In spite of the importance of agriculture as the driving force "
u"in the economies of many developing countries, this "
u"vital sector is frequently starved of investment.",
score=1
)
]
}
STUDENT_ID = u'test_student'
ANSWER = {"text": 'test answer'}
def handle(self, *args, **options):
"""
Execute the command.
Args:
course_id (unicode): The ID of the course to create submissions/workflows in.
item_id (unicode): The ID of the problem in the course.
num_submissions (int): The number of submissions/workflows to create.
algorithm_id (unicode): The ID of the ML algorithm to use ("fake" or "ease")
Raises:
CommandError
"""
if len(args) < 4:
raise CommandError(u"Usage: simulate_ai_grading_error {}".format(self.args))
# Parse arguments
course_id = args[0].decode('utf-8')
item_id = args[1].decode('utf-8')
num_submissions = int(args[2])
algorithm_id = args[3].decode('utf-8')
# Create the rubric model
rubric = rubric_from_dict(self.RUBRIC)
# Train classifiers
print u"Training classifiers using {algorithm_id}...".format(algorithm_id=algorithm_id)
algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
classifier_data = {
criterion_name: algorithm.train_classifier(example)
for criterion_name, example in self.EXAMPLES.iteritems()
}
print u"Successfully trained classifiers."
# Create the classifier set
classifier_set = AIClassifierSet.create_classifier_set(
classifier_data, rubric, algorithm_id, course_id, item_id
)
print u"Successfully created classifier set with id {}".format(classifier_set.pk)
# Create submissions and grading workflows
for num in range(num_submissions):
student_item = {
'course_id': course_id,
'item_id': item_id,
'item_type': 'openassessment',
'student_id': "{base}_{num}".format(base=self.STUDENT_ID, num=num)
}
submission = sub_api.create_submission(student_item, self.ANSWER)
workflow = AIGradingWorkflow.start_workflow(
submission['uuid'], self.RUBRIC, algorithm_id
)
workflow.classifier_set = classifier_set
workflow.save()
print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
num=num, uuid=workflow.uuid
)
""" """
Generate CSV files for submission and assessment data, then upload to S3. Generate CSV files for submission and assessment data, then upload to S3.
""" """
import sys import datetime
import os import os
import os.path import os.path
import datetime
import shutil import shutil
import tempfile import sys
import tarfile import tarfile
import boto import tempfile
from boto.s3.key import Key
from django.core.management.base import BaseCommand, CommandError import boto3
from django.conf import settings from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from openassessment.data import CsvWriter from openassessment.data import CsvWriter
...@@ -135,16 +137,24 @@ class Command(BaseCommand): ...@@ -135,16 +137,24 @@ class Command(BaseCommand):
# environment vars or configuration files instead. # environment vars or configuration files instead.
aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None) aws_access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None) aws_secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
conn = boto.connect_s3( client = boto3.client(
's3',
aws_access_key_id=aws_access_key_id, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key aws_secret_access_key=aws_secret_access_key
) )
bucket = client.create_bucket(Bucket=s3_bucket)
bucket = conn.get_bucket(s3_bucket)
key_name = os.path.join(course_id, os.path.split(file_path)[1]) key_name = os.path.join(course_id, os.path.split(file_path)[1])
key = Key(bucket=bucket, name=key_name) client.put_object(Bucket=s3_bucket, Key=key_name, Body=open(file_path, 'rb'))
key.set_contents_from_filename(file_path) url = client.generate_presigned_url(
url = key.generate_url(self.URL_EXPIRATION_HOURS * 3600) ExpiresIn=self.URL_EXPIRATION_HOURS * 3600,
ClientMethod='get_object',
Params={
'Bucket': s3_bucket,
'Key': key_name
},
HttpMethod="GET"
)
# Store the key and url in the history # Store the key and url in the history
self._history.append({'key': key_name, 'url': url}) self._history.append({'key': key_name, 'url': url})
......
...@@ -2,11 +2,12 @@ ...@@ -2,11 +2,12 @@
Tests for the management command that creates dummy submissions. Tests for the management command that creates dummy submissions.
""" """
from submissions import api as sub_api from django.test import TestCase
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.management.commands import create_oa_submissions from openassessment.management.commands import create_oa_submissions
from django.test import TestCase from submissions import api as sub_api
class CreateSubmissionsTest(TestCase): class CreateSubmissionsTest(TestCase):
......
# -*- coding: utf-8 -*-
"""
Tests for the simulate AI grading error management command.
"""
from django.test.utils import override_settings
from openassessment.test_utils import CacheResetTest
from openassessment.management.commands import simulate_ai_grading_error
from openassessment.assessment.models import AIGradingWorkflow
from openassessment.assessment.worker.grading import grade_essay
class SimulateAIGradingErrorTest(CacheResetTest):
"""
Tests for the simulate AI grading error management command.
"""
COURSE_ID = u"TɘꙅT ↄoUᴙꙅɘ"
ITEM_ID = u"𝖙𝖊𝖘𝖙 𝖎𝖙𝖊𝖒"
NUM_SUBMISSIONS = 20
AI_ALGORITHMS = {
"fake": "openassessment.assessment.worker.algorithm.FakeAIAlgorithm"
}
@override_settings(ORA2_AI_ALGORITHMS=AI_ALGORITHMS)
def test_simulate_ai_grading_error(self):
# Run the command
cmd = simulate_ai_grading_error.Command()
cmd.handle(
self.COURSE_ID.encode('utf-8'),
self.ITEM_ID.encode('utf-8'),
self.NUM_SUBMISSIONS,
"fake"
)
# Check that the correct number of incomplete workflows
# were created. These workflows should still have
# a classifier set, though, because otherwise they
# wouldn't have been scheduled for grading
# (that is, the submissions were made before classifier
# training completed).
incomplete_workflows = AIGradingWorkflow.objects.filter(
classifier_set__isnull=False,
completed_at__isnull=True
)
num_errors = incomplete_workflows.count()
self.assertEqual(self.NUM_SUBMISSIONS, num_errors)
# Verify that we can complete the workflows successfully
# (that is, make sure the classifier data is valid)
# We're calling a Celery task method here,
# but we're NOT using `apply_async`, so this will
# execute synchronously.
for workflow in incomplete_workflows:
grade_essay(workflow.uuid)
# Now there should be no incomplete workflows
remaining_incomplete = AIGradingWorkflow.objects.filter(
classifier_set__isnull=False,
completed_at__isnull=True
).count()
self.assertEqual(remaining_incomplete, 0)
...@@ -2,12 +2,13 @@ ...@@ -2,12 +2,13 @@
""" """
Tests for management command that uploads submission/assessment data. Tests for management command that uploads submission/assessment data.
""" """
from StringIO import StringIO
import tarfile import tarfile
import boto
import boto3
import moto import moto
from openassessment.test_utils import CacheResetTest
from openassessment.management.commands import upload_oa_data from openassessment.management.commands import upload_oa_data
from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from submissions import api as sub_api from submissions import api as sub_api
...@@ -29,8 +30,8 @@ class UploadDataTest(CacheResetTest): ...@@ -29,8 +30,8 @@ class UploadDataTest(CacheResetTest):
@moto.mock_s3 @moto.mock_s3
def test_upload(self): def test_upload(self):
# Create an S3 bucket using the fake S3 implementation # Create an S3 bucket using the fake S3 implementation
conn = boto.connect_s3() s3 = boto3.resource('s3')
conn.create_bucket(self.BUCKET_NAME) s3.create_bucket(Bucket=self.BUCKET_NAME)
# Create some submissions to ensure that we cover # Create some submissions to ensure that we cover
# the progress indicator code. # the progress indicator code.
...@@ -53,12 +54,10 @@ class UploadDataTest(CacheResetTest): ...@@ -53,12 +54,10 @@ class UploadDataTest(CacheResetTest):
# Retrieve the uploaded file from the fake S3 implementation # Retrieve the uploaded file from the fake S3 implementation
self.assertEqual(len(cmd.history), 1) self.assertEqual(len(cmd.history), 1)
bucket = conn.get_all_buckets()[0] s3.Object(self.BUCKET_NAME, cmd.history[0]['key']).download_file("tmp-test-file.tar.gz")
key = bucket.get_key(cmd.history[0]['key'])
contents = StringIO(key.get_contents_as_string())
# Expect that the contents contain all the expected CSV files # Expect that the contents contain all the expected CSV files
with tarfile.open(mode="r:gz", fileobj=contents) as tar: with tarfile.open("tmp-test-file.tar.gz", mode="r:gz") as tar:
file_sizes = { file_sizes = {
member.name: member.size member.name: member.size
for member in tar.getmembers() for member in tar.getmembers()
...@@ -69,4 +68,4 @@ class UploadDataTest(CacheResetTest): ...@@ -69,4 +68,4 @@ class UploadDataTest(CacheResetTest):
# Expect that we generated a URL for the bucket # Expect that we generated a URL for the bucket
url = cmd.history[0]['url'] url = cmd.history[0]['url']
self.assertIn("https://{}".format(self.BUCKET_NAME), url) self.assertIn("https://s3.amazonaws.com/{}".format(self.BUCKET_NAME), url)
...@@ -181,11 +181,7 @@ ...@@ -181,11 +181,7 @@
</ul> </ul>
<p class="openassessment_description" id="openassessment_step_select_description"> <p class="openassessment_description" id="openassessment_step_select_description">
{% if 'example_based_assessment' in editor_assessments_order %} {% trans "In this assignment, you can include learner training, peer assessment, self assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include a learner training step, it must precede all other steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
{% trans "In this assignment, you can include learner training, peer assessment, self assessment, example based assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include an example based assessment step, it must precede all other steps. If you include a learner training step, it must precede peer and self assessment steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
{% else %}
{% trans "In this assignment, you can include learner training, peer assessment, self assessment, and staff assessment steps. Select the steps that you want below, and then drag them into the order you want. If you include a learner training step, it must precede all other steps. If you include a staff assessment step, it must be the final step. After you release an ORA assignment, you cannot change the type and number of assessment steps." %}
{% endif %}
</p> </p>
<ol id="openassessment_assessment_module_settings_editors"> <ol id="openassessment_assessment_module_settings_editors">
{% for assessment in editor_assessments_order %} {% for assessment in editor_assessments_order %}
......
{% load i18n %}
{% spaceless %}
<li class="openassessment_assessment_module_settings_editor" id="oa_ai_assessment_editor">
<div class="drag-handle action"></div>
<div class="openassessment_inclusion_wrapper">
<input id="include_ai_assessment" type="checkbox"
{% if assessments.example_based_assessment %} checked="true" {% endif %}>
<label for="include_ai_assessment">{% trans "Step: Example-Based Assessment" %}</label>
</div>
<div class="openassessment_assessment_module_editor">
<p id="ai_assessment_description_closed" class="openassessment_description_closed {% if assessments.example_based_assessment %} is--hidden {% endif %}">
{% trans "An algorithm assesses learners' responses by comparing the responses to pre-assessed sample responses that the instructor provides."%}
</p>
<div id="ai_assessment_settings_editor" class="assessment_settings_wrapper {% if not assessments.example_based_assessment %} is--hidden {% endif %}">
<p class="openassessment_description">
{% trans "Enter one or more sample responses that you've created, and then specify the options that you would choose for each criterion in your rubric. Note that you must add your rubric to the Rubric tab before you can complete this step." %}
</p>
<textarea id="ai_training_examples">{{ assessments.example_based_assessment.examples }}</textarea>
</div>
</div>
</li>
{% endspaceless %}
...@@ -33,51 +33,6 @@ ...@@ -33,51 +33,6 @@
<div class="openassessment__student-info staff-info__student__report"></div> <div class="openassessment__student-info staff-info__student__report"></div>
</div> </div>
{% if display_schedule_training %}
<div class="staff-info__classifierset ui-staff__content__section">
{% if classifierset %}
<table class="staff-info__classifierset__table">
<caption class="title">{% trans "Classifier set" %}</caption>
<thead>
<th abbr="Field" scope="col">{% trans "Field" %}</th>
<th abbr="Value" scope="col">{% trans "Value" %}</th>
</thead>
<tbody>
<tr>
<td class="value">{% trans "Created at" %}</td>
<td class="value">{{ classifierset.created_at }}</td>
</tr>
<tr>
<td class="value">{% trans "Algorithm ID" %}</td>
<td class="value">{{ classifierset.algorithm_id }}</td>
</tr>
<tr>
<td class="value">{% trans "Course ID" %}</td>
<td class="value">{{ classifierset.course_id }}</td>
</tr>
<tr>
<td class="value">{% trans "Item ID" %}</td>
<td class="value">{{ classifierset.item_id }}</td>
</tr>
</tbody>
</table>
{% else %}
{% trans "No classifiers are available for this problem" %}
{% endif %}
</div>
<div class="staff-info__status ui-staff__content__section">
<button class="action--submit action--submit-training">{% trans "Schedule Example-Based Assessment Training" %}</button>
<div class="schedule_training_message"></div>
</div>
{% endif %}
{% if display_reschedule_unfinished_tasks %}
<div class="staff-info__status ui-staff__content__section">
<button class="action--submit action--submit-unfinished-tasks">{% trans "Reschedule All Unfinished Example-Based Assessment Grading Tasks" %}</button>
<div class="reschedule_unfinished_tasks_message"></div>
</div>
{% endif %}
</div> </div>
</div> </div>
</div> </div>
......
...@@ -68,11 +68,6 @@ ...@@ -68,11 +68,6 @@
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="self" assessments=self_assessment %} {% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="self" assessments=self_assessment %}
{% endif %} {% endif %}
{% if example_based_assessment %}
{% trans "Example-Based Assessment" as translated_title %}
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="example_based" assessments=example_based_assessment %}
{% endif %}
{% if staff_assessment %} {% if staff_assessment %}
{% trans "Staff Assessment for This Learner" as translated_title %} {% trans "Staff Assessment for This Learner" as translated_title %}
{% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="staff" assessments=staff_assessment %} {% include "openassessmentblock/staff_area/oa_student_info_assessment_detail.html" with class_type="staff" assessments=staff_assessment %}
......
...@@ -3,16 +3,11 @@ Test utilities ...@@ -3,16 +3,11 @@ Test utilities
""" """
from django.core.cache import cache from django.core.cache import cache
from django.test import TestCase, TransactionTestCase from django.test import TestCase, TransactionTestCase
from openassessment.assessment.models.ai import (
CLASSIFIERS_CACHE_IN_MEM, CLASSIFIERS_CACHE_IN_FILE
)
def _clear_all_caches(): def _clear_all_caches():
"""Clear the default cache and any custom caches.""" """Clear the default cache and any custom caches."""
cache.clear() cache.clear()
CLASSIFIERS_CACHE_IN_MEM.clear()
CLASSIFIERS_CACHE_IN_FILE.clear()
class CacheResetTest(TestCase): class CacheResetTest(TestCase):
......
...@@ -4,9 +4,8 @@ Create factories for assessments and all of their related models. ...@@ -4,9 +4,8 @@ Create factories for assessments and all of their related models.
import factory import factory
from factory.django import DjangoModelFactory from factory.django import DjangoModelFactory
from openassessment.assessment.models import ( from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentFeedbackOption, AssessmentPart,
Assessment, AssessmentPart, Rubric, Criterion, CriterionOption, AssessmentFeedbackOption, AssessmentFeedback Criterion, CriterionOption, Rubric)
)
class RubricFactory(DjangoModelFactory): class RubricFactory(DjangoModelFactory):
......
...@@ -3,23 +3,24 @@ ...@@ -3,23 +3,24 @@
Tests for openassessment data aggregation. Tests for openassessment data aggregation.
""" """
import os.path
from StringIO import StringIO from StringIO import StringIO
import csv import csv
from django.core.management import call_command import os.path
import ddt import ddt
from submissions import api as sub_api
from django.core.management import call_command
import openassessment.assessment.api.peer as peer_api
from openassessment.data import CsvWriter, OraAggregateData
from openassessment.test_utils import TransactionCacheResetTest from openassessment.test_utils import TransactionCacheResetTest
from openassessment.tests.factories import * # pylint: disable=wildcard-import from openassessment.tests.factories import * # pylint: disable=wildcard-import
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from openassessment.data import CsvWriter, OraAggregateData from submissions import api as sub_api
import openassessment.assessment.api.peer as peer_api
COURSE_ID = "Test_Course" COURSE_ID = "Test_Course"
STUDENT_ID = "Student" STUDENT_ID = u"Student"
SCORER_ID = "Scorer" SCORER_ID = "Scorer"
...@@ -82,8 +83,8 @@ FEEDBACK_TEXT = u"𝓨𝓸𝓾 𝓼𝓱𝓸𝓾𝓵𝓭𝓷'𝓽 𝓰𝓲𝓿 ...@@ -82,8 +83,8 @@ FEEDBACK_TEXT = u"𝓨𝓸𝓾 𝓼𝓱𝓸𝓾𝓵𝓭𝓷'𝓽 𝓰𝓲𝓿
FEEDBACK_OPTIONS = { FEEDBACK_OPTIONS = {
"feedback_text": FEEDBACK_TEXT, "feedback_text": FEEDBACK_TEXT,
"options": [ "options": [
'I disliked this assessment', u'I disliked this assessment',
'I felt this assessment was unfair', u'I felt this assessment was unfair',
] ]
} }
...@@ -363,6 +364,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest): ...@@ -363,6 +364,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
def setUp(self): def setUp(self):
super(TestOraAggregateDataIntegration, self).setUp() super(TestOraAggregateDataIntegration, self).setUp()
self.maxDiff = None
# Create submissions and assessments # Create submissions and assessments
self.submission = self._create_submission(STUDENT_ITEM) self.submission = self._create_submission(STUDENT_ITEM)
self.scorer_submission = self._create_submission(SCORER_ITEM) self.scorer_submission = self._create_submission(SCORER_ITEM)
...@@ -370,6 +372,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest): ...@@ -370,6 +372,7 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
self.possible_points = 2 self.possible_points = 2
peer_api.get_submission_to_assess(self.scorer_submission['uuid'], 1) peer_api.get_submission_to_assess(self.scorer_submission['uuid'], 1)
self.assessment = self._create_assessment(self.scorer_submission['uuid']) self.assessment = self._create_assessment(self.scorer_submission['uuid'])
self.assertEqual(self.assessment['parts'][0]['criterion']['label'], "criterion_1")
sub_api.set_score(self.submission['uuid'], self.earned_points, self.possible_points) sub_api.set_score(self.submission['uuid'], self.earned_points, self.possible_points)
self.score = sub_api.get_score(STUDENT_ITEM) self.score = sub_api.get_score(STUDENT_ITEM)
...@@ -470,15 +473,15 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest): ...@@ -470,15 +473,15 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
), ),
u"Assessment #{id}\n-- {label}: {option_label} ({points})\n".format( u"Assessment #{id}\n-- {label}: {option_label} ({points})\n".format(
id=self.assessment['id'], id=self.assessment['id'],
label=self.assessment['parts'][1]['criterion']['label'], label=self.assessment['parts'][0]['criterion']['label'],
option_label=self.assessment['parts'][1]['criterion']['options'][0]['label'], option_label=self.assessment['parts'][0]['criterion']['options'][0]['label'],
points=self.assessment['parts'][1]['criterion']['options'][0]['points'], points=self.assessment['parts'][0]['criterion']['options'][0]['points'],
) + ) +
u"-- {label}: {option_label} ({points})\n-- feedback: {feedback}\n".format( u"-- {label}: {option_label} ({points})\n-- feedback: {feedback}\n".format(
label=self.assessment['parts'][0]['criterion']['label'], label=self.assessment['parts'][1]['criterion']['label'],
option_label=self.assessment['parts'][0]['criterion']['options'][1]['label'], option_label=self.assessment['parts'][1]['criterion']['options'][1]['label'],
points=self.assessment['parts'][0]['criterion']['options'][1]['points'], points=self.assessment['parts'][1]['criterion']['options'][1]['points'],
feedback=self.assessment['parts'][0]['feedback'], feedback=self.assessment['parts'][1]['feedback'],
), ),
self.score['created_at'], self.score['created_at'],
self.score['points_earned'], self.score['points_earned'],
...@@ -532,19 +535,19 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest): ...@@ -532,19 +535,19 @@ class TestOraAggregateDataIntegration(TransactionCacheResetTest):
self.assertIn(item_id2, data) self.assertIn(item_id2, data)
self.assertIn(item_id3, data) self.assertIn(item_id3, data)
for item in [ITEM_ID, item_id2, item_id3]: for item in [ITEM_ID, item_id2, item_id3]:
self.assertEqual({'total', 'training', 'peer', 'self', 'staff', 'waiting', 'done', 'ai', 'cancelled'}, self.assertEqual({'total', 'training', 'peer', 'self', 'staff', 'waiting', 'done', 'cancelled'},
set(data[item].keys())) set(data[item].keys()))
self.assertEqual(data[ITEM_ID], { self.assertEqual(data[ITEM_ID], {
'total': 2, 'training': 0, 'peer': 2, 'self': 0, 'staff': 0, 'waiting': 0, 'total': 2, 'training': 0, 'peer': 2, 'self': 0, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0 'done': 0, 'cancelled': 0
}) })
self.assertEqual(data[item_id2], { self.assertEqual(data[item_id2], {
'total': 2, 'training': 0, 'peer': 1, 'self': 1, 'staff': 0, 'waiting': 0, 'total': 2, 'training': 0, 'peer': 1, 'self': 1, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0 'done': 0, 'cancelled': 0
}) })
self.assertEqual(data[item_id3], { self.assertEqual(data[item_id3], {
'total': 3, 'training': 0, 'peer': 1, 'self': 2, 'staff': 0, 'waiting': 0, 'total': 3, 'training': 0, 'peer': 1, 'self': 2, 'staff': 0, 'waiting': 0,
'done': 0, 'ai': 0, 'cancelled': 0 'done': 0, 'cancelled': 0
}) })
data = OraAggregateData.collect_ora2_responses(COURSE_ID, ['staff', 'peer']) data = OraAggregateData.collect_ora2_responses(COURSE_ID, ['staff', 'peer'])
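For context, a short sketch of how the aggregation call exercised above is used after this change; the course identifier is illustrative, and the only claim made about the result shape is what the assertions check (the per-item dict no longer carries an 'ai' bucket):

from openassessment.data import OraAggregateData

# Per-item workflow status counts for a course.
counts = OraAggregateData.collect_ora2_responses("course-v1:Org+Course+Run")
done_per_item = {item_id: by_status['done'] for item_id, by_status in counts.items()}

# Aggregation can also be restricted to particular statuses, as the test does.
staff_and_peer = OraAggregateData.collect_ora2_responses("course-v1:Org+Course+Run", ['staff', 'peer'])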
......
...@@ -8,12 +8,11 @@ from django.db import DatabaseError ...@@ -8,12 +8,11 @@ from django.db import DatabaseError
from openassessment.assessment.errors import PeerAssessmentError, PeerAssessmentInternalError from openassessment.assessment.errors import PeerAssessmentError, PeerAssessmentInternalError
from submissions import api as sub_api from submissions import api as sub_api
from .errors import (AssessmentWorkflowError, AssessmentWorkflowInternalError, AssessmentWorkflowNotFoundError,
AssessmentWorkflowRequestError)
from .models import AssessmentWorkflow, AssessmentWorkflowCancellation from .models import AssessmentWorkflow, AssessmentWorkflowCancellation
from .serializers import AssessmentWorkflowSerializer, AssessmentWorkflowCancellationSerializer from .serializers import AssessmentWorkflowCancellationSerializer, AssessmentWorkflowSerializer
from .errors import (
AssessmentWorkflowError, AssessmentWorkflowInternalError,
AssessmentWorkflowRequestError, AssessmentWorkflowNotFoundError
)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
...@@ -4,8 +4,8 @@ from __future__ import unicode_literals ...@@ -4,8 +4,8 @@ from __future__ import unicode_literals
from django.db import migrations, models from django.db import migrations, models
import django.utils.timezone import django.utils.timezone
import model_utils.fields import model_utils.fields
import django_extensions.db.fields
class Migration(migrations.Migration): class Migration(migrations.Migration):
...@@ -23,7 +23,7 @@ class Migration(migrations.Migration): ...@@ -23,7 +23,7 @@ class Migration(migrations.Migration):
('status', model_utils.fields.StatusField(default=b'peer', max_length=100, verbose_name='status', no_check_for_status=True, choices=[(b'peer', b'peer'), (b'ai', b'ai'), (b'self', b'self'), (b'training', b'training'), (b'waiting', b'waiting'), (b'done', b'done'), (b'cancelled', b'cancelled')])), ('status', model_utils.fields.StatusField(default=b'peer', max_length=100, verbose_name='status', no_check_for_status=True, choices=[(b'peer', b'peer'), (b'ai', b'ai'), (b'self', b'self'), (b'training', b'training'), (b'waiting', b'waiting'), (b'done', b'done'), (b'cancelled', b'cancelled')])),
('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, verbose_name='status changed', monitor='status')), ('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, verbose_name='status changed', monitor='status')),
('submission_uuid', models.CharField(unique=True, max_length=36, db_index=True)), ('submission_uuid', models.CharField(unique=True, max_length=36, db_index=True)),
('uuid', django_extensions.db.fields.UUIDField(db_index=True, unique=True, version=1, editable=False, blank=True)), ('uuid', models.UUIDField(db_index=True, unique=True, editable=False, blank=True)),
('course_id', models.CharField(max_length=255, db_index=True)), ('course_id', models.CharField(max_length=255, db_index=True)),
('item_id', models.CharField(max_length=255, db_index=True)), ('item_id', models.CharField(max_length=255, db_index=True)),
], ],
......
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflow', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='assessmentworkflow',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, unique=True, db_index=True),
),
]
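This new migration pairs with the model change later in the diff: the workflow uuid column moves from django_extensions' version-1 UUIDField to Django's built-in UUIDField with a uuid4 default. A hedged illustration of the resulting field declaration on a hypothetical model (not the project's code):

import uuid

from django.db import models


class ExampleWorkflow(models.Model):
    # The built-in UUIDField accepts a callable default, so each new row gets a
    # fresh random (version-4) UUID; the old django_extensions field generated
    # version-1 UUIDs derived from timestamp and host instead.
    uuid = models.UUIDField(default=uuid.uuid4, unique=True, db_index=True)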
...@@ -9,20 +9,23 @@ need to then generate a matching migration for it using: ...@@ -9,20 +9,23 @@ need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.workflow --auto ./manage.py schemamigration openassessment.workflow --auto
""" """
import logging
import importlib import importlib
import logging
from uuid import uuid4
from django.conf import settings from django.conf import settings
from django.db import models, transaction, DatabaseError from django.db import DatabaseError, models, transaction
from django.dispatch import receiver from django.dispatch import receiver
from django_extensions.db.fields import UUIDField
from django.utils.timezone import now from django.utils.timezone import now
from model_utils import Choices from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel from model_utils.models import StatusModel, TimeStampedModel
from submissions import api as sub_api
from openassessment.assessment.errors.base import AssessmentError from openassessment.assessment.errors.base import AssessmentError
from openassessment.assessment.signals import assessment_complete_signal from openassessment.assessment.signals import assessment_complete_signal
from .errors import AssessmentApiLoadError, AssessmentWorkflowError, AssessmentWorkflowInternalError from submissions import api as sub_api
from .errors import AssessmentApiLoadError, AssessmentWorkflowError, AssessmentWorkflowInternalError
logger = logging.getLogger('openassessment.workflow.models') logger = logging.getLogger('openassessment.workflow.models')
...@@ -36,7 +39,6 @@ DEFAULT_ASSESSMENT_API_DICT = { ...@@ -36,7 +39,6 @@ DEFAULT_ASSESSMENT_API_DICT = {
'peer': 'openassessment.assessment.api.peer', 'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self', 'self': 'openassessment.assessment.api.self',
'training': 'openassessment.assessment.api.student_training', 'training': 'openassessment.assessment.api.student_training',
'ai': 'openassessment.assessment.api.ai',
} }
ASSESSMENT_API_DICT = getattr( ASSESSMENT_API_DICT = getattr(
settings, 'ORA2_ASSESSMENTS', settings, 'ORA2_ASSESSMENTS',
...@@ -77,7 +79,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -77,7 +79,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
# We then use that score as the student's overall score. # We then use that score as the student's overall score.
# This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`) # This Django setting is a list of assessment steps (defined in `settings.ORA2_ASSESSMENTS`)
# in descending priority order. # in descending priority order.
DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai'] DEFAULT_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']
ASSESSMENT_SCORE_PRIORITY = getattr( ASSESSMENT_SCORE_PRIORITY = getattr(
settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY', settings, 'ORA2_ASSESSMENT_SCORE_PRIORITY',
DEFAULT_ASSESSMENT_SCORE_PRIORITY DEFAULT_ASSESSMENT_SCORE_PRIORITY
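Both lookups above fall back to these defaults only when the corresponding Django setting is absent, so deployments can still override them. A minimal sketch of such an override, mirroring only the entries visible in this hunk (values illustrative):

# settings.py (illustrative override of the two getattr() hooks shown above).
ORA2_ASSESSMENTS = {
    'peer': 'openassessment.assessment.api.peer',
    'self': 'openassessment.assessment.api.self',
    'training': 'openassessment.assessment.api.student_training',
}

# Highest-priority step first; the 'ai' entry is gone after this change.
ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self']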
...@@ -86,7 +88,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -86,7 +88,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
STAFF_ANNOTATION_TYPE = "staff_defined" STAFF_ANNOTATION_TYPE = "staff_defined"
submission_uuid = models.CharField(max_length=36, db_index=True, unique=True) submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
uuid = UUIDField(version=1, db_index=True, unique=True) uuid = models.UUIDField(db_index=True, unique=True, default=uuid4)
# These values are used to find workflows for a particular item # These values are used to find workflows for a particular item
# in a course without needing to look up the submissions for that item. # in a course without needing to look up the submissions for that item.
...@@ -98,6 +100,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -98,6 +100,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
class Meta: class Meta:
ordering = ["-created"] ordering = ["-created"]
# TODO: In migration, need a non-unique index on (course_id, item_id, status) # TODO: In migration, need a non-unique index on (course_id, item_id, status)
app_label = "workflow"
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(AssessmentWorkflow, self).__init__(*args, **kwargs) super(AssessmentWorkflow, self).__init__(*args, **kwargs)
...@@ -154,7 +157,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -154,7 +157,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
item_id=submission_dict['student_item']['item_id'] item_id=submission_dict['student_item']['item_id']
) )
workflow_steps = [ workflow_steps = [
AssessmentWorkflowStep( AssessmentWorkflowStep.objects.create(
workflow=workflow, name=step, order_num=i workflow=workflow, name=step, order_num=i
) )
for i, step in enumerate(step_names) for i, step in enumerate(step_names)
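The comprehension now calls objects.create() instead of instantiating AssessmentWorkflowStep directly, so each step row is written to the database as it is built rather than left unsaved. A tiny illustration of that difference, to be read as a sketch run inside the project's Django context (field values made up):

# A bare constructor builds an unsaved object; objects.create() also issues
# the INSERT, so the row has a primary key immediately.
unsaved = AssessmentWorkflowStep(workflow=workflow, name='peer', order_num=0)
assert unsaved.pk is None           # nothing persisted yet

saved = AssessmentWorkflowStep.objects.create(workflow=workflow, name='peer', order_num=0)
assert saved.pk is not None         # row already exists in the database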
...@@ -396,12 +399,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel): ...@@ -396,12 +399,14 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
except AssessmentWorkflowStep.DoesNotExist: except AssessmentWorkflowStep.DoesNotExist:
for step in list(self.steps.all()): for step in list(self.steps.all()):
step.order_num += 1 step.order_num += 1
staff_step, _ = AssessmentWorkflowStep.objects.get_or_create(
name=self.STATUS.staff,
order_num=0,
assessment_completed_at=now(),
workflow=self,
)
self.steps.add( self.steps.add(
AssessmentWorkflowStep( staff_step
name=self.STATUS.staff,
order_num=0,
assessment_completed_at=now(),
)
) )
# Do not return steps that are not recognized in the AssessmentWorkflow. # Do not return steps that are not recognized in the AssessmentWorkflow.
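Similarly, the implicit staff step is now obtained through get_or_create() before being attached, so steps.add() always receives a persisted row and an existing staff step is reused rather than duplicated. A short sketch of the call's shape, with illustrative values rather than the exact arguments above:

# get_or_create() returns a (row, created) pair; the row is persisted either way.
staff_step, created = AssessmentWorkflowStep.objects.get_or_create(
    workflow=workflow,
    name='staff',
    order_num=0,
)
workflow.steps.add(staff_step)      # safe: staff_step already has a primary key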
...@@ -624,6 +629,7 @@ class AssessmentWorkflowStep(models.Model): ...@@ -624,6 +629,7 @@ class AssessmentWorkflowStep(models.Model):
class Meta: class Meta:
ordering = ["workflow", "order_num"] ordering = ["workflow", "order_num"]
app_label = "workflow"
def is_submitter_complete(self): def is_submitter_complete(self):
""" """
...@@ -760,6 +766,7 @@ class AssessmentWorkflowCancellation(models.Model): ...@@ -760,6 +766,7 @@ class AssessmentWorkflowCancellation(models.Model):
class Meta: class Meta:
ordering = ["created_at", "id"] ordering = ["created_at", "id"]
app_label = "workflow"
def __repr__(self): def __repr__(self):
return ( return (
......
...@@ -3,6 +3,7 @@ Serializers are created to ensure models do not have to be accessed outside the ...@@ -3,6 +3,7 @@ Serializers are created to ensure models do not have to be accessed outside the
scope of the ORA2 APIs. scope of the ORA2 APIs.
""" """
from rest_framework import serializers from rest_framework import serializers
from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowCancellation from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowCancellation
......
...@@ -57,66 +57,5 @@ ...@@ -57,66 +57,5 @@
}, },
"self": {} "self": {}
} }
},
"ai": {
"steps": ["ai"],
"requirements": {
"ai": {}
}
},
"ai_peer": {
"steps": ["ai", "peer"],
"requirements": {
"ai": {},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"ai_training_peer": {
"steps": ["ai", "training", "peer"],
"requirements": {
"ai": {},
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"ai_self": {
"steps": ["ai", "self"],
"requirements": {
"ai": {},
"self": {}
}
},
"ai_peer_self": {
"steps": ["ai", "peer", "self"],
"requirements": {
"ai": {},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
},
"ai_training_peer_self": {
"steps": ["ai", "training", "peer", "self"],
"requirements": {
"ai": {},
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
} }
} }
\ No newline at end of file
""" """
Tests for Django signals and receivers defined by the workflow API. Tests for Django signals and receivers defined by the workflow API.
""" """
import ddt
import mock import mock
from django.db import DatabaseError from django.db import DatabaseError
import ddt
from submissions import api as sub_api from openassessment.assessment.signals import assessment_complete_signal
from openassessment.test_utils import CacheResetTest from openassessment.test_utils import CacheResetTest
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from openassessment.workflow.models import AssessmentWorkflow from openassessment.workflow.models import AssessmentWorkflow
from openassessment.assessment.signals import assessment_complete_signal from submissions import api as sub_api
@ddt.ddt @ddt.ddt
...@@ -54,7 +56,7 @@ class UpdateWorkflowSignalTest(CacheResetTest): ...@@ -54,7 +56,7 @@ class UpdateWorkflowSignalTest(CacheResetTest):
mock_update.assert_called_once_with(None) mock_update.assert_called_once_with(None)
@ddt.data(DatabaseError, IOError) @ddt.data(DatabaseError, IOError)
@mock.patch.object(AssessmentWorkflow.objects, 'get') @mock.patch('openassessment.workflow.models.AssessmentWorkflow.objects.get')
def test_errors(self, error, mock_call): def test_errors(self, error, mock_call):
# Start a workflow for the submission # Start a workflow for the submission
workflow_api.create_workflow(self.submission_uuid, ['self']) workflow_api.create_workflow(self.submission_uuid, ['self'])
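The patch target switches from patch.object(), which resolves AssessmentWorkflow.objects when the decorator line is evaluated, to the string-path form, which looks the target up lazily each time the test runs. A self-contained sketch of the string-path style with a side_effect, using made-up names outside the real test suite:

import mock


class Repository(object):
    @staticmethod
    def get():
        return "real row"


@mock.patch('__main__.Repository.get')          # target resolved when the test runs
def exercise_error_branch(mock_get):
    mock_get.side_effect = IOError("simulated failure")
    try:
        Repository.get()
    except IOError:
        return "error branch exercised"


print(exercise_error_branch())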
......
...@@ -2,21 +2,19 @@ ...@@ -2,21 +2,19 @@
Grade step in the OpenAssessment XBlock. Grade step in the OpenAssessment XBlock.
""" """
import copy import copy
from lazy import lazy from lazy import lazy
from xblock.core import XBlock
from django.utils.translation import ugettext as _ from django.utils.translation import ugettext as _
from xblock.core import XBlock from data_conversion import create_submission_dict
from openassessment.assessment.api import ai as ai_api
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api from openassessment.assessment.api import staff as staff_api
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
from submissions import api as sub_api from submissions import api as sub_api
from data_conversion import create_submission_dict
class GradeMixin(object): class GradeMixin(object):
"""Grade Mixin introduces all handlers for displaying grades """Grade Mixin introduces all handlers for displaying grades
...@@ -91,7 +89,6 @@ class GradeMixin(object): ...@@ -91,7 +89,6 @@ class GradeMixin(object):
submission_uuid = workflow['submission_uuid'] submission_uuid = workflow['submission_uuid']
staff_assessment = None staff_assessment = None
example_based_assessment = None
self_assessment = None self_assessment = None
feedback = None feedback = None
peer_assessments = [] peer_assessments = []
...@@ -111,11 +108,6 @@ class GradeMixin(object): ...@@ -111,11 +108,6 @@ class GradeMixin(object):
self_api.get_assessment(submission_uuid) self_api.get_assessment(submission_uuid)
) )
if "example-based-assessment" in assessment_steps:
example_based_assessment = self._assessment_grade_context(
ai_api.get_latest_assessment(submission_uuid)
)
raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid) raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
if raw_staff_assessment: if raw_staff_assessment:
staff_assessment = self._assessment_grade_context(raw_staff_assessment) staff_assessment = self._assessment_grade_context(raw_staff_assessment)
...@@ -141,7 +133,6 @@ class GradeMixin(object): ...@@ -141,7 +133,6 @@ class GradeMixin(object):
submission_uuid, submission_uuid,
peer_assessments=peer_assessments, peer_assessments=peer_assessments,
self_assessment=self_assessment, self_assessment=self_assessment,
example_based_assessment=example_based_assessment,
staff_assessment=staff_assessment, staff_assessment=staff_assessment,
), ),
'file_upload_type': self.file_upload_type, 'file_upload_type': self.file_upload_type,
...@@ -219,7 +210,7 @@ class GradeMixin(object): ...@@ -219,7 +210,7 @@ class GradeMixin(object):
return {'success': True, 'msg': self._(u"Feedback saved.")} return {'success': True, 'msg': self._(u"Feedback saved.")}
def grade_details( def grade_details(
self, submission_uuid, peer_assessments, self_assessment, example_based_assessment, staff_assessment, self, submission_uuid, peer_assessments, self_assessment, staff_assessment,
is_staff=False is_staff=False
): ):
""" """
...@@ -229,7 +220,6 @@ class GradeMixin(object): ...@@ -229,7 +220,6 @@ class GradeMixin(object):
submission_uuid (str): The id of the submission being graded. submission_uuid (str): The id of the submission being graded.
peer_assessments (list of dict): Serialized assessment models from the peer API. peer_assessments (list of dict): Serialized assessment models from the peer API.
self_assessment (dict): Serialized assessment model from the self API self_assessment (dict): Serialized assessment model from the self API
example_based_assessment (dict): Serialized assessment model from the example-based API
staff_assessment (dict): Serialized assessment model from the staff API staff_assessment (dict): Serialized assessment model from the staff API
is_staff (bool): True if the grade details are being displayed to staff, else False. is_staff (bool): True if the grade details are being displayed to staff, else False.
Default value is False (meaning grade details are being shown to the learner). Default value is False (meaning grade details are being shown to the learner).
...@@ -268,7 +258,10 @@ class GradeMixin(object): ...@@ -268,7 +258,10 @@ class GradeMixin(object):
Returns True if at least one assessment has feedback. Returns True if at least one assessment has feedback.
""" """
return any( return any(
assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])) (
assessment and
(assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])))
)
for assessment in assessments for assessment in assessments
) )
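The guard added above (checking the assessment itself before calling .get()) matters because the list handed to has_feedback can contain empty placeholders for steps with no assessment; without it, .get() would be called on None. A self-contained sketch of the guarded helper and its effect (sample data made up):

def has_feedback(assessments):
    # Mirrors the guarded version above: skip falsy entries before calling .get().
    return any(
        (
            assessment and
            (assessment.get('feedback') or has_feedback(assessment.get('individual_assessments', [])))
        )
        for assessment in assessments
    )


print(has_feedback([None, {'feedback': ''}]))                                         # False
print(has_feedback([None, {'individual_assessments': [{'feedback': 'Nice work'}]}]))  # True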
...@@ -279,8 +272,6 @@ class GradeMixin(object): ...@@ -279,8 +272,6 @@ class GradeMixin(object):
median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid) median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
elif "peer-assessment" in assessment_steps: elif "peer-assessment" in assessment_steps:
median_scores = peer_api.get_assessment_median_scores(submission_uuid) median_scores = peer_api.get_assessment_median_scores(submission_uuid)
elif "example-based-assessment" in assessment_steps:
median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
elif "self-assessment" in assessment_steps: elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid) median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
...@@ -293,7 +284,6 @@ class GradeMixin(object): ...@@ -293,7 +284,6 @@ class GradeMixin(object):
assessment_steps, assessment_steps,
staff_assessment, staff_assessment,
peer_assessments, peer_assessments,
example_based_assessment,
self_assessment, self_assessment,
is_staff=is_staff, is_staff=is_staff,
) )
...@@ -322,7 +312,7 @@ class GradeMixin(object): ...@@ -322,7 +312,7 @@ class GradeMixin(object):
def _graded_assessments( def _graded_assessments(
self, submission_uuid, criterion, assessment_steps, staff_assessment, peer_assessments, self, submission_uuid, criterion, assessment_steps, staff_assessment, peer_assessments,
example_based_assessment, self_assessment, is_staff=False self_assessment, is_staff=False
): ):
""" """
Returns an array of assessments with their associated grades. Returns an array of assessments with their associated grades.
...@@ -364,9 +354,6 @@ class GradeMixin(object): ...@@ -364,9 +354,6 @@ class GradeMixin(object):
} }
else: else:
peer_assessment_part = None peer_assessment_part = None
example_based_assessment_part = _get_assessment_part(
_('Example-Based Grade'), _('Example-Based Comments'), criterion_name, example_based_assessment
)
self_assessment_part = _get_assessment_part( self_assessment_part = _get_assessment_part(
_('Self Assessment Grade') if is_staff else _('Your Self Assessment'), _('Self Assessment Grade') if is_staff else _('Your Self Assessment'),
_('Your Comments'), # This is only used in the LMS student-facing view _('Your Comments'), # This is only used in the LMS student-facing view
...@@ -380,8 +367,6 @@ class GradeMixin(object): ...@@ -380,8 +367,6 @@ class GradeMixin(object):
assessments.append(staff_assessment_part) assessments.append(staff_assessment_part)
if peer_assessment_part: if peer_assessment_part:
assessments.append(peer_assessment_part) assessments.append(peer_assessment_part)
if example_based_assessment_part:
assessments.append(example_based_assessment_part)
if self_assessment_part: if self_assessment_part:
assessments.append(self_assessment_part) assessments.append(self_assessment_part)
...@@ -389,7 +374,7 @@ class GradeMixin(object): ...@@ -389,7 +374,7 @@ class GradeMixin(object):
if len(assessments) > 0: if len(assessments) > 0:
first_assessment = assessments[0] first_assessment = assessments[0]
option = first_assessment['option'] option = first_assessment['option']
if option: if option and option.get('points'):
first_assessment['points'] = option['points'] first_assessment['points'] = option['points']
return assessments return assessments
......
""" """
Leaderboard step in the OpenAssessment XBlock. Leaderboard step in the OpenAssessment XBlock.
""" """
from django.utils.translation import ugettext as _
from xblock.core import XBlock from xblock.core import XBlock
from submissions import api as sub_api from django.utils.translation import ugettext as _
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
from openassessment.fileupload import api as file_upload_api from openassessment.fileupload import api as file_upload_api
from openassessment.fileupload.exceptions import FileUploadError from openassessment.fileupload.exceptions import FileUploadError
from openassessment.xblock.data_conversion import create_submission_dict from openassessment.xblock.data_conversion import create_submission_dict
from submissions import api as sub_api
class LeaderboardMixin(object): class LeaderboardMixin(object):
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
Fields and methods used by the LMS and Studio. Fields and methods used by the LMS and Studio.
""" """
from xblock.fields import String, Float, Scope, DateTime from xblock.fields import DateTime, Float, Scope, String
class LmsCompatibilityMixin(object): class LmsCompatibilityMixin(object):
......
...@@ -5,7 +5,6 @@ Message step in the OpenAssessment XBlock. ...@@ -5,7 +5,6 @@ Message step in the OpenAssessment XBlock.
import datetime as dt import datetime as dt
import pytz import pytz
from xblock.core import XBlock from xblock.core import XBlock
......
...@@ -5,40 +5,37 @@ import datetime as dt ...@@ -5,40 +5,37 @@ import datetime as dt
import json import json
import logging import logging
import os import os
import pkg_resources
from lazy import lazy
import pkg_resources
import pytz import pytz
from django.conf import settings
from django.template.context import Context
from django.template.loader import get_template
from webob import Response from webob import Response
from lazy import lazy
from xblock.core import XBlock from xblock.core import XBlock
from xblock.fields import List, Scope, String, Boolean, Integer from xblock.fields import Boolean, Integer, List, Scope, String
from xblock.fragment import Fragment from xblock.fragment import Fragment
from django.conf import settings
from django.template.loader import get_template
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.course_items_listing_mixin import CourseItemsListingMixin
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.grade_mixin import GradeMixin from openassessment.xblock.grade_mixin import GradeMixin
from openassessment.xblock.leaderboard_mixin import LeaderboardMixin from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.message_mixin import MessageMixin from openassessment.xblock.message_mixin import MessageMixin
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.lms_mixin import LmsCompatibilityMixin from openassessment.xblock.resolve_dates import DISTANT_FUTURE, DISTANT_PAST, parse_date_value, resolve_dates
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_area_mixin import StaffAreaMixin from openassessment.xblock.staff_area_mixin import StaffAreaMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.staff_assessment_mixin import StaffAssessmentMixin from openassessment.xblock.staff_assessment_mixin import StaffAssessmentMixin
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.validation import validator from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, parse_date_value, DISTANT_PAST, DISTANT_FUTURE from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
from openassessment.xblock.course_items_listing_mixin import CourseItemsListingMixin
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -83,7 +80,6 @@ UI_MODELS = { ...@@ -83,7 +80,6 @@ UI_MODELS = {
VALID_ASSESSMENT_TYPES = [ VALID_ASSESSMENT_TYPES = [
"student-training", "student-training",
"example-based-assessment",
"peer-assessment", "peer-assessment",
"self-assessment", "self-assessment",
"staff-assessment" "staff-assessment"
...@@ -491,8 +487,7 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -491,8 +487,7 @@ class OpenAssessmentBlock(MessageMixin,
Creates a fragment for display. Creates a fragment for display.
""" """
context = Context(context_dict) fragment = Fragment(template.render(context_dict))
fragment = Fragment(template.render(context))
if additional_css is None: if additional_css is None:
additional_css = [] additional_css = []
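The render call drops the Context wrapper: templates obtained through django.template.loader.get_template() are backend template objects whose render() takes a plain dict, and newer Django versions reject a Context there. A minimal sketch of the pattern; the template path is reused from elsewhere in this diff purely for illustration:

from django.template.loader import get_template


def render_block_html(context_dict):
    # Backend templates returned by get_template() accept the context as a dict.
    template = get_template('openassessmentblock/oa_error.html')
    return template.render(context_dict)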
...@@ -646,10 +641,6 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -646,10 +641,6 @@ class OpenAssessmentBlock(MessageMixin,
load('static/xml/unicode.xml') load('static/xml/unicode.xml')
), ),
( (
"OpenAssessmentBlock Example Based Rubric",
load('static/xml/example_based_example.xml')
),
(
"OpenAssessmentBlock Poverty Rubric", "OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml') load('static/xml/poverty_rubric_example.xml')
), ),
...@@ -825,8 +816,7 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -825,8 +816,7 @@ class OpenAssessmentBlock(MessageMixin,
context_dict = {} context_dict = {}
template = get_template(path) template = get_template(path)
context = Context(context_dict) return Response(template.render(context_dict), content_type='application/html', charset='UTF-8')
return Response(template.render(context), content_type='application/html', charset='UTF-8')
def add_xml_to_node(self, node): def add_xml_to_node(self, node):
""" """
...@@ -844,7 +834,7 @@ class OpenAssessmentBlock(MessageMixin, ...@@ -844,7 +834,7 @@ class OpenAssessmentBlock(MessageMixin,
Returns: Returns:
Response: A response object with an HTML body. Response: A response object with an HTML body.
""" """
context = Context({'error_msg': error_msg}) context = {'error_msg': error_msg}
template = get_template('openassessmentblock/oa_error.html') template = get_template('openassessmentblock/oa_error.html')
return Response(template.render(context), content_type='application/html', charset='UTF-8') return Response(template.render(context), content_type='application/html', charset='UTF-8')
......
...@@ -9,15 +9,15 @@ from webob import Response ...@@ -9,15 +9,15 @@ from webob import Response
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors import ( from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError,
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError PeerAssessmentWorkflowError)
)
from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT from openassessment.xblock.defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
from .data_conversion import create_rubric_dict
from .data_conversion import (clean_criterion_feedback, create_rubric_dict, create_submission_dict,
verify_assessment_parameters)
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences from .user_data import get_user_preferences
from .data_conversion import clean_criterion_feedback, create_submission_dict, verify_assessment_parameters
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
...@@ -2,8 +2,9 @@ ...@@ -2,8 +2,9 @@
Resolve unspecified dates and date strings to datetimes. Resolve unspecified dates and date strings to datetimes.
""" """
import datetime as dt import datetime as dt
import pytz
from dateutil.parser import parse as parse_date from dateutil.parser import parse as parse_date
import pytz
class InvalidDateFormat(Exception): class InvalidDateFormat(Exception):
......
...@@ -4,7 +4,7 @@ Schema for validating and sanitizing data received from the JavaScript client. ...@@ -4,7 +4,7 @@ Schema for validating and sanitizing data received from the JavaScript client.
import dateutil import dateutil
from pytz import utc from pytz import utc
from voluptuous import Schema, Required, All, Any, Range, In, Invalid from voluptuous import All, Any, In, Invalid, Range, Required, Schema
def utf8_validator(value): def utf8_validator(value):
...@@ -66,7 +66,6 @@ NECESSITY_OPTIONS = [ ...@@ -66,7 +66,6 @@ NECESSITY_OPTIONS = [
VALID_ASSESSMENT_TYPES = [ VALID_ASSESSMENT_TYPES = [
u'peer-assessment', u'peer-assessment',
u'self-assessment', u'self-assessment',
u'example-based-assessment',
u'student-training', u'student-training',
u'staff-assessment', u'staff-assessment',
] ]
......
import logging import logging
from xblock.core import XBlock
from webob import Response from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from submissions import api as submission_api from submissions import api as submission_api
from .data_conversion import (clean_criterion_feedback, create_rubric_dict, create_submission_dict,
verify_assessment_parameters)
from .resolve_dates import DISTANT_FUTURE from .resolve_dates import DISTANT_FUTURE
from .user_data import get_user_preferences from .user_data import get_user_preferences
from .data_conversion import (clean_criterion_feedback, create_submission_dict,
create_rubric_dict, verify_assessment_parameters)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
...@@ -7,25 +7,18 @@ from functools import wraps ...@@ -7,25 +7,18 @@ from functools import wraps
import logging import logging
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment.errors import (
PeerAssessmentInternalError,
)
from openassessment.workflow.errors import (
AssessmentWorkflowError, AssessmentWorkflowInternalError
)
from openassessment.assessment.errors.ai import AIError
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.xblock.data_conversion import (
create_rubric_dict, convert_training_examples_list_to_dict, create_submission_dict
)
from submissions import api as submission_api
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import ai as ai_api
from openassessment.workflow import api as workflow_api
from openassessment.assessment.api import staff as staff_api from openassessment.assessment.api import staff as staff_api
from .user_data import get_user_preferences from openassessment.assessment.errors import PeerAssessmentInternalError
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError, AssessmentWorkflowInternalError
from openassessment.xblock.data_conversion import create_submission_dict
from openassessment.xblock.resolve_dates import DISTANT_FUTURE, DISTANT_PAST
from submissions import api as submission_api
from .user_data import get_user_preferences
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -127,24 +120,6 @@ class StaffAreaMixin(object): ...@@ -127,24 +120,6 @@ class StaffAreaMixin(object):
context['status_counts'] = status_counts context['status_counts'] = status_counts
context['num_submissions'] = num_submissions context['num_submissions'] = num_submissions
# Show the schedule training button if example based assessment is
# configured, and the current user has admin privileges.
example_based_assessment = self.get_assessment_module('example-based-assessment')
display_ai_staff_info = (
self.is_admin and
bool(example_based_assessment) and
not self.in_studio_preview
)
context['display_schedule_training'] = display_ai_staff_info
context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
if display_ai_staff_info:
context['classifierset'] = ai_api.get_classifier_set_info(
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
example_based_assessment['algorithm_id'],
student_item['course_id'],
student_item['item_id']
)
# Include Latex setting # Include Latex setting
context['allow_latex'] = self.allow_latex context['allow_latex'] = self.allow_latex
...@@ -152,9 +127,6 @@ class StaffAreaMixin(object): ...@@ -152,9 +127,6 @@ class StaffAreaMixin(object):
context['step_dates'] = list() context['step_dates'] = list()
for step in ['submission'] + self.assessment_steps: for step in ['submission'] + self.assessment_steps:
if step == 'example-based-assessment':
continue
# Get the dates as a student would see them # Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False) __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
...@@ -187,42 +159,6 @@ class StaffAreaMixin(object): ...@@ -187,42 +159,6 @@ class StaffAreaMixin(object):
'staff_assessment_in_progress': grading_stats['in-progress'] 'staff_assessment_in_progress': grading_stats['in-progress']
} }
@XBlock.json_handler
@require_global_admin("SCHEDULE_TRAINING")
def schedule_training(self, data, suffix=''): # pylint: disable=W0613
"""
Schedule a new training task for example-based grading.
"""
assessment = self.get_assessment_module('example-based-assessment')
student_item_dict = self.get_student_item_dict()
if assessment:
examples = assessment["examples"]
try:
workflow_uuid = ai_api.train_classifiers(
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
convert_training_examples_list_to_dict(examples),
student_item_dict.get('course_id'),
student_item_dict.get('item_id'),
assessment["algorithm_id"]
)
return {
'success': True,
'workflow_uuid': workflow_uuid,
'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
}
except AIError as err:
return {
'success': False,
'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err))
}
else:
return {
'success': False,
'msg': self._(u"Example Based Assessment is not configured for this location.")
}
@XBlock.handler @XBlock.handler
@require_course_staff("STUDENT_INFO") @require_course_staff("STUDENT_INFO")
def render_student_info(self, data, suffix=''): # pylint: disable=W0613 def render_student_info(self, data, suffix=''): # pylint: disable=W0613
...@@ -389,9 +325,6 @@ class StaffAreaMixin(object): ...@@ -389,9 +325,6 @@ class StaffAreaMixin(object):
""" """
assessment_steps = self.assessment_steps assessment_steps = self.assessment_steps
example_based_assessment = None
example_based_assessment_grade_context = None
self_assessment = None self_assessment = None
self_assessment_grade_context = None self_assessment_grade_context = None
...@@ -423,11 +356,6 @@ class StaffAreaMixin(object): ...@@ -423,11 +356,6 @@ class StaffAreaMixin(object):
if grade_exists: if grade_exists:
self_assessment_grade_context = self._assessment_grade_context(self_assessment) self_assessment_grade_context = self._assessment_grade_context(self_assessment)
if "example-based-assessment" in assessment_steps:
example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
if grade_exists:
example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)
if grade_exists: if grade_exists:
if staff_assessment: if staff_assessment:
staff_assessment_grade_context = self._assessment_grade_context(staff_assessment) staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
...@@ -436,7 +364,6 @@ class StaffAreaMixin(object): ...@@ -436,7 +364,6 @@ class StaffAreaMixin(object):
submission_uuid, submission_uuid,
peer_assessments_grade_context, peer_assessments_grade_context,
self_assessment_grade_context, self_assessment_grade_context,
example_based_assessment_grade_context,
staff_assessment_grade_context, staff_assessment_grade_context,
is_staff=True, is_staff=True,
) )
...@@ -444,7 +371,6 @@ class StaffAreaMixin(object): ...@@ -444,7 +371,6 @@ class StaffAreaMixin(object):
workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid) workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)
context.update({ context.update({
'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
'self_assessment': [self_assessment] if self_assessment else None, 'self_assessment': [self_assessment] if self_assessment else None,
'peer_assessments': peer_assessments, 'peer_assessments': peer_assessments,
'staff_assessment': [staff_assessment] if staff_assessment else None, 'staff_assessment': [staff_assessment] if staff_assessment else None,
...@@ -455,50 +381,11 @@ class StaffAreaMixin(object): ...@@ -455,50 +381,11 @@ class StaffAreaMixin(object):
'workflow_cancellation': workflow_cancellation, 'workflow_cancellation': workflow_cancellation,
}) })
if peer_assessments or self_assessment or example_based_assessment or staff_assessment: if peer_assessments or self_assessment or staff_assessment:
max_scores = peer_api.get_rubric_max_scores(submission_uuid) max_scores = peer_api.get_rubric_max_scores(submission_uuid)
for criterion in context["rubric_criteria"]: for criterion in context["rubric_criteria"]:
criterion["total_value"] = max_scores[criterion["name"]] criterion["total_value"] = max_scores[criterion["name"]]
@XBlock.json_handler
@require_global_admin("RESCHEDULE_TASKS")
def reschedule_unfinished_tasks(self, data, suffix=''): # pylint: disable=W0613
"""
Wrapper which invokes the API call for rescheduling grading tasks.
Checks that the requester is an administrator that is not in studio-preview mode,
and that the API call returns without error. If it returns with an error (any
exception), the appropriate JSON serializable dictionary with success conditions
is passed back.
Args:
data (not used)
suffix (not used)
Return:
JSON serializable dict with the following elements:
'success': (bool) Indicates whether or not the tasks were rescheduled successfully
'msg': The response to the server (could be error message or success message)
"""
# Identifies the course and item that will need to be re-run
student_item_dict = self.get_student_item_dict()
course_id = student_item_dict.get('course_id')
item_id = student_item_dict.get('item_id')
try:
# Note that we only want to reschedule grading tasks, but maintain the potential functionality
# within the API to also reschedule training tasks.
ai_api.reschedule_unfinished_tasks(course_id=course_id, item_id=item_id, task_type=u"grade")
return {
'success': True,
'msg': self._(u"All AI tasks associated with this item have been rescheduled successfully.")
}
except AIError as ex:
return {
'success': False,
'msg': self._(u"An error occurred while rescheduling tasks: {}".format(ex))
}
def clear_student_state(self, user_id, course_id, item_id, requesting_user_id): def clear_student_state(self, user_id, course_id, item_id, requesting_user_id):
""" """
This xblock method is called (from our LMS runtime, which defines this method signature) to clear student state This xblock method is called (from our LMS runtime, which defines this method signature) to clear student state
......
...@@ -3,17 +3,14 @@ A mixin for staff grading. ...@@ -3,17 +3,14 @@ A mixin for staff grading.
""" """
import logging import logging
from staff_area_mixin import require_course_staff
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment.api import staff as staff_api from openassessment.assessment.api import staff as staff_api
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.workflow import api as workflow_api from openassessment.workflow import api as workflow_api
from openassessment.assessment.errors import ( from staff_area_mixin import require_course_staff
StaffAssessmentRequestError, StaffAssessmentInternalError
)
from .data_conversion import create_rubric_dict from .data_conversion import clean_criterion_feedback, create_rubric_dict, verify_assessment_parameters
from .data_conversion import clean_criterion_feedback, verify_assessment_parameters
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
Two source diffs could not be displayed because they are too large.
...@@ -502,7 +502,6 @@ ...@@ -502,7 +502,6 @@
"student_training", "student_training",
"peer_assessment", "peer_assessment",
"self_assessment", "self_assessment",
"example_based_assessment",
"staff_assessment" "staff_assessment"
] ]
}, },
...@@ -668,7 +667,6 @@ ...@@ -668,7 +667,6 @@
"student_training", "student_training",
"peer_assessment", "peer_assessment",
"self_assessment", "self_assessment",
"example_based_assessment",
"staff_assessment" "staff_assessment"
] ]
}, },
......
This source diff could not be displayed because it is too large.
...@@ -82,7 +82,6 @@ describe("OpenAssessment.Server", function() { ...@@ -82,7 +82,6 @@ describe("OpenAssessment.Server", function() {
"student_training", "student_training",
"peer_assessment", "peer_assessment",
"self_assessment", "self_assessment",
"example_based_assessment"
]; ];
var TITLE = 'This is the title.'; var TITLE = 'This is the title.';
......
...@@ -117,7 +117,6 @@ describe("OpenAssessment.StudioView", function() { ...@@ -117,7 +117,6 @@ describe("OpenAssessment.StudioView", function() {
"student-training", "student-training",
"peer-assessment", "peer-assessment",
"self-assessment", "self-assessment",
"example-based-assessment",
"staff-assessment" "staff-assessment"
] ]
}; };
......
Two additional diffs are collapsed and not shown here.