Commit 76e9dc56 by Will Daly

Merge remote-tracking branch 'origin/master' into ai-grading

Conflicts:
	openassessment/workflow/api.py
	openassessment/workflow/models.py
	openassessment/workflow/test/test_models.py
	openassessment/xblock/static/js/openassessment.min.js
	openassessment/xblock/validation.py
	scripts/data/test-course.tar.gz
	settings/base.py
	settings/test.py
parents c04c459b b108083b
[main]
host = https://www.transifex.com
[edx-platform.openassessment]
file_filter = conf/locale/<lang>/LC_MESSAGES/django.po
source_file = conf/locale/en/LC_MESSAGES/django.po
source_lang = en
type = PO
[edx-platform.openassessment-js]
file_filter = conf/locale/<lang>/LC_MESSAGES/djangojs.po
source_file = conf/locale/en/LC_MESSAGES/djangojs.po
source_lang = en
type = PO
......@@ -49,6 +49,8 @@ Isolation of Assessment types
a non `None` value has been returned by this function for a given
`submission_uuid`, repeated calls to this function should return the same
thing.
`on_start(submission_uuid)`
Notification to the API that the student has started the assessment step.
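For reference, a minimal sketch of the interface each assessment API module is expected to expose; the names and signatures match the peer, self, and student-training APIs touched in this commit, but the bodies here are placeholders rather than the real implementations:

    def submitter_is_finished(submission_uuid, requirements):
        """Return True once the submitter has done everything this step requires."""
        raise NotImplementedError

    def assessment_is_finished(submission_uuid, requirements):
        """Return True once the submission has been assessed enough to be scored."""
        raise NotImplementedError

    def on_start(submission_uuid):
        """Notification that the student has started this assessment step."""
        raise NotImplementedError

    def get_score(submission_uuid, requirements):
        """Return a dict with "points_earned" and "points_possible", or None."""
        raise NotImplementedError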
In the long run, it could be that `OpenAssessmentBlock` becomes a wrapper
that talks to child XBlocks via this kind of API, and that each child would
......@@ -96,3 +98,15 @@ Handling Problem Definition Change
2. If the sequence of steps changes, we look at the new steps and advance to
the first step that the user has not completed (`is_submitter_done()`
returns `False`); a short sketch of this rule appears below.
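A minimal sketch of that rule, assuming each step object exposes the `is_submitter_done()` check described above (the function and names below are illustrative, not the actual workflow model code):

    def advance_after_definition_change(new_steps):
        """Return the first step the user has not completed, or None if all are done."""
        for step in new_steps:
            if not step.is_submitter_done():
                return step
        return None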
Django settings
Assessments in the workflow are configurable using Django settings.
This keeps the workflow API decoupled from the concrete assessment modules.
The two settings, with example values shown after the list, are:
* `ORA2_ASSESSMENTS`: a `dict` mapping assessment names to the Python module path
of the corresponding assessment API.
* `ORA2_ASSESSMENT_SCORE_PRIORITY`: a `list` of assessment names, in priority
order, that determines which assessment type is used to generate a student's score.
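Example values, matching the defaults this commit adds to `settings/base.py` (shown later in this diff):

    ORA2_ASSESSMENTS = {
        'peer': 'openassessment.assessment.api.peer',
        'self': 'openassessment.assessment.api.self',
        'training': 'openassessment.assessment.api.student_training',
        'ai': 'openassessment.assessment.api.ai',
    }

    # If peer-assessment provides a score, use it; otherwise fall back to
    # self-assessment, then AI.
    ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']

If a configured module path cannot be imported, the workflow API raises the new `AssessmentApiLoadError` defined in `openassessment/workflow/errors.py` (see below).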
......@@ -28,6 +28,18 @@ PEER_TYPE = "PE"
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has made the required number of assessments.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_grade" indicating
the required number of submissions the student must grade.
Returns:
bool
"""
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
if workflow.completed_at is not None:
......@@ -41,17 +53,78 @@ def submitter_is_finished(submission_uuid, requirements):
return False
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether the submitter has received enough assessments
to get a score.
Args:
submission_uuid (str): The UUID of the submission being tracked.
requirements (dict): Dictionary with the key "must_be_graded_by"
indicating the required number of assessments the student
must receive to get a score.
Returns:
bool
"""
return bool(get_score(submission_uuid, requirements))
def on_start(submission_uuid):
"""Create a new peer workflow for a student item and submission.
Creates a unique peer workflow for a student item, associated with a
submission.
Args:
submission_uuid (str): The submission associated with this workflow.
Returns:
None
Raises:
SubmissionError: There was an error retrieving the submission.
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
"""
try:
submission = sub_api.get_submission_and_student(submission_uuid)
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission['student_item']['student_id'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
submission_uuid=submission_uuid
)
workflow.save()
except IntegrityError:
# If we get an integrity error, it means someone else has already
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_score(submission_uuid, requirements):
"""
Retrieve a score for a submission if requirements have been satisfied.
Args:
submission_uuid (str): The UUID of the submission.
requirements (dict): Description of requirements for receiving a score,
specific to the particular kind of submission (e.g. self or peer).
requirements (dict): Dictionary with the key "must_be_graded_by"
indicating the required number of assessments the student
must receive to get a score.
Returns:
dict with keys "points_earned" and "points_possible".
"""
# User hasn't completed their own submission yet
if not submitter_is_finished(submission_uuid, requirements):
......@@ -93,10 +166,6 @@ def get_score(submission_uuid, requirements):
}
def assessment_is_finished(submission_uuid, requirements):
return bool(get_score(submission_uuid, requirements))
def create_assessment(
scorer_submission_uuid,
scorer_id,
......
......@@ -24,6 +24,74 @@ SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.api.self")
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
"""
Create a self-assessment for a submission.
......@@ -155,73 +223,6 @@ def get_assessment(submission_uuid):
return serialized_assessment
def submitter_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, requirements)
def get_score(submission_uuid, requirements):
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
A dict of points earned and points possible for the given submission.
Returns None if no score can be determined yet.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"]
}
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
......
......@@ -56,19 +56,33 @@ def submitter_is_finished(submission_uuid, requirements): # pylint:disable=W06
return workflow.num_completed >= num_required
def assessment_is_finished(submission_uuid, requirements): # pylint:disable=W0613
def on_start(submission_uuid):
"""
Since the student is not being assessed by others,
this always returns true.
"""
return True
Creates a new student training workflow.
This function should be called to indicate that a submission has entered the
student training workflow part of the assessment process.
def get_score(submission_uuid, requirements): # pylint:disable=W0613
"""
Training is either complete or incomplete; there is no score.
Args:
submission_uuid (str): The submission UUID for the student that is
initiating training.
Returns:
None
Raises:
StudentTrainingInternalError: Raised when an error occurs persisting the
Student Training Workflow
"""
return None
try:
StudentTrainingWorkflow.create_workflow(submission_uuid)
except Exception:
msg = (
u"An internal error has occurred while creating the student "
u"training workflow for submission UUID {}".format(submission_uuid)
)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def validate_training_examples(rubric, examples):
......@@ -354,34 +368,6 @@ def get_training_example(submission_uuid, rubric, examples):
raise StudentTrainingInternalError(msg)
def create_student_training_workflow(submission_uuid):
"""
Creates a new student training workflow.
This function should be called to indicate that a submission has entered the
student training workflow part of the assessment process.
Args:
submission_uuid (str): The submission UUID for the student that is
initiating training.
Returns:
None
Raises:
StudentTrainingInternalError: Raised when an error occurs persisting the
Student Training Workflow
"""
try:
StudentTrainingWorkflow.create_workflow(submission_uuid)
except Exception:
msg = (
u"An internal error has occurred while creating the student "
u"training workflow for submission UUID {}".format(submission_uuid)
)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def assess_training_example(submission_uuid, options_selected, update_workflow=True):
"""
Assess a training example and update the workflow.
......
......@@ -370,7 +370,7 @@ class TestPeerApi(CacheResetTest):
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
# This should not raise an exception
peer_api.create_peer_workflow(tim_sub["uuid"])
peer_api.on_start(tim_sub["uuid"])
@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submission_found_closing_assessment(self):
......@@ -1121,7 +1121,7 @@ class TestPeerApi(CacheResetTest):
def test_error_on_assessment_creation(self, mock_filter):
mock_filter.side_effect = DatabaseError("Bad things happened")
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
peer_api.create_peer_workflow(submission["uuid"])
peer_api.on_start(submission["uuid"])
peer_api.create_assessment(
submission["uuid"], STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'],
......@@ -1188,7 +1188,7 @@ class TestPeerApi(CacheResetTest):
new_student_item = STUDENT_ITEM.copy()
new_student_item["student_id"] = student
submission = sub_api.create_submission(new_student_item, answer, date)
peer_api.create_peer_workflow(submission["uuid"])
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], STEPS)
return submission, new_student_item
......
......@@ -26,7 +26,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
Create a submission.
"""
submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
training_api.create_student_training_workflow(submission['uuid'])
training_api.on_start(submission['uuid'])
self.submission_uuid = submission['uuid']
def test_training_workflow(self):
......@@ -122,7 +122,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
def test_submitter_is_finished_num_queries(self):
# Complete the first training example
training_api.create_student_training_workflow(self.submission_uuid)
training_api.on_start(self.submission_uuid)
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
......@@ -160,9 +160,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
requirements = {'num_required': 1}
self.assertFalse(training_api.submitter_is_finished(self.submission_uuid, requirements))
# But since we're not being assessed by others, the "assessment" should be finished.
self.assertTrue(training_api.assessment_is_finished(self.submission_uuid, requirements))
def test_get_training_example_none_available(self):
for example in EXAMPLES:
training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
......@@ -263,13 +260,6 @@ class StudentTrainingAssessmentTest(CacheResetTest):
is_finished = training_api.submitter_is_finished(submission_uuid, requirements)
self.assertEqual(is_finished, bool(num_completed >= num_required))
# Assessment is finished should always be true,
# since we're not being assessed by others.
self.assertTrue(training_api.assessment_is_finished(submission_uuid, requirements))
# At no point should we receive a score!
self.assertIs(training_api.get_score(submission_uuid, requirements), None)
def _expected_example(self, input_example, rubric):
"""
Return the training example we would expect to retrieve for an example.
......@@ -323,7 +313,7 @@ class StudentTrainingAssessmentTest(CacheResetTest):
"""
pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
training_api.create_student_training_workflow(pre_submission['uuid'])
training_api.on_start(pre_submission['uuid'])
for example in examples:
training_api.get_training_example(pre_submission['uuid'], rubric, examples)
training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
......@@ -2,7 +2,6 @@
Public interface for the Assessment Workflow.
"""
import copy
import logging
from django.db import DatabaseError
......@@ -16,53 +15,14 @@ from openassessment.assessment.errors import (
from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep
from .serializers import AssessmentWorkflowSerializer
from .errors import (
AssessmentWorkflowInternalError, AssessmentWorkflowRequestError,
AssessmentWorkflowNotFoundError
)
logger = logging.getLogger(__name__)
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
"""Begins a new assessment workflow.
......@@ -157,7 +117,7 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
if step == "peer":
status = AssessmentWorkflow.STATUS.peer
try:
peer_api.create_peer_workflow(submission_uuid)
peer_api.on_start(submission_uuid)
except PeerAssessmentError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
......@@ -167,7 +127,7 @@ def create_workflow(submission_uuid, steps, rubric=None, algorithm_id=None):
elif step == "training":
status = AssessmentWorkflow.STATUS.training
try:
training_api.create_student_training_workflow(submission_uuid)
training_api.on_start(submission_uuid)
except StudentTrainingInternalError as err:
err_msg = u"Could not create assessment workflow: {}".format(err)
logger.exception(err_msg)
......
"""
Errors defined by the workflow API.
"""
import copy
class AssessmentWorkflowError(Exception):
"""An error that occurs during workflow actions.
This error is raised when the Workflow API cannot perform a requested
action.
"""
pass
class AssessmentWorkflowInternalError(AssessmentWorkflowError):
"""An error internal to the Workflow API has occurred.
This error is raised when an error occurs that is not caused by incorrect
use of the API, but rather internal implementation of the underlying
services.
"""
pass
class AssessmentWorkflowRequestError(AssessmentWorkflowError):
"""This error is raised when there was a request-specific error
This error is reserved for problems specific to the use of the API.
"""
def __init__(self, field_errors):
Exception.__init__(self, repr(field_errors))
self.field_errors = copy.deepcopy(field_errors)
class AssessmentWorkflowNotFoundError(AssessmentWorkflowError):
"""This error is raised when no submission is found for the request.
If a state is specified in a call to the API that results in no matching
Submissions, this error may be raised.
"""
pass
class AssessmentApiLoadError(AssessmentWorkflowInternalError):
"""
The assessment API could not be loaded.
"""
def __init__(self, assessment_name, api_path):
msg = u"Could not load assessment API for {} from {}".format(
assessment_name, api_path
)
super(AssessmentApiLoadError, self).__init__(msg)
"""
This is just a dummy event logger to test our ability to dynamically change this
value based on configuration. All this should go away when we start using the
edx-analytics approved library (once that's ready to be used on prod).
"""
def fake_event_logger(event):
print event
from django.db import DatabaseError
from django.test.utils import override_settings
import ddt
from mock import patch
from nose.tools import raises
......@@ -6,7 +7,6 @@ from openassessment.assessment.models import PeerWorkflow
from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.api import ai as ai_api
......@@ -15,6 +15,9 @@ from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow.models import AssessmentWorkflow
from openassessment.workflow.errors import AssessmentApiLoadError
RUBRIC_DICT = {
"criteria": [
......@@ -38,6 +41,7 @@ ITEM_1 = {
"item_type": "openassessment",
}
@ddt.ddt
class TestAssessmentWorkflowApi(CacheResetTest):
......@@ -319,8 +323,24 @@ class TestAssessmentWorkflowApi(CacheResetTest):
)
self.assertEqual(counts, updated_counts)
def _create_workflow_with_status(self, student_id, course_id, item_id,
status, answer="answer", steps=None):
@override_settings(ORA2_ASSESSMENTS={'self': 'not.a.module'})
def test_unable_to_load_api(self):
submission = sub_api.create_submission({
"student_id": "test student",
"course_id": "test course",
"item_id": "test item",
"item_type": "openassessment",
}, "test answer")
workflow_api.create_workflow(submission['uuid'], ['self'])
with self.assertRaises(AssessmentApiLoadError):
workflow_api.update_from_assessments(submission['uuid'], {})
def _create_workflow_with_status(
self, student_id, course_id, item_id,
status, answer="answer", steps=None
):
"""
Create a submission and workflow with a given status.
......@@ -338,7 +358,8 @@ class TestAssessmentWorkflowApi(CacheResetTest):
Returns:
workflow, submission
"""
if not steps: steps = ["peer", "self"]
if not steps:
steps = ["peer", "self"]
submission = sub_api.create_submission({
"student_id": student_id,
......
from django.test import TestCase
from mock import patch
from nose.tools import raises
from openassessment.workflow.models import emit_event
from openassessment.workflow.test.events import fake_event_logger
class TestEmitEvent(TestCase):
def test_emit_wired_correctly(self):
self.assertEqual(emit_event, fake_event_logger)
......@@ -26,6 +26,7 @@ from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
......@@ -217,7 +218,7 @@ class OpenAssessmentBlock(
# case we may have a score available.
try:
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
except AssessmentWorkflowError:
# Log the exception, but continue loading the page
logger.exception('An error occurred while updating the workflow on page load.')
......
......@@ -9,6 +9,8 @@ from openassessment.assessment.errors import (
PeerAssessmentRequestError, PeerAssessmentInternalError, PeerAssessmentWorkflowError
)
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
......@@ -98,7 +100,7 @@ class PeerAssessmentMixin(object):
if assessment:
self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
except AssessmentWorkflowError:
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {}".format(self.submission_uuid)
......
......@@ -64,8 +64,9 @@ class SelfAssessmentMixin(object):
# and `workflow_status` will be None.
workflow = self.get_workflow_info()
workflow_status = workflow.get('status')
self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
if workflow_status == 'waiting' or workflow_status == 'done':
if self_complete:
path = 'openassessmentblock/self/oa_self_complete.html'
elif workflow_status == 'self' or problem_closed:
assessment = self_api.get_assessment(workflow.get("submission_uuid"))
......
......@@ -157,9 +157,7 @@ function OpenAssessmentBlock(runtime, element) {
/**
Render views within the base view on page load.
**/
$(function($) {
var server = new OpenAssessment.Server(runtime, element);
var view = new OpenAssessment.BaseView(runtime, element, server);
view.load();
});
var server = new OpenAssessment.Server(runtime, element);
var view = new OpenAssessment.BaseView(runtime, element, server);
view.load();
}
......@@ -132,9 +132,7 @@ function OpenAssessmentEditor(runtime, element) {
/**
Initialize the editing interface on page load.
**/
$(function($) {
var server = new OpenAssessment.Server(runtime, element);
var view = new OpenAssessment.StudioView(runtime, element, server);
view.load();
});
var server = new OpenAssessment.Server(runtime, element);
var view = new OpenAssessment.StudioView(runtime, element, server);
view.load();
}
......@@ -77,11 +77,11 @@
<select criterion="form" option="Old-timey letters"/>
</example>
</assessment>
<assessment name="self-assessment" />
<assessment name="peer-assessment"
start="2013-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="1"
must_be_graded_by="1" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
......@@ -7,6 +7,7 @@ from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
import openassessment.workflow.api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
......@@ -182,12 +183,10 @@ class StudentTrainingMixin(object):
else:
try:
self.update_workflow_status()
except workflow_api.AssessmentWorkflowError:
logger.exception(
u"Workflow error occurred when submitting peer assessment "
u"for submission {uuid}".format(uuid=self.submission_uuid)
)
return {'success': False, 'msg': _('Could not update workflow status.')}
except AssessmentWorkflowError:
msg = _('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
return {
'success': True,
'msg': u'',
......
......@@ -6,6 +6,7 @@ from xblock.core import XBlock
from submissions import api
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .resolve_dates import DISTANT_FUTURE
......@@ -73,7 +74,7 @@ class SubmissionMixin(object):
except api.SubmissionRequestError as err:
status_tag = 'EBADFORM'
status_text = unicode(err.field_errors)
except (api.SubmissionError, workflow_api.AssessmentWorkflowError):
except (api.SubmissionError, AssessmentWorkflowError):
logger.exception("This response was not submitted.")
status_tag = 'EUNKNOWN'
else:
......
......@@ -37,9 +37,27 @@
"is_released": false
},
"self_before_peer": {
"valid": false,
"valid": true,
"assessments": [
{
"name": "self-assessment"
},
{
"name": "peer-assessment",
"must_grade": 5,
"must_be_graded_by": 3
}
],
"current_assessments": null,
"is_released": false
},
"student_training_self_then_peer": {
"valid": true,
"assessments": [
{
"name": "student-training"
},
{
"name": "self-assessment"
},
{
......
......@@ -16,7 +16,7 @@
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
<assessment name="peer-assessment" />
<assessment name="peer-assessment" />
</assessments>
</openassessment>
<openassessment>
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
......@@ -9,6 +9,7 @@ from mock import Mock, patch
from openassessment.xblock import openassessmentblock
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
......@@ -89,7 +90,7 @@ class TestOpenAssessment(XBlockHandlerTestCase):
# Simulate an error from updating the workflow
xblock.submission_uuid = 'test_submission'
with patch('openassessment.xblock.workflow_mixin.workflow_api') as mock_api:
mock_api.update_from_assessments.side_effect = workflow_api.AssessmentWorkflowError
mock_api.update_from_assessments.side_effect = AssessmentWorkflowError
xblock_fragment = self.runtime.render(xblock, "student_view")
# Expect that the page renders even if the update fails
......
......@@ -168,15 +168,36 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
@scenario('data/self_assessment_open.xml', user_id='James Brown')
def test_open_in_waiting_for_peer_step(self, xblock):
# Simulate the workflow status being "waiting"
# Currently, this implies that we've completed the self assessment module,
# but this may change in the future.
# In the peer-->self configuration, if we're done with the
# self step, but not with the peer step (because we're waiting
# to be assessed), then the self step should display as completed.
xblock.create_submission(
xblock.get_student_item_dict(), u"𝓟𝓪𝓼𝓼 𝓽𝓱𝓮 𝓹𝓮𝓪𝓼"
)
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status='waiting'
workflow_status='waiting',
status_details={
'self': {'complete': True},
'peer': {'complete': False}
}
)
@scenario('data/self_then_peer.xml', user_id="The Bee Gees")
def test_self_then_peer(self, xblock):
xblock.create_submission(
xblock.get_student_item_dict(), u"Stayin' alive!"
)
# In the self --> peer configuration, self can be complete
# if our status is "peer"
self._assert_path_and_context(
xblock, 'openassessmentblock/self/oa_self_complete.html', {},
workflow_status="peer",
status_details={
'self': {'complete': True},
'peer': {'complete': False}
}
)
@scenario('data/self_assessment_open.xml', user_id='James Brown')
......@@ -296,7 +317,8 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
def _assert_path_and_context(
self, xblock, expected_path, expected_context,
workflow_status=None, submission_uuid=None
workflow_status=None, status_details=None,
submission_uuid=None
):
"""
Render the self assessment step and verify:
......@@ -310,11 +332,19 @@ class TestSelfAssessmentRender(XBlockHandlerTestCase):
Kwargs:
workflow_status (str): If provided, simulate this status from the workflow API.
status_details (dict): If provided, simulate these status details from the workflow API.
submission_uuid (str): If provided, simulate this submission UUID for the current workflow.
"""
if workflow_status is not None:
# Assume a peer-->self flow by default
if status_details is None:
status_details = {
'peer': {'complete': workflow_status == 'done'},
'self': {'complete': workflow_status in ['waiting', 'done']}
}
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
path, context = xblock.self_path_and_context()
......
......@@ -198,14 +198,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.create_peer_workflow(submission["uuid"])
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
peer_api.create_peer_workflow(tim_sub["uuid"])
peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
......@@ -238,7 +238,7 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.create_peer_workflow(submission["uuid"])
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['self'])
# Bob assesses himself.
......@@ -281,14 +281,14 @@ class TestCourseStaff(XBlockHandlerTestCase):
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.create_peer_workflow(submission["uuid"])
peer_api.on_start(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
peer_api.create_peer_workflow(tim_sub["uuid"])
peer_api.on_start(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
......
......@@ -10,6 +10,7 @@ import pytz
from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
......@@ -56,7 +57,7 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
}
}
with patch.object(workflow_api, "update_from_assessments") as mock_workflow_update:
mock_workflow_update.side_effect = workflow_api.AssessmentWorkflowError("Oh no!")
mock_workflow_update.side_effect = AssessmentWorkflowError("Oh no!")
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were not correct due to a workflow update error.
......
......@@ -36,8 +36,7 @@ class AssessmentValidationTest(TestCase):
self.assertFalse(success)
self.assertGreater(len(msg), 0)
# Make sure only legal assessment combinations are allowed. For now, that's
# (peer -> self), and (self)
# Make sure only legal assessment combinations are allowed.
@ddt.file_data('data/assessment_combo.json')
def test_enforce_assessment_combo_restrictions(self, data):
self._assert_validation(
......
......@@ -62,14 +62,18 @@ def _is_valid_assessment_sequence(assessments):
['self-assessment'],
['peer-assessment'],
['peer-assessment', 'self-assessment'],
['self-assessment', 'peer-assessment'],
['student-training', 'peer-assessment'],
['student-training', 'peer-assessment', 'self-assessment'],
['student-training', 'self-assessment', 'peer-assessment'],
['example-based-assessment'],
['example-based-assessment', 'self-assessment'],
['example-based-assessment', 'peer-assessment'],
['example-based-assessment', 'peer-assessment', 'self-assessment'],
['example-based-assessment', 'self-assessment', 'peer-assessment'],
['example-based-assessment', 'student-training', 'peer-assessment'],
['example-based-assessment', 'student-training', 'peer-assessment', 'self-assessment'],
['example-based-assessment', 'student-training', 'self-assessment', 'peer-assessment'],
]
sequence = [asmnt.get('name') for asmnt in assessments]
......
......@@ -147,11 +147,22 @@ CACHES = {
},
}
EDX_ORA2 = {
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Configuration for the workflow API
# We use dependency injection to tell the workflow API
# which assessments to use and where to find the corresponding
# assessment API Python modules.
ORA2_ASSESSMENTS = {
'peer': 'openassessment.assessment.api.peer',
'self': 'openassessment.assessment.api.self',
'training': 'openassessment.assessment.api.student_training',
'ai': 'openassessment.assessment.api.ai',
}
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# If peer-assessment provides a score, use that;
# otherwise fall back to self-assessment.
ORA2_ASSESSMENT_SCORE_PRIORITY = ['peer', 'self', 'ai']
# Celery configuration
# Note: Version 3.1 of Celery includes Django support, but since we're using
......
......@@ -36,14 +36,9 @@ TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Install test-specific Django apps
INSTALLED_APPS += ('django_nose',)
# Store uploaded files in a test-specific directory
MEDIA_ROOT = os.path.join(BASE_DIR, 'storage/test')
EDX_ORA2["EVENT_LOGGER"] = "openassessment.workflow.test.events.fake_event_logger"
# We run Celery in "always eager" mode in the test suite,
# which executes tasks synchronously instead of using the task queue.
CELERY_ALWAYS_EAGER = True
......