Commit 07e147ed by Stephen Sanchez

Merge branch 'master' into ai-grading

parents 7b363dc5 5c30cf83
......@@ -7,7 +7,7 @@ the workflow for a given submission.
import logging
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from django.db import DatabaseError, IntegrityError
from dogapi import dog_stats_api
from openassessment.assessment.models import (
......@@ -581,13 +581,14 @@ def create_peer_workflow(submission_uuid):
"""
try:
submission = sub_api.get_submission_and_student(submission_uuid)
workflow = PeerWorkflow.objects.get_or_create(
workflow, __ = PeerWorkflow.objects.get_or_create(
student_id=submission['student_item']['student_id'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
submission_uuid=submission_uuid
)
return workflow
except IntegrityError:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
except DatabaseError:
error_message = _(
u"An internal error occurred while creating a new peer "
......@@ -596,6 +597,8 @@ def create_peer_workflow(submission_uuid):
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
workflow.save()
return workflow
def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
......
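The `IntegrityError` handling added to `create_peer_workflow` above follows the usual Django pattern for racing `get_or_create()` calls: if a concurrent request wins the insert, the loser recovers by fetching the row that now exists. A minimal, self-contained sketch of that pattern (illustrative only; `get_or_create_safely` is a hypothetical helper, not part of this change):

from django.db import IntegrityError

def get_or_create_safely(model, **lookup):
    """Fetch or create a row, tolerating a concurrent insert of the same row."""
    try:
        instance, _created = model.objects.get_or_create(**lookup)
    except IntegrityError:
        # Another request inserted the row between our SELECT and INSERT;
        # the row is guaranteed to exist now, so just fetch it.
        instance = model.objects.get(**lookup)
    return instance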
......@@ -2,7 +2,7 @@
import datetime
import pytz
from django.db import DatabaseError
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from ddt import ddt, file_data
from mock import patch
......@@ -357,6 +357,13 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(len(pwis), 1)
self.assertNotEqual(pwis[0].started_at, yesterday)
def test_peer_workflow_integrity_error(self):
tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
mock_peer.side_effect = IntegrityError("Oh no!")
workflow = peer_api.create_peer_workflow(tim_sub["uuid"])
self.assertEquals(tim_sub["uuid"], workflow.submission_uuid)
@raises(peer_api.PeerAssessmentWorkflowError)
def test_no_submission_found_closing_assessment(self):
"""
......
{% load i18n %}
{% load tz %}
{% spaceless %}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training ui-toggle-visibility">
{% endblock %}
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">{% trans "Learn to Assess Responses" %}</span>
{% if training_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
{{ training_start|utc|date:"N j, Y H:i e" }}
(in {{ training_start|timeuntil }})
</span>
</span>
{% elif training_due %}
<span class="step__deadline">due
<span class="date">
{{ training_due|utc|date:"N j, Y H:i e" }}
(in {{ training_due|timeuntil }})
</span>
</span>
{% endif %}
</span>
</h2>
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "In Progress" %}
(<span class="step__status__value--completed">{{ training_num_completed }}</span> of
<span class="step__status__value--required">{{ training_num_available }}</span>)
</span>
</span>
</span>
{% endblock %}
</header>
{% block body %}
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div id="openassessment__student-training--instructions" class="step__message message message--correct">
<h3 class="message__title">{% trans "Learning to Assess Responses" %}</h3>
<div class="message__content">
<p>{% blocktrans %}Before you begin to assess your peers' responses, you'll learn how to complete peer assessments by reviewing responses that instructors have already assessed. If you select the same options for the response that the instructor selected, you'll move to the next step. If you don't select the same options, you'll review the response and try again.{% endblocktrans %}</p>
</div>
</div>
<div id="openassessment__student-training--incorrect" class="step__message message message--incorrect is--hidden">
<h3 class="message__title">{% trans "Learning to Assess Responses" %}</h3>
<div class="message__content">
<p>{% blocktrans %}Your assessment differs from the instructor's assessment of this response. Review the response and consider why the instructor may have assessed it differently. Then, try the assessment again.{% endblocktrans %}</p>
</div>
</div>
<div class="step__content">
<article class="student-training__display" id="student-training">
<header class="student-training__display__header">
<h3 class="student-training__display__title">{% trans "Training Essay #" %}
<span class="student-training__number--current">{{ training_num_current }}</span> of
<span class="student-training__number--required">{{ training_num_available }}</span>
</h3>
</header>
<div class="student-training__display__response">
{{ training_essay|linebreaks }}
</div>
</article>
<form id="student-training--001__assessment" class="student-training__assessment" method="post">
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in training_rubric.criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="ui-toggle-visibility__content">
<div class="step__message message message--correct ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Selected Options Agree" %}</h3>
<div class="message__content">
<p>{% trans "The option you selected is the option that the instructor selected." %}</p>
</div>
</div>
<div class="step__message message message--incorrect ui-toggle-visibility is--hidden">
<h3 class="message__title">{% trans "Selected Options Differ" %}</h3>
<div class="message__content">
<p>{% trans "The option you selected is not the option that the instructor selected." %}</p>
</div>
</div>
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label">{{ option.name }}</label>
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{option.points}} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
</ol>
</div>
</li>
{% endfor %}
</ol>
</fieldset>
</form>
</div>
<div class="step__actions">
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not check your assessment" %}</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="student-training--001__assessment__submit" class="action action--submit is--disabled">
<span class="copy">{% trans "Compare your selections with the instructor's selections" %}</span>
<i class="ico icon-caret-right"></i>
</button>
</li>
</ul>
</div>
</div>
</div>
{% endblock %}
</li>
{% endspaceless %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--incomplete ui-toggle-visibility">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{% trans "Incomplete" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div class="step__message message message--incomplete">
<h3 class="message__title">{% trans "The Due Date for This Step Has Passed" %}</h3>
<div class="message__content">
<p>{% trans "This step is now closed.You can no longer continue with this assignment, and you'll receive a grade of Incomplete." %}</p>
</div>
</div>
</div>
</div>
{% endblock %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--complete is--empty is--collapsed">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{% trans "Complete" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
{% endblock %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--empty is--unavailable is--collapsed">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "Not Available" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
{% endblock %}
......@@ -138,6 +138,8 @@ def create_workflow(submission_uuid, steps):
raise AssessmentWorkflowInternalError(err_msg)
elif steps[0] == "self":
status = AssessmentWorkflow.STATUS.self
elif steps[0] == "training":
status = AssessmentWorkflow.STATUS.training
try:
workflow = AssessmentWorkflow.objects.create(
......@@ -297,7 +299,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
problem.
Examples:
>>> get_workflow_for_submission(
>>> update_from_assessments(
... '222bdf3d-a88e-11e3-859e-040ccee02800',
... {"peer": {"must_grade":5, "must_be_graded_by":3}}
... )
......@@ -321,8 +323,14 @@ def update_from_assessments(submission_uuid, assessment_requirements):
"""
workflow = _get_workflow_model(submission_uuid)
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
try:
workflow.update_from_assessments(assessment_requirements)
return _serialized_with_details(workflow, assessment_requirements)
except PeerAssessmentError as err:
err_msg = u"Could not update assessment workflow: {}".format(err)
logger.exception(err_msg)
raise AssessmentWorkflowInternalError(err_msg)
def get_status_counts(course_id, item_id, steps):
......@@ -408,6 +416,7 @@ def _get_workflow_model(submission_uuid):
return workflow
def _serialized_with_details(workflow, assessment_requirements):
"""Given a workflow and assessment requirements, return the serialized
version of an `AssessmentWorkflow` and add in the status details. See
......
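With the change above, peer API failures inside `update_from_assessments` surface to callers as `AssessmentWorkflowInternalError`. A rough sketch of caller-side handling, using the requirements format from the docstring (assuming the error class is accessible on the API module, as the `raise` in the hunk above suggests):

import logging

from openassessment.workflow import api as workflow_api

logger = logging.getLogger(__name__)

def refresh_workflow(submission_uuid):
    requirements = {"peer": {"must_grade": 5, "must_be_graded_by": 3}}
    try:
        return workflow_api.update_from_assessments(submission_uuid, requirements)
    except workflow_api.AssessmentWorkflowInternalError:
        # Database and peer API errors are re-raised by the API as internal errors.
        logger.exception("Could not update the workflow for %s", submission_uuid)
        return None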
......@@ -50,6 +50,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
STEPS = [
"peer", # User needs to assess peer submissions
"self", # User needs to assess themselves
"training", # User needs to practice grading using example essays
]
STATUSES = [
......@@ -150,6 +151,11 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
self.STATUS.waiting # if nothing's left to complete, we're waiting
)
# If the submitter is beginning peer assessment, add them to the queue
# by creating a new peer workflow
if new_status == "peer":
peer_api.create_peer_workflow(self.submission_uuid)
# If the submitter has done all they need to do, let's check to see if
# all steps have been fully assessed (i.e. we can score it).
if (new_status == self.STATUS.waiting and
......@@ -262,11 +268,14 @@ class AssessmentWorkflowStep(models.Model):
"""
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import student_training
api = None
if self.name == AssessmentWorkflow.STATUS.self:
api = self_api
elif self.name == AssessmentWorkflow.STATUS.peer:
api = peer_api
elif self.name == AssessmentWorkflow.STATUS.training:
api = student_training
return api
def update(self, submission_uuid, assessment_requirements):
......
......@@ -8,7 +8,7 @@
}
}
},
"both": {
"peer_and_self": {
"steps": ["peer", "self"],
"requirements": {
"peer": {
......@@ -23,5 +23,40 @@
"requirements": {
"self": {}
}
},
"training_peer": {
"steps": ["training", "peer"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
},
"training_self": {
"steps": ["training", "self"],
"requirements": {
"training": {
"num_required": 2
},
"self": {}
}
},
"training_peer_self": {
"steps": ["training", "peer", "self"],
"requirements": {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
},
"self": {}
}
}
}
\ No newline at end of file
......@@ -9,6 +9,7 @@ from openassessment.test_utils import CacheResetTest
from openassessment.workflow.models import AssessmentWorkflow
from submissions.models import Submission
import openassessment.workflow.api as workflow_api
from openassessment.assessment.models import StudentTrainingWorkflow
import submissions.api as sub_api
ITEM_1 = {
......@@ -43,6 +44,56 @@ class TestAssessmentWorkflowApi(CacheResetTest):
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
# Test that the peer workflow is or is not created, depending on whether
# peer is a step in the workflow.
if "peer" == first_step:
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
else:
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
def test_update_peer_workflow(self):
submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
workflow = workflow_api.create_workflow(submission["uuid"], ["training", "peer"])
StudentTrainingWorkflow.get_or_create_workflow(submission_uuid=submission["uuid"])
requirements = {
"training": {
"num_required": 2
},
"peer": {
"must_grade": 5,
"must_be_graded_by": 3
}
}
workflow_keys = set(workflow.keys())
self.assertEqual(
workflow_keys,
{
'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
}
)
self.assertEqual(workflow["submission_uuid"], submission["uuid"])
self.assertEqual(workflow["status"], "training")
peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
self.assertFalse(peer_workflows)
workflow_from_get = workflow_api.get_workflow_for_submission(
submission["uuid"], requirements
)
del workflow_from_get['status_details']
self.assertEqual(workflow, workflow_from_get)
requirements["training"]["num_required"] = 0
workflow = workflow_api.update_from_assessments(submission["uuid"], requirements)
# New step is Peer, and a Workflow has been created.
self.assertEqual(workflow["status"], "peer")
peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
self.assertIsNotNone(peer_workflow)
@ddt.file_data('data/assessments.json')
def test_need_valid_submission_uuid(self, data):
# submission doesn't exist
......
"""
Data Conversion utility methods for handling ORA2 XBlock data transformations.
"""
def convert_training_examples_list_to_dict(examples_list):
"""
Convert the ordered list of options selected that we store in the
problem definition to the unordered dictionary of options selected
that the student training API expects.
Args:
examples_list (list): A list of training examples, each with an answer
and the options selected against a rubric.
Returns:
A list of dictionaries representing the given examples.
Example:
>>> examples = [
>>> {
>>> "answer": "This is my response",
>>> "options_selected": [
>>> {
>>> "criterion": "Ideas",
>>> "option": "Fair"
>>> },
>>> {
>>> "criterion": "Content",
>>> "option": "Good"
>>> }
>>> ]
>>> }
>>> ]
>>> convert_training_examples_list_to_dict(examples)
[
{
'answer': 'This is my response',
'options_selected': {
'Ideas': 'Fair',
'Content': 'Good'
}
}
]
"""
return [
{
'answer': ex['answer'],
'options_selected': {
select_dict['criterion']: select_dict['option']
for select_dict in ex['options_selected']
}
}
for ex in examples_list
]
\ No newline at end of file
......@@ -64,6 +64,40 @@ DEFAULT_RUBRIC_FEEDBACK_PROMPT = """
(Optional) What aspects of this response stood out to you? What did it do well? How could it improve?
"""
DEFAULT_EXAMPLE_ANSWER = "Replace this text with a sample response for this assignment. You'll assess this sample response in the courseware, and students will then learn to assess responses by assessing this response and comparing the options that they select in the rubric with the options that you selected."
DEFAULT_STUDENT_TRAINING = {
"name": "student-training",
"start": None,
"due": None,
"examples": [
{
"answer": DEFAULT_EXAMPLE_ANSWER,
"options_selected": [
{
"criterion": "Ideas",
"option": "Fair"
},
{
"criterion": "Content",
"option": "Good"
}
]
},
{
"answer": DEFAULT_EXAMPLE_ANSWER,
"options_selected": [
{
"criterion": "Ideas",
"option": "Poor"
},
{
"criterion": "Content",
"option": "Good"
}
]
}
]
}
# The Default Peer Assessment is created as an example of how this XBlock can be
# configured. If no configuration is specified, this is the default assessment
......@@ -82,6 +116,7 @@ DEFAULT_SELF_ASSESSMENT = {
}
DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_STUDENT_TRAINING,
DEFAULT_PEER_ASSESSMENT,
DEFAULT_SELF_ASSESSMENT,
]
......
......@@ -25,6 +25,7 @@ from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
......@@ -36,9 +37,15 @@ UI_MODELS = {
"submission": {
"name": "submission",
"class_id": "openassessment__response",
"navigation_text": "Your response to this problem",
"navigation_text": "Your response to this assignment",
"title": "Your Response"
},
"student-training": {
"name": "student-training",
"class_id": "openassessment__student-training",
"navigation_text": "Learn to assess responses",
"title": "Learn to Assess"
},
"peer-assessment": {
"name": "peer-assessment",
"class_id": "openassessment__peer-assessment",
......@@ -54,12 +61,13 @@ UI_MODELS = {
"grade": {
"name": "grade",
"class_id": "openassessment__grade",
"navigation_text": "Your grade for this problem",
"navigation_text": "Your grade for this assignment",
"title": "Your Grade:"
}
}
VALID_ASSESSMENT_TYPES = [
"student-training",
"peer-assessment",
"self-assessment",
]
......@@ -80,8 +88,10 @@ class OpenAssessmentBlock(
GradeMixin,
StaffInfoMixin,
WorkflowMixin,
LmsCompatibilityMixin):
"""Displays a question and gives an area where students can compose a response."""
StudentTrainingMixin,
LmsCompatibilityMixin
):
"""Displays a prompt and provides an area where students can compose a response."""
submission_start = String(
default=None, scope=Scope.settings,
......
......@@ -45,6 +45,85 @@
"output": "oa_response.html"
},
{
"template": "openassessmentblock/student_training/student_training.html",
"context": {
"training_essay": "My special essay.",
"training_rubric": {
"criteria": [
{
"name": "Criterion 1",
"prompt": "Prompt 1",
"order_num": 0,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 2",
"prompt": "Prompt 2",
"order_num": 1,
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
},
{
"name": "Criterion 3",
"prompt": "Prompt 3",
"order_num": 2,
"feedback": "optional",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor"
},
{
"order_num": 1,
"points": 1,
"name": "Fair"
},
{
"order_num": 2,
"points": 2,
"name": "Good"
}
]
}
]
}
},
"output": "oa_student_training.html"
},
{
"template": "openassessmentblock/self/oa_self_assessment.html",
"context": {
"rubric_criteria": [
......
......@@ -10,6 +10,7 @@ describe("OpenAssessment.BaseView", function() {
// Dummy fragments to return from the render func
this.fragments = {
submission: readFixtures("oa_response.html"),
student_training: readFixtures("oa_student_training.html"),
self_assessment: readFixtures("oa_self_assessment.html"),
peer_assessment: readFixtures("oa_peer_assessment.html"),
grade: readFixtures("oa_grade_complete.html")
......@@ -69,10 +70,10 @@ describe("OpenAssessment.BaseView", function() {
it("Loads each step", function() {
loadSubviews(function() {
expect(server.fragmentsLoaded).toContain("submission");
expect(server.fragmentsLoaded).toContain("student_training");
expect(server.fragmentsLoaded).toContain("self_assessment");
expect(server.fragmentsLoaded).toContain("peer_assessment");
expect(server.fragmentsLoaded).toContain("grade");
});
});
});
......@@ -13,7 +13,7 @@ describe("OpenAssessment.PeerView", function() {
).promise();
this.peerAssess = function(optionsSelected, feedback) {
return $.Deferred(function(defer) { defer.resolve(); }).promise();
return successPromise;
};
this.render = function(step) {
......
......@@ -85,7 +85,7 @@ describe("OpenAssessment.Server", function() {
});
});
it("sends an assessment to the XBlock", function() {
it("sends a peer-assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: ''});
var success = false;
......@@ -107,6 +107,29 @@ describe("OpenAssessment.Server", function() {
});
});
it("sends a training assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: '', correct: true});
var success = false;
var corrections = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.trainingAssess(options).done(
function(result) {
success = true;
corrections = result;
}
);
expect(success).toBe(true);
expect(corrections).toBeUndefined();
expect($.ajax).toHaveBeenCalledWith({
url: '/training_assess',
type: "POST",
data: JSON.stringify({
options_selected: options
})
});
});
it("Sends feedback on an assessment to the XBlock", function() {
stubAjax(true, {success: true, msg: ''});
......@@ -297,6 +320,28 @@ describe("OpenAssessment.Server", function() {
expect(receivedMsg).toContain("This assessment could not be submitted");
});
it("informs the caller of a server error when sending a training example assessment", function() {
stubAjax(true, {success: false, msg: "Test error!"});
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.trainingAssess(options).fail(function(msg) {
receivedMsg = msg;
});
expect(receivedMsg).toEqual("Test error!");
});
it("informs the caller of an AJAX error when sending a training example assessment", function() {
stubAjax(false, null);
var receivedMsg = null;
var options = {clarity: "Very clear", precision: "Somewhat precise"};
server.trainingAssess(options).fail(function(msg) {
receivedMsg = msg;
});
expect(receivedMsg).toContain("This assessment could not be submitted");
});
it("informs the caller of an AJAX error when checking whether the XBlock has been released", function() {
stubAjax(false, null);
......
/**
Tests for OpenAssessment Student Training view.
**/
describe("OpenAssessment.StudentTrainingView", function() {
// Stub server
var StubServer = function() {
var successPromise = $.Deferred(
function(defer) { defer.resolve(); }
).promise();
this.render = function(step) {
return successPromise;
};
this.trainingAssess = function() {
var server = this;
return $.Deferred(function(defer) {
defer.resolveWith(server, [server.corrections]);
}).promise();
};
// The corrections returned by the stub server.
// Tests can update this property to control
// the behavior of the stub.
this.corrections = {};
};
// Stub base view
var StubBaseView = function() {
this.showLoadError = function(msg) {};
this.toggleActionError = function(msg, step) {};
this.setUpCollapseExpand = function(sel) {};
this.scrollToTop = function() {};
this.loadAssessmentModules = function() {};
};
// Stubs
var baseView = null;
var server = null;
// View under test
var view = null;
beforeEach(function() {
// Load the DOM fixture
jasmine.getFixtures().fixturesPath = 'base/fixtures';
loadFixtures('oa_student_training.html');
// Create a new stub server
server = new StubServer();
// Create the stub base view
baseView = new StubBaseView();
// Create the object under test
var el = $("#openassessment-base").get(0);
view = new OpenAssessment.StudentTrainingView(el, server, baseView);
view.installHandlers();
});
it("submits an assessment for a training example", function() {
server.corrections = {
"Criterion 1": "Good",
"Criterion 2": "Poor",
"Criterion 3": "Fair"
};
spyOn(server, 'trainingAssess').andCallThrough();
// Select rubric options
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.rubric.optionsSelected(optionsSelected);
// Submit the assessment
view.assess();
// Expect that the assessment was sent to the server
expect(server.trainingAssess).toHaveBeenCalledWith(optionsSelected);
});
it("disable the assess button when the user submits", function() {
server.corrections = {
"Criterion 1": "Good",
"Criterion 2": "Poor",
"Criterion 3": "Fair"
};
// Initially, the button should be disabled
expect(view.assessButtonEnabled()).toBe(false);
// Select options and submit an assessment
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.rubric.optionsSelected(optionsSelected);
// Enable the button (we do this manually to avoid dealing with async change handlers)
view.assessButtonEnabled(true);
// Submit the assessment
view.assess();
// The button should be disabled after submission
expect(view.assessButtonEnabled()).toBe(false);
});
it("reloads the assessment steps when the user submits an assessment", function() {
// Simulate that the user answered the problem correctly, so there are no corrections
server.corrections = {};
spyOn(server, 'trainingAssess').andCallThrough();
spyOn(baseView, 'loadAssessmentModules').andCallThrough();
// Select rubric options
var optionsSelected = {};
optionsSelected['Criterion 1'] = 'Poor';
optionsSelected['Criterion 2'] = 'Fair';
optionsSelected['Criterion 3'] = 'Good';
view.rubric.optionsSelected(optionsSelected);
// Submit the assessment
view.assess();
// Expect that the assessment was sent to the server
expect(server.trainingAssess).toHaveBeenCalledWith(optionsSelected);
// Expect that the steps were reloaded
expect(baseView.loadAssessmentModules).toHaveBeenCalled();
});
});
......@@ -15,6 +15,7 @@ OpenAssessment.BaseView = function(runtime, element, server) {
this.server = server;
this.responseView = new OpenAssessment.ResponseView(this.element, this.server, this);
this.trainingView = new OpenAssessment.StudentTrainingView(this.element, this.server, this);
this.selfView = new OpenAssessment.SelfView(this.element, this.server, this);
this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
......@@ -75,6 +76,7 @@ OpenAssessment.BaseView.prototype = {
performed by the user.
**/
loadAssessmentModules: function() {
this.trainingView.load();
this.peerView.load();
this.selfView.load();
this.gradeView.load();
......@@ -94,7 +96,7 @@ OpenAssessment.BaseView.prototype = {
if (type == 'save') {
container = '.response__submission__actions';
}
else if (type == 'submit' || type == 'peer' || type == 'self') {
else if (type == 'submit' || type == 'peer' || type == 'self' || type == 'student-training') {
container = '.step__actions';
}
else if (type == 'feedback_assess') {
......
......@@ -31,7 +31,7 @@ OpenAssessment.PeerView.prototype = {
view.installHandlers(false);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
view.baseView.showLoadError('peer-assessment');
});
},
......@@ -50,7 +50,7 @@ OpenAssessment.PeerView.prototype = {
view.installHandlers(true);
}
).fail(function(errMsg) {
view.showLoadError('peer-assessment');
view.baseView.showLoadError('peer-assessment');
});
},
......
......@@ -106,5 +106,35 @@ OpenAssessment.Rubric.prototype = {
callback(canSubmit);
}
);
},
/**
Updates the rubric to display positive and negative messages on each
criterion. For each correction provided, the associated criterion will have
an appropriate message displayed.
Args:
corrections (object): Maps criterion names to the instructor-selected
options for criteria where the student's selection did not match.
Returns:
True if there were errors found, False if there are no corrections.
**/
showCorrections: function(corrections) {
var selector = "input[type=radio]";
var hasErrors = false;
// Display appropriate messages for each selection
$(selector, this.element).each(function(index, sel) {
var listItem = $(sel).parents(".assessment__rubric__question");
if (corrections.hasOwnProperty(sel.name)) {
hasErrors = true;
listItem.find('.message--incorrect').removeClass('is--hidden');
listItem.find('.message--correct').addClass('is--hidden');
} else {
listItem.find('.message--correct').removeClass('is--hidden');
listItem.find('.message--incorrect').addClass('is--hidden');
}
});
return hasErrors;
}
};
......@@ -296,6 +296,47 @@ OpenAssessment.Server.prototype = {
},
/**
Assess an instructor-provided training example.
Args:
optionsSelected (object literal): Keys are criteria names,
values are the option text the user selected for the criterion.
Returns:
A JQuery promise, which resolves with a list of corrections if
successful and fails with an error message otherwise.
Example:
var options = { clarity: "Very clear", precision: "Somewhat precise" };
server.trainingAssess(options).done(
function(corrections) { console.log("Success!"); }
alert(corrections);
).fail(
function(errorMsg) { console.log(errorMsg); }
);
**/
trainingAssess: function(optionsSelected) {
var url = this.url('training_assess');
var payload = JSON.stringify({
options_selected: optionsSelected
});
return $.Deferred(function(defer) {
$.ajax({ type: "POST", url: url, data: payload }).done(
function(data) {
if (data.success) {
defer.resolveWith(this, [data.corrections]);
}
else {
defer.rejectWith(this, [data.msg]);
}
}
).fail(function(data) {
defer.rejectWith(this, [gettext('This assessment could not be submitted.')]);
});
});
},
/**
Load the XBlock's XML definition from the server.
Returns:
......
/**
Interface for student training view.
Args:
element (DOM element): The DOM element representing the XBlock.
server (OpenAssessment.Server): The interface to the XBlock server.
baseView (OpenAssessment.BaseView): Container view.
Returns:
OpenAssessment.StudentTrainingView
**/
OpenAssessment.StudentTrainingView = function(element, server, baseView) {
this.element = element;
this.server = server;
this.baseView = baseView;
this.rubric = null;
};
OpenAssessment.StudentTrainingView.prototype = {
/**
Load the student training view.
**/
load: function() {
var view = this;
this.server.render('student_training').done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__student-training', view.element).replaceWith(html);
view.installHandlers();
}
).fail(function(errMsg) {
view.baseView.showLoadError('student-training');
});
},
/**
Install event handlers for the view.
**/
installHandlers: function() {
var sel = $("#openassessment__student-training", this.element);
var view = this;
// Install a click handler for collapse/expand
this.baseView.setUpCollapseExpand(sel);
// Initialize the rubric
var rubricSelector = $("#student-training--001__assessment", this.element);
if (rubricSelector.size() > 0) {
var rubricElement = rubricSelector.get(0);
this.rubric = new OpenAssessment.Rubric(rubricElement);
}
// Install a change handler for rubric options to enable/disable the submit button
if (this.rubric !== null) {
this.rubric.canSubmitCallback($.proxy(this.assessButtonEnabled, this));
}
// Install a click handler for submitting the assessment
sel.find('#student-training--001__assessment__submit').click(
function(eventObject) {
// Override default form submission
eventObject.preventDefault();
// Handle the click
view.assess();
}
);
},
/**
Submit an assessment for the training example.
**/
assess: function() {
// Immediately disable the button to prevent resubmission
this.assessButtonEnabled(false);
var options = {};
if (this.rubric !== null) {
options = this.rubric.optionsSelected();
}
var view = this;
var baseView = this.baseView;
this.server.trainingAssess(options).done(
function(corrections) {
var incorrect = $("#openassessment__student-training--incorrect", this.element);
var instructions = $("#openassessment__student-training--instructions", this.element);
if (!view.rubric.showCorrections(corrections)) {
baseView.loadAssessmentModules();
incorrect.addClass("is--hidden");
instructions.removeClass("is--hidden");
} else {
instructions.addClass("is--hidden");
incorrect.removeClass("is--hidden");
}
baseView.scrollToTop();
}
).fail(function(errMsg) {
// Display the error
baseView.toggleActionError('student-training', errMsg);
// Re-enable the button to allow the user to resubmit
view.assessButtonEnabled(true);
});
},
/**
Enable/disable the submit training assessment button.
If called with no argument, check whether the assessment button is enabled.
Args:
enabled (bool): If specified, set the state of the button.
Returns:
bool: Whether the button is enabled.
Examples:
>> view.assessButtonEnabled(true); // enable the button
>> view.assessButtonEnabled(); // check whether the button is enabled
>> true
**/
assessButtonEnabled: function(isEnabled) {
var button = $('#student-training--001__assessment__submit', this.element);
if (typeof isEnabled === 'undefined') {
return !button.hasClass('is--disabled');
} else {
button.toggleClass('is--disabled', !isEnabled);
}
}
};
......@@ -44,7 +44,6 @@
@include transition(opacity $tmg-f1 ease-in 0, max-height $tmg-f2 ease-in 0);
@extend %trans-opacity;
max-height: ($baseline-v*2000);
overflow: auto;
opacity: 1.0;
padding-left: ($baseline-h/20);
padding-right: ($baseline-h/20);
......
......@@ -73,4 +73,98 @@
.student__answer__display__content p {
color: inherit;
}
}
\ No newline at end of file
}
// --------------------
// Developer Styles for Student Training
// --------------------
.step--student-training {
// submission
.student-training__display {
@extend %ui-subsection;
}
.student-training__display__header {
@include clearfix();
}
.student-training__display__title {
@extend %t-heading;
margin-bottom: ($baseline-v/2);
color: $heading-secondary-color;
}
.student-training__display__response {
@extend %ui-subsection-content;
@extend %copy-3;
@extend %ui-content-longanswer;
@extend %ui-well;
color: $copy-color;
}
// assessment form
.student-training__assessment {
// fields
.assessment__fields {
margin-bottom: $baseline-v;
}
// rubric question
.assessment__rubric__question {
@extend %ui-rubric-question;
}
// rubric options
.question__answers {
@extend %ui-rubric-answers;
overflow: visible; // needed for ui-hints
}
// general feedback question
.assessment__rubric__question--feedback {
textarea {
@extend %ui-content-longanswer;
min-height: ($baseline-v*5);
}
}
}
// TYPE: correct
.message--correct {
@extend .message--complete;
margin-top: $baseline-v;
&.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0;
.step__header {
border-bottom: none;
}
}
}
// TYPE: incorrect
.message--incorrect {
@extend .message--incomplete;
margin-top: $baseline-v;
&.is--hidden {
height: 0;
width: 0;
padding: 0;
margin: 0;
.step__header {
border-bottom: none;
}
}
}
}
......@@ -63,8 +63,22 @@
</feedbackprompt>
</rubric>
<assessments>
<assessment name="student-training">
<example>
<answer>Censorship is terrible. I don't like it at all.</answer>
<select criterion="concise" option="Matsuo Basho" />
<select criterion="clear-headed" option="Eric" />
<select criterion="form" option="IRC"/>
</example>
<example>
<answer>I love censorship.</answer>
<select criterion="concise" option="Matsuo Basho" />
<select criterion="clear-headed" option="Ian" />
<select criterion="form" option="Old-timey letters"/>
</example>
</assessment>
<assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
start="2013-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="1"
must_be_graded_by="1" />
......
"""
Student training step in the OpenAssessment XBlock.
"""
import logging
from django.utils.translation import ugettext as _
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
class StudentTrainingMixin(object):
"""
Student training is a step that allows students to practice
assessing example essays provided by the course author.
1) A student is shown an example essay.
2) The student scores the example essay.
a) If the student's scores match the instructor's scores,
the student is shown the next example. If there are no
more examples, the step is marked complete.
b) If the student's scores do NOT match the instructor's scores,
the student is prompted to retry.
"""
@XBlock.handler
def render_student_training(self, data, suffix=''): # pylint:disable=W0613
"""
Render the student training step.
Args:
data: Not used.
Kwargs:
suffix: Not used.
Returns:
unicode: HTML content of the grade step
"""
if "student-training" not in self.assessment_steps:
return Response(u"")
try:
path, context = self.training_path_and_context()
except: # pylint:disable=W0702
msg = u"Could not render student training step for submission {}".format(self.submission_uuid)
logger.exception(msg)
return self.render_error(_(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def training_path_and_context(self):
"""
Return the template path and context used to render the student training step.
Returns:
tuple of `(path, context)` where `path` is the path to the template and
`context` is a dict.
"""
# Retrieve the status of the workflow.
# If no submissions have been created yet, the status will be None.
workflow_status = self.get_workflow_info().get('status')
problem_closed, reason, start_date, due_date = self.is_closed(step="student-training")
context = {}
template = 'openassessmentblock/student_training/student_training_unavailable.html'
if not workflow_status:
return template, context
# If the student has completed the training step, then show that the step is complete.
# We put this condition first so that if a student has completed the step, it *always*
# shows as complete.
# We're assuming here that the training step always precedes the other assessment steps
# (peer/self) -- we may need to make this more flexible later.
if workflow_status and workflow_status != "training":
template = 'openassessmentblock/student_training/student_training_complete.html'
# If the problem is closed, then do not allow students to access the training step
elif problem_closed and reason == 'start':
context['training_start'] = start_date
template = 'openassessmentblock/student_training/student_training_unavailable.html'
elif problem_closed and reason == 'due':
context['training_due'] = due_date
template = 'openassessmentblock/student_training/student_training_closed.html'
# If we're on the training step, show the student an example
# We do this last so we can avoid querying the student training API if possible.
else:
training_module = self.get_assessment_module('student-training')
if not training_module:
return template, context
if due_date < DISTANT_FUTURE:
context['training_due'] = due_date
# Report progress in the student training workflow (completed X out of Y)
context['training_num_available'] = len(training_module["examples"])
context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
context['training_num_current'] = context['training_num_completed'] + 1
# Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected.
examples = convert_training_examples_list_to_dict(training_module["examples"])
example = student_training.get_training_example(
self.submission_uuid,
{
'prompt': self.prompt,
'criteria': self.rubric_criteria
},
examples
)
context['training_essay'] = example['answer']
context['training_rubric'] = example['rubric']
template = 'openassessmentblock/student_training/student_training.html'
return template, context
@XBlock.json_handler
def training_assess(self, data, suffix=''): # pylint:disable=W0613
"""
Compare the scores given by the student with those given by the course author.
If they match, update the training workflow. The client can then reload this
step to view the next essay or the completed step.
We return the corrections provided by the student training API: the exact
criteria that the student scored incorrectly, along with the "correct" options
for those criteria. In the future, we may expose more of this detail in the UI
to provide richer feedback.
Args:
data (dict): Must have the following keys:
options_selected (dict): Dictionary mapping criterion names to option values.
Returns:
Dict with keys:
* "success" (bool) indicating success or error
* "msg" (unicode) containing additional information if an error occurs.
* "correct" (bool) indicating whether the student scored the assessment correctly.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
if not isinstance(data['options_selected'], dict):
return {'success': False, 'msg': _(u"options_selected must be a dictionary")}
# Check the student's scores against the course author's scores.
# This implicitly updates the student training workflow (which example essay is shown)
# as well as the assessment workflow (training/peer/self steps).
try:
corrections = student_training.assess_training_example(
self.submission_uuid, data['options_selected']
)
except (student_training.StudentTrainingRequestError, student_training.StudentTrainingInternalError) as ex:
return {
'success': False,
'msg': _(u"Your scores could not be checked: {error}.").format(error=ex.message)
}
except:
return {
'success': False,
'msg': _(u"An unexpected error occurred.")
}
else:
return {
'success': True,
'msg': u'',
'corrections': corrections,
}
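For reference, the `training_assess` handler above accepts and returns JSON shaped like the following (criterion and option names taken from the scenario XML and tests later in this commit; this is a sketch, not additional handler code):

import json

# Request body posted to the `training_assess` handler:
payload = json.dumps({
    "options_selected": {
        "Vocabulary": "Good",
        "Grammar": "Excellent",
    }
})

# A successful response looks like:
#   {"success": true, "msg": "", "corrections": {}}
# where a non-empty "corrections" dict maps criterion names to the
# options the course author selected for that criterion.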
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training">
<example>
<answer>This is my answer.</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>тєѕт αηѕωєя</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training" due="2000-01-01">
<example>
<answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒐𝒏 𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training" start="3000-01-01">
<example>
<answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒐𝒏 𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
{
"simple": {
"expected_template": "openassessmentblock/student_training/student_training.html",
"expected_context": {
"training_num_completed": 0,
"training_num_current": 1,
"training_num_available": 2,
"training_essay": "This is my answer.",
"training_rubric": {
"id": 2,
"content_hash": "de2bb2b7e2c6e3df014e53b8c65f37d511cc4344",
"criteria": [
{
"order_num": 0,
"name": "Vocabulary",
"prompt": "How varied is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job"
},
{
"order_num": 2,
"points": 3,
"name": "Excellent",
"explanation": "Excellent job"
}
],
"points_possible": 3
},
{
"order_num": 1,
"name": "Grammar",
"prompt": "How correct is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job"
},
{
"order_num": 2,
"points": 3,
"name": "Excellent",
"explanation": "Excellent job"
}
],
"points_possible": 3
}
],
"points_possible": 6
}
}
}
}
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Tests for the student training step in the Open Assessment XBlock.
"""
import datetime
import ddt
import json
from mock import patch
import pytz
from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow
from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class StudentTrainingAssessTest(XBlockHandlerTestCase):
"""
Tests for student training assessment.
"""
SUBMISSION = {
'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.'
}
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_correct(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment
# (as defined in the scenario XML)
data = {
'options_selected': {
'Vocabulary': 'Good',
'Grammar': 'Excellent'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['corrections'])
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_incorrect(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Disagree with the course author's assessment
# (as defined in the scenario XML)
select_data = {
'options_selected': {
'Vocabulary': 'Poor',
'Grammar': 'Poor'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(select_data), response_format='json')
# Expect that we were marked incorrect
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertTrue(resp['corrections'])
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_updates_workflow(self, xblock, data):
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, expected_template, expected_context)
# Agree with the course author's assessment
# (as defined in the scenario XML)
selected_data = {
'options_selected': {
'Vocabulary': 'Good',
'Grammar': 'Excellent'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['corrections'])
# Agree with the course author's assessment
# (as defined in the scenario XML)
selected_data = {
'options_selected': {
'Vocabulary': 'Excellent',
'Grammar': 'Poor'
}
}
expected_context["training_num_completed"] = 1
expected_context["training_num_current"] = 2
expected_context["training_essay"] = u"тєѕт αηѕωєя"
self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['corrections'])
expected_context = {}
expected_template = "openassessmentblock/student_training/student_training_complete.html"
self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_request_error(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps({}), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
selected_data = {
'options_selected': "foo"
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
@scenario('data/student_training.xml', user_id="Plato")
@ddt.file_data('data/student_training_mixin.json')
def test_invalid_options_dict(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
selected_data = {
'options_selected': {
'Bananas': 'Excellent',
'Grammar': 'Poor'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
selected_data = {
'options_selected': {
'Vocabulary': 'Excellent',
'Grammar': 'Poor'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data))
self.assertIn("Your scores could not be checked", resp.decode('utf-8'))
def _assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
class StudentTrainingRenderTest(StudentTrainingAssessTest):
"""
Tests for student training step rendering.
"""
@scenario('data/basic_scenario.xml', user_id="Plato")
def test_no_student_training_defined(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertEquals("", resp.decode('utf-8'))
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("Not Available", resp.decode('utf-8'))
@scenario('data/student_training.xml')
def test_studio_preview(self, xblock):
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("Not Available", resp.decode('utf-8'))
@scenario('data/student_training_due.xml', user_id="Plato")
def test_past_due(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_template = "openassessmentblock/student_training/student_training_closed.html"
expected_context = {
'training_due': "2000-01-01T00:00:00+00:00"
}
self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
@patch.object(StudentTrainingWorkflow, "get_or_create_workflow")
def test_internal_error(self, xblock, mock_workflow):
mock_workflow.side_effect = DatabaseError("Oh no.")
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("An unexpected error occurred.", resp.decode('utf-8'))
@scenario('data/student_training_future.xml', user_id="Plato")
def test_before_start(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_template = "openassessmentblock/student_training/student_training_unavailable.html"
expected_context = {
'training_start': datetime.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
}
self._assert_path_and_context(xblock, expected_template, expected_context)
......@@ -6,6 +6,7 @@ from django.utils.translation import ugettext as _
from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric
from openassessment.assessment.api.student_training import validate_training_examples
from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
def _match_by_order(items, others):
......@@ -228,27 +229,15 @@ def _validate_assessment_examples(rubric_dict, assessments):
for asmnt in assessments:
if asmnt['name'] == 'student-training':
# Convert of options selected we store in the problem def,
# which is ordered, to the unordered dictionary of options
# selected that the student training API expects.
examples = [
{
'answer': ex['answer'],
'options_selected': {
select_dict['criterion']: select_dict['option']
for select_dict in ex['options_selected']
}
}
for ex in asmnt['examples']
]
examples = convert_training_examples_list_to_dict(asmnt['examples'])
# Delegate to the student training API to validate the
# examples against the rubric.
errors = validate_training_examples(rubric_dict, examples)
if errors:
return (False, "\n".join(errors))
return False, "\n".join(errors)
return (True, u'')
return True, u''
def validator(oa_block, strict_post_release=True):
......
"""
Handle OpenAssessment XBlock requests to the Workflow API.
"""
from xblock.core import XBlock
from openassessment.workflow import api as workflow_api
class WorkflowMixin(object):
"""
Handle OpenAssessment XBlock requests to the Workflow API.
"""
# Dictionary mapping assessment names (e.g. peer-assessment)
# to the corresponding workflow step names.
ASSESSMENT_STEP_NAMES = {
"self-assessment": "self",
"peer-assessment": "peer",
"student-training": "training",
}
@XBlock.json_handler
def handle_workflow_info(self, data, suffix=''):
def handle_workflow_info(self, data, suffix=''): # pylint:disable=W0613
"""
Retrieve the current state of the workflow.
Args:
data: Unused
Kwargs:
suffix: Unused
Returns:
dict
"""
return self.get_workflow_info()
def create_workflow(self, submission_uuid):
"""
Create a new workflow for a student submission.
Args:
submission_uuid (str): The UUID of the submission to associate
with the workflow.
Returns:
None
"""
steps = self._create_step_list()
workflow_api.create_workflow(submission_uuid, steps)
def _create_step_list(self):
def _convert_rubric_assessment_name(ra_name):
"""'self-assessment' -> 'self', 'peer-assessment' -> 'peer'"""
short_name, suffix = ra_name.split("-")
return short_name
# rubric_assessments stores names as "self-assessment",
# "peer-assessment", while the model is expecting "self", "peer".
# Therefore, this conversion step. We should refactor later to
# standardize.
return [
_convert_rubric_assessment_name(ra["name"])
for ra in self.valid_assessments
]
def workflow_requirements(self):
"""
Retrieve the requirements from each assessment module
......@@ -34,18 +58,24 @@ class WorkflowMixin(object):
Returns:
dict
"""
assessment_ui_model = self.get_assessment_module('peer-assessment')
requirements = {}
if not assessment_ui_model:
return {}
peer_assessment_module = self.get_assessment_module('peer-assessment')
if peer_assessment_module:
requirements["peer"] = {
"must_grade": peer_assessment_module["must_grade"],
"must_be_graded_by": peer_assessment_module["must_be_graded_by"]
}
return {
"peer": {
"must_grade": assessment_ui_model["must_grade"],
"must_be_graded_by": assessment_ui_model["must_be_graded_by"]
training_module = self.get_assessment_module('student-training')
if training_module:
requirements["training"] = {
"num_required": len(training_module["examples"])
}
}
return requirements
def update_workflow_status(self, submission_uuid=None):
"""
......@@ -116,3 +146,20 @@ class WorkflowMixin(object):
)
num_submissions = sum(item['count'] for item in status_counts)
return status_counts, num_submissions
def _create_step_list(self):
"""
Return a list of valid workflow step names.
This translates between the assessment types (loaded from the problem definition)
and the step types (used by the Workflow API).
At some point, we should probably refactor to make these two names consistent.
Returns:
list
"""
return [
self.ASSESSMENT_STEP_NAMES.get(ra['name'])
for ra in self.valid_assessments
if ra['name'] in self.ASSESSMENT_STEP_NAMES
]
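As a quick illustration of the translation `_create_step_list` performs (names taken from `ASSESSMENT_STEP_NAMES` above; this snippet is standalone, not part of the mixin):

ASSESSMENT_STEP_NAMES = {
    "self-assessment": "self",
    "peer-assessment": "peer",
    "student-training": "training",
}

valid_assessments = [{"name": "student-training"}, {"name": "peer-assessment"}]
steps = [
    ASSESSMENT_STEP_NAMES[ra["name"]]
    for ra in valid_assessments
    if ra["name"] in ASSESSMENT_STEP_NAMES
]
assert steps == ["training", "peer"]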