Commit 9e3192de by Stephen Sanchez

Adding the Student Training UI

parent 12c5cd8f
{% load i18n %}
{% load tz %}
{% spaceless %}
{# Base template for the student training ("Learn to Assess") step. #}
{# Overridable blocks: list_item (the <li> wrapper), title (status label), body (step content). #}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training ui-toggle-visibility">
{% endblock %}
<header class="step__header ui-toggle-visibility__control">
<h2 class="step__title">
<span class="step__counter"></span>
<span class="wrapper--copy">
<span class="step__label">{% trans "Learn to Assess" %}</span>
{# FIX: this step's start date is "training_start", not "self_start" (copy/paste from the self-assessment template meant this branch could never render). #}
{% if training_start %}
<span class="step__deadline">{% trans "available" %}
<span class="date">
{{ training_start|utc|date:"N j, Y H:i e" }}
(in {{ training_start|timeuntil }})
</span>
</span>
{% elif training_due %}
{# FIX: mark "due" for translation, consistent with the other user-facing labels in this template. #}
<span class="step__deadline">{% trans "due" %}
<span class="date">
{{ training_due|utc|date:"N j, Y H:i e" }}
(in {{ training_due|timeuntil }})
</span>
</span>
{% endif %}
</span>
</h2>
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "In Progress" %}</span>
</span>
</span>
{% endblock %}
</header>
{% block body %}
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div class="step__content">
{# Example essay the learner practices assessing. #}
<article class="student-training__display" id="student-training">
<header class="student-training__display__header">
<h3 class="student-training__display__title">{% trans "Your Response" %}</h3>
</header>
<div class="student-training__display__response">
{{ training_essay.text|linebreaks }}
</div>
</article>
{# Rubric form: one radio group per criterion; submission is handled by the step's JavaScript. #}
<form id="student-training--001__assessment" class="student-training__assessment" method="post">
<fieldset class="assessment__fields">
<ol class="list list--fields assessment__rubric">
{% for criterion in training_rubric.criteria %}
<li class="field field--radio is--required assessment__rubric__question ui-toggle-visibility" id="assessment__rubric__question--{{ criterion.order_num }}">
<h4 class="question__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="question__title__copy">{{ criterion.prompt }}</span>
<span class="label--required sr">* ({% trans "Required" %})</span>
</h4>
<div class="ui-toggle-visibility__content">
<ol class="question__answers">
{% for option in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio"
name="{{ criterion.name }}"
id="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__value"
value="{{ option.name }}" />
<label for="assessment__rubric__question--{{ criterion.order_num }}__{{ option.order_num }}"
class="answer__label">{{ option.name }}</label>
</div>
<div class="wrapper--metadata">
<span class="answer__tip">{{ option.explanation }}</span>
<span class="answer__points">{{ option.points }} <span class="answer__points__label">{% trans "points" %}</span></span>
</div>
</li>
{% endfor %}
</ol>
</div>
</li>
{% endfor %}
</ol>
</fieldset>
</form>
</div>
<div class="step__actions">
{# Hidden by default; populated and shown by JavaScript when submission fails. #}
<div class="message message--inline message--error message--error-server">
<h3 class="message__title">{% trans "We could not submit your assessment" %}</h3>
<div class="message__content"></div>
</div>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="student-training--001__assessment__submit" class="action action--submit is--disabled">
<span class="copy">{% trans "Check Rubric" %}</span>
<i class="ico icon-caret-right"></i>
</button>
</li>
</ul>
</div>
</div>
</div>
{% endblock %}
</li>
{% endspaceless %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{# Shown when the training step's due date has passed before the learner completed it. #}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--incomplete ui-toggle-visibility">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-warning-sign"></i>
<span class="copy">{% trans "Incomplete" %}</span>
</span>
</span>
{% endblock %}
{% block body %}
<div class="ui-toggle-visibility__content">
<div class="wrapper--step__content">
<div class="step__message message message--incomplete">
<h3 class="message__title">{% trans "The Due Date for This Step Has Passed" %}</h3>
<div class="message__content">
{# FIX: "a assessment" -> "an assessment" (grammar in user-facing copy). #}
<p>{% trans "This step is now closed. You can no longer complete an assessment training or continue with this assignment, and you'll receive a grade of Incomplete." %}</p>
</div>
</div>
</div>
</div>
{% endblock %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{# Shown after the learner has finished all training examples. #}
{# Renders as a collapsed, empty step with a "Complete" status badge. #}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--complete is--empty is--collapsed">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<i class="ico icon-ok"></i>
<span class="copy">{% trans "Complete" %}</span>
</span>
</span>
{% endblock %}
{# The body is intentionally empty: nothing to show once training is done. #}
{% block body %}
{% endblock %}
{% extends "openassessmentblock/student_training/student_training.html" %}
{% load i18n %}
{# Shown when the training step cannot be taken yet (e.g. before the start date, #}
{# no submission, or studio preview). Collapsed step with a "Not Available" badge. #}
{% block list_item %}
<li id="openassessment__student-training" class="openassessment__steps__step step--student-training is--empty is--unavailable is--collapsed">
{% endblock %}
{% block title %}
<span class="step__status">
<span class="step__status__label">{% trans "This step's status" %}:</span>
<span class="step__status__value">
<span class="copy">{% trans "Not Available" %}</span>
</span>
</span>
{% endblock %}
{# The body is intentionally empty: the step content is hidden while unavailable. #}
{% block body %}
{% endblock %}
...@@ -8,6 +8,7 @@ import logging ...@@ -8,6 +8,7 @@ import logging
from django.db import DatabaseError from django.db import DatabaseError
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import student_training as student_training
from openassessment.assessment.errors import PeerAssessmentError from openassessment.assessment.errors import PeerAssessmentError
from submissions import api as sub_api from submissions import api as sub_api
from .models import AssessmentWorkflow, AssessmentWorkflowStep from .models import AssessmentWorkflow, AssessmentWorkflowStep
...@@ -138,6 +139,8 @@ def create_workflow(submission_uuid, steps): ...@@ -138,6 +139,8 @@ def create_workflow(submission_uuid, steps):
raise AssessmentWorkflowInternalError(err_msg) raise AssessmentWorkflowInternalError(err_msg)
elif steps[0] == "self": elif steps[0] == "self":
status = AssessmentWorkflow.STATUS.self status = AssessmentWorkflow.STATUS.self
elif steps[0] == "training":
status = AssessmentWorkflow.STATUS.training
try: try:
workflow = AssessmentWorkflow.objects.create( workflow = AssessmentWorkflow.objects.create(
......
...@@ -263,7 +263,7 @@ class AssessmentWorkflowStep(models.Model): ...@@ -263,7 +263,7 @@ class AssessmentWorkflowStep(models.Model):
""" """
from openassessment.assessment.api import peer as peer_api from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import student_training as student_training from openassessment.assessment.api import student_training
api = None api = None
if self.name == AssessmentWorkflow.STATUS.self: if self.name == AssessmentWorkflow.STATUS.self:
api = self_api api = self_api
......
"""
Data Conversion utility methods for handling ORA2 XBlock data transformations.
"""
def convert_training_examples_list_to_dict(examples_list):
    """
    Convert the list of options selected that we store in the problem
    definition (which is ordered) into the unordered dictionary of
    options selected that the student training API expects.

    Args:
        examples_list (list): A list of example dicts, each containing
            an "answer" string and an ordered "options_selected" list of
            {"criterion": ..., "option": ...} dicts.

    Returns:
        list: The same examples, with each "options_selected" list
        collapsed into a criterion-name -> option-name dictionary.

    Example:
        >>> examples = [
        >>>     {
        >>>         "answer": "This is my response",
        >>>         "options_selected": [
        >>>             {"criterion": "Ideas", "option": "Fair"},
        >>>             {"criterion": "Content", "option": "Good"}
        >>>         ]
        >>>     }
        >>> ]
        >>> convert_training_examples_list_to_dict(examples)
        [
            {
                'answer': 'This is my response',
                'options_selected': {'Ideas': 'Fair', 'Content': 'Good'}
            }
        ]
    """
    converted = []
    for example in examples_list:
        # Collapse the ordered selection list into a plain mapping;
        # later entries for the same criterion would overwrite earlier ones.
        selections = {}
        for selection in example['options_selected']:
            selections[selection['criterion']] = selection['option']
        converted.append({
            'answer': example['answer'],
            'options_selected': selections,
        })
    return converted
\ No newline at end of file
...@@ -65,6 +65,40 @@ DEFAULT_RUBRIC_FEEDBACK_PROMPT = """ ...@@ -65,6 +65,40 @@ DEFAULT_RUBRIC_FEEDBACK_PROMPT = """
""" """
# Default configuration for the student training ("Learn to Assess") step.
# Mirrors the structure produced by parsing an <assessment name="student-training">
# XML element: no start/due dates, plus calibration examples that each pair an
# example answer with the instructor-selected option for every criterion.
# NOTE(review): the "Ideas"/"Content" criteria are assumed to match the default
# rubric defined elsewhere in this module -- confirm if that rubric changes.
DEFAULT_STUDENT_TRAINING = {
    "name": "student-training",
    "start": None,
    "due": None,
    "examples": [
        {
            "answer": "Example Calibration Response",
            "options_selected": [
                {
                    "criterion": "Ideas",
                    "option": "Fair"
                },
                {
                    "criterion": "Content",
                    "option": "Good"
                }
            ]
        },
        {
            "answer": "Another Example Calibration Response",
            "options_selected": [
                {
                    "criterion": "Ideas",
                    "option": "Poor"
                },
                {
                    "criterion": "Content",
                    "option": "Good"
                }
            ]
        }
    ]
}
# The Default Peer Assessment is created as an example of how this XBlock can be # The Default Peer Assessment is created as an example of how this XBlock can be
# configured. If no configuration is specified, this is the default assessment # configured. If no configuration is specified, this is the default assessment
# module(s) associated with the XBlock. # module(s) associated with the XBlock.
...@@ -82,6 +116,7 @@ DEFAULT_SELF_ASSESSMENT = { ...@@ -82,6 +116,7 @@ DEFAULT_SELF_ASSESSMENT = {
} }
DEFAULT_ASSESSMENT_MODULES = [ DEFAULT_ASSESSMENT_MODULES = [
DEFAULT_STUDENT_TRAINING,
DEFAULT_PEER_ASSESSMENT, DEFAULT_PEER_ASSESSMENT,
DEFAULT_SELF_ASSESSMENT, DEFAULT_SELF_ASSESSMENT,
] ]
......
...@@ -40,6 +40,12 @@ UI_MODELS = { ...@@ -40,6 +40,12 @@ UI_MODELS = {
"navigation_text": "Your response to this problem", "navigation_text": "Your response to this problem",
"title": "Your Response" "title": "Your Response"
}, },
"student-training": {
"name": "student-training",
"class_id": "openassessment__student-training",
"navigation_text": "Learn to assess responses",
"title": "Learn to Assess"
},
"peer-assessment": { "peer-assessment": {
"name": "peer-assessment", "name": "peer-assessment",
"class_id": "openassessment__peer-assessment", "class_id": "openassessment__peer-assessment",
...@@ -61,6 +67,7 @@ UI_MODELS = { ...@@ -61,6 +67,7 @@ UI_MODELS = {
} }
VALID_ASSESSMENT_TYPES = [ VALID_ASSESSMENT_TYPES = [
"student-training",
"peer-assessment", "peer-assessment",
"self-assessment", "self-assessment",
] ]
......
...@@ -73,4 +73,62 @@ ...@@ -73,4 +73,62 @@
.student__answer__display__content p { .student__answer__display__content p {
color: inherit; color: inherit;
} }
// --------------------
// Developer styles for the student training ("Learn to Assess") step
// --------------------
.step--student-training {
// example submission the learner practices assessing
.student-training__display {
@extend %ui-subsection;
}
.student-training__display__header {
@include clearfix();
}
.student-training__display__title {
@extend %t-heading;
margin-bottom: ($baseline-v/2);
color: $heading-secondary-color;
}
.student-training__display__response {
@extend %ui-subsection-content;
@extend %copy-3;
@extend %ui-content-longanswer;
@extend %ui-well;
color: $copy-color;
}
// assessment form
.student-training__assessment {
// fields
.assessment__fields {
margin-bottom: $baseline-v;
}
// rubric question
.assessment__rubric__question {
@extend %ui-rubric-question;
}
// rubric options
.question__answers {
@extend %ui-rubric-answers;
overflow: visible; // needed for ui-hints
}
// general feedback question
.assessment__rubric__question--feedback {
textarea {
@extend %ui-content-longanswer;
min-height: ($baseline-v*5);
}
}
}
}
} }
\ No newline at end of file
...@@ -63,6 +63,20 @@ ...@@ -63,6 +63,20 @@
</feedbackprompt> </feedbackprompt>
</rubric> </rubric>
<assessments> <assessments>
<!-- Student training step: each example pairs a sample answer with the
     instructor-selected option for each rubric criterion; learners must
     match these selections to pass the example. -->
<assessment name="student-training">
<example>
<answer>Censorship is terrible. I don't like it at all.</answer>
<select criterion="concise" option="Matsuo Basho" />
<select criterion="clear-headed" option="Eric" />
<select criterion="form" option="IRC"/>
</example>
<example>
<answer>I love censorship.</answer>
<select criterion="concise" option="Matsuo Basho" />
<select criterion="clear-headed" option="Ian" />
<select criterion="form" option="Old-timey letters"/>
</example>
</assessment>
<assessment name="peer-assessment" <assessment name="peer-assessment"
start="2014-12-20T19:00-7:00" start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00" due="2014-12-21T22:22-7:00"
......
...@@ -6,6 +6,7 @@ from django.utils.translation import ugettext as _ ...@@ -6,6 +6,7 @@ from django.utils.translation import ugettext as _
from webob import Response from webob import Response
from xblock.core import XBlock from xblock.core import XBlock
from openassessment.assessment.api import student_training from openassessment.assessment.api import student_training
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -41,7 +42,7 @@ class StudentTrainingMixin(object): ...@@ -41,7 +42,7 @@ class StudentTrainingMixin(object):
unicode: HTML content of the grade step unicode: HTML content of the grade step
""" """
if "training" not in self.assessment_steps: if "student-training" not in self.assessment_steps:
return Response(u"") return Response(u"")
try: try:
...@@ -70,12 +71,15 @@ class StudentTrainingMixin(object): ...@@ -70,12 +71,15 @@ class StudentTrainingMixin(object):
context = {} context = {}
template = 'openassessmentblock/student_training/student_training_unavailable.html' template = 'openassessmentblock/student_training/student_training_unavailable.html'
if not workflow_status:
return template, context
# If the student has completed the training step, then show that the step is complete. # If the student has completed the training step, then show that the step is complete.
# We put this condition first so that if a student has completed the step, it *always* # We put this condition first so that if a student has completed the step, it *always*
# shows as complete. # shows as complete.
# We're assuming here that the training step always precedes the other assessment steps # We're assuming here that the training step always precedes the other assessment steps
# (peer/self) -- we may need to make this more flexible later. # (peer/self) -- we may need to make this more flexible later.
if workflow_status in ['peer', 'self', 'waiting', 'done']: if workflow_status and workflow_status != "training":
template = 'openassessmentblock/student_training/student_training_complete.html' template = 'openassessmentblock/student_training/student_training_complete.html'
# If the problem is closed, then do not allow students to access the training step # If the problem is closed, then do not allow students to access the training step
...@@ -89,14 +93,27 @@ class StudentTrainingMixin(object): ...@@ -89,14 +93,27 @@ class StudentTrainingMixin(object):
# If we're on the training step, show the student an example # If we're on the training step, show the student an example
# We do this last so we can avoid querying the student training API if possible. # We do this last so we can avoid querying the student training API if possible.
else: else:
training_module = self.get_assessment_module('student-training')
if not training_module:
return template, context
context['training_due'] = due_date
# Report progress in the student training workflow (completed X out of Y) # Report progress in the student training workflow (completed X out of Y)
status = student_training.get_workflow_status(self.submission_uuid) context['training_num_available'] = len(training_module["examples"])
context['training_num_completed'] = status['num_completed'] context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
context['training_num_available'] = status['num_total']
# Retrieve the example essay for the student to submit # Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected. # This will contain the essay text, the rubric, and the options the instructor selected.
example = student_training.get_training_example(self.submission_uuid) examples = convert_training_examples_list_to_dict(training_module["examples"])
example = student_training.get_training_example(
self.submission_uuid,
{
'prompt': self.prompt,
'criteria': self.rubric_criteria
},
examples
)
context['training_essay'] = example['answer'] context['training_essay'] = example['answer']
context['training_rubric'] = example['rubric'] context['training_rubric'] = example['rubric']
template = 'openassessmentblock/student_training/student_training.html' template = 'openassessmentblock/student_training/student_training.html'
......
...@@ -39,22 +39,12 @@ ...@@ -39,22 +39,12 @@
<assessments> <assessments>
<assessment name="student-training"> <assessment name="student-training">
<example> <example>
<answer> <answer>This is my answer.</answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒐𝒏 𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" /> <select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" /> <select criterion="Grammar" option="Excellent" />
</example> </example>
<example> <example>
<answer> <answer>тєѕт αηѕωєя</answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" /> <select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" /> <select criterion="Grammar" option="Poor" />
</example> </example>
......
{
"simple": {
"expected_template": "openassessmentblock/student_training/student_training.html",
"expected_context": {
"training_num_completed": 0,
"training_num_available": 2,
"training_due": "9999-01-01T00:00:00+00:00",
"training_essay": "This is my answer.",
"training_rubric": {
"id": 2,
"content_hash": "de2bb2b7e2c6e3df014e53b8c65f37d511cc4344",
"criteria": [
{
"order_num": 0,
"name": "Vocabulary",
"prompt": "How varied is the vocabulary?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job"
},
{
"order_num": 2,
"points": 3,
"name": "Excellent",
"explanation": "Excellent job"
}
],
"points_possible": 3
},
{
"order_num": 1,
"name": "Grammar",
"prompt": "How correct is the grammar?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "Poor",
"explanation": "Poor job"
},
{
"order_num": 1,
"points": 1,
"name": "Good",
"explanation": "Good job"
},
{
"order_num": 2,
"points": 3,
"name": "Excellent",
"explanation": "Excellent job"
}
],
"points_possible": 3
}
],
"points_possible": 6
}
}
}
}
\ No newline at end of file
...@@ -2,11 +2,16 @@ ...@@ -2,11 +2,16 @@
""" """
Tests for the student training step in the Open Assessment XBlock. Tests for the student training step in the Open Assessment XBlock.
""" """
import datetime
import ddt
import json import json
from openassessment.assessment.api import student_training from mock import patch
import pytz
from django.db import DatabaseError
from openassessment.assessment.models import StudentTrainingWorkflow
from .base import XBlockHandlerTestCase, scenario from .base import XBlockHandlerTestCase, scenario
@ddt.ddt
class StudentTrainingAssessTest(XBlockHandlerTestCase): class StudentTrainingAssessTest(XBlockHandlerTestCase):
""" """
Tests for student training assessment. Tests for student training assessment.
...@@ -16,8 +21,10 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -16,8 +21,10 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
} }
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_correct(self, xblock): @ddt.file_data('data/student_training_mixin.json')
def test_correct(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Agree with the course author's assessment # Agree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
...@@ -34,104 +41,112 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase): ...@@ -34,104 +41,112 @@ class StudentTrainingAssessTest(XBlockHandlerTestCase):
self.assertTrue(resp['correct']) self.assertTrue(resp['correct'])
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_incorrect(self, xblock): @ddt.file_data('data/student_training_mixin.json')
def test_incorrect(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION) xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self._assert_path_and_context(xblock, data["expected_template"], data["expected_context"])
# Disagree with the course author's assessment # Disagree with the course author's assessment
# (as defined in the scenario XML) # (as defined in the scenario XML)
data = { select_data = {
'options_selected': { 'options_selected': {
'Vocabulary': 'Poor', 'Vocabulary': 'Poor',
'Grammar': 'Poor' 'Grammar': 'Poor'
} }
} }
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json') resp = self.request(xblock, 'training_assess', json.dumps(select_data), response_format='json')
# Expect that we were marked incorrect # Expect that we were marked incorrect
self.assertTrue(resp['success'], msg=resp.get('msg')) self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['correct']) self.assertFalse(resp['correct'])
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_updates_workflow(self, xblock): @ddt.file_data('data/student_training_mixin.json')
self.fail() def test_updates_workflow(self, xblock, data):
expected_context = data["expected_context"].copy()
@scenario('data/student_training.xml', user_id="Plato") expected_template = data["expected_template"]
def test_no_examples_left(self, xblock): xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
self.fail() self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
def test_request_error(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_internal_error(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_invalid_options_dict(self, xblock):
self.fail()
@scenario('data/basic_scenario.xml', user_id="Plato")
def test_no_student_training_defined(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_studio_preview(self, xblock):
self.fail()
@scenario('data/student_training.xml')
def test_not_logged_in(self, xblock):
self.fail()
# Agree with the course author's assessment
# (as defined in the scenario XML)
selected_data = {
'options_selected': {
'Vocabulary': 'Good',
'Grammar': 'Excellent'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
class StudentTrainingRenderTest(XBlockHandlerTestCase): # Expect that we were correct
""" self.assertTrue(resp['success'], msg=resp.get('msg'))
Tests for student training step rendering. self.assertTrue(resp['correct'])
"""
@scenario('data/student_training_due.xml', user_id="Plato") # Agree with the course author's assessment
def test_past_due(self, xblock): # (as defined in the scenario XML)
self.fail() selected_data = {
'options_selected': {
'Vocabulary': 'Excellent',
'Grammar': 'Poor'
}
}
@scenario('data/student_training_future.xml', user_id="Plato") expected_context["training_num_completed"] = 1
def test_before_start(self, xblock): expected_context["training_essay"] = u"тєѕт αηѕωєя"
self.fail() self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
@scenario('data/student_training.xml', user_id="Plato") # Expect that we were correct
def test_training_complete(self, xblock): self.assertTrue(resp['success'], msg=resp.get('msg'))
self.fail() self.assertTrue(resp['correct'])
expected_context = {}
expected_template = "openassessmentblock/student_training/student_training_complete.html"
self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_training_example_available(self, xblock): @ddt.file_data('data/student_training_mixin.json')
self.fail() def test_request_error(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
resp = self.request(xblock, 'training_assess', json.dumps({}), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
selected_data = {
'options_selected': "foo"
}
resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
self.assertFalse(resp['success'], msg=resp.get('msg'))
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_no_training_examples_left(self, xblock): @ddt.file_data('data/student_training_mixin.json')
self.fail() def test_invalid_options_dict(self, xblock, data):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_context = data["expected_context"].copy()
expected_template = data["expected_template"]
self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato") selected_data = {
def test_render_error(self, xblock): 'options_selected': {
self.fail() 'Bananas': 'Excellent',
'Grammar': 'Poor'
}
}
@scenario('data/basic_scenario.xml', user_id="Plato") resp = self.request(xblock, 'training_assess', json.dumps(selected_data), response_format='json')
def test_no_student_training_defined(self, xblock): self.assertFalse(resp['success'], msg=resp.get('msg'))
self.fail()
@scenario('data/student_training.xml', user_id="Plato") @scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock): def test_no_submission(self, xblock):
self.fail() selected_data = {
'options_selected': {
@scenario('data/student_training.xml', user_id="Plato") 'Vocabulary': 'Excellent',
def test_studio_preview(self, xblock): 'Grammar': 'Poor'
self.fail() }
}
@scenario('data/student_training.xml') resp = self.request(xblock, 'training_assess', json.dumps(selected_data))
def test_not_logged_in(self, xblock): self.assertIn("Your scores could not be checked", resp.decode('utf-8'))
self.fail()
def _assert_path_and_context(self, xblock, expected_path, expected_context): def _assert_path_and_context(self, xblock, expected_path, expected_context):
""" """
...@@ -149,8 +164,61 @@ class StudentTrainingRenderTest(XBlockHandlerTestCase): ...@@ -149,8 +164,61 @@ class StudentTrainingRenderTest(XBlockHandlerTestCase):
""" """
path, context = xblock.training_path_and_context() path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path) self.assertEqual(path, expected_path)
self.assertItemsEqual(context, expected_context) self.assertEqual(len(context), len(expected_context))
for key in expected_context.keys():
# Verify that we render without error if key == 'training_due':
iso_date = context['training_due'].isoformat()
self.assertEqual(iso_date, expected_context[key])
else:
self.assertEqual(context[key], expected_context[key])
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({})) resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0) self.assertGreater(len(resp), 0)
class StudentTrainingRenderTest(StudentTrainingAssessTest):
"""
Tests for student training step rendering.
"""
@scenario('data/basic_scenario.xml', user_id="Plato")
def test_no_student_training_defined(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertEquals("", resp.decode('utf-8'))
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("Not Available", resp.decode('utf-8'))
@scenario('data/student_training.xml')
def test_studio_preview(self, xblock):
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("Not Available", resp.decode('utf-8'))
@scenario('data/student_training_due.xml', user_id="Plato")
def test_past_due(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_template = "openassessmentblock/student_training/student_training_closed.html"
expected_context = {
'training_due': "2000-01-01T00:00:00+00:00"
}
self._assert_path_and_context(xblock, expected_template, expected_context)
@scenario('data/student_training.xml', user_id="Plato")
@patch.object(StudentTrainingWorkflow, "get_or_create_workflow")
def test_internal_error(self, xblock, mock_workflow):
mock_workflow.side_effect = DatabaseError("Oh no.")
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertIn("An unexpected error occurred.", resp.decode('utf-8'))
@scenario('data/student_training_future.xml', user_id="Plato")
def test_before_start(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
expected_template = "openassessmentblock/student_training/student_training_unavailable.html"
expected_context = {
'training_start': datetime.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
}
self._assert_path_and_context(xblock, expected_template, expected_context)
...@@ -6,6 +6,7 @@ from django.utils.translation import ugettext as _ ...@@ -6,6 +6,7 @@ from django.utils.translation import ugettext as _
from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric from openassessment.assessment.serializers import rubric_from_dict, InvalidRubric
from openassessment.assessment.api.student_training import validate_training_examples from openassessment.assessment.api.student_training import validate_training_examples
from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat from openassessment.xblock.resolve_dates import resolve_dates, DateValidationError, InvalidDateFormat
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict
def _match_by_order(items, others): def _match_by_order(items, others):
...@@ -228,27 +229,15 @@ def _validate_assessment_examples(rubric_dict, assessments): ...@@ -228,27 +229,15 @@ def _validate_assessment_examples(rubric_dict, assessments):
for asmnt in assessments: for asmnt in assessments:
if asmnt['name'] == 'student-training': if asmnt['name'] == 'student-training':
# Convert of options selected we store in the problem def, examples = convert_training_examples_list_to_dict(asmnt['examples'])
# which is ordered, to the unordered dictionary of options
# selected that the student training API expects.
examples = [
{
'answer': ex['answer'],
'options_selected': {
select_dict['criterion']: select_dict['option']
for select_dict in ex['options_selected']
}
}
for ex in asmnt['examples']
]
# Delegate to the student training API to validate the # Delegate to the student training API to validate the
# examples against the rubric. # examples against the rubric.
errors = validate_training_examples(rubric_dict, examples) errors = validate_training_examples(rubric_dict, examples)
if errors: if errors:
return (False, "\n".join(errors)) return False, "\n".join(errors)
return (True, u'') return True, u''
def validator(oa_block, strict_post_release=True): def validator(oa_block, strict_post_release=True):
......
...@@ -60,17 +60,22 @@ class WorkflowMixin(object): ...@@ -60,17 +60,22 @@ class WorkflowMixin(object):
dict dict
""" """
assessment_ui_model = self.get_assessment_module('peer-assessment') requirements = {}
if not assessment_ui_model: peer_assessment_module = self.get_assessment_module('peer-assessment')
return {} if peer_assessment_module:
requirements["peer"] = {
"must_grade": peer_assessment_module["must_grade"],
"must_be_graded_by": peer_assessment_module["must_be_graded_by"]
}
return { training_module = self.get_assessment_module('student-training')
"peer": { if training_module:
"must_grade": assessment_ui_model["must_grade"], requirements["training"] = {
"must_be_graded_by": assessment_ui_model["must_be_graded_by"] "num_required": len(training_module["examples"])
} }
}
return requirements
def update_workflow_status(self, submission_uuid=None): def update_workflow_status(self, submission_uuid=None):
""" """
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment