Commit 12c5cd8f authored by Will Daly, committed by Stephen Sanchez

Integrate the student training API with the XBlock.

parent 13d2e3ef
@@ -297,7 +297,7 @@ def update_from_assessments(submission_uuid, assessment_requirements):
problem.
Examples:
>>> update_from_assessments(
... '222bdf3d-a88e-11e3-859e-040ccee02800',
... {"peer": {"must_grade":5, "must_be_graded_by":3}}
... )
@@ -408,6 +408,7 @@ def _get_workflow_model(submission_uuid):
return workflow
def _serialized_with_details(workflow, assessment_requirements):
"""Given a workflow and assessment requirements, return the serialized
version of an `AssessmentWorkflow` and add in the status details. See
......
@@ -50,6 +50,7 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
STEPS = [
"peer", # User needs to assess peer submissions
"self", # User needs to assess themselves
"training", # User needs to practice grading using example essays
]
STATUSES = [
@@ -262,11 +263,14 @@ class AssessmentWorkflowStep(models.Model):
"""
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import student_training
api = None
if self.name == AssessmentWorkflow.STATUS.self:
api = self_api
elif self.name == AssessmentWorkflow.STATUS.peer:
api = peer_api
elif self.name == AssessmentWorkflow.STATUS.training:
api = student_training
return api
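# Illustrative only (not part of this commit): given the dispatch above,
# a step named "training" resolves to the student training API:
#
#   >>> AssessmentWorkflowStep(name="training").api() is student_training
#   True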
def update(self, submission_uuid, assessment_requirements):
......
@@ -25,6 +25,7 @@ from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.resolve_dates import resolve_dates, DISTANT_PAST, DISTANT_FUTURE
@@ -80,7 +81,9 @@ class OpenAssessmentBlock(
GradeMixin,
StaffInfoMixin,
WorkflowMixin,
StudentTrainingMixin,
LmsCompatibilityMixin
):
"""Displays a question and gives an area where students can compose a response."""
submission_start = String(
......
"""
Student training step in the OpenAssessment XBlock.
"""
import logging
from django.utils.translation import ugettext as _
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
logger = logging.getLogger(__name__)
class StudentTrainingMixin(object):
"""
Student training is a step that allows students to practice
assessing example essays provided by the course author.
1) A student is shown an example essay.
2) The student scores the example essay.
a) If the student's scores match the instructor's scores,
the student is shown the next example. If there are no
more examples, the step is marked complete.
b) If the student's scores do NOT match the instructor's scores,
the student is prompted to retry.
"""
@XBlock.handler
def render_student_training(self, data, suffix=''): # pylint:disable=W0613
"""
Render the student training step.
Args:
data: Not used.
Kwargs:
suffix: Not used.
Returns:
unicode: HTML content of the student training step
"""
if "training" not in self.assessment_steps:
return Response(u"")
try:
path, context = self.training_path_and_context()
except: # pylint:disable=W0702
msg = u"Could not render student training step for submission {}".format(self.submission_uuid)
logger.exception(msg)
return self.render_error(_(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def training_path_and_context(self):
"""
Return the template path and context used to render the student training step.
Returns:
tuple of `(path, context)` where `path` is the path to the template and
`context` is a dict.
"""
# Retrieve the status of the workflow.
# If no submissions have been created yet, the status will be None.
workflow_status = self.get_workflow_info().get('status')
problem_closed, reason, start_date, due_date = self.is_closed(step="student-training")
context = {}
template = 'openassessmentblock/student_training/student_training_unavailable.html'
# If the student has completed the training step, then show that the step is complete.
# We put this condition first so that if a student has completed the step, it *always*
# shows as complete.
# We're assuming here that the training step always precedes the other assessment steps
# (peer/self) -- we may need to make this more flexible later.
if workflow_status in ['peer', 'self', 'waiting', 'done']:
template = 'openassessmentblock/student_training/student_training_complete.html'
# If the problem is closed, then do not allow students to access the training step
elif problem_closed and reason == 'start':
context['training_start'] = start_date
template = 'openassessmentblock/student_training/student_training_unavailable.html'
elif problem_closed and reason == 'due':
context['training_due'] = due_date
template = 'openassessmentblock/student_training/student_training_closed.html'
# If we're on the training step, show the student an example
# We do this last so we can avoid querying the student training API if possible.
else:
# Report progress in the student training workflow (completed X out of Y)
status = student_training.get_workflow_status(self.submission_uuid)
context['training_num_completed'] = status['num_completed']
context['training_num_available'] = status['num_total']
# Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected.
example = student_training.get_training_example(self.submission_uuid)
context['training_essay'] = example['answer']
context['training_rubric'] = example['rubric']
template = 'openassessmentblock/student_training/student_training.html'
return template, context
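# Illustrative only (not part of this commit): for a student midway through
# training, this might return something like:
#
#   ('openassessmentblock/student_training/student_training.html',
#    {'training_num_completed': 1, 'training_num_available': 2,
#     'training_essay': u'...', 'training_rubric': {...}})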
@XBlock.json_handler
def training_assess(self, data, suffix=''): # pylint:disable=W0613
"""
Compare the scores given by the student with those given by the course author.
If they match, update the training workflow. The client can then reload this
step to view the next essay or the completed step.
Currently, we return only a boolean indicating whether the student assessed
the example correctly. However, the student training API also reports exactly
which criteria the student scored incorrectly, as well as the "correct"
options for those criteria.
In the future, we may expose this in the UI to provide more detailed feedback.
Args:
data (dict): Must have the following keys:
options_selected (dict): Dictionary mapping criterion names to option values.
Returns:
Dict with keys:
* "success" (bool) indicating success or error
* "msg" (unicode) containing additional information if an error occurs.
* "correct" (bool) indicating whether the student scored the assessment correctly.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': _(u"Missing options_selected key in request")}
if not isinstance(data['options_selected'], dict):
return {'success': False, 'msg': _(u"options_selected must be a dictionary")}
# Check the student's scores against the course author's scores.
# This implicitly updates the student training workflow (which example essay is shown)
# as well as the assessment workflow (training/peer/self steps).
try:
corrections = student_training.assess_training_example(
self.submission_uuid, data['options_selected']
)
except (student_training.StudentTrainingRequestError, student_training.StudentTrainingInternalError) as ex:
return {
'success': False,
'msg': _(u"Your scores could not be checked: {error}.").format(error=ex.message)
}
except: # pylint:disable=W0702
return {
'success': False,
'msg': _(u"An unexpected error occurred.")
}
else:
return {
'success': True,
'msg': u'',
'correct': len(corrections) == 0,
}
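<!-- Test scenario fixture (presumably data/student_training.xml, the default scenario referenced by the tests below). -->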
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training">
<example>
<answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒏𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
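<!-- Test scenario fixture (presumably data/student_training_due.xml: the training step is past its due date). -->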
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training" due="2000-01-01">
<example>
<answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒏𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
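<!-- Test scenario fixture (presumably data/student_training_future.xml: the training step has not yet opened). -->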
<openassessment>
<title>Student training test</title>
<prompt>Test prompt</prompt>
<rubric>
<prompt>Test rubric prompt</prompt>
<criterion>
<name>Vocabulary</name>
<prompt>How varied is the vocabulary?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
<criterion>
<name>Grammar</name>
<prompt>How correct is the grammar?</prompt>
<option points="0">
<name>Poor</name>
<explanation>Poor job</explanation>
</option>
<option points="1">
<name>Good</name>
<explanation>Good job</explanation>
</option>
<option points="3">
<name>Excellent</name>
<explanation>Excellent job</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="student-training" start="3000-01-01">
<example>
<answer>
𝑨 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆--𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒄𝒂𝒍𝒍𝒆𝒅 𝒂 𝒔𝒖𝒃𝒔𝒕𝒂𝒏𝒄𝒆 𝒎𝒐𝒔𝒕 𝒔𝒕𝒓𝒊𝒄𝒕𝒍𝒚, 𝒑𝒓𝒊𝒎𝒂𝒓𝒊𝒍𝒚,
𝒂𝒏𝒅 𝒎𝒐𝒔𝒕 𝒐𝒇 𝒂𝒍𝒍--𝒊𝒔 𝒕𝒉𝒂𝒕 𝒘𝒉𝒊𝒄𝒉 𝒊𝒔 𝒏𝒆𝒊𝒕𝒉𝒆𝒓 𝒔𝒂𝒊𝒅 𝒐𝒇 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕 𝒏𝒐𝒓 𝒊𝒏 𝒂 𝒔𝒖𝒃𝒋𝒆𝒄𝒕,
𝒆.𝒈. 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒎𝒂𝒏 𝒐𝒓 𝒕𝒉𝒆 𝒊𝒏𝒅𝒊𝒗𝒊𝒅𝒖𝒂𝒍 𝒉𝒐𝒓𝒔𝒆.
</answer>
<select criterion="Vocabulary" option="Good" />
<select criterion="Grammar" option="Excellent" />
</example>
<example>
<answer>
Їḟ ẗḧëṛë ïṡ ṡöṁë ëṅḋ öḟ ẗḧë ẗḧïṅġṡ ẅë ḋö, ẅḧïċḧ ẅë ḋëṡïṛë ḟöṛ ïẗṡ öẅṅ ṡäḳë,
ċḷëäṛḷÿ ẗḧïṡ ṁüṡẗ ḅë ẗḧë ġööḋ. Ẅïḷḷ ṅöẗ ḳṅöẅḷëḋġë öḟ ïẗ, ẗḧëṅ, ḧäṿë ä ġṛëäẗ
ïṅḟḷüëṅċë öṅ ḷïḟë? Ṡḧäḷḷ ẅë ṅöẗ, ḷïḳë äṛċḧëṛṡ ẅḧö ḧäṿë ä ṁäṛḳ ẗö äïṁ äẗ,
ḅë ṁöṛë ḷïḳëḷÿ ẗö ḧïẗ üṗöṅ ẅḧäẗ ẅë ṡḧöüḷḋ? Їḟ ṡö, ẅë ṁüṡẗ ẗṛÿ, ïṅ öüẗḷïṅë äẗ ḷëäṡẗ,
ẗö ḋëẗëṛṁïṅë ẅḧäẗ ïẗ ïṡ.
</answer>
<select criterion="Vocabulary" option="Excellent" />
<select criterion="Grammar" option="Poor" />
</example>
</assessment>
<assessment name="peer-assessment" must_grade="5" must_be_graded_by="3" />
</assessments>
</openassessment>
# -*- coding: utf-8 -*-
"""
Tests for the student training step in the Open Assessment XBlock.
"""
import json
from openassessment.assessment.api import student_training
from .base import XBlockHandlerTestCase, scenario
class StudentTrainingAssessTest(XBlockHandlerTestCase):
"""
Tests for student training assessment.
"""
SUBMISSION = {
'submission': u'Thé őbjéćt őf édúćátíőń íś tő téáćh úś tő ĺővé ẃhát íś béáútífúĺ.'
}
@scenario('data/student_training.xml', user_id="Plato")
def test_correct(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
# Agree with the course author's assessment
# (as defined in the scenario XML)
data = {
'options_selected': {
'Vocabulary': 'Good',
'Grammar': 'Excellent'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were correct
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertTrue(resp['correct'])
@scenario('data/student_training.xml', user_id="Plato")
def test_incorrect(self, xblock):
xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
# Disagree with the course author's assessment
# (as defined in the scenario XML)
data = {
'options_selected': {
'Vocabulary': 'Poor',
'Grammar': 'Poor'
}
}
resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
# Expect that we were marked incorrect
self.assertTrue(resp['success'], msg=resp.get('msg'))
self.assertFalse(resp['correct'])
@scenario('data/student_training.xml', user_id="Plato")
def test_updates_workflow(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_no_examples_left(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_request_error(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_internal_error(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_invalid_options_dict(self, xblock):
self.fail()
@scenario('data/basic_scenario.xml', user_id="Plato")
def test_no_student_training_defined(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_studio_preview(self, xblock):
self.fail()
@scenario('data/student_training.xml')
def test_not_logged_in(self, xblock):
self.fail()
class StudentTrainingRenderTest(XBlockHandlerTestCase):
"""
Tests for student training step rendering.
"""
@scenario('data/student_training_due.xml', user_id="Plato")
def test_past_due(self, xblock):
self.fail()
@scenario('data/student_training_future.xml', user_id="Plato")
def test_before_start(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_training_complete(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_training_example_available(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_no_training_examples_left(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_render_error(self, xblock):
self.fail()
@scenario('data/basic_scenario.xml', user_id="Plato")
def test_no_student_training_defined(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_no_submission(self, xblock):
self.fail()
@scenario('data/student_training.xml', user_id="Plato")
def test_studio_preview(self, xblock):
self.fail()
@scenario('data/student_training.xml')
def test_not_logged_in(self, xblock):
self.fail()
def _assert_path_and_context(self, xblock, expected_path, expected_context):
"""
Render the student training step and verify that the expected template
and context were used. Also check that the template renders without error.
Args:
xblock (OpenAssessmentBlock): The XBlock under test.
expected_path (str): The expected template path.
expected_context (dict): The expected template context.
Raises:
AssertionError
"""
path, context = xblock.training_path_and_context()
self.assertEqual(path, expected_path)
self.assertItemsEqual(context, expected_context)
# Verify that we render without error
resp = self.request(xblock, 'render_student_training', json.dumps({}))
self.assertGreater(len(resp), 0)
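# A sketch (not part of this commit) of how one of the stubbed tests above
# might be filled in. It relies only on the validation logic shown in
# StudentTrainingMixin.training_assess: a non-dict "options_selected" should
# return success=False without raising.
#
#   @scenario('data/student_training.xml', user_id="Plato")
#   def test_invalid_options_dict(self, xblock):
#       xblock.create_submission(xblock.get_student_item_dict(), self.SUBMISSION)
#       data = {'options_selected': 'not a dict'}
#       resp = self.request(xblock, 'training_assess', json.dumps(data), response_format='json')
#       self.assertFalse(resp['success'])
#       self.assertIn('must be a dictionary', resp['msg'])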
"""
Handle OpenAssessment XBlock requests to the Workflow API.
"""
from xblock.core import XBlock
from openassessment.workflow import api as workflow_api
class WorkflowMixin(object):
"""
Handle OpenAssessment XBlock requests to the Workflow API.
"""
# Dictionary mapping assessment names (e.g. peer-assessment)
# to the corresponding workflow step names.
ASSESSMENT_STEP_NAMES = {
"self-assessment": "self",
"peer-assessment": "peer",
"student-training": "training",
}
@XBlock.json_handler
def handle_workflow_info(self, data, suffix=''): # pylint:disable=W0613
"""
Retrieve the current state of the workflow.
Args:
data: Unused
Kwargs:
suffix: Unused
Returns:
dict
"""
return self.get_workflow_info()
def create_workflow(self, submission_uuid):
"""
Create a new workflow for a student submission.
Args:
submission_uuid (str): The UUID of the submission to associate
with the workflow.
Returns:
None
"""
steps = self._create_step_list()
workflow_api.create_workflow(submission_uuid, steps)
def workflow_requirements(self):
"""
Retrieve the requirements from each assessment module
@@ -34,6 +58,7 @@ class WorkflowMixin(object):
Returns:
dict
"""
assessment_ui_model = self.get_assessment_module('peer-assessment')
@@ -116,3 +141,20 @@ class WorkflowMixin(object):
)
num_submissions = sum(item['count'] for item in status_counts)
return status_counts, num_submissions
def _create_step_list(self):
"""
Return a list of valid workflow step names.
This translates between the assessment types (loaded from the problem definition)
and the step types (used by the Workflow API).
At some point, we should probably refactor to make these two names consistent.
Returns:
list
"""
return [
self.ASSESSMENT_STEP_NAMES.get(ra['name'])
for ra in self.valid_assessments
if ra['name'] in self.ASSESSMENT_STEP_NAMES
]
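# For example (illustrative, not part of the commit): a problem defining
# "student-training" and "peer-assessment" assessments yields
# ["training", "peer"], matching the step names in AssessmentWorkflow.STEPS.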