Commit b491ad87 by Stephen Sanchez

Merge pull request #38 from edx/sanchez/template-first-crack

WIP: UI Architecture for Module UI Components
parents 4053e448 6ac00039
@@ -6,7 +6,7 @@ from rest_framework import serializers
 from openassessment.peer.models import PeerEvaluation
-class PeerEvaluationSerializer(serializers.ModelSerializer):
+class PeerAssessmentSerializer(serializers.ModelSerializer):
     class Meta:
         model = PeerEvaluation
         fields = (
...
@@ -32,7 +32,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)
 class TestApi(TestCase):
     def test_create_evaluation(self):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        evaluation = api.create_evaluation(
+        evaluation = api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
@@ -44,21 +44,21 @@ class TestApi(TestCase):
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations(self, assessment_dict):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
             REQUIRED_GRADED_BY,
             assessment_dict
         )
-        evaluations = api.get_evaluations(submission["uuid"])
+        evaluations = api.get_assessments(submission["uuid"])
         self.assertEqual(1, len(evaluations))
         self._assert_evaluation(evaluations[0], **assessment_dict)
     @file_data('test_valid_evaluations.json')
     def test_get_evaluations_with_date(self, assessment_dict):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
@@ -66,7 +66,7 @@ class TestApi(TestCase):
             assessment_dict,
             MONDAY
         )
-        evaluations = api.get_evaluations(submission["uuid"])
+        evaluations = api.get_assessments(submission["uuid"])
         self.assertEqual(1, len(evaluations))
         self._assert_evaluation(evaluations[0], **assessment_dict)
         self.assertEqual(evaluations[0]["scored_at"], MONDAY)
@@ -85,22 +85,22 @@ class TestApi(TestCase):
         self.assertFalse(scores)
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        api.create_assessment(
             bob["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
-        api.create_evaluation(
+        api.create_assessment(
             sally["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        api.create_assessment(
             jim["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        api.create_assessment(
             buffy["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
         self.assertFalse(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
-        api.create_evaluation(
+        api.create_assessment(
             xander["uuid"], "Tim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
         self.assertTrue(api.has_finished_required_evaluating("Tim", REQUIRED_GRADED))
@@ -110,13 +110,13 @@ class TestApi(TestCase):
         scores = sub_api.get_score(STUDENT_ITEM)
         self.assertFalse(scores)
-        api.create_evaluation(
+        api.create_assessment(
             tim["uuid"], "Bob", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
-        api.create_evaluation(
+        api.create_assessment(
             tim["uuid"], "Sally", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
-        api.create_evaluation(
+        api.create_assessment(
             tim["uuid"], "Jim", REQUIRED_GRADED, REQUIRED_GRADED_BY, ASSESSMENT_DICT
         )
@@ -127,7 +127,7 @@ class TestApi(TestCase):
         self.assertEqual(12, scores[0]["points_possible"])
-    @raises(api.PeerEvaluationRequestError)
+    @raises(api.PeerAssessmentRequestError)
     def test_bad_configuration(self):
         api.has_finished_required_evaluating("Tim", -1)
@@ -139,27 +139,27 @@ class TestApi(TestCase):
         )
         self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)
-        submission = api.get_submission_to_evaluate(STUDENT_ITEM, 3)
+        submission = api.get_submission_to_assess(STUDENT_ITEM, 3)
         self.assertIsNotNone(submission)
         self.assertEqual(submission["answer"], u"Bob's answer")
         self.assertEqual(submission["student_item"], 2)
         self.assertEqual(submission["attempt_number"], 1)
-    @raises(api.PeerEvaluationWorkflowError)
+    @raises(api.PeerAssessmentWorkflowError)
     def test_no_submissions_to_evaluate_for_tim(self):
         self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
-        api.get_submission_to_evaluate(STUDENT_ITEM, 3)
+        api.get_submission_to_assess(STUDENT_ITEM, 3)
     """
     Some Error Checking Tests against DB failures.
     """
     @patch.object(Submission.objects, 'get')
-    @raises(api.PeerEvaluationInternalError)
+    @raises(api.PeerAssessmentInternalError)
     def test_error_on_evaluation_creation(self, mock_filter):
         mock_filter.side_effect = DatabaseError("Bad things happened")
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
@@ -172,7 +172,7 @@ class TestApi(TestCase):
     @raises(sub_api.SubmissionInternalError)
     def test_error_on_get_evaluation(self, mock_filter):
         submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
-        api.create_evaluation(
+        api.create_assessment(
             submission["uuid"],
             STUDENT_ITEM["student_id"],
             REQUIRED_GRADED,
@@ -181,7 +181,7 @@ class TestApi(TestCase):
             MONDAY
         )
         mock_filter.side_effect = DatabaseError("Bad things happened")
-        api.get_evaluations(submission["uuid"])
+        api.get_assessments(submission["uuid"])
     def test_choose_score(self):
         self.assertEqual(0, api._calculate_final_score([]))
...
@@ -2,7 +2,7 @@ import logging
 from django.contrib.auth.decorators import login_required
 from django.shortcuts import render_to_response
-from openassessment.peer.api import get_evaluations
+from openassessment.peer.api import get_assessments
 from submissions.api import SubmissionRequestError, get_submissions
 log = logging.getLogger(__name__)
@@ -38,7 +38,7 @@ def get_evaluations_for_student_item(request, course_id, student_id, item_id):
         submissions = get_submissions(student_item_dict)
         evaluations = []
         for submission in submissions:
-            submission_evaluations = get_evaluations(submission["uuid"])
+            submission_evaluations = get_assessments(submission["uuid"])
             for evaluation in submission_evaluations:
                 evaluation["submission_uuid"] = submission["uuid"]
                 evaluations.append(evaluation)
...
from xblock.core import XBlock
from openassessment.peer import api as peer_api
from openassessment.peer.api import PeerAssessmentWorkflowError
class PeerAssessmentMixin(object):
@XBlock.json_handler
def assess(self, data, suffix=''):
"""Place an assessment into OpenAssessment system
"""
assessment = self.get_assessment_module('peer-assessment')
if assessment:
assessment_dict = {
"points_earned": map(int, data["points_earned"]),
"points_possible": sum(c['total_value'] for c in self.rubric_criteria),
"feedback": "Not yet implemented.",
}
assessment = peer_api.create_assessment(
data["submission_uuid"],
self.get_student_item_dict()["student_id"],
int(assessment.must_grade),
int(assessment.must_be_graded_by),
assessment_dict
)
# Temp kludge until we fix JSON serialization for datetime
assessment["scored_at"] = str(assessment["scored_at"])
return assessment, "Success"
@XBlock.handler
def render_peer_assessment(self, data, suffix=''):
assessment = self.get_assessment_module('peer-assessment')
if assessment:
peer_sub = self.get_peer_submission(self.get_student_item_dict(), assessment)
context_dict = {"peer_submission": peer_sub}
return self.render_assessment('static/html/oa_peer_assessment.html', context_dict)
def get_peer_submission(self, student_item_dict, assessment):
peer_submission = False
try:
            peer_submission = peer_api.get_submission_to_assess(
                student_item_dict, assessment.must_be_graded_by
            )
except PeerAssessmentWorkflowError:
# TODO: Log?
pass
return peer_submission
def get_assessment_module(self, mixin_name):
"""Get a configured assessment module by name.
"""
for assessment in self.rubric_assessments:
if assessment.name == mixin_name:
return assessment
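For clarity, a hedged sketch (not part of this commit) of the JSON body the assess handler above appears to expect and the dict it builds from it; all concrete values are invented for illustration.

# Illustration only: example payload posted to the `assess` JSON handler above.
example_assess_payload = {
    "submission_uuid": "a-peer-submission-uuid",   # read from the hidden #peer_submission_uuid div
    "points_earned": ["2", "1", "3"],              # one radio value per rubric criterion
}

# From that payload the mixin builds roughly this dict and hands it to peer_api.create_assessment:
example_assessment_dict = {
    "points_earned": [2, 1, 3],                    # map(int, data["points_earned"])
    "points_possible": 12,                         # sum of each criterion's total_value in the rubric
    "feedback": "Not yet implemented.",
}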
 # -*- coding: utf-8 -*-
 """XBlock scenario parsing routines"""
+from openassessment.xblock.ui_models import PeerAssessmentUIModel, SelfAssessmentUIModel, SubmissionUIModel
 class ScenarioParser(object):
     """Utility class to capture parsing of xml from runtime scenarios."""
@@ -22,6 +24,11 @@ class ScenarioParser(object):
         """<prompt>This tells you what you should write about. There should be only one prompt.</prompt>"""
         return e.text.strip()
+    def get_title(self, e):
+        """<title>The title of this block</title>
+        """
+        return e.text.strip()
     def get_rubric(self, e):
         """<rubric>
         This text is general instructions relating to this rubric.
@@ -57,25 +64,35 @@ class ScenarioParser(object):
             rubric_criteria.append(crit)
         return (e.text.strip(), rubric_criteria)
-    def get_evals(self, evaluations):
-        """<evals>
+    def get_assessments(self, assessments):
+        """<assessments>
         <!-- There can be multiple types of assessments given in any
             arbitrary order, like this self assessment followed by a
             peer assessment -->
-        <self />
-        <peereval start="2014-12-20T19:00-7:00"
+        <self-assessment />
+        <peer-assessment start="2014-12-20T19:00-7:00"
                   due="2014-12-21T22:22-7:00"
                   must_grade="5"
                   must_be_graded_by="3" />
-        </evals>"""
-        return [{'type': ev.tag,
-                 'name': ev.attrib.get('name', ''),
-                 'start_datetime': ev.attrib.get('start', None),
-                 'due_datetime': ev.attrib.get('due', None),
-                 # These attrs are accepted for self, ai evals, but ignored:
-                 'must_grade': int(ev.attrib.get('must_grade', 1) if ev.tag == 'peereval' else 1),
-                 'must_be_graded_by': int(ev.attrib.get('must_be_graded_by', 0) if ev.tag == 'peereval' else 0),
-                 } for ev in evaluations]
+        </peer-assessment>"""
+        assessment_list = [SubmissionUIModel()]
+        for asmnt in assessments:
+            assessment = None
+            assessment_type = asmnt.tag
+            if 'peer-assessment' == assessment_type:
+                assessment = PeerAssessmentUIModel()
+                assessment.must_grade = int(asmnt.attrib.get('must_grade', 1))
+                assessment.must_be_graded_by = int(asmnt.attrib.get('must_be_graded_by', 0))
+            elif 'self-assessment' == assessment_type:
+                assessment = SelfAssessmentUIModel()
+            if assessment:
+                assessment.name = asmnt.attrib.get('name', '')
+                assessment.start_datetime = asmnt.attrib.get('start', None)
+                assessment.due_datetime = asmnt.attrib.get('due', None)
+                assessment_list.append(assessment)
+        return assessment_list
     def parse(self):
         """Instantiate xblock object from runtime XML definition."""
@@ -85,9 +102,10 @@ class ScenarioParser(object):
             elif child.tag == 'rubric':
                 (self.xblock.rubric_instructions,
                  self.xblock.rubric_criteria) = self.get_rubric(child)
-            elif child.tag == 'evals':
-                self.xblock.rubric_evals = self.get_evals(child)
+            elif child.tag == 'title':
+                self.xblock.title = self.get_title(child)
+            elif child.tag == 'assessments':
+                self.xblock.rubric_assessments = self.get_assessments(child)
             else:
                 self.unknown_handler(self.xblock, child)
         return self.xblock
from xblock.core import XBlock
class SelfAssessmentMixin(object):
@XBlock.handler
def render_self_assessment(self, data, suffix=''):
return self.render_assessment('static/html/oa_self_assessment.html')
<!-- TEMPLATE: status messages -->
<!-- NOTES:
* class of message--warning is added when the message is a warning/notification to the user
* class of message--confirmation is added when the message is a confirmation to the user
* class of has--actions is added when a message has actions associated with it for user to interact with
-->
<div class="wrapper wrapper--openassessment theme--basic">
<div class="openassessment" id="openassessment">
<h1 class="openassessment__title">
<span class="openassessment__title--super">
{{ title }}
</span>
<span class="openassessment__title--sub">
<span class="problem-type problem-type--open-ended-response">Open Ended Response</span>
{% for assessment in rubric_assessments %}
+
<span class="problem-type problem-type--{{ assessment.type }}">{{ assessment.name }}</span>
{% endfor %}
</span>
</h1>
<!--?: may have trouble with async -->
<nav class="nav--contents">
<h2 class="title">Skip to a part of this problem:</h2>
<ol class="list list--nav">
{% for assessment in rubric_assessments %}
<li class="list--nav__item">
<a class="action" href="#openassessment__{{ assessment.type }}">{{ assessment.navigation_text }}</a>
</li>
{% endfor %}
<li class="list--nav__item">
<a class="action" href="#openassessment__grade">Your grade for this problem</a>
</li>
</ol>
</nav>
<!-- STATUS: system messages -->
<!-- SEE t-messages.html for all cases -->
<!-- question -->
<div class="wrapper--openassessment__prompt">
<article class="openassessment__prompt ui-toggle-visibility">
<h2 class="openassessment__prompt__title">Open Assessment Problem</h2>
<div class="openassessment__prompt__copy ui-toggle-visibility__content">
<p>{{ question }}</p>
</div>
<ul class="list list--controls">
<li class="list--controls__item">
<a href="" class="action action--toggle ui-toggle-visibility__control">Collapse/Expand This</a>
</li>
</ul>
</article>
</div>
<!-- steps -->
<ol class="openassessment__steps" id="openassessment__steps">
<!-- STEP: response -->
{% for assessment in rubric_assessments %}
<li id="{{ assessment.name }}">{{ assessment.title }}</li>
{% endfor %}
</ol>
<!-- STATUS: problem grade -->
{% include "static/html/oa_grade.html" %}
</div>
</div>
<!-- TEMPLATE: grade status -->
<!-- NOTES:
* class of is--incomplete is added when the problem is in an incomplete state
* class of is--complete is added when problem is successfully completed by user
* class of has--grade is added when a grade (failing or passing) is calculated for user
* classes of needs--* are added when user action is needed
* classes of awaiting--* are added when a user's flow is dependent upon system readiness
-->
<!-- CASE: default/unstarted -->
<div id="openassessment__grade" class="openassessment__grade {{ grade_state.style_class }}">
<h2 class="openassessment__grade__title">{{ grade_state.title }}</h2>
<div class="openassessment__grade__content">
<span class="grade__value">{{ grade_state.value }}</span>
<p>{{ grade_state.message }}</p>
</div>
</div>
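The grade template above reads only four keys from grade_state; a hypothetical context dict (key names from the template, values invented) might look like this:

# Hypothetical only: a grade_state context dict matching the keys oa_grade.html reads.
grade_state = {
    "style_class": "is--incomplete",   # or "is--complete" / "has--grade", per the notes above
    "title": "Your Grade",
    "value": "Not yet graded",
    "message": "Complete the remaining steps of this problem to receive a grade.",
}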
<!-- TEMPLATE: peer evaluation -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .peer-evaluation item needs a unique id attribute formatted as #peer-evaluation--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__peer-assessment" class="openassessment__steps__step step--peer-assessment">
<div id="peer_submission_uuid" hidden="true">{{ peer_submission.uuid }}</div>
<h2 class="step__title">
<span class="step__label">Evaluate Peers' Responses</span>
<span class="step__deadline">due <span class="date">January 30, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">
<span class="step__status__value--completed">0</span> of
<span class="step__status__value--required">5</span> completed
</span>
</span>
{#</header>#}
<div class="step__instruction">
<p>Please read and evaluate the following response from one of your peers in the course.</p>
</div>
<div class="step__content">
<ul class="list--peer-assessments">
<li class="list--peer-assessments__item">
<article class="peer-assessment" id="peer-assessment--001">
<header class="peer-assessment__header">
<h3 class="peer-assessment__title">Assessment #
<span class="peer-assessment__number--current">1</span> of
<span class="peer-assessment__number--required">3</span>
</h3>
<span class="peer-assessment__expected-time">
<span class="label">Expected Time Spent:</span>
<span class="value">20 Minutes</span>
</span>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="peer-assessment__response">
{{ peer_submission.answer }}
</div>
<form id="peer-assessment--001__assessment" class="peer-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
{% endfor %}
</ol>
</li>
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="peer-assessment--001__assessment__submit" class="action action--submit">Submit your assessment &amp; move to response #2</button>
</li>
</ul>
</form>
</article>
</li>
</ul>
</div>
</li>
<!-- TEMPLATE: student response -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* class of is--saved is added when response is saved by system
* class of is--submitted is added when user has formally submitted response
* class of is--graded is added when user's response has proper amount of completed peer evaluations
* class of is--updated is added to the step__status element when a response is saved
-->
<!-- CASE: default/unanswered -->
<li id="openassessment__response" class="openassessment__steps__step step--response ui-toggle-visibility">
<!--header class="step__header ui-toggle-visibility__control"-->
<h2 class="step__title">
<span class="step__label">Your Response</span>
<span class="step__deadline">due <span class="date">January 24, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step__status__value">Incomplete</span>
</span>
<!--/header-->
<div class="step__instruction">
<p>Please provide your response to the following question. You may save your progress and return to complete your response anytime before the due date of <span class="step__deadline"><span class="date">January 24, 2014</span></span>. <strong class="emphasis--beta">Once you submit, you may not edit your response</strong>.</p>
</div>
<div class="step__content">
<form id="response__submission" class="response__submission">
<ol class="list list--fields">
<li class="field field--textarea submission__answer" id="submission__answer">
<label for="submission__answer__value">Please provide your response to the above question</label>
<textarea id="submission__answer__value" placeholder=""></textarea>
</li>
</ol>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="submission__submit" class="action action--submit submission__submit">Save Your Progress</button>
<span class="tip">you may continue to work on your response until you submit</span>
</li>
</ul>
</form>
</div>
<div class="step__actions">
<ul class="list list--actions">
<li class="list--actions__item">
<a aria-role="button" href="#" id="step--response__submit" class="action action--submit step--response__submit">Submit your response &amp; move forward</a>
</li>
</ul>
</div>
</li>
<!-- START OpenAssessmentBlock HTML -->
% if user_score:
<div>You've received the following score: ${user_score['points_earned']}/${user_score['points_possible']}.</div>
% endif
<div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
<div id="peer_submission_uuid" hidden="true">${peer_submission["uuid"]}</div>
<p>${peer_submission["answer"]}</p>
<p class="openassessment_prompt"
id="openassessment_rubric_instructions_${xblock_trace[0]}">${rubric_instructions}</p>
% for crit in rubric_criteria:
<div>
<p class="openassessment_prompt">${crit["instructions"]}</p>
% for o in crit['options']:
<div>
<input name="${crit['name']}" type="radio" value="${o[0]}">${o[1]}: ${o[2]}</input>
</div>
% endfor
</div>
% endfor
<input type="button"
class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
</div>
<div class="openassessment_response_status_block" id="openassessment_response_status_block_${xblock_trace[0]}">
This message should be invisible; please upgrade your browser.
</div>
<!-- END OpenAssessmentBlock HTML -->
<!-- TEMPLATE: self evaluation -->
<!-- NOTES:
* class of is--unavailable is added when step is not available
* each .self-assessment item needs a unique id attribute formatted as #self-assessment--###
* individual rubric questions' answers need specific id attributes in several places
-->
<!-- CASE: default/not started -->
<li id="openassessment__self-assessment" class="openassessment__steps__step step--self-assessment">
{# <header class="step__header">#}
<h2 class="step__title">
<span class="step__title__label">Evaluate Your Response</span>
<span class="step__title__deadline">due <span class="date">January 31, 2014</span> at <span class="time">15:00 UTC</span></span>
</h2>
<span class="step__status">
<span class="step__status__label">This step's status:</span>
<span class="step_status_value">Incomplete</span>
</span>
{# </header>#}
<div class="step--content">
<article class="self-assessment" id="self-assessment">
<header class="self-assessment__header">
<h3 class="self-assessment__title">Your Submitted Response</h3>
</header>
<!-- ?: markup validating/copy cleaning upon submission -->
<div class="self-assessment__response">
{{ self_submission.answer }}
</div>
<form id="self-assessment--001__assessment" class="self-assessment__assessment" method="post">
<fieldset class="assessment__fields">
<legend class="assessment__instruction">{{ rubric_instructions }}</legend>
<ol class="list list--fields assessment__rubric">
{% for criterion in rubric_criteria %}
<!-- individual rubric question (radio-based choice) -->
<li class="field field--radio is--required assessment__rubric__question" id="assessment__rubric__question--{{ criterion.name }}">
<h4 class="question__title">
{{ criterion.instructions }}
<span class="label--required">* <span class="sr">(Required)</span></span>
</h4>
<ol class="question__answers">
{% for value, text in criterion.options %}
<li class="answer">
<div class="wrapper--input">
<input type="radio" name="assessment__rubric__question--{{ criterion.name }}" id="assessment__rubric__question--{{ criterion.name }}--01" class="answer__value" value="answer--001__option--01 - Very Well" />
<label for="assessment__rubric__question--001__option--01" class="answer__label">({{ value }}) {{ text }}</label>
</div>
<span class="answer__tip">TODO: Criterion Instructions</span>
</li>
{% endfor %}
</ol>
</li>
{% endfor %}
<!-- individual rubric question (text) -->
<li class="field field--textarea assessment__rubric__question" id="assessment__rubric__question--004">
<label for="assessment__rubric__question--004__value">Please provide any other feedback you have around this response</label>
<textarea id="assessment__rubric__question--004__value" placeholder="I felt this response was..."></textarea>
</li>
</ol>
</fieldset>
<ul class="list list--actions">
<li class="list--actions__item">
<button type="submit" id="self-assessment--001__assessment__submit" class="action action--submit">Submit your assessment</button>
</li>
</ul>
</form>
</article>
</div>
</li>
<!-- START OpenAssessmentBlock HTML -->
<div class="openassessment_block" id="openassessment_block_${xblock_trace[0]}">
<p class="openassessment_prompt" id="openassessment_question_${xblock_trace[0]}">${question}</p>
<textarea class="openassessment_submission" id="openassessment_submission_${xblock_trace[0]}">Compose your response here</textarea>
<input type="button" class="openassessment_submit" id="openassessment_submit_${xblock_trace[0]}" value="Submit" />
</div>
<div class="openassessment_response_status_block" id=openassessment_response_status_block_${xblock_trace[0]}">
This message should be invisible; please upgrade your browser.
</div>
<!-- END OpenAssessmentBlock HTML -->
@@ -8,8 +8,8 @@ function OpenAssessmentBlock(runtime, element) {
     /* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
     function prepare_assessment_post(element) {
-        selector = $("input[type=radio]:checked", element);
-        values = [];
+        var selector = $("input[type=radio]:checked", element);
+        var values = [];
         for (i=0; i<selector.length; i++) {
             values[i] = selector[i].value;
         }
@@ -17,8 +17,8 @@ function OpenAssessmentBlock(runtime, element) {
     }
     function displayStatus(result) {
-        status = result[0]
-        error_msg = result[1]
+        var status = result[0];
+        var error_msg = result[1];
         if (status) {
             $('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
         } else {
...
/* START Javascript for OpenAssessmentXBlock. */
function OpenAssessmentBlock(runtime, element) {
var handlerUrl = runtime.handlerUrl(element, 'submit');
var renderSubmissionUrl = runtime.handlerUrl(element, 'render_submission');
var renderPeerUrl = runtime.handlerUrl(element, 'render_peer_assessment');
var renderSelfUrl = runtime.handlerUrl(element, 'render_self_assessment');
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
/*
* Submission Functions
*/
function render_submissions(data) {
$('#submission', element).replaceWith(data);
$('#step--response__submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
data: JSON.stringify({"submission": $('#submission__answer__value', element).val()}),
success: function(data) {
$.ajax({
type: "POST",
url: renderPeerUrl,
success: function(data) {
render_peer_assessment(data);
}
});
$.ajax({
type: "POST",
url: renderSubmissionUrl,
success: function(data) {
render_submissions(data);
}
});
}
});
});
}
/*
* Peer Assessment Functions
*/
function render_peer_assessment(data) {
$('#peer-assessment', element).replaceWith(data);
function prepare_assessment_post(element) {
var selector = $("input[type=radio]:checked", element);
var values = [];
for (var i=0; i<selector.length; i++) {
values[i] = selector[i].value;
}
return {"submission_uuid":$("div#peer_submission_uuid")[0].innerText, "points_earned":values};
}
$('#peer-assessment--001__assessment__submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
/* data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}), */
data: JSON.stringify(prepare_assessment_post(element)),
success: function(data) {
$.ajax({
type: "POST",
url: renderSelfUrl,
success: function(data) {
$('#self-assessment', element).replaceWith(data);
}
});
$.ajax({
type: "POST",
url: renderPeerUrl,
success: function(data) {
render_peer_assessment(data);
}
});
}
});
});
}
$(function ($) {
/* Here's where you'd do things on page load. */
$.ajax({
type: "POST",
url: renderSubmissionUrl,
success: function(data) {
render_submissions(data);
}
});
});
}
/* END Javascript for OpenAssessmentXBlock. */
/* START Javascript for OpenassessmentComposeXBlock. */
function OpenAssessmentBlock(runtime, element) {
var handlerUrl = runtime.handlerUrl(element, 'submit');
var success_msg = '<p class="success">Your submission has been received, thank you!</p>';
var failure_msg = '<p class="failure">An error occurred with your submission</p>';
var click_msg = '<p class="clickhere">(click here to dismiss this message)</p>';
/* Sample Debug Console: http://localhost:8000/submissions/Joe_Bloggs/TestCourse/u_3 */
function displayStatus(result) {
        var status = result[0];
        var error_msg = result[2];
        if (status) {
$('.openassessment_response_status_block', element).html(success_msg.concat(click_msg));
} else {
$('.openassessment_response_status_block', element).html(failure_msg.concat(error_msg).concat(click_msg));
}
$('.openassessment_response_status_block', element).css('display', 'block');
}
$('.openassessment_response_status_block', element).click(function(eventObject) {
$('.openassessment_response_status_block', element).css('display', 'none');
});
$('.openassessment_submit', element).click(function(eventObject) {
$.ajax({
type: "POST",
url: handlerUrl,
data: JSON.stringify({"submission": $('.openassessment_submission', element).val()}),
success: displayStatus
});
});
$(function ($) {
/* Here's where you'd do things on page load. */
$(element).css('background-color', 'LightBlue');
});
}
/* END Javascript for OpenassessmentComposeXBlock. */
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2013-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Censorship in Public Libraries
</title>
<prompt>
What do you think about censorship in libraries? I think it's pretty great.
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">The Bible</option>
<option val="1">Earnest Hemingway</option>
<option val="3">Matsuo Basho</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">Eric</option>
<option val="1">John</option>
<option val="2">Ian</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">IRC</option>
<option val="1">Real Email</option>
<option val="2">Old-timey letters</option>
</criterion>
</rubric>
<assessments>
<self-assessment name="self-assessment" />
<peer-assessment name="peer-assessment"
start="2014-12-20T19:00-7:00"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<vertical_demo>
<openassessment start="2014-12-19T23:00-7:00" due="2014-12-21T23:00-7:00">
<title>
Global Poverty
</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty?
</prompt>
<rubric>
Read for conciseness, clarity of thought, and form.
<criterion name="concise">
How concise is it?
<option val="0">(0) Neal Stephenson (late)
<explain>
In "Cryptonomicon", Stephenson spent multiple pages talking about breakfast cereal.
While hilarious, in recent years his work has been anything but 'concise'.
</explain>
</option>
<option val="1">(1) HP Lovecraft
<explain>
If the author wrote something cyclopean that staggers the mind, score it thus.
</explain>
</option>
<option val="3">(3) Robert Heinlein
<explain>
Tight prose that conveys a wealth of information about the world in relatively
few words. For example, "The door irised open and he stepped inside."
</explain>
</option>
<option val="4">(4) Neal Stephenson (early)
<explain>
When Stephenson still had an editor, his prose was dense, with anecdotes about
nitrox abuse implying main characters' whole life stories.
</explain>
</option>
<option val="5">(5) Earnest Hemingway
<explain>
Score the work this way if it makes you weep, and the removal of a single
word would make you sneer.
</explain>
</option>
</criterion>
<criterion name="clearheaded">
How clear is the thinking?
<option val="0">(0) Yogi Berra</option>
<option val="1">(1) Hunter S. Thompson</option>
<option val="2">(2) Robert Heinlein</option>
<option val="3">(3) Isaac Asimov</option>
<option val="10">(10) Spock
<explain>
Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,
and unemotional examination of the facts. This is the only item explained in this category,
to show that explained and unexplained items can be mixed.
</explain>
</option>
</criterion>
<criterion name="form">
Lastly, how is its form? Punctuation, grammar, and spelling all count.
<option val="0">(0) lolcats</option>
<option val="1">(1) Facebook</option>
<option val="2">(2) Reddit</option>
<option val="3">(3) metafilter</option>
<option val="4">(4) Usenet, 1996</option>
<option val="5">(5) The Elements of Style</option>
</criterion>
</rubric>
<assessments>
<peer-assessment start="2014-12-20T19:00-7:00"
name="peer-assessment"
due="2014-12-21T22:22-7:00"
must_grade="5"
must_be_graded_by="3" />
<self-assessment name="self-assessment" />
</assessments>
</openassessment>
</vertical_demo>
\ No newline at end of file
from xblock.core import XBlock
from submissions import api
class SubmissionMixin(object):
submit_errors = {
# Reported to user sometimes, and useful in tests
'ENOSUB': 'API submission is unrequested',
'ENODATA': 'API returned an empty response',
'EBADFORM': 'API Submission Request Error',
'EUNKNOWN': 'API returned unclassified exception',
'ENOMULTI': 'Multiple submissions are not allowed for this item',
}
@XBlock.json_handler
def submit(self, data, suffix=''):
"""
Place the submission text into the Openassessment system.
"""
status = False
status_tag = 'ENOSUB'
status_text = None
student_sub = data['submission']
student_item_dict = self.get_student_item_dict()
prev_sub = self._get_user_submission(student_item_dict)
if prev_sub:
# It is an error to submit multiple times for the same item
status_tag = 'ENOMULTI'
else:
status_tag = 'ENODATA'
try:
response = api.create_submission(student_item_dict, student_sub)
except api.SubmissionRequestError, e:
status_tag = 'EBADFORM'
status_text = unicode(e.field_errors)
except api.SubmissionError:
status_tag = 'EUNKNOWN'
else:
status = True
status_tag = response.get('student_item')
status_text = response.get('attempt_number')
# relies on success being orthogonal to errors
status_text = status_text if status_text else self.submit_errors[status_tag]
return status, status_tag, status_text
@staticmethod
def _get_submission_score(student_item_dict, submission=False):
"""Return the most recent score, if any, for student item"""
scores = False
if submission:
scores = api.get_score(student_item_dict)
return scores[0] if scores else None
@staticmethod
def _get_user_submission(student_item_dict):
"""Return the most recent submission, if any, by user in student_item_dict"""
submissions = []
try:
submissions = api.get_submissions(student_item_dict)
except api.SubmissionRequestError:
# This error is actually ok.
pass
return submissions[0] if submissions else None
@XBlock.handler
def render_submission(self, data, suffix=''):
return self.render_assessment('static/html/oa_response.html')
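As a rough guide (not in the source itself), the (status, status_tag, status_text) tuples the submit handler above can hand back to the client look like this:

# Illustration only: possible return shapes of SubmissionMixin.submit, serialized to JSON.
#   success            -> [true, "<student_item id>", "<attempt_number>"]
#   repeat submission  -> [false, "ENOMULTI", "Multiple submissions are not allowed for this item"]
#   bad request        -> [false, "EBADFORM", "<unicode(e.field_errors)>"]
#   unexpected failure -> [false, "EUNKNOWN", "API returned unclassified exception"]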
@@ -7,6 +7,7 @@ import webob
 from django.test import TestCase
 from mock import patch
+from openassessment.xblock.submission_mixin import SubmissionMixin
 from submissions import api
 from submissions.api import SubmissionRequestError, SubmissionInternalError
@@ -46,13 +47,14 @@ RUBRIC_CONFIG = """
             <option val="5">The Elements of Style</option>
         </criterion>
     </rubric>
-    <evals>
-        <peereval start="2014-12-20T19:00-7:00"
+    <assessments>
+        <peer-assessment name="peer-assessment"
+                         start="2014-12-20T19:00-7:00"
                          due="2014-12-21T22:22-7:00"
                          must_grade="5"
                          must_be_graded_by="3" />
-        <selfeval/>
-    </evals>
+        <self-assessment/>
+    </assessments>
 </openassessment>
 """
@@ -114,7 +116,7 @@ class TestOpenAssessment(TestCase):
         result = json.loads(resp.body)
         self.assertFalse(result[0])
         self.assertEqual(result[1], "EUNKNOWN")
-        self.assertEqual(result[2], self.assessment.submit_errors["EUNKNOWN"])
+        self.assertEqual(result[2], SubmissionMixin().submit_errors["EUNKNOWN"])
     @patch.object(api, 'create_submission')
     def test_submission_API_failure(self, mock_submit):
...
@@ -61,34 +61,37 @@ class TestScenarioParser(TestCase):
         self.assertEqual(int(criterion_option_value), 99)
         self.assertEqual(criterion_explanation, criterion_option_explain_text)
-    def test_get_evals(self):
-        """Given an <evals> list, return a list of evaluations."""
-        evals = """<evals>
-            <selfeval name='0382e03c808e4f2bb12dfdd2d45d5c4b'
+    def test_get_assessments(self):
+        """Given an <assessments> list, return a list of assessment modules."""
+        assessments = """<assessments>
+            <self-assessment name='0382e03c808e4f2bb12dfdd2d45d5c4b'
                       must_grade="999"
                       must_be_graded_by="73" />
-            <peereval start="2014-12-20T19:00-7:00"
+            <peer-assessment start="2014-12-20T19:00-7:00"
                       due="2014-12-21T22:22-7:00"
                       must_grade="5"
                       must_be_graded_by="3" />
-            <selfeval />
-        </evals>"""
-        evals_xml = etree.fromstring(evals)
-        parsed_list = self.test_parser.get_evals(evals_xml)
-
-        # Self evaluations take all the parameters, but mostly ignore them.
-        self.assertEqual(parsed_list[0]['type'], 'selfeval')
-        self.assertEqual(parsed_list[0]['name'], '0382e03c808e4f2bb12dfdd2d45d5c4b')
-        self.assertEqual(parsed_list[0]['must_grade'], 1)
-        self.assertEqual(parsed_list[0]['must_be_graded_by'], 0)
-
-        # Peer evaluations are more interesting
-        self.assertEqual(parsed_list[1]['type'], 'peereval')
-        self.assertEqual(parsed_list[1]['name'], '')
-        self.assertEqual(parsed_list[1]['must_grade'], 5)
-        self.assertEqual(parsed_list[1]['must_be_graded_by'], 3)
-
-        # We can parse arbitrary workflow descriptions as a list of evaluations.
+            <self-assessment />
+        </assessments>"""
+        assessments_xml = etree.fromstring(assessments)
+        parsed_list = self.test_parser.get_assessments(assessments_xml)
+
+        # Need to capture Submissions in Tests
+        self.assertEqual(parsed_list[0].assessment_type, 'submission')
+
+        # Self assessments take all the parameters, but mostly ignore them.
+        self.assertEqual(parsed_list[1].assessment_type, 'self-assessment')
+        self.assertEqual(parsed_list[1].name, '0382e03c808e4f2bb12dfdd2d45d5c4b')
+        self.assertEqual(parsed_list[1].must_grade, 1)
+        self.assertEqual(parsed_list[1].must_be_graded_by, 0)
+
+        # Peer assessments are more interesting
+        self.assertEqual(parsed_list[2].assessment_type, 'peer-assessment')
+        self.assertEqual(parsed_list[2].name, '')
+        self.assertEqual(parsed_list[2].must_grade, 5)
+        self.assertEqual(parsed_list[2].must_be_graded_by, 3)
+
+        # We can parse arbitrary workflow descriptions as a list of assessments.
         # Whether or not the workflow system can use them is another matter
-        self.assertEqual(parsed_list[2]['type'], 'selfeval')
+        self.assertEqual(parsed_list[3].assessment_type, 'self-assessment')
class SubmissionUIModel(object):
def __init__(self):
self.assessment_type = "submission"
self.name = "submission"
self.navigation_text = "Your response to this problem"
self.title = "Your Response"
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"navigation_text": self.navigation_text,
"title": self.title
}
class AssessmentUIModel(object):
def __init__(self):
self.assessment_type = None
self.name = ''
self.start_datetime = None
self.due_datetime = None
self.must_grade = 1
self.must_be_graded_by = 0
self.navigation_text = ""
self.title = ""
def create_ui_model(self):
return {
"assessment_type": self.assessment_type,
"name": self.name,
"start_datetime": self.start_datetime,
"due_datetime": self.due_datetime,
"must_grade": self.must_grade,
"must_be_graded_by": self.must_be_graded_by,
"navigation_text": self.navigation_text,
"title": self.title
}
class PeerAssessmentUIModel(AssessmentUIModel):
def __init__(self):
super(PeerAssessmentUIModel, self).__init__()
self.assessment_type = "peer-assessment"
self.title = "Assess Peers' Responses"
self.navigation_text = "Your assessment(s) of peer responses"
class SelfAssessmentUIModel(AssessmentUIModel):
def __init__(self):
super(SelfAssessmentUIModel, self).__init__()
self.assessment_type = "self-assessment"
self.navigation_text = "Your assessment of your response"
self.title = "Assess Your Response"
\ No newline at end of file
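A hedged usage sketch of the models above, roughly what the scenario parser does when it meets a <peer-assessment> tag; the attribute values are illustrative only.

# Illustration only: building a peer assessment UI model the way the scenario parser does.
peer = PeerAssessmentUIModel()
peer.must_grade = 5
peer.must_be_graded_by = 3
context_entry = peer.create_ui_model()
# context_entry now holds, among other keys:
#   {"assessment_type": "peer-assessment", "must_grade": 5, "must_be_graded_by": 3,
#    "title": "Assess Peers' Responses",
#    "navigation_text": "Your assessment(s) of peer responses", ...}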
@@ -59,15 +59,15 @@ class SubmissionRequestError(SubmissionError):
 def create_submission(student_item_dict, answer, submitted_at=None,
                       attempt_number=None):
-    """Creates a submission for evaluation.
-    Generic means by which to submit an answer for evaluation.
+    """Creates a submission for assessment.
+    Generic means by which to submit an answer for assessment.
     Args:
         student_item_dict (dict): The student_item this
             submission is associated with. This is used to determine which
             course, student, and location this submission belongs to.
-        answer (str): The answer given by the student to be evaluated.
+        answer (str): The answer given by the student to be assessed.
         submitted_at (datetime): The date in which this submission was submitted.
             If not specified, defaults to the current date.
         attempt_number (int): A student may be able to submit multiple attempts
...
@@ -116,6 +116,8 @@ ROOT_URLCONF = 'urls'
 TEMPLATE_DIRS = (
     "apps/submissions/templates",
+    "apps/openassessment/peer/templates",
+    "apps/openassessment/xblock",
 )
 INSTALLED_APPS = (
...