Commit 77ee930e by Stephen Sanchez

Adding the ability to review a single student as staff

parent 3de57ae4
......@@ -9,7 +9,6 @@ from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db import DatabaseError
from dogapi import dog_stats_api
import random
from openassessment.assessment.models import (
Assessment, AssessmentFeedback, AssessmentPart,
......@@ -59,7 +58,7 @@ def get_score(submission_uuid, requirements):
if not submitter_is_finished(submission_uuid, requirements):
return None
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
# This query will use the ordering defined by the assessment model
# (descending scored_at, then descending id)
......@@ -401,6 +400,72 @@ def get_assessments(submission_uuid, scored_only=True, limit=None):
raise PeerAssessmentInternalError(error_message)
def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
"""Retrieve the assessments created by the given submission's author.
Retrieves all the assessments created by the given submission's author, along
with related feedback, without making any assumptions about grading.
Assessments the author has started but not yet completed are not
returned.
Args:
submission_uuid (str): The UUID of the submission whose author's
assessments we are requesting. Required.
Kwargs:
scored_only (boolean): If True, retrieve only the assessments that have
been used to generate a score.
limit (int): Limit the returned assessments. If None, returns all.
Returns:
list(dict): A list of dictionaries, where each dictionary represents a
separate assessment. Each assessment contains points earned, points
possible, time scored, scorer id, score type, and feedback.
Raises:
PeerAssessmentRequestError: Raised when the submission_uuid is invalid.
PeerAssessmentInternalError: Raised when there is an internal error
while retrieving the assessments associated with this submission.
Examples:
>>> get_submitted_assessments("1", scored_only=True, limit=2)
[
{
'points_earned': 6,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'scorer': u"Tim",
'feedback': u'Your submission was thrilling.'
},
{
'points_earned': 11,
'points_possible': 12,
'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
'scorer': u"Tim",
'feedback': u'Great submission.'
}
]
"""
try:
workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
items = PeerWorkflowItem.objects.filter(
scorer=workflow,
assessment__isnull=False
)
if scored_only:
items = items.exclude(scored=False)
assessments = Assessment.objects.filter(
pk__in=[item.assessment.pk for item in items])[:limit]
return serialize_assessments(assessments)
except DatabaseError:
error_message = _(
    u"Error getting assessments graded by author of submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
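A usage sketch (editorial, not part of the commit; `sub_uuid` is a placeholder for a real submission UUID, and the calls assume a configured Django environment with an existing peer workflow):

from openassessment.assessment.api import peer as peer_api

# Only the assessments that have been used in scoring; may be empty
# until enough peers have been graded.
scored = peer_api.get_submitted_assessments(sub_uuid, scored_only=True)

# Every assessment the author has completed, capped at ten results.
completed = peer_api.get_submitted_assessments(sub_uuid, scored_only=False, limit=10)
for assessment in completed:
    print assessment["points_earned"], assessment["points_possible"]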
def get_submission_to_assess(submission_uuid, graded_by):
"""Get a submission to peer evaluate.
......
......@@ -535,7 +535,6 @@ class TestPeerApi(CacheResetTest):
# Tim's workflow has enough grades.
self.assertIsNotNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)
def test_complex_peer_assessment_workflow(self):
"""
Intended to mimic a more complicated scenario where people do not
......@@ -789,6 +788,25 @@ class TestPeerApi(CacheResetTest):
}
self.assertTrue(peer_api.submitter_is_finished(buffy_sub["uuid"], requirements))
def test_get_submitted_assessments(self):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
assessment = peer_api.create_assessment(
bob_sub["uuid"],
bob["student_id"],
ASSESSMENT_DICT['options_selected'], dict(), "",
RUBRIC_DICT,
REQUIRED_GRADED_BY,
)
self.assertEqual(assessment["points_earned"], 6)
self.assertEqual(assessment["points_possible"], 14)
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
self.assertEqual(0, len(submitted_assessments))
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
self.assertEqual(1, len(submitted_assessments))
def test_find_active_assessments(self):
buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
......@@ -934,6 +952,16 @@ class TestPeerApi(CacheResetTest):
self.assertEqual(xander_answer["uuid"], submission["uuid"])
self.assertIsNotNone(item.assessment)
@patch.object(PeerWorkflowItem.objects, "filter")
@raises(peer_api.PeerAssessmentInternalError)
def test_get_submitted_assessments_error(self, mock_filter):
self._create_student_and_submission("Tim", "Tim's answer")
bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
mock_filter.side_effect = DatabaseError("Oh no.")
submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
self.assertEqual(1, len(submitted_assessments))
@patch.object(PeerWorkflow.objects, 'raw')
@raises(peer_api.PeerAssessmentInternalError)
def test_failure_to_get_review_submission(self, mock_filter):
......
......@@ -66,7 +66,7 @@
</ol>
{% if is_course_staff %}
{% include 'openassessmentblock/staff_debug.html' %}
<div id="openassessment__staff_info"></div>
{% endif %}
</div>
</div>
......
{% load i18n %}
{% load tz %}
<div class="wrapper--staff-info wrapper--ui-staff">
<div id="openassessment__staff_info" class="wrapper--staff-info wrapper--ui-staff">
<div class="staff-info ui-staff ui-toggle-visibility is--collapsed">
<h2 class="staff-info__title ui-staff__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
......@@ -78,6 +78,26 @@
</table>
</div>
<div class="staff-info__student ui-staff__content__section">
<div class="wrapper--input" class="staff-info__student__form">
<form id="openassessment_student_info_form">
<ul>
<li class="label">
<label for="openassessment__student_id" class="label">{% trans "Get Student Info" %}</label>
</li>
<li>
<input id="openassessment__student_id" type="text" class="value" maxlength="100">
</li>
</ul>
<ul class="list list--actions">
<li class="list--actions__item">
<a aria-role="button" href="" id="submit_student_id" class="action--submit"><span class="copy">{% trans "Submit" %}</span></a>
</li>
</ul>
</form>
</div>
<div id="openassessment__student_info" class="staff-info__student__report"></div>
</div>
</div>
</div>
</div>
{% load i18n %}
{% load tz %}
<div id="openassessment__student_info" class="staff-info__student__report">
{% if submission %}
<h2 class="title">
<span class="label">{% trans "Student Workflow Information" %}</span>
</h2>
<div class="staff-info__content ui-staff__content">
<div class="wrapper--step__content">
<div class="step__content">
<h3 class="title">{% trans "Student Response" %}</h3>
<div class="student__answer__display__content">
{{ submission.answer.text|linebreaks }}
</div>
</div>
</div>
<div class="staff-info__status ui-staff__content__section">
<h3 class="title">{% trans "Assessments on Student" %}</h3>
{% for assessment in peer_assessments %}
{% with peer_num=forloop.counter %}
<h4 class="title--sub"> {% trans "Peer" %} {{ peer_num }}: </h4>
<table class="staff-info__status__table" summary="{% trans "Assessment from Peer" %}">
<thead>
<tr>
<th abbr="Criterion" scope="col">{% trans "Criterion" %}</th>
<th abbr="Selected Option" scope="col">{% trans "Selected Option" %}</th>
<th abbr="Feedback" scope="col">{% trans "Feedback" %}</th>
<th abbr="Points" scope="col">{% trans "Points" %}</th>
<th abbr="Points Possible" scope="col">{% trans "Points Possible" %}</th>
</tr>
</thead>
<tbody>
{% for criterion in rubric_criteria %}
{% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<tr>
<td class="label">{{ criterion.name }}</td>
<td class="value">{{ part.option.name }}</td>
<td class="value">{{ part.feedback }}</td>
<td class="value">{{ part.option.points }}</td>
<td class="value">{{ criterion.total_value }}</td>
</tr>
{% endif %}
{% endfor %}
{% endfor %}
</tbody>
</table>
<h4 class="title--sub">{% trans "Overall Feedback" %}</h4>
<div class="student__answer__display__content">
{{ assessment.feedback|linebreaks }}
</div>
{% endwith %}
{% endfor %}
</div>
<div class="staff-info__status ui-staff__content__section">
<h3 class="title">{% trans "Assessments submitted by Student" %}</h3>
{% for assessment in submitted_assessments %}
{% with peer_num=forloop.counter %}
<h4 class="title--sub">{% trans "Peer" %} {{ peer_num }}:</h4>
<table class="staff-info__status__table" summary="{% trans "Assessment from Student" %}">
<thead>
<tr>
<th abbr="Criterion" scope="col">{% trans "Criterion" %}</th>
<th abbr="Selected Option" scope="col">{% trans "Selected Option" %}</th>
<th abbr="Feedback" scope="col">{% trans "Feedback" %}</th>
<th abbr="Points" scope="col">{% trans "Points" %}</th>
<th abbr="Points Possible" scope="col">{% trans "Points Possible" %}</th>
</tr>
</thead>
<tbody>
{% for criterion in rubric_criteria %}
{% for part in assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<tr>
<td class="label">{{ criterion.name }}</td>
<td class="value">{{ part.option.name }}</td>
<td class="value">{{ part.feedback }}</td>
<td class="value">{{ part.option.points }}</td>
<td class="value">{{ criterion.total_value }}</td>
</tr>
{% endif %}
{% endfor %}
{% endfor %}
</tbody>
</table>
<h4 class="title--sub">{% trans "Overall Feedback" %}</h4>
<div class="student__answer__display__content">
{{ assessment.feedback|linebreaks }}
</div>
{% endwith %}
{% endfor %}
</div>
<div class="staff-info__status ui-staff__content__section">
<h3 class="title">{% trans "Student Self Assessment" %}</h3>
<table class="staff-info__status__table" summary="{% trans "Self Assessment" %}">
<thead>
<tr>
<th abbr="Criterion" scope="col">{% trans "Criterion" %}</th>
<th abbr="Selected Option" scope="col">{% trans "Selected Option" %}</th>
<th abbr="Points" scope="col">{% trans "Points" %}</th>
<th abbr="Points Possible" scope="col">{% trans "Points Possible" %}</th>
</tr>
</thead>
<tbody>
{% for criterion in rubric_criteria %}
{% for part in self_assessment.parts %}
{% if part.option.criterion.name == criterion.name %}
<tr>
<td class="label">{{ criterion.name }}</td>
<td class="value">{{ part.option.name }}</td>
<td class="value">{{ part.option.points }}</td>
<td class="value">{{ criterion.total_value }}</td>
</tr>
{% endif %}
{% endfor %}
{% endfor %}
</tbody>
</table>
</div>
</div>
{% else %}
{% trans "No submission found for student." %}
{% endif %}
</div>
\ No newline at end of file
......@@ -22,6 +22,7 @@ from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.xml import update_from_xml, serialize_content_to_xml
from openassessment.xblock.staff_info_mixin import StaffInfoMixin
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.workflow import api as workflow_api
from openassessment.xblock.validation import validator
......@@ -72,6 +73,7 @@ class OpenAssessmentBlock(
SelfAssessmentMixin,
StudioMixin,
GradeMixin,
StaffInfoMixin,
WorkflowMixin,
LmsCompatibilityMixin):
"""Displays a question and gives an area where students can compose a response."""
......@@ -202,14 +204,9 @@ class OpenAssessmentBlock(
"question": self.prompt,
"rubric_criteria": self.rubric_criteria,
"rubric_assessments": ui_models,
"is_course_staff": False,
"is_course_staff": self.is_course_staff,
}
# If we're course staff, add the context necessary to render
# the course staff debug panel.
if self.is_course_staff and not self.in_studio_preview:
context_dict.update(self.staff_debug_template_context())
template = get_template("openassessmentblock/oa_base.html")
context = Context(context_dict)
frag = Fragment(template.render(context))
......@@ -218,42 +215,6 @@ class OpenAssessmentBlock(
frag.initialize_js('OpenAssessmentBlock')
return frag
def staff_debug_template_context(self):
"""
Template context dictionary for course staff debug panel.
Returns:
dict: The template context specific to the course staff debug panel.
"""
context = dict()
# Enable the course staff debug panel
context['is_course_staff'] = True
# Calculate how many students are in each step of the workflow
status_counts, num_submissions = self.get_workflow_status_counts()
context['status_counts'] = status_counts
context['num_submissions'] = num_submissions
context['item_id'] = unicode(self.scope_ids.usage_id)
# Include release/due dates for each step in the problem
context['step_dates'] = list()
steps = ['submission'] + self.assessment_steps
for step in steps:
# Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
context['step_dates'].append({
'step': step,
'start': start_date if start_date > DISTANT_PAST else None,
'due': due_date if due_date < DISTANT_FUTURE else None,
})
return context
@property
def is_course_staff(self):
"""
......
"""
The Staff Info View mixin renders all the staff-specific information used to
determine the flow of the problem.
"""
import copy
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.xblock.resolve_dates import DISTANT_PAST, DISTANT_FUTURE
from submissions import api as submission_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
class StaffInfoMixin(object):
@XBlock.handler
def render_staff_info(self, data, suffix=''):
"""
Render the course staff debug panel.
Returns:
Response: The rendered HTML of the course staff debug panel.
"""
# If we're not course staff, or in preview mode, return nothing for the
# staff info view.
if not self.is_course_staff or self.in_studio_preview:
return self.render_error(_(
u"You do not have permission to access staff information"
))
context = dict()
path = 'openassessmentblock/staff_debug/staff_debug.html'
# Calculate how many students are in each step of the workflow
status_counts, num_submissions = self.get_workflow_status_counts()
context['status_counts'] = status_counts
context['num_submissions'] = num_submissions
context['item_id'] = unicode(self.scope_ids.usage_id)
# Include release/due dates for each step in the problem
context['step_dates'] = list()
steps = ['submission'] + self.assessment_steps
for step in steps:
# Get the dates as a student would see them
__, __, start_date, due_date = self.is_closed(step=step, course_staff=False)
context['step_dates'].append({
'step': step,
'start': start_date if start_date > DISTANT_PAST else None,
'due': due_date if due_date < DISTANT_FUTURE else None,
})
return self.render_assessment(path, context)
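An editorial sketch of exercising this handler, mirroring the handler tests near the end of this commit (`Mock`, `json`, and the `self.request` helper come from those tests; the staff user id is hypothetical):

xblock.xmodule_runtime = Mock(
    course_id='test_course',
    anonymous_student_id='test_staff',
    user_is_staff=True
)
# The handler checks is_course_staff before rendering the panel.
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
assert "course staff information" in resp.decode('utf-8').lower()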
@XBlock.handler
def render_student_info(self, data, suffix=''):
"""
Renders all relevant information for a specific student's workflow.
Given a student's ID, we can render a staff-only section of the page
with submissions and assessments specific to the student.
Must be course staff to render this view.
"""
# If request does not come from course staff, return nothing.
# This should not be able to happen unless someone attempts to
# explicitly invoke this handler.
if not self.is_course_staff or self.in_studio_preview:
return self.render_error(_(
u"You do not have permission to access student information"
))
student_id = data.params.get('student_id', '')
submission_uuid = None
submission = None
assessment_steps = self.assessment_steps
if student_id:
student_item = self.get_student_item_dict()
student_item['student_id'] = student_id
# If there is a submission available for the requested student, present
# it. If not, there will be no other information to collect.
submissions = submission_api.get_submissions(student_item, 1)
if submissions:
submission = submissions[0]
submission_uuid = submissions[0]['uuid']
if "peer-assessment" in assessment_steps:
peer_assessments = peer_api.get_assessments(submission_uuid)
submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
else:
peer_assessments = []
submitted_assessments = []
if "self-assessment" in assessment_steps:
self_assessment = self_api.get_assessment(submission_uuid)
else:
self_assessment = None
context = {
'submission': submission,
'peer_assessments': peer_assessments,
'submitted_assessments': submitted_assessments,
'self_assessment': self_assessment,
'rubric_criteria': copy.deepcopy(self.rubric_criteria),
}
if peer_assessments or self_assessment:
max_scores = peer_api.get_rubric_max_scores(submission_uuid)
for criterion in context["rubric_criteria"]:
criterion["total_value"] = max_scores[criterion["name"]]
path = 'openassessmentblock/staff_debug/student_info.html'
return self.render_assessment(path, context)
\ No newline at end of file
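For reference, `render_student_info` expects a request-like object exposing a `params` mapping, as the tests below build with a namedtuple; a minimal sketch, assuming a staff runtime mock is already in place:

from collections import namedtuple

Request = namedtuple('Request', 'params')
request = Request(params={"student_id": "Bob"})
resp = xblock.render_student_info(request)
# With no submission stored for "Bob", the rendered report says so.
assert "no submission" in resp.body.lower()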
(Source diff omitted: this file is too large to display.)
......@@ -17,6 +17,8 @@ OpenAssessment.BaseView = function(runtime, element, server) {
this.responseView = new OpenAssessment.ResponseView(this.element, this.server, this);
this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
// Staff-only information about student progress.
this.staffInfoView = new OpenAssessment.StaffInfoView(this.element, this.server, this);
};
......@@ -63,12 +65,7 @@ OpenAssessment.BaseView.prototype = {
load: function() {
this.responseView.load();
this.loadAssessmentModules();
// Set up expand/collapse for course staff debug, if available
courseStaffDebug = $('.wrapper--staff-info');
if (courseStaffDebug.length > 0) {
this.setUpCollapseExpand(courseStaffDebug, function() {});
}
this.staffInfoView.load();
},
/**
......
......@@ -93,6 +93,25 @@ OpenAssessment.Server.prototype = {
},
/**
Load the Student Info section in Staff Info.
**/
studentInfo: function(student_id) {
var url = this.url('render_student_info');
return $.Deferred(function(defer) {
$.ajax({
url: url,
type: "POST",
dataType: "html",
data: {student_id: student_id}
}).done(function(data) {
defer.resolveWith(this, [data]);
}).fail(function(data) {
defer.rejectWith(this, [gettext('This section could not be loaded.')]);
});
}).promise();
},
/**
Send a submission to the XBlock.
Args:
......
/**
Interface for staff info view.
Args:
element (DOM element): The DOM element representing the XBlock.
server (OpenAssessment.Server): The interface to the XBlock server.
baseView (OpenAssessment.BaseView): Container view.
Returns:
OpenAssessment.StaffInfoView
**/
OpenAssessment.StaffInfoView = function(element, server, baseView) {
this.element = element;
this.server = server;
this.baseView = baseView;
};
OpenAssessment.StaffInfoView.prototype = {
/**
Load the Staff Info section.
**/
load: function() {
var view = this;
this.server.render('staff_info').done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__staff_info', view.element).replaceWith(html);
view.installHandlers();
}
).fail(function(errMsg) {
view.baseView.showLoadError('staff_info');
});
},
/**
Upon request, loads the student info section of the staff info view. This
allows viewing all the submissions and assessments associated with the
given student's current workflow.
**/
loadStudentInfo: function() {
var view = this;
var sel = $('#openassessment__staff_info', this.element);
var student_id = sel.find('#openassessment__student_id').val();
this.server.studentInfo(student_id).done(
function(html) {
// Load the HTML and install event handlers
$('#openassessment__student_info', view.element).replaceWith(html);
}
).fail(function(errMsg) {
view.baseView.showLoadError('student_info');
});
},
/**
Install event handlers for the view.
**/
installHandlers: function() {
var sel = $('#openassessment__staff_info', this.element);
var view = this;
if (sel.length <= 0) {
return;
}
this.baseView.setUpCollapseExpand(sel, function() {});
// Install key handler for student id field
sel.find('#openassessment_student_info_form').submit(
function(eventObject) {
eventObject.preventDefault();
view.loadStudentInfo();
}
);
// Install a click handler for requesting student info
sel.find('#submit_student_id').click(
function(eventObject) {
eventObject.preventDefault();
view.loadStudentInfo();
}
);
}
};
......@@ -22,3 +22,40 @@
background: $bg-view;
margin: ($baseline-v*2) $baseline-h;
}
// --------------------
// Developer styles for Staff Section
// --------------------
.staff-info__student {
.label {
color: $heading-staff-color;
margin-bottom: ($baseline-v/2);
}
.action--submit {
@extend %btn--secondary;
@extend %action-2;
margin-right: ($baseline-v/2);
margin-top: ($baseline-v/2);
margin-bottom: ($baseline-v/2);
}
.title {
@extend %hd-2;
text-align: left;
color: $heading-staff-color;
margin-bottom: ($baseline-v/2);
}
.title--sub {
color: $heading-staff-color;
margin-top: ($baseline-v/2);
margin-bottom: ($baseline-v/2);
}
.student__answer__display__content {
border: 1px solid rgba($color-decorative-staff, 0.25);
padding: ($baseline-v/2) ($baseline-h/2) ($baseline-v/2) ($baseline-h/2);
margin-bottom: ($baseline-v/2);
}
}
\ No newline at end of file
......@@ -157,110 +157,6 @@ class TestOpenAssessment(XBlockHandlerTestCase):
self.assertEqual(student_item['student_id'], 'test_student')
class TestCourseStaff(XBlockHandlerTestCase):
"""
Tests for course staff debug panel.
"""
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_is_course_staff(self, xblock):
# By default, we shouldn't be course staff
self.assertFalse(xblock.is_course_staff)
# If the LMS runtime tells us we're not course staff,
# we shouldn't be course staff.
xblock.xmodule_runtime = Mock(user_is_staff=False)
self.assertFalse(xblock.is_course_staff)
# If the LMS runtime tells us that we ARE course staff,
# then we're course staff.
xblock.xmodule_runtime.user_is_staff = True
self.assertTrue(xblock.is_course_staff)
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_course_staff_debug_info(self, xblock):
# If we're not course staff, we shouldn't see the debug info
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=False
)
xblock_fragment = self.runtime.render(xblock, "student_view")
self.assertNotIn("course staff information", xblock_fragment.body_html().lower())
# If we ARE course staff, then we should see the debug info
xblock.xmodule_runtime.user_is_staff = True
xblock_fragment = self.runtime.render(xblock, "student_view")
self.assertIn("course staff information", xblock_fragment.body_html().lower())
@scenario('data/basic_scenario.xml')
def test_hide_course_staff_debug_info_in_studio_preview(self, xblock):
# If we are in Studio preview mode, don't show the staff debug info
# In this case, the runtime will tell us that we're staff,
# but no user ID will be set.
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
xblock_fragment = self.runtime.render(xblock, "student_view")
self.assertNotIn("course staff information", xblock_fragment.body_html().lower())
@scenario('data/staff_dates_scenario.xml')
def test_staff_debug_dates_table(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
# Get the context for the course staff debug panel
# and check that the dates match the scenario.
context = xblock.staff_debug_template_context()
self.assertEqual(context['step_dates'], [
{
'step': 'submission',
'start': dt.datetime(2014, 3, 1).replace(tzinfo=pytz.utc),
'due': dt.datetime(2014, 4, 1).replace(tzinfo=pytz.utc),
},
{
'step': 'peer-assessment',
'start': dt.datetime(2015, 1, 2).replace(tzinfo=pytz.utc),
'due': dt.datetime(2015, 4, 1).replace(tzinfo=pytz.utc),
},
{
'step': 'self-assessment',
'start': dt.datetime(2016, 1, 2).replace(tzinfo=pytz.utc),
'due': dt.datetime(2016, 4, 1).replace(tzinfo=pytz.utc),
},
])
# Verify that we can render without error
self.runtime.render(xblock, 'student_view')
@scenario('data/basic_scenario.xml')
def test_staff_debug_dates_distant_past_and_future(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
# Get the context for the course staff debug panel
# and check that the dates match the scenario.
context = xblock.staff_debug_template_context()
self.assertEqual(context['step_dates'], [
{'step': 'submission', 'start': None, 'due': None},
{'step': 'peer-assessment', 'start': None, 'due': None},
{'step': 'self-assessment', 'start': None, 'due': None},
])
# Verify that we can render without error
self.runtime.render(xblock, 'student_view')
class TestDates(XBlockHandlerTestCase):
@scenario('data/basic_scenario.xml')
......
# coding=utf-8
from collections import namedtuple
import pytz
import json
from mock import Mock, patch
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from openassessment.xblock.test.base import scenario, XBlockHandlerTestCase
STUDENT_ITEM = dict(
student_id="Bob",
course_id="test_course",
item_id="item_one",
item_type="openassessment",
)
ASSESSMENT_DICT = {
'overall_feedback': u"这是中国",
'options_selected': {
"Concise": "Robert Heinlein",
"Clear-headed": "Yogi Berra",
"Form": "Reddit",
},
}
class TestCourseStaff(XBlockHandlerTestCase):
"""
Tests for course staff debug panel.
"""
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_is_course_staff(self, xblock):
# By default, we shouldn't be course staff
self.assertFalse(xblock.is_course_staff)
# If the LMS runtime tells us we're not course staff,
# we shouldn't be course staff.
xblock.xmodule_runtime = Mock(user_is_staff=False)
self.assertFalse(xblock.is_course_staff)
# If the LMS runtime tells us that we ARE course staff,
# then we're course staff.
xblock.xmodule_runtime.user_is_staff = True
self.assertTrue(xblock.is_course_staff)
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_course_staff_debug_info(self, xblock):
# If we're not course staff, we shouldn't see the debug info
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=False
)
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
self.assertNotIn("course staff information", resp.decode('utf-8').lower())
# If we ARE course staff, then we should see the debug info
xblock.xmodule_runtime.user_is_staff = True
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
self.assertIn("course staff information", resp.decode('utf-8').lower())
@scenario('data/basic_scenario.xml')
def test_hide_course_staff_debug_info_in_studio_preview(self, xblock):
# If we are in Studio preview mode, don't show the staff debug info
# In this case, the runtime will tell us that we're staff,
# but no user ID will be set.
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
self.assertNotIn("course staff information", resp.decode('utf-8').lower())
@scenario('data/staff_dates_scenario.xml', user_id='Bob')
def test_staff_debug_dates_table(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
# Verify that we can render without error
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
self.assertIn("course staff information", resp.decode('utf-8').lower())
# Check all release dates.
self.assertIn("march 1, 2014", resp.decode('utf-8').lower())
self.assertIn("jan. 2, 2015", resp.decode('utf-8').lower())
self.assertIn("jan. 2, 2016", resp.decode('utf-8').lower())
# Check all due dates.
self.assertIn("april 1, 2014", resp.decode('utf-8').lower())
self.assertIn("april 1, 2015", resp.decode('utf-8').lower())
self.assertIn("april 1, 2016", resp.decode('utf-8').lower())
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_staff_debug_dates_distant_past_and_future(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
# Verify that we can render without error
resp = self.request(xblock, 'render_staff_info', json.dumps({}))
self.assertIn("course staff information", resp.decode('utf-8').lower())
self.assertIn("n/a", resp.decode('utf-8').lower())
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_staff_debug_student_info_no_submission(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
anonymous_student_id='test_student',
user_is_staff=True
)
request = namedtuple('Request', 'params')
request.params = {"student_id": "test_student"}
# Verify that we can render without error
resp = xblock.render_student_info(request)
self.assertIn("no submission", resp.body.lower())
@scenario('data/basic_scenario.xml', user_id='Bob')
def test_staff_debug_student_info_full_workflow(self, xblock):
# Simulate that we are course staff
xblock.xmodule_runtime = Mock(
course_id='test_course',
item_id=xblock.scope_ids.usage_id,
anonymous_student_id='Bob',
user_is_staff=True
)
bob_item = STUDENT_ITEM.copy()
bob_item["item_id"] = xblock.scope_ids.usage_id
# Create a submission for Bob, and corresponding workflow.
submission = sub_api.create_submission(bob_item, {'text':"Bob Answer"})
peer_api.create_peer_workflow(submission["uuid"])
workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])
# Create a submission for Tim, and corresponding workflow.
tim_item = bob_item.copy()
tim_item["student_id"] = "Tim"
tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
peer_api.create_peer_workflow(tim_sub["uuid"])
workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])
# Bob assesses Tim.
peer_api.get_submission_to_assess(submission['uuid'], 1)
peer_api.create_assessment(
submission["uuid"],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'], dict(), "",
{'criteria': xblock.rubric_criteria},
1,
)
# Bob assesses himself.
self_api.create_assessment(
submission['uuid'],
STUDENT_ITEM["student_id"],
ASSESSMENT_DICT['options_selected'],
{'criteria': xblock.rubric_criteria},
)
# Now Bob should be fully populated in the student info view.
request = namedtuple('Request', 'params')
request.params = {"student_id": "Bob"}
# Verify that we can render without error
resp = xblock.render_student_info(request)
self.assertIn("bob answer", resp.body.lower())
\ No newline at end of file