Commit d32de309 by Will Daly

Merge pull request #222 from edx/will/staff-counts-workflow

Course staff can see counts
Parents: 29b95399 55da2eee
@@ -585,6 +585,7 @@ def create_peer_workflow_item(scorer, submission_uuid):
     Raises:
         PeerAssessmentWorkflowError: Could not find the workflow for the student.
         PeerAssessmentInternalError: Could not create the peer workflow item.
+        SubmissionError: An error occurred while retrieving the submission.
     """
     submission = get_submission_and_student(submission_uuid)
     student_item_dict = copy.copy(submission['student_item'])
...
@@ -53,7 +53,7 @@ def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, s
     # Check that the student is allowed to assess this submission
     try:
         submission = get_submission_and_student(submission_uuid)
-        if submission is None or submission['student_item']['student_id'] != user_id:
+        if submission['student_item']['student_id'] != user_id:
             raise SelfAssessmentRequestError(_("Cannot self-assess this submission"))
     except SubmissionNotFoundError:
         raise SelfAssessmentRequestError(_("Could not retrieve the submission."))
...
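The `None` check can be dropped because `get_submission_and_student` no longer returns `None` for a missing submission; it now raises (see the submissions API change later in this commit). A minimal sketch of the new calling contract, using the module alias that the workflow API already uses:

```python
from submissions import api as sub_api

try:
    submission = sub_api.get_submission_and_student(u'no-such-uuid')
except sub_api.SubmissionNotFoundError:
    # Under the new contract, a missing submission raises rather than
    # returning None, so callers handle one failure path instead of two.
    submission = None
```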
@@ -3,14 +3,14 @@
 Tests for assessment models.
 """
-from django.test import TestCase
+from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.models import (
     Rubric, Criterion, CriterionOption, InvalidOptionSelection,
     AssessmentFeedback, AssessmentFeedbackOption,
 )

-class TestRubricOptionIds(TestCase):
+class TestRubricOptionIds(CacheResetTest):
     """
     Test selection of options from a rubric.
     """
@@ -107,7 +107,7 @@ class TestRubricOptionIds(TestCase):
     })

-class AssessmentFeedbackTest(TestCase):
+class AssessmentFeedbackTest(CacheResetTest):
     """
     Tests for assessment feedback.
     This is feedback that students give in response to the peer assessments they receive.
...
@@ -2,13 +2,13 @@
 import datetime

 from django.db import DatabaseError
-from django.test import TestCase
 import pytz
 from ddt import ddt, file_data
 from mock import patch
 from nose.tools import raises
+from openassessment.test_utils import CacheResetTest
 from openassessment.assessment import peer_api
 from openassessment.assessment.models import Assessment, PeerWorkflow, PeerWorkflowItem, AssessmentFeedback
 from openassessment.workflow import api as workflow_api
@@ -121,7 +121,7 @@ THURSDAY = datetime.datetime(2007, 9, 16, 0, 0, 0, 0, pytz.UTC)

 @ddt
-class TestPeerApi(TestCase):
+class TestPeerApi(CacheResetTest):
     def test_create_assessment(self):
         self._create_student_and_submission("Tim", "Tim's answer")
         bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
...
@@ -6,14 +6,14 @@ Tests for self-assessment API.
 import copy
 import datetime
 import pytz
-from django.test import TestCase
+from openassessment.test_utils import CacheResetTest
 from submissions.api import create_submission
 from openassessment.assessment.self_api import (
     create_assessment, is_complete, SelfAssessmentRequestError, get_assessment
 )

-class TestSelfApi(TestCase):
+class TestSelfApi(CacheResetTest):
     STUDENT_ITEM = {
         'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
@@ -198,4 +198,4 @@ class TestSelfApi(TestCase):
     def test_is_complete_no_submission(self):
         # This submission uuid does not exist
         self.assertFalse(is_complete('abc1234'))
\ No newline at end of file
 import json
 import os.path

-from ddt import ddt, file_data
-from django.test import TestCase
+from openassessment.test_utils import CacheResetTest
 from openassessment.assessment.models import Criterion, CriterionOption, Rubric, AssessmentFeedback
 from openassessment.assessment.serializers import (
     InvalidRubric, RubricSerializer, rubric_from_dict,
@@ -16,7 +14,7 @@ def json_data(filename):
     return json.load(json_file)

-class TestRubricDeserialization(TestCase):
+class TestRubricDeserialization(CacheResetTest):
     def test_rubric_only_created_once(self):
         # Make sure sending the same Rubric data twice only creates one Rubric,
@@ -37,8 +35,7 @@ class TestRubricDeserialization(TestCase):
     rubric_from_dict(json_data('rubric_data/no_points.json'))

-class TestCriterionDeserialization(TestCase):
+class TestCriterionDeserialization(CacheResetTest):
     def test_empty_criteria(self):
         with self.assertRaises(InvalidRubric) as cm:
@@ -56,7 +53,8 @@ class TestCriterionDeserialization(TestCase):
     {'criteria': [u'This field is required.']}
 )

-class TestCriterionOptionDeserialization(TestCase):
+class TestCriterionOptionDeserialization(CacheResetTest):
     def test_empty_options(self):
         with self.assertRaises(InvalidRubric) as cm:
@@ -85,7 +83,7 @@ class TestCriterionOptionDeserialization(TestCase):
 )

-class TestAssessmentFeedbackSerializer(TestCase):
+class TestAssessmentFeedbackSerializer(CacheResetTest):
     def test_serialize(self):
         feedback = AssessmentFeedback.objects.create(
...
@@ -61,6 +61,10 @@
         </li>
     {% endfor %}
     </ol>
+    {% if is_course_staff %}
+        {% include 'openassessmentblock/staff_debug.html' %}
+    {% endif %}
 </div>
 </div>
 </div>
...
<div class="wrapper--staff-info wrapper--ui-staff">
<div class="staff-info ui-staff ui-toggle-visibility is--collapsed">
<h2 class="staff-info__title ui-staff__title ui-toggle-visibility__control">
<i class="ico icon-caret-right"></i>
<span class="staff-info__title__copy">Course Staff Information</span>
</h2>
<div class="staff-info__content ui-staff__content ui-toggle-visibility__content">
<div class="staff-info__summary ui-staff__content__section">
<dl class="submissions--total">
<dt class="label">Total number of submissions:</dt>
<dd class="value">{{ num_submissions }}</dd>
</dl>
</div>
<div class="staff-info__status ui-staff__content__section">
<table class="staff-info__status__table" summary="Where are your students currently in this problem">
<caption class="title">Student Progress/Step Status</caption>
<thead>
<tr>
<th abbr="Step" scope="col">Problem Step</th>
<th abbr="# of Students" scope="col">Number of Students Actively in Step</th>
</tr>
</thead>
<tbody>
{% for item in status_counts %}
<tr>
<td class="label">{{ item.status }}</td>
<td class="value">{{ item.count }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
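For reference, a minimal sketch (values invented for illustration) of the context this new `staff_debug.html` template consumes; the XBlock's `student_view` below supplies `num_submissions` and `status_counts` in exactly this shape:

```python
from django.template.loader import get_template
from django.template import Context

template = get_template('openassessmentblock/staff_debug.html')
html = template.render(Context({
    'num_submissions': 70,  # example value
    'status_counts': [      # one row per workflow status
        {'status': 'peer', 'count': 5},
        {'status': 'self', 'count': 10},
        {'status': 'waiting', 'count': 43},
        {'status': 'done', 'count': 12},
    ],
}))
```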
"""
Test utilities
"""
from django.core.cache import cache
from django.test import TestCase
class CacheResetTest(TestCase):
"""
Test case that resets the cache before and after each test.
"""
def setUp(self):
super(CacheResetTest, self).setUp()
cache.clear()
def tearDown(self):
super(CacheResetTest, self).tearDown()
cache.clear()
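A small usage sketch (hypothetical test, not part of this commit): tests that touch cached data subclass `CacheResetTest` instead of Django's `TestCase`, so each test method starts and ends with an empty cache:

```python
from django.core.cache import cache
from openassessment.test_utils import CacheResetTest

class ExampleTest(CacheResetTest):
    def test_starts_empty(self):
        # Values cached by earlier tests (e.g. serialized submissions)
        # cannot leak in, because setUp() has already cleared the cache.
        self.assertIsNone(cache.get("submissions.submission.some-uuid"))
```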
@@ -97,7 +97,7 @@ def create_workflow(submission_uuid):
     )

     try:
-        submission_dict = sub_api.get_submission(submission_uuid)
+        submission_dict = sub_api.get_submission_and_student(submission_uuid)
     except sub_api.SubmissionNotFoundError as err:
         err_msg = sub_err_msg("submission not found")
         logger.error(err_msg)
@@ -121,7 +121,9 @@ def create_workflow(submission_uuid):
         peer_api.create_peer_workflow(submission_uuid)
         workflow = AssessmentWorkflow.objects.create(
             submission_uuid=submission_uuid,
-            status=AssessmentWorkflow.STATUS.peer
+            status=AssessmentWorkflow.STATUS.peer,
+            course_id=submission_dict['student_item']['course_id'],
+            item_id=submission_dict['student_item']['item_id'],
         )
     except (DatabaseError, peer_api.PeerAssessmentError) as err:
         err_msg = u"Could not create assessment workflow: {}".format(err)
@@ -292,6 +294,39 @@ def update_from_assessments(submission_uuid, assessment_requirements):
     return _serialized_with_details(workflow, assessment_requirements)
+def get_status_counts(course_id, item_id):
+    """
+    Count how many workflows have each status, for a given item in a course.
+
+    Args:
+        course_id (unicode): The ID of the course.
+        item_id (unicode): The ID of the item in the course.
+
+    Returns:
+        list of dictionaries with keys "status" (str) and "count" (int)
+
+    Example usage:
+        >>> get_status_counts("ora2/1/1", "peer-assessment-problem")
+        [
+            {"status": "peer", "count": 5},
+            {"status": "self", "count": 10},
+            {"status": "waiting", "count": 43},
+            {"status": "done", "count": 12},
+        ]
+    """
+    return [
+        {
+            "status": status,
+            "count": AssessmentWorkflow.objects.filter(
+                status=status,
+                course_id=course_id,
+                item_id=item_id,
+            ).count()
+        } for status in AssessmentWorkflow.STATUS_VALUES
+    ]

 def _get_workflow_model(submission_uuid):
     """Return the `AssessmentWorkflow` model for a given `submission_uuid`.
...
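Note that `get_status_counts` issues one COUNT query per status value (four in total). A sketch of an equivalent single-query form using Django's aggregation API; this is an alternative, not what the commit ships:

```python
from django.db.models import Count
from openassessment.workflow.models import AssessmentWorkflow

def get_status_counts_single_query(course_id, item_id):
    # GROUP BY status in the database, then fill in zeroes for any
    # status that has no workflows yet.
    rows = (
        AssessmentWorkflow.objects
        .filter(course_id=course_id, item_id=item_id)
        .values('status')
        .annotate(count=Count('status'))
    )
    counts = dict((row['status'], row['count']) for row in rows)
    return [
        {"status": status, "count": counts.get(status, 0)}
        for status in AssessmentWorkflow.STATUS_VALUES
    ]
```

With only four statuses the difference is negligible, and the per-status loop in the commit is arguably clearer.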
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'AssessmentWorkflow.course_id'
        db.add_column('workflow_assessmentworkflow', 'course_id',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, db_index=True),
                      keep_default=False)

        # Adding field 'AssessmentWorkflow.item_id'
        db.add_column('workflow_assessmentworkflow', 'item_id',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, db_index=True),
                      keep_default=False)

        # Create a composite index of course_id, item_id, and status
        db.create_index('workflow_assessmentworkflow', ['course_id', 'item_id', 'status'])

    def backwards(self, orm):
        # Delete the composite index of course_id, item_id, and status
        db.delete_index('workflow_assessmentworkflow', ['course_id', 'item_id', 'status'])

        # Deleting field 'AssessmentWorkflow.course_id'
        db.delete_column('workflow_assessmentworkflow', 'course_id')

        # Deleting field 'AssessmentWorkflow.item_id'
        db.delete_column('workflow_assessmentworkflow', 'item_id')

    models = {
        'workflow.assessmentworkflow': {
            'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
        }
    }

    complete_apps = ['workflow']
@@ -30,18 +30,27 @@ class AssessmentWorkflow(TimeStampedModel, StatusModel):
     an after the fact recording of the last known state of that information so
     we can search easily.
     """
-    STATUS = Choices(  # implicit "status" field
+    STATUS_VALUES = [
         "peer",     # User needs to assess peer submissions
         "self",     # User needs to assess themselves
         "waiting",  # User has done all necessary assessment but hasn't been
                     # graded yet -- we're waiting for assessments of their
                     # submission by others.
         "done",     # Complete
-    )
+    ]
+
+    STATUS = Choices(*STATUS_VALUES)  # implicit "status" field

     submission_uuid = models.CharField(max_length=36, db_index=True, unique=True)
     uuid = UUIDField(version=1, db_index=True, unique=True)

+    # These values are used to find workflows for a particular item
+    # in a course without needing to look up the submissions for that item.
+    # Because submissions are immutable, we can safely duplicate the values
+    # here without violating data integrity.
+    course_id = models.CharField(max_length=255, blank=False, db_index=True)
+    item_id = models.CharField(max_length=255, blank=False, db_index=True)

     class Meta:
         ordering = ["-created"]
         # TODO: In migration, need a non-unique index on (course_id, item_id, status)
...
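The `STATUS_VALUES` refactor is behavior-preserving: a `model_utils.Choices` built from the same strings still exposes attribute access for existing call sites, while the plain list gives `get_status_counts` something to iterate. A quick sketch of the equivalence:

```python
from model_utils import Choices

STATUS_VALUES = ["peer", "self", "waiting", "done"]
STATUS = Choices(*STATUS_VALUES)

assert STATUS.peer == "peer"                            # call sites like STATUS.peer keep working
assert list(STATUS) == [(v, v) for v in STATUS_VALUES]  # (db value, display) pairs
```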
 from django.db import DatabaseError
-from django.test import TestCase
 from mock import patch
 from nose.tools import raises
+from openassessment.test_utils import CacheResetTest
 from openassessment.assessment import peer_api
 from openassessment.workflow.models import AssessmentWorkflow
@@ -24,7 +25,7 @@ REQUIREMENTS = {
     }
 }

-class TestAssessmentWorkflowApi(TestCase):
+class TestAssessmentWorkflowApi(CacheResetTest):
     def test_create_workflow(self):
         submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
@@ -81,3 +82,76 @@ class TestAssessmentWorkflowApi(TestCase):
         submission = sub_api.create_submission(ITEM_1, "We talk TV!")
         workflow = workflow_api.create_workflow(submission["uuid"])
         workflow_api.get_workflow_for_submission(workflow["uuid"], REQUIREMENTS)
+    def test_get_status_counts(self):
+        # Initially, the counts should all be zero
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        self.assertEqual(counts, [
+            {"status": "peer", "count": 0},
+            {"status": "self", "count": 0},
+            {"status": "waiting", "count": 0},
+            {"status": "done", "count": 0},
+        ])
+
+        # Create assessments with each status
+        # We're going to cheat a little bit by using the model objects
+        # directly, since the API does not provide access to the status directly.
+        self._create_workflow_with_status("user 1", "test/1/1", "peer-problem", "peer")
+        self._create_workflow_with_status("user 2", "test/1/1", "peer-problem", "self")
+        self._create_workflow_with_status("user 3", "test/1/1", "peer-problem", "self")
+        self._create_workflow_with_status("user 4", "test/1/1", "peer-problem", "waiting")
+        self._create_workflow_with_status("user 5", "test/1/1", "peer-problem", "waiting")
+        self._create_workflow_with_status("user 6", "test/1/1", "peer-problem", "waiting")
+        self._create_workflow_with_status("user 7", "test/1/1", "peer-problem", "done")
+        self._create_workflow_with_status("user 8", "test/1/1", "peer-problem", "done")
+        self._create_workflow_with_status("user 9", "test/1/1", "peer-problem", "done")
+        self._create_workflow_with_status("user 10", "test/1/1", "peer-problem", "done")
+
+        # Now the counts should be updated
+        counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        self.assertEqual(counts, [
+            {"status": "peer", "count": 1},
+            {"status": "self", "count": 2},
+            {"status": "waiting", "count": 3},
+            {"status": "done", "count": 4},
+        ])
+
+        # Create a workflow in a different course, same user and item
+        # Counts should be the same
+        self._create_workflow_with_status("user 1", "other_course", "peer-problem", "peer")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        self.assertEqual(counts, updated_counts)
+
+        # Create a workflow in the same course, different item
+        # Counts should be the same
+        self._create_workflow_with_status("user 1", "test/1/1", "other problem", "peer")
+        updated_counts = workflow_api.get_status_counts("test/1/1", "peer-problem")
+        self.assertEqual(counts, updated_counts)
+
+    def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
+        """
+        Create a submission and workflow with a given status.
+
+        Args:
+            student_id (unicode): Student ID for the submission.
+            course_id (unicode): Course ID for the submission.
+            item_id (unicode): Item ID for the submission.
+            status (unicode): One of the acceptable status values (e.g. "peer", "self", "waiting", "done")
+
+        Kwargs:
+            answer (unicode): Submission answer.
+
+        Returns:
+            None
+        """
+        submission = sub_api.create_submission({
+            "student_id": student_id,
+            "course_id": course_id,
+            "item_id": item_id,
+            "item_type": "openassessment",
+        }, answer)
+
+        workflow = workflow_api.create_workflow(submission['uuid'])
+        workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
+        workflow_model.status = status
+        workflow_model.save()
@@ -290,8 +290,15 @@ class OpenAssessmentBlock(
             "question": self.prompt,
             "rubric_criteria": self.rubric_criteria,
             "rubric_assessments": ui_models,
+            "is_course_staff": False,
         }

+        if self.is_course_staff:
+            status_counts, num_submissions = self.get_workflow_status_counts()
+            context_dict['is_course_staff'] = True
+            context_dict['status_counts'] = status_counts
+            context_dict['num_submissions'] = num_submissions
+
         template = get_template("openassessmentblock/oa_base.html")
         context = Context(context_dict)
         frag = Fragment(template.render(context))
@@ -300,6 +307,19 @@ class OpenAssessmentBlock(
         frag.initialize_js('OpenAssessmentBlock')
         return frag

+    @property
+    def is_course_staff(self):
+        """
+        Check whether the user has course staff permissions for this XBlock.
+
+        Returns:
+            bool
+        """
+        if hasattr(self, 'xmodule_runtime'):
+            return getattr(self.xmodule_runtime, 'user_is_staff', False)
+        else:
+            return False
+
     def _create_ui_models(self):
         """Combine UI attributes and XBlock configuration into a UI model.
...
@@ -72,6 +72,12 @@ OpenAssessment.BaseView.prototype = {
         this.renderPeerAssessmentStep();
         this.renderSelfAssessmentStep();
         this.gradeView.load();
+
+        // Set up expand/collapse for course staff debug, if available
+        var courseStaffDebug = $('.wrapper--staff-info');
+        if (courseStaffDebug.length > 0) {
+            this.setUpCollapseExpand(courseStaffDebug, function() {});
+        }
     },

     /**
...
// openassessment: elements - staff
// ====================

// NOTES:
// * staff-centric UI used for reporting/debugging

.wrapper--xblock {

    // --------------------
    // general: staff UI
    // --------------------
    .wrapper--ui-staff {
        box-shadow: inset 0 1px -2px 1px $shadow-d1;
        margin-top: ($baseline-v*2);
        border-radius: ($baseline-v/10);
        border: 1px solid shade($color-decorative-staff, 25%);
        border-top: ($baseline-v/4) solid $color-decorative-staff;
        padding: $baseline-v ($baseline-h/2);
        background: $staff-bg;
    }

    .ui-staff {

        // CASE: area is collapse/expand friendly
        &.ui-toggle-visibility {

            .staff-info__title__copy {
                margin-left: ($baseline-h/4);
            }

            // STATE: is collapsed
            &.is--collapsed {

                .ui-staff__content {
                    margin-top: 0;
                }
            }
        }
    }

    .ui-staff__title {
        @extend %t-heading;
        color: $copy-staff-color;
    }

    .ui-staff__content {
        margin-top: $baseline-v;
        color: $copy-staff-color;
    }

    .ui-staff__content__section {
        padding-bottom: $baseline-v;
        border-bottom: 1px solid rgba($color-decorative-staff, 0.25);
        margin-bottom: $baseline-v;
        @extend %wipe-last-child;
    }

    // --------------------
    // staff debug info
    // --------------------

    // UI - summary (statement)
    .staff-info__summary {

        .label, .value {
            @extend %hd-2;
            display: inline-block;
            vertical-align: middle;
            color: $heading-staff-color;
        }

        .label {
            margin-right: ($baseline-h/4);
        }

        .value {
            @extend %t-strong;
        }
    }

    // UI - status (table)
    .staff-info__status {
    }

    .staff-info__status__table {
        @extend %copy-3;
        border-radius: ($baseline-v/10);
        color: $copy-staff-color;

        .title {
            @extend %hd-2;
            text-align: left;
            color: $heading-staff-color;
            margin-bottom: ($baseline-v/2);
        }

        .label {
            color: $heading-staff-color;
        }

        .value {
            @extend %t-strong;
            color: $heading-staff-color;
        }

        th, td {
            border: 1px solid rgba($color-decorative-staff, 0.25);
            padding: ($baseline-v/2) ($baseline-h/4);
        }

        th, td[scope] {
            text-align: left;
        }

        th {
            @extend %copy-4;
            @extend %t-titlecase;
        }

        thead {
        }

        tbody {
        }
    }
}
@@ -194,4 +194,8 @@
         overflow: auto; // needed for ui-hints to show and for the document flow to clear
     }
 }

+.ui-staff__title {
+    color: $copy-staff-color !important;
+}
 }
@@ -79,6 +79,7 @@
 @import 'oa/elements/footer';     // view/app footers
 @import 'oa/elements/navigation'; // navigation sets
 @import 'oa/elements/layout';     // applied layouts and deliberate class-based breakpoints
+@import 'oa/elements/staff';      // open assessment staff-centric UI
 @import 'oa/views/oa-base';       // open assessment base view

 // openassessment: contextual
...
@@ -154,6 +154,9 @@ $color-content-main: $gray-d1;
 // application - colors: states
 $selected-color: $black-t;

+// application - colors: staff UI
+$color-decorative-staff: $edx-pink;

 // --------------------
 // mixins:
 // --------------------
...
@@ -193,3 +193,11 @@ $color-complete: rgb(98, 194, 74);
 $color-incomplete: $color-warning;
 $color-confirm: $heading-primary-color;
 $color-unavailable: tint($copy-color, 85%);

+// --------------------
+// application - colors: staff UI
+// --------------------
+$color-decorative-staff: $color-decorative-primary;
+$copy-staff-color: $gray-l3;
+$heading-staff-color: $white;
+$staff-bg: $gray-d1;
@@ -5,7 +5,7 @@ import os.path
 import json
 from functools import wraps

-from django.test import TestCase
+from openassessment.test_utils import CacheResetTest
 from workbench.runtime import WorkbenchRuntime
 import webob
@@ -61,7 +61,7 @@ def scenario(scenario_path, user_id=None):
     return _decorator

-class XBlockHandlerTestCase(TestCase):
+class XBlockHandlerTestCase(CacheResetTest):
     """
     Load the XBlock in the workbench runtime to test its handler.
     """
...
@@ -145,6 +145,37 @@ class TestOpenAssessment(XBlockHandlerTestCase):
         self.assertEqual(student_item['course_id'], 'test_course')
         self.assertEqual(student_item['student_id'], 'test_student')

+    @scenario('data/basic_scenario.xml')
+    def test_is_course_staff(self, xblock):
+        # By default, we shouldn't be course staff
+        self.assertFalse(xblock.is_course_staff)
+
+        # If the LMS runtime tells us we're not course staff,
+        # we shouldn't be course staff.
+        xblock.xmodule_runtime = Mock(user_is_staff=False)
+        self.assertFalse(xblock.is_course_staff)
+
+        # If the LMS runtime tells us that we ARE course staff,
+        # then we're course staff.
+        xblock.xmodule_runtime.user_is_staff = True
+        self.assertTrue(xblock.is_course_staff)
+
+    @scenario('data/basic_scenario.xml')
+    def test_course_staff_debug_info(self, xblock):
+        # If we're not course staff, we shouldn't see the debug info
+        xblock.xmodule_runtime = Mock(
+            course_id='test_course',
+            anonymous_student_id='test_student',
+            user_is_staff=False
+        )
+        xblock_fragment = self.runtime.render(xblock, "student_view")
+        self.assertNotIn("course staff information", xblock_fragment.body_html().lower())
+
+        # If we ARE course staff, then we should see the debug info
+        xblock.xmodule_runtime.user_is_staff = True
+        xblock_fragment = self.runtime.render(xblock, "student_view")
+        self.assertIn("course staff information", xblock_fragment.body_html().lower())

 class TestDates(XBlockHandlerTestCase):
...
@@ -67,3 +67,32 @@ class WorkflowMixin(object):
         return workflow_api.get_workflow_for_submission(
             self.submission_uuid, self.workflow_requirements()
         )

+    def get_workflow_status_counts(self):
+        """
+        Retrieve the counts of students in each step of the workflow.
+
+        Returns:
+            tuple of (list, int), where the list contains dicts with keys
+            "status" (unicode value) and "count" (int value), and the
+            integer represents the total number of submissions.
+
+        Example Usage:
+            >>> status_counts, num_submissions = xblock.get_workflow_status_counts()
+            >>> num_submissions
+            12
+            >>> status_counts
+            [
+                {"status": "peer", "count": 2},
+                {"status": "self", "count": 1},
+                {"status": "waiting", "count": 4},
+                {"status": "done", "count": 5}
+            ]
+        """
+        student_item = self.get_student_item_dict()
+        status_counts = workflow_api.get_status_counts(
+            course_id=student_item['course_id'],
+            item_id=student_item['item_id'],
+        )
+        num_submissions = sum(item['count'] for item in status_counts)
+        return status_counts, num_submissions
@@ -183,7 +183,14 @@ def get_submission(submission_uuid):
     )

     cache_key = "submissions.submission.{}".format(submission_uuid)
-    cached_submission_data = cache.get(cache_key)
+    try:
+        cached_submission_data = cache.get(cache_key)
+    except Exception as ex:
+        # The cache backend could raise an exception
+        # (for example, memcache keys that contain spaces)
+        logger.exception("Error occurred while retrieving submission from the cache")
+        cached_submission_data = None

     if cached_submission_data:
         return cached_submission_data
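The try/except guards against backends like memcached, which reject keys containing spaces or longer than 250 characters. A hypothetical alternative (not in this commit) is to make the key safe by construction:

```python
import hashlib

def _safe_cache_key(prefix, raw_key):
    # Hash the variable part of the key so it never contains whitespace
    # or control characters and never exceeds memcached's 250-char limit.
    digest = hashlib.md5(raw_key.encode('utf-8')).hexdigest()
    return "{}.{}".format(prefix, digest)

cache_key = _safe_cache_key("submissions.submission", u"no such uuid")
```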
@@ -213,23 +220,40 @@ def get_submission_and_student(uuid):
     Returns:
         Serialized Submission model (dict) containing a serialized StudentItem model
-        If the submission does not exist, return None
+
+    Raises:
+        SubmissionNotFoundError: Raised if the submission does not exist.
+        SubmissionRequestError: Raised if the search parameter is not a string.
+        SubmissionInternalError: Raised for unknown errors.
     """
-    try:
-        submission = Submission.objects.get(uuid=uuid)
-    except Submission.DoesNotExist:
-        return None
+    # This may raise API exceptions
+    submission = get_submission(uuid)

-    # There is probably a more idiomatic way to do this using the Django REST framework
+    # Retrieve the student item from the cache
+    cache_key = "submissions.student_item.{}".format(submission['student_item'])
     try:
-        submission_dict = SubmissionSerializer(submission).data
-        submission_dict['student_item'] = StudentItemSerializer(submission.student_item).data
-    except Exception as ex:
-        err_msg = "Could not get submission due to error: {}".format(ex)
-        logger.exception(err_msg)
-        raise SubmissionInternalError(err_msg)
+        cached_student_item = cache.get(cache_key)
+    except:
+        # The cache backend could raise an exception
+        # (for example, memcache keys that contain spaces)
+        logger.exception("Error occurred while retrieving student item from the cache")
+        cached_student_item = None

-    return submission_dict
+    if cached_student_item is not None:
+        submission['student_item'] = cached_student_item
+    else:
+        # There is probably a more idiomatic way to do this using the Django REST framework
+        try:
+            student_item = StudentItem.objects.get(id=submission['student_item'])
+            submission['student_item'] = StudentItemSerializer(student_item).data
+            cache.set(cache_key, submission['student_item'])
+        except Exception as ex:
+            err_msg = "Could not get submission due to error: {}".format(ex)
+            logger.exception(err_msg)
+            raise SubmissionInternalError(err_msg)
+
+    return submission

 def get_submissions(student_item_dict, limit=None):
...
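The new `get_submission_and_student` follows a read-through caching pattern, which is safe here because submissions and student items are immutable once created (as the model comment above notes). The pattern in isolation, as an illustrative helper rather than code from this commit:

```python
from django.core.cache import cache

def _read_through(cache_key, fetch):
    # Try the cache first; on a miss, hit the database via fetch()
    # and populate the cache for subsequent calls.
    value = cache.get(cache_key)
    if value is None:
        value = fetch()
        cache.set(cache_key, value)
    return value

# e.g. _read_through(cache_key, lambda: StudentItemSerializer(student_item).data)
```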
@@ -3,6 +3,7 @@ import copy
 from ddt import ddt, file_data
 from django.db import DatabaseError
+from django.core.cache import cache
 from django.test import TestCase
 from nose.tools import raises
 from mock import patch
@@ -37,6 +38,12 @@ class TestSubmissionsApi(TestCase):
     Testing Submissions
     """

+    def setUp(self):
+        """
+        Clear the cache.
+        """
+        cache.clear()
+
     def test_create_submission(self):
         submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
         student_item = self._get_student_item(STUDENT_ITEM)
@@ -53,9 +60,9 @@ class TestSubmissionsApi(TestCase):
         retrieved = api.get_submission_and_student(submission['uuid'])
         self.assertItemsEqual(submission, retrieved)

-        # Should get None if we retrieve a submission that doesn't exist
-        retrieved = api.get_submission_and_student(u'no such uuid')
-        self.assertIs(retrieved, None)
+        # Should raise an exception if the submission does not exist
+        with self.assertRaises(api.SubmissionNotFoundError):
+            api.get_submission_and_student(u'no such uuid')

     def test_get_submissions(self):
         api.create_submission(STUDENT_ITEM, ANSWER_ONE)
...