Commit 6c1694ff authored by Andrew Dekker, committed by Will Daly

Implement leaderboard feature

parent 21aa930e
{% load i18n %}
{% spaceless %}
<li id="openassessment__leaderboard" class="openassessment__steps__step step--leaderboard is--complete">
<header class="step__header">
<h2 class="step__title">
<span class="wrapper--copy">
<span class="step__label">{% trans "Leaderboard: Complete" %} </span>
<div class="wrapper--step__content">
<h3 class="leaderboard__title">{% trans "Best Responses For This Assignment" %}</h3>
<ol class="list leaderboard__score__list">
{% for topscore in topscores %}
<li class="leaderboard__score__item">
<h4 class="leaderboard__list__number">{{ forloop.counter }}</h4>
{% with num_points=topscore.score %}
<h4 class="leaderboard__score__title">
{% blocktrans %}{{ num_points }} points{% endblocktrans %}
</h4>
{% endwith %}
<div class="leaderboard__answer">{{ topscore.content|linebreaks }}</div>
</li>
{% endfor %}
</ol>
</div>
</span>
</h2>
</header>
</li>
{% endspaceless %}
{% load i18n %}
{% spaceless %}
<li id="openassessment__leaderboard" class="openassessment__steps__step step--leaderboard">
<header class="step__header">
<h2 class="step__title">
<span class="wrapper--copy">
<span class="step__label">{% trans "Leaderboard: Not Available" %} </span>
</span>
</h2>
</header>
<div class="wrapper--step__content">
<div class="step__content">
<div class="leaderboard__description">
<p>{% trans "The leaderboard is not available until your final grade is complete." %}</p>
</div>
</div>
</div>
</li>
{% endspaceless %}
"""
Leaderboard step in the OpenAssessment XBlock.
"""
from django.utils.translation import ugettext as _
from xblock.core import XBlock
from openassessment.assessment.errors import SelfAssessmentError, PeerAssessmentError
from submissions import api as sub_api
class LeaderboardMixin(object):
"""Leaderboard Mixin introduces all handlers for displaying the leaderboard
Abstracts all functionality and handlers associated with the Leaderboard.
Leaderboard is a Mixin for the OpenAssessmentBlock. Functions in the
Leaderboard call into the OpenAssessmentBlock functions and will not work
outside of OpenAssessmentBlock.
"""
@XBlock.handler
def render_leaderboard(self, data, suffix=''):
"""
Render the leaderboard.
Args:
data: Not used.
Kwargs:
suffix: Not used.
Returns:
unicode: HTML content of the leaderboard.
"""
# Retrieve the status of the workflow. If no workflows have been
# started this will be an empty dict, so status will be None.
workflow = self.get_workflow_info()
status = workflow.get('status')
# Render the grading section based on the status of the workflow
try:
if status == "done":
path, context = self.render_leaderboard_complete(self.get_student_item_dict())
else: # status is 'self' or 'peer', which implies that the workflow is incomplete
path, context = self.render_leaderboard_incomplete()
except (sub_api.SubmissionError, PeerAssessmentError, SelfAssessmentError):
return self.render_error(_(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def render_leaderboard_complete(self, student_item_dict):
"""
Render the leaderboard complete state.
Args:
student_item_dict (dict): The student item
Returns:
template_path (string), tuple of context (dict)
"""
scores = sub_api.get_top_submissions(
student_item_dict['course_id'],
student_item_dict['item_id'],
student_item_dict['item_type'],
self.leaderboard_show,
use_cache=False
)
for score in scores:
if 'text' in score['content']:
score['content'] = score['content']['text']
elif isinstance(score['content'], basestring):
pass
# Currently, we do not handle non-text submissions.
else:
score['content'] = ""
context = { 'topscores': scores }
return ('openassessmentblock/leaderboard/oa_leaderboard_show.html', context)
def render_leaderboard_incomplete(self):
"""
Render the grade incomplete state.
Returns:
template_path (string), tuple of context (dict)
"""
return ('openassessmentblock/leaderboard/oa_leaderboard_waiting.html', {})
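Note: a rough usage sketch (not part of this commit) of how the mixin's pieces fit together. It assumes only the shapes exercised by this commit's tests: `get_top_submissions` returns a list of `{'content': ..., 'score': ...}` dicts ordered from highest to lowest score, and `block` is a hypothetical OpenAssessmentBlock instance with `leaderboard_show` set to a positive integer.

# Sketch only -- `block` is assumed to be an OpenAssessmentBlock
# with LeaderboardMixin mixed in and leaderboard_show > 0.
def example_render_leaderboard(block):
    student_item = block.get_student_item_dict()
    path, context = block.render_leaderboard_complete(student_item)
    # path == 'openassessmentblock/leaderboard/oa_leaderboard_show.html'
    # context == {'topscores': [{'content': u'best essay', 'score': 10}, ...]}
    return block.render_assessment(path, context)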
@@ -11,10 +11,10 @@ from django.template.loader import get_template
from webob import Response
from xblock.core import XBlock
-from xblock.fields import List, Scope, String, Boolean
+from xblock.fields import List, Scope, String, Boolean, Integer
from xblock.fragment import Fragment

from openassessment.xblock.grade_mixin import GradeMixin
from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
from openassessment.xblock.defaults import *  # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.message_mixin import MessageMixin
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
@@ -64,6 +64,12 @@ UI_MODELS = {
        "class_id": "openassessment__grade",
        "navigation_text": "Your grade for this assignment",
        "title": "Your Grade:"
    },
    "leaderboard": {
        "name": "leaderboard",
        "class_id": "openassessment__leaderboard",
        "navigation_text": "A leaderboard of the top submissions",
        "title": "Leaderboard"
    }
}
@@ -89,6 +95,7 @@ class OpenAssessmentBlock(
    SelfAssessmentMixin,
    StudioMixin,
    GradeMixin,
    LeaderboardMixin,
    StaffInfoMixin,
    WorkflowMixin,
    StudentTrainingMixin,
@@ -118,6 +125,12 @@ class OpenAssessmentBlock(
        help="A title to display to a student (plain text)."
    )

    leaderboard_show = Integer(
        default=0,
        scope=Scope.content,
        help="The number of leaderboard results to display (0 if none)"
    )

    prompt = String(
        default=DEFAULT_PROMPT,
        scope=Scope.content,
@@ -221,6 +234,7 @@ class OpenAssessmentBlock(
        # On page load, update the workflow status.
        # We need to do this here because peers may have graded us, in which
        # case we may have a score available.
        try:
            self.update_workflow_status()
        except AssessmentWorkflowError:
@@ -236,7 +250,6 @@ class OpenAssessmentBlock(
            "rubric_assessments": ui_models,
            "show_staff_debug_info": self.is_course_staff and not self.in_studio_preview,
        }
        template = get_template("openassessmentblock/oa_base.html")
        context = Context(context_dict)
        frag = Fragment(template.render(context))
@@ -300,6 +313,10 @@ class OpenAssessmentBlock(
            if ui_model:
                ui_models.append(dict(assessment, **ui_model))
        ui_models.append(UI_MODELS["grade"])

        if self.leaderboard_show > 0:
            ui_models.append(UI_MODELS["leaderboard"])

        return ui_models

    @staticmethod
@@ -324,6 +341,10 @@ class OpenAssessmentBlock(
            load('static/xml/poverty_rubric_example.xml')
        ),
        (
            "OpenAssessmentBlock Leaderboard",
            load('static/xml/leaderboard.xml')
        ),
        (
            "OpenAssessmentBlock (Peer Only) Rubric",
            load('static/xml/poverty_peer_only_example.xml')
        ),
...
This source diff could not be displayed because it is too large.
@@ -30,6 +30,12 @@
            "class_id": "openassessment__grade",
            "navigation_text": "Your grade for this problem",
            "title": "Your Grade:"
        },
        {
            "name": "leaderboard",
            "class_id": "openassessment__leaderboard",
            "navigation_text": "A leaderboard for the top submissions",
            "title": "Leaderboard:"
        }
    ]
},
@@ -66,6 +72,12 @@
            "class_id": "openassessment__grade",
            "navigation_text": "Your grade for this problem",
            "title": "Your Grade:"
        },
        {
            "name": "leaderboard",
            "class_id": "openassessment__leaderboard",
            "navigation_text": "A leaderboard for the top submissions",
            "title": "Leaderboard:"
        }
    ]
},
...
@@ -20,6 +20,7 @@ OpenAssessment.BaseView = function(runtime, element, server) {
    this.selfView = new OpenAssessment.SelfView(this.element, this.server, this);
    this.peerView = new OpenAssessment.PeerView(this.element, this.server, this);
    this.gradeView = new OpenAssessment.GradeView(this.element, this.server, this);
    this.leaderboardView = new OpenAssessment.LeaderboardView(this.element, this.server, this);
    this.messageView = new OpenAssessment.MessageView(this.element, this.server, this);
    // Staff only information about student progress.
    this.staffInfoView = new OpenAssessment.StaffInfoView(this.element, this.server, this);
@@ -74,6 +75,7 @@ OpenAssessment.BaseView.prototype = {
        this.peerView.load();
        this.selfView.load();
        this.gradeView.load();
        this.leaderboardView.load();
        /**
        this.messageView.load() is intentionally omitted.
        Because of the asynchronous loading, there is no way to tell (from the perspective of the
...
/**
Interface for leaderboard view.

Args:
    element (DOM element): The DOM element representing the XBlock.
    server (OpenAssessment.Server): The interface to the XBlock server.
    baseView (OpenAssessment.BaseView): Container view.

Returns:
    OpenAssessment.LeaderboardView
**/
OpenAssessment.LeaderboardView = function(element, server, baseView) {
    this.element = element;
    this.server = server;
    this.baseView = baseView;
};

OpenAssessment.LeaderboardView.prototype = {
    /**
    Load the leaderboard view.
    **/
    load: function() {
        var view = this;
        var baseView = this.baseView;
        this.server.render('leaderboard').done(
            function(html) {
                // Load the HTML for the leaderboard step;
                // there are no event handlers to install.
                $('#openassessment__leaderboard', view.element).replaceWith(html);
            }
        ).fail(function(errMsg) {
            baseView.showLoadError('leaderboard', errMsg);
        });
    }
};
@@ -228,3 +228,88 @@
        @extend .action--submit;
    }
}

#openassessment__leaderboard {
    font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;

    .step__counter, .step__counter:before {
        display: none;
    }

    .wrapper--copy {
        margin-left: 0;
        padding-left: 0;
        border-left: 0;
    }

    @include media($bp-m) {
        @include span-columns(4 of 4);
    }

    @include media($bp-ds) {
        @include span-columns(6 of 6);
    }

    @include media($bp-dm) {
        @include span-columns(12 of 12);
    }

    @include media($bp-dl) {
        @include span-columns(12 of 12);
    }

    @include media($bp-dx) {
        @include span-columns(12 of 12);
    }

    .step__label, .grade__value {
        display: inline-block;
        vertical-align: middle;
    }

    .step__label {
        margin-right: ($baseline-h/4);
    }

    .leaderboard__title {
        @extend %t-superheading;
        color: $heading-primary-color;
    }

    .list.leaderboard__score__list {
        list-style-type: none;

        li.leaderboard__score__item {
            margin: 15px 0;

            .leaderboard__list__number {
                display: inline-block;
                background: $edx-gray-d2;
                color: white;
                padding: 5px 5px 3px 5px;
                font-size: 16px;
                min-width: 35px;
                text-align: center;
                border-top-right-radius: 2px;
                border-top-left-radius: 2px;
            }

            .leaderboard__score__title {
                font-size: 15px;
                color: $edx-gray-l1;
                text-transform: uppercase;
                display: inline-block;
                padding-left: 15px;
            }

            .leaderboard__answer {
                border-top: 2px solid $edx-gray-d2;
                box-shadow: inset 0 0 3px 1px rgba(10, 10, 10, 0.1);
                padding: 5px 10px;
                max-height: 200px;
                overflow-y: scroll;
                font-size: 14px;
            }
        }
    }
}
\ No newline at end of file
<openassessment submission_due="2030-03-11T18:20" leaderboard_show="10">
<title>
My favourite pet
</title>
<rubric>
<prompt>
Which animal would you like to have as a pet?
</prompt>
<criterion feedback='optional'>
<name>concise</name>
<prompt>How rare is the animal?</prompt>
<option points="0">
<name>Very common</name>
<explanation>
You can pick it up on the street
</explanation>
</option>
<option points="2">
<name>Common</name>
<explanation>
Can get it at the local pet store
</explanation>
</option>
<option points="4">
<name>Somewhat common</name>
<explanation>
Easy to see but hard to purchase as a pet
</explanation>
</option>
<option points="8">
<name>Rare</name>
<explanation>
Need to travel the world to find it
</explanation>
</option>
<option points="10">
<name>Extinct</name>
<explanation>
Maybe in the ice-age
</explanation>
</option>
</criterion>
<criterion feedback='optional'>
<name>form</name>
<prompt>How hard would it be to care for the animal?</prompt>
<option points="0">
<name>It feeds itself</name>
<explanation></explanation>
</option>
<option points="2">
<name>Any pet food will do</name>
<explanation></explanation>
</option>
<option points="4">
<name>Some work required to care for the animal</name>
<explanation></explanation>
</option>
<option points="6">
<name>A full time job to care for the animal</name>
<explanation></explanation>
</option>
<option points="8">
<name>A team required to care for the animal</name>
<explanation></explanation>
</option>
<option points="10">
<name>The pet has special needs</name>
<explanation></explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="self-assessment" />
</assessments>
</openassessment>
@@ -5,7 +5,7 @@ import os.path
import json
from functools import wraps

-from openassessment.test_utils import CacheResetTest
+from openassessment.test_utils import CacheResetTest, TransactionCacheResetTest
from workbench.runtime import WorkbenchRuntime
import webob
@@ -41,7 +41,7 @@ def scenario(scenario_path, user_id=None):
            xblock = None
            if args:
                self = args[0]
-                if isinstance(self, XBlockHandlerTestCase):
+                if isinstance(self, XBlockHandlerTestCaseMixin):
                    # Print a debug message
                    print "Loading scenario from {path}".format(path=scenario_path)
@@ -61,7 +61,7 @@ def scenario(scenario_path, user_id=None):
    return _decorator


-class XBlockHandlerTestCase(CacheResetTest):
+class XBlockHandlerTestCaseMixin(object):
    """
    Load the XBlock in the workbench runtime to test its handler.
    """
@@ -70,6 +70,7 @@ class XBlockHandlerTestCase(CacheResetTest):
        """
        Create the runtime.
        """
        super(XBlockHandlerTestCaseMixin, self).setUp()
        self.runtime = WorkbenchRuntime()

    def set_user(self, user_id):
@@ -149,3 +150,20 @@ class XBlockHandlerTestCase(CacheResetTest):
        base_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(base_dir, path)) as file_handle:
            return file_handle.read()


class XBlockHandlerTestCase(XBlockHandlerTestCaseMixin, CacheResetTest):
    """
    Base XBlock handler test case.  Use this if you do NOT need to simulate the read-replica.
    """
    pass


class XBlockHandlerTransactionTestCase(XBlockHandlerTestCaseMixin, TransactionCacheResetTest):
    """
    Variation of the XBlock handler test case that truncates the test database instead
    of rolling back transactions.  This is necessary if the software under test relies
    on the read replica.  It's also slower, so unless you're using the read-replica,
    use `XBlockHandlerTestCase` instead.
    """
    pass
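For example, a handler test that touches the read replica (as the leaderboard tests in this commit do) subclasses the transaction-based variant. A minimal sketch, with a hypothetical test class name; `scenario` and `self.request` are the helpers defined in this file:

import json
from .base import XBlockHandlerTransactionTestCase, scenario

class ExampleLeaderboardTest(XBlockHandlerTransactionTestCase):
    @scenario('data/leaderboard_show.xml', user_id='Bob')
    def test_render(self, xblock):
        # The scenario decorator loads the XML fixture into the
        # workbench runtime and passes the XBlock to the test.
        resp = self.request(xblock, 'render_leaderboard', json.dumps({}))
        self.assertGreater(len(resp), 0)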
<openassessment leaderboard_show="3">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
<openassessment leaderboard_show="10">
<title>Open Assessment Test</title>
<prompt>
Given the state of the world today, what do you think should be done to
combat poverty? Please answer in a short essay of 200-300 words.
</prompt>
<rubric>
<prompt>Read for conciseness, clarity of thought, and form.</prompt>
<criterion>
<name>𝓒𝓸𝓷𝓬𝓲𝓼𝓮</name>
<prompt>How concise is it?</prompt>
<option points="3">
<name>ﻉซƈﻉɭɭﻉกՇ</name>
<explanation>Extremely concise</explanation>
</option>
<option points="2">
<name>Ġööḋ</name>
<explanation>Concise</explanation>
</option>
<option points="1">
<name>ק๏๏г</name>
<explanation>Wordy</explanation>
</option>
</criterion>
<criterion>
<name>Form</name>
<prompt>How well-formed is it?</prompt>
<option points="3">
<name>Good</name>
<explanation>Good</explanation>
</option>
<option points="2">
<name>Fair</name>
<explanation>Fair</explanation>
</option>
<option points="1">
<name>Poor</name>
<explanation>Poor</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
@@ -3,11 +3,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -66,13 +61,7 @@
    "promptless": {
        "title": "Foo",
-        "prompt": null,
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -132,11 +121,6 @@
        "title": "Foo",
        "prompt": "",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -197,11 +181,6 @@
        "title": "ƒσσ",
        "prompt": "Ṫëṡẗ ṗṛöṁṗẗ",
        "rubric_feedback_prompt": "†es† Feedbåck Prømp†",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -256,11 +235,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -320,12 +294,6 @@
    "no_feedback_prompt": {
        "title": "Foo",
        "prompt": "Test prompt",
-        "rubric_feedback_prompt": null,
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -385,11 +353,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -444,11 +407,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 2,
@@ -521,11 +479,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -590,9 +543,7 @@
        "rubric_feedback_prompt": "Test Feedback Prompt",
        "start": "2010-04-01T00:00:00",
        "due": "2030-05-01T00:00:00",
-        "submission_start": null,
        "submission_due": "2020-04-15T00:00:00",
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -655,11 +606,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -721,11 +667,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -787,11 +728,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -845,11 +781,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -948,11 +879,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -1069,11 +995,6 @@
        "title": "Foo",
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
-        "allow_file_upload": null,
        "criteria": [
            {
                "order_num": 0,
@@ -1169,10 +1090,6 @@
        "prompt": "Test prompt",
        "rubric_feedback_prompt": "Test Feedback Prompt",
        "allow_file_upload": true,
-        "start": null,
-        "due": null,
-        "submission_start": null,
-        "submission_due": null,
        "criteria": [
            {
                "order_num": 0,
...
@@ -450,5 +450,89 @@
"</rubric>",
"</openassessment>"
]
},
"leaderboard_num_zero": {
"xml": [
"<openassessment leaderboard_show=\"0\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"leaderboard_num_negative": {
"xml": [
"<openassessment leaderboard_show=\"-1\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"leaderboard_num_too_high": {
"xml": [
"<openassessment leaderboard_show=\"101\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
},
"leaderboard_num_not_integer": {
"xml": [
"<openassessment leaderboard_show=\"not_an_int\">",
"<title>Foo</title>",
"<assessments>",
"<assessment name=\"peer-assessment\" start=\"2014-02-27T09:46:28\" due=\"2014-03-01T00:00:00\" must_grade=\"5\" must_be_graded_by=\"3\" />",
"<assessment name=\"self-assessment\" start=\"2014-04-01T00:00:00\" due=\"2014-06-01T00:00:00\" />",
"</assessments>",
"<rubric>",
"<prompt>Test prompt</prompt>",
"<criterion>",
"<name>Test criterion</name>",
"<prompt>Test criterion prompt</prompt>",
"<option points=\"0\"><name>No</name><explanation>No explanation</explanation></option>",
"<option points=\"2\"><name>Yes</name><explanation>Yes explanation</explanation></option>",
"</criterion>",
"</rubric>",
"</openassessment>"
]
}
}
# -*- coding: utf-8 -*-
"""
Tests for leaderboard handlers in Open Assessment XBlock.
"""
import json

import mock
from django.core.cache import cache
from submissions import api as sub_api
from .base import XBlockHandlerTransactionTestCase, scenario


class TestLeaderboardRender(XBlockHandlerTransactionTestCase):

    @scenario('data/basic_scenario.xml')
    def test_no_leaderboard(self, xblock):
        # Since there's no leaderboard set in the problem XML,
        # it should not be visible
        self._assert_leaderboard_visible(xblock, False)

    @scenario('data/leaderboard_unavailable.xml')
    def test_unavailable(self, xblock):
        # Start date is in the future for this scenario
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/leaderboard/oa_leaderboard_waiting.html',
            {}
        )
        self._assert_leaderboard_visible(xblock, True)

    @scenario('data/leaderboard_show.xml')
    def test_show_no_submissions(self, xblock):
        # No submissions created yet, so the leaderboard shouldn't display any scores
        self._assert_scores(xblock, [])
        self._assert_leaderboard_visible(xblock, True)

    @scenario('data/leaderboard_show.xml')
    def test_show_submissions(self, xblock):
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            ("test answer 1", 1),
            ("test answer 2", 2)
        ])
        self._assert_scores(xblock, [
            {"content": "test answer 2", "score": 2},
            {"content": "test answer 1", "score": 1}
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            ("test answer 3", 0),
            ("test answer 4", 10),
            ("test answer 5", 3)
        ])
        self._assert_scores(xblock, [
            {"content": "test answer 4", "score": 10},
            {"content": "test answer 5", "score": 3},
            {"content": "test answer 2", "score": 2}
        ])
        self._assert_leaderboard_visible(xblock, True)

    @scenario('data/leaderboard_show.xml')
    def test_no_text_key_submission(self, xblock):
        # Instead of using the default submission as a dict with "text",
        # make the submission a string.
        self._create_submissions_and_scores(xblock, [("test answer", 1)], submission_key=None)

        # It should still work
        self._assert_scores(xblock, [
            {"content": "test answer", "score": 1}
        ])

    @scenario('data/leaderboard_show.xml')
    def test_non_text_submission(self, xblock):
        # Create a non-text submission (the submission dict doesn't contain "text")
        self._create_submissions_and_scores(xblock, [("s3key", 1)], submission_key="file_key")

        # Expect that we default to an empty string for content
        self._assert_scores(xblock, [
            {"content": "", "score": 1}
        ])

    def _create_submissions_and_scores(
        self, xblock, submissions_and_scores,
        submission_key="text", points_possible=10
    ):
        """
        Create submissions and scores that should be displayed by the leaderboard.

        Args:
            xblock (OpenAssessmentBlock)
            submissions_and_scores (list): List of `(submission, score)` tuples, where
                `submission` is the essay text (string) and `score` is the integer
                number of points earned.

        Keyword Args:
            points_possible (int): The total number of points possible for this problem.
            submission_key (string): The key to use in the submission dict.  If None, use
                the submission value itself instead of embedding it in a dictionary.
        """
        for num, (submission, points_earned) in enumerate(submissions_and_scores):
            # Assign a unique student ID.
            # These aren't displayed by the leaderboard, so we can set them
            # to anything without affecting the test.
            student_item = xblock.get_student_item_dict()
            student_item['student_id'] = "student {num}".format(num=num)

            if submission_key is not None:
                answer = {submission_key: submission}
            else:
                answer = submission

            # Create a submission
            sub = sub_api.create_submission(student_item, answer)

            # Create a score for the submission
            sub_api.set_score(sub['uuid'], points_earned, points_possible)

    def _assert_scores(self, xblock, scores):
        """
        Check that the leaderboard displays the expected scores.

        Args:
            xblock (OpenAssessmentBlock)
            scores (list): The scores displayed by the leaderboard, each of which
                is a dictionary with keys 'content' (the submission text)
                and 'score' (the integer number of points earned).
        """
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/leaderboard/oa_leaderboard_show.html',
            {
                'topscores': scores
            },
            workflow_status='done'
        )

    def _assert_path_and_context(self, xblock, expected_path, expected_context, workflow_status=None):
        """
        Render the leaderboard and verify:
            1) that the correct template and context were used
            2) that the rendering occurred without an error

        Args:
            xblock (OpenAssessmentBlock): The XBlock under test.
            expected_path (str): The expected template path.
            expected_context (dict): The expected template context.

        Kwargs:
            workflow_status (str): If provided, simulate this status from the workflow API.

        Raises:
            AssertionError
        """
        if workflow_status is not None:
            xblock.get_workflow_info = mock.Mock(return_value={'status': workflow_status})

        if workflow_status == 'done':
            path, context = xblock.render_leaderboard_complete(xblock.get_student_item_dict())
        else:
            path, context = xblock.render_leaderboard_incomplete()

        self.assertEqual(path, expected_path)
        self.assertEqual(context, expected_context)

        # Verify that we render without error
        resp = self.request(xblock, 'render_leaderboard', json.dumps({}))
        self.assertGreater(len(resp), 0)

    def _assert_leaderboard_visible(self, xblock, is_visible):
        """
        Check that the leaderboard is displayed in the student view.
        """
        fragment = self.runtime.render(xblock, "student_view")
        has_leaderboard = 'openassessment__leaderboard' in fragment.body_html()
        self.assertEqual(has_leaderboard, is_visible)
@@ -93,16 +93,17 @@ class TestSerializeContent(TestCase):
    @ddt.file_data('data/serialize.json')
    def test_serialize(self, data):
-        self.oa_block.title = data['title']
-        self.oa_block.prompt = data['prompt']
-        self.oa_block.rubric_feedback_prompt = data['rubric_feedback_prompt']
-        self.oa_block.start = _parse_date(data['start'])
-        self.oa_block.due = _parse_date(data['due'])
-        self.oa_block.submission_start = data['submission_start']
-        self.oa_block.submission_due = data['submission_due']
-        self.oa_block.rubric_criteria = data['criteria']
-        self.oa_block.rubric_assessments = data['assessments']
-        self.oa_block.allow_file_upload = data['allow_file_upload']
+        self.oa_block.title = data.get('title')
+        self.oa_block.prompt = data.get('prompt')
+        self.oa_block.rubric_feedback_prompt = data.get('rubric_feedback_prompt')
+        self.oa_block.start = _parse_date(data.get('start'))
+        self.oa_block.due = _parse_date(data.get('due'))
+        self.oa_block.submission_start = data.get('submission_start')
+        self.oa_block.submission_due = data.get('submission_due')
+        self.oa_block.leaderboard_show = data.get('leaderboard_show')
+        self.oa_block.rubric_criteria = data.get('criteria')
+        self.oa_block.rubric_assessments = data.get('assessments')
+        self.oa_block.allow_file_upload = data.get('allow_file_upload')
        xml = serialize_content(self.oa_block)

        # Compare the XML with our expected output
@@ -152,6 +153,7 @@ class TestSerializeContent(TestCase):
        self.oa_block.due = None
        self.oa_block.submission_start = None
        self.oa_block.submission_due = None
        self.oa_block.leaderboard_show = 10

        # We have to be really permissive with the data we'll accept.
        # If the data we're retrieving is somehow corrupted,
@@ -177,6 +179,7 @@ class TestSerializeContent(TestCase):
        self.oa_block.due = None
        self.oa_block.submission_start = None
        self.oa_block.submission_due = None
        self.oa_block.leaderboard_show = 10

        for assessment_dict in self.BASIC_ASSESSMENTS:
            for mutated_dict in self._dict_mutations(assessment_dict):
@@ -189,7 +192,7 @@ class TestSerializeContent(TestCase):
                msg = "Could not parse mutated assessment dict {assessment}\n{ex}".format(assessment=mutated_dict, ex=ex)
                self.fail(msg)

-    @ddt.data("title", "prompt", "start", "due", "submission_due", "submission_start")
+    @ddt.data("title", "prompt", "start", "due", "submission_due", "submission_start", "leaderboard_show")
    def test_mutated_field(self, field):
        self.oa_block.rubric_criteria = self.BASIC_CRITERIA
        self.oa_block.rubric_assessments = self.BASIC_ASSESSMENTS
@@ -197,6 +200,7 @@ class TestSerializeContent(TestCase):
        self.oa_block.due = None
        self.oa_block.submission_start = None
        self.oa_block.submission_due = None
        self.oa_block.leaderboard_show = 10
        self.oa_block.allow_file_upload = None

        for mutated_value in [0, u"\u9282", None]:
@@ -319,11 +323,11 @@ class TestUpdateFromXml(TestCase):
        self.oa_block.prompt = ""
        self.oa_block.rubric_criteria = dict()
        self.oa_block.rubric_assessments = list()
        self.oa_block.start = dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc)
        self.oa_block.due = dt.datetime(3000, 1, 1).replace(tzinfo=pytz.utc)
        self.oa_block.submission_start = "2000-01-01T00:00:00"
        self.oa_block.submission_due = "2000-01-01T00:00:00"
        self.oa_block.leaderboard_show = 0

    @ddt.file_data('data/update_from_xml.json')
    def test_update_from_xml(self, data):
@@ -335,14 +339,34 @@ class TestUpdateFromXml(TestCase):
        self.assertEqual(self.oa_block, returned_block)

        # Check that the contents of the modified XBlock are correct
-        self.assertEqual(self.oa_block.title, data['title'])
-        self.assertEqual(self.oa_block.prompt, data['prompt'])
-        self.assertEqual(self.oa_block.start, _parse_date(data['start']))
-        self.assertEqual(self.oa_block.due, _parse_date(data['due']))
-        self.assertEqual(self.oa_block.submission_start, data['submission_start'])
-        self.assertEqual(self.oa_block.submission_due, data['submission_due'])
-        self.assertEqual(self.oa_block.rubric_criteria, data['criteria'])
-        self.assertEqual(self.oa_block.rubric_assessments, data['assessments'])
+        expected_fields = [
+            'title',
+            'prompt',
+            'start',
+            'due',
+            'submission_start',
+            'submission_due',
+            'criteria',
+            'assessments',
+            'allow_file_upload',
+            'leaderboard_show'
+        ]
+        for field_name in expected_fields:
+            if field_name in data:
+                # The fixture keys 'criteria' and 'assessments' map to the
+                # block fields 'rubric_criteria' and 'rubric_assessments'.
+                attr_name = {
+                    'criteria': 'rubric_criteria',
+                    'assessments': 'rubric_assessments'
+                }.get(field_name, field_name)
+                actual = getattr(self.oa_block, attr_name)
+                expected = data[field_name]
+                if field_name in ['start', 'due']:
+                    expected = _parse_date(expected)
+                self.assertEqual(
+                    actual, expected,
+                    msg=u"Wrong value for '{key}': was {actual} but expected {expected}".format(
+                        key=field_name,
+                        actual=repr(actual),
+                        expected=repr(expected)
+                    )
+                )

    @ddt.file_data('data/update_from_xml_error.json')
    def test_update_from_xml_error(self, data):
...
@@ -6,6 +6,7 @@ import pytz
import dateutil.parser
import defusedxml.ElementTree as safe_etree
from django.utils.translation import ugettext as _
from submissions.api import MAX_TOP_SUBMISSIONS


class UpdateFromXmlError(Exception):
@@ -517,6 +518,10 @@ def serialize_content_to_xml(oa_block, root):
    if oa_block.submission_due is not None:
        root.set('submission_due', unicode(oa_block.submission_due))

    # Set the leaderboard_show attribute if a leaderboard is enabled
    if oa_block.leaderboard_show:
        root.set('leaderboard_show', unicode(oa_block.leaderboard_show))

    if oa_block.allow_file_upload is not None:
        root.set('allow_file_upload', unicode(oa_block.allow_file_upload))
@@ -644,6 +649,21 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
    else:
        rubric = _parse_rubric_xml(rubric_el)

    # Retrieve the leaderboard setting if it exists, otherwise default to 0
    leaderboard_show = 0
    if 'leaderboard_show' in root.attrib:
        try:
            leaderboard_show = int(root.attrib['leaderboard_show'])
            if leaderboard_show < 1:
                raise UpdateFromXmlError(_('The leaderboard must have a positive integer value.'))
            if leaderboard_show > MAX_TOP_SUBMISSIONS:
                msg = _('The number of leaderboard scores must be less than or equal to {max_num}').format(
                    max_num=MAX_TOP_SUBMISSIONS
                )
                raise UpdateFromXmlError(msg)
        except (TypeError, ValueError):
            raise UpdateFromXmlError(_('The leaderboard must have an integer value.'))

    # Retrieve the assessments
    assessments_el = root.find('assessments')
    if assessments_el is None:
@@ -667,6 +687,7 @@ def update_from_xml(oa_block, root, validator=DEFAULT_VALIDATOR):
    oa_block.submission_start = submission_start
    oa_block.submission_due = submission_due
    oa_block.allow_file_upload = allow_file_upload
    oa_block.leaderboard_show = leaderboard_show

    return oa_block
...
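In summary, the parsing above (together with the error fixtures added in this commit) behaves as follows; a sketch, where MAX_TOP_SUBMISSIONS is defined by the edx-submissions API:

# leaderboard_show="10"          -> oa_block.leaderboard_show == 10
# leaderboard_show="0" or "-1"   -> UpdateFromXmlError: positive integer required
# leaderboard_show="101"         -> UpdateFromXmlError if 101 > MAX_TOP_SUBMISSIONS
# leaderboard_show="not_an_int"  -> UpdateFromXmlError: integer required
# attribute omitted              -> leaderboard_show defaults to 0 (leaderboard hidden)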
@@ -6,7 +6,7 @@
git+https://github.com/edx/XBlock.git@fc5fea25c973ec66d8db63cf69a817ce624f5ef5#egg=XBlock
git+https://github.com/edx/xblock-sdk.git@643900aadcb18aaeb7fe67271ca9dbf36e463ee6#egg=xblock-sdk
-edx-submissions==0.0.5
+edx-submissions==0.0.6

# Third Party Requirements
boto>=2.30.0,<3.0.0
...