Commit e1273024 by Will Daly

Merge pull request #303 from edx/will/csv-data-2

Data upload
parents 8b9f8090 2626a75a
......@@ -19,7 +19,7 @@ class TestRubricDeserialization(CacheResetTest):
def test_rubric_only_created_once(self):
# Make sure sending the same Rubric data twice only creates one Rubric,
# and returns a reference to it the next time.
rubric_data = json_data('rubric_data/project_plan_rubric.json')
rubric_data = json_data('data/rubric/project_plan_rubric.json')
r1 = rubric_from_dict(rubric_data)
......@@ -32,14 +32,14 @@ class TestRubricDeserialization(CacheResetTest):
def test_rubric_requires_positive_score(self):
with self.assertRaises(InvalidRubric):
rubric_from_dict(json_data('rubric_data/no_points.json'))
rubric_from_dict(json_data('data/rubric/no_points.json'))
class TestCriterionDeserialization(CacheResetTest):
def test_empty_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/empty_criteria.json'))
rubric_from_dict(json_data('data/rubric/empty_criteria.json'))
self.assertEqual(
cm.exception.errors,
{'criteria': [u'Must have at least one criterion']}
......@@ -47,7 +47,7 @@ class TestCriterionDeserialization(CacheResetTest):
def test_missing_criteria(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/missing_criteria.json'))
rubric_from_dict(json_data('data/rubric/missing_criteria.json'))
self.assertEqual(
cm.exception.errors,
{'criteria': [u'This field is required.']}
......@@ -58,7 +58,7 @@ class TestCriterionOptionDeserialization(CacheResetTest):
def test_empty_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/empty_options.json'))
rubric_from_dict(json_data('data/rubric/empty_options.json'))
self.assertEqual(
cm.exception.errors,
{
......@@ -71,7 +71,7 @@ class TestCriterionOptionDeserialization(CacheResetTest):
def test_missing_options(self):
with self.assertRaises(InvalidRubric) as cm:
rubric_from_dict(json_data('rubric_data/missing_options.json'))
rubric_from_dict(json_data('data/rubric/missing_options.json'))
self.assertEqual(
cm.exception.errors,
{
......
"""
Generate CSV files for submission and assessment data, then upload to S3.
"""
import sys
import os
import os.path
import datetime
import shutil
import tempfile
import tarfile
import boto
from boto.s3.key import Key
from django.core.management.base import BaseCommand, CommandError
from openassessment.data import CsvWriter
class Command(BaseCommand):
"""
Create and upload CSV files for submission and assessment data.
"""
help = 'Create and upload CSV files for submission and assessment data.'
args = '<COURSE_ID> <S3_BUCKET_NAME>'
OUTPUT_CSV_PATHS = {
output_name: "{}.csv".format(output_name)
for output_name in CsvWriter.MODELS
}
URL_EXPIRATION_HOURS = 24
PROGRESS_INTERVAL = 10
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self._history = list()
self._submission_counter = 0
@property
def history(self):
"""
Return the upload history, which is useful for testing.
Returns:
list of dictionaries with keys 'url' and 'key'
"""
return self._history
def handle(self, *args, **options):
"""
Execute the command.
Args:
course_id (unicode): The ID of the course to create submissions for.
s3_bucket_name (unicode): The name of the S3 bucket to upload to.
Raises:
CommandError
"""
if len(args) < 2:
raise CommandError(u'Usage: upload_oa_data {}'.format(self.args))
course_id, s3_bucket = args[0].decode('utf-8'), args[1].decode('utf-8')
csv_dir = tempfile.mkdtemp()
try:
print u"Generating CSV files for course '{}'".format(course_id)
self._dump_to_csv(course_id, csv_dir)
print u"Creating archive of CSV files in {}".format(csv_dir)
archive_path = self._create_archive(csv_dir)
print u"Uploading {} to {}/{}".format(archive_path, s3_bucket, course_id)
url = self._upload(course_id, archive_path, s3_bucket)
print "== Upload successful =="
print u"Download URL (expires in {} hours):\n{}".format(self.URL_EXPIRATION_HOURS, url)
finally:
# Assume that the archive was created in the directory,
# so to clean up we just need to delete the directory.
shutil.rmtree(csv_dir)
def _dump_to_csv(self, course_id, csv_dir):
"""
Create CSV files for submission/assessment data in a directory.
Args:
course_id (unicode): The ID of the course to dump data from.
csv_dir (unicode): The absolute path to the directory in which to create CSV files.
Returns:
None
"""
output_streams = {
name: open(os.path.join(csv_dir, rel_path), 'w')
for name, rel_path in self.OUTPUT_CSV_PATHS.iteritems()
}
csv_writer = CsvWriter(output_streams, self._progress_callback)
csv_writer.write_to_csv(course_id)
def _create_archive(self, dir_path):
"""
Create an archive of a directory.
Args:
dir_path (unicode): The absolute path to the directory containing the CSV files.
Returns:
unicode: Absolute path to the archive.
"""
tarball_name = u"{}.tar.gz".format(
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H_%M")
)
tarball_path = os.path.join(dir_path, tarball_name)
with tarfile.open(tarball_path, "w:gz") as tar:
for rel_path in self.OUTPUT_CSV_PATHS.values():
tar.add(os.path.join(dir_path, rel_path), arcname=rel_path)
return tarball_path
def _upload(self, course_id, file_path, s3_bucket):
"""
Upload a file.
Args:
course_id (unicode): The ID of the course.
file_path (unicode): Absolute path to the file to upload.
s3_bucket (unicode): Name of the S3 bucket where the file will be uploaded.
Returns:
str: URL to access the uploaded archive.
"""
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
key_name = os.path.join(course_id, os.path.split(file_path)[1])
key = Key(bucket=bucket, name=key_name)
key.set_contents_from_filename(file_path)
url = key.generate_url(self.URL_EXPIRATION_HOURS * 3600)
# Store the key and url in the history
self._history.append({'key': key_name, 'url': url})
return url
def _progress_callback(self):
"""
Indicate progress to the user as submissions are processed.
"""
self._submission_counter += 1
if self._submission_counter > 0 and self._submission_counter % self.PROGRESS_INTERVAL == 0:
sys.stdout.write('.')
sys.stdout.flush()
# -*- coding: utf-8 -*-
"""
Tests for management command that uploads submission/assessment data.
"""
from StringIO import StringIO
import tarfile
from django.test import TestCase
import boto
import moto
from openassessment.management.commands import upload_oa_data
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
class UploadDataTest(TestCase):
    """
    Test the upload management command. Archiving and upload are in-scope,
    but the contents of the generated CSV files are tested elsewhere.
    """

    COURSE_ID = u"TɘꙅT ↄoUᴙꙅɘ"
    BUCKET_NAME = u"com.example.data"
    CSV_NAMES = [
        "assessment.csv", "assessment_part.csv",
        "assessment_feedback.csv", "assessment_feedback_option.csv",
        "submission.csv", "score.csv",
    ]

    @moto.mock_s3
    def test_upload(self):
        # Set up a bucket in the fake (moto-mocked) S3 backend
        s3_conn = boto.connect_s3()
        s3_conn.create_bucket(self.BUCKET_NAME)

        # Populate the course with enough submissions that the
        # command's progress-indicator code path gets exercised.
        for num in range(50):
            item = {
                'student_id': "test_user_{}".format(num),
                'course_id': self.COURSE_ID,
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            created = sub_api.create_submission(item, "test submission {}".format(num))
            workflow_api.create_workflow(created['uuid'])

        # Run the command: generate the CSVs, archive them, and
        # push the archive to the mocked S3 bucket.
        command = upload_oa_data.Command()
        command.handle(self.COURSE_ID.encode('utf-8'), self.BUCKET_NAME)

        # Pull the uploaded archive back out of the fake S3 backend
        self.assertEqual(len(command.history), 1)
        uploaded_bucket = s3_conn.get_all_buckets()[0]
        uploaded_key = uploaded_bucket.get_key(command.history[0]['key'])
        archive_buffer = StringIO(uploaded_key.get_contents_as_string())

        # Every expected CSV should be present in the tarball and non-empty
        with tarfile.open(mode="r:gz", fileobj=archive_buffer) as tar:
            sizes = dict(
                (member.name, member.size)
                for member in tar.getmembers()
            )
            for csv_name in self.CSV_NAMES:
                self.assertIn(csv_name, sizes)
                self.assertGreater(sizes[csv_name], 0)

        # The recorded download URL should point at the bucket
        url = command.history[0]['url']
        self.assertIn("https://{}".format(self.BUCKET_NAME), url)
[
{
"pk": 1,
"model": "assessment.rubric",
"fields": {
"content_hash": "7405a513d9f99b62dd561816f20cdb90b09b8060"
}
},
{
"pk": 1,
"model": "assessment.criterion",
"fields": {
"order_num": 0,
"prompt": "How concise is it?",
"rubric": 1,
"name": "concise"
}
},
{
"pk": 2,
"model": "assessment.criterion",
"fields": {
"order_num": 1,
"prompt": "How clear is the thinking?",
"rubric": 1,
"name": "clear-headed"
}
},
{
"pk": 3,
"model": "assessment.criterion",
"fields": {
"order_num": 2,
"prompt": "Lastly, how is its form? Punctuation, grammar, and spelling all count.",
"rubric": 1,
"name": "form"
}
},
{
"pk": 1,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "\n In \"Cryptonomicon\", Stephenson spent multiple pages talking about breakfast cereal.\n While hilarious, in recent years his work has been anything but 'concise'.\n ",
"points": 0,
"criterion": 1,
"name": "Neal Stephenson (late)"
}
},
{
"pk": 2,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "\n If the author wrote something cyclopean that staggers the mind, score it thus.\n ",
"points": 1,
"criterion": 1,
"name": "HP Lovecraft"
}
},
{
"pk": 3,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "\n Tight prose that conveys a wealth of information about the world in relatively\n few words. Example, \"The door irised open and he stepped inside.\"\n ",
"points": 3,
"criterion": 1,
"name": "Robert Heinlein"
}
},
{
"pk": 4,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "\n When Stephenson still had an editor, his prose was dense, with anecdotes about\n nitrox abuse implying main characters' whole life stories.\n ",
"points": 4,
"criterion": 1,
"name": "Neal Stephenson (early)"
}
},
{
"pk": 5,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Score the work this way if it makes you weep, and the removal of a single\n word would make you sneer.\n ",
"points": 5,
"criterion": 1,
"name": "Earnest Hemingway"
}
},
{
"pk": 6,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 2,
"name": "Yogi Berra"
}
},
{
"pk": 7,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 2,
"name": "Hunter S. Thompson"
}
},
{
"pk": 8,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 2,
"name": "Robert Heinlein"
}
},
{
"pk": 9,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 2,
"name": "Isaac Asimov"
}
},
{
"pk": 10,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "\n Coolly rational, with a firm grasp of the main topics, a crystal-clear train of thought,\n and unemotional examination of the facts. This is the only item explained in this category,\n to show that explained and unexplained items can be mixed.\n ",
"points": 10,
"criterion": 2,
"name": "Spock"
}
},
{
"pk": 11,
"model": "assessment.criterionoption",
"fields": {
"order_num": 0,
"explanation": "",
"points": 0,
"criterion": 3,
"name": "lolcats"
}
},
{
"pk": 12,
"model": "assessment.criterionoption",
"fields": {
"order_num": 1,
"explanation": "",
"points": 1,
"criterion": 3,
"name": "Facebook"
}
},
{
"pk": 13,
"model": "assessment.criterionoption",
"fields": {
"order_num": 2,
"explanation": "",
"points": 2,
"criterion": 3,
"name": "Reddit"
}
},
{
"pk": 14,
"model": "assessment.criterionoption",
"fields": {
"order_num": 3,
"explanation": "",
"points": 3,
"criterion": 3,
"name": "metafilter"
}
},
{
"pk": 15,
"model": "assessment.criterionoption",
"fields": {
"order_num": 4,
"explanation": "",
"points": 4,
"criterion": 3,
"name": "Usenet, 1996"
}
},
{
"pk": 16,
"model": "assessment.criterionoption",
"fields": {
"order_num": 5,
"explanation": "",
"points": 5,
"criterion": 3,
"name": "The Elements of Style"
}
}
]
\ No newline at end of file
{
"empty": {
"fixture": "db_fixtures/empty.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment": [
[
"id", "submission_uuid", "scored_at", "scorer_id", "score_type",
"points_possible", "feedback"
]
],
"assessment_feedback": [
["submission_uuid", "feedback_text", "options"]
],
"assessment_part": [
["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"]
],
"assessment_feedback_option": [
["id", "text"]
],
"submission": [
["uuid", "student_id", "item_id", "submitted_at", "created_at", "raw_answer"]
],
"score": [
["submission_uuid", "points_earned", "points_possible", "created_at"]
]
}
},
"submitted": {
"fixture": "db_fixtures/submitted.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"submission": [
["uuid", "student_id", "item_id", "submitted_at", "created_at", "raw_answer"],
[
"cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"student_1", "openassessmentblock-poverty-rubric.openassessment.d0.u0",
"2014-04-30 21:02:59.234000+00:00", "2014-04-30 21:02:59.241000+00:00",
"{\"text\": \"Lorem ipsum dolor sit amet\"}"
]
]
}
},
"peer_assessed": {
"fixture": "db_fixtures/peer_assessed.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment": [
[
"id", "submission_uuid", "scored_at", "scorer_id", "score_type",
"points_possible", "feedback"
],
[
"1", "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"2014-04-30 21:06:35.019000+00:00",
"other",
"PE",
"20",
"Donec consequat vitae ante in pellentesque."
]
],
"assessment_part": [
["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
["1", "3", "clear-headed", "Isaac Asimov", ""]
]
}
},
"self_assessed": {
"fixture": "db_fixtures/self_assessed.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment": [
[
"id", "submission_uuid", "scored_at", "scorer_id", "score_type",
"points_possible", "feedback"
],
[
"1", "cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"2014-04-30 21:06:35.019000+00:00",
"other",
"PE",
"20",
"Donec consequat vitae ante in pellentesque."
],
[
"2", "28cebeca-d0ab-11e3-a6ab-14109fd8dc43",
"2014-04-30 21:06:59.953000+00:00",
"other",
"SE",
"20",
""
]
],
"assessment_part": [
["assessment_id", "points_earned", "criterion_name", "option_name", "feedback"],
["1", "4", "concise", "Neal Stephenson (early)", "Praesent ac lorem ac nunc tincidunt ultricies sit amet ut magna."],
["1", "5", "form", "The Elements of Style", "Fusce varius, elit ut blandit consequat, odio ante mollis lectus"],
["1", "3", "clear-headed", "Isaac Asimov", ""],
["2", "5", "concise", "Earnest Hemingway", ""],
["2", "5", "form", "The Elements of Style", ""],
["2", "10", "clear-headed", "Spock", ""]
]
}
},
"scored": {
"fixture": "db_fixtures/scored.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"score": [
["submission_uuid", "points_earned", "points_possible", "created_at"],
[
"cf5190b8-d0aa-11e3-a734-14109fd8dc43",
"12", "20",
"2014-04-30 21:07:53.534000+00:00"
],
[
"28cebeca-d0ab-11e3-a6ab-14109fd8dc43",
"17", "20",
"2014-04-30 21:07:46.524000+00:00"
]
]
}
},
"feedback_on_assessment": {
"fixture": "db_fixtures/feedback_on_assessment.json",
"course_id": "edX/Enchantment_101/April_1",
"expected_csv": {
"assessment_feedback": [
["submission_uuid", "feedback_text", "options"],
[
"1783758f-d0ae-11e3-b495-14109fd8dc43",
"Feedback on assessment",
"1,2"
],
[
"387d840a-d0ae-11e3-bb0e-14109fd8dc43",
"Feedback on assessment",
"1,2"
]
],
"assessment_feedback_option": [
["id", "text"],
["1", "These assessments were useful."],
["2", "I disagree with one or more of the peer assessments of my response."]
]
}
}
}
# -*- coding: utf-8 -*-
"""
Tests for openassessment data aggregation.
"""
import os.path
from StringIO import StringIO
import csv
from django.core.management import call_command
import ddt
from openassessment.test_utils import CacheResetTest
from submissions import api as sub_api
from openassessment.workflow import api as workflow_api
from openassessment.data import CsvWriter
@ddt.ddt
class CsvWriterTest(CacheResetTest):
"""
Test for writing openassessment data to CSV.
"""
longMessage = True
maxDiff = None
@ddt.file_data('data/write_to_csv.json')
def test_write_to_csv(self, data):
# Create in-memory buffers for the CSV file data
output_streams = self._output_streams(data['expected_csv'].keys())
# Load the database fixture
# We use the database fixture to ensure that this test will
# catch backwards-compatibility issues even if the Django model
# implementation or API calls change.
self._load_fixture(data['fixture'])
# Write the data to CSV
writer = CsvWriter(output_streams)
writer.write_to_csv(data['course_id'])
# Check that the CSV matches what we expected
for output_name, expected_csv in data['expected_csv'].iteritems():
output_buffer = output_streams[output_name]
output_buffer.seek(0)
actual_csv = csv.reader(output_buffer)
for expected_row in expected_csv:
try:
actual_row = actual_csv.next()
except StopIteration:
actual_row = None
self.assertEqual(
actual_row, expected_row,
msg="Output name: {}".format(output_name)
)
# Check for extra rows
try:
extra_row = actual_csv.next()
except StopIteration:
extra_row = None
if extra_row is not None:
self.fail(u"CSV contains extra row: {}".format(extra_row))
def test_many_submissions(self):
# Create a lot of submissions
num_submissions = 234
for index in range(num_submissions):
student_item = {
'student_id': "test_user_{}".format(index),
'course_id': 'test_course',
'item_id': 'test_item',
'item_type': 'openassessment',
}
submission_text = "test submission {}".format(index)
submission = sub_api.create_submission(student_item, submission_text)
workflow_api.create_workflow(submission['uuid'])
# Generate a CSV file for the submissions
output_streams = self._output_streams(['submission'])
writer = CsvWriter(output_streams)
writer.write_to_csv('test_course')
# Parse the generated CSV
content = output_streams['submission'].getvalue()
rows = content.split('\n')
# Remove the first row (header) and last row (blank line)
rows = rows[1:-1]
# Check that we have the right number of rows
self.assertEqual(len(rows), num_submissions)
def test_other_course_id(self):
# Try a course ID with no submissions
self._load_fixture('db_fixtures/scored.json')
output_streams = self._output_streams(CsvWriter.MODELS)
writer = CsvWriter(output_streams)
writer.write_to_csv('other_course')
# Expect that each output has only two lines (the header and a blank line)
# since this course has no submissions
for output in output_streams.values():
content = output.getvalue()
rows = content.split('\n')
self.assertEqual(len(rows), 2)
def test_unicode(self):
# Flush out unicode errors
self._load_fixture('db_fixtures/unicode.json')
output_streams = self._output_streams(CsvWriter.MODELS)
CsvWriter(output_streams).write_to_csv(u"𝓽𝓮𝓼𝓽_𝓬𝓸𝓾𝓻𝓼𝓮")
# Check that data ended up in the reports
for output in output_streams.values():
content = output.getvalue()
rows = content.split('\n')
self.assertGreater(len(rows), 2)
def _output_streams(self, names):
"""
Create in-memory buffers.
Args:
names (list of unicode): The output names.
Returns:
dict: map of output names to StringIO objects.
"""
output_streams = dict()
for output_name in names:
output_buffer = StringIO()
self.addCleanup(output_buffer.close)
output_streams[output_name] = output_buffer
return output_streams
def _load_fixture(self, fixture_relpath):
"""
Load a database fixture into the test database.
Args:
fixture_relpath (unicode): Path to the fixture,
relative to the test/data directory.
Returns:
None
"""
fixture_path = os.path.join(
os.path.dirname(__file__), 'data', fixture_relpath
)
print "Loading database fixtures from {}".format(fixture_path)
call_command('loaddata', fixture_path)
......@@ -305,6 +305,10 @@ class OpenAssessmentBlock(
"""
return [
(
"OpenAssessmentBlock Unicode",
load('static/xml/unicode.xml')
),
(
"OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml')
),
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment>
<title>
Censorship in Public Libraries
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment submission_due="2015-03-11T18:20">
<title>
Global Poverty
......
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<openassessment>
<title>
Promptless rubric
......
<openassessment>
<title>υηι¢σ∂є тєѕт</title>
<rubric>
<prompt>Uᴎiↄobɘ qᴙomqT</prompt>
<criterion feedback='optional'>
<name>ȼønȼɨsɇ</name>
<prompt>Нош соисіѕэ іѕ іт?</prompt>
<option points="0">
<name>アoo尺</name>
<explanation>ʇɥǝ ʍɹıʇǝɹ pıp ɐ dooɹ ɾoq.</explanation>
</option>
<option points="1">
<name>ꟻAiᴙ</name>
<explanation>Ṫḧë ẅṛïẗëṛ ḋïḋ ä ḟäïṛ jöḅ.</explanation>
</option>
<option points="3">
<name>Ǥøøđ</name>
<explanation>ᴛʜᴇ ᴡʀɪᴛᴇʀ ᴅɪᴅ ᴀ ɢᴏᴏᴅ ᴊᴏʙ.</explanation>
</option>
</criterion>
<criterion>
<name>őń-tőṕíć</name>
<prompt>ωαѕ тнє ωяιтєя ση тσρι¢?</prompt>
<option points="0">
<name>アoo尺</name>
<explanation>ʇɥǝ ʍɹıʇǝɹ pıp ɐ dooɹ ɾoq.</explanation>
</option>
<option points="1">
<name>ꟻAiᴙ</name>
<explanation>Ṫḧë ẅṛïẗëṛ ḋïḋ ä ḟäïṛ jöḅ.</explanation>
</option>
<option points="3">
<name>Ǥøøđ</name>
<explanation>ᴛʜᴇ ᴡʀɪᴛᴇʀ ᴅɪᴅ ᴀ ɢᴏᴏᴅ ᴊᴏʙ.</explanation>
</option>
</criterion>
</rubric>
<assessments>
<assessment name="peer-assessment" must_grade="1" must_be_graded_by="1" />
<assessment name="self-assessment" />
</assessments>
</openassessment>
# edX Internal Requirements
git+https://github.com/edx/XBlock.git@3b6e4218bd326f84dbeb0baed7b2b7813ffea3dd#egg=XBlock
git+https://github.com/edx/XBlock.git@fc5fea25c973ec66d8db63cf69a817ce624f5ef5#egg=XBlock
git+https://github.com/edx/xblock-sdk.git@643900aadcb18aaeb7fe67271ca9dbf36e463ee6#egg=xblock-sdk
# Third Party Requirements
boto==2.13.3
defusedxml==0.4.1
dogapi==1.2.1
django>=1.4,<1.5
......
# Grab everything in base requirements
-r base.txt
# There's a unicode bug in the httpretty==0.8 (used by moto)
# Once a new version gets released on PyPi we can use that instead.
git+https://github.com/gabrielfalcao/HTTPretty.git@4c2b10925c86c9b6299c1a04ae334d89fe007ae2#egg=httpretty
ddt==0.7.0
django-nose==1.2
mock==1.0.1
moto==0.2.22
nose==1.3.0
coverage==3.7.1
pep8==1.4.6
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment