Commit e0645cfd by Brian Mesick Committed by GitHub

Code cleanup (#6)

* General code cleanup
* Bumping version, small update to README
* Adding automatic PyPI deployment
parent 8c88be88
@@ -23,3 +23,14 @@ script:
 after_success:
   - bash <(curl -s https://codecov.io/bash)
+deploy:
+  provider: pypi
+  user: edx
+  password:
secure: "rXk3zjCNHdRE9Q9gawz6KBcL+3aXY+TkxPVKHdJgQCHsX6T5ZmUIQCmKVAz9pRstzpOe4rLSNEjIfdMerPb8Aeto46LGjm1/AJ5HjPqqvu5c9PzJ/HSCKLjIwa6JBr2EBPf3b98kwi9h3aub/jX81bOvqGBamSdzQ4GbFnTfqP8QX3WEk2ANLQVUTe3w1Qw6e1pdbOAGRGUZf1iAMMlm690jjuZdqDVL92haRKrmFgy1ToIc/OSKn8o1PXIn1YWY+vg9kO0xsbfDUcZ1eAV15cQobf5+9nB0g+RsuI76tyZ1ZyHQaMsWgwD6tbQOWt4b6/PYs9spLfEbb+UZcU1MycXMHfJbOiGHuq1D//VgoxzJzLZKD6uYncWmOk/OlMYdmQqIdi9tvBFncnitkmLwV91dIfXVFPNrmiYfK207b62SVaxRH3CKFPUp2UrSi8r0bM9ud33PcT8KdW+2Z9tcwbkc46UTodqtbLFQnvbyhROTqooLiwfeFfy60g9/qfdsgGpkt1XgMs8c7vuix5qkuB9KYjSVGBdNUazIWg9PVCW510TJXxal1rc+NQBB3SWa2hozkFgrvmTtRds4lSU+FgkiKpSf3MGIYKugLjYDFrrmfPqjIPbaKdkdOxv30cWUJZ+/kS/N8aUlohbjbov0tgNpoJ3RlreE/34y85Sm8Pc="
+  distributions: sdist bdist_wheel
+  on:
+    tags: true
+    python: "2.7"
+    condition: '$TOXENV == django110'
@@ -59,9 +59,6 @@ Good next steps would be to:
 * Allow Django storages to act as a back-end for pyfilesystem
 * Allow django-pyfs to act as a back-end for Django storages
 * Support more types of pyfilesystems (esp. in-memory would be nice)
-* General code cleanup, documentation, test cases, etc.
-* Add better test support. Django does nice things with resetting
-  DBs to a know state for testing. It'd be nice to do the same here.
 
 State: This code is tested and has worked well in a range of settings,
 and is currently deployed on edx.org. However, it doesn't have test
......
''' """
This is a thin veneer around a `pyfilesystem`. It adds a few bits of
This is a thin veneer around a pyfilesystem. It adds a few bits of
functionality: functionality:
1) Django configuration. This can go to Amazon S3 or a static 1) Django configuration. This can go to Amazon S3 or a static
...@@ -8,46 +7,55 @@ filesystem. ...@@ -8,46 +7,55 @@ filesystem.
2) The ability to get URLs for objects stored on the filesystem. 2) The ability to get URLs for objects stored on the filesystem.
3) The ability to create objects with a limited lifetime. A 3) The ability to create objects with a limited lifetime. A
task can garbage-collect those objects. task can garbage-collect those objects.
"""
'''
 from __future__ import absolute_import
 
 import os
 import os.path
 import types
 
+from boto.s3.connection import S3Connection
 from django.conf import settings
 from fs.osfs import OSFS
+from fs.s3fs import S3FS
 
 from .models import FSExpirations
 
 if hasattr(settings, 'DJFS'):
-    djfs_settings = settings.DJFS #pragma: no cover
+    DJFS_SETTINGS = settings.DJFS  # pragma: no cover
 else:
-    djfs_settings = {'type' : 'osfs',
-                     'directory_root' : 'django-pyfs/static/django-pyfs',
-                     'url_root' : '/static/django-pyfs'}
+    DJFS_SETTINGS = {'type': 'osfs',
+                     'directory_root': 'django-pyfs/static/django-pyfs',
+                     'url_root': '/static/django-pyfs'}
 
-s3conn = None
+# Global to hold the active S3 connection. Prevents needing to reconnect
+# several times in a request. Connections are set up below in `get_s3_url`.
+S3CONN = None
 def get_filesystem(namespace):
-    ''' Returns a pyfilesystem for static module storage.
-
-    The file system will have two additional properties:
-     1) get_url: A way to get a URL for a static file download
-     2) expire: A way to expire files (so they are automatically destroyed)
-    '''
-    if djfs_settings['type'] == 'osfs':
-        return get_osfs( namespace )
-    elif djfs_settings['type'] == 's3fs':
-        return get_s3fs( namespace )
+    """
+    Returns a patched pyfilesystem for static module storage based on
+    `DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
+
+    The file system will have two additional properties:
+     1) get_url: A way to get a URL for a static file download
+     2) expire: A way to expire files (so they are automatically destroyed)
+    """
+    if DJFS_SETTINGS['type'] == 'osfs':
+        return get_osfs(namespace)
+    elif DJFS_SETTINGS['type'] == 's3fs':
+        return get_s3fs(namespace)
     else:
-        raise AttributeError("Bad filesystem: "+str(djfs_settings['type']))
+        raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type']))
 
 
 def expire_objects():
-    ''' Remove all obsolete objects from the file systems. Untested. '''
+    """
+    Remove all obsolete objects from the file systems.
+    """
     objects = sorted(FSExpirations.expired(), key=lambda x: x.module)
     fs = None
     module = None
@@ -59,65 +67,106 @@ def expire_objects():
             fs.remove(o.filename)
         o.delete()
 def patch_fs(fs, namespace, url_method):
-    ''' Patch a filesystem object to add two methods:
-    get_url returns a URL for a resource stored on that filesystem. It takes two parameters:
-     filename: Which resource
-     timeout: How long that resource is available for
-    expire sets a timeout on how long the system should keep the resource. It takes four parameters:
-     filename: Which resource
-     seconds: How long we will keep it
-     days: (optional) More user-friendly if a while
-     expires: (optional) boolean; if set to False, we keep the resource forever.
-    Without calling this method, we provide no guarantees on how long resources will stick around.
-    '''
-    def expire(self, filename, seconds, days=0, expires = True):
-        ''' Set the lifespan of a file on the filesystem.
-
-        filename: Name of file
-        expire: False means the file will never be removed
-        seconds and days give time to expiration.
-        '''
-        FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires = expires)
+    """
+    Patch a filesystem instance to add the `get_url` and `expire` methods.
+
+    Arguments:
+        fs (obj): The pyfilesystem subclass instance to be patched.
+        namespace (str): Namespace of the filesystem, used in `expire`
+        url_method (func): Function to patch into the filesystem instance as
+            `get_url`. Allows filesystem independent implementation.
+
+    Returns:
+        obj: Patched filesystem instance
+    """
+    def expire(self, filename, seconds, days=0, expires=True):
+        """
+        Set the lifespan of a file on the filesystem.
+
+        Arguments:
+            filename (str): Name of file
+            expires (bool): False means the file will never be removed;
+                seconds and days give time to expiration.
+            seconds (int): (optional) how many seconds to keep the file around
+            days (int): (optional) how many days to keep the file around for.
+                If both days and seconds are given they will be added
+                together. So `seconds=86400, days=1` would expire the file
+                in 2 days.
+
+        Returns:
+            None
+        """
+        FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires=expires)
 
     fs.expire = types.MethodType(expire, fs)
     fs.get_url = types.MethodType(url_method, fs)
     return fs
 def get_osfs(namespace):
-    ''' Helper method to get_filesystem for a file system on disk '''
-    full_path = os.path.join(djfs_settings['directory_root'], namespace)
+    """
+    Helper method to get_filesystem for a file system on disk
+    """
+    full_path = os.path.join(DJFS_SETTINGS['directory_root'], namespace)
     if not os.path.exists(full_path):
         os.makedirs(full_path)
     osfs = OSFS(full_path)
-    osfs = patch_fs(osfs, namespace, lambda self, filename, timeout=0: os.path.join(djfs_settings['url_root'], namespace, filename))
+    osfs = patch_fs(
+        osfs,
+        namespace,
+        # This is the OSFS implementation of `get_url`; note that it ignores
+        # the timeout param, so all OSFS file urls have no time limits.
+        lambda self, filename, timeout=0: os.path.join(DJFS_SETTINGS['url_root'], namespace, filename)
+    )
    return osfs
-def get_s3fs(namespace):
-    ''' Helper method to get_filesystem for a file system on S3 '''
-    # Our test suite does not presume Amazon S3, and we would prefer not to have a global import so that we can run
-    # tests without requiring boto. These will become global when and if we include S3/boto in our test suite.
-    from fs.s3fs import S3FS
-    from boto.s3.connection import S3Connection
-
-    key_id = djfs_settings.get('aws_access_key_id', None)
-    key_secret = djfs_settings.get('aws_secret_access_key', None)
-    s3conn = None
+def get_s3fs(namespace):
+    """
+    Helper method to get_filesystem for a file system on S3
+    """
+    key_id = DJFS_SETTINGS.get('aws_access_key_id', None)
+    key_secret = DJFS_SETTINGS.get('aws_secret_access_key', None)
 
     fullpath = namespace
-    if 'prefix' in djfs_settings:
-        fullpath = os.path.join(djfs_settings['prefix'], fullpath)
-    s3fs = S3FS(djfs_settings['bucket'], fullpath, aws_access_key=key_id, aws_secret_key=key_secret)
+
+    if 'prefix' in DJFS_SETTINGS:
+        fullpath = os.path.join(DJFS_SETTINGS['prefix'], fullpath)
+    s3fs = S3FS(DJFS_SETTINGS['bucket'], fullpath, aws_access_key=key_id, aws_secret_key=key_secret)
 
     def get_s3_url(self, filename, timeout=60):
-        global s3conn
+        """
+        Patch method that returns a signed S3 url for the given filename
+
+        Note that this will return a url whether or not the requested file
+        exists.
+
+        Arguments:
+            self (obj): S3FS instance that this function has been patched onto
+            filename (str): The name of the file we are retrieving a url for
+            timeout (int): How long the url should be valid for; S3 enforces
+                this limit
+
+        Returns:
+            str: A signed url to the requested file in S3
+        """
+        global S3CONN
         try:
-            if not s3conn:
-                s3conn = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
-            return s3conn.generate_url(timeout, 'GET', bucket = djfs_settings['bucket'], key = os.path.join(fullpath, filename))
-        except: # Retry on error; typically, if the connection has timed out, but the broad except covers all errors.
-            s3conn = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
-            return s3conn.generate_url(timeout, 'GET', bucket = djfs_settings['bucket'], key = os.path.join(fullpath, filename))
+            if not S3CONN:
+                S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
+            return S3CONN.generate_url(
+                timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
+            )
+        except Exception:  # pylint: disable=broad-except
+            # Retry on error; typically, if the connection has timed out, but
+            # the broad except covers all errors.
+            S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
+            return S3CONN.generate_url(
+                timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
+            )
 
     s3fs = patch_fs(s3fs, namespace, get_s3_url)
     return s3fs
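
For context, a minimal sketch of how the filesystem returned by `get_filesystem` is typically used. The names and values are illustrative only and assume a configured Django project; an S3 setup would supply a `DJFS` dict like the hypothetical one below in Django settings, matching the keys read by `get_s3fs` above.

    # Hypothetical S3 configuration (placeholder values).
    DJFS = {
        'type': 's3fs',
        'bucket': 'my-bucket',             # placeholder bucket name
        'prefix': 'djfs',                  # optional key prefix
        'aws_access_key_id': 'AKIA...',    # optional; omit to use ambient credentials
        'aws_secret_access_key': '...',
    }

    # Typical use of the patched filesystem object (placeholder names).
    from djpyfs import djpyfs

    fs = djpyfs.get_filesystem('my_namespace')    # OSFS or S3FS, per DJFS_SETTINGS
    with fs.open('report.png', 'wb') as handle:   # plain pyfilesystem API
        handle.write(b'...')                      # placeholder payload
    url = fs.get_url('report.png', timeout=300)   # method added by patch_fs()
    fs.expire('report.png', 0, days=7)            # recorded via FSExpirations
    djpyfs.expire_objects()                       # e.g. from a periodic cleanup task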
@@ -5,26 +5,34 @@ from django.utils import timezone
 class FSExpirations(models.Model):
-    '''
+    """
     Model to handle expiring temporary files.
 
-    The modules have access to a pyfilesystem object where they
-    can store big data, images, etc. In most cases, we would like
-    those objects to expire (e.g. if a view generates a .PNG analytic
-    to show to a user). This model keeps track of files stored, as
-    well as the expirations of those models.
-    '''
+    The modules have access to a pyfilesystem object where they can store big
+    data, images, etc. In most cases, we would like those objects to expire
+    (e.g. if a view generates a .PNG analytic to show to a user). This model
+    keeps track of files stored, as well as the expirations of those models.
+    """
     module = models.CharField(max_length=382) # Defines the namespace
-    filename = models.CharField(max_length=382) # Filename within the namespace
+    filename = models.CharField(max_length=382) # Filename within namespace
     expires = models.BooleanField() # Does it expire?
-    expiration = models.DateTimeField(db_index=True)
+    expiration = models.DateTimeField(db_index=True) # If so, when?
 
     @classmethod
-    def create_expiration(cls, module, filename, seconds, days=0, expires = True):
-        '''
+    def create_expiration(cls, module, filename, seconds, days=0, expires=True):
+        """
         May be used instead of the constructor to create a new expiration.
-        Automatically applies timedelta and saves to DB.
-        '''
+
+        Automatically applies timedelta and saves to DB.
+
+        Arguments:
+            cls (classtype): Class this method is attached to
+            module (str): Namespace of the filesystem
+            filename (str): Name of the file to create the expiration for
+            seconds (int): Number of seconds before we expire the file
+            days (int): Number of days before we expire the file. If both days
+                and seconds are given they are added together.
+        """
         expiration_time = timezone.now() + timezone.timedelta(days, seconds)
 
         # If object exists, update it
@@ -46,18 +54,19 @@ class FSExpirations(models.Model):
     @classmethod
     def expired(cls):
-        '''
+        """
         Returns a list of expired objects
-        '''
+        """
         expiration_lte = timezone.now()
-        return cls.objects.filter(expires=True, expiration__lte = expiration_lte)
+        return cls.objects.filter(expires=True, expiration__lte=expiration_lte)
 
     class Meta(object):
         app_label = 'djpyfs'
         unique_together = (("module", "filename"),)
-        # We'd like to create an index first on expiration than on expires (so we can
-        # search for objects where expires=True and expiration is before now).
+        # We'd like to create an index first on expiration, then on expires (so
+        # we can search for objects where expires=True and expiration is before
+        # now).
         index_together = [
             ["expiration", "expires"],
         ]
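
For reference, a small sketch of how this model's API is driven (illustrative names only, not code from this commit):

    from djpyfs.models import FSExpirations

    # Keep 'report.png' in the 'my_namespace' filesystem for one day;
    # seconds and days are simply added together via timezone.timedelta.
    FSExpirations.create_expiration('my_namespace', 'report.png', 86400, days=0, expires=True)

    # A cleanup task (see expire_objects in djpyfs.py above) then asks what is overdue:
    for record in FSExpirations.expired():
        print(record.module, record.filename)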
......
@@ -8,6 +8,7 @@ import unittest
 from io import StringIO
 
 import boto
+import mock
 from django.test import TestCase
 from django.utils import timezone
 from fs.memoryfs import MemoryFS
@@ -34,7 +35,8 @@ class FSExpirationsTest(TestCase):
         """
         Exercises FSExpirations.create_expiration() with an existing expiration
         """
-        # In the first create_expiration, it does not exist. Second loop it is updating the existing row.
+        # In the first create_expiration, it does not exist. Second loop it is
+        # updating the existing row.
         for _ in range(2):
             FSExpirations.create_expiration(
                 self.module, self.test_file_path, self.expire_secs, self.expire_days, self.expires
@@ -83,7 +85,8 @@ class FSExpirationsTest(TestCase):
             self.module, self.test_file_path, expire_secs, expire_days, self.expires
         )
 
-        # Make sure there is 1 expiration pending, but nothing currently expired
+        # Make sure there is 1 expiration pending, but nothing currently
+        # expired
         self.assertEqual(FSExpirations.objects.all().count(), 1)
         self.assertEqual(len(FSExpirations.expired()), 0)
@@ -97,7 +100,8 @@
         )
 
         for f in (fse, fse2):
-            # Don't really care what __str__ is, just that it returns a string of some variety and doesn't error
+            # Don't really care what __str__ is, just that it returns a string
+            # of some variety and doesn't error
             try:
                 result = f.__str__()
                 self.assertTrue(isinstance(result, six.string_types))
@@ -112,9 +116,10 @@ class _BaseFs(TestCase):
         if self.djfs_settings is None:
             raise unittest.SkipTest("Skipping test on base class.")
 
-        # Monkey patch djpyfs settings to force settings to whatever the inheriting class is testing
-        self.orig_djpyfs_settings = djpyfs.djfs_settings
-        djpyfs.djfs_settings = self.djfs_settings
+        # Monkey patch djpyfs settings to force settings to whatever the
+        # inheriting class is testing
+        self.orig_djpyfs_settings = djpyfs.DJFS_SETTINGS
+        djpyfs.DJFS_SETTINGS = self.djfs_settings
 
         self.namespace = 'unitttest'
         self.secondary_namespace = 'unittest_2'
@@ -127,17 +132,18 @@
         self.relative_path_to_secondary_test_file = os.path.join(self.test_dir_name, self.secondary_test_file_name)
         self.relative_path_to_uncreated_test_file = os.path.join(self.test_dir_name, self.uncreated_test_file_name)
 
-        self.full_test_path = os.path.join(djpyfs.djfs_settings['directory_root'], self.namespace)
-        self.secondary_full_test_path = os.path.join(djpyfs.djfs_settings['directory_root'], self.secondary_namespace)
+        self.full_test_path = os.path.join(djpyfs.DJFS_SETTINGS['directory_root'], self.namespace)
+        self.secondary_full_test_path = os.path.join(djpyfs.DJFS_SETTINGS['directory_root'], self.secondary_namespace)
 
-        # We test against just the beginning of the returned url since S3 will have changing query params appended.
+        # We test against just the beginning of the returned url since S3 will
+        # have changing query params appended.
         self.expected_url_prefix = os.path.join(
-            djpyfs.djfs_settings['url_root'], self.namespace, self.relative_path_to_test_file
+            djpyfs.DJFS_SETTINGS['url_root'], self.namespace, self.relative_path_to_test_file
         )
 
     def tearDown(self):
         # Restore original settings
-        djpyfs.djfs_settings = self.orig_djpyfs_settings
+        djpyfs.DJFS_SETTINGS = self.orig_djpyfs_settings
 
     def test_get_filesystem(self):
         # Testing that using the default retrieval also gives us a usable osfs
@@ -150,8 +156,10 @@
         expire_secs = 0
         expire_days = 0
 
-        # Need to create two different namespaces with at least two files and at least one file that doesn't exist to
-        # fully exercise expire_objects. They all have to be part of the same run, thus this overly complicated hoo-ha.
+        # Need to create two different namespaces with at least two files and
+        # at least one file that doesn't exist to fully exercise
+        # expire_objects. They all have to be part of the same run, thus this
+        # overly complicated hoo-ha.
         fs1 = djpyfs.get_filesystem(self.namespace)
         fs2 = djpyfs.get_filesystem(self.secondary_namespace)
@@ -173,7 +181,8 @@
         self.assertEqual(FSExpirations.objects.all().count(), 6)
 
-        # Expire, should delete the files that exist and do nothing for the one that doesn't
+        # Expire, should delete the files that exist and do nothing for the
+        # one that doesn't
         djpyfs.expire_objects()
 
         self.assertFalse(fs1.exists(self.relative_path_to_test_file))
@@ -197,14 +206,15 @@
         self.assertTrue(fs.get_url(self.relative_path_to_test_file).startswith(self.expected_url_prefix))
 
     def test_get_url_does_not_exist(self):
-        # Current behavior is that even if a file doesn't exist you can get a URL for it
+        # Current behavior is that even if a file doesn't exist you can get a
+        # URL for it
         fs = djpyfs.get_filesystem(self.namespace)
 
         self.assertFalse(fs.exists(self.relative_path_to_test_file))
         self.assertTrue(fs.get_url(self.relative_path_to_test_file).startswith(self.expected_url_prefix))
 
     def test_patch_fs(self):
         """
-        Simple check to make sure the filesystem is getting patched as expected.
+        Simple check to make sure the filesystem is patched as expected.
         """
         fs = djpyfs.get_filesystem(self.namespace)
         self.assertTrue(callable(getattr(fs, 'expire')))
@@ -213,8 +223,8 @@
 
 class BadFileSystemTestInh(_BaseFs):
     """
-    Test filesystem class that uses an unknown filesystem type to make sure all methods return consistently. Wraps
-    all BaseFs tests to catch the exception.
+    Test filesystem class that uses an unknown filesystem type to make sure all
+    methods return consistently. Wraps all BaseFs tests to catch the exception.
     """
     djfs_settings = {
         'type': 'bogusfs',
@@ -269,7 +279,7 @@ class OsfsTest(_BaseFs):
 
 class S3Test(_BaseFs):
     """
-    Tests the S3FS implementation, without a prefix
+    Tests the S3FS implementation, without a prefix.
     """
     djfs_settings = {
         'type': 's3fs',
@@ -283,14 +293,15 @@ class S3Test(_BaseFs):
     def setUp(self):
         super(S3Test, self).setUp()
 
-        # For some reason the Py3 version of get_url returns a port in this test, while the Py2 version does not.
+        # For some reason the Py3 version of get_url returns a port in this
+        # test, while the Py2 version does not.
         if sys.version_info[0] == 2:
             self.expected_url_prefix = "https://{}.s3.amazonaws.com/{}/{}".format(
-                djpyfs.djfs_settings['bucket'], self.namespace, self.relative_path_to_test_file
+                djpyfs.DJFS_SETTINGS['bucket'], self.namespace, self.relative_path_to_test_file
             )
         else:
             self.expected_url_prefix = "https://{}.s3.amazonaws.com:443/{}/{}".format(
-                djpyfs.djfs_settings['bucket'], self.namespace, self.relative_path_to_test_file
+                djpyfs.DJFS_SETTINGS['bucket'], self.namespace, self.relative_path_to_test_file
             )
 
         self._setUpS3()
@@ -302,7 +313,17 @@
         # Create our fake bucket in fake s3
         self.conn = boto.connect_s3()
-        self.conn.create_bucket(djpyfs.djfs_settings['bucket'])
+        self.conn.create_bucket(djpyfs.DJFS_SETTINGS['bucket'])
+
+    # This test is only relevant for S3. Generate some fake errors to make
+    # sure we cover the retry code.
+    def test_get_url_retry(self):
+        with mock.patch("boto.s3.connection.S3Connection.generate_url") as mock_exception:
+            mock_exception.side_effect = AttributeError("test mock exception")
+            fs = djpyfs.get_filesystem(self.namespace)
+
+            with self.assertRaises(AttributeError):
+                fs.get_url(self.relative_path_to_test_file).startswith(self.expected_url_prefix)
 
     def tearDown(self):
         self.mock_s3.stop()
@@ -327,15 +348,16 @@ class S3TestPrefix(S3Test):
     def setUp(self):
         super(S3TestPrefix, self).setUp()
 
-        # For some reason the Py3 version of get_url returns a port in this test, while the Py2 version does not.
+        # For some reason the Py3 version of get_url returns a port in this
+        # test, while the Py2 version does not.
         if sys.version_info[0] == 2:
             self.expected_url_prefix = "https://{}.s3.amazonaws.com/{}/{}/{}".format(
-                djpyfs.djfs_settings['bucket'], djpyfs.djfs_settings['prefix'],
+                djpyfs.DJFS_SETTINGS['bucket'], djpyfs.DJFS_SETTINGS['prefix'],
                 self.namespace, self.relative_path_to_test_file
             )
         else:
             self.expected_url_prefix = "https://{}.s3.amazonaws.com:443/{}/{}/{}".format(
-                djpyfs.djfs_settings['bucket'], djpyfs.djfs_settings['prefix'],
+                djpyfs.DJFS_SETTINGS['bucket'], djpyfs.DJFS_SETTINGS['prefix'],
                 self.namespace, self.relative_path_to_test_file
             )
......
-# This requirement breaks both pip-tools and versioneye. The requirements are unchanged from master. It is possible to
-# temporarily change this to fs for updating pinned versions using pip-tools, then change it back after, but you will
-# have to hand edit the resulting requirements.txt to replace this url since it contains changes necessary for Python 3.
+# This requirement breaks both pip-tools and versioneye. The requirements are
+# unchanged from master. It is possible to temporarily change this to fs for
+# updating pinned versions using pip-tools, then change it back after, but you
+# will have to hand edit the resulting requirements.txt to replace this url
+# since it contains changes necessary for Python 3 support.
+# The pip-tools issue should be cleared up with this PR (or when we move to a
+# full release of pyfilesystem or pyfilesystem2):
+# https://github.com/nvie/pip-tools/pull/405
 git+https://github.com/edx/pyfilesystem.git@bmedx/s3fs-py3-support#egg=fs==0.5.5a1
 
 -r github.txt
+boto==2.45.0
 six==1.10.0
@@ -5,6 +5,7 @@
 # pip-compile --output-file requirements/requirements.txt requirements/requirements.in
 #
 appdirs==1.4.0  # via fs
+boto==2.45.0
 enum34==1.1.6  # via fs
 # This line is NOT autogenerated, see requirements/requirements.in for details.
@@ -13,3 +14,6 @@ enum34==1.1.6  # via fs
 pytz==2016.10  # via fs
 scandir==1.4  # via fs
 six==1.10.0
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools  # via fs
 # Additional requirements for unit tests, you must also run the main requirements.txt.
-boto==2.45.0
 codecov==2.0.5
+mock==2.0.0
 moto==0.4.30
 pytest==3.0.5
 pytest-django==3.1.2
......
@@ -5,13 +5,16 @@
 # pip-compile --output-file requirements/test_requirements.txt requirements/test_requirements.in
 #
 argparse==1.4.0  # via codecov
-boto==2.45.0
+boto==2.45.0  # via moto
 codecov==2.0.5
 coverage==4.3.1  # via codecov, pytest-cov
+funcsigs==1.0.2  # via mock
 httpretty==0.8.10  # via moto
 jinja2==2.8.1  # via moto
 markupsafe==0.23  # via jinja2
+mock==2.0.0
 moto==0.4.30
+pbr==1.10.0  # via mock
 pluggy==0.4.0  # via tox
 py==1.4.32  # via pytest, tox
 pypng==0.0.18
@@ -21,7 +24,7 @@ pytest==3.0.5
 python-dateutil==2.6.0  # via moto
 pytz==2016.10  # via moto
 requests==2.12.4  # via codecov, moto
-six==1.10.0  # via moto, python-dateutil
+six==1.10.0  # via mock, moto, python-dateutil
 tox==2.5.0
 virtualenv==15.1.0  # via tox
 werkzeug==0.11.15  # via moto
......
@@ -12,7 +12,7 @@ else:
 setup(
     name='django-pyfs',
-    version='1.0.4',
+    version='1.0.5',
     description='Django pyfilesystem integration',
     author='Piotr Mitros',
     author_email='pmitros@edx.org',
@@ -34,5 +34,5 @@ setup(
         "Topic :: Software Development :: Libraries :: Python Modules",
         "License :: OSI Approved :: Apache Software License",
     ],
-    install_requires=['fs', 'six'],
+    install_requires=['fs', 'boto', 'six'],
 )
@@ -8,7 +8,7 @@ envlist = {py27,py35}-{django18,django19,django110}
 [testenv]
 passenv = CI TRAVIS TRAVIS_*
-commands = pytest
+commands = pytest {posargs}
 deps =
     -r{toxinidir}/requirements/requirements.txt
     -r{toxinidir}/requirements/test_requirements.txt
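
With `{posargs}` in place, anything after `--` on the tox command line is forwarded to pytest, for example (illustrative test selector only):

    tox -e py35-django110 -- -k test_get_url_retry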
......