Commit 754fcc98 by willmcgugan

First stab at a Python3 port

parent 87a736d5
......@@ -35,6 +35,10 @@ from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
import compatibility
import six
from six import PY3
class DummyLock(object):
"""A dummy lock object that doesn't do anything.
......@@ -703,7 +707,7 @@ class FS(object):
return sys_path
def getcontents(self, path):
def getcontents(self, path, mode="rb"):
"""Returns the contents of a file as a string.
:param path: A path of file to read
......@@ -712,7 +716,7 @@ class FS(object):
"""
f = None
try:
f = self.open(path, "rb")
f = self.open(path, mode)
contents = f.read()
return contents
finally:
......@@ -731,23 +735,7 @@ class FS(object):
if not data:
self.createfile(path)
else:
f = None
try:
f = self.open(path, 'wb')
if hasattr(data, "read"):
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
chunk = read(chunk_size)
else:
f.write(data)
if hasattr(f, 'flush'):
f.flush()
finally:
if f is not None:
f.close()
compatibility.copy_file_to_fs(data, self, path, chunk_size=chunk_size)
def setcontents_async(self,
path,
......@@ -777,7 +765,53 @@ class FS(object):
if progress_callback is None:
progress_callback = lambda bytes_written:None
finished_event = threading.Event()
def do_setcontents():
if PY3:
try:
f = None
try:
progress_callback(0)
if hasattr(data, "read"):
bytes_written = 0
read = data.read
chunk = read(chunk_size)
if isinstance(chunk, six.text_type):
f = self.open(path, 'w')
else:
f = self.open(path, 'wb')
write = f.write
while chunk:
write(chunk)
bytes_written += len(chunk)
progress_callback(bytes_written)
chunk = read(chunk_size)
else:
if isinstance(data, six.text_type):
f = self.open(path, 'w')
else:
f = self.open(path, 'wb')
f.write(data)
progress_callback(len(data))
if finished_callback is not None:
finished_callback()
finally:
if f is not None:
f.close()
except Exception, e:
if error_callback is not None:
error_callback(e)
raise
finally:
finished_event.set()
else:
try:
f = None
try:
......@@ -812,7 +846,6 @@ class FS(object):
finally:
finished_event.set()
finished_event = threading.Event()
threading.Thread(target=do_setcontents).start()
return finished_event
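For orientation only (not part of the commit), a minimal sketch of how the reworked byte-oriented API is meant to be used, assuming an in-memory filesystem: getcontents() now takes a mode argument defaulting to binary, and setcontents() delegates the chunked copy to the new compatibility helper.

    from fs.memoryfs import MemoryFS

    mem = MemoryFS()                              # illustrative filesystem only
    mem.setcontents("hello.txt", b"hello world")  # bytes payload, copied via copy_file_to_fs
    data = mem.getcontents("hello.txt")           # default mode="rb", returns a byte string
    assert data == b"hello world"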
......
"""
Some functions for Python3 compatibility.
Not for general usage, the functionality in this file is exposed elsewhere
"""
import six
from six import PY3
if PY3:
def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024):
"""Copy data from a string or a file-like object to a given fs/path"""
if hasattr(data, "read"):
read = data.read
chunk = read(chunk_size)
f = None
try:
if isinstance(chunk, six.text_type):
f = dst_fs.open(dst_path, 'w')
else:
f = dst_fs.open(dst_path, 'wb')
write = f.write
while chunk:
write(chunk)
chunk = read(chunk_size)
finally:
if f is not None:
f.close()
else:
f = None
try:
if isinstance(data, six.text_type):
f = dst_fs.open(dst_path, 'w')
else:
f = dst_fs.open(dst_path, 'wb')
f.write(data)
finally:
if f is not None:
f.close()
else:
def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024):
"""Copy data from a string or a file-like object to a given fs/path"""
f = None
try:
f = dst_fs.open(dst_path, 'wb')
if hasattr(data, "read"):
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
chunk = read(chunk_size)
else:
f.write(data)
if hasattr(f, 'flush'):
f.flush()
finally:
if f is not None:
f.close()
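A small usage illustration (not from the commit) for the new helper, assuming the module ships as fs/compatibility.py; on Python 3 the destination is opened in text or binary mode depending on the type of the data, or of its first chunk for file-like sources.

    import io
    from fs.memoryfs import MemoryFS
    from fs import compatibility

    dst = MemoryFS()                                                     # illustrative destination fs
    compatibility.copy_file_to_fs(b"raw bytes", dst, "blob.bin")         # bytes -> opened with 'wb'
    src = io.BytesIO(b"streamed bytes")
    compatibility.copy_file_to_fs(src, dst, "stream.bin", chunk_size=4)  # file-like source, copied chunk by chunk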
......@@ -544,7 +544,7 @@ class MountProcess(subprocess.Popen):
os.close(w)
if os.read(r,1) != "S":
self.terminate()
raise RuntimeError("FUSE error: " + os.read(r,20))
raise RuntimeError("FUSE error: " + os.read(r,20)).decode(NATIVE_ENCODING)
def unmount(self):
"""Cleanly unmount the FUSE filesystem, terminating this subprocess."""
......
......@@ -33,11 +33,6 @@ Other useful classes include:
import tempfile as _tempfile
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
import fs
......@@ -50,6 +45,17 @@ class NotSeekableError(IOError):
class NotTruncatableError(IOError):
pass
import six
from six import PY3, b
if PY3:
_StringIO = six.BytesIO
else:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
class FileLikeBase(object):
"""Base class for implementing file-like objects.
......@@ -265,14 +271,14 @@ class FileLikeBase(object):
if self.closed:
raise IOError("File has been closed")
if self._check_mode("w-") and self._wbuffer is not None:
buffered = ""
buffered = b("")
if self._sbuffer:
buffered = buffered + self._sbuffer
self._sbuffer = None
buffered = buffered + self._wbuffer
self._wbuffer = None
leftover = self._write(buffered,flushing=True)
if leftover:
if leftover and not isinstance(leftover, int):
raise IOError("Could not flush write buffer.")
def close(self):
......@@ -306,7 +312,7 @@ class FileLikeBase(object):
next() returning subsequent lines from the file.
"""
ln = self.readline()
if ln == "":
if ln == b(""):
raise StopIteration()
return ln
......@@ -442,19 +448,19 @@ class FileLikeBase(object):
data = [self._rbuffer]
else:
data = []
self._rbuffer = ""
self._rbuffer = b("")
newData = self._read()
while newData is not None:
data.append(newData)
newData = self._read()
output = "".join(data)
output = b("").join(data)
# Otherwise, we need to return a specific amount of data
else:
if self._rbuffer:
newData = self._rbuffer
data = [newData]
else:
newData = ""
newData = b("")
data = []
sizeSoFar = len(newData)
while sizeSoFar < size:
......@@ -463,20 +469,20 @@ class FileLikeBase(object):
break
data.append(newData)
sizeSoFar += len(newData)
data = "".join(data)
data = b("").join(data)
if sizeSoFar > size:
# read too many bytes, store in the buffer
self._rbuffer = data[size:]
data = data[:size]
else:
self._rbuffer = ""
self._rbuffer = b("")
output = data
return output
def _do_read_rest(self):
"""Private method to read the file through to EOF."""
data = self._do_read(self._bufsize)
while data != "":
while data != b(""):
data = self._do_read(self._bufsize)
def readline(self,size=-1):
......@@ -488,11 +494,11 @@ class FileLikeBase(object):
nextBit = self.read(self._bufsize)
bits.append(nextBit)
sizeSoFar += len(nextBit)
if nextBit == "":
if nextBit == b(""):
break
if size > 0 and sizeSoFar >= size:
break
indx = nextBit.find("\n")
indx = nextBit.find(b("\n"))
# If not found, return whole string up to <size> length
# Any leftovers are pushed onto front of buffer
if indx == -1:
......@@ -508,7 +514,7 @@ class FileLikeBase(object):
extra = bits[-1][indx:]
bits[-1] = bits[-1][:indx]
self._rbuffer = extra + self._rbuffer
return "".join(bits)
return b("").join(bits)
def readlines(self,sizehint=-1):
"""Return a list of all lines in the file."""
......@@ -542,8 +548,8 @@ class FileLikeBase(object):
if self._wbuffer:
string = self._wbuffer + string
leftover = self._write(string)
if leftover is None:
self._wbuffer = ""
if leftover is None or isinstance(leftover, int):
self._wbuffer = b("")
else:
self._wbuffer = leftover
......@@ -649,7 +655,7 @@ class FileWrapper(FileLikeBase):
def _read(self,sizehint=-1):
data = self.wrapped_file.read(sizehint)
if data == "":
if data == b(""):
return None
return data
......@@ -694,7 +700,7 @@ class StringIO(FileWrapper):
if size > curlen:
self.wrapped_file.seek(curlen)
try:
self.wrapped_file.write("\x00"*(size-curlen))
self.wrapped_file.write(b("\x00")*(size-curlen))
finally:
self.wrapped_file.seek(pos)
......@@ -715,7 +721,8 @@ class SpooledTemporaryFile(FileWrapper):
try:
stf_args = (max_size,mode,bufsize) + args
wrapped_file = _tempfile.SpooledTemporaryFile(*stf_args,**kwds)
wrapped_file._file = StringIO()
#wrapped_file._file = StringIO()
wrapped_file._file = six.BytesIO()
self.__is_spooled = True
except AttributeError:
ntf_args = (mode,bufsize) + args
......
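The widespread b("") substitutions above lean on six.b(), which turns a native string literal into a byte string on both interpreters, so FileLikeBase keeps its read/write buffers as bytes after 2to3. A tiny sketch (not part of the diff):

    from six import b

    empty = b("")                           # bytes on Python 3, str (== bytes) on Python 2
    newline = b("\n")
    assert isinstance(empty, bytes)
    assert b("abc\n").find(newline) == 3    # buffer searches like readline()'s still work on byte strings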
......@@ -1170,7 +1170,7 @@ class FTPFS(FS):
self.ftp.storbinary('STOR %s' % _encode(path), data, blocksize=chunk_size)
@ftperrors
def getcontents(self, path):
def getcontents(self, path, mode="rb"):
path = normpath(path)
contents = StringIO()
self.ftp.retrbinary('RETR %s' % _encode(path), contents.write, blocksize=1024*64)
......
......@@ -20,6 +20,9 @@ from fs.filelike import StringIO
from os import SEEK_END
import threading
import six
def _check_mode(mode, mode_chars):
for c in mode_chars:
......@@ -264,10 +267,11 @@ class MemoryFS(FS):
def __str__(self):
return "<MemoryFS>"
__repr__ = __str__
def __repr__(self):
return "MemoryFS()"
def __unicode__(self):
return unicode(self.__str__())
return "<MemoryFS>"
@synchronize
def _get_dir_entry(self, dirpath):
......@@ -600,7 +604,7 @@ class MemoryFS(FS):
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def getcontents(self, path):
def getcontents(self, path, mode="rb"):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
......
......@@ -91,7 +91,7 @@ class MountFS(FS):
__repr__ = __str__
def __unicode__(self):
return unicode(self.__str__())
return u"<%s [%s]>" % (self.__class__.__name__,self.mount_tree.items(),)
def _delegate(self, path):
path = abspath(normpath(path))
......
......@@ -270,7 +270,7 @@ class OpenerRegistry(object):
file_object.fs = fs
return file_object
def getcontents(self, fs_url):
def getcontents(self, fs_url, mode="rb"):
"""Gets the contents from a given FS url (if it references a file)
:param fs_url: a FS URL e.g. ftp://ftp.mozilla.org/README
......@@ -278,7 +278,7 @@ class OpenerRegistry(object):
"""
fs, path = self.parse(fs_url)
return fs.getcontents(path)
return fs.getcontents(path, mode)
def opendir(self, fs_url, writeable=True, create_dir=False):
"""Opens an FS object from an FS URL
......
......@@ -205,7 +205,8 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
@convert_os_errors
def open(self, path, mode="r", **kwargs):
mode = filter(lambda c: c in "rwabt+",mode)
#mode = filter(lambda c: c in "rwabt+",mode)
mode = ''.join(c for c in mode if c in 'rwabt+')
sys_path = self.getsyspath(path)
try:
return open(sys_path, mode, kwargs.get("buffering", -1))
......
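Why the filter() call is replaced above (note, not part of the commit): on Python 3 filter() returns a lazy iterator rather than a string, which the builtin open() cannot accept as a mode, so a plain join over a generator expression is used instead.

    mode = "rb+"
    filtered = filter(lambda c: c in "rwabt+", mode)      # str on Python 2, filter object on Python 3
    portable = ''.join(c for c in mode if c in 'rwabt+')  # native str on both interpreters
    assert portable == "rb+"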
......@@ -39,6 +39,7 @@ from fs import SEEK_SET, SEEK_CUR, SEEK_END
_SENTINAL = object()
from six import PY3, b
class RemoteFileBuffer(FileWrapper):
"""File-like object providing buffer for local file operations.
......@@ -186,7 +187,7 @@ class RemoteFileBuffer(FileWrapper):
self.wrapped_file.seek(curpos)
def _read(self, length=None):
if length < 0:
if length is not None and length < 0:
length = None
with self._lock:
self._fillbuffer(length)
......@@ -668,7 +669,7 @@ class CacheFSMixin(FS):
def getsize(self,path):
return self.getinfo(path)["size"]
def setcontents(self, path, contents="", chunk_size=64*1024):
def setcontents(self, path, contents=b(""), chunk_size=64*1024):
supsc = super(CacheFSMixin,self).setcontents
res = supsc(path, contents, chunk_size=chunk_size)
with self.__cache_lock:
......
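The extra None check added to RemoteFileBuffer._read() above is needed because ordering comparisons against None raise TypeError on Python 3, while Python 2 quietly treated None as smaller than any number. A minimal illustration (not from the commit):

    length = None
    # Python 2: "None < 0" evaluates to True, so the old guard happened to work.
    # Python 3: "None < 0" raises TypeError, hence the explicit "is not None" test first.
    if length is not None and length < 0:
        length = None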
......@@ -155,11 +155,11 @@ class S3FS(FS):
super(S3FS,self).__setstate__(state)
self._tlocal = thread_local()
def __str__(self):
def __repr__(self):
args = (self.__class__.__name__,self._bucket_name,self._prefix)
return '<%s: %s:%s>' % args
__repr__ = __str__
__str__ = __repr__
def _s3path(self,path):
"""Get the absolute path to a file stored in S3."""
......
......@@ -177,7 +177,7 @@ class SFTPFS(FS):
self._transport = connection
def __unicode__(self):
return '<SFTPFS: %s>' % self.desc('/')
return u'<SFTPFS: %s>' % self.desc('/')
@classmethod
def _agent_auth(cls, transport, username):
......
......@@ -49,10 +49,10 @@ class TempFS(OSFS):
self._cleaned = False
super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
def __str__(self):
def __repr__(self):
return '<TempFS: %s>' % self._temp_dir
__repr__ = __str__
__str__ = __repr__
def __unicode__(self):
return u'<TempFS: %s>' % self._temp_dir
......
......@@ -20,6 +20,10 @@ from fs.errors import *
from fs import rpcfs
from fs.expose.xmlrpc import RPCFSServer
import six
from six import PY3, b
class TestRPCFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
def makeServer(self,fs,addr):
......@@ -106,7 +110,7 @@ class TestRPCFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
sock = socket.socket(af, socktype, proto)
sock.settimeout(.1)
sock.connect(sa)
sock.send("\n")
sock.send(b("\n"))
except socket.error, e:
pass
finally:
......@@ -114,10 +118,16 @@ class TestRPCFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
sock.close()
from fs import sftpfs
from fs.expose.sftp import BaseSFTPServer
try:
from fs import sftpfs
from fs.expose.sftp import BaseSFTPServer
except ImportError:
if not PY3:
raise
class TestSFTPFS(TestRPCFS):
__test__ = not PY3
def makeServer(self,fs,addr):
return BaseSFTPServer(addr,fs)
......@@ -209,7 +219,7 @@ if dokan.is_available:
def test_safety_wrapper(self):
rawfs = MemoryFS()
safefs = dokan.Win32SafetyFS(rawfs)
rawfs.setcontents("autoRun.inf","evilcodeevilcode")
rawfs.setcontents("autoRun.inf", b("evilcodeevilcode"))
self.assertTrue(safefs.exists("_autoRun.inf"))
self.assertTrue("autoRun.inf" not in safefs.listdir("/"))
safefs.setcontents("file:stream","test")
......
......@@ -12,9 +12,13 @@ import time
from os.path import abspath
import urllib
from six import PY3
try:
from pyftpdlib import ftpserver
except ImportError:
if not PY3:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import *
......@@ -24,6 +28,8 @@ from fs import ftpfs
ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
__test__ = not PY3
def setUp(self):
global ftp_port
ftp_port += 1
......
......@@ -22,6 +22,7 @@ from fs.tempfs import TempFS
from fs.path import *
from fs.local_functools import wraps
from six import PY3, b
class RemoteTempFS(TempFS):
"""
......@@ -79,7 +80,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
self.fs.close()
self.fakeOff()
def fake_setcontents(self, path, content='', chunk_size=16*1024):
def fake_setcontents(self, path, content=b(''), chunk_size=16*1024):
''' Fake replacement for RemoteTempFS setcontents() '''
raise self.FakeException("setcontents should not be called here!")
......@@ -99,8 +100,8 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
'''
Tests on-demand loading of remote content in RemoteFileBuffer
'''
contents = "Tristatricettri stribrnych strikacek strikalo" + \
"pres tristatricettri stribrnych strech."
contents = b("Tristatricettri stribrnych strikacek strikalo") + \
b("pres tristatricettri stribrnych strech.")
f = self.fs.open('test.txt', 'wb')
f.write(contents)
f.close()
......@@ -136,10 +137,10 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Rollback position 5 characters before eof
f._rfile.seek(len(contents[:-5]))
# Write 10 new characters (will make contents longer for 5 chars)
f.write(u'1234567890')
f.write(b('1234567890'))
f.flush()
# We are at the end of the file (the buffer no longer serves anything)
self.assertEquals(f.read(), '')
self.assertEquals(f.read(), b(''))
f.close()
self.fakeOn()
......@@ -147,7 +148,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Check if we wrote everything OK from
# previous writing over the remote buffer edge
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:-5] + u'1234567890')
self.assertEquals(f.read(), contents[:-5] + b('1234567890'))
f.close()
self.fakeOff()
......@@ -161,19 +162,19 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
'''
self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=True)
f.write('Sample text')
f.write(b('Sample text'))
self.assertRaises(self.FakeException, f.flush)
f.write('Second sample text')
f.write(b('Second sample text'))
self.assertRaises(self.FakeException, f.close)
self.fakeOff()
f.close()
self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=False)
f.write('Sample text')
f.write(b('Sample text'))
# FakeException is not raised, because setcontents is not called
f.flush()
f.write('Second sample text')
f.write(b('Second sample text'))
self.assertRaises(self.FakeException, f.close)
self.fakeOff()
......@@ -183,8 +184,8 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
back to remote destination and opened file is still
in good condition.
'''
contents = "Zlutoucky kun upel dabelske ody."
contents2 = 'Ententyky dva spaliky cert vyletel z elektriky'
contents = b("Zlutoucky kun upel dabelske ody.")
contents2 = b('Ententyky dva spaliky cert vyletel z elektriky')
f = self.fs.open('test.txt', 'wb')
f.write(contents)
......@@ -195,17 +196,17 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
self.assertEquals(f.read(10), contents[:10])
self.assertEquals(f._rfile.tell(), 10)
# Write garbage to file to mark it as _changed
f.write('x')
f.write(b('x'))
# This should read the rest of the file and store the file back again.
f.flush()
f.seek(0)
# Check that the local copy of the file is uncorrupted...
self.assertEquals(f.read(), contents[:10] + 'x' + contents[11:])
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close()
# And check that the copy on storage is also uncorrupted
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + 'x' + contents[11:])
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close()
# Now try it again, but write garbage behind edge of remote file
......@@ -243,7 +244,7 @@ class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.fs.cache_timeout = None
try:
self.assertFalse(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello","world")
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.remove("hello")
self.assertTrue(self.fs.isfile("hello"))
......@@ -257,11 +258,11 @@ class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.fs.cache_timeout = None
try:
self.assertFalse(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello","world")
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.remove("hello")
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello","world")
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.fs.remove("hello")
self.assertFalse(self.fs.isfile("hello"))
......@@ -315,7 +316,7 @@ class DisconnectingFS(WrapFS):
time.sleep(random.random()*0.1)
self._connected = not self._connected
def setcontents(self, path, contents='', chunk_size=64*1024):
def setcontents(self, path, contents=b(''), chunk_size=64*1024):
return self.wrapped_fs.setcontents(path, contents)
def close(self):
......
......@@ -13,7 +13,13 @@ import unittest
from fs.tests import FSTestCases, ThreadingTestCases
from fs.path import *
from fs import s3fs
from six import PY3
try:
from fs import s3fs
except ImportError:
if not PY3:
raise
class TestS3FS(unittest.TestCase,FSTestCases,ThreadingTestCases):
# Disable the tests by default
......
......@@ -29,6 +29,8 @@ if sys.platform == "win32":
else:
watch_win32 = None
import six
from six import PY3, b
class WatcherTestCases:
"""Testcases for filesystems providing change watcher support.
......@@ -88,11 +90,11 @@ class WatcherTestCases:
def test_watch_readfile(self):
self.setupWatchers()
self.fs.setcontents("hello","hello world")
self.fs.setcontents("hello", b("hello world"))
self.assertEventOccurred(CREATED,"/hello")
self.clearCapturedEvents()
old_atime = self.fs.getinfo("hello").get("accessed_time")
self.assertEquals(self.fs.getcontents("hello"),"hello world")
self.assertEquals(self.fs.getcontents("hello"), b("hello world"))
if not isinstance(self.watchfs,PollingWatchableFS):
# Help it along by updating the atime.
# TODO: why is this necessary?
......@@ -121,17 +123,17 @@ class WatcherTestCases:
def test_watch_writefile(self):
self.setupWatchers()
self.fs.setcontents("hello","hello world")
self.fs.setcontents("hello", b("hello world"))
self.assertEventOccurred(CREATED,"/hello")
self.clearCapturedEvents()
self.fs.setcontents("hello","hello again world")
self.fs.setcontents("hello", b("hello again world"))
self.assertEventOccurred(MODIFIED,"/hello")
def test_watch_single_file(self):
self.fs.setcontents("hello","hello world")
self.fs.setcontents("hello", b("hello world"))
events = []
self.watchfs.add_watcher(events.append,"/hello",(MODIFIED,))
self.fs.setcontents("hello","hello again world")
self.fs.setcontents("hello", b("hello again world"))
self.fs.remove("hello")
self.waitForEvents()
for evt in events:
......@@ -140,10 +142,10 @@ class WatcherTestCases:
def test_watch_single_file_remove(self):
self.fs.makedir("testing")
self.fs.setcontents("testing/hello","hello world")
self.fs.setcontents("testing/hello", b("hello world"))
events = []
self.watchfs.add_watcher(events.append,"/testing/hello",(REMOVED,))
self.fs.setcontents("testing/hello","hello again world")
self.fs.setcontents("testing/hello", b("hello again world"))
self.waitForEvents()
self.fs.remove("testing/hello")
self.waitForEvents()
......@@ -154,7 +156,7 @@ class WatcherTestCases:
def test_watch_iter_changes(self):
changes = iter_changes(self.watchfs)
self.fs.makedir("test1")
self.fs.setcontents("test1/hello","hello world")
self.fs.setcontents("test1/hello", b("hello world"))
self.waitForEvents()
self.fs.removedir("test1",force=True)
self.waitForEvents()
......
......@@ -15,11 +15,15 @@ import tempfile
from fs import osfs
from fs.errors import *
from fs.path import *
from fs import wrapfs
import six
from six import PY3, b
from fs import wrapfs
class TestWrapFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
__test__ = False
def setUp(self):
self.temp_dir = tempfile.mkdtemp(u"fstest")
self.fs = wrapfs.WrapFS(osfs.OSFS(self.temp_dir))
......@@ -67,7 +71,7 @@ class TestLimitSizeFS(TestWrapFS):
for i in xrange(1024*2):
try:
total_written += 1030
self.fs.setcontents("file"+str(i),"C"*1030)
self.fs.setcontents("file %i" % i, b("C")*1030)
except StorageSpaceError:
self.assertTrue(total_written > 1024*1024*2)
self.assertTrue(total_written < 1024*1024*2 + 1030)
......
......@@ -120,8 +120,8 @@ class WrapFS(FS):
def __unicode__(self):
return u"<%s: %s>" % (self.__class__.__name__,self.wrapped_fs,)
def __str__(self):
return unicode(self).encode(sys.getdefaultencoding(),"replace")
#def __str__(self):
# return unicode(self).encode(sys.getdefaultencoding(),"replace")
@rewrite_errors
......@@ -155,10 +155,11 @@ class WrapFS(FS):
# We can't pass setcontents() through to the wrapped FS if the
# wrapper has defined a _file_wrap method, as it would bypass
# the file contents wrapping.
if self._file_wrap.im_func is WrapFS._file_wrap.im_func:
#if self._file_wrap.im_func is WrapFS._file_wrap.im_func:
if getattr(self.__class__, '_file_wrap', None) is getattr(WrapFS, '_file_wrap', None):
return self.wrapped_fs.setcontents(self._encode(path), data, chunk_size=chunk_size)
else:
return super(WrapFS,self).setcontents(path, data, chunk_size)
return super(WrapFS,self).setcontents(path, data, chunk_size=chunk_size)
@rewrite_errors
def createfile(self, path):
......
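Background for the _file_wrap check rewritten above (not part of the commit): Python 3 methods have no im_func, and looking the attribute up on the class returns the plain function object, so an identity comparison shows whether a subclass overrode the hook. A rough sketch of the idea, using throwaway class names:

    class Base(object):
        def _file_wrap(self, f, mode):
            return f

    class PlainChild(Base):             # does not override the hook
        pass

    class WrappingChild(Base):          # overrides the hook
        def _file_wrap(self, f, mode):
            return f

    # Python 3 semantics: class attribute lookup yields the function itself.
    # (On Python 2 the lookup returns a fresh unbound-method wrapper each time,
    # which is why the original code compared im_func instead.)
    print(getattr(PlainChild, '_file_wrap', None) is getattr(Base, '_file_wrap', None))         # True on Python 3
    print(getattr(WrappingChild, '_file_wrap', None) is not getattr(Base, '_file_wrap', None))  # True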
......@@ -95,7 +95,7 @@ class LimitSizeFS(WrapFS):
def setcontents(self, path, data, chunk_size=64*1024):
f = None
try:
f = self.open(path, 'w')
f = self.open(path, 'wb')
if hasattr(data, 'read'):
chunk = data.read(chunk_size)
while chunk:
......
......@@ -37,7 +37,7 @@ class SubFS(WrapFS):
return u'<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
def __repr__(self):
return str(self)
return "SubFS(%r, %r)" % (self.wrapped_fs, self.sub_dir)
def desc(self, path):
if path in ('', '/'):
......
......@@ -142,7 +142,7 @@ class ZipFS(FS):
return "<ZipFS: %s>" % self.zip_path
def __unicode__(self):
return unicode(self.__str__())
return u"<ZipFS: %s>" % self.zip_path
def _parse_resource_list(self):
for path in self.zf.namelist():
......@@ -209,7 +209,7 @@ class ZipFS(FS):
raise ValueError("Mode must contain be 'r' or 'w'")
@synchronize
def getcontents(self, path):
def getcontents(self, path, mode="rb"):
if not self.exists(path):
raise ResourceNotFoundError(path)
path = normpath(relpath(path))
......
#!/usr/bin/env python
from distutils.core import setup
from fs import __version__ as VERSION
#from distribute_setup import use_setuptools
#use_setuptools()
from setuptools import setup
import sys
PY3 = sys.version_info >= (3,)
VERSION = "0.4.1"
COMMANDS = ['fscat',
'fscp',
......@@ -17,7 +23,7 @@ COMMANDS = ['fscat',
classifiers = [
'Development Status :: 3 - Alpha',
"Development Status :: 5 - Production/Stable",
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
......@@ -30,7 +36,12 @@ long_desc = """Pyfilesystem is a module that provides a simplified common interf
Even if you only need to work with files and directories on the local hard drive, Pyfilesystem can simplify your code and make it more robust -- with the added advantage that you can change where the files are located by changing a single line of code.
"""
setup(name='fs',
extra = {}
if PY3:
extra["use_2to3"] = True
setup(install_requires=['distribute'],
name='fs',
version=VERSION,
description="Filesystem abstraction",
long_description=long_desc,
......@@ -55,5 +66,6 @@ setup(name='fs',
'fs.commands'],
scripts=['fs/commands/%s' % command for command in COMMANDS],
classifiers=classifiers,
**extra
)
[tox]
envlist = py25,py26,py27
envlist = py25,py26,py27,py32
[testenv]
deps = dexml
deps = distribute
six
dexml
paramiko
boto
nose
mako
pyftpdlib
commands = nosetests -v \
changedir=.tox
commands = nosetests fs.tests -v \
[]
[testenv:py25]
deps = dexml
deps = distribute
six
dexml
paramiko
boto
nose
mako
pyftpdlib
simplejson
[testenv:py32]
commands = nosetests fs.tests -v \
[]
deps = distribute
six
dexml
nose