Commit 2cafbe31 by willmcgugan

A few tweaks and fixes

parent c85ece62
......@@ -320,7 +320,7 @@ class FS(object):
"""
def get_path(p):
if not full:
if not full or absolute:
return pathjoin(path, p)
return [(p, self.getinfo(get_path(p)))
......
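For context, this one-line tweak widens the cases where the listed name is re-joined with the directory path before being handed to getinfo: plain listings and absolute listings now both get the join, and only full (root-relative) listings pass through unchanged. A rough, hedged sketch of that behaviour in isolation (the pathjoin stand-in and the sample values are illustrative, not the library's code):

    def pathjoin(a, b):
        # Illustrative stand-in for the library's pathjoin helper.
        return a.rstrip("/") + "/" + b.lstrip("/")

    def get_path(path, p, full=False, absolute=False):
        # After this commit the directory path is joined back on unless the
        # caller asked for full (root-relative) paths without absolute ones.
        if not full or absolute:
            return pathjoin(path, p)
        return p

    assert get_path("a/b", "c.txt") == "a/b/c.txt"
    assert get_path("a/b", "a/b/c.txt", full=True) == "a/b/c.txt"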
......@@ -543,8 +543,6 @@ class _FTPFile(object):
self.ftp.voidcmd('TYPE I')
self.conn = ftp.transfercmd('RETR '+path, None)
#self._ftp_thread = threading.Thread(target=do_read)
#self._ftp_thread.start()
elif 'w' in mode or 'a' in mode:
self.ftp.voidcmd('TYPE I')
if 'a' in mode:
......@@ -552,16 +550,6 @@ class _FTPFile(object):
self.conn = self.ftp.transfercmd('APPE '+path)
else:
self.conn = self.ftp.transfercmd('STOR '+path)
#while 1:
# buf = fp.read(blocksize)
# if not buf: break
# conn.sendall(buf)
# if callback: callback(buf)
#conn.close()
#return self.voidresp()
#self._ftp_thread = threading.Thread(target=do_write)
#self._ftp_thread.start()
@synchronize
def read(self, size=None):
......@@ -689,17 +677,20 @@ class _FTPFile(object):
"""
endings = '\r\n'
chars = []
append = chars.append
read = self.read
join = ''.join
while True:
char = self.read(1)
char = read(1)
if not char:
yield ''.join(chars)
del chars[:]
if chars:
yield join(chars)
break
chars.append(char)
append(char)
if char in endings:
line = ''.join(chars)
line = join(chars)
del chars[:]
c = self.read(1)
c = read(1)
if not c:
yield line
break
......@@ -707,7 +698,7 @@ class _FTPFile(object):
yield line + c
else:
yield line
chars.append(c)
append(c)
......
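The readline rewrite above is a common CPython micro-optimisation: attribute and method look-ups that run on every loop iteration (self.read, chars.append, ''.join) are bound to local names once before the loop starts. A minimal, hedged illustration of the pattern with an invented reader class:

    class ByteSource(object):
        """Toy stand-in for _FTPFile: returns one chunk per read() call."""
        def __init__(self, data):
            self._data = data
            self._pos = 0
        def read(self, size=1):
            chunk = self._data[self._pos:self._pos + size]
            self._pos += size
            return chunk

    def read_all(src):
        # Bind the bound method and the list method to locals once; each
        # call inside the loop then skips the attribute lookup.
        read = src.read
        chars = []
        append = chars.append
        while True:
            c = read(1)
            if not c:
                break
            append(c)
        return ''.join(chars)

    assert read_all(ByteSource("abc")) == "abc"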
......@@ -169,11 +169,11 @@ class DirEntry(object):
class MemoryFS(FS):
""" An in-memory filesystem.
MemoryFS objects are very fast, but non-permanent. They are useful for creating a directory structure prior to writing it somewhere permanent.
"""
def _make_dir_entry(self, *args, **kwargs):
......@@ -181,6 +181,7 @@ class MemoryFS(FS):
def __init__(self, file_factory=None):
super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default)
self.dir_entry_factory = DirEntry
self.file_factory = file_factory or MemoryFile
......@@ -235,7 +236,7 @@ class MemoryFS(FS):
@synchronize
def makedir(self, dirname, recursive=False, allow_recreate=False):
if not dirname:
if not dirname:
raise PathError(dirname)
fullpath = dirname
dirpath, dirname = pathsplit(dirname)
......@@ -463,5 +464,3 @@ class MemoryFS(FS):
info['size'] = len(dir_entry.data or '')
return info
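The new file_factory argument lets callers substitute their own in-memory file class, with MemoryFile as the default via the 'or' fallback above. A hedged usage sketch of MemoryFS itself (module path and call sequence are assumptions based on the tests elsewhere in this commit):

    from fs.memoryfs import MemoryFS

    mem = MemoryFS()                      # file_factory defaults to MemoryFile
    mem.makedir("docs")
    f = mem.open("docs/notes.txt", "w")
    f.write("kept only in memory")
    f.close()
    assert mem.getcontents("docs/notes.txt") == "kept only in memory"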
......@@ -35,7 +35,7 @@ class MountFS(FS):
FileMount = FileMount
def __init__(self, thread_synchronize=_thread_synchronize_default):
FS.__init__(self, thread_synchronize=thread_synchronize)
super(MountFS, self).__init__(thread_synchronize=thread_synchronize)
self.mount_tree = ObjectTree()
def __str__(self):
......@@ -249,9 +249,9 @@ class MountFS(FS):
@synchronize
def unmount(self, path):
"""Unmounds a path.
:param path: Path to unmount
"""
path = normpath(path)
del self.mount_tree[path]
......
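Several hunks in this commit (MountFS, MultiFS, OSFS, S3FS, TempFS, ZipFS) replace a direct FS.__init__(self, ...) call with cooperative super(...).__init__(...). The difference matters under multiple inheritance, where the cooperative form follows the MRO and runs every initialiser exactly once; a small hedged sketch with invented class names:

    class Base(object):
        def __init__(self, **kwargs):
            self.base_ready = True

    class WrapA(Base):
        def __init__(self, **kwargs):
            self.a_ready = True
            super(WrapA, self).__init__(**kwargs)

    class WrapB(Base):
        def __init__(self, **kwargs):
            self.b_ready = True
            super(WrapB, self).__init__(**kwargs)

    class Both(WrapA, WrapB):
        pass

    b = Both()
    # Cooperative super() follows the MRO (Both -> WrapA -> WrapB -> Base),
    # so every initialiser runs.  Had WrapA called Base.__init__(self)
    # directly, WrapB.__init__ would have been skipped entirely.
    assert b.a_ready and b.b_ready and b.base_ready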
......@@ -39,7 +39,7 @@ class MultiFS(FS):
"""
def __init__(self):
FS.__init__(self, thread_synchronize=_thread_synchronize_default)
super(MultiFS, self).__init__(thread_synchronize=_thread_synchronize_default)
self.fs_sequence = []
self.fs_lookup = {}
......@@ -195,4 +195,3 @@ class MultiFS(FS):
if fs.exists(path):
return fs.getinfo(path)
raise ResourceNotFoundError(path)
......@@ -41,19 +41,19 @@ class OSFS(FS):
methods in the os and os.path modules.
"""
def __init__(self, root_path, dir_mode=0700, thread_synchronize=_thread_synchronize_default, encoding=None):
def __init__(self, root_path, dir_mode=0700, thread_synchronize=_thread_synchronize_default, encoding=None, create=False):
"""
Creates an FS object that represents the OS Filesystem under a given root path
:param root_path: The root OS path
:param dir_mode: Mode to use when creating directories
:param thread_synchronize: If True, this object will be thread-safe by use of a threading.Lock object
:param encoding: The encoding method for path strings
:param create: If True, then root_path will be created (if necessary)
"""
FS.__init__(self, thread_synchronize=thread_synchronize)
super(OSFS, self).__init__(thread_synchronize=thread_synchronize)
self.encoding = encoding
root_path = os.path.expanduser(os.path.expandvars(root_path))
root_path = os.path.normpath(os.path.abspath(root_path))
......@@ -61,6 +61,13 @@ class OSFS(FS):
if sys.platform == "win32":
if not root_path.startswith("\\\\?\\"):
root_path = u"\\\\?\\" + root_path
if create:
try:
os.makedirs(root_path, mode=dir_mode)
except OSError:
pass
if not os.path.exists(root_path):
raise ResourceNotFoundError(root_path,msg="Root directory does not exist: %(path)s")
if not os.path.isdir(root_path):
......@@ -70,7 +77,7 @@ class OSFS(FS):
def __str__(self):
return "<OSFS: %s>" % self.root_path
def __unicode__(self):
return u"<OSFS: %s>" % self.root_path
......@@ -127,7 +134,7 @@ class OSFS(FS):
raise ParentDirectoryMissingError(path)
else:
raise
@convert_os_errors
def remove(self, path):
sys_path = self.getsyspath(path)
......@@ -225,5 +232,3 @@ class OSFS(FS):
@convert_os_errors
def listxattrs(self, path):
return xattr.xattr(self.getsyspath(path)).keys()
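The new create flag makes a missing root directory on demand instead of raising; creation errors are swallowed and the explicit existence check afterwards decides the outcome. A hedged sketch of that create-if-missing logic outside the class (the helper name and the ValueError are illustrative; in the library the call is simply OSFS(path, create=True)):

    import os

    def ensure_root(root_path, dir_mode=0700, create=False):
        # Mirrors the hunk above: expand and normalise the path, optionally
        # create it, then insist that it exists and is a directory.
        root_path = os.path.expanduser(os.path.expandvars(root_path))
        root_path = os.path.normpath(os.path.abspath(root_path))
        if create:
            try:
                os.makedirs(root_path, mode=dir_mode)
            except OSError:
                # Already exists (or could not be made); the check below decides.
                pass
        if not os.path.isdir(root_path):
            raise ValueError("Root directory does not exist: %s" % root_path)
        return root_path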
......@@ -30,7 +30,7 @@ else:
def __init__(self):
self._map = {}
def __getattr__(self,attr):
try:
return self._map[(threading.currentThread(),attr)]
except KeyError:
......@@ -88,7 +88,7 @@ class S3FS(FS):
prefix = prefix + separator
self._prefix = prefix
self._tlocal = thread_local()
FS.__init__(self, thread_synchronize=thread_synchronize)
super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
# Make _s3conn and _s3bukt properties that are created on demand,
# since they cannot be stored during pickling.
......@@ -304,14 +304,14 @@ class S3FS(FS):
else:
entries = [k.name for k in keys]
return entries
def makedir(self,path,recursive=False,allow_recreate=False):
"""Create a directory at the given path.
The 'mode' argument is accepted for compatibility with the standard
FS interface, but is currently ignored.
"""
s3path = self._s3path(path)
s3path = self._s3path(path)
s3pathD = s3path + self._separator
if s3pathD == self._prefix:
if allow_recreate:
......@@ -369,7 +369,7 @@ class S3FS(FS):
if s3path != self._prefix:
s3path = s3path + self._separator
if force:
# If we will be forcibly removing any directory contents, we
# If we will be forcibly removing any directory contents, we
# might as well get the un-delimited list straight away.
ks = self._s3bukt.list(prefix=s3path)
else:
......@@ -393,7 +393,7 @@ class S3FS(FS):
self.removedir(pdir,recursive=True,force=False)
except DirectoryNotEmptyError:
pass
def rename(self,src,dst):
"""Rename the file at 'src' to 'dst'."""
# Actually, in S3 'rename' is exactly the same as 'move'
......@@ -486,4 +486,3 @@ class S3FS(FS):
def get_total_size(self):
"""Get total size of all files in this FS."""
return sum(k.size for k in self._s3bukt.list(prefix=self._prefix))
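For reference, the thread_local fallback touched near the top of this file keeps per-thread attributes in an ordinary dict keyed by the current thread and attribute name. A simplified, hedged sketch of that idea (the __setattr__ and the AttributeError branch are assumptions; the original's except clause is truncated in the hunk above):

    import threading

    class SimpleThreadLocal(object):
        """Minimal stand-in for threading.local on interpreters without it."""
        def __init__(self):
            # Use object.__setattr__ so creating the map does not recurse
            # through the overridden __setattr__/__getattr__ below.
            object.__setattr__(self, "_map", {})
        def __getattr__(self, attr):
            try:
                return self._map[(threading.currentThread(), attr)]
            except KeyError:
                raise AttributeError(attr)
        def __setattr__(self, attr, value):
            self._map[(threading.currentThread(), attr)] = value

    tl = SimpleThreadLocal()
    tl.connection = "per-thread value"
    assert tl.connection == "per-thread value"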
......@@ -6,7 +6,7 @@ Filesystem accessing an SFTP server (via paramiko)
"""
import datetime
import datetime
import stat as statinfo
import paramiko
......@@ -79,6 +79,7 @@ class SFTPFS(FS):
connection.connect(**credentials)
self._transport = connection
self.root_path = abspath(normpath(root_path))
super(SFTPFS, self).__init__()
def __del__(self):
self.close()
......@@ -146,7 +147,7 @@ class SFTPFS(FS):
return False
raise
return True
@convert_os_errors
def isdir(self,path):
npath = self._normpath(path)
......@@ -205,7 +206,7 @@ class SFTPFS(FS):
self.makedir(path,allow_recreate=allow_recreate)
else:
# Undetermined error, let the decorator handle it
raise
raise
else:
# Destination exists
if statinfo.S_ISDIR(stat.st_mode):
......@@ -320,5 +321,3 @@ class SFTPFS(FS):
npath = self._normpath(path)
stats = self.client.stat(npath)
return stats.st_size
......@@ -28,7 +28,7 @@ class TempFS(OSFS):
"""
self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir)
self._cleaned = False
OSFS.__init__(self, self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
def __str__(self):
return '<TempFS: %s>' % self._temp_dir
......@@ -86,5 +86,3 @@ class TempFS(OSFS):
finally:
self._lock.release()
super(TempFS,self).close()
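TempFS above is simply an OSFS rooted at a directory made by tempfile.mkdtemp, and close() removes that directory again. A hedged usage sketch (module path and the createfile call are assumptions drawn from the tests in this commit):

    from fs.tempfs import TempFS

    temp = TempFS()
    temp.createfile("scratch.txt")
    assert temp.exists("scratch.txt")
    temp.close()   # cleans up the mkdtemp directory behind the FS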
......@@ -26,7 +26,7 @@ except ImportError:
import dummy_threading as threading
class FSTestCases:
class FSTestCases(object):
"""Base suite of testcases for filesystem implementations.
Any FS subclass should be capable of passing all of these tests.
......@@ -70,14 +70,14 @@ class FSTestCases:
f = self.fs.open("test1.txt","w")
f.write("testing")
f.close()
self.check("test1.txt")
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt","r")
self.assertEquals(f.read(),"testing")
f.close()
f = self.fs.open("test1.txt","w")
f.write("test file overwrite")
f.close()
self.check("test1.txt")
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt","r")
self.assertEquals(f.read(),"test file overwrite")
f.close()
......@@ -165,9 +165,9 @@ class FSTestCases:
self.fs.makedir(alpha)
self.fs.createfile(alpha+"/a")
self.fs.createfile(alpha+"/"+beta)
self.check(alpha)
self.assertTrue(self.check(alpha))
self.assertEquals(sorted(self.fs.listdir(alpha)),["a",beta])
def test_makedir(self):
check = self.check
self.fs.makedir("a")
......@@ -383,7 +383,7 @@ class FSTestCases:
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.copydir("a", "copy of a")
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
......@@ -575,9 +575,9 @@ class ThreadingTestCases:
errors = []
threads = [self._makeThread(f,errors) for f in funcs]
for t in threads:
t.start()
t.start()
for t in threads:
t.join()
t.join()
for (c,e,t) in errors:
raise c,e,t
......@@ -746,5 +746,3 @@ class ThreadingTestCases:
self.assertEquals(self.fs.getsize("thread2.txt"),len(c))
self.assertEquals(self.fs.getcontents("thread2.txt"),c)
self._runThreads(thread1,thread2)
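The _runThreads helper exercised above uses the usual start-everything-then-join pattern so the worker functions genuinely overlap, collecting exception info for re-raising afterwards. A small hedged sketch of that pattern, independent of the test suite's own helpers:

    import sys
    import threading

    def run_concurrently(*funcs):
        """Start each function on its own thread, wait for all of them, and
        return any collected exception info so the caller can re-raise it."""
        errors = []
        def wrap(func):
            def runner():
                try:
                    func()
                except Exception:
                    errors.append(sys.exc_info())
            return runner
        threads = [threading.Thread(target=wrap(f)) for f in funcs]
        for t in threads:
            t.start()   # start every thread first so the work overlaps
        for t in threads:
            t.join()    # then wait for all of them to finish
        return errors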
......@@ -55,18 +55,18 @@ class ZipFS(FS):
"""A FileSystem that represents a zip file."""
def __init__(self, zip_file, mode="r", compression="deflated", allowZip64=False, encoding="CP437", thread_synchronize=True):
def __init__(self, zip_file, mode="r", compression="deflated", allow_zip_64=False, encoding="CP437", thread_synchronize=True):
"""Create a FS that maps on to a zip file.
:param zip_file: A (system) path, or a file-like object
:param mode: Mode to open zip file: 'r' for reading, 'w' for writing or 'a' for appending
:param compression: Can be 'deflated' (default) to compress data or 'stored' to just store data
:param allowZip64: -- Set to True to use zip files greater than 2 GB, default is False
:param allow_zip_64: -- Set to True to use zip files greater than 2 GB, default is False
:param encoding: -- The encoding to use for unicode filenames
:param thread_synchronize: -- Set to True (default) to enable thread-safety
"""
FS.__init__(self, thread_synchronize=thread_synchronize)
super(ZipFS, self).__init__(thread_synchronize=thread_synchronize)
if compression == "deflated":
compression_type = ZIP_DEFLATED
elif compression == "stored":
......@@ -80,7 +80,7 @@ class ZipFS(FS):
self.zip_mode = mode
self.encoding = encoding
try:
self.zf = ZipFile(zip_file, mode, compression_type, allowZip64)
self.zf = ZipFile(zip_file, mode, compression_type, allow_zip_64)
except IOError:
raise ResourceNotFoundError(str(zip_file), msg="Zip file does not exist: %(path)s")
self.zip_path = str(zip_file)
......@@ -212,5 +212,3 @@ class ZipFS(FS):
info['created_time'] = datetime.datetime(*zinfo['date_time'])
info.update(zinfo)
return info
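With the keyword renamed, archives that may exceed the classic zip size limits are requested via allow_zip_64, which is passed straight through to zipfile.ZipFile as shown above. A hedged usage sketch (module path and the write sequence are assumptions):

    from fs.zipfs import ZipFS

    archive = ZipFS("backup.zip", mode="w", allow_zip_64=True)
    f = archive.open("hello.txt", "w")
    f.write("hello")
    f.close()
    archive.close()   # finalises and writes the zip archive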