Commit 73cac8d1 by Piotr Mitros

Revert to version 0.4.0

parent c30091b0
Metadata-Version: 1.0
Name: fs
Version: 0.4.0
Summary: Filesystem abstraction
Home-page: http://code.google.com/p/pyfilesystem/
Author: Will McGugan
Author-email: will@willmcgugan.com
License: BSD
Download-URL: http://code.google.com/p/pyfilesystem/downloads/list
Description: Pyfilesystem is a module that provides a simplified common interface to many types of filesystem. Filesystems exposed via Pyfilesystem can also be served over the network, or 'mounted' on the native filesystem.
Even if you only need to work with files and directories on the local hard-drive, Pyfilesystem can simplify your code and make it more robust -- with the added advantage that you can change where the files are located by changing a single line of code.
Platform: any
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Topic :: System :: Filesystems
......@@ -15,23 +15,26 @@ implementations of this interface such as:
"""
__version__ = "0.5.0"
__version__ = "0.4.0"
__author__ = "Will McGugan (will@willmcgugan.com)"
# No longer necessary - WM
#from base import *
# provide these by default so people can use 'fs.path.basename' etc.
from fs import errors
from fs import path
import errors
import path
_thread_synchronize_default = True
def set_thread_synchronize_default(sync):
    """Sets the default thread synchronisation flag.

    FS objects are made thread-safe through the use of a per-FS threading Lock
    object. Since this can introduce an small overhead it can be disabled with
    this function if the code is single-threaded.

    :param sync: Set whether to use thread synchronisation for new FS objects

    """
    # Bug fix: the global was previously misspelled
    # '_thread_synchronization_default', so calling this function created a
    # new, unused module global instead of updating the real flag defined
    # at module level (_thread_synchronize_default).
    global _thread_synchronize_default
    _thread_synchronize_default = sync
......
......@@ -6,8 +6,8 @@ A collection of filesystems that map to application specific locations.
These classes abstract away the different requirements for user data across platforms,
which vary in their conventions. They are all subclasses of :class:`fs.osfs.OSFS`,
all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters.
all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters.
Uses `appdirs` (https://github.com/ActiveState/appdirs), written by Trent Mick and Sridhar Ratnakumar <trentm at gmail com; github at srid name>
......@@ -21,7 +21,6 @@ __all__ = ['UserDataFS',
'UserCacheFS',
'UserLogFS']
class UserDataFS(OSFS):
"""A filesystem for per-user application data."""
def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
......@@ -31,10 +30,10 @@ class UserDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserDataFS, self).__init__(app_dirs.user_data_dir, create=create)
super(self.__class__, self).__init__(app_dirs.user_data_dir, create=create)
class SiteDataFS(OSFS):
......@@ -46,10 +45,10 @@ class SiteDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(SiteDataFS, self).__init__(app_dirs.site_data_dir, create=create)
super(self.__class__, self).__init__(app_dirs.site_data_dir, create=create)
class UserCacheFS(OSFS):
......@@ -61,10 +60,10 @@ class UserCacheFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserCacheFS, self).__init__(app_dirs.user_cache_dir, create=create)
super(self.__class__, self).__init__(app_dirs.user_cache_dir, create=create)
class UserLogFS(OSFS):
......@@ -76,14 +75,13 @@ class UserLogFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserLogFS, self).__init__(app_dirs.user_log_dir, create=create)
super(self.__class__, self).__init__(app_dirs.user_log_dir, create=create)
if __name__ == "__main__":
udfs = UserDataFS('exampleapp', appauthor='pyfs')
udfs = UserDataFS('sexytime', appauthor='pyfs')
print udfs
udfs2 = UserDataFS('exampleapp2', appauthor='pyfs', create=False)
udfs2 = UserDataFS('sexytime2', appauthor='pyfs', create=False)
print udfs2
import fnmatch
from itertools import chain
import re
class BatchError(Exception):
    """Raised when a batch operation cannot be completed."""
def _params(*args, **kwargs):
return (args, kwargs)
class BatchBase(object):
    """Base class for chainable batch commands over (fs, path) pairs.

    Subclasses queue named commands (e.g. ``filter``/``exclude``) on a
    stack; :meth:`_eval` then lazily pipes an iterable of ``(fs, path)``
    pairs through a generator for each queued command.
    """

    def __init__(self):
        self._stack = []
        self._eval_cache = None
        self._eval_level = 0

    def _eval(self, paths):
        """Yield the (fs, path) pairs from *paths* that survive every
        queued command.

        :param paths: iterable of ``(fs, path)`` tuples
        """
        operations = []
        for cmd in self._stack[::-1]:
            cmd_name, (args, kwargs) = cmd
            cmd_func = getattr(self, '_cmd_' + cmd_name, None)
            assert cmd_func is not None, "Unknown batch command"
            # Bug fix: bind the current command via default arguments.
            # Previously the lambda closed over the loop variables, so by
            # the time the generators ran, every operation invoked the
            # *last* command on the stack (late-binding closure bug).
            operations.append(
                lambda paths, _func=cmd_func, _args=args, _kwargs=kwargs:
                    _func(paths, *_args, **_kwargs))

        def recurse_operations(op_index=0):
            # Chain the command generators: operation N consumes the
            # output of operation N+1; the innermost level yields the
            # raw input paths.
            if op_index >= len(operations):
                for fs, path in paths:
                    yield fs, path
            else:
                for fs, path in operations[op_index](recurse_operations(op_index + 1)):
                    yield fs, path

        for fs, path in recurse_operations():
            yield fs, path

    def filter(self, *wildcards):
        """Queue a command keeping only paths that match at least one of
        *wildcards*; returns self for chaining."""
        cmd = ('filter', _params(wildcards))
        self._stack.append(cmd)
        return self

    def exclude(self, *wildcards):
        """Queue a command dropping paths that match any of *wildcards*;
        returns self for chaining."""
        cmd = ('exclude', _params(wildcards))
        self._stack.append(cmd)
        return self

    def _cmd_filter(self, fs_paths, wildcards):
        wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards]
        for fs, path in fs_paths:
            for wildcard_re in wildcard_res:
                if wildcard_re.match(path):
                    yield fs, path
                    # Bug fix: stop after the first match — a path
                    # matching several wildcards was yielded once per
                    # matching wildcard (duplicate results).
                    break

    def _cmd_exclude(self, fs_paths, wildcards):
        wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards]
        for fs, path in fs_paths:
            for wildcard_re in wildcard_res:
                if wildcard_re.match(path):
                    break
            else:
                yield fs, path
class Batch(BatchBase):
    """A batch rooted at one or more filesystem objects.

    Iterating over a ``Batch`` yields ``(fs, path)`` pairs for every file
    found on the wrapped filesystems, after any queued filter/exclude
    commands have been applied.
    """

    def __init__(self, *fs, **kwargs):
        super(Batch, self).__init__()
        self.fs_list = fs
        # When recursive, walk the whole tree; otherwise list one level.
        self.recursive = kwargs.get('recursive', False)

    def path_iter(self, fs_list):
        """Yield (fs, path) pairs for every file on each fs in *fs_list*."""
        if self.recursive:
            for filesystem in fs_list:
                for file_path in filesystem.walkfiles():
                    yield filesystem, file_path
        else:
            for filesystem in fs_list:
                for file_path in filesystem.listdir(full=True, absolute=True):
                    yield filesystem, file_path

    def __iter__(self):
        return self._eval(self.path_iter(self.fs_list))

    def paths(self):
        """Yield just the path component of each (fs, path) pair."""
        for _fs, file_path in self:
            yield file_path
class BatchList(BatchBase):
    """A batch over an explicit sequence of paths on a single filesystem."""

    def __init__(self, fs, paths):
        # Bug fix: BatchBase.__init__ was never called, so the command
        # stack used by _eval did not exist (AttributeError on iteration).
        super(BatchList, self).__init__()
        self.fs_list = [(fs, path) for path in paths]

    def __iter__(self):
        # Bug fix: __iter__ previously returned the list itself, which is
        # not an iterator (TypeError from iter()), and never applied the
        # queued commands; evaluate the stack, consistent with
        # Batch.__iter__.
        return self._eval(iter(self.fs_list))
class BatchOp(Batch):
    """A batch that queues destructive operations (e.g. ``remove``) and
    applies them to every matched ``(fs, path)`` pair."""

    def __init__(self):
        # Bug fix: was ``super(BatchBase, self).__init__(None)``, which
        # skipped Batch.__init__ entirely and called object.__init__ with
        # an argument (a TypeError), leaving fs_list/recursive unset.
        super(BatchOp, self).__init__()
        self._op_stack = []

    def remove(self):
        """Queue a remove operation; returns self for chaining."""
        cmd = ('remove', _params())
        self._op_stack.append(cmd)
        return self

    def _op_remove(self, fs, path):
        fs.remove(path)

    def apply(self, fs=None, ignore_errors=False):
        """Run every queued operation on every matched path.

        :param fs: optional extra filesystem to operate on, in addition
            to any passed to the constructor
        :param ignore_errors: if True, exceptions raised by individual
            operations are swallowed
        """
        def do_call(func, *args, **kwargs):
            return func(*args, **kwargs)

        def ignore_exceptions(func, *args, **kwargs):
            # Bug fix: the parameter was spelled '*arg' while the body
            # used 'args', raising NameError whenever ignore_errors=True.
            try:
                return func(*args, **kwargs)
            except Exception:
                return None

        call_cmd = ignore_exceptions if ignore_errors else do_call

        # Bug fix: path_iter() was called without its required fs_list
        # argument, and the 'fs' parameter was accepted but never used.
        fs_list = tuple(self.fs_list)
        if fs is not None:
            fs_list += (fs,)
        for fs, path in self.path_iter(fs_list):
            for cmd in self._op_stack:
                cmd_name, (args, kwargs) = cmd
                cmd_func = getattr(self, '_op_' + cmd_name)
                call_cmd(cmd_func, fs, path, *args, **kwargs)
# Ad-hoc manual test of the Batch API (Python 2 syntax: 'print' statement).
if __name__ == "__main__":
    from fs.osfs import OSFS
    # NOTE(review): hard-coded developer path — this only works on the
    # original author's machine; point it at a local directory to try it.
    test_fs = OSFS("/home/will/projects/meshminds/meshminds")
    # Recursively list every file except Python sources and HTML.
    b = Batch(test_fs, recursive=True).exclude("*.py", "*.html")
    print list(b.paths())
    #b=BatchBase()
    #b.filter('*.py')
    #print b._eval([[None, 'a/b/c.py'],
    #               [None, 'a/b/c.pyw']])
\ No newline at end of file
......@@ -8,17 +8,17 @@ from collections import defaultdict
import sys
class FSls(Command):
usage = """fsls [OPTIONS]... [PATH]
List contents of [PATH]"""
def get_optparse(self):
optparse = super(FSls, self).get_optparse()
optparse = super(FSls, self).get_optparse()
optparse.add_option('-u', '--full', dest='fullpath', action="store_true", default=False,
help="output full path", metavar="FULL")
optparse.add_option('-s', '--syspath', dest='syspath', action="store_true", default=False,
help="output system path (if one exists)", metavar="SYSPATH")
help="output system path (if one exists)", metavar="SYSPATH")
optparse.add_option('-r', '--url', dest='url', action="store_true", default=False,
help="output URL in place of path (if one exists)", metavar="URL")
optparse.add_option('-d', '--dirsonly', dest='dirsonly', action="store_true", default=False,
......@@ -29,73 +29,73 @@ List contents of [PATH]"""
help="use a long listing format", metavar="LONG")
optparse.add_option('-a', '--all', dest='all', action='store_true', default=False,
help="do not hide dot files")
return optparse
def do_run(self, options, args):
def do_run(self, options, args):
output = self.output
if not args:
args = [u'.']
dir_paths = []
dir_paths = []
file_paths = []
fs_used = set()
fs_used = set()
for fs_url in args:
fs, path = self.open_fs(fs_url)
fs_used.add(fs)
fs, path = self.open_fs(fs_url)
fs_used.add(fs)
path = path or '.'
wildcard = None
if iswildcard(path):
path, wildcard = pathsplit(path)
if path != '.' and fs.isfile(path):
if not options.dirsonly:
if path != '.' and fs.isfile(path):
if not options.dirsonly:
file_paths.append(path)
else:
else:
if not options.filesonly:
dir_paths += fs.listdir(path,
wildcard=wildcard,
full=options.fullpath or options.url,
dirs_only=True)
if not options.dirsonly:
if not options.dirsonly:
file_paths += fs.listdir(path,
wildcard=wildcard,
full=options.fullpath or options.url,
full=options.fullpath or options.url,
files_only=True)
for fs in fs_used:
try:
fs.close()
except FSError:
pass
if options.syspath:
# Path without a syspath, just won't be displayed
dir_paths = filter(None, [fs.getsyspath(path, allow_none=True) for path in dir_paths])
file_paths = filter(None, [fs.getsyspath(path, allow_none=True) for path in file_paths])
if options.url:
# Path without a syspath, just won't be displayed
dir_paths = filter(None, [fs.getpathurl(path, allow_none=True) for path in dir_paths])
file_paths = filter(None, [fs.getpathurl(path, allow_none=True) for path in file_paths])
dirs = frozenset(dir_paths)
paths = sorted(dir_paths + file_paths, key=lambda p: p.lower())
if not options.all:
paths = sorted(dir_paths + file_paths, key=lambda p:p.lower())
if not options.all:
paths = [path for path in paths if not isdotfile(path)]
if not paths:
return
return
def columnize(paths, num_columns):
col_height = (len(paths) + num_columns - 1) / num_columns
columns = [[] for _ in xrange(num_columns)]
col_height = (len(paths) + num_columns - 1) / num_columns
columns = [[] for _ in xrange(num_columns)]
col_no = 0
col_pos = 0
for path in paths:
......@@ -104,72 +104,74 @@ List contents of [PATH]"""
if col_pos >= col_height:
col_no += 1
col_pos = 0
padded_columns = []
wrap_filename = self.wrap_filename
wrap_dirname = self.wrap_dirname
def wrap(path):
def wrap(path):
if path in dirs:
return wrap_dirname(path.ljust(max_width))
else:
return wrap_filename(path.ljust(max_width))
for column in columns:
if column:
max_width = max([len(path) for path in column])
else:
max_width = 1
max_width = min(max_width, terminal_width)
max_width = min(max_width, terminal_width)
padded_columns.append([wrap(path) for path in column])
return padded_columns
def condense_columns(columns):
max_column_height = max([len(col) for col in columns])
lines = [[] for _ in xrange(max_column_height)]
for column in columns:
for line, path in zip(lines, column):
line.append(path)
line.append(path)
return '\n'.join(u' '.join(line) for line in lines)
if options.long:
if options.long:
for path in paths:
if path in dirs:
output((self.wrap_dirname(path), '\n'))
else:
output((self.wrap_filename(path), '\n'))
else:
output((self.wrap_filename(path), '\n'))
else:
terminal_width = self.terminal_width
path_widths = [len(path) for path in paths]
smallest_paths = min(path_widths)
num_paths = len(paths)
num_cols = min(terminal_width // (smallest_paths + 2), num_paths)
while num_cols:
col_height = (num_paths + num_cols - 1) // num_cols
line_width = 0
for col_no in xrange(num_cols):
try:
col_width = max(path_widths[col_no * col_height: (col_no + 1) * col_height])
smallest_paths = min(path_widths)
num_paths = len(paths)
num_cols = min(terminal_width / (smallest_paths + 2), num_paths)
while num_cols:
col_height = (num_paths + num_cols - 1) / num_cols
line_width = 0
for col_no in xrange(num_cols):
try:
col_width = max(path_widths[col_no*col_height:(col_no + 1) * col_height])
except ValueError:
continue
line_width += col_width
if line_width > terminal_width:
break
line_width += col_width
if line_width > terminal_width:
break;
line_width += 2
else:
if line_width - 1 <= terminal_width:
break
num_cols -= 1
num_cols = max(1, num_cols)
columns = columnize(paths, num_cols)
output((condense_columns(columns), '\n'))
num_cols -= 1
num_cols = max(1, num_cols)
columns = columnize(paths, num_cols)
output((condense_columns(columns), '\n'))
def run():
return FSls().run()
return FSls().run()
if __name__ == "__main__":
sys.exit(run())
\ No newline at end of file
......@@ -8,9 +8,8 @@ import os.path
platform = platform.system()
class FSMount(Command):
if platform == "Windows":
usage = """fsmount [OPTIONS]... [FS] [DRIVE LETTER]
or fsmount -u [DRIVER LETTER]
......@@ -21,57 +20,58 @@ or fsmount -u [SYSTEM PATH]
Mounts a file system on a system path"""
version = "1.0"
def get_optparse(self):
optparse = super(FSMount, self).get_optparse()
optparse = super(FSMount, self).get_optparse()
optparse.add_option('-f', '--foreground', dest='foreground', action="store_true", default=False,
help="run the mount process in the foreground", metavar="FOREGROUND")
optparse.add_option('-u', '--unmount', dest='unmount', action="store_true", default=False,
help="unmount path", metavar="UNMOUNT")
optparse.add_option('-n', '--nocache', dest='nocache', action="store_true", default=False,
help="do not cache network filesystems", metavar="NOCACHE")
return optparse
def do_run(self, options, args):
windows = platform == "Windows"
if options.unmount:
if windows:
try:
try:
mount_path = args[0][:1]
except IndexError:
self.error('Driver letter required\n')
return 1
from fs.expose import dokan
mount_path = mount_path[:1].upper()
self.output('unmounting %s:...\n' % mount_path, True)
dokan.unmount(mount_path)
dokan.unmount(mount_path)
return
else:
try:
else:
try:
mount_path = args[0]
except IndexError:
self.error(self.usage + '\n')
return 1
return 1
from fs.expose import fuse
self.output('unmounting %s...\n' % mount_path, True)
fuse.unmount(mount_path)
fuse.unmount(mount_path)
return
try:
fs_url = args[0]
except IndexError:
self.error(self.usage + '\n')
return 1
try:
try:
mount_path = args[1]
except IndexError:
if windows:
......@@ -79,61 +79,62 @@ Mounts a file system on a system path"""
self.error(self.usage + '\n')
else:
self.error(self.usage + '\n')
return 1
return 1
fs, path = self.open_fs(fs_url, create_dir=True)
if path:
if not fs.isdir(path):
self.error('%s is not a directory on %s' % (fs_url, fs))
self.error('%s is not a directory on %s' % (fs_url. fs))
return 1
fs = fs.opendir(path)
path = '/'
if not options.nocache:
fs.cache_hint(True)
if windows:
from fs.expose import dokan
if len(mount_path) > 1:
self.error('Driver letter should be one character')
return 1
self.output("Mounting %s on %s:\n" % (fs, mount_path), True)
flags = dokan.DOKAN_OPTION_REMOVABLE
if options.debug:
flags |= dokan.DOKAN_OPTION_DEBUG | dokan.DOKAN_OPTION_STDERR
mp = dokan.mount(fs,
mount_path,
numthreads=5,
foreground=options.foreground,
flags=flags,
volname=str(fs))
else:
else:
if not os.path.exists(mount_path):
try:
os.makedirs(mount_path)
except:
pass
from fs.expose import fuse
self.output("Mounting %s on %s\n" % (fs, mount_path), True)
if options.foreground:
fuse_process = fuse.mount(fs,
mount_path,
foreground=True)
mount_path,
foreground=True)
else:
if not os.fork():
mp = fuse.mount(fs,
mount_path,
foreground=True)
else:
fs.close = lambda: None
fs.close = lambda:None
def run():
return FSMount().run()
if __name__ == "__main__":
sys.exit(run())
\ No newline at end of file
......@@ -5,66 +5,58 @@ import sys
from fs.opener import opener
from fs.commands.runner import Command
from fs.utils import print_fs
import errno
class FSServe(Command):
usage = """fsserve [OPTION]... [PATH]
Serves the contents of PATH with one of a number of methods"""
def get_optparse(self):
optparse = super(FSServe, self).get_optparse()
optparse = super(FSServe, self).get_optparse()
optparse.add_option('-t', '--type', dest='type', type="string", default="http",
help="Server type to create (http, rpc, sftp)", metavar="TYPE")
optparse.add_option('-a', '--addr', dest='addr', type="string", default="127.0.0.1",
help="Server address", metavar="ADDR")
optparse.add_option('-p', '--port', dest='port', type="int",
help="Port number", metavar="")
help="Port number", metavar="")
return optparse
def do_run(self, options, args):
def do_run(self, options, args):
try:
fs_url = args[0]
except IndexError:
fs_url = './'
fs_url = './'
fs, path = self.open_fs(fs_url)
if fs.isdir(path):
fs = fs.opendir(path)
path = '/'
self.output("Opened %s\n" % fs, verbose=True)
port = options.port
try:
if options.type == 'http':
from fs.expose.http import serve_fs
if options.type == 'http':
from fs.expose.http import serve_fs
if port is None:
port = 80
self.output("Starting http server on %s:%i\n" % (options.addr, port), verbose=True)
serve_fs(fs, options.addr, port)
elif options.type == 'rpc':
self.output("Starting http server on %s:%i\n" % (options.addr, port), verbose=True)
serve_fs(fs, options.addr, port)
elif options.type == 'rpc':
from fs.expose.xmlrpc import RPCFSServer
if port is None:
port = 80
s = RPCFSServer(fs, (options.addr, port))
self.output("Starting rpc server on %s:%i\n" % (options.addr, port), verbose=True)
s.serve_forever()
elif options.type == 'ftp':
from fs.expose.ftp import serve_fs
if port is None:
port = 21
self.output("Starting ftp server on %s:%i\n" % (options.addr, port), verbose=True)
serve_fs(fs, options.addr, port)
elif options.type == 'sftp':
s.serve_forever()
elif options.type == 'sftp':
from fs.expose.sftp import BaseSFTPServer
import logging
log = logging.getLogger('paramiko')
......@@ -75,7 +67,7 @@ Serves the contents of PATH with one of a number of methods"""
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
if port is None:
port = 22
server = BaseSFTPServer((options.addr, port), fs)
......@@ -86,20 +78,20 @@ Serves the contents of PATH with one of a number of methods"""
pass
finally:
server.server_close()
else:
self.error("Server type '%s' not recognised\n" % options.type)
except IOError, e:
if e.errno == errno.EACCES:
if e.errno == 13:
self.error('Permission denied\n')
return 1
else:
self.error(str(e) + '\n')
return 1
def run():
return FSServe().run()
else:
self.error(e.strerror + '\n')
return 1
def run():
return FSServe().run()
if __name__ == "__main__":
sys.exit(run())
......@@ -19,12 +19,8 @@ Recursively display the contents of PATH in an ascii tree"""
help="browse the tree with a gui")
optparse.add_option('-a', '--all', dest='all', action='store_true', default=False,
help="do not hide dot files")
optparse.add_option('--dirsfirst', dest='dirsfirst', action='store_true', default=False,
optparse.add_option('-d', '--dirsfirst', dest='dirsfirst', action='store_true', default=False,
help="List directories before files")
optparse.add_option('-P', dest="pattern", default=None,
help="Only list files that match the given pattern")
optparse.add_option('-d', dest="dirsonly", default=False, action='store_true',
help="List directories only")
return optparse
def do_run(self, options, args):
......@@ -47,24 +43,12 @@ Recursively display the contents of PATH in an ascii tree"""
max_levels = None
else:
max_levels = options.depth
self.output(self.wrap_dirname(args[0] + '\n'))
dircount, filecount = print_fs(fs, path or '',
file_out=self.output_file,
max_levels=max_levels,
terminal_colors=self.terminal_colors,
hide_dotfiles=not options.all,
dirs_first=options.dirsfirst,
files_wildcard=options.pattern,
dirs_only=options.dirsonly)
self.output('\n')
def pluralize(one, many, count):
if count == 1:
return '%i %s' % (count, one)
else:
return '%i %s' % (count, many)
self.output("%s, %s\n" % (pluralize('directory', 'directories', dircount),
pluralize('file', 'files', filecount)))
print_fs(fs, path or '',
file_out=self.output_file,
max_levels=max_levels,
terminal_colors=self.terminal_colors,
hide_dotfiles=not options.all,
dirs_first=options.dirsfirst)
def run():
return FSTree().run()
......
"""
Some functions for Python3 compatibility.
Not for general usage, the functionality in this file is exposed elsewhere
"""
import six
from six import PY3
def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callback=None, finished_callback=None):
    """Copy data from a string or a file-like object to a given fs/path.

    :param data: either a string (text or bytes) or an object with a
        ``read`` method
    :param dst_fs: destination filesystem
    :param dst_path: destination path on ``dst_fs``
    :param chunk_size: bytes read per iteration when *data* is file-like
    :param progress_callback: called with the running byte count as the
        copy proceeds (called once with 0 before any data is written)
    :param finished_callback: called once the copy has completed
    """
    if progress_callback is None:
        progress_callback = lambda bytes_written: None

    def open_dest(sample):
        # Text data gets a text-mode file, anything else binary mode.
        mode = 'w' if isinstance(sample, six.text_type) else 'wb'
        return dst_fs.open(dst_path, mode)

    written = 0
    dst_file = None
    try:
        progress_callback(written)
        if hasattr(data, "read"):
            reader = data.read
            # Read the first chunk up front so its type can decide the
            # open mode (text vs binary).
            chunk = reader(chunk_size)
            dst_file = open_dest(chunk)
            while chunk:
                dst_file.write(chunk)
                written += len(chunk)
                progress_callback(written)
                chunk = reader(chunk_size)
        else:
            dst_file = open_dest(data)
            dst_file.write(data)
            written += len(data)
            progress_callback(written)
        if hasattr(dst_file, 'flush'):
            dst_file.flush()
        if finished_callback is not None:
            finished_callback()
    finally:
        if dst_file is not None:
            dst_file.close()
......@@ -41,15 +41,11 @@ from fs.base import *
from fs.path import *
from fs.errors import *
from fs.remote import RemoteFileBuffer
from fs import iotools
from fs.contrib.davfs.util import *
from fs.contrib.davfs import xmlobj
from fs.contrib.davfs.xmlobj import *
import six
from six import b
import errno
_RETRYABLE_ERRORS = [errno.EADDRINUSE]
try:
......@@ -86,12 +82,12 @@ class DAVFS(FS):
"http": 80,
"https": 443,
}
_meta = { 'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
'network' : True
'network' : True
}
def __init__(self,url,credentials=None,get_credentials=None,thread_synchronize=True,connection_classes=None,timeout=None):
......@@ -123,7 +119,7 @@ class DAVFS(FS):
self.url = url
pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
resp = self._request("/","PROPFIND",pf.render(),{"Depth":"0"})
try:
try:
if resp.status == 404:
raise ResourceNotFoundError("/",msg="root url gives 404")
if resp.status in (401,403):
......@@ -149,9 +145,9 @@ class DAVFS(FS):
if not port:
try:
port = self._DEFAULT_PORT_NUMBERS[scheme]
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
# Can we re-use an existing connection?
with self._connection_lock:
now = time.time()
......@@ -167,12 +163,12 @@ class DAVFS(FS):
return (False,con)
self._discard_connection(con)
# Nope, we need to make a fresh one.
try:
ConClass = self.connection_classes[scheme]
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
con = ConClass(url.hostname,url.port,timeout=self.timeout)
try:
ConClass = self.connection_classes[scheme]
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
con = ConClass(url.hostname,url.port,timeout=self.timeout)
self._connections.append(con)
return (True,con)
......@@ -184,9 +180,9 @@ class DAVFS(FS):
if not port:
try:
port = self._DEFAULT_PORT_NUMBERS[scheme]
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
with self._connection_lock:
now = time.time()
try:
......@@ -258,7 +254,7 @@ class DAVFS(FS):
resp = None
try:
resp = self._raw_request(url,method,body,headers)
# Loop to retry for redirects and authentication responses.
# Loop to retry for redirects and authentication responses.
while resp.status in (301,302,401,403):
resp.close()
if resp.status in (301,302,):
......@@ -270,7 +266,7 @@ class DAVFS(FS):
raise OperationFailedError(msg="redirection seems to be looping")
if len(visited) > 10:
raise OperationFailedError("too much redirection")
elif resp.status in (401,403):
elif resp.status in (401,403):
if self.get_credentials is None:
break
else:
......@@ -278,7 +274,7 @@ class DAVFS(FS):
if creds is None:
break
else:
self.credentials = creds
self.credentials = creds
resp = self._raw_request(url,method,body,headers)
except Exception:
if resp is not None:
......@@ -345,10 +341,8 @@ class DAVFS(FS):
msg = str(e)
raise RemoteConnectionError("",msg=msg,details=e)
def setcontents(self,path, data=b'', encoding=None, errors=None, chunk_size=1024 * 64):
if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
resp = self._request(path, "PUT", data)
def setcontents(self,path, contents, chunk_size=1024*64):
resp = self._request(path,"PUT",contents)
resp.close()
if resp.status == 405:
raise ResourceInvalidError(path)
......@@ -357,11 +351,10 @@ class DAVFS(FS):
if resp.status not in (200,201,204):
raise_generic_error(resp,"setcontents",path)
@iotools.filelike_to_stream
def open(self,path,mode="r", **kwargs):
def open(self,path,mode="r"):
mode = mode.replace("b","").replace("t","")
# Truncate the file if requested
contents = b("")
contents = ""
if "w" in mode:
self.setcontents(path,contents)
else:
......@@ -371,7 +364,7 @@ class DAVFS(FS):
if "a" not in mode:
contents.close()
raise ResourceNotFoundError(path)
contents = b("")
contents = ""
self.setcontents(path,contents)
elif contents.status in (401,403):
contents.close()
......@@ -422,7 +415,7 @@ class DAVFS(FS):
if self._isurl(path,res.href):
for ps in res.propstats:
if ps.props.getElementsByTagNameNS("DAV:","collection"):
return True
return True
return False
finally:
response.close()
......@@ -442,11 +435,11 @@ class DAVFS(FS):
rt = ps.props.getElementsByTagNameNS("DAV:","resourcetype")
cl = ps.props.getElementsByTagNameNS("DAV:","collection")
if rt and not cl:
return True
return True
return False
finally:
response.close()
def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
return list(self.ilistdir(path=path,wildcard=wildcard,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only))
......
......@@ -73,8 +73,6 @@ from fs.base import fnmatch, NoDefaultMeta
from util import TahoeUtil
from connection import Connection
from six import b
logger = fs.getLogger('fs.tahoelafs')
def _fix_path(func):
......@@ -157,7 +155,7 @@ class _TahoeLAFS(FS):
self._log(DEBUG, 'Creating empty file %s' % path)
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
self.setcontents(path, b(''))
self.setcontents(path, '')
handler = NullFile()
else:
self._log(DEBUG, 'Opening existing file %s for reading' % path)
......
......@@ -11,8 +11,6 @@ catch-all exception.
__all__ = ['FSError',
'CreateFailedError',
'PathError',
'InvalidPathError',
'InvalidCharsInPathError',
'OperationFailedError',
'UnsupportedError',
'RemoteConnectionError',
......@@ -20,26 +18,23 @@ __all__ = ['FSError',
'PermissionDeniedError',
'FSClosedError',
'OperationTimeoutError',
'RemoveRootError',
'ResourceError',
'NoSysPathError',
'NoMetaError',
'NoPathURLError',
'ResourceNotFoundError',
'ResourceInvalidError',
'ResourceInvalidError',
'DestinationExistsError',
'DirectoryNotEmptyError',
'ParentDirectoryMissingError',
'ResourceLockedError',
'NoMMapError',
'BackReferenceError',
'convert_fs_errors',
'convert_os_errors',
'convert_os_errors'
]
import sys
import errno
import six
from fs.path import *
from fs.local_functools import wraps
......@@ -64,17 +59,13 @@ class FSError(Exception):
return str(self.msg % keys)
def __unicode__(self):
keys = {}
for k,v in self.__dict__.iteritems():
if isinstance(v, six.binary_type):
v = v.decode(sys.getfilesystemencoding(), 'replace')
keys[k] = v
return unicode(self.msg, encoding=sys.getfilesystemencoding(), errors='replace') % keys
return unicode(self.msg) % self.__dict__
def __reduce__(self):
return (self.__class__,(),self.__dict__.copy(),)
class CreateFailedError(FSError):
"""An exception thrown when a FS could not be created"""
default_message = "Unable to create filesystem"
......@@ -88,17 +79,7 @@ class PathError(FSError):
def __init__(self,path="",**kwds):
self.path = path
super(PathError,self).__init__(**kwds)
class InvalidPathError(PathError):
"""Base exception for fs paths that can't be mapped on to the underlaying filesystem."""
default_message = "Path is invalid on this filesystem %(path)s"
class InvalidCharsInPathError(InvalidPathError):
"""The path contains characters that are invalid on this filesystem"""
default_message = "Path contains invalid characters: %(path)s"
class OperationFailedError(FSError):
"""Base exception class for errors associated with a specific operation."""
......@@ -138,10 +119,6 @@ class OperationTimeoutError(OperationFailedError):
default_message = "Unable to %(opname)s: operation timed out"
class RemoveRootError(OperationFailedError):
default_message = "Can't remove root dir"
class ResourceError(FSError):
"""Base exception class for error associated with a specific resource."""
default_message = "Unspecified resource error: %(path)s"
......@@ -201,16 +178,11 @@ class ResourceLockedError(ResourceError):
"""Exception raised when a resource can't be used because it is locked."""
default_message = "Resource is locked: %(path)s"
class NoMMapError(ResourceError):
"""Exception raise when getmmap fails to create a mmap"""
default_message = "Can't get mmap for %(path)s"
class BackReferenceError(ValueError):
"""Exception raised when too many backrefs exist in a path (ex: '/..', '/docs/../..')."""
def convert_fs_errors(func):
"""Function wrapper to convert FSError instances into OSError."""
@wraps(func)
......@@ -266,10 +238,6 @@ def convert_os_errors(func):
raise OperationFailedError(opname,details=e),None,tb
if e.errno == errno.ENOENT:
raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
if e.errno == errno.EFAULT:
# This can happen when listdir a directory that is deleted by another thread
# Best to interpret it as a resource not found
raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
if e.errno == errno.ESRCH:
raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
if e.errno == errno.ENOTEMPTY:
......
"""
fs.expose.ftp
==============
Expose an FS object over FTP (via pyftpdlib).
This module provides the necessary interfaces to expose an FS object over
FTP, plugging into the infrastructure provided by the 'pyftpdlib' module.
To use this in combination with fsserve, do the following:
$ fsserve -t 'ftp' $HOME
The above will serve your home directory in read-only mode via anonymous FTP on the
loopback address.
"""
import os
import stat
import time
import errno
from functools import wraps
from pyftpdlib import ftpserver
from fs.path import *
from fs.osfs import OSFS
from fs.errors import convert_fs_errors
from fs import iotools
from six import text_type as unicode
# Get these once so we can reuse them:
UID = os.getuid()
GID = os.getgid()
def decode_args(f):
    """
    Decorate a method so that any ``str`` arguments are decoded to unicode.

    The charset comes from the ``encoding`` attribute defined on the
    method's class.  This is for use on methods (functions taking a class
    or instance as the first parameter); pyftpdlib (as of 0.7.0) passes
    ``str`` internally, so this conversion is necessary before handing
    paths on to pyfs.
    """
    @wraps(f)
    def wrapper(self, *args):
        decoded = [arg.decode(self.encoding) if isinstance(arg, str) else arg
                   for arg in args]
        return f(self, *decoded)
    return wrapper
class FakeStat(object):
    """
    Minimal stand-in for ``os.stat_result``.

    Pyftpdlib uses stat internally and expects stat-like attributes
    (st_mode, st_size, ...).  Every ``ST_*`` index name known to the
    ``stat`` module is lower-cased and initialised from keyword
    arguments, defaulting to 0.
    """
    def __init__(self, **kwargs):
        st_names = (n.lower() for n in dir(stat) if n.startswith('ST_'))
        for name in st_names:
            setattr(self, name, kwargs.get(name, 0))
class FTPFS(ftpserver.AbstractedFS):
    """
    The basic FTP Filesystem. This is a bridge between a pyfs filesystem and pyftpdlib's
    AbstractedFS. This class will cause the FTP server to serve the given fs instance.
    """
    encoding = 'utf8'
    "Sets the encoding to use for paths."

    def __init__(self, fs, root, cmd_channel, encoding=None):
        # fs: the pyfs filesystem instance to expose over FTP.
        self.fs = fs
        if encoding is not None:
            self.encoding = encoding
        super(FTPFS, self).__init__(root, cmd_channel)

    def close(self):
        # Close and dereference the pyfs file system.
        if self.fs:
            self.fs.close()
            self.fs = None

    def validpath(self, path):
        # A path is considered valid if pyfs can normalise it without error.
        try:
            normpath(path)
            return True
        except:
            return False

    @convert_fs_errors
    @decode_args
    @iotools.filelike_to_stream
    def open(self, path, mode, **kwargs):
        # Delegate straight to the wrapped fs; decorators handle error
        # translation, path decoding and file-like wrapping.
        return self.fs.open(path, mode, **kwargs)

    @convert_fs_errors
    def chdir(self, path):
        # We dont' use the decorator here, we actually decode a version of the
        # path for use with pyfs, but keep the original for use with pyftpdlib.
        if not isinstance(path, unicode):
            # pyftpdlib 0.7.x
            unipath = unicode(path, self.encoding)
        else:
            # pyftpdlib 1.x
            unipath = path
        # TODO: can the following conditional checks be farmed out to the fs?
        # If we don't raise an error here for files, then the FTP server will
        # happily allow the client to CWD into a file. We really only want to
        # allow that for directories.
        if self.fs.isfile(unipath):
            raise OSError(errno.ENOTDIR, 'Not a directory')
        # similarly, if we don't check for existence, the FTP server will allow
        # the client to CWD into a non-existent directory.
        if not self.fs.exists(unipath):
            raise OSError(errno.ENOENT, 'Does not exist')
        # We use the original path here, so we don't corrupt self._cwd
        self._cwd = self.ftp2fs(path)

    @convert_fs_errors
    @decode_args
    def mkdir(self, path):
        self.fs.makedir(path)

    @convert_fs_errors
    @decode_args
    def listdir(self, path):
        # pyftpdlib expects encoded byte strings back, not unicode.
        return map(lambda x: x.encode(self.encoding), self.fs.listdir(path))

    @convert_fs_errors
    @decode_args
    def rmdir(self, path):
        self.fs.removedir(path)

    @convert_fs_errors
    @decode_args
    def remove(self, path):
        self.fs.remove(path)

    @convert_fs_errors
    @decode_args
    def rename(self, src, dst):
        self.fs.rename(src, dst)

    @convert_fs_errors
    @decode_args
    def chmod(self, path, mode):
        # chmod requests are accepted but deliberately ignored (no-op).
        return

    @convert_fs_errors
    @decode_args
    def stat(self, path):
        # Build a FakeStat from whatever info the wrapped fs provides,
        # synthesising missing fields with sensible defaults.
        info = self.fs.getinfo(path)
        kwargs = {
            'st_size': info.get('size'),
        }
        # Give the fs a chance to provide the uid/gid. Otherwise echo the current
        # uid/gid.
        kwargs['st_uid'] = info.get('st_uid', UID)
        kwargs['st_gid'] = info.get('st_gid', GID)
        if 'st_atime' in info:
            kwargs['st_atime'] = info['st_atime']
        elif 'accessed_time' in info:
            kwargs['st_atime'] = time.mktime(info["accessed_time"].timetuple())
        if 'st_mtime' in info:
            kwargs['st_mtime'] = info.get('st_mtime')
        elif 'modified_time' in info:
            kwargs['st_mtime'] = time.mktime(info["modified_time"].timetuple())
        # Pyftpdlib uses st_ctime on Windows platform, try to provide it.
        if 'st_ctime' in info:
            kwargs['st_ctime'] = info['st_ctime']
        elif 'created_time' in info:
            kwargs['st_ctime'] = time.mktime(info["created_time"].timetuple())
        elif 'st_mtime' in kwargs:
            # As a last resort, just copy the modified time.
            kwargs['st_ctime'] = kwargs['st_mtime']
        # Try to use existing mode.
        if 'st_mode' in info:
            kwargs['st_mode'] = info['st_mode']
        elif 'mode' in info:
            kwargs['st_mode'] = info['mode']
        else:
            # Otherwise, build one. Not executable by default.
            mode = 0660
            # Merge in the type (dir or file). File is tested first, some file systems
            # such as ArchiveMountFS treat archive files as directories too. By checking
            # file first, any such files will be only files (not directories).
            if self.fs.isfile(path):
                mode |= stat.S_IFREG
            elif self.fs.isdir(path):
                mode |= stat.S_IFDIR
                mode |= 0110 # Merge in exec bit to signal dir is listable
            kwargs['st_mode'] = mode
        return FakeStat(**kwargs)

    # No link support...
    lstat = stat

    @convert_fs_errors
    @decode_args
    def isfile(self, path):
        return self.fs.isfile(path)

    @convert_fs_errors
    @decode_args
    def isdir(self, path):
        return self.fs.isdir(path)

    @convert_fs_errors
    @decode_args
    def getsize(self, path):
        return self.fs.getsize(path)

    @convert_fs_errors
    @decode_args
    def getmtime(self, path):
        return self.stat(path).st_mtime

    def realpath(self, path):
        # Virtual filesystems have no symlinks to resolve.
        return path

    def lexists(self, path):
        # No link support, so existence checks are handled elsewhere.
        return True
class FTPFSHandler(ftpserver.FTPHandler):
    """
    An FTPHandler that also closes the pyfs filesystem when the
    connection is torn down.
    """
    def close(self):
        # Release the FTPFS (which closes the wrapped pyfs filesystem)
        # before the normal handler shutdown.
        fs = self.fs
        if fs:
            fs.close()
        super(FTPFSHandler, self).close()
class FTPFSFactory(object):
    """
    Callable factory binding a pyfs filesystem (and optional path
    encoding) so they can be handed to each FTPFS that pyftpdlib
    creates.  This lets multiple FTPFS instances share a single fs.
    """

    def __init__(self, fs, encoding=None):
        """Store the shared fs instance and the path encoding to use."""
        self.fs = fs
        self.encoding = encoding

    def __call__(self, root, cmd_channel):
        """pyftpdlib entry point: build an FTPFS for a new session."""
        return FTPFS(self.fs, root, cmd_channel, encoding=self.encoding)
class HomeFTPFS(FTPFS):
    """
    A file system which serves a user's home directory.
    """
    def __init__(self, root, cmd_channel):
        """
        Serve an OSFS rooted at the given home directory.

        BUG FIX: the original called ``super(DemoFS, self)``, but no
        ``DemoFS`` class exists in this module, so instantiating
        HomeFTPFS raised a NameError.  The correct class for the
        super() call is ``HomeFTPFS``.
        """
        super(HomeFTPFS, self).__init__(OSFS(root_path=root), '/', cmd_channel)
def serve_fs(fs, addr, port):
    """
    Create and run a basic anonymous FTP server serving the given FS on
    the given address/port combo.

    :param fs: the pyfs filesystem to expose
    :param addr: interface address to bind to
    :param port: TCP port to listen on

    Note: this call blocks in ``serve_forever``.
    """
    # The original imported UnixAuthorizer here but never used it;
    # anonymous access is handled by DummyAuthorizer below.
    ftp_handler = FTPFSHandler
    ftp_handler.authorizer = ftpserver.DummyAuthorizer()
    ftp_handler.authorizer.add_anonymous('/')
    ftp_handler.abstracted_fs = FTPFSFactory(fs)
    server = ftpserver.FTPServer((addr, port), ftp_handler)
    server.serve_forever()
......@@ -22,27 +22,6 @@ from platform import machine, system
from stat import S_IFDIR
from traceback import print_exc
_system = system()
_machine = machine()
# Locate the fuse shared library.
# On OSX this can be provided by a number of different packages
# with slightly incompatible interfaces.
if _system == 'Darwin':
_libfuse_path = find_library('fuse4x') or find_library('fuse')
else:
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
if _system == 'Darwin':
_libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
_libfuse = CDLL(_libfuse_path)
# Check whether OSX is using the legacy "macfuse" system.
# This has a different struct layout than the newer fuse4x system.
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
_system = 'Darwin-MacFuse'
class c_timespec(Structure):
    # ctypes mirror of the C ``struct timespec`` (seconds + nanoseconds).
    _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
......@@ -53,7 +32,9 @@ class c_utimbuf(Structure):
class c_stat(Structure):
    # _fields_ is assigned below, once the host platform is known.
    pass # Platform dependent
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
_system = system()
if _system in ('Darwin', 'FreeBSD'):
_libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
......@@ -67,44 +48,20 @@ if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
# OSX with fuse4x uses 64-bit inodes and so has a different
# struct layout. Other darwinish platforms use 32-bit inodes.
if _system == 'Darwin':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_ino', c_uint64),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_birthtimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32),
('st_flags', c_int32),
('st_gen', c_int32),
('st_lspare', c_int32),
('st_qspare', c_int64)]
else:
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
......@@ -282,6 +239,10 @@ def set_st_attrs(st, attrs):
setattr(st, key, val)
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
_libfuse = CDLL(_libfuse_path)
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
......
......@@ -17,15 +17,15 @@ def _datetime_to_epoch(d):
return mktime(d.timetuple())
class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""A hacked together version of SimpleHTTPRequestHandler"""
    def __init__(self, fs, request, client_address, server):
        # Stash the pyfs filesystem first: the stdlib handler's __init__
        # starts processing the request immediately, and the request
        # handlers need self._fs.
        self._fs = fs
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""Serve a GET request."""
"""Serve a GET request."""
f = None
try:
f = self.send_head()
......@@ -33,10 +33,10 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
try:
self.copyfile(f, self.wfile)
except socket.error:
pass
pass
finally:
if f is not None:
f.close()
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
......@@ -65,15 +65,15 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
ctype = self.guess_type(path)
try:
info = self._fs.getinfo(path)
f = self._fs.open(path, 'rb')
f = self._fs.open(path, 'r')
except FSError, e:
self.send_error(404, str(e))
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(info['size']))
if 'modified_time' in info:
self.send_header("Last-Modified", self.date_time_string(_datetime_to_epoch(info['modified_time'])))
......@@ -103,11 +103,11 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
parent = dirname(path)
if path != parent:
f.write('<li><a href="%s">../</a></li>' % urllib.quote(parent.rstrip('/') + '/'))
for path in paths:
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(path), cgi.escape(path)))
......@@ -119,45 +119,45 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
self.send_header("Content-Length", str(length))
self.end_headers()
return f
    def translate_path(self, path):
        """Map a request URL to an fs path, dropping query and fragment."""
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        # Un-escape %xx sequences and normalise '.'/'..' components.
        path = posixpath.normpath(urllib.unquote(path))
        return path
def serve_fs(fs, address='', port=8000):
    """Serve an FS instance over http

    :param fs: an FS object
    :param address: IP address to serve on
    :param port: port number

    Runs the server in a background thread and blocks the caller until
    interrupted (KeyboardInterrupt/SystemExit), then shuts down cleanly.

    BUG FIX: the original (a bad-merge artifact) created the TCPServer
    twice and started the serve_forever thread twice, leaking a socket
    and double-serving requests; each now happens exactly once.
    """
    def Handler(request, client_address, server):
        return FSHTTPRequestHandler(fs, request, client_address, server)

    #class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    #    pass
    httpd = SocketServer.TCPServer((address, port), Handler, bind_and_activate=False)
    #httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
    httpd.allow_reuse_address = True
    httpd.server_bind()
    httpd.server_activate()

    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.start()

    try:
        while True:
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        httpd.shutdown()
if __name__ == "__main__":
    # Demo: serve the user's home directory on the default port (8000).
    from fs.osfs import OSFS
    serve_fs(OSFS('~/'))
\ No newline at end of file
......@@ -42,8 +42,6 @@ from fs.opener import fsopendir, OpenerError
from fs.errors import *
from fs.path import *
from six import b
class FSImportHook(object):
"""PEP-302-compliant module finder and loader for FS objects.
......@@ -85,7 +83,7 @@ class FSImportHook(object):
import machinery of the running process, if it is not already
installed.
"""
for imp in enumerate(sys.path_hooks):
for i,imp in enumerate(sys.path_hooks):
try:
if issubclass(cls,imp):
break
......@@ -206,9 +204,9 @@ class FSImportHook(object):
if info is None:
info = self._get_module_info(fullname)
(path,type,ispkg) = info
code = self.fs.getcontents(path, 'rb')
code = self.fs.getcontents(path)
if type == imp.PY_SOURCE:
code = code.replace(b("\r\n"),b("\n"))
code = code.replace("\r\n","\n")
return compile(code,path,"exec")
elif type == imp.PY_COMPILED:
if code[:4] != imp.get_magic():
......@@ -225,12 +223,12 @@ class FSImportHook(object):
(path,type,ispkg) = info
if type != imp.PY_SOURCE:
return None
return self.fs.getcontents(path, 'rb').replace(b("\r\n"),b("\n"))
return self.fs.getcontents(path).replace("\r\n","\n")
def get_data(self,path):
"""Read the specified data file."""
try:
return self.fs.getcontents(path, 'rb')
return self.fs.getcontents(path)
except FSError, e:
raise IOError(str(e))
......
# Work in progress
\ No newline at end of file
try:
from json import dumps, loads
except ImportError:
from simplejson import dumps, loads
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def encode(header='', payload=''):
    """
    Encode one packet as ``<header-size>,<payload-size>:<header><payload>``.

    Sizes are decimal strings, with the empty string standing in for
    zero (matching FileEncoder.write and Decoder.feed).

    BUG FIX: the format string used ``%i`` for the size fields, but
    ``textsize`` returns *strings* ('' or str(len(s))), so every call
    raised ``TypeError: %i format: a number is required``.  ``%s`` is
    the correct conversion (as used by FileEncoder.write).
    """
    def textsize(s):
        # '' rather than '0' for an empty section keeps packets compact.
        if s:
            return str(len(s))
        return ''
    return '%s,%s:%s%s' % (textsize(header), textsize(payload), header, payload)
class FileEncoder(object):
    """Write packets in ``<sizes>:<header><payload>`` form to a file-like object."""

    def __init__(self, f):
        """``f`` is any object exposing a ``write`` method."""
        self.f = f

    def write(self, header='', payload=''):
        """Emit one packet; empty sections get an empty size field."""
        emit = self.f.write
        header_size = str(len(header)) if header else ''
        payload_size = str(len(payload)) if payload else ''
        emit('%s,%s:' % (header_size, payload_size))
        if header:
            emit(header)
        if payload:
            emit(payload)
class JSONFileEncoder(FileEncoder):
    """FileEncoder that serialises non-None headers as compact JSON."""

    def write(self, header=None, payload=''):
        # ``None`` means "no header"; anything else is JSON-encoded
        # without whitespace to keep the wire format small.
        if header is None:
            encoded = ''
        else:
            encoded = dumps(header, separators=(',', ':'))
        super(JSONFileEncoder, self).write(encoded, payload)
class DecoderError(Exception):
    """Base error raised for malformed packet-stream data."""
    pass


class PreludeError(DecoderError):
    """Raised when the stream's prelude line is missing or oversized."""
    pass
class Decoder(object):
    """
    Incremental parser for the packet stream format::

        [prelude '\\n'] <header-size>,<payload-size>:<header><payload> ...

    Feed it arbitrary chunks of data; :meth:`feed` yields
    ``(header, payload)`` tuples as complete packets become available.
    """

    # Parser states: waiting for the prelude line, the size field,
    # header bytes, or payload bytes.
    STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
    MAX_PRELUDE = 255

    def __init__(self, no_prelude=False, prelude_callback=None):
        """
        :param no_prelude: start directly in the size stage (no prelude line)
        :param prelude_callback: ``callable(decoder, prelude) -> bool``;
            returning False aborts the stream.
        """
        self.prelude_callback = prelude_callback
        # Set once the stream is unrecoverable; further feed() calls raise.
        self.stream_broken = False
        self.expecting_bytes = None
        self.stage = self.STAGE_PRELUDE
        self._prelude = []
        self._size = []
        # NOTE(review): the *_bytes attributes below appear unused by
        # this class; kept for backwards compatibility.
        self._expecting_bytes = None
        self.header_size = None
        self.payload_size = None
        self._header_bytes = None
        self._payload_bytes = None
        self._header_data = []
        self._payload_data = []
        self.header = None
        self.payload = None
        if no_prelude:
            self.stage = self.STAGE_SIZE

    def feed(self, data):
        """Consume a chunk, yielding ``(header, payload)`` per complete packet."""
        if self.stream_broken:
            raise DecoderError('Stream is broken')
        # Local aliases keep attribute lookups out of the inner loop.
        STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
        size_append = self._size.append
        header_append = self._header_data.append
        payload_append = self._payload_data.append
        datafind = data.find

        def reset_packet():
            # Clear per-packet state, ready for the next packet.
            self.expecting_bytes = None
            del self._header_data[:]
            del self._payload_data[:]
            self.header = None
            self.payload = None

        data_len = len(data)
        data_pos = 0
        expecting_bytes = self.expecting_bytes
        stage = self.stage
        if stage == STAGE_PRELUDE:
            max_find = min(len(data), data_pos + self.MAX_PRELUDE)
            cr_pos = datafind('\n', data_pos, max_find)
            if cr_pos == -1:
                self._prelude.append(data[data_pos:])
                data_pos = max_find
                if sum(len(s) for s in self._prelude) > self.MAX_PRELUDE:
                    self.stream_broken = True
                    raise PreludeError('Prelude not found')
            else:
                self._prelude.append(data[data_pos:cr_pos])
                if sum(len(s) for s in self._prelude) > self.MAX_PRELUDE:
                    self.stream_broken = True
                    raise PreludeError('Prelude not found')
                data_pos = cr_pos + 1
                prelude = ''.join(self._prelude)
                del self._prelude[:]
                reset_packet()
                if not self.on_prelude(prelude):
                    # BUG FIX: this was ``self.broken = True``, setting an
                    # attribute nothing reads; ``stream_broken`` is the
                    # flag feed() actually checks.
                    self.stream_broken = True
                    return
                stage = STAGE_SIZE
        while data_pos < data_len:
            if stage == STAGE_HEADER:
                bytes_to_read = min(data_len - data_pos, expecting_bytes)
                header_append(data[data_pos:data_pos + bytes_to_read])
                data_pos += bytes_to_read
                expecting_bytes -= bytes_to_read
                if not expecting_bytes:
                    self.header = ''.join(self._header_data)
                    if not self.payload_size:
                        # Header-only packet; emit with an empty payload.
                        yield self.header, ''
                        reset_packet()
                        expecting_bytes = None
                        stage = STAGE_SIZE
                    else:
                        stage = STAGE_PAYLOAD
                        expecting_bytes = self.payload_size
            elif stage == STAGE_PAYLOAD:
                bytes_to_read = min(data_len - data_pos, expecting_bytes)
                payload_append(data[data_pos:data_pos + bytes_to_read])
                data_pos += bytes_to_read
                expecting_bytes -= bytes_to_read
                if not expecting_bytes:
                    self.payload = ''.join(self._payload_data)
                    yield self.header, self.payload
                    reset_packet()
                    stage = STAGE_SIZE
                    expecting_bytes = None
            elif stage == STAGE_SIZE:
                term_pos = datafind(':', data_pos)
                if term_pos == -1:
                    # Size field continues in a later chunk.
                    size_append(data[data_pos:])
                    break
                else:
                    size_append(data[data_pos:term_pos])
                    data_pos = term_pos + 1
                    size = ''.join(self._size)
                    del self._size[:]
                    if ',' in size:
                        header_size, payload_size = size.split(',', 1)
                    else:
                        header_size = size
                        payload_size = ''
                    try:
                        self.header_size = int(header_size or '0')
                        self.payload_size = int(payload_size or '0')
                    except ValueError:
                        # BUG FIX: this was ``self.stream_broken = False`` —
                        # a bad size field corrupts the stream, so mark it
                        # broken (consistent with the prelude error path).
                        self.stream_broken = True
                        raise DecoderError('Invalid size in packet (%s)' % size)
                    if self.header_size:
                        expecting_bytes = self.header_size
                        stage = STAGE_HEADER
                    elif self.payload_size:
                        expecting_bytes = self.payload_size
                        stage = STAGE_PAYLOAD
                    else:
                        # A completely empty packet, permitted, if a little odd
                        yield '', ''
                        reset_packet()
                        expecting_bytes = None
        # Persist progress so the next feed() resumes where we stopped.
        self.expecting_bytes = expecting_bytes
        self.stage = stage

    def on_prelude(self, prelude):
        """Hook invoked once with the prelude line; return False to abort."""
        if self.prelude_callback and not self.prelude_callback(self, prelude):
            return False
        return True
class JSONDecoder(Decoder):
    """Decoder whose packet headers are JSON objects."""

    def feed(self, data):
        # Parse each completed header as JSON; an empty header string
        # becomes an empty dict.
        for header, payload in Decoder.feed(self, data):
            yield (loads(header) if header else {}), payload
if __name__ == "__main__":
    # Ad-hoc smoke test: encode a few packets, then decode a canned
    # stream in 3-byte chunks to exercise the incremental parser.
    f = StringIO()
    encoder = JSONFileEncoder(f)
    encoder.write(dict(a=1, b=2), 'Payload')
    encoder.write(dict(foo="bar", nested=dict(apples="oranges"), alist=range(5)), 'Payload goes here')
    encoder.write(None, 'Payload')
    encoder.write(dict(a=1))
    encoder.write()
    stream = 'prelude\n' + f.getvalue()
    #print stream
    # packets = ['Prelude string\n',
    #            encode('header', 'payload'),
    #            encode('header number 2', 'second payload'),
    #            encode('', '')]
    #
    # stream = ''.join(packets)
    decoder = JSONDecoder()
    stream = 'pyfs/0.1\n59,13:{"type":"rpc","method":"ping","client_ref":"-1221142848:1"}Hello, World!'
    fdata = StringIO(stream)
    # Deliberately tiny reads so packets straddle feed() boundaries.
    while 1:
        data = fdata.read(3)
        if not data:
            break
        for header, payload in decoder.feed(data):
            print "Header:", repr(header)
            print "Payload:", repr(payload)
\ No newline at end of file
from __future__ import with_statement
import socket
import threading
from packetstream import JSONDecoder, JSONFileEncoder
class _SocketFile(object):
def __init__(self, socket):
self.socket = socket
def read(self, size):
try:
return self.socket.recv(size)
except socket.error:
return ''
def write(self, data):
self.socket.sendall(data)
def remote_call(method_name=None):
    """
    Decorator marking a method as remotely callable.

    The wire name defaults to the function's own ``__name__``; names
    accumulate on the function in ``_remote_call_names`` so one method
    may be exposed under several aliases.
    """
    def deco(f):
        names = getattr(f, '_remote_call_names', None)
        if names is None:
            names = f._remote_call_names = []
        names.append(method_name or f.__name__)
        return f
    return deco
class RemoteResponse(Exception):
    """
    Raised by a handler method to short-circuit the normal RPC reply
    with an explicit (header, payload) response.
    """
    def __init__(self, header, payload):
        self.header, self.payload = header, payload
class ConnectionHandlerBase(threading.Thread):
    """One thread per client connection; dispatches JSON-packet RPC calls."""

    # Shared wire-name -> bound-method table, populated lazily by the
    # first instance (note: deliberately a *class* attribute).
    _methods = {}

    def __init__(self, server, connection_id, socket, address):
        super(ConnectionHandlerBase, self).__init__()
        self.server = server
        self.connection_id = connection_id
        self.socket = socket
        # File-like wrapper used by both the encoder and the read loop.
        self.transport = _SocketFile(socket)
        self.address = address
        self.encoder = JSONFileEncoder(self.transport)
        self.decoder = JSONDecoder(prelude_callback=self.on_stream_prelude)
        self._lock = threading.RLock()
        self.socket_error = None
        # Scan for methods marked with @remote_call and register each of
        # their wire names.
        if not self._methods:
            for method_name in dir(self):
                method = getattr(self, method_name)
                if callable(method) and hasattr(method, '_remote_call_names'):
                    for name in method._remote_call_names:
                        self._methods[name] = method
        print self._methods
        self.fs = None

    def run(self):
        # Send the protocol prelude, then pump the socket through the
        # packet decoder until the peer disconnects or errors out.
        self.transport.write('pyfs/1.0\n')
        while True:
            try:
                data = self.transport.read(4096)
            except socket.error, socket_error:
                print socket_error
                self.socket_error = socket_error
                break
            print "data", repr(data)
            if data:
                for packet in self.decoder.feed(data):
                    print repr(packet)
                    self.on_packet(*packet)
            else:
                # Empty read == remote closed the connection.
                break
        self.on_connection_close()

    def close(self):
        with self._lock:
            self.socket.close()

    def on_connection_close(self):
        # Full shutdown of the socket, then tell the server to forget
        # this connection.
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()
        self.server.on_connection_close(self.connection_id)

    def on_stream_prelude(self, packet_stream, prelude):
        # Returning True accepts the stream; no version check is done here.
        print "prelude", prelude
        return True

    def on_packet(self, header, payload):
        # Dispatch an 'rpc' packet to the registered handler method and
        # reply with an 'rpcresult' packet (or an explicit RemoteResponse).
        print '-' * 30
        print repr(header)
        print repr(payload)
        if header['type'] == 'rpc':
            method = header['method']
            args = header['args']
            kwargs = header['kwargs']
            method_callable = self._methods[method]
            remote = dict(type='rpcresult',
                          client_ref = header['client_ref'])
            try:
                response = method_callable(*args, **kwargs)
                remote['response'] = response
                self.encoder.write(remote, '')
            except RemoteResponse, response:
                self.encoder.write(response.header, response.payload)
class RemoteFSConnection(ConnectionHandlerBase):
    """Connection handler that backs each authenticated client with a MemoryFS."""

    @remote_call()
    def auth(self, username, password, resource):
        # Credentials are recorded but not verified here; every client
        # receives a fresh in-memory filesystem.
        self.username, self.password, self.resource = username, password, resource
        from fs.memoryfs import MemoryFS
        self.fs = MemoryFS()
class Server(object):
    """Accepts TCP connections and spawns one handler thread per client."""

    def __init__(self, addr='', port=3000, connection_factory=RemoteFSConnection):
        self.addr = addr
        self.port = port
        self.connection_factory = connection_factory
        self.socket = None
        # Monotonically increasing id assigned to handler threads.
        self.connection_id = 0
        self.threads = {}
        self._lock = threading.RLock()

    def serve_forever(self):
        # Accept connections until KeyboardInterrupt, then attempt a
        # graceful shutdown (falling back to a harsh one on a second
        # interrupt).
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((self.addr, self.port))
        sock.listen(5)
        try:
            while True:
                clientsocket, address = sock.accept()
                self.on_connect(clientsocket, address)
        except KeyboardInterrupt:
            pass
        try:
            self._close_graceful()
        except KeyboardInterrupt:
            self._close_harsh()

    def _close_graceful(self):
        """Tell all threads to exit and wait for them"""
        with self._lock:
            for connection in self.threads.itervalues():
                connection.close()
            for connection in self.threads.itervalues():
                connection.join()
            self.threads.clear()

    def _close_harsh(self):
        # Close sockets without joining the handler threads.
        with self._lock:
            for connection in self.threads.itervalues():
                connection.close()
            self.threads.clear()

    def on_connect(self, clientsocket, address):
        # Create, register and start a handler thread for the new client.
        print "Connection from", address
        with self._lock:
            self.connection_id += 1
            thread = self.connection_factory(self,
                                             self.connection_id,
                                             clientsocket,
                                             address)
            self.threads[self.connection_id] = thread
            thread.start()

    def on_connection_close(self, connection_id):
        # NOTE(review): left as a no-op; the commented-out join below
        # would have the handler thread wait on itself — confirm intent
        # before enabling.
        pass
        #with self._lock:
        #    self.threads[connection_id].join()
        #    del self.threads[connection_id]
if __name__ == "__main__":
    # Run a standalone server on the default address/port (:3000).
    server = Server()
    server.serve_forever()
\ No newline at end of file
import threading
import Queue as queue
def make_job(job_callable, *args, **kwargs):
    """Return a zero-argument callable invoking ``job_callable`` with the given arguments."""
    return lambda: job_callable(*args, **kwargs)
class _PoolThread(threading.Thread):
    """ Internal thread class that runs jobs. """

    def __init__(self, queue, name):
        super(_PoolThread, self).__init__()
        # ``queue`` is the shared PriorityQueue of (priority_key, job) pairs.
        self.queue = queue
        self.name = name

    def __str__(self):
        return self.name

    def run(self):
        # Pull jobs until a ``None`` sentinel arrives (posted by quit()).
        while True:
            try:
                _priority, job = self.queue.get()
            except queue.Empty:
                # NOTE: a blocking get() never raises Empty; kept as a
                # defensive fallback.
                break
            if job is None:
                # Sentinel: shut this worker down.
                break
            if callable(job):
                try:
                    job()
                except Exception, e:
                    # A failing job must not kill the worker thread.
                    print e
            self.queue.task_done()
class ThreadPool(object):
    """Simple priority-based pool of worker threads fed from one queue."""

    def __init__(self, num_threads, size=None, name=''):
        # ``size`` bounds the underlying queue (None = unbounded);
        # ``name`` prefixes each worker thread's name.
        self.num_threads = num_threads
        self.name = name
        self.queue = queue.PriorityQueue(size)
        # Monotonic counter that keeps priority keys unique and FIFO-stable.
        self.job_no = 0
        self.threads = [_PoolThread(self.queue, '%s #%i' % (name, i)) for i in xrange(num_threads)]
        for thread in self.threads:
            thread.start()

    def _make_priority_key(self, i):
        # (priority, insertion order) — equal priorities dequeue FIFO.
        no = self.job_no
        self.job_no += 1
        return (i, no)

    def job(self, job_callable, *args, **kwargs):
        """ Post a job to the queue. """
        def job():
            return job_callable(*args, **kwargs)
        self.queue.put( (self._make_priority_key(1), job), True )
        return self.job_no

    def flush_quit(self):
        """ Quit after all tasks on the queue have been processed. """
        # Priority-1 sentinels queue behind pending jobs, so the queue
        # drains before the workers exit.
        for thread in self.threads:
            self.queue.put( (self._make_priority_key(1), None) )
        for thread in self.threads:
            thread.join()

    def quit(self):
        """ Quit as soon as possible, potentially leaving tasks on the queue. """
        # Priority-0 sentinels jump ahead of pending priority-1 jobs.
        for thread in self.threads:
            self.queue.put( (self._make_priority_key(0), None) )
        for thread in self.threads:
            thread.join()
if __name__ == "__main__":
import time
def job(n):
print "Starting #%i" % n
time.sleep(1)
print "Ending #%i" % n
pool = ThreadPool(5, 'test thread')
for n in range(20):
pool.job(job, n)
pool.flush_quit()
\ No newline at end of file
......@@ -6,16 +6,15 @@ template = """
<style type="text/css">
body {
font-family:Arial, Verdana;
font-family:Arial, Verdana;
margin:0px;
padding:0px;
padding:0px;
}
table.dirlist {
margin:0 auto;
margin:0 auto;
font-size:13px;
color:#666;
min-width:960px;
}
table.dirlist tr.r1 {
......@@ -32,7 +31,7 @@ table.dirlist td a.link-dir {
}
table.dirlist td a {
text-decoration:none;
text-decoration:none;
}
table.dirlist td a:hover {
......@@ -58,22 +57,20 @@ table.dirlist tr:hover {
<div class="dirlist-container">
<table class="dirlist">
<thead>
<tr>
<th>File/Directory</th>
<th>Size</th>
<th>Created Date</th>
</tr>
</thead>
<tbody>
% for i, entry in enumerate(dirlist):
<tr class="${entry['type']} r${i%2}">
<td><a class="link-${entry['type']}" href="${ entry['path'] }">${entry['name']}</a></td>
<td>${entry['size']}</td>
<td>${entry['created_time']}</td>
</tr>
% endfor
</tbody>
</table>
......
from wsgiref.simple_server import make_server
from fs.osfs import OSFS
from wsgi import serve_fs
osfs = OSFS('~/')
application = serve_fs(osfs)
httpd = make_server('', 8000, application)
print "Serving on http://127.0.0.1:8000"
httpd.serve_forever()
application = serve_fs(osfs)
\ No newline at end of file
......@@ -18,96 +18,94 @@ class Request(object):
"""Very simple request object"""
def __init__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
self.start_response = start_response
self.path = environ.get('PATH_INFO')
class WSGIServer(object):
"""Light-weight WSGI server that exposes an FS"""
def __init__(self, serve_fs, indexes=True, dir_template=None, chunk_size=16*1024*1024):
if dir_template is None:
from dirtemplate import template as dir_template
from dirtemplate import template as dir_template
self.serve_fs = serve_fs
self.indexes = indexes
self.indexes = indexes
self.chunk_size = chunk_size
self.dir_template = Template(dir_template)
self.dir_template = Template(dir_template)
def __call__(self, environ, start_response):
request = Request(environ, start_response)
if not self.serve_fs.exists(request.path):
return self.serve_404(request)
return self.serve_404(request)
if self.serve_fs.isdir(request.path):
if not self.indexes:
return self.serve_404(request)
return self.serve_dir(request)
else:
return self.serve_file(request)
def serve_file(self, request):
def serve_file(self, request):
"""Serve a file, guessing a mime-type"""
path = request.path
path = request.path
serving_file = None
try:
try:
serving_file = self.serve_fs.open(path, 'rb')
except Exception, e:
if serving_file is not None:
serving_file.close()
return self.serve_500(request, str(e))
mime_type = mimetypes.guess_type(basename(path))[0] or b'text/plain'
mime_type = mimetypes.guess_type(basename(path))
file_size = self.serve_fs.getsize(path)
headers = [(b'Content-Type', bytes(mime_type)),
(b'Content-Length', bytes(file_size))]
headers = [('Content-Type', mime_type),
('Content-Length', str(file_size))]
def gen_file():
chunk_size = self.chunk_size
read = serving_file.read
try:
while 1:
data = read(chunk_size)
while True:
data = serving_file.read(self.chunk_size)
if not data:
break
yield data
finally:
serving_file.close()
request.start_response(b'200 OK',
request.start_response('200 OK',
headers)
return gen_file()
return gen_file()
def serve_dir(self, request):
"""Serve an index page"""
fs = self.serve_fs
isdir = fs.isdir
path = request.path
dirinfo = fs.listdirinfo(path, full=True, absolute=True)
isdir = fs.isdir
path = request.path
dirinfo = fs.listdirinfo(path, full=True, absolute=True)
entries = []
for p, info in dirinfo:
entry = {}
entry['path'] = p
entry['name'] = basename(p)
entry['size'] = info.get('size', 'unknown')
entry['created_time'] = info.get('created_time')
entry['created_time'] = info.get('created_time')
if isdir(p):
entry['type'] = 'dir'
else:
entry['type'] = 'file'
entry['type'] = 'file'
entries.append(entry)
# Put dirs first, and sort by reverse created time order
no_time = datetime(1970, 1, 1, 1, 0)
entries.sort(key=lambda k:(k['type'] == 'dir', k.get('created_time') or no_time), reverse=True)
# Turn datetime to text and tweak names
for entry in entries:
t = entry.get('created_time')
......@@ -115,34 +113,35 @@ class WSGIServer(object):
entry['created_time'] = t.ctime()
if entry['type'] == 'dir':
entry['name'] += '/'
# Add an up dir link for non-root
if path not in ('', '/'):
entries.insert(0, dict(name='../', path='../', type="dir", size='', created_time='..'))
# Render the mako template
html = self.dir_template.render(**dict(fs=self.serve_fs,
path=path,
dirlist=entries)).encode('utf-8')
request.start_response(b'200 OK', [(b'Content-Type', b'text/html'),
(b'Content-Length', b'%i' % len(html))])
dirlist=entries))
request.start_response('200 OK', [('Content-Type', 'text/html'),
('Content-Length', '%i' % len(html))])
return [html]
def serve_404(self, request, msg='Not found'):
"""Serves a Not found page"""
request.start_response(b'404 NOT FOUND', [(b'Content-Type', b'text/html')])
request.start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
return [msg]
def serve_500(self, request, msg='Unable to complete request'):
"""Serves an internal server error page"""
request.start_response(b'500 INTERNAL SERVER ERROR', [(b'Content-Type', b'text/html')])
"""Serves an internal server error page"""
request.start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [msg]
def serve_fs(fs, indexes=True):
"""Serves an FS object via WSGI"""
application = WSGIServer(fs, indexes)
return application
return application
......@@ -18,11 +18,6 @@ an FS object, which can then be exposed using whatever server you choose
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
from datetime import datetime
import base64
import six
from six import PY3
class RPCFSInterface(object):
"""Wrapper to expose an FS via a XML-RPC compatible interface.
......@@ -31,16 +26,6 @@ class RPCFSInterface(object):
the contents of files.
"""
# info keys are restricted to a subset known to work over xmlrpc
# This fixes an issue with transporting Longs on Py3
_allowed_info = ["size",
"created_time",
"modified_time",
"accessed_time",
"st_size",
"st_mode",
"type"]
def __init__(self, fs):
super(RPCFSInterface, self).__init__()
self.fs = fs
......@@ -52,36 +37,31 @@ class RPCFSInterface(object):
must return something that can be represented in ASCII. The default
is base64-encoded UTF-8.
"""
#return path
return six.text_type(base64.b64encode(path.encode("utf8")), 'ascii')
return path.encode("utf8").encode("base64")
def decode_path(self, path):
"""Decode paths arriving over the wire."""
return six.text_type(base64.b64decode(path.encode('ascii')), 'utf8')
return path.decode("base64").decode("utf8")
def getmeta(self, meta_name):
meta = self.fs.getmeta(meta_name)
if isinstance(meta, basestring):
meta = self.decode_path(meta)
return meta
def getmeta_default(self, meta_name, default):
meta = self.fs.getmeta(meta_name, default)
if isinstance(meta, basestring):
meta = self.decode_path(meta)
return meta
def hasmeta(self, meta_name):
return self.fs.hasmeta(meta_name)
def get_contents(self, path, mode="rb"):
def get_contents(self, path):
path = self.decode_path(path)
data = self.fs.getcontents(path, mode)
data = self.fs.getcontents(path)
return xmlrpclib.Binary(data)
def set_contents(self, path, data):
path = self.decode_path(path)
self.fs.setcontents(path, data.data)
self.fs.setcontents(path,data.data)
def exists(self, path):
path = self.decode_path(path)
......@@ -97,7 +77,7 @@ class RPCFSInterface(object):
def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
path = self.decode_path(path)
entries = self.fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)
entries = self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only)
return [self.encode_path(e) for e in entries]
def makedir(self, path, recursive=False, allow_recreate=False):
......@@ -111,7 +91,7 @@ class RPCFSInterface(object):
def removedir(self, path, recursive=False, force=False):
path = self.decode_path(path)
return self.fs.removedir(path, recursive, force)
def rename(self, src, dst):
src = self.decode_path(src)
dst = self.decode_path(dst)
......@@ -122,15 +102,12 @@ class RPCFSInterface(object):
if isinstance(accessed_time, xmlrpclib.DateTime):
accessed_time = datetime.strptime(accessed_time.value, "%Y%m%dT%H:%M:%S")
if isinstance(modified_time, xmlrpclib.DateTime):
modified_time = datetime.strptime(modified_time.value, "%Y%m%dT%H:%M:%S")
modified_time = datetime.strptime(modified_time.value, "%Y%m%dT%H:%M:%S")
return self.fs.settimes(path, accessed_time, modified_time)
def getinfo(self, path):
path = self.decode_path(path)
info = self.fs.getinfo(path)
info = dict((k, v) for k, v in info.iteritems()
if k in self._allowed_info)
return info
return self.fs.getinfo(path)
def desc(self, path):
path = self.decode_path(path)
......@@ -160,7 +137,7 @@ class RPCFSInterface(object):
dst = self.decode_path(dst)
return self.fs.copy(src, dst, overwrite, chunk_size)
def move(self, src, dst, overwrite=False, chunk_size=16384):
def move(self,src,dst,overwrite=False,chunk_size=16384):
src = self.decode_path(src)
dst = self.decode_path(dst)
return self.fs.move(src, dst, overwrite, chunk_size)
......@@ -198,10 +175,11 @@ class RPCFSServer(SimpleXMLRPCServer):
if logRequests is not None:
kwds['logRequests'] = logRequests
self.serve_more_requests = True
SimpleXMLRPCServer.__init__(self, addr, **kwds)
SimpleXMLRPCServer.__init__(self,addr,**kwds)
self.register_instance(RPCFSInterface(fs))
def serve_forever(self):
"""Override serve_forever to allow graceful shutdown."""
while self.serve_more_requests:
self.handle_request()
......@@ -8,45 +8,41 @@ fs.httpfs
from fs.base import FS
from fs.path import normpath
from fs.errors import ResourceNotFoundError, UnsupportedError
from fs.filelike import FileWrapper
from fs import iotools
from urllib2 import urlopen, URLError
from datetime import datetime
from fs.filelike import FileWrapper
class HTTPFS(FS):
"""Can barely be called a filesystem, because HTTP servers generally don't support
"""Can barely be called a filesystem, because HTTP servers generally don't support
typical filesystem functionality. This class exists to allow the :doc:`opener` system
to read files over HTTP.
to read files over HTTP.
If you do need filesystem like functionality over HTTP, see :mod:`fs.contrib.davfs`.
"""
_meta = {'read_only': True,
'network': True}
_meta = {'read_only':True,
'network':True,}
def __init__(self, url):
"""
:param url: The base URL
"""
self.root_url = url
def _make_url(self, path):
path = normpath(path)
url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/'))
return url
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
def open(self, path, mode="r"):
if '+' in mode or 'w' in mode or 'a' in mode:
raise UnsupportedError('write')
url = self._make_url(path)
try:
f = urlopen(url)
......@@ -54,15 +50,15 @@ class HTTPFS(FS):
raise ResourceNotFoundError(path, details=e)
except OSError, e:
raise ResourceNotFoundError(path, details=e)
return FileWrapper(f)
def exists(self, path):
return self.isfile(path)
def isdir(self, path):
return False
def isfile(self, path):
url = self._make_url(path)
f = None
......@@ -74,9 +70,9 @@ class HTTPFS(FS):
finally:
if f is not None:
f.close()
return True
def listdir(self, path="./",
wildcard=None,
full=False,
......
from __future__ import unicode_literals
from __future__ import print_function
from fs import SEEK_SET, SEEK_CUR, SEEK_END
import io
from functools import wraps
import six
class RawWrapper(object):
    """Convert a Python 2 style file-like object in to a IO object.

    Wraps an arbitrary file-like object so it satisfies the ``io`` raw
    stream interface and can be layered under ``io.BufferedReader`` /
    ``io.BufferedWriter`` / ``io.TextIOWrapper``.
    """

    def __init__(self, f, mode=None, name=None):
        """
        :param f: the file-like object to wrap
        :param mode: mode string; taken from ``f.mode`` when omitted
        :param name: optional name (e.g. the path the file came from)
        """
        self._f = f
        # True when f is already a real io object; several methods can
        # then delegate directly instead of emulating the behaviour.
        self.is_io = isinstance(f, io.IOBase)
        if mode is None and hasattr(f, 'mode'):
            mode = f.mode
        self.mode = mode
        self.name = name
        self.closed = False
        super(RawWrapper, self).__init__()

    def __repr__(self):
        return "<IO wrapper for {0}>".format(self._f)

    def close(self):
        self._f.close()
        self.closed = True

    def fileno(self):
        return self._f.fileno()

    def flush(self):
        return self._f.flush()

    def isatty(self):
        return self._f.isatty()

    def seek(self, offset, whence=SEEK_SET):
        return self._f.seek(offset, whence)

    def readable(self):
        # NOTE(review): falls back to the mode string; raises TypeError if
        # mode is None (no mode supplied and f has no .mode) -- unchanged.
        if hasattr(self._f, 'readable'):
            return self._f.readable()
        return 'r' in self.mode

    def writable(self):
        # BUG FIX: previously probed the misspelled attribute 'writeable'
        # and then called self._fs (a typo for self._f), so delegation
        # never worked and crashed whenever the attribute did exist.
        if hasattr(self._f, 'writable'):
            return self._f.writable()
        return 'w' in self.mode

    def seekable(self):
        if hasattr(self._f, 'seekable'):
            return self._f.seekable()
        # Probe: a zero-offset relative seek fails on unseekable streams.
        try:
            self.seek(0, SEEK_CUR)
        except IOError:
            return False
        else:
            return True

    def tell(self):
        return self._f.tell()

    def truncate(self, size=None):
        return self._f.truncate(size)

    def write(self, data):
        if self.is_io:
            return self._f.write(data)
        # Plain Python 2 files return None from write(); the io protocol
        # requires the number of bytes written.
        self._f.write(data)
        return len(data)

    def read(self, n=-1):
        if n == -1:
            return self.readall()
        return self._f.read(n)

    def read1(self, n=-1):
        if self.is_io:
            return self._f.read1(n)
        # Non-io objects have no read1; a plain read is equivalent enough.
        return self.read(n)

    def readall(self):
        return self._f.read()

    def readinto(self, b):
        if self.is_io:
            return self._f.readinto(b)
        # Emulate readinto by copying a read() result into the buffer.
        data = self._f.read(len(b))
        bytes_read = len(data)
        b[:len(data)] = data
        return bytes_read

    def readline(self, limit=-1):
        return self._f.readline(limit)

    def readlines(self, hint=-1):
        return self._f.readlines(hint)

    def writelines(self, sequence):
        return self._f.writelines(sequence)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    def __iter__(self):
        return iter(self._f)
def filelike_to_stream(f):
    """Decorator for FS.open implementations that return plain file-like
    objects: the returned object is wrapped with :func:`make_stream` so
    callers always receive a proper IO stream.
    """
    @wraps(f)
    def wrapper(self, path, mode='rt', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        # Let the wrapped open() produce its raw file-like object first...
        raw = f(self, path, mode=mode, buffering=buffering, encoding=encoding,
                errors=errors, newline=newline, line_buffering=line_buffering,
                **kwargs)
        # ...then dress it up as an io stream with the same options.
        return make_stream(path, raw, mode=mode, buffering=buffering,
                           encoding=encoding, errors=errors, newline=newline,
                           line_buffering=line_buffering)
    return wrapper
def make_stream(name,
                f,
                mode='r',
                buffering=-1,
                encoding=None,
                errors=None,
                newline=None,
                line_buffering=False,
                **kwargs):
    """Take a Python 2.x binary file and returns an IO Stream.

    The raw file is wrapped with RawWrapper, optionally buffered (when
    *buffering* >= 0), and wrapped in a TextIOWrapper for text modes.
    """
    reading = 'r' in mode
    writing = 'w' in mode
    appending = 'a' in mode  # noqa: kept for parity; 'a' gets no buffering layer
    is_binary = 'b' in mode
    if '+' in mode:
        reading = writing = True

    stream = RawWrapper(f, mode=mode, name=name)

    if buffering >= 0:
        # A buffering of 0 still selects the default buffer size here.
        bufsize = buffering or io.DEFAULT_BUFFER_SIZE
        if reading and writing:
            stream = io.BufferedRandom(stream, bufsize)
        elif reading:
            stream = io.BufferedReader(stream, bufsize)
        elif writing:
            stream = io.BufferedWriter(stream, bufsize)

    if not is_binary:
        # Text modes get decoding/newline translation on top.
        stream = io.TextIOWrapper(stream,
                                  encoding=encoding,
                                  errors=errors,
                                  newline=newline,
                                  line_buffering=line_buffering,)
    return stream
def decode_binary(data, encoding=None, errors=None, newline=None):
    """Decode bytes as though read from a text file."""
    byte_stream = io.BytesIO(data)
    text_stream = io.TextIOWrapper(byte_stream, encoding=encoding,
                                   errors=errors, newline=newline)
    return text_stream.read()
def make_bytes_io(data, encoding=None, errors=None):
    """Make a bytes IO object from either a string or an open file.

    :param data: bytes, text, or an open file-like object
    :param encoding: encoding used when *data* is (or reads as) text
    :param errors: error handling scheme for the encode step
    """
    if hasattr(data, 'mode') and 'b' in data.mode:
        # It's already a binary file -- hand it back unchanged
        return data
    # BUG FIX: was `isinstance(data, basestring)`, which raises NameError
    # on Python 3.  six (already imported by this module) gives the
    # equivalent pair: (str, unicode) on Py2, (bytes, str) on Py3.
    if not isinstance(data, (six.binary_type, six.text_type)):
        # It's a file, but we don't know if its binary
        # TODO: Is there a better way than reading the entire file?
        data = data.read() or b''
    if isinstance(data, six.text_type):
        # If its text, encoding in to bytes
        data = data.encode(encoding=encoding, errors=errors)
    return io.BytesIO(data)
def copy_file_to_fs(f, fs, path, encoding=None, errors=None, progress_callback=None, chunk_size=64 * 1024):
    """Copy an open file to a path on an FS.

    The destination is opened in text or binary mode depending on the
    type of the first chunk read from *f*.  Returns the number of
    bytes/characters written.
    """
    if progress_callback is None:
        # Default to a no-op progress reporter.
        def progress_callback(bytes_written):
            pass

    # Peek at the first chunk to decide between text and binary output.
    chunk = f.read(chunk_size)
    if isinstance(chunk, six.text_type):
        out = fs.open(path, 'wt', encoding=encoding, errors=errors)
    else:
        out = fs.open(path, 'wb')

    bytes_written = 0
    try:
        while chunk:
            out.write(chunk)
            bytes_written += len(chunk)
            progress_callback(bytes_written)
            chunk = f.read(chunk_size)
    finally:
        out.close()
    return bytes_written
if __name__ == "__main__":
    # Demo: wrap the same binary file three ways and show what read() yields.
    demos = [
        ("Reading a binary file", 'rb', {}),
        ("Reading a text file", 'rt', {}),
        ("Reading a buffered binary file", 'rb', {'buffering': 0}),
    ]
    for label, demo_mode, extra in demos:
        print(label)
        bin_file = open('tests/data/UTF-8-demo.txt', 'rb')
        with make_stream('UTF-8-demo.txt', bin_file, demo_mode, **extra) as f:
            print(repr(f))
            print(type(f.read(200)))
......@@ -34,8 +34,6 @@ if OSFSWatchMixin is None:
# Fall back to raising UnsupportedError
if OSFSWatchMixin is None:
class OSFSWatchMixin(object):
def __init__(self, *args, **kwargs):
super(OSFSWatchMixin, self).__init__(*args, **kwargs)
def add_watcher(self,*args,**kwds):
raise UnsupportedError
def del_watcher(self,watcher_or_callback):
......
......@@ -170,7 +170,7 @@ class OSFSWatchMixin(WatchableFSMixin):
if inevt.mask & pyinotify.IN_MODIFY:
watcher.handle_event(MODIFIED(self,path,True))
if inevt.mask & pyinotify.IN_CLOSE_WRITE:
watcher.handle_event(MODIFIED(self,path,True, closed=True))
watcher.handle_event(MODIFIED(self,path,True))
if inevt.mask & pyinotify.IN_MOVED_FROM:
# Sorry folks, I'm not up for decoding the destination path.
watcher.handle_event(MOVED_SRC(self,path,None))
......@@ -184,7 +184,7 @@ class OSFSWatchMixin(WatchableFSMixin):
watcher.handle_event(OVERFLOW(self))
if inevt.mask & pyinotify.IN_UNMOUNT:
watcher.handle_event(CLOSE(self))
def __get_watch_thread(self):
"""Get the shared watch thread, initializing if necessary.
......@@ -219,7 +219,7 @@ class SharedThreadedNotifier(threading.Thread):
self.watchers[fd] = watcher
self._poller.register(fd,select.POLLIN)
# Bump the poll object so it recognises the new fd.
os.write(self._pipe_w,b"H")
os.write(self._pipe_w,"H")
def del_watcher(self,watcher):
fd = watcher._pyinotify_WatchManager.get_fd()
......
......@@ -240,17 +240,17 @@ class WatchedDirectory(object):
self.callback(os.path.join(self.path,name),action)
def _extract_change_info(self,buffer):
"""Extract the information out of a FILE_NOTIFY_INFORMATION structure."""
pos = 0
while pos < len(buffer):
jump, action, namelen = struct.unpack("iii",buffer[pos:pos+12])
# TODO: this may return a shortname or a longname, with no way
# to tell which. Normalise them somehow?
name = buffer[pos+12:pos+12+namelen].decode("utf16")
yield (name,action)
if not jump:
break
pos += jump
"""Extract the information out of a FILE_NOTIFY_INFORMATION structure."""
pos = 0
while pos < len(buffer):
jump, action, namelen = struct.unpack("iii",buffer[pos:pos+12])
# TODO: this may return a shortname or a longname, with no way
# to tell which. Normalise them somehow?
name = buffer[pos+12:pos+12+namelen].decode("utf16")
yield (name,action)
if not jump:
break
pos += jump
class WatchThread(threading.Thread):
......@@ -290,24 +290,24 @@ class WatchThread(threading.Thread):
flags = 0
for evt in events:
if issubclass(ACCESSED,evt):
do_access = True
do_access = True
if issubclass(MODIFIED,evt):
do_change = True
flags |= FILE_NOTIFY_CHANGE_ATTRIBUTES
flags |= FILE_NOTIFY_CHANGE_CREATION
flags |= FILE_NOTIFY_CHANGE_SECURITY
do_change = True
flags |= FILE_NOTIFY_CHANGE_ATTRIBUTES
flags |= FILE_NOTIFY_CHANGE_CREATION
flags |= FILE_NOTIFY_CHANGE_SECURITY
if issubclass(CREATED,evt):
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
if issubclass(REMOVED,evt):
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
if issubclass(MOVED_SRC,evt):
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
if issubclass(MOVED_DST,evt):
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
flags |= FILE_NOTIFY_CHANGE_FILE_NAME
flags |= FILE_NOTIFY_CHANGE_DIR_NAME
if do_access:
# Separately capture FILE_NOTIFY_CHANGE_LAST_ACCESS events
# so we can reliably generate ACCESSED events.
......
......@@ -23,12 +23,9 @@ except ImportError:
if xattr is not None:
class OSFSXAttrMixin(object):
class OSFSXAttrMixin(FS):
"""Mixin providing extended-attribute support via the 'xattr' module"""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
@convert_os_errors
def setxattr(self, path, key, value):
xattr.xattr(self.getsyspath(path))[key]=value
......@@ -56,9 +53,6 @@ else:
class OSFSXAttrMixin(object):
"""Mixin disable extended-attribute support."""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
def getxattr(self,path,key,default=None):
raise UnsupportedError
......
# Work in Progress - Do not use
from __future__ import with_statement
from fs.base import FS
from fs.expose.serve import packetstream
from collections import defaultdict
import threading
from threading import Lock, RLock
from json import dumps
import Queue as queue
import socket
from six import b
class PacketHandler(threading.Thread):
    """Background thread that reads packets from a transport and routes
    replies to per-caller queues.

    Each calling thread gets its own reply queue (keyed by thread ident),
    so multiple threads can multiplex requests over a single transport.
    """

    def __init__(self, transport, prelude_callback=None):
        super(PacketHandler, self).__init__()
        # transport: file-like object with read/write (e.g. _SocketFile)
        self.transport = transport
        self.encoder = packetstream.JSONFileEncoder(transport)
        # NOTE(review): the prelude_callback argument appears to be
        # dropped here -- the decoder is always built with
        # prelude_callback=None.  Confirm intent before relying on it.
        self.decoder = packetstream.JSONDecoder(prelude_callback=None)
        # One reply queue per thread ident, created on demand.
        self.queues = defaultdict(queue.Queue)
        self._encoder_lock = threading.Lock()
        self._queues_lock = threading.Lock()
        self._call_id_lock = threading.Lock()
        # Monotonically increasing id used to correlate calls with replies.
        self.call_id = 0

    def run(self):
        """Read loop: feed raw bytes to the decoder, dispatch each packet."""
        decoder = self.decoder
        read = self.transport.read
        on_packet = self.on_packet
        while True:
            data = read(1024*16)
            if not data:
                # Empty read means the transport closed; stop the thread.
                print "No data"
                break
            print "data", repr(data)
            for header, payload in decoder.feed(data):
                print repr(header)
                print repr(payload)
                on_packet(header, payload)

    def _new_call_id(self):
        # Thread-safe counter increment; ids start at 1.
        with self._call_id_lock:
            self.call_id += 1
            return self.call_id

    def get_thread_queue(self, queue_id=None):
        """Return the reply queue for *queue_id* (default: current thread)."""
        if queue_id is None:
            queue_id = threading.current_thread().ident
        with self._queues_lock:
            return self.queues[queue_id]

    def send_packet(self, header, payload=''):
        """Stamp *header* with a client_ref and write the packet.

        :returns: the call id, so the caller can wait for the reply
        """
        call_id = self._new_call_id()
        queue_id = threading.current_thread().ident
        # client_ref encodes both the calling thread and the call number.
        client_ref = "%i:%i" % (queue_id, call_id)
        header['client_ref'] = client_ref
        with self._encoder_lock:
            self.encoder.write(header, payload)
        return call_id

    def get_packet(self, call_id):
        """Block until the reply matching *call_id* arrives on this
        thread's queue.  Packets with a different client_ref are
        silently discarded, not re-queued.
        """
        if call_id is not None:
            queue_id = threading.current_thread().ident
            client_ref = "%i:%i" % (queue_id, call_id)
        else:
            client_ref = None
        queue = self.get_thread_queue()
        while True:
            header, payload = queue.get()
            print repr(header)
            print repr(payload)
            if client_ref is not None and header.get('client_ref') != client_ref:
                # Not the reply we are waiting for; drop it, keep waiting.
                continue
            break
        return header, payload

    def on_packet(self, header, payload):
        """Route an incoming packet to the queue named by its client_ref."""
        client_ref = header.get('client_ref', '')
        queue_id, call_id = client_ref.split(':', 1)
        queue_id = int(queue_id)
        #queue_id = header.get('queue_id', '')
        queue = self.get_thread_queue(queue_id)
        queue.put((header, payload))
class _SocketFile(object):
    """Minimal file-like wrapper around a socket, so a socket can serve
    as the read/write transport for PacketHandler.
    """

    def __init__(self, socket):
        # NOTE: the parameter shadows the module name inside __init__ only.
        self.socket = socket

    def read(self, size):
        """Read up to *size* bytes; socket errors read as EOF (b'')."""
        try:
            return self.socket.recv(size)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only socket-level failures should
        # be treated as end-of-stream.
        except socket.error:
            return b('')

    def write(self, data):
        """Send all of *data* over the socket."""
        self.socket.sendall(data)

    def close(self):
        """Shut down both directions, then close the underlying socket."""
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()
class _RemoteFile(object):
    """Placeholder handle for a file on the remote filesystem (WIP)."""

    def __init__(self, path, connection):
        # Remember the remote path and the connection that owns it.
        self.path = path
        self.connection = connection
class RemoteFS(FS):
_meta = { 'thead_safe' : True,
'network' : True,
'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
}
def __init__(self, addr='', port=3000, username=None, password=None, resource=None, transport=None):
self.addr = addr
self.port = port
self.username = None
self.password = None
self.resource = None
self.transport = transport
if self.transport is None:
self.transport = self._open_connection()
self.packet_handler = PacketHandler(self.transport)
self.packet_handler.start()
self._remote_call('auth',
username=username,
password=password,
resource=resource)
def _open_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.addr, self.port))
socket_file = _SocketFile(sock)
socket_file.write(b('pyfs/0.1\n'))
return socket_file
def _make_call(self, method_name, *args, **kwargs):
call = dict(type='rpc',
method=method_name,
args=args,
kwargs=kwargs)
return call
def _remote_call(self, method_name, *args, **kwargs):
call = self._make_call(method_name, *args, **kwargs)
call_id = self.packet_handler.send_packet(call)
header, payload = self.packet_handler.get_packet(call_id)
return header, payload
def ping(self, msg):
call_id = self.packet_handler.send_packet({'type':'rpc', 'method':'ping'}, msg)
header, payload = self.packet_handler.get_packet(call_id)
print "PING"
print header
print payload
def close(self):
self.transport.close()
self.packet_handler.join()
def open(self, path, mode="r", **kwargs):
pass
def exists(self, path):
remote = self._remote_call('exists', path)
return remote.get('response')
if __name__ == "__main__":
    # Smoke test: connect to a local server (default port 3000),
    # authenticate with no credentials, then shut the connection down.
    rfs = RemoteFS()
    rfs.close()
......@@ -2,8 +2,6 @@
fs.s3fs
=======
**Currently only avaiable on Python2 due to boto not being available for Python3**
FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem
......@@ -26,9 +24,7 @@ from fs.path import *
from fs.errors import *
from fs.remote import *
from fs.filelike import LimitBytesFile
from fs import iotools
import six
# Boto is not thread-safe, so we need to use a per-thread S3 connection.
if hasattr(threading,"local"):
......@@ -59,18 +55,18 @@ class S3FS(FS):
or flushed.
"""
_meta = {'thread_safe': True,
'virtual': False,
'read_only': False,
'unicode_paths': True,
'case_insensitive_paths': False,
'network': True,
'atomic.move': True,
'atomic.copy': True,
'atomic.makedir': True,
'atomic.rename': False,
'atomic.setcontent': True
}
_meta = { 'thread_safe' : True,
'virtual': False,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
'network' : True,
'atomic.move' : True,
'atomic.copy' : True,
'atomic.makedir' : True,
'atomic.rename' : False,
'atomic.setconetns' : True
}
class meta:
PATH_MAX = None
......@@ -108,6 +104,12 @@ class S3FS(FS):
prefix = prefix + separator
if isinstance(prefix,unicode):
prefix = prefix.encode("utf8")
if aws_access_key is None:
if "AWS_ACCESS_KEY_ID" not in os.environ:
raise CreateFailedError("AWS_ACCESS_KEY_ID not set")
if aws_secret_key is None:
if "AWS_SECRET_ACCESS_KEY" not in os.environ:
raise CreateFailedError("AWS_SECRET_ACCESS_KEY not set")
self._prefix = prefix
self._tlocal = thread_local()
super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
......@@ -135,14 +137,7 @@ class S3FS(FS):
return b
except AttributeError:
try:
# Validate by listing the bucket if there is no prefix.
# If there is a prefix, validate by listing only the prefix
# itself, to avoid errors when an IAM policy has been applied.
if self._prefix:
b = self._s3conn.get_bucket(self._bucket_name, validate=0)
b.get_key(self._prefix)
else:
b = self._s3conn.get_bucket(self._bucket_name, validate=1)
b = self._s3conn.get_bucket(self._bucket_name, validate=True)
except S3ResponseError, e:
if "404 Not Found" not in str(e):
raise
......@@ -160,11 +155,11 @@ class S3FS(FS):
super(S3FS,self).__setstate__(state)
self._tlocal = thread_local()
def __repr__(self):
def __str__(self):
args = (self.__class__.__name__,self._bucket_name,self._prefix)
return '<%s: %s:%s>' % args
__str__ = __repr__
__repr__ = __str__
def _s3path(self,path):
"""Get the absolute path to a file stored in S3."""
......@@ -242,19 +237,19 @@ class S3FS(FS):
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
k.make_public()
def getpathurl(self, path, allow_none=False, expires=3600):
"""Returns a url that corresponds to the given path."""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and
is_public = True in [grant.permission == 'READ' and \
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants]
for grant in k.get_acl().acl.grants ]
url = k.generate_url(expires, force_http=is_public)
if url == None:
if not allow_none:
raise NoPathURLError(path=path)
......@@ -263,17 +258,14 @@ class S3FS(FS):
if is_public:
# Strip time token; it has no sense for public resource
url = url.split('?')[0]
return url
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
def setcontents(self, path, data, chunk_size=64*1024):
s3path = self._s3path(path)
if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
self._sync_set_contents(s3path, data)
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
def open(self,path,mode="r"):
"""Open the named file in the given mode.
This method downloads the file contents into a local temporary file
......@@ -502,8 +494,6 @@ class S3FS(FS):
def removedir(self,path,recursive=False,force=False):
"""Remove the directory at the given path."""
if normpath(path) in ('', '/'):
raise RemoveRootError(path)
s3path = self._s3path(path)
if s3path != self._prefix:
s3path = s3path + self._separator
......@@ -653,7 +643,7 @@ class S3FS(FS):
yield item
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
......@@ -681,7 +671,7 @@ class S3FS(FS):
yield (item,self.getinfo(item))
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
......@@ -708,7 +698,7 @@ class S3FS(FS):
yield (item,self.getinfo(item))
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
......
......@@ -11,23 +11,29 @@ import os.path
import time
import tempfile
from fs.base import synchronize
from fs.osfs import OSFS
from fs.errors import *
from fs import _thread_synchronize_default
class TempFS(OSFS):
"""Create a Filesystem in a temporary directory (with tempfile.mkdtemp),
and removes it when the TempFS object is cleaned up."""
_meta = dict(OSFS._meta)
_meta['pickle_contents'] = False
_meta['network'] = False
_meta['atomic.move'] = True
_meta['atomic.copy'] = True
_meta = { 'thread_safe' : True,
'virtual' : False,
'read_only' : False,
'unicode_paths' : os.path.supports_unicode_filenames,
'case_insensitive_paths' : os.path.normcase('Aa') == 'aa',
'pickle_contents': False,
'network' : False,
'atomic.move' : True,
'atomic.copy' : True,
'atomic.makedir' : True,
'atomic.rename' : True,
'atomic.setcontents' : False
}
def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_synchronize_default):
"""Creates a temporary Filesystem
......@@ -39,34 +45,33 @@ class TempFS(OSFS):
self.identifier = identifier
self.temp_dir = temp_dir
self.dir_mode = dir_mode
self._temp_dir = tempfile.mkdtemp(identifier or "TempFS", dir=temp_dir)
self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir)
self._cleaned = False
super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
def __repr__(self):
def __str__(self):
return '<TempFS: %s>' % self._temp_dir
__str__ = __repr__
__repr__ = __str__
def __unicode__(self):
return u'<TempFS: %s>' % self._temp_dir
def __getstate__(self):
# If we are picking a TempFS, we want to preserve its contents,
# so we *don't* do the clean
state = super(TempFS, self).__getstate__()
self._cleaned = True
return state
def __setstate__(self, state):
state = super(TempFS, self).__setstate__(state)
self._cleaned = False
#self._temp_dir = tempfile.mkdtemp(self.identifier or "TempFS", dir=self.temp_dir)
def __setstate__(self, state):
state = super(TempFS, self).__setstate__(state)
self._cleaned = False
#self._temp_dir = tempfile.mkdtemp(self.identifier or "TempFS", dir=self.temp_dir)
#super(TempFS, self).__init__(self._temp_dir,
# dir_mode=self.dir_mode,
# thread_synchronize=self.thread_synchronize)
# thread_synchronize=self.thread_synchronize)
@synchronize
def close(self):
"""Removes the temporary directory.
......@@ -75,13 +80,13 @@ class TempFS(OSFS):
Note that once this method has been called, the FS object may
no longer be used.
"""
super(TempFS, self).close()
super(TempFS,self).close()
# Depending on how resources are freed by the OS, there could
# be some transient errors when freeing a TempFS soon after it
# was used. If they occur, do a small sleep and try again.
try:
self._close()
except (ResourceLockedError, ResourceInvalidError):
except (ResourceLockedError,ResourceInvalidError):
time.sleep(0.5)
self._close()
......@@ -99,23 +104,20 @@ class TempFS(OSFS):
try:
# shutil.rmtree doesn't handle long paths on win32,
# so we walk the tree by hand.
entries = os.walk(self.root_path, topdown=False)
for (dir, dirnames, filenames) in entries:
entries = os.walk(self.root_path,topdown=False)
for (dir,dirnames,filenames) in entries:
for filename in filenames:
try:
os_remove(os.path.join(dir, filename))
os_remove(os.path.join(dir,filename))
except ResourceNotFoundError:
pass
for dirname in dirnames:
try:
os_rmdir(os.path.join(dir, dirname))
os_rmdir(os.path.join(dir,dirname))
except ResourceNotFoundError:
pass
try:
os.rmdir(self.root_path)
except OSError:
pass
os.rmdir(self.root_path)
self._cleaned = True
finally:
self._lock.release()
super(TempFS, self).close()
super(TempFS,self).close()
"""
fs.tests.test_archivefs: testcases for the ArchiveFS class
"""
import unittest
import os
import random
import zipfile
import tempfile
import shutil
import fs.tests
from fs.path import *
try:
from fs.contrib import archivefs
except ImportError:
libarchive_available = False
else:
libarchive_available = True
from six import PY3, b
class TestReadArchiveFS(unittest.TestCase):
    """Read-side ArchiveFS tests against a zip built with stdlib zipfile."""

    __test__ = libarchive_available

    def setUp(self):
        # Create a uniquely named scratch zip with a known set of members.
        self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
        self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
        self.zf = zipfile.ZipFile(self.temp_filename, "w")
        zf = self.zf
        zf.writestr("a.txt", b("Hello, World!"))
        zf.writestr("b.txt", b("b"))
        zf.writestr("1.txt", b("1"))
        zf.writestr("foo/bar/baz.txt", b("baz"))
        zf.writestr("foo/second.txt", b("hai"))
        zf.close()
        self.fs = archivefs.ArchiveFS(self.temp_filename, "r")

    def tearDown(self):
        self.fs.close()
        os.remove(self.temp_filename)

    def check(self, p):
        """Return True if *p* names a member of the fixture zip."""
        try:
            # BUG FIX: previously referenced self.zipfile, which is never
            # assigned -- the handle is stored as self.zf in setUp.
            self.zf.getinfo(p)
            return True
        # BUG FIX: was a bare `except:`; ZipFile.getinfo raises KeyError
        # for unknown member names.
        except KeyError:
            return False

    def test_reads(self):
        def read_contents(path):
            f = self.fs.open(path)
            contents = f.read()
            f.close()  # don't leak the archive file handle
            return contents
        def check_contents(path, expected):
            self.assert_(read_contents(path)==expected)
        check_contents("a.txt", b("Hello, World!"))
        check_contents("1.txt", b("1"))
        check_contents("foo/bar/baz.txt", b("baz"))

    def test_getcontents(self):
        def read_contents(path):
            return self.fs.getcontents(path)
        def check_contents(path, expected):
            self.assert_(read_contents(path)==expected)
        check_contents("a.txt", b("Hello, World!"))
        check_contents("1.txt", b("1"))
        check_contents("foo/bar/baz.txt", b("baz"))

    def test_is(self):
        # Files and their intermediate directories should all be visible.
        self.assert_(self.fs.isfile('a.txt'))
        self.assert_(self.fs.isfile('1.txt'))
        self.assert_(self.fs.isfile('foo/bar/baz.txt'))
        self.assert_(self.fs.isdir('foo'))
        self.assert_(self.fs.isdir('foo/bar'))
        self.assert_(self.fs.exists('a.txt'))
        self.assert_(self.fs.exists('1.txt'))
        self.assert_(self.fs.exists('foo/bar/baz.txt'))
        self.assert_(self.fs.exists('foo'))
        self.assert_(self.fs.exists('foo/bar'))

    def test_listdir(self):
        def check_listing(path, expected):
            dir_list = self.fs.listdir(path)
            self.assert_(sorted(dir_list) == sorted(expected))
            for item in dir_list:
                # Listings must be unicode (this module targets Python 2).
                self.assert_(isinstance(item,unicode))
        check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
        check_listing('foo', ['second.txt', 'bar'])
        check_listing('foo/bar', ['baz.txt'])
class TestWriteArchiveFS(unittest.TestCase):
    """Write a zip with ArchiveFS, then verify it with stdlib zipfile."""

    __test__ = libarchive_available

    def setUp(self):
        # Random basename avoids collisions between concurrent test runs.
        self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
        self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)

        # Member names and payloads, shared with test_creation so the
        # fixture data is defined in exactly one place (previously it was
        # duplicated between setUp and test_creation).
        self._files = [
            ("a.txt", b("Hello, World!")),
            ("b.txt", b("b")),
            (u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega")),
            ("foo/bar/baz.txt", b("baz")),
            ("foo/second.txt", b("hai")),
        ]
        archive_fs = archivefs.ArchiveFS(self.temp_filename, format='zip', mode='w')
        for filename, contents in self._files:
            if dirname(filename):
                archive_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
            f = archive_fs.open(filename, 'wb')
            f.write(contents)
            f.close()
        archive_fs.close()

    def tearDown(self):
        # Remove the scratch archive created by setUp.
        os.remove(self.temp_filename)

    def test_valid(self):
        # zipfile.testzip() returns None when every member's CRC is intact.
        zf = zipfile.ZipFile(self.temp_filename, "r")
        self.assert_(zf.testzip() is None)
        zf.close()

    def test_creation(self):
        zf = zipfile.ZipFile(self.temp_filename, "r")
        try:
            for filename, contents in self._files:
                # Python 2's zipfile expects encoded (byte) member names.
                member = filename if PY3 else filename.encode(archivefs.ENCODING)
                self.assertEqual(contents, zf.read(member))
        finally:
            # BUG FIX: the zip handle was previously never closed.
            zf.close()
#~ class TestAppendArchiveFS(TestWriteArchiveFS):
#~ __test__ = libarchive_available
#~ def setUp(self):
#~ self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
#~ self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
#~ zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
#~ def makefile(filename, contents):
#~ if dirname(filename):
#~ zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
#~ f = zip_fs.open(filename, 'wb')
#~ f.write(contents)
#~ f.close()
#~ makefile("a.txt", b("Hello, World!"))
#~ makefile("b.txt", b("b"))
#~ zip_fs.close()
#~ zip_fs = zipfs.ZipFS(self.temp_filename, 'a')
#~ makefile("foo/bar/baz.txt", b("baz"))
#~ makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
#~ makefile("foo/second.txt", b("hai"))
#~ zip_fs.close()
#~ class TestArchiveFSErrors(unittest.TestCase):
#~ __test__ = libarchive_available
#~ def setUp(self):
#~ self.workdir = tempfile.mkdtemp()
#~ def tearDown(self):
#~ shutil.rmtree(self.workdir)
#~ def test_bogus_zipfile(self):
#~ badzip = os.path.join(self.workdir,"bad.zip")
#~ f = open(badzip,"wb")
#~ f.write(b("I'm not really a zipfile"))
#~ f.close()
#~ self.assertRaises(zipfs.ZipOpenError,zipfs.ZipFS,badzip)
#~ def test_missing_zipfile(self):
#~ missingzip = os.path.join(self.workdir,"missing.zip")
#~ self.assertRaises(zipfs.ZipNotFoundError,zipfs.ZipFS,missingzip)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# -*- encoding: utf-8 -*-
"""
fs.tests.test_errors: testcases for the fs error classes functions
......@@ -25,8 +24,3 @@ class TestErrorPickling(unittest.TestCase):
assert_dump_load(UnsupportedError("makepony"))
class TestFSError(unittest.TestCase):
    """Regression tests for FSError string conversion."""

    def test_unicode_representation_of_error_with_non_ascii_characters(self):
        # Building the unicode form of an error whose path contains
        # non-ASCII characters must not raise UnicodeDecodeError.
        err = PathError('/Shïrê/Frødø')
        unicode(err)
\ No newline at end of file
......@@ -6,8 +6,7 @@
import unittest
import sys
import os
import os.path
import os, os.path
import socket
import threading
import time
......@@ -21,34 +20,118 @@ from fs.errors import *
from fs import rpcfs
from fs.expose.xmlrpc import RPCFSServer
class TestRPCFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
def makeServer(self, fs, addr):
    """Build an XML-RPC server exposing *fs* on *addr*, request logging off."""
    server = RPCFSServer(fs, addr, logRequests=False)
    return server
def startServer(self):
    """Spin the RPC server up on a background daemon thread.

    Blocks until runServer() signals via start_event that the server has
    successfully bound to a port, so self.server_addr is valid on return.
    """
    # NOTE: the original assigned a dead local ``port = 3000`` here; the
    # port is chosen inside runServer(), so the unused local was removed.
    self.temp_fs = TempFS()
    self.server = None
    self.serve_more_requests = True
    self.server_thread = threading.Thread(target=self.runServer)
    self.server_thread.setDaemon(True)
    self.start_event = threading.Event()
    self.end_event = threading.Event()
    self.server_thread.start()
    # Wait for the server thread to finish binding before returning.
    self.start_event.wait()
def runServer(self):
    """Run the server, swallowing shutdown-related exceptions.

    Executed on a background thread: binds to the first free port at or
    above 3000, signals start_event once listening, then serves single
    requests until serve_more_requests is cleared by tearDown().
    """
    port = 3000
    # Retry on successive ports until the bind succeeds.
    while not self.server:
        try:
            self.server = self.makeServer(self.temp_fs,("127.0.0.1",port))
        except socket.error, e:
            if e.args[1] == "Address already in use":
                port += 1
            else:
                raise
    self.server_addr = ("127.0.0.1", port)
    self.server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # if sys.platform != "win32":
    #     try:
    #         self.server.socket.settimeout(1)
    #     except socket.error:
    #         pass
    #
    # Let startServer() return now that the socket is bound.
    self.start_event.set()
    try:
        #self.server.serve_forever()
        # Handle one request at a time so the loop can observe the stop flag.
        while self.serve_more_requests:
            self.server.handle_request()
    except Exception, e:
        # Errors raised while shutting down (e.g. closed socket) are expected.
        pass
    self.end_event.set()
import six
from six import PY3, b
from fs.tests.test_rpcfs import TestRPCFS
try:
from fs import sftpfs
from fs.expose.sftp import BaseSFTPServer
except ImportError:
if not PY3:
raise
import logging
logging.getLogger('paramiko').setLevel(logging.ERROR)
logging.getLogger('paramiko.transport').setLevel(logging.ERROR)
def setUp(self):
    # Launch the XML-RPC server, then connect an RPCFS client to it.
    self.startServer()
    self.fs = rpcfs.RPCFS("http://%s:%d" % self.server_addr)
def tearDown(self):
    """Stop the serve loop and release the server and temp filesystem."""
    # Ask the loop in runServer() to exit after its next request.
    self.serve_more_requests = False
    #self.server.socket.close()
    # self.server.socket.shutdown(socket.SHUT_RDWR)
    # self.server.socket.close()
    # self.temp_fs.close()
    #self.server_thread.join()
    #self.end_event.wait()
    #return
    try:
        # bump() makes a dummy connection so a blocked handle_request()
        # returns and notices the stop flag; then close the listener.
        self.bump()
        self.server.server_close()
    except Exception:
        pass
    #self.server_thread.join()
    self.temp_fs.close()
def bump(self):
    """Poke the server with a throwaway connection.

    handle_request() blocks in accept(); connecting (and sending a byte)
    wakes it so the serve loop can re-check serve_more_requests.
    """
    host, port = self.server_addr
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, cn, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            sock.settimeout(.1)
            sock.connect(sa)
            sock.send("\n")
        except socket.error, e:
            # Connection failures just mean there was nothing to wake.
            pass
        finally:
            if sock is not None:
                sock.close()
from fs import sftpfs
from fs.expose.sftp import BaseSFTPServer
class TestSFTPFS(TestRPCFS):
    """Run the RPCFS test suite against an SFTP-exposed filesystem."""
    # Skipped on Python 3 (paramiko/sftp support unavailable there).
    __test__ = not PY3

    def makeServer(self,fs,addr):
        # Expose *fs* over SFTP instead of XML-RPC.
        return BaseSFTPServer(addr,fs)

    def setUp(self):
        self.startServer()
        # no_auth: the test server skips authentication entirely.
        self.fs = sftpfs.SFTPFS(self.server_addr, no_auth=True)
#def runServer(self):
# self.server.serve_forever()
#
#def tearDown(self):
# self.server.shutdown()
# self.server_thread.join()
# self.temp_fs.close()
def bump(self):
# paramiko doesn't like being bumped, just wait for it to timeout.
......@@ -62,16 +145,16 @@ except ImportError:
pass
else:
from fs.osfs import OSFS
class TestFUSE(unittest.TestCase, FSTestCases, ThreadingTestCases):
class TestFUSE(unittest.TestCase,FSTestCases,ThreadingTestCases):
def setUp(self):
self.temp_fs = TempFS()
self.temp_fs = TempFS()
self.temp_fs.makedir("root")
self.temp_fs.makedir("mount")
self.mounted_fs = self.temp_fs.opendir("root")
self.mount_point = self.temp_fs.getsyspath("mount")
self.fs = OSFS(self.temp_fs.getsyspath("mount"))
self.mount_proc = fuse.mount(self.mounted_fs, self.mount_point)
self.mount_proc = fuse.mount(self.mounted_fs,self.mount_point)
def tearDown(self):
self.mount_proc.unmount()
......@@ -83,7 +166,7 @@ else:
fuse.unmount(self.mount_point)
self.temp_fs.close()
def check(self, p):
def check(self,p):
return self.mounted_fs.exists(p)
......@@ -126,10 +209,10 @@ if dokan.is_available:
def test_safety_wrapper(self):
rawfs = MemoryFS()
safefs = dokan.Win32SafetyFS(rawfs)
rawfs.setcontents("autoRun.inf", b("evilcodeevilcode"))
rawfs.setcontents("autoRun.inf","evilcodeevilcode")
self.assertTrue(safefs.exists("_autoRun.inf"))
self.assertTrue("autoRun.inf" not in safefs.listdir("/"))
safefs.setcontents("file:stream",b("test"))
safefs.setcontents("file:stream","test")
self.assertFalse(rawfs.exists("file:stream"))
self.assertTrue(rawfs.exists("file__colon__stream"))
self.assertTrue("file:stream" in safefs.listdir("/"))
......
......@@ -5,8 +5,6 @@
"""
from fs.tests import FSTestCases, ThreadingTestCases
from fs.path import *
from fs import errors
import unittest
......@@ -15,6 +13,8 @@ import sys
import shutil
import tempfile
from fs.path import *
from fs import osfs
class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
......@@ -30,18 +30,10 @@ class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def check(self, p):
    """Return True if *p* exists beneath the test's temp directory."""
    target = os.path.join(self.temp_dir, relpath(p))
    return os.path.exists(target)
def test_invalid_chars(self):
    """OSFS rejects NUL bytes in paths, beyond the base-class checks."""
    super(TestOSFS, self).test_invalid_chars()
    # A NUL byte can never appear in an OS-level path.
    self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
    self.assertFalse(self.fs.isvalidpath('invalid\0file'))
    for good in ('validfile', 'completely_valid/path/foo.bar'):
        self.assert_(self.fs.isvalidpath(good))
class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def setUp(self):
def setUp(self):
self.temp_dir = tempfile.mkdtemp(u"fstest")
self.parent_fs = osfs.OSFS(self.temp_dir)
self.parent_fs.makedir("foo/bar", recursive=True)
......@@ -77,7 +69,7 @@ class TestMountFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.fs.close()
def check(self, p):
return self.mount_fs.exists(pathjoin("mounted/memfs", relpath(p)))
return self.mount_fs.exists(os.path.join("mounted/memfs", relpath(p)))
class TestMountFS_atroot(unittest.TestCase,FSTestCases,ThreadingTestCases):
......@@ -101,12 +93,12 @@ class TestMountFS_stacked(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.mount_fs.mountdir("mem", self.mem_fs1)
self.mount_fs.mountdir("mem/two", self.mem_fs2)
self.fs = self.mount_fs.opendir("/mem/two")
def tearDown(self):
    # Release the filesystem wrapper opened in setUp.
    self.fs.close()
def check(self, p):
return self.mount_fs.exists(pathjoin("mem/two", relpath(p)))
return self.mount_fs.exists(os.path.join("mem/two", relpath(p)))
from fs import tempfs
......@@ -123,11 +115,4 @@ class TestTempFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def check(self, p):
    """Return True if *p* exists inside the TempFS's backing directory."""
    backing_dir = self.fs._temp_dir
    return os.path.exists(os.path.join(backing_dir, relpath(p)))
def test_invalid_chars(self):
    """TempFS rejects NUL bytes in paths, beyond the base-class checks."""
    super(TestTempFS, self).test_invalid_chars()
    # A NUL byte can never appear in an OS-level path.
    self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
    self.assertFalse(self.fs.isvalidpath('invalid\0file'))
    for good in ('validfile', 'completely_valid/path/foo.bar'):
        self.assert_(self.fs.isvalidpath(good))
......@@ -12,16 +12,10 @@ import time
from os.path import abspath
import urllib
from six import PY3
try:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from pyftpdlib import ftpserver
except ImportError:
if not PY3:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import *
......@@ -30,8 +24,6 @@ from fs import ftpfs
ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
__test__ = not PY3
def setUp(self):
global ftp_port
ftp_port += 1
......@@ -49,11 +41,11 @@ class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
file_path,
self.temp_dir,
use_port],
stdout=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env)
# Block until the server writes a line to stdout
self.ftp_server.stdout.readline()
# Poll until a connection can be made
start_time = time.time()
while time.time() - start_time < 5:
......@@ -68,14 +60,14 @@ class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
else:
# Avoid a possible infinite loop
raise Exception("Unable to connect to ftp server")
self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', dircache=True, port=use_port, timeout=5.0)
self.fs.cache_hint(True)
def tearDown(self):
#self.ftp_server.terminate()
if sys.platform == 'win32':
#self.ftp_server.terminate()
if sys.platform == 'win32':
os.popen('TASKKILL /PID '+str(self.ftp_server.pid)+' /F')
else:
os.system('kill '+str(self.ftp_server.pid))
......@@ -91,21 +83,21 @@ if __name__ == "__main__":
# Run an ftp server that exposes a given directory
import sys
authorizer = DummyAuthorizer()
authorizer = ftpserver.DummyAuthorizer()
authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
authorizer.add_anonymous(sys.argv[1])
#def nolog(*args):
# pass
#ftpserver.log = nolog
#ftpserver.logline = nolog
def nolog(*args):
pass
ftpserver.log = nolog
ftpserver.logline = nolog
handler = FTPHandler
handler = ftpserver.FTPHandler
handler.authorizer = authorizer
address = ("127.0.0.1", int(sys.argv[2]))
#print address
ftpd = FTPServer(address, handler)
ftpd = ftpserver.FTPServer(address, handler)
sys.stdout.write('serving\n')
sys.stdout.flush()
......
......@@ -10,8 +10,6 @@ from fs.expose.importhook import FSImportHook
from fs.tempfs import TempFS
from fs.zipfs import ZipFS
from six import b
class TestFSImportHook(unittest.TestCase):
......@@ -34,23 +32,23 @@ class TestFSImportHook(unittest.TestCase):
sys.path_importer_cache.clear()
def _init_modules(self,fs):
fs.setcontents("fsih_hello.py",b(dedent("""
fs.setcontents("fsih_hello.py",dedent("""
message = 'hello world!'
""")))
"""))
fs.makedir("fsih_pkg")
fs.setcontents("fsih_pkg/__init__.py",b(dedent("""
fs.setcontents("fsih_pkg/__init__.py",dedent("""
a = 42
""")))
fs.setcontents("fsih_pkg/sub1.py",b(dedent("""
"""))
fs.setcontents("fsih_pkg/sub1.py",dedent("""
import fsih_pkg
from fsih_hello import message
a = fsih_pkg.a
""")))
fs.setcontents("fsih_pkg/sub2.pyc",self._getpyc(b(dedent("""
"""))
fs.setcontents("fsih_pkg/sub2.pyc",self._getpyc(dedent("""
import fsih_pkg
from fsih_hello import message
a = fsih_pkg.a * 2
"""))))
""")))
def _getpyc(self,src):
"""Get the .pyc contents to match th given .py source code."""
......@@ -141,3 +139,4 @@ class TestFSImportHook(unittest.TestCase):
sys.path_hooks.remove(FSImportHook)
sys.path.pop()
t.close()
from __future__ import unicode_literals
from fs import iotools
import io
import unittest
from os.path import dirname, join, abspath
# Python 3 compatibility: 'unicode' no longer exists there, so alias it
# to 'str' for the isinstance() checks in the tests below.
try:
    unicode
except NameError:
    unicode = str
class OpenFilelike(object):
    """Wrap a file factory so open() yields iotools-converted streams.

    *make_f* is a zero-argument callable returning a fresh file-like
    object; each open() call invokes it and the filelike_to_stream
    decorator adapts the result to the requested mode.
    """

    def __init__(self, make_f):
        self.make_f = make_f

    @iotools.filelike_to_stream
    def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        return self.make_f()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Bug fix: the original closed self.f, an attribute never assigned
        # anywhere in this class, so using the instance as a context
        # manager raised AttributeError.  Close a file only if one exists.
        f = getattr(self, 'f', None)
        if f is not None:
            f.close()
class TestIOTools(unittest.TestCase):
    """Tests for the fs.iotools stream-conversion helpers."""

    def get_bin_file(self):
        """Open the bundled UTF-8 sample file in binary mode."""
        data_path = join(dirname(abspath(__file__)), 'data/UTF-8-demo.txt')
        return io.open(data_path, 'rb')

    def test_make_stream(self):
        """Test make_stream"""
        # Reading the raw file yields bytes...
        with self.get_bin_file() as f:
            self.assert_(isinstance(f.read(), bytes))
        # ...while wrapping the same file in a text stream yields unicode.
        with self.get_bin_file() as f:
            with iotools.make_stream("data/UTF-8-demo.txt", f, 'rt') as stream:
                self.assert_(isinstance(stream.read(), unicode))

    def test_decorator(self):
        """Test filelike_to_stream decorator"""
        wrapper = OpenFilelike(self.get_bin_file)
        with wrapper.open('file', 'rb') as f:
            self.assert_(isinstance(f.read(), bytes))
        with wrapper.open('file', 'rt') as f:
            self.assert_(isinstance(f.read(), unicode))
from fs.mountfs import MountFS
from fs.memoryfs import MemoryFS
import unittest
class TestMountFS(unittest.TestCase):
    """Behavioural tests for fs.mountfs.MountFS."""

    def test_auto_close(self):
        """Test MountFS auto close is working"""
        mount = MountFS()
        children = [MemoryFS(), MemoryFS()]
        mount.mount('/m1', children[0])
        mount.mount('/m2', children[1])
        for child in children:
            self.assert_(not child.closed)
        mount.close()
        # Closing the mount propagates to every mounted child by default.
        for child in children:
            self.assert_(child.closed)

    def test_no_auto_close(self):
        """Test MountFS auto close can be disabled"""
        mount = MountFS(auto_close=False)
        children = [MemoryFS(), MemoryFS()]
        mount.mount('/m1', children[0])
        mount.mount('/m2', children[1])
        for child in children:
            self.assert_(not child.closed)
        mount.close()
        # With auto_close off, the children stay open and usable.
        for child in children:
            self.assert_(not child.closed)

    def test_mountfile(self):
        """Test mounting a file"""
        quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
        mem_fs = MemoryFS()
        mem_fs.makedir('foo')
        mem_fs.setcontents('foo/bar.txt', quote)
        foo_dir = mem_fs.opendir('foo')

        mount_fs = MountFS()
        mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)

        # The root is a directory whichever way it is spelled.
        for root in ('/', './', ''):
            self.assert_(mount_fs.isdir(root))

        # Check we can see the mounted file in the dir list
        self.assertEqual(mount_fs.listdir(), ["bar.txt"])
        self.assert_(not mount_fs.exists('nobodyhere.txt'))
        self.assert_(mount_fs.exists('bar.txt'))
        self.assert_(mount_fs.isfile('bar.txt'))
        self.assert_(not mount_fs.isdir('bar.txt'))

        # Check open and getinfo callables
        self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
        self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))

        # Check changes are written back
        mem_fs.setcontents('foo/bar.txt', 'baz')
        self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
        self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))

        # Check changes are written to the original fs
        self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
        self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))

        # Check unmount
        self.assert_(mount_fs.unmount("bar.txt"))
        self.assertEqual(mount_fs.listdir(), [])
        self.assert_(not mount_fs.exists('bar.txt'))

        # Unmounting a second time is a no-op that returns False
        self.assertFalse(mount_fs.unmount("bar.txt"))

    def test_empty(self):
        """Test MountFS with nothing mounted."""
        mount_fs = MountFS()
        self.assertEqual(mount_fs.getinfo(''), {})
        self.assertEqual(mount_fs.getxattr('', 'yo'), None)
        self.assertEqual(mount_fs.listdir(), [])
        self.assertEqual(list(mount_fs.ilistdir()), [])
from fs.multifs import MultiFS
from fs.memoryfs import MemoryFS
import unittest
from six import b
class TestMultiFS(unittest.TestCase):
    """Behavioural tests for fs.multifs.MultiFS."""

    def test_auto_close(self):
        """Test MultiFS auto close is working"""
        multi = MultiFS()
        m1, m2 = MemoryFS(), MemoryFS()
        multi.addfs('m1', m1)
        multi.addfs('m2', m2)
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
        multi.close()
        # Closing the MultiFS closes every child by default.
        self.assert_(m1.closed)
        self.assert_(m2.closed)

    def test_no_auto_close(self):
        """Test MultiFS auto close can be disabled"""
        multi = MultiFS(auto_close=False)
        m1, m2 = MemoryFS(), MemoryFS()
        multi.addfs('m1', m1)
        multi.addfs('m2', m2)
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
        multi.close()
        # With auto_close off, the children stay open.
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)

    def _make_multi(self, priorities):
        """Build a MultiFS of three MemoryFS children named m1..m3.

        Each child holds its own name in a file called "name";
        *priorities* maps child name -> priority kwarg (omitted children
        are added with the default priority).
        """
        multi = MultiFS(auto_close=False)
        for name in ("m1", "m2", "m3"):
            child = MemoryFS()
            child.setcontents("name", b(name))
            if name in priorities:
                multi.addfs(name, child, priority=priorities[name])
            else:
                multi.addfs(name, child)
        return multi

    def test_priority(self):
        """Test priority order is working"""
        # Equal priorities: the last filesystem added wins.
        self.assert_(self._make_multi({}).getcontents("name") == b("m3"))
        # An explicitly higher priority beats later additions.
        self.assert_(self._make_multi({"m2": 10}).getcontents("name") == b("m2"))
        # Ties at the highest priority go to the most recently added.
        self.assert_(self._make_multi({"m2": 10, "m3": 10}).getcontents("name") == b("m3"))
        # The single highest priority always wins.
        self.assert_(self._make_multi({"m1": 11, "m2": 10, "m3": 10}).getcontents("name") == b("m1"))
"""
fs.tests.test_opener: testcases for FS opener
"""
import unittest
import tempfile
import shutil
from fs.opener import opener
from fs import path
class TestOpener(unittest.TestCase):
    """Tests for the default FS opener registry."""

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp(u"fstest_opener")

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def testOpen(self):
        """opener.open() returns a writable file that closes cleanly."""
        target = path.join(self.temp_dir, 'foo.txt')
        f = opener.open(target, 'wb')
        f.close()
        self.assertTrue(f.closed)
......@@ -14,16 +14,15 @@ class TestPathFunctions(unittest.TestCase):
"""Testcases for FS path functions."""
def test_normpath(self):
tests = [ ("\\a\\b\\c", "\\a\\b\\c"),
tests = [ ("\\a\\b\\c", "/a/b/c"),
(".", ""),
("./", ""),
("", ""),
("/.", "/"),
("/a/b/c", "/a/b/c"),
("a/b/c", "a/b/c"),
("a/b/../c/", "a/c"),
("/","/"),
(u"a/\N{GREEK SMALL LETTER BETA}/c",u"a/\N{GREEK SMALL LETTER BETA}/c"),
(u"a/\N{GREEK SMALL LETTER BETA}\\c",u"a/\N{GREEK SMALL LETTER BETA}/c"),
]
for path, result in tests:
self.assertEqual(normpath(path), result)
......@@ -39,7 +38,7 @@ class TestPathFunctions(unittest.TestCase):
("a/b/c", "../d", "c", "a/b/d/c"),
("a/b/c", "../d", "/a", "/a"),
("aaa", "bbb/ccc", "aaa/bbb/ccc"),
("aaa", "bbb\\ccc", "aaa/bbb\\ccc"),
("aaa", "bbb\ccc", "aaa/bbb/ccc"),
("aaa", "bbb", "ccc", "/aaa", "eee", "/aaa/eee"),
("a/b", "./d", "e", "a/b/d/e"),
("/", "/", "/"),
......@@ -51,9 +50,7 @@ class TestPathFunctions(unittest.TestCase):
result = testpaths[-1]
self.assertEqual(pathjoin(*paths), result)
self.assertRaises(ValueError, pathjoin, "..")
self.assertRaises(ValueError, pathjoin, "../")
self.assertRaises(ValueError, pathjoin, "/..")
self.assertRaises(ValueError, pathjoin, "./../")
self.assertRaises(ValueError, pathjoin, "a/b", "../../..")
self.assertRaises(ValueError, pathjoin, "a/b/../../../d")
......@@ -107,7 +104,7 @@ class TestPathFunctions(unittest.TestCase):
self.assertEquals(recursepath("/hello/world/",reverse=True),["/hello/world","/hello","/"])
self.assertEquals(recursepath("hello",reverse=True),["/hello","/"])
self.assertEquals(recursepath("",reverse=True),["/"])
def test_isdotfile(self):
for path in ['.foo',
'.svn',
......@@ -115,14 +112,14 @@ class TestPathFunctions(unittest.TestCase):
'foo/bar/.svn',
'/foo/.bar']:
self.assert_(isdotfile(path))
for path in ['asfoo',
'df.svn',
'foo/er.svn',
'foo/bar/test.txt',
'/foo/bar']:
self.assertFalse(isdotfile(path))
def test_dirname(self):
tests = [('foo', ''),
('foo/bar', 'foo'),
......@@ -132,7 +129,7 @@ class TestPathFunctions(unittest.TestCase):
('/', '/')]
for path, test_dirname in tests:
self.assertEqual(dirname(path), test_dirname)
def test_basename(self):
tests = [('foo', 'foo'),
('foo/bar', 'bar'),
......@@ -141,30 +138,6 @@ class TestPathFunctions(unittest.TestCase):
for path, test_basename in tests:
self.assertEqual(basename(path), test_basename)
def test_iswildcard(self):
    """iswildcard() detects glob metacharacters and nothing else."""
    for pattern in ('*', '*.jpg', 'foo/*', 'foo/{}'):
        self.assert_(iswildcard(pattern))
    for plain in ('foo', 'img.jpg', 'foo/bar'):
        self.assertFalse(iswildcard(plain))
def test_realtivefrom(self):
    """relativefrom() produces the correct ../ traversal from a base path."""
    cases = [
        ('/', '/foo.html', 'foo.html'),
        ('/foo', '/foo/bar.html', 'bar.html'),
        ('/foo/bar/', '/egg.html', '../../egg.html'),
        ('/a/b/c/d', 'e', '../../../../e'),
        ('/a/b/c/d', 'a/d', '../../../d'),
        ('/docs/', 'tags/index.html', '../tags/index.html'),
        ('foo/bar', 'baz/index.html', '../../baz/index.html'),
        ('', 'a', 'a'),
        ('a', 'b/c', '../b/c'),
    ]
    for base, path, expected in cases:
        self.assertEqual(relativefrom(base, path), expected)
class Test_PathMap(unittest.TestCase):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment