Commit 73cac8d1 by Piotr Mitros

Revert to version 0.4.0

parent c30091b0
Metadata-Version: 1.0
Name: fs
Version: 0.4.0
Summary: Filesystem abstraction
Home-page: http://code.google.com/p/pyfilesystem/
Author: Will McGugan
Author-email: will@willmcgugan.com
License: BSD
Download-URL: http://code.google.com/p/pyfilesystem/downloads/list
Description: Pyfilesystem is a module that provides a simplified common interface to many types of filesystem. Filesystems exposed via Pyfilesystem can also be served over the network, or 'mounted' on the native filesystem.
Even if you only need to work with files and directories on the local hard-drive, Pyfilesystem can simplify your code and make it more robust -- with the added advantage that you can change where the files are located by changing a single line of code.
Platform: any
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Topic :: System :: Filesystems
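The "single line" claim in the description is easy to picture. A minimal sketch (illustrative only; it assumes the package's stock OSFS and MemoryFS classes):

    from fs.osfs import OSFS
    #from fs.memoryfs import MemoryFS

    # Where the files actually live is decided by this one line; swapping in
    # MemoryFS() (or any other FS implementation) relocates them.
    home_fs = OSFS('~/')

    for path in home_fs.listdir():
        print path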
@@ -15,12 +15,15 @@ implementations of this interface such as:
 """
-__version__ = "0.5.0"
+__version__ = "0.4.0"
 __author__ = "Will McGugan (will@willmcgugan.com)"
+# No longer necessary - WM
+#from base import *
 # provide these by default so people can use 'fs.path.basename' etc.
-from fs import errors
-from fs import path
+import errors
+import path
 _thread_synchronize_default = True
 def set_thread_synchronize_default(sync):
...
@@ -21,7 +21,6 @@ __all__ = ['UserDataFS',
            'UserCacheFS',
            'UserLogFS']
 class UserDataFS(OSFS):
     """A filesystem for per-user application data."""
     def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
@@ -34,7 +33,7 @@ class UserDataFS(OSFS):
         """
         app_dirs = AppDirs(appname, appauthor, version, roaming)
-        super(UserDataFS, self).__init__(app_dirs.user_data_dir, create=create)
+        super(self.__class__, self).__init__(app_dirs.user_data_dir, create=create)
 class SiteDataFS(OSFS):
@@ -49,7 +48,7 @@ class SiteDataFS(OSFS):
         """
         app_dirs = AppDirs(appname, appauthor, version, roaming)
-        super(SiteDataFS, self).__init__(app_dirs.site_data_dir, create=create)
+        super(self.__class__, self).__init__(app_dirs.site_data_dir, create=create)
 class UserCacheFS(OSFS):
@@ -64,7 +63,7 @@ class UserCacheFS(OSFS):
         """
         app_dirs = AppDirs(appname, appauthor, version, roaming)
-        super(UserCacheFS, self).__init__(app_dirs.user_cache_dir, create=create)
+        super(self.__class__, self).__init__(app_dirs.user_cache_dir, create=create)
 class UserLogFS(OSFS):
@@ -79,11 +78,10 @@ class UserLogFS(OSFS):
         """
         app_dirs = AppDirs(appname, appauthor, version, roaming)
-        super(UserLogFS, self).__init__(app_dirs.user_log_dir, create=create)
+        super(self.__class__, self).__init__(app_dirs.user_log_dir, create=create)
 if __name__ == "__main__":
-    udfs = UserDataFS('exampleapp', appauthor='pyfs')
+    udfs = UserDataFS('sexytime', appauthor='pyfs')
     print udfs
-    udfs2 = UserDataFS('exampleapp2', appauthor='pyfs', create=False)
+    udfs2 = UserDataFS('sexytime2', appauthor='pyfs', create=False)
     print udfs2
import fnmatch
from itertools import chain
import re
class BatchError(Exception):
pass
def _params(*args, **kwargs):
return (args, kwargs)
class BatchBase(object):
def __init__(self):
self._stack = []
self._eval_cache = None
self._eval_level = 0
def _eval(self, paths):
operations = []
for cmd in self._stack[::-1]:
cmd_name, (args, kwargs) = cmd
cmd_func = getattr(self, '_cmd_' + cmd_name, None)
assert cmd_func is not None, "Unknown batch command"
            # Bind cmd_func/args/kwargs now: a plain closure would late-bind
            # them all to the values from the final loop iteration.
            operations.append(lambda paths, cmd_func=cmd_func, args=args, kwargs=kwargs: cmd_func(paths, *args, **kwargs))
def recurse_operations(op_index=0):
if op_index >= len(operations):
for fs, path in paths:
yield fs, path
else:
                for fs, path in operations[op_index](recurse_operations(op_index + 1)):
yield fs, path
for fs, path in recurse_operations():
yield fs, path
def filter(self, *wildcards):
cmd = ('filter', _params(wildcards))
self._stack.append(cmd)
return self
def exclude(self, *wildcards):
cmd = ('exclude', _params(wildcards))
self._stack.append(cmd)
return self
def _cmd_filter(self, fs_paths, wildcards):
wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards]
for fs, path in fs_paths:
for wildcard_re in wildcard_res:
if wildcard_re.match(path):
yield fs, path
def _cmd_exclude(self, fs_paths, wildcards):
wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards]
for fs, path in fs_paths:
for wildcard_re in wildcard_res:
if wildcard_re.match(path):
break
else:
yield fs, path
class Batch(BatchBase):
def __init__(self, *fs, **kwargs):
super(Batch, self).__init__()
self.fs_list = fs
self.recursive = kwargs.get('recursive', False)
def path_iter(self, fs_list):
if self.recursive:
for fs in fs_list:
for path in fs.walkfiles():
yield fs, path
else:
for fs in fs_list:
for path in fs.listdir(full=True, absolute=True):
yield fs, path
def __iter__(self):
return self._eval(self.path_iter(self.fs_list))
def paths(self):
for fs, path in self:
yield path
class BatchList(BatchBase):
    def __init__(self, fs, paths):
        super(BatchList, self).__init__()
        self.fs_list = [(fs, path) for path in paths]
    def __iter__(self):
        return iter(self.fs_list)
class BatchOp(Batch):
def __init__(self):
        super(BatchOp, self).__init__(None)
self._op_stack = []
def remove(self):
cmd = ('remove', _params())
self._op_stack.append(cmd)
return self
def _op_remove(self, fs, path):
fs.remove(path)
def apply(self, fs=None, ignore_errors=False):
def do_call(func, *args, **kwargs):
return func(*args, **kwargs)
        def ignore_exceptions(func, *args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                return None
if ignore_errors:
call_cmd = ignore_exceptions
else:
call_cmd = do_call
        for fs, path in self.path_iter(self.fs_list):
for cmd in self._op_stack:
cmd_name, (args, kwargs) = cmd
cmd_func = getattr(self, '_op_' + cmd_name)
call_cmd(cmd_func, fs, path, *args, **kwargs)
if __name__ == "__main__":
from fs.osfs import OSFS
test_fs = OSFS("/home/will/projects/meshminds/meshminds")
b = Batch(test_fs, recursive=True).exclude("*.py", "*.html")
print list(b.paths())
#b=BatchBase()
#b.filter('*.py')
#print b._eval([[None, 'a/b/c.py'],
# [None, 'a/b/c.pyw']])
\ No newline at end of file
@@ -84,7 +84,7 @@ List contents of [PATH]"""
             file_paths = filter(None, [fs.getpathurl(path, allow_none=True) for path in file_paths])
         dirs = frozenset(dir_paths)
-        paths = sorted(dir_paths + file_paths, key=lambda p: p.lower())
+        paths = sorted(dir_paths + file_paths, key=lambda p:p.lower())
         if not options.all:
             paths = [path for path in paths if not isdotfile(path)]
@@ -109,7 +109,6 @@ List contents of [PATH]"""
         wrap_filename = self.wrap_filename
         wrap_dirname = self.wrap_dirname
         def wrap(path):
             if path in dirs:
                 return wrap_dirname(path.ljust(max_width))
@@ -126,6 +125,7 @@ List contents of [PATH]"""
             return padded_columns
         def condense_columns(columns):
             max_column_height = max([len(col) for col in columns])
             lines = [[] for _ in xrange(max_column_height)]
@@ -134,6 +134,7 @@ List contents of [PATH]"""
                 line.append(path)
             return '\n'.join(u' '.join(line) for line in lines)
         if options.long:
             for path in paths:
                 if path in dirs:
@@ -147,18 +148,18 @@ List contents of [PATH]"""
             smallest_paths = min(path_widths)
             num_paths = len(paths)
-            num_cols = min(terminal_width // (smallest_paths + 2), num_paths)
+            num_cols = min(terminal_width / (smallest_paths + 2), num_paths)
             while num_cols:
-                col_height = (num_paths + num_cols - 1) // num_cols
+                col_height = (num_paths + num_cols - 1) / num_cols
                 line_width = 0
                 for col_no in xrange(num_cols):
                     try:
-                        col_width = max(path_widths[col_no * col_height: (col_no + 1) * col_height])
+                        col_width = max(path_widths[col_no*col_height:(col_no + 1) * col_height])
                     except ValueError:
                         continue
                     line_width += col_width
                     if line_width > terminal_width:
-                        break
+                        break;
                     line_width += 2
                 else:
                     if line_width - 1 <= terminal_width:
@@ -173,3 +174,4 @@ def run():
 if __name__ == "__main__":
     sys.exit(run())
\ No newline at end of file
@@ -8,7 +8,6 @@ import os.path
 platform = platform.system()
 class FSMount(Command):
     if platform == "Windows":
@@ -33,6 +32,7 @@ Mounts a file system on a system path"""
         return optparse
     def do_run(self, options, args):
         windows = platform == "Windows"
@@ -84,7 +84,7 @@ Mounts a file system on a system path"""
             fs, path = self.open_fs(fs_url, create_dir=True)
             if path:
                 if not fs.isdir(path):
-                    self.error('%s is not a directory on %s' % (fs_url, fs))
+                    self.error('%s is not a directory on %s' % (fs_url. fs))
                     return 1
                 fs = fs.opendir(path)
                 path = '/'
@@ -130,10 +130,11 @@ Mounts a file system on a system path"""
                      mount_path,
                      foreground=True)
         else:
-            fs.close = lambda: None
+            fs.close = lambda:None
 def run():
     return FSMount().run()
 if __name__ == "__main__":
     sys.exit(run())
\ No newline at end of file
@@ -5,7 +5,6 @@ import sys
 from fs.opener import opener
 from fs.commands.runner import Command
 from fs.utils import print_fs
-import errno
 class FSServe(Command):
@@ -57,13 +56,6 @@ Serves the contents of PATH with one of a number of methods"""
             self.output("Starting rpc server on %s:%i\n" % (options.addr, port), verbose=True)
             s.serve_forever()
-        elif options.type == 'ftp':
-            from fs.expose.ftp import serve_fs
-            if port is None:
-                port = 21
-            self.output("Starting ftp server on %s:%i\n" % (options.addr, port), verbose=True)
-            serve_fs(fs, options.addr, port)
         elif options.type == 'sftp':
             from fs.expose.sftp import BaseSFTPServer
             import logging
@@ -91,11 +83,11 @@ Serves the contents of PATH with one of a number of methods"""
                 self.error("Server type '%s' not recognised\n" % options.type)
         except IOError, e:
-            if e.errno == errno.EACCES:
+            if e.errno == 13:
                 self.error('Permission denied\n')
                 return 1
             else:
-                self.error(str(e) + '\n')
+                self.error(e.strerror + '\n')
                 return 1
 def run():
...
@@ -19,12 +19,8 @@ Recursively display the contents of PATH in an ascii tree"""
                             help="browse the tree with a gui")
         optparse.add_option('-a', '--all', dest='all', action='store_true', default=False,
                             help="do not hide dot files")
-        optparse.add_option('--dirsfirst', dest='dirsfirst', action='store_true', default=False,
+        optparse.add_option('-d', '--dirsfirst', dest='dirsfirst', action='store_true', default=False,
                             help="List directories before files")
-        optparse.add_option('-P', dest="pattern", default=None,
-                            help="Only list files that match the given pattern")
-        optparse.add_option('-d', dest="dirsonly", default=False, action='store_true',
-                            help="List directories only")
         return optparse
     def do_run(self, options, args):
@@ -47,24 +43,12 @@ Recursively display the contents of PATH in an ascii tree"""
             max_levels = None
         else:
             max_levels = options.depth
-        self.output(self.wrap_dirname(args[0] + '\n'))
-        dircount, filecount = print_fs(fs, path or '',
+        print_fs(fs, path or '',
                  file_out=self.output_file,
                  max_levels=max_levels,
                  terminal_colors=self.terminal_colors,
                  hide_dotfiles=not options.all,
-                 dirs_first=options.dirsfirst,
-                 files_wildcard=options.pattern,
-                 dirs_only=options.dirsonly)
-        self.output('\n')
-        def pluralize(one, many, count):
-            if count == 1:
-                return '%i %s' % (count, one)
-            else:
-                return '%i %s' % (count, many)
-        self.output("%s, %s\n" % (pluralize('directory', 'directories', dircount),
-                                  pluralize('file', 'files', filecount)))
+                 dirs_first=options.dirsfirst)
 def run():
     return FSTree().run()
...
 import warnings
 warnings.filterwarnings("ignore")
-import sys
-from optparse import OptionParser
 from fs.opener import opener, OpenerError, Opener
 from fs.errors import FSError
 from fs.path import splitext, pathsplit, isdotfile, iswildcard
-import re
+import sys
 import platform
-import six
+from optparse import OptionParser
 from collections import defaultdict
+import re
 if platform.system() == 'Windows':
     def getTerminalSize():
         try:
             ## {{{ http://code.activestate.com/recipes/440694/ (r3)
@@ -40,6 +38,7 @@ if platform.system() == 'Windows':
         return 80, 25
 else:
     def getTerminalSize():
         def ioctl_GWINSZ(fd):
             try:
@@ -66,30 +65,24 @@ else:
                 pass
         return 80, 25
 def _unicode(text):
     if not isinstance(text, unicode):
         return text.decode('ascii', 'replace')
     return text
 class Command(object):
     usage = ''
     version = ''
     def __init__(self, usage='', version=''):
-        if six.PY3:
-            self.output_file = sys.stdout.buffer
-            self.error_file = sys.stderr.buffer
-        else:
-            self.output_file = sys.stdout
-            self.error_file = sys.stderr
+        self.output_file = sys.stdout
+        self.error_file = sys.stderr
         self.encoding = getattr(self.output_file, 'encoding', 'utf-8') or 'utf-8'
         self.verbosity_level = 0
         self.terminal_colors = not sys.platform.startswith('win') and self.is_terminal()
         if self.is_terminal():
-            w, _h = getTerminalSize()
+            w, h = getTerminalSize()
            self.terminal_width = w
         else:
            self.terminal_width = 80
@@ -104,7 +97,7 @@ class Command(object):
     def wrap_dirname(self, dirname):
         if not self.terminal_colors:
             return dirname
-        return '\x1b[1;34m%s\x1b[0m' % dirname
+        return '\x1b[1;32m%s\x1b[0m' % dirname
     def wrap_error(self, msg):
         if not self.terminal_colors:
@@ -117,11 +110,11 @@ class Command(object):
             return fname
         if '://' in fname:
             return fname
-        # if '.' in fname:
-        #     name, ext = splitext(fname)
-        #     fname = u'%s\x1b[36m%s\x1b[0m' % (name, ext)
+        if '.' in fname:
+            name, ext = splitext(fname)
+            fname = u'%s\x1b[36m%s\x1b[0m' % (name, ext)
         if isdotfile(fname):
-            fname = '\x1b[33m%s\x1b[0m' % fname
+            fname = u'\x1b[2m%s\x1b[0m' % fname
         return fname
     def wrap_faded(self, text):
@@ -149,7 +142,6 @@ class Command(object):
         if not self.terminal_colors:
             return text
         re_fs = r'(\S*?://\S*)'
         def repl(matchobj):
             fs_url = matchobj.group(0)
             return self.wrap_link(fs_url)
@@ -218,9 +210,11 @@ class Command(object):
         return raw_input('%s: %s ' % (self.name, msg))
     def text_encode(self, text):
         if not isinstance(text, unicode):
             text = text.decode('ascii', 'replace')
         text = text.encode(self.encoding, 'replace')
         return text
     def output(self, msgs, verbose=False):
@@ -232,8 +226,10 @@ class Command(object):
             self.output_file.write(self.text_encode(msg))
     def output_table(self, table, col_process=None, verbose=False):
         if verbose and not self.verbose:
             return
         if col_process is None:
             col_process = {}
@@ -252,9 +248,7 @@ class Command(object):
                     td = col_process[col_no](td)
                 out_col.append(td)
             lines.append(self.text_encode('%s\n' % ' '.join(out_col).rstrip()))
-        for l in lines:
-            self.output_file.write(l)
-        #self.output(''.join(lines))
+        self.output(''.join(lines))
     def error(self, *msgs):
         for msg in msgs:
@@ -281,7 +275,7 @@ class Command(object):
             desc = getattr(fs_opener, 'desc', '')
             opener_table.append((names, desc))
-        opener_table.sort(key=lambda r: r[0])
+        opener_table.sort(key = lambda r:r[0])
         def wrap_line(text):
@@ -304,13 +298,14 @@ class Command(object):
         for names, desc in opener_table:
             self.output(('-' * self.terminal_width, '\n'))
-            proto = ', '.join([n + '://' for n in names])
+            proto = ', '.join([n+'://' for n in names])
             self.output((self.wrap_dirname('[%s]' % proto), '\n\n'))
             if not desc.strip():
                 desc = "No information available"
             wrap_line(desc)
             self.output('\n')
     def run(self):
         parser = self.get_optparse()
         options, args = parser.parse_args()
@@ -345,7 +340,6 @@ class Command(object):
             opener.add(new_opener)
-        if not six.PY3:
-            args = [unicode(arg, sys.getfilesystemencoding()) for arg in args]
+        args = [unicode(arg, sys.getfilesystemencoding()) for arg in args]
         self.verbose = options.verbose
         try:
...
"""
Some functions for Python3 compatibility.
Not for general usage; the functionality in this file is exposed elsewhere.
"""
import six
from six import PY3
def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callback=None, finished_callback=None):
"""Copy data from a string or a file-like object to a given fs/path"""
if progress_callback is None:
progress_callback = lambda bytes_written: None
bytes_written = 0
f = None
try:
progress_callback(bytes_written)
if hasattr(data, "read"):
read = data.read
chunk = read(chunk_size)
if isinstance(chunk, six.text_type):
f = dst_fs.open(dst_path, 'w')
else:
f = dst_fs.open(dst_path, 'wb')
write = f.write
while chunk:
write(chunk)
bytes_written += len(chunk)
progress_callback(bytes_written)
chunk = read(chunk_size)
else:
if isinstance(data, six.text_type):
f = dst_fs.open(dst_path, 'w')
else:
f = dst_fs.open(dst_path, 'wb')
f.write(data)
bytes_written += len(data)
progress_callback(bytes_written)
if hasattr(f, 'flush'):
f.flush()
if finished_callback is not None:
finished_callback()
finally:
if f is not None:
f.close()
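A usage sketch for the helper above (MemoryFS is just a stand-in target; any FS object works):

    from fs.memoryfs import MemoryFS

    def report(bytes_written):
        print "wrote %i bytes" % bytes_written

    mem_fs = MemoryFS()
    # A file-like object with .read() would be copied in chunks instead.
    copy_file_to_fs(b"hello world", mem_fs, "/hello.txt", progress_callback=report)
    print mem_fs.getcontents("/hello.txt")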
@@ -41,15 +41,11 @@ from fs.base import *
 from fs.path import *
 from fs.errors import *
 from fs.remote import RemoteFileBuffer
-from fs import iotools
 from fs.contrib.davfs.util import *
 from fs.contrib.davfs import xmlobj
 from fs.contrib.davfs.xmlobj import *
-import six
-from six import b
 import errno
-_RETRYABLE_ERRORS = [errno.EADDRINUSE]
 try:
@@ -345,10 +341,8 @@ class DAVFS(FS):
             msg = str(e)
             raise RemoteConnectionError("",msg=msg,details=e)
-    def setcontents(self,path, data=b'', encoding=None, errors=None, chunk_size=1024 * 64):
-        if isinstance(data, six.text_type):
-            data = data.encode(encoding=encoding, errors=errors)
-        resp = self._request(path, "PUT", data)
+    def setcontents(self,path, contents, chunk_size=1024*64):
+        resp = self._request(path,"PUT",contents)
         resp.close()
         if resp.status == 405:
             raise ResourceInvalidError(path)
@@ -357,11 +351,10 @@ class DAVFS(FS):
         if resp.status not in (200,201,204):
             raise_generic_error(resp,"setcontents",path)
-    @iotools.filelike_to_stream
-    def open(self,path,mode="r", **kwargs):
+    def open(self,path,mode="r"):
         mode = mode.replace("b","").replace("t","")
         # Truncate the file if requested
-        contents = b("")
+        contents = ""
         if "w" in mode:
             self.setcontents(path,contents)
         else:
@@ -371,7 +364,7 @@ class DAVFS(FS):
             if "a" not in mode:
                 contents.close()
                 raise ResourceNotFoundError(path)
-            contents = b("")
+            contents = ""
             self.setcontents(path,contents)
         elif contents.status in (401,403):
             contents.close()
...
@@ -73,8 +73,6 @@ from fs.base import fnmatch, NoDefaultMeta
 from util import TahoeUtil
 from connection import Connection
-from six import b
 logger = fs.getLogger('fs.tahoelafs')
 def _fix_path(func):
@@ -157,7 +155,7 @@ class _TahoeLAFS(FS):
             self._log(DEBUG, 'Creating empty file %s' % path)
             if self.getmeta("read_only"):
                 raise errors.UnsupportedError('read only filesystem')
-            self.setcontents(path, b(''))
+            self.setcontents(path, '')
             handler = NullFile()
         else:
             self._log(DEBUG, 'Opening existing file %s for reading' % path)
...
@@ -11,8 +11,6 @@ catch-all exception.
 __all__ = ['FSError',
            'CreateFailedError',
            'PathError',
-           'InvalidPathError',
-           'InvalidCharsInPathError',
            'OperationFailedError',
            'UnsupportedError',
            'RemoteConnectionError',
@@ -20,7 +18,6 @@ __all__ = ['FSError',
            'PermissionDeniedError',
            'FSClosedError',
            'OperationTimeoutError',
-           'RemoveRootError',
            'ResourceError',
            'NoSysPathError',
            'NoMetaError',
@@ -32,14 +29,12 @@ __all__ = ['FSError',
            'ParentDirectoryMissingError',
            'ResourceLockedError',
            'NoMMapError',
-           'BackReferenceError',
            'convert_fs_errors',
-           'convert_os_errors',
+           'convert_os_errors'
            ]
 import sys
 import errno
-import six
 from fs.path import *
 from fs.local_functools import wraps
@@ -64,17 +59,13 @@ class FSError(Exception):
         return str(self.msg % keys)
     def __unicode__(self):
-        keys = {}
-        for k,v in self.__dict__.iteritems():
-            if isinstance(v, six.binary_type):
-                v = v.decode(sys.getfilesystemencoding(), 'replace')
-            keys[k] = v
-        return unicode(self.msg, encoding=sys.getfilesystemencoding(), errors='replace') % keys
+        return unicode(self.msg) % self.__dict__
     def __reduce__(self):
         return (self.__class__,(),self.__dict__.copy(),)
 class CreateFailedError(FSError):
     """An exception thrown when a FS could not be created"""
     default_message = "Unable to create filesystem"
@@ -90,16 +81,6 @@ class PathError(FSError):
         super(PathError,self).__init__(**kwds)
-class InvalidPathError(PathError):
-    """Base exception for fs paths that can't be mapped on to the underlaying filesystem."""
-    default_message = "Path is invalid on this filesystem %(path)s"
-class InvalidCharsInPathError(InvalidPathError):
-    """The path contains characters that are invalid on this filesystem"""
-    default_message = "Path contains invalid characters: %(path)s"
 class OperationFailedError(FSError):
     """Base exception class for errors associated with a specific operation."""
     default_message = "Unable to %(opname)s: unspecified error [%(errno)s - %(details)s]"
@@ -138,10 +119,6 @@ class OperationTimeoutError(OperationFailedError):
     default_message = "Unable to %(opname)s: operation timed out"
-class RemoveRootError(OperationFailedError):
-    default_message = "Can't remove root dir"
 class ResourceError(FSError):
     """Base exception class for error associated with a specific resource."""
     default_message = "Unspecified resource error: %(path)s"
@@ -201,16 +178,11 @@ class ResourceLockedError(ResourceError):
     """Exception raised when a resource can't be used because it is locked."""
     default_message = "Resource is locked: %(path)s"
 class NoMMapError(ResourceError):
     """Exception raise when getmmap fails to create a mmap"""
     default_message = "Can't get mmap for %(path)s"
-class BackReferenceError(ValueError):
-    """Exception raised when too many backrefs exist in a path (ex: '/..', '/docs/../..')."""
 def convert_fs_errors(func):
     """Function wrapper to convert FSError instances into OSError."""
     @wraps(func)
@@ -266,10 +238,6 @@ def convert_os_errors(func):
             raise OperationFailedError(opname,details=e),None,tb
         if e.errno == errno.ENOENT:
             raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
-        if e.errno == errno.EFAULT:
-            # This can happen when listdir a directory that is deleted by another thread
-            # Best to interpret it as a resource not found
-            raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
         if e.errno == errno.ESRCH:
             raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
         if e.errno == errno.ENOTEMPTY:
...
"""
fs.expose.ftp
==============
Expose an FS object over FTP (via pyftpdlib).
This module provides the necessary interfaces to expose an FS object over
FTP, plugging into the infrastructure provided by the 'pyftpdlib' module.
To use this in combination with fsserve, do the following:
$ fsserve -t 'ftp' $HOME
The above will serve your home directory in read-only mode via anonymous FTP on the
loopback address.
"""
import os
import stat
import time
import errno
from functools import wraps
from pyftpdlib import ftpserver
from fs.path import *
from fs.osfs import OSFS
from fs.errors import convert_fs_errors
from fs import iotools
from six import text_type as unicode
# Get these once so we can reuse them:
UID = os.getuid()
GID = os.getgid()
def decode_args(f):
"""
Decodes string arguments using the encoding defined on the method's class.
This decorator is for use on methods (functions which take a class or instance
as the first parameter).
Pyftpdlib (as of 0.7.0) uses str internally, so this decoding is necessary.
"""
@wraps(f)
def wrapper(self, *args):
encoded = []
for arg in args:
if isinstance(arg, str):
arg = arg.decode(self.encoding)
encoded.append(arg)
return f(self, *encoded)
return wrapper
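A minimal sketch of the decorator in use (hypothetical class; only the encoding attribute matters here):

    class Greeter(object):
        encoding = 'utf8'

        @decode_args
        def greet(self, name):
            # 'name' arrived as a byte string and was decoded to unicode above
            return u"hello " + name

    print Greeter().greet('world')   # -> hello world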
class FakeStat(object):
"""
Pyftpdlib uses stat inside the library. This class emulates the standard
os.stat_result class to make pyftpdlib happy. Think of it as a stat-like
object ;-).
"""
def __init__(self, **kwargs):
for attr in dir(stat):
if not attr.startswith('ST_'):
continue
attr = attr.lower()
value = kwargs.get(attr, 0)
setattr(self, attr, value)
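A small illustration: the keyword names mirror the lowercased ST_* constants, and unset fields default to 0:

    st = FakeStat(st_size=1024, st_mode=stat.S_IFREG | 0660)
    print st.st_size, st.st_uid   # -> 1024 0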
class FTPFS(ftpserver.AbstractedFS):
"""
The basic FTP Filesystem. This is a bridge between a pyfs filesystem and pyftpdlib's
AbstractedFS. This class will cause the FTP server to serve the given fs instance.
"""
encoding = 'utf8'
"Sets the encoding to use for paths."
def __init__(self, fs, root, cmd_channel, encoding=None):
self.fs = fs
if encoding is not None:
self.encoding = encoding
super(FTPFS, self).__init__(root, cmd_channel)
def close(self):
# Close and dereference the pyfs file system.
if self.fs:
self.fs.close()
self.fs = None
def validpath(self, path):
try:
normpath(path)
return True
        except Exception:
return False
@convert_fs_errors
@decode_args
@iotools.filelike_to_stream
def open(self, path, mode, **kwargs):
return self.fs.open(path, mode, **kwargs)
@convert_fs_errors
def chdir(self, path):
        # We don't use the decorator here; instead we decode a copy of the
        # path for use with pyfs, but keep the original for use with pyftpdlib.
if not isinstance(path, unicode):
# pyftpdlib 0.7.x
unipath = unicode(path, self.encoding)
else:
# pyftpdlib 1.x
unipath = path
# TODO: can the following conditional checks be farmed out to the fs?
# If we don't raise an error here for files, then the FTP server will
# happily allow the client to CWD into a file. We really only want to
# allow that for directories.
if self.fs.isfile(unipath):
raise OSError(errno.ENOTDIR, 'Not a directory')
# similarly, if we don't check for existence, the FTP server will allow
# the client to CWD into a non-existent directory.
if not self.fs.exists(unipath):
raise OSError(errno.ENOENT, 'Does not exist')
# We use the original path here, so we don't corrupt self._cwd
self._cwd = self.ftp2fs(path)
@convert_fs_errors
@decode_args
def mkdir(self, path):
self.fs.makedir(path)
@convert_fs_errors
@decode_args
def listdir(self, path):
return map(lambda x: x.encode(self.encoding), self.fs.listdir(path))
@convert_fs_errors
@decode_args
def rmdir(self, path):
self.fs.removedir(path)
@convert_fs_errors
@decode_args
def remove(self, path):
self.fs.remove(path)
@convert_fs_errors
@decode_args
def rename(self, src, dst):
self.fs.rename(src, dst)
@convert_fs_errors
@decode_args
def chmod(self, path, mode):
return
@convert_fs_errors
@decode_args
def stat(self, path):
info = self.fs.getinfo(path)
kwargs = {
'st_size': info.get('size'),
}
# Give the fs a chance to provide the uid/gid. Otherwise echo the current
# uid/gid.
kwargs['st_uid'] = info.get('st_uid', UID)
kwargs['st_gid'] = info.get('st_gid', GID)
if 'st_atime' in info:
kwargs['st_atime'] = info['st_atime']
elif 'accessed_time' in info:
kwargs['st_atime'] = time.mktime(info["accessed_time"].timetuple())
if 'st_mtime' in info:
kwargs['st_mtime'] = info.get('st_mtime')
elif 'modified_time' in info:
kwargs['st_mtime'] = time.mktime(info["modified_time"].timetuple())
# Pyftpdlib uses st_ctime on Windows platform, try to provide it.
if 'st_ctime' in info:
kwargs['st_ctime'] = info['st_ctime']
elif 'created_time' in info:
kwargs['st_ctime'] = time.mktime(info["created_time"].timetuple())
elif 'st_mtime' in kwargs:
# As a last resort, just copy the modified time.
kwargs['st_ctime'] = kwargs['st_mtime']
# Try to use existing mode.
if 'st_mode' in info:
kwargs['st_mode'] = info['st_mode']
elif 'mode' in info:
kwargs['st_mode'] = info['mode']
else:
# Otherwise, build one. Not executable by default.
mode = 0660
# Merge in the type (dir or file). File is tested first, some file systems
# such as ArchiveMountFS treat archive files as directories too. By checking
# file first, any such files will be only files (not directories).
if self.fs.isfile(path):
mode |= stat.S_IFREG
elif self.fs.isdir(path):
mode |= stat.S_IFDIR
mode |= 0110 # Merge in exec bit to signal dir is listable
kwargs['st_mode'] = mode
return FakeStat(**kwargs)
# No link support...
lstat = stat
@convert_fs_errors
@decode_args
def isfile(self, path):
return self.fs.isfile(path)
@convert_fs_errors
@decode_args
def isdir(self, path):
return self.fs.isdir(path)
@convert_fs_errors
@decode_args
def getsize(self, path):
return self.fs.getsize(path)
@convert_fs_errors
@decode_args
def getmtime(self, path):
return self.stat(path).st_mtime
def realpath(self, path):
return path
def lexists(self, path):
return True
class FTPFSHandler(ftpserver.FTPHandler):
"""
An FTPHandler class that closes the filesystem when done.
"""
def close(self):
# Close the FTPFS instance, it will close the pyfs file system.
if self.fs:
self.fs.close()
super(FTPFSHandler, self).close()
class FTPFSFactory(object):
"""
A factory class which can hold a reference to a file system object and
encoding, then later pass it along to an FTPFS instance. An instance of
this object allows multiple FTPFS instances to be created by pyftpdlib
while sharing the same fs.
"""
def __init__(self, fs, encoding=None):
"""
Initializes the factory with an fs instance.
"""
self.fs = fs
self.encoding = encoding
def __call__(self, root, cmd_channel):
"""
This is the entry point of pyftpdlib. We will pass along the two parameters
as well as the previously provided fs instance and encoding.
"""
return FTPFS(self.fs, root, cmd_channel, encoding=self.encoding)
class HomeFTPFS(FTPFS):
"""
A file system which serves a user's home directory.
"""
def __init__(self, root, cmd_channel):
"""
Use the provided user's home directory to create an FTPFS that serves an OSFS
rooted at the home directory.
"""
        super(HomeFTPFS, self).__init__(OSFS(root_path=root), '/', cmd_channel)
def serve_fs(fs, addr, port):
"""
Creates a basic anonymous FTP server serving the given FS on the given address/port
combo.
"""
from pyftpdlib.contrib.authorizers import UnixAuthorizer
ftp_handler = FTPFSHandler
ftp_handler.authorizer = ftpserver.DummyAuthorizer()
ftp_handler.authorizer.add_anonymous('/')
ftp_handler.abstracted_fs = FTPFSFactory(fs)
s = ftpserver.FTPServer((addr, port), ftp_handler)
s.serve_forever()
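A direct usage sketch of the helper above (assumes the legacy pyftpdlib 0.7.x ftpserver API imported by this module; the call blocks until interrupted):

    from fs.osfs import OSFS

    # Anonymous, read-only FTP server for the current directory on port 2121.
    serve_fs(OSFS('.'), '127.0.0.1', 2121)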
@@ -22,27 +22,6 @@ from platform import machine, system
 from stat import S_IFDIR
 from traceback import print_exc
-_system = system()
-_machine = machine()
-# Locate the fuse shared library.
-# On OSX this can be provided by a number of different packages
-# with slightly incompatible interfaces.
-if _system == 'Darwin':
-    _libfuse_path = find_library('fuse4x') or find_library('fuse')
-else:
-    _libfuse_path = find_library('fuse')
-if not _libfuse_path:
-    raise EnvironmentError('Unable to find libfuse')
-if _system == 'Darwin':
-    _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL)  # libfuse dependency
-_libfuse = CDLL(_libfuse_path)
-# Check whether OSX is using the legacy "macfuse" system.
-# This has a different struct layout than the newer fuse4x system.
-if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
-    _system = 'Darwin-MacFuse'
 class c_timespec(Structure):
     _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
@@ -53,7 +32,9 @@ class c_utimbuf(Structure):
 class c_stat(Structure):
     pass # Platform dependent
-if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
+_system = system()
+if _system in ('Darwin', 'FreeBSD'):
+    _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
     ENOTSUP = 45
     c_dev_t = c_int32
     c_fsblkcnt_t = c_ulong
@@ -67,29 +48,6 @@ if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
                            c_size_t, c_int, c_uint32)
     getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                            c_size_t, c_uint32)
-    # OSX with fuse4x uses 64-bit inodes and so has a different
-    # struct layout. Other darwinish platforms use 32-bit inodes.
-    if _system == 'Darwin':
-        c_stat._fields_ = [
-            ('st_dev', c_dev_t),
-            ('st_mode', c_mode_t),
-            ('st_nlink', c_uint16),
-            ('st_ino', c_uint64),
-            ('st_uid', c_uid_t),
-            ('st_gid', c_gid_t),
-            ('st_rdev', c_dev_t),
-            ('st_atimespec', c_timespec),
-            ('st_mtimespec', c_timespec),
-            ('st_ctimespec', c_timespec),
-            ('st_birthtimespec', c_timespec),
-            ('st_size', c_off_t),
-            ('st_blocks', c_int64),
-            ('st_blksize', c_int32),
-            ('st_flags', c_int32),
-            ('st_gen', c_int32),
-            ('st_lspare', c_int32),
-            ('st_qspare', c_int64)]
-    else:
     c_stat._fields_ = [
         ('st_dev', c_dev_t),
         ('st_ino', c_uint32),
@@ -104,7 +62,6 @@ if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
         ('st_size', c_off_t),
         ('st_blocks', c_int64),
         ('st_blksize', c_int32)]
 elif _system == 'Linux':
     ENOTSUP = 95
     c_dev_t = c_ulonglong
@@ -282,6 +239,10 @@ def set_st_attrs(st, attrs):
         setattr(st, key, val)
+_libfuse_path = find_library('fuse')
+if not _libfuse_path:
+    raise EnvironmentError('Unable to find libfuse')
+_libfuse = CDLL(_libfuse_path)
 _libfuse.fuse_get_context.restype = POINTER(fuse_context)
...
@@ -68,7 +68,7 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
         ctype = self.guess_type(path)
         try:
             info = self._fs.getinfo(path)
-            f = self._fs.open(path, 'rb')
+            f = self._fs.open(path, 'r')
         except FSError, e:
             self.send_error(404, str(e))
             return None
...
@@ -42,8 +42,6 @@ from fs.opener import fsopendir, OpenerError
 from fs.errors import *
 from fs.path import *
-from six import b
 class FSImportHook(object):
     """PEP-302-compliant module finder and loader for FS objects.
@@ -85,7 +83,7 @@ class FSImportHook(object):
         import machinery of the running process, if it is not already
         installed.
         """
-        for imp in enumerate(sys.path_hooks):
+        for i,imp in enumerate(sys.path_hooks):
             try:
                 if issubclass(cls,imp):
                     break
@@ -206,9 +204,9 @@ class FSImportHook(object):
         if info is None:
             info = self._get_module_info(fullname)
         (path,type,ispkg) = info
-        code = self.fs.getcontents(path, 'rb')
+        code = self.fs.getcontents(path)
         if type == imp.PY_SOURCE:
-            code = code.replace(b("\r\n"),b("\n"))
+            code = code.replace("\r\n","\n")
             return compile(code,path,"exec")
         elif type == imp.PY_COMPILED:
             if code[:4] != imp.get_magic():
@@ -225,12 +223,12 @@ class FSImportHook(object):
         (path,type,ispkg) = info
         if type != imp.PY_SOURCE:
             return None
-        return self.fs.getcontents(path, 'rb').replace(b("\r\n"),b("\n"))
+        return self.fs.getcontents(path).replace("\r\n","\n")
     def get_data(self,path):
         """Read the specified data file."""
         try:
-            return self.fs.getcontents(path, 'rb')
+            return self.fs.getcontents(path)
         except FSError, e:
             raise IOError(str(e))
...
# Work in progress
\ No newline at end of file
try:
from json import dumps, loads
except ImportError:
from simplejson import dumps, loads
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def encode(header='', payload=''):
    def textsize(s):
        # An empty component is encoded as an empty size field, not '0'
        if s:
            return str(len(s))
        return ''
    # textsize returns strings, so string (not integer) format specifiers are needed
    return '%s,%s:%s%s' % (textsize(header), textsize(payload), header, payload)
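A few worked encodings (the two size fields are character counts, netstring-style; an empty component is encoded as an empty size field):

    encode('HDR', 'payload')    # -> '3,7:HDRpayload'
    encode(payload='payload')   # -> ',7:payload'
    encode()                    # -> ',:'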
class FileEncoder(object):
def __init__(self, f):
self.f = f
def write(self, header='', payload=''):
fwrite = self.f.write
def textsize(s):
if s:
return str(len(s))
return ''
fwrite('%s,%s:' % (textsize(header), textsize(payload)))
if header:
fwrite(header)
if payload:
fwrite(payload)
class JSONFileEncoder(FileEncoder):
def write(self, header=None, payload=''):
if header is None:
super(JSONFileEncoder, self).write('', payload)
else:
header_json = dumps(header, separators=(',', ':'))
super(JSONFileEncoder, self).write(header_json, payload)
class DecoderError(Exception):
pass
class PreludeError(DecoderError):
pass
class Decoder(object):
STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
MAX_PRELUDE = 255
def __init__(self, no_prelude=False, prelude_callback=None):
self.prelude_callback = prelude_callback
self.stream_broken = False
self.expecting_bytes = None
self.stage = self.STAGE_PRELUDE
self._prelude = []
self._size = []
self._expecting_bytes = None
self.header_size = None
self.payload_size = None
self._header_bytes = None
self._payload_bytes = None
self._header_data = []
self._payload_data = []
self.header = None
self.payload = None
if no_prelude:
self.stage = self.STAGE_SIZE
def feed(self, data):
if self.stream_broken:
raise DecoderError('Stream is broken')
STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
size_append = self._size.append
header_append = self._header_data.append
payload_append = self._payload_data.append
datafind = data.find
def reset_packet():
self.expecting_bytes = None
del self._header_data[:]
del self._payload_data[:]
self.header = None
self.payload = None
data_len = len(data)
data_pos = 0
expecting_bytes = self.expecting_bytes
stage = self.stage
if stage == STAGE_PRELUDE:
max_find = min(len(data), data_pos + self.MAX_PRELUDE)
cr_pos = datafind('\n', data_pos, max_find)
if cr_pos == -1:
self._prelude.append(data[data_pos:])
data_pos = max_find
if sum(len(s) for s in self._prelude) > self.MAX_PRELUDE:
self.stream_broken = True
raise PreludeError('Prelude not found')
else:
self._prelude.append(data[data_pos:cr_pos])
if sum(len(s) for s in self._prelude) > self.MAX_PRELUDE:
self.stream_broken = True
raise PreludeError('Prelude not found')
data_pos = cr_pos + 1
prelude = ''.join(self._prelude)
del self._prelude[:]
reset_packet()
if not self.on_prelude(prelude):
                self.stream_broken = True
return
stage = STAGE_SIZE
while data_pos < data_len:
if stage == STAGE_HEADER:
bytes_to_read = min(data_len - data_pos, expecting_bytes)
header_append(data[data_pos:data_pos + bytes_to_read])
data_pos += bytes_to_read
expecting_bytes -= bytes_to_read
if not expecting_bytes:
self.header = ''.join(self._header_data)
if not self.payload_size:
yield self.header, ''
reset_packet()
expecting_bytes = None
stage = STAGE_SIZE
else:
stage = STAGE_PAYLOAD
expecting_bytes = self.payload_size
elif stage == STAGE_PAYLOAD:
bytes_to_read = min(data_len - data_pos, expecting_bytes)
payload_append(data[data_pos:data_pos + bytes_to_read])
data_pos += bytes_to_read
expecting_bytes -= bytes_to_read
if not expecting_bytes:
self.payload = ''.join(self._payload_data)
yield self.header, self.payload
reset_packet()
stage = STAGE_SIZE
expecting_bytes = None
elif stage == STAGE_SIZE:
term_pos = datafind(':', data_pos)
if term_pos == -1:
size_append(data[data_pos:])
break
else:
size_append(data[data_pos:term_pos])
data_pos = term_pos + 1
size = ''.join(self._size)
del self._size[:]
if ',' in size:
header_size, payload_size = size.split(',', 1)
else:
header_size = size
payload_size = ''
try:
self.header_size = int(header_size or '0')
self.payload_size = int(payload_size or '0')
except ValueError:
                        self.stream_broken = True
                        raise DecoderError('Invalid size in packet (%s)' % size)
if self.header_size:
expecting_bytes = self.header_size
stage = STAGE_HEADER
elif self.payload_size:
expecting_bytes = self.payload_size
stage = STAGE_PAYLOAD
else:
# A completely empty packet, permitted, if a little odd
yield '', ''
reset_packet()
expecting_bytes = None
self.expecting_bytes = expecting_bytes
self.stage = stage
def on_prelude(self, prelude):
if self.prelude_callback and not self.prelude_callback(self, prelude):
return False
#pass
#print "Prelude:", prelude
return True
class JSONDecoder(Decoder):
def feed(self, data):
for header, payload in Decoder.feed(self, data):
if header:
header = loads(header)
else:
header = {}
yield header, payload
if __name__ == "__main__":
f = StringIO()
encoder = JSONFileEncoder(f)
encoder.write(dict(a=1, b=2), 'Payload')
encoder.write(dict(foo="bar", nested=dict(apples="oranges"), alist=range(5)), 'Payload goes here')
encoder.write(None, 'Payload')
encoder.write(dict(a=1))
encoder.write()
stream = 'prelude\n' + f.getvalue()
#print stream
# packets = ['Prelude string\n',
# encode('header', 'payload'),
# encode('header number 2', 'second payload'),
# encode('', '')]
#
# stream = ''.join(packets)
decoder = JSONDecoder()
stream = 'pyfs/0.1\n59,13:{"type":"rpc","method":"ping","client_ref":"-1221142848:1"}Hello, World!'
fdata = StringIO(stream)
while 1:
data = fdata.read(3)
if not data:
break
for header, payload in decoder.feed(data):
print "Header:", repr(header)
print "Payload:", repr(payload)
\ No newline at end of file
from __future__ import with_statement
import socket
import threading
from packetstream import JSONDecoder, JSONFileEncoder
class _SocketFile(object):
def __init__(self, socket):
self.socket = socket
def read(self, size):
try:
return self.socket.recv(size)
except socket.error:
return ''
def write(self, data):
self.socket.sendall(data)
def remote_call(method_name=None):
method = method_name
def deco(f):
if not hasattr(f, '_remote_call_names'):
f._remote_call_names = []
f._remote_call_names.append(method or f.__name__)
return f
return deco
class RemoteResponse(Exception):
def __init__(self, header, payload):
self.header = header
self.payload = payload
class ConnectionHandlerBase(threading.Thread):
_methods = {}
def __init__(self, server, connection_id, socket, address):
super(ConnectionHandlerBase, self).__init__()
self.server = server
self.connection_id = connection_id
self.socket = socket
self.transport = _SocketFile(socket)
self.address = address
self.encoder = JSONFileEncoder(self.transport)
self.decoder = JSONDecoder(prelude_callback=self.on_stream_prelude)
self._lock = threading.RLock()
self.socket_error = None
if not self._methods:
for method_name in dir(self):
method = getattr(self, method_name)
if callable(method) and hasattr(method, '_remote_call_names'):
for name in method._remote_call_names:
self._methods[name] = method
print self._methods
self.fs = None
def run(self):
self.transport.write('pyfs/1.0\n')
while True:
try:
data = self.transport.read(4096)
except socket.error, socket_error:
print socket_error
self.socket_error = socket_error
break
print "data", repr(data)
if data:
for packet in self.decoder.feed(data):
print repr(packet)
self.on_packet(*packet)
else:
break
self.on_connection_close()
def close(self):
with self._lock:
self.socket.close()
def on_connection_close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.server.on_connection_close(self.connection_id)
def on_stream_prelude(self, packet_stream, prelude):
print "prelude", prelude
return True
def on_packet(self, header, payload):
print '-' * 30
print repr(header)
print repr(payload)
if header['type'] == 'rpc':
method = header['method']
args = header['args']
kwargs = header['kwargs']
method_callable = self._methods[method]
            remote = dict(type='rpcresult',
                          client_ref=header['client_ref'])
try:
response = method_callable(*args, **kwargs)
remote['response'] = response
self.encoder.write(remote, '')
except RemoteResponse, response:
self.encoder.write(response.header, response.payload)
class RemoteFSConnection(ConnectionHandlerBase):
@remote_call()
def auth(self, username, password, resource):
self.username = username
self.password = password
self.resource = resource
from fs.memoryfs import MemoryFS
self.fs = MemoryFS()
class Server(object):
def __init__(self, addr='', port=3000, connection_factory=RemoteFSConnection):
self.addr = addr
self.port = port
self.connection_factory = connection_factory
self.socket = None
self.connection_id = 0
self.threads = {}
self._lock = threading.RLock()
def serve_forever(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.addr, self.port))
sock.listen(5)
try:
while True:
clientsocket, address = sock.accept()
self.on_connect(clientsocket, address)
except KeyboardInterrupt:
pass
try:
self._close_graceful()
except KeyboardInterrupt:
self._close_harsh()
def _close_graceful(self):
"""Tell all threads to exit and wait for them"""
with self._lock:
for connection in self.threads.itervalues():
connection.close()
for connection in self.threads.itervalues():
connection.join()
self.threads.clear()
def _close_harsh(self):
with self._lock:
for connection in self.threads.itervalues():
connection.close()
self.threads.clear()
def on_connect(self, clientsocket, address):
print "Connection from", address
with self._lock:
self.connection_id += 1
thread = self.connection_factory(self,
self.connection_id,
clientsocket,
address)
self.threads[self.connection_id] = thread
thread.start()
def on_connection_close(self, connection_id):
pass
#with self._lock:
# self.threads[connection_id].join()
# del self.threads[connection_id]
if __name__ == "__main__":
server = Server()
server.serve_forever()
\ No newline at end of file
import threading
import Queue as queue
def make_job(job_callable, *args, **kwargs):
""" Returns a callable that calls the supplied callable with given arguements. """
def job():
return job_callable(*args, **kwargs)
return job
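A quick sketch of the helper on its own:

    def add(a, b):
        return a + b

    job = make_job(add, 1, 2)
    print job()   # -> 3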
class _PoolThread(threading.Thread):
""" Internal thread class that runs jobs. """
def __init__(self, queue, name):
super(_PoolThread, self).__init__()
self.queue = queue
self.name = name
def __str__(self):
return self.name
def run(self):
while True:
try:
_priority, job = self.queue.get()
except queue.Empty:
break
if job is None:
break
if callable(job):
try:
job()
except Exception, e:
print e
self.queue.task_done()
class ThreadPool(object):
def __init__(self, num_threads, size=None, name=''):
self.num_threads = num_threads
self.name = name
self.queue = queue.PriorityQueue(size)
self.job_no = 0
self.threads = [_PoolThread(self.queue, '%s #%i' % (name, i)) for i in xrange(num_threads)]
for thread in self.threads:
thread.start()
def _make_priority_key(self, i):
no = self.job_no
self.job_no += 1
return (i, no)
def job(self, job_callable, *args, **kwargs):
""" Post a job to the queue. """
def job():
return job_callable(*args, **kwargs)
priority_key = self._make_priority_key(1)
self.queue.put( (priority_key, job), True )
return priority_key[1]
def flush_quit(self):
""" Quit after all tasks on the queue have been processed. """
for thread in self.threads:
self.queue.put( (self._make_priority_key(1), None) )
for thread in self.threads:
thread.join()
def quit(self):
""" Quit as soon as possible, potentially leaving tasks on the queue. """
for thread in self.threads:
self.queue.put( (self._make_priority_key(0), None) )
for thread in self.threads:
thread.join()
if __name__ == "__main__":
import time
def job(n):
print "Starting #%i" % n
time.sleep(1)
print "Ending #%i" % n
pool = ThreadPool(5, name='test thread')
for n in range(20):
pool.job(job, n)
pool.flush_quit()
\ No newline at end of file
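# Editor's usage sketch: quit() posts its None sentinels at priority 0, which
# sort ahead of ordinary jobs (priority 1) in the PriorityQueue, so workers
# exit without draining the queue; flush_quit() posts them at priority 1, after
# all pending work.
def _threadpool_demo():
    import time
    pool = ThreadPool(2, name='demo')
    for _ in range(10):
        pool.job(time.sleep, 0.1)
    pool.quit()   # returns promptly; queued sleeps may never run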
...@@ -15,7 +15,6 @@ table.dirlist { ...@@ -15,7 +15,6 @@ table.dirlist {
margin:0 auto; margin:0 auto;
font-size:13px; font-size:13px;
color:#666; color:#666;
min-width:960px;
} }
table.dirlist tr.r1 { table.dirlist tr.r1 {
...@@ -59,21 +58,19 @@ table.dirlist tr:hover { ...@@ -59,21 +58,19 @@ table.dirlist tr:hover {
<table class="dirlist"> <table class="dirlist">
<thead>
<tr>
<th>File/Directory</th>
<th>Size</th>
<th>Created Date</th>
</tr>
</thead>
<tbody> <tbody>
% for i, entry in enumerate(dirlist): % for i, entry in enumerate(dirlist):
<tr class="${entry['type']} r${i%2}"> <tr class="${entry['type']} r${i%2}">
<td><a class="link-${entry['type']}" href="${ entry['path'] }">${entry['name']}</a></td> <td><a class="link-${entry['type']}" href="${ entry['path'] }">${entry['name']}</a></td>
<td>${entry['size']}</td> <td>${entry['size']}</td>
<td>${entry['created_time']}</td> <td>${entry['created_time']}</td>
</tr> </tr>
% endfor % endfor
</tbody> </tbody>
</table> </table>
......
from wsgiref.simple_server import make_server
from fs.osfs import OSFS from fs.osfs import OSFS
from wsgi import serve_fs from wsgi import serve_fs
osfs = OSFS('~/') osfs = OSFS('~/')
application = serve_fs(osfs) application = serve_fs(osfs)
\ No newline at end of file
httpd = make_server('', 8000, application)
print "Serving on http://127.0.0.1:8000"
httpd.serve_forever()
...@@ -62,24 +62,22 @@ class WSGIServer(object): ...@@ -62,24 +62,22 @@ class WSGIServer(object):
serving_file.close() serving_file.close()
return self.serve_500(request, str(e)) return self.serve_500(request, str(e))
mime_type = mimetypes.guess_type(basename(path))[0] or b'text/plain' mime_type = mimetypes.guess_type(basename(path))
file_size = self.serve_fs.getsize(path) file_size = self.serve_fs.getsize(path)
headers = [(b'Content-Type', bytes(mime_type)), headers = [('Content-Type', mime_type),
(b'Content-Length', bytes(file_size))] ('Content-Length', str(file_size))]
def gen_file(): def gen_file():
chunk_size = self.chunk_size
read = serving_file.read
try: try:
while 1: while True:
data = read(chunk_size) data = serving_file.read(self.chunk_size)
if not data: if not data:
break break
yield data yield data
finally: finally:
serving_file.close() serving_file.close()
request.start_response(b'200 OK', request.start_response('200 OK',
headers) headers)
return gen_file() return gen_file()
...@@ -123,21 +121,22 @@ class WSGIServer(object): ...@@ -123,21 +121,22 @@ class WSGIServer(object):
# Render the mako template # Render the mako template
html = self.dir_template.render(**dict(fs=self.serve_fs, html = self.dir_template.render(**dict(fs=self.serve_fs,
path=path, path=path,
dirlist=entries)).encode('utf-8') dirlist=entries))
request.start_response(b'200 OK', [(b'Content-Type', b'text/html'),
(b'Content-Length', b'%i' % len(html))]) request.start_response('200 OK', [('Content-Type', 'text/html'),
('Content-Length', '%i' % len(html))])
return [html] return [html]
def serve_404(self, request, msg='Not found'): def serve_404(self, request, msg='Not found'):
"""Serves a Not found page""" """Serves a Not found page"""
request.start_response(b'404 NOT FOUND', [(b'Content-Type', b'text/html')]) request.start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
return [msg] return [msg]
def serve_500(self, request, msg='Unable to complete request'): def serve_500(self, request, msg='Unable to complete request'):
"""Serves an internal server error page""" """Serves an internal server error page"""
request.start_response(b'500 INTERNAL SERVER ERROR', [(b'Content-Type', b'text/html')]) request.start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [msg] return [msg]
......
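# Editor's sketch: gen_file() in the hunk above is the usual WSGI chunked-read
# pattern; a standalone version, assuming only an open file object and a chunk
# size:
def iter_file(serving_file, chunk_size=64 * 1024):
    try:
        while True:
            data = serving_file.read(chunk_size)
            if not data:
                break
            yield data
    finally:
        serving_file.close()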
...@@ -18,11 +18,6 @@ an FS object, which can then be exposed using whatever server you choose ...@@ -18,11 +18,6 @@ an FS object, which can then be exposed using whatever server you choose
import xmlrpclib import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer from SimpleXMLRPCServer import SimpleXMLRPCServer
from datetime import datetime from datetime import datetime
import base64
import six
from six import PY3
class RPCFSInterface(object): class RPCFSInterface(object):
"""Wrapper to expose an FS via a XML-RPC compatible interface. """Wrapper to expose an FS via a XML-RPC compatible interface.
...@@ -31,16 +26,6 @@ class RPCFSInterface(object): ...@@ -31,16 +26,6 @@ class RPCFSInterface(object):
the contents of files. the contents of files.
""" """
# info keys are restricted to a subset known to work over xmlrpc
# This fixes an issue with transporting Longs on Py3
_allowed_info = ["size",
"created_time",
"modified_time",
"accessed_time",
"st_size",
"st_mode",
"type"]
def __init__(self, fs): def __init__(self, fs):
super(RPCFSInterface, self).__init__() super(RPCFSInterface, self).__init__()
self.fs = fs self.fs = fs
...@@ -52,36 +37,31 @@ class RPCFSInterface(object): ...@@ -52,36 +37,31 @@ class RPCFSInterface(object):
must return something that can be represented in ASCII. The default must return something that can be represented in ASCII. The default
is base64-encoded UTF-8. is base64-encoded UTF-8.
""" """
#return path return path.encode("utf8").encode("base64")
return six.text_type(base64.b64encode(path.encode("utf8")), 'ascii')
def decode_path(self, path): def decode_path(self, path):
"""Decode paths arriving over the wire.""" """Decode paths arriving over the wire."""
return six.text_type(base64.b64decode(path.encode('ascii')), 'utf8') return path.decode("base64").decode("utf8")
def getmeta(self, meta_name): def getmeta(self, meta_name):
meta = self.fs.getmeta(meta_name) meta = self.fs.getmeta(meta_name)
if isinstance(meta, basestring):
meta = self.decode_path(meta)
return meta return meta
def getmeta_default(self, meta_name, default): def getmeta_default(self, meta_name, default):
meta = self.fs.getmeta(meta_name, default) meta = self.fs.getmeta(meta_name, default)
if isinstance(meta, basestring):
meta = self.decode_path(meta)
return meta return meta
def hasmeta(self, meta_name): def hasmeta(self, meta_name):
return self.fs.hasmeta(meta_name) return self.fs.hasmeta(meta_name)
def get_contents(self, path, mode="rb"): def get_contents(self, path):
path = self.decode_path(path) path = self.decode_path(path)
data = self.fs.getcontents(path, mode) data = self.fs.getcontents(path)
return xmlrpclib.Binary(data) return xmlrpclib.Binary(data)
def set_contents(self, path, data): def set_contents(self, path, data):
path = self.decode_path(path) path = self.decode_path(path)
self.fs.setcontents(path, data.data) self.fs.setcontents(path,data.data)
def exists(self, path): def exists(self, path):
path = self.decode_path(path) path = self.decode_path(path)
...@@ -97,7 +77,7 @@ class RPCFSInterface(object): ...@@ -97,7 +77,7 @@ class RPCFSInterface(object):
def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
path = self.decode_path(path) path = self.decode_path(path)
entries = self.fs.listdir(path, wildcard, full, absolute, dirs_only, files_only) entries = self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only)
return [self.encode_path(e) for e in entries] return [self.encode_path(e) for e in entries]
def makedir(self, path, recursive=False, allow_recreate=False): def makedir(self, path, recursive=False, allow_recreate=False):
...@@ -127,10 +107,7 @@ class RPCFSInterface(object): ...@@ -127,10 +107,7 @@ class RPCFSInterface(object):
def getinfo(self, path): def getinfo(self, path):
path = self.decode_path(path) path = self.decode_path(path)
info = self.fs.getinfo(path) return self.fs.getinfo(path)
info = dict((k, v) for k, v in info.iteritems()
if k in self._allowed_info)
return info
def desc(self, path): def desc(self, path):
path = self.decode_path(path) path = self.decode_path(path)
...@@ -160,7 +137,7 @@ class RPCFSInterface(object): ...@@ -160,7 +137,7 @@ class RPCFSInterface(object):
dst = self.decode_path(dst) dst = self.decode_path(dst)
return self.fs.copy(src, dst, overwrite, chunk_size) return self.fs.copy(src, dst, overwrite, chunk_size)
def move(self, src, dst, overwrite=False, chunk_size=16384): def move(self,src,dst,overwrite=False,chunk_size=16384):
src = self.decode_path(src) src = self.decode_path(src)
dst = self.decode_path(dst) dst = self.decode_path(dst)
return self.fs.move(src, dst, overwrite, chunk_size) return self.fs.move(src, dst, overwrite, chunk_size)
...@@ -198,10 +175,11 @@ class RPCFSServer(SimpleXMLRPCServer): ...@@ -198,10 +175,11 @@ class RPCFSServer(SimpleXMLRPCServer):
if logRequests is not None: if logRequests is not None:
kwds['logRequests'] = logRequests kwds['logRequests'] = logRequests
self.serve_more_requests = True self.serve_more_requests = True
SimpleXMLRPCServer.__init__(self, addr, **kwds) SimpleXMLRPCServer.__init__(self,addr,**kwds)
self.register_instance(RPCFSInterface(fs)) self.register_instance(RPCFSInterface(fs))
def serve_forever(self): def serve_forever(self):
"""Override serve_forever to allow graceful shutdown.""" """Override serve_forever to allow graceful shutdown."""
while self.serve_more_requests: while self.serve_more_requests:
self.handle_request() self.handle_request()
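# Editor's usage sketch: serving a local directory over XML-RPC with the class
# above; the (fs, addr) argument order follows the __init__ shown in this hunk,
# and the address and port are illustrative.
from fs.osfs import OSFS

def serve_home_over_xmlrpc(port=8000):
    server = RPCFSServer(OSFS('~/'), ('127.0.0.1', port))
    server.serve_forever()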
...@@ -33,6 +33,11 @@ Other useful classes include: ...@@ -33,6 +33,11 @@ Other useful classes include:
import tempfile as _tempfile import tempfile as _tempfile
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
import fs import fs
...@@ -45,17 +50,6 @@ class NotSeekableError(IOError): ...@@ -45,17 +50,6 @@ class NotSeekableError(IOError):
class NotTruncatableError(IOError): class NotTruncatableError(IOError):
pass pass
import six
from six import PY3, b
if PY3:
from six import BytesIO as _StringIO
else:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
class FileLikeBase(object): class FileLikeBase(object):
"""Base class for implementing file-like objects. """Base class for implementing file-like objects.
...@@ -271,14 +265,14 @@ class FileLikeBase(object): ...@@ -271,14 +265,14 @@ class FileLikeBase(object):
if self.closed: if self.closed:
raise IOError("File has been closed") raise IOError("File has been closed")
if self._check_mode("w-") and self._wbuffer is not None: if self._check_mode("w-") and self._wbuffer is not None:
buffered = b("") buffered = ""
if self._sbuffer: if self._sbuffer:
buffered = buffered + self._sbuffer buffered = buffered + self._sbuffer
self._sbuffer = None self._sbuffer = None
buffered = buffered + self._wbuffer buffered = buffered + self._wbuffer
self._wbuffer = None self._wbuffer = None
leftover = self._write(buffered,flushing=True) leftover = self._write(buffered,flushing=True)
if leftover and not isinstance(leftover, int): if leftover:
raise IOError("Could not flush write buffer.") raise IOError("Could not flush write buffer.")
def close(self): def close(self):
...@@ -312,7 +306,7 @@ class FileLikeBase(object): ...@@ -312,7 +306,7 @@ class FileLikeBase(object):
next() returning subsequent lines from the file. next() returning subsequent lines from the file.
""" """
ln = self.readline() ln = self.readline()
if ln == b(""): if ln == "":
raise StopIteration() raise StopIteration()
return ln return ln
...@@ -443,24 +437,24 @@ class FileLikeBase(object): ...@@ -443,24 +437,24 @@ class FileLikeBase(object):
s -= self._bufsize s -= self._bufsize
self._do_read(s) self._do_read(s)
# Should the entire file be read? # Should the entire file be read?
if size < 0: if size <= 0:
if self._rbuffer: if self._rbuffer:
data = [self._rbuffer] data = [self._rbuffer]
else: else:
data = [] data = []
self._rbuffer = b("") self._rbuffer = ""
newData = self._read() newData = self._read()
while newData is not None: while newData is not None:
data.append(newData) data.append(newData)
newData = self._read() newData = self._read()
output = b("").join(data) output = "".join(data)
# Otherwise, we need to return a specific amount of data # Otherwise, we need to return a specific amount of data
else: else:
if self._rbuffer: if self._rbuffer:
newData = self._rbuffer newData = self._rbuffer
data = [newData] data = [newData]
else: else:
newData = b("") newData = ""
data = [] data = []
sizeSoFar = len(newData) sizeSoFar = len(newData)
while sizeSoFar < size: while sizeSoFar < size:
...@@ -469,20 +463,20 @@ class FileLikeBase(object): ...@@ -469,20 +463,20 @@ class FileLikeBase(object):
break break
data.append(newData) data.append(newData)
sizeSoFar += len(newData) sizeSoFar += len(newData)
data = b("").join(data) data = "".join(data)
if sizeSoFar > size: if sizeSoFar > size:
# read too many bytes, store in the buffer # read too many bytes, store in the buffer
self._rbuffer = data[size:] self._rbuffer = data[size:]
data = data[:size] data = data[:size]
else: else:
self._rbuffer = b("") self._rbuffer = ""
output = data output = data
return output return output
def _do_read_rest(self): def _do_read_rest(self):
"""Private method to read the file through to EOF.""" """Private method to read the file through to EOF."""
data = self._do_read(self._bufsize) data = self._do_read(self._bufsize)
while data != b(""): while data != "":
data = self._do_read(self._bufsize) data = self._do_read(self._bufsize)
def readline(self,size=-1): def readline(self,size=-1):
...@@ -494,15 +488,15 @@ class FileLikeBase(object): ...@@ -494,15 +488,15 @@ class FileLikeBase(object):
nextBit = self.read(self._bufsize) nextBit = self.read(self._bufsize)
bits.append(nextBit) bits.append(nextBit)
sizeSoFar += len(nextBit) sizeSoFar += len(nextBit)
if nextBit == b(""): if nextBit == "":
break break
if size > 0 and sizeSoFar >= size: if size > 0 and sizeSoFar >= size:
break break
indx = nextBit.find(b("\n")) indx = nextBit.find("\n")
# If not found, return whole string up to <size> length # If not found, return whole string up to <size> length
# Any leftovers are pushed onto front of buffer # Any leftovers are pushed onto front of buffer
if indx == -1: if indx == -1:
data = b("").join(bits) data = "".join(bits)
if size > 0 and sizeSoFar > size: if size > 0 and sizeSoFar > size:
extra = data[size:] extra = data[size:]
data = data[:size] data = data[:size]
...@@ -514,7 +508,7 @@ class FileLikeBase(object): ...@@ -514,7 +508,7 @@ class FileLikeBase(object):
extra = bits[-1][indx:] extra = bits[-1][indx:]
bits[-1] = bits[-1][:indx] bits[-1] = bits[-1][:indx]
self._rbuffer = extra + self._rbuffer self._rbuffer = extra + self._rbuffer
return b("").join(bits) return "".join(bits)
def readlines(self,sizehint=-1): def readlines(self,sizehint=-1):
"""Return a list of all lines in the file.""" """Return a list of all lines in the file."""
...@@ -531,7 +525,7 @@ class FileLikeBase(object): ...@@ -531,7 +525,7 @@ class FileLikeBase(object):
self._assert_mode("w-") self._assert_mode("w-")
# If we were previously reading, ensure position is correct # If we were previously reading, ensure position is correct
if self._rbuffer is not None: if self._rbuffer is not None:
self.seek(0, 1) self.seek(0,1)
# If we're actually behind the apparent position, we must also # If we're actually behind the apparent position, we must also
# write the data in the gap. # write the data in the gap.
if self._sbuffer: if self._sbuffer:
...@@ -544,16 +538,14 @@ class FileLikeBase(object): ...@@ -544,16 +538,14 @@ class FileLikeBase(object):
string = self._do_read(s) + string string = self._do_read(s) + string
except NotReadableError: except NotReadableError:
raise NotSeekableError("File not readable, could not complete simulation of seek") raise NotSeekableError("File not readable, could not complete simulation of seek")
self.seek(0, 0) self.seek(0,0)
if self._wbuffer: if self._wbuffer:
string = self._wbuffer + string string = self._wbuffer + string
leftover = self._write(string) leftover = self._write(string)
if leftover is None or isinstance(leftover, int): if leftover is None:
self._wbuffer = b("") self._wbuffer = ""
return len(string) - (leftover or 0)
else: else:
self._wbuffer = leftover self._wbuffer = leftover
return len(string) - len(leftover)
def writelines(self,seq): def writelines(self,seq):
"""Write a sequence of lines to the file.""" """Write a sequence of lines to the file."""
...@@ -657,12 +649,12 @@ class FileWrapper(FileLikeBase): ...@@ -657,12 +649,12 @@ class FileWrapper(FileLikeBase):
def _read(self,sizehint=-1): def _read(self,sizehint=-1):
data = self.wrapped_file.read(sizehint) data = self.wrapped_file.read(sizehint)
if data == b(""): if data == "":
return None return None
return data return data
def _write(self,string,flushing=False): def _write(self,string,flushing=False):
self.wrapped_file.write(string) return self.wrapped_file.write(string)
def _seek(self,offset,whence): def _seek(self,offset,whence):
self.wrapped_file.seek(offset,whence) self.wrapped_file.seek(offset,whence)
...@@ -702,7 +694,7 @@ class StringIO(FileWrapper): ...@@ -702,7 +694,7 @@ class StringIO(FileWrapper):
if size > curlen: if size > curlen:
self.wrapped_file.seek(curlen) self.wrapped_file.seek(curlen)
try: try:
self.wrapped_file.write(b("\x00")*(size-curlen)) self.wrapped_file.write("\x00"*(size-curlen))
finally: finally:
self.wrapped_file.seek(pos) self.wrapped_file.seek(pos)
...@@ -724,7 +716,6 @@ class SpooledTemporaryFile(FileWrapper): ...@@ -724,7 +716,6 @@ class SpooledTemporaryFile(FileWrapper):
stf_args = (max_size,mode,bufsize) + args stf_args = (max_size,mode,bufsize) + args
wrapped_file = _tempfile.SpooledTemporaryFile(*stf_args,**kwds) wrapped_file = _tempfile.SpooledTemporaryFile(*stf_args,**kwds)
wrapped_file._file = StringIO() wrapped_file._file = StringIO()
#wrapped_file._file = six.BytesIO()
self.__is_spooled = True self.__is_spooled = True
except AttributeError: except AttributeError:
ntf_args = (mode,bufsize) + args ntf_args = (mode,bufsize) + args
......
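# Editor's usage sketch: FileWrapper delegates _read/_write/_seek to a wrapped
# file object, so any ordinary Python 2 file picks up the FileLikeBase
# buffering behaviour patched in this hunk. The path is illustrative.
from fs.filelike import FileWrapper

def filewrapper_demo(path='/tmp/filelike_demo.txt'):
    wrapped = FileWrapper(open(path, 'w+'), mode='w+')
    wrapped.write('one\ntwo\n')
    wrapped.seek(0)
    print wrapped.readline()   # -> 'one\n'
    wrapped.close()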
...@@ -14,7 +14,6 @@ import fs ...@@ -14,7 +14,6 @@ import fs
from fs.base import * from fs.base import *
from fs.errors import * from fs.errors import *
from fs.path import pathsplit, abspath, dirname, recursepath, normpath, pathjoin, isbase from fs.path import pathsplit, abspath, dirname, recursepath, normpath, pathjoin, isbase
from fs import iotools
from ftplib import FTP, error_perm, error_temp, error_proto, error_reply from ftplib import FTP, error_perm, error_temp, error_proto, error_reply
...@@ -30,15 +29,9 @@ import calendar ...@@ -30,15 +29,9 @@ import calendar
from socket import error as socket_error from socket import error as socket_error
from fs.local_functools import wraps from fs.local_functools import wraps
import six try:
from six import PY3, b
if PY3:
from six import BytesIO as StringIO
else:
try:
from cStringIO import StringIO from cStringIO import StringIO
except ImportError: except ImportError:
from StringIO import StringIO from StringIO import StringIO
import time import time
...@@ -337,7 +330,6 @@ class FTPListDataParser(object): ...@@ -337,7 +330,6 @@ class FTPListDataParser(object):
i = 0 i = 0
while (i + 3) < len(result.name): while (i + 3) < len(result.name):
if result.name[i:i+4] == ' -> ': if result.name[i:i+4] == ' -> ':
result.target = result.name[i+4:]
result.name = result.name[:i] result.name = result.name[:i]
break break
i += 1 i += 1
...@@ -656,7 +648,7 @@ class _FTPFile(object): ...@@ -656,7 +648,7 @@ class _FTPFile(object):
@fileftperrors @fileftperrors
def read(self, size=None): def read(self, size=None):
if self.conn is None: if self.conn is None:
return b('') return ''
chunks = [] chunks = []
if size is None or size < 0: if size is None or size < 0:
...@@ -669,7 +661,7 @@ class _FTPFile(object): ...@@ -669,7 +661,7 @@ class _FTPFile(object):
break break
chunks.append(data) chunks.append(data)
self.read_pos += len(data) self.read_pos += len(data)
return b('').join(chunks) return ''.join(chunks)
remaining_bytes = size remaining_bytes = size
while remaining_bytes: while remaining_bytes:
...@@ -684,7 +676,7 @@ class _FTPFile(object): ...@@ -684,7 +676,7 @@ class _FTPFile(object):
self.read_pos += len(data) self.read_pos += len(data)
remaining_bytes -= len(data) remaining_bytes -= len(data)
return b('').join(chunks) return ''.join(chunks)
@fileftperrors @fileftperrors
def write(self, data): def write(self, data):
...@@ -744,7 +736,7 @@ class _FTPFile(object): ...@@ -744,7 +736,7 @@ class _FTPFile(object):
self.ftp = self.ftpfs._open_ftp() self.ftp = self.ftpfs._open_ftp()
self.ftp.sendcmd('TYPE I') self.ftp.sendcmd('TYPE I')
self.ftp.sendcmd('REST %i' % (new_pos)) self.ftp.sendcmd('REST %i' % (new_pos))
self.__init__(self.ftpfs, self.ftp, self.path, self.mode) self.__init__(self.ftpfs, self.ftp, _encode(self.path), self.mode)
self.read_pos = new_pos self.read_pos = new_pos
finally: finally:
self._lock.release() self._lock.release()
...@@ -814,11 +806,11 @@ class _FTPFile(object): ...@@ -814,11 +806,11 @@ class _FTPFile(object):
This isn't terribly efficient. It would probably be better to do This isn't terribly efficient. It would probably be better to do
a read followed by splitlines. a read followed by splitlines.
""" """
endings = b('\r\n') endings = '\r\n'
chars = [] chars = []
append = chars.append append = chars.append
read = self.read read = self.read
join = b('').join join = ''.join
while True: while True:
char = read(1) char = read(1)
if not char: if not char:
...@@ -892,7 +884,7 @@ class FTPFS(FS): ...@@ -892,7 +884,7 @@ class FTPFS(FS):
'file.read_and_write' : False, 'file.read_and_write' : False,
} }
def __init__(self, host='', user='', passwd='', acct='', timeout=_GLOBAL_DEFAULT_TIMEOUT, port=21, dircache=True, follow_symlinks=False): def __init__(self, host='', user='', passwd='', acct='', timeout=_GLOBAL_DEFAULT_TIMEOUT, port=21, dircache=True):
"""Connect to a FTP server. """Connect to a FTP server.
:param host: Host to connect to :param host: Host to connect to
...@@ -918,7 +910,6 @@ class FTPFS(FS): ...@@ -918,7 +910,6 @@ class FTPFS(FS):
self.timeout = timeout self.timeout = timeout
self.default_timeout = timeout is _GLOBAL_DEFAULT_TIMEOUT self.default_timeout = timeout is _GLOBAL_DEFAULT_TIMEOUT
self.use_dircache = dircache self.use_dircache = dircache
self.follow_symlinks = follow_symlinks
self.use_mlst = False self.use_mlst = False
self._lock = threading.RLock() self._lock = threading.RLock()
...@@ -1020,30 +1011,6 @@ class FTPFS(FS): ...@@ -1020,30 +1011,6 @@ class FTPFS(FS):
pass pass
self.dircache[path] = dirlist self.dircache[path] = dirlist
def is_symlink(info):
return info['try_retr'] and info['try_cwd'] and info.has_key('target')
def resolve_symlink(linkpath):
linkinfo = self.getinfo(linkpath)
if not linkinfo.has_key('resolved'):
linkinfo['resolved'] = linkpath
if is_symlink(linkinfo):
target = linkinfo['target']
base, fname = pathsplit(linkpath)
return resolve_symlink(pathjoin(base, target))
else:
return linkinfo
if self.follow_symlinks:
for name in dirlist:
if is_symlink(dirlist[name]):
target = dirlist[name]['target']
linkinfo = resolve_symlink(pathjoin(path, target))
for key in linkinfo:
if key != 'name':
dirlist[name][key] = linkinfo[key]
del dirlist[name]['target']
return dirlist return dirlist
@synchronize @synchronize
...@@ -1179,9 +1146,8 @@ class FTPFS(FS): ...@@ -1179,9 +1146,8 @@ class FTPFS(FS):
url = 'ftp://%s@%s%s' % (credentials, self.host.rstrip('/'), abspath(path)) url = 'ftp://%s@%s%s' % (credentials, self.host.rstrip('/'), abspath(path))
return url return url
@iotools.filelike_to_stream
@ftperrors @ftperrors
def open(self, path, mode, buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs): def open(self, path, mode='r'):
path = normpath(path) path = normpath(path)
mode = mode.lower() mode = mode.lower()
if self.isdir(path): if self.isdir(path):
...@@ -1196,21 +1162,19 @@ class FTPFS(FS): ...@@ -1196,21 +1162,19 @@ class FTPFS(FS):
return f return f
@ftperrors @ftperrors
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64): def setcontents(self, path, data, chunk_size=1024*64):
path = normpath(path) path = normpath(path)
data = iotools.make_bytes_io(data, encoding=encoding, errors=errors) if isinstance(data, basestring):
data = StringIO(data)
self.refresh_dircache(dirname(path)) self.refresh_dircache(dirname(path))
self.ftp.storbinary('STOR %s' % _encode(path), data, blocksize=chunk_size) self.ftp.storbinary('STOR %s' % _encode(path), data, blocksize=chunk_size)
@ftperrors @ftperrors
def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None): def getcontents(self, path):
path = normpath(path) path = normpath(path)
contents = StringIO() contents = StringIO()
self.ftp.retrbinary('RETR %s' % _encode(path), contents.write, blocksize=1024*64) self.ftp.retrbinary('RETR %s' % _encode(path), contents.write, blocksize=1024*64)
data = contents.getvalue() return contents.getvalue()
if 'b' in data:
return data
return iotools.decode_binary(data, encoding=encoding, errors=errors)
@ftperrors @ftperrors
def exists(self, path): def exists(self, path):
...@@ -1329,8 +1293,6 @@ class FTPFS(FS): ...@@ -1329,8 +1293,6 @@ class FTPFS(FS):
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if self.isfile(path): if self.isfile(path):
raise ResourceInvalidError(path) raise ResourceInvalidError(path)
if normpath(path) in ('', '/'):
raise RemoveRootError(path)
if not force: if not force:
for _checkpath in self.listdir(path): for _checkpath in self.listdir(path):
...@@ -1351,7 +1313,6 @@ class FTPFS(FS): ...@@ -1351,7 +1313,6 @@ class FTPFS(FS):
pass pass
if recursive: if recursive:
try: try:
if dirname(path) not in ('', '/'):
self.removedir(dirname(path), recursive=True) self.removedir(dirname(path), recursive=True)
except DirectoryNotEmptyError: except DirectoryNotEmptyError:
pass pass
......
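# Editor's usage sketch: connecting with the FTPFS constructor shown above.
# Host, credentials and the file path are placeholders; under the 0.4.0 code
# on the right, open() takes only simple 'r'/'w' style modes and there is no
# follow_symlinks option.
from fs.ftpfs import FTPFS

def ftpfs_demo():
    ftp_fs = FTPFS('ftp.example.org', user='anonymous', passwd='guest@example.org')
    print ftp_fs.listdir('/')
    f = ftp_fs.open('/welcome.msg', 'r')
    print f.read()
    f.close()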
...@@ -8,12 +8,9 @@ fs.httpfs ...@@ -8,12 +8,9 @@ fs.httpfs
from fs.base import FS from fs.base import FS
from fs.path import normpath from fs.path import normpath
from fs.errors import ResourceNotFoundError, UnsupportedError from fs.errors import ResourceNotFoundError, UnsupportedError
from fs.filelike import FileWrapper
from fs import iotools
from urllib2 import urlopen, URLError from urllib2 import urlopen, URLError
from datetime import datetime from datetime import datetime
from fs.filelike import FileWrapper
class HTTPFS(FS): class HTTPFS(FS):
...@@ -25,8 +22,8 @@ class HTTPFS(FS): ...@@ -25,8 +22,8 @@ class HTTPFS(FS):
""" """
_meta = {'read_only': True, _meta = {'read_only':True,
'network': True} 'network':True,}
def __init__(self, url): def __init__(self, url):
""" """
...@@ -41,8 +38,7 @@ class HTTPFS(FS): ...@@ -41,8 +38,7 @@ class HTTPFS(FS):
url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/')) url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/'))
return url return url
@iotools.filelike_to_stream def open(self, path, mode="r"):
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
if '+' in mode or 'w' in mode or 'a' in mode: if '+' in mode or 'w' in mode or 'a' in mode:
raise UnsupportedError('write') raise UnsupportedError('write')
......
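# Editor's usage sketch: HTTPFS is read-only; any mode containing 'w', 'a' or
# '+' raises UnsupportedError, as the hunk above shows. The URL is a
# placeholder.
from fs.httpfs import HTTPFS

def httpfs_demo():
    web_fs = HTTPFS('http://example.org')
    f = web_fs.open('/index.html', 'r')
    print f.read(200)
    f.close()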
from __future__ import unicode_literals
from __future__ import print_function
from fs import SEEK_SET, SEEK_CUR, SEEK_END
import io
from functools import wraps
import six
class RawWrapper(object):
"""Convert a Python 2 style file-like object in to a IO object"""
def __init__(self, f, mode=None, name=None):
self._f = f
self.is_io = isinstance(f, io.IOBase)
if mode is None and hasattr(f, 'mode'):
mode = f.mode
self.mode = mode
self.name = name
self.closed = False
super(RawWrapper, self).__init__()
def __repr__(self):
return "<IO wrapper for {0}>".format(self._f)
def close(self):
self._f.close()
self.closed = True
def fileno(self):
return self._f.fileno()
def flush(self):
return self._f.flush()
def isatty(self):
return self._f.isatty()
def seek(self, offset, whence=SEEK_SET):
return self._f.seek(offset, whence)
def readable(self):
if hasattr(self._f, 'readable'):
return self._f.readable()
return 'r' in self.mode
def writable(self):
if hasattr(self._f, 'writable'):
return self._f.writable()
return 'w' in self.mode
def seekable(self):
if hasattr(self._f, 'seekable'):
return self._f.seekable()
try:
self.seek(0, SEEK_CUR)
except IOError:
return False
else:
return True
def tell(self):
return self._f.tell()
def truncate(self, size=None):
return self._f.truncate(size)
def write(self, data):
if self.is_io:
return self._f.write(data)
self._f.write(data)
return len(data)
def read(self, n=-1):
if n == -1:
return self.readall()
return self._f.read(n)
def read1(self, n=-1):
if self.is_io:
return self._f.read1(n)
return self.read(n)
def readall(self):
return self._f.read()
def readinto(self, b):
if self.is_io:
return self._f.readinto(b)
data = self._f.read(len(b))
bytes_read = len(data)
b[:len(data)] = data
return bytes_read
def readline(self, limit=-1):
return self._f.readline(limit)
def readlines(self, hint=-1):
return self._f.readlines(hint)
def writelines(self, sequence):
return self._f.writelines(sequence)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def __iter__(self):
return iter(self._f)
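# Editor's usage sketch: RawWrapper gives a plain Python 2 file object the
# io.RawIOBase-style surface that make_stream() below builds on. The file name
# is illustrative.
def _rawwrapper_demo(path='setup.py'):
    raw = RawWrapper(open(path, 'rb'))
    print(raw.readable(), raw.writable(), raw.seekable())
    print(repr(raw.read(40)))
    raw.close()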
def filelike_to_stream(f):
@wraps(f)
def wrapper(self, path, mode='rt', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
file_like = f(self,
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
**kwargs)
return make_stream(path,
file_like,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering)
return wrapper
def make_stream(name,
f,
mode='r',
buffering=-1,
encoding=None,
errors=None,
newline=None,
line_buffering=False,
**kwargs):
"""Take a Python 2.x binary file and returns an IO Stream"""
r, w, a, binary = 'r' in mode, 'w' in mode, 'a' in mode, 'b' in mode
if '+' in mode:
r, w = True, True
io_object = RawWrapper(f, mode=mode, name=name)
if buffering >= 0:
if r and w:
io_object = io.BufferedRandom(io_object, buffering or io.DEFAULT_BUFFER_SIZE)
elif r:
io_object = io.BufferedReader(io_object, buffering or io.DEFAULT_BUFFER_SIZE)
elif w:
io_object = io.BufferedWriter(io_object, buffering or io.DEFAULT_BUFFER_SIZE)
if not binary:
io_object = io.TextIOWrapper(io_object,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,)
return io_object
def decode_binary(data, encoding=None, errors=None, newline=None):
"""Decode bytes as though read from a text file"""
return io.TextIOWrapper(io.BytesIO(data), encoding=encoding, errors=errors, newline=newline).read()
def make_bytes_io(data, encoding=None, errors=None):
"""Make a bytes IO object from either a string or an open file"""
if hasattr(data, 'mode') and 'b' in data.mode:
# It's already a binary file
return data
if not isinstance(data, basestring):
# It's a file, but we don't know if its binary
# TODO: Is there a better way than reading the entire file?
data = data.read() or b''
if isinstance(data, six.text_type):
# If it's text, encode it to bytes; Py2 unicode.encode() rejects keyword
# arguments, and UTF-8 here is the editor's assumed default
data = data.encode(encoding or 'utf-8', errors or 'strict')
return io.BytesIO(data)
def copy_file_to_fs(f, fs, path, encoding=None, errors=None, progress_callback=None, chunk_size=64 * 1024):
"""Copy an open file to a path on an FS"""
if progress_callback is None:
progress_callback = lambda bytes_written: None
read = f.read
chunk = read(chunk_size)
if isinstance(chunk, six.text_type):
f = fs.open(path, 'wt', encoding=encoding, errors=errors)
else:
f = fs.open(path, 'wb')
write = f.write
bytes_written = 0
try:
while chunk:
write(chunk)
bytes_written += len(chunk)
progress_callback(bytes_written)
chunk = read(chunk_size)
finally:
f.close()
return bytes_written
if __name__ == "__main__":
print("Reading a binary file")
bin_file = open('tests/data/UTF-8-demo.txt', 'rb')
with make_stream('UTF-8-demo.txt', bin_file, 'rb') as f:
print(repr(f))
print(type(f.read(200)))
print("Reading a text file")
bin_file = open('tests/data/UTF-8-demo.txt', 'rb')
with make_stream('UTF-8-demo.txt', bin_file, 'rt') as f:
print(repr(f))
print(type(f.read(200)))
print("Reading a buffered binary file")
bin_file = open('tests/data/UTF-8-demo.txt', 'rb')
with make_stream('UTF-8-demo.txt', bin_file, 'rb', buffering=0) as f:
print(repr(f))
print(type(f.read(200)))
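# Editor's usage sketch: round-tripping text through the byte helpers above.
# The sample string is illustrative.
def _codec_demo():
    payload = make_bytes_io(u'caf\xe9 cr\xe8me', encoding='utf-8')
    raw = payload.getvalue()
    print(repr(raw))                                   # UTF-8 bytes
    print(repr(decode_binary(raw, encoding='utf-8')))  # back to unicode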
...@@ -17,13 +17,9 @@ from fs.base import * ...@@ -17,13 +17,9 @@ from fs.base import *
from fs.errors import * from fs.errors import *
from fs import _thread_synchronize_default from fs import _thread_synchronize_default
from fs.filelike import StringIO from fs.filelike import StringIO
from fs import iotools
from os import SEEK_END from os import SEEK_END
import threading import threading
import six
from six import b
def _check_mode(mode, mode_chars): def _check_mode(mode, mode_chars):
for c in mode_chars: for c in mode_chars:
...@@ -31,7 +27,6 @@ def _check_mode(mode, mode_chars): ...@@ -31,7 +27,6 @@ def _check_mode(mode, mode_chars):
return False return False
return True return True
class MemoryFile(object): class MemoryFile(object):
def seek_and_lock(f): def seek_and_lock(f):
...@@ -72,13 +67,13 @@ class MemoryFile(object): ...@@ -72,13 +67,13 @@ class MemoryFile(object):
finally: finally:
lock.release() lock.release()
assert self.mem_file is not None, "self.mem_file should have a value" assert self.mem_file is not None, "self.mem_file should have a value"
def __str__(self): def __str__(self):
return "<MemoryFile in %s %s>" % (self.memory_fs, self.path) return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
def __repr__(self): __repr__ = __str__
return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
def __unicode__(self): def __unicode__(self):
return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path) return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
...@@ -91,22 +86,14 @@ class MemoryFile(object): ...@@ -91,22 +86,14 @@ class MemoryFile(object):
pass pass
def __iter__(self): def __iter__(self):
if 'r' not in self.mode and '+' not in self.mode: return self
raise IOError("File not open for reading")
self.mem_file.seek(self.pos)
for line in self.mem_file:
yield line
@seek_and_lock @seek_and_lock
def next(self): def next(self):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
return self.mem_file.next() return self.mem_file.next()
@seek_and_lock @seek_and_lock
def readline(self, *args, **kwargs): def readline(self, *args, **kwargs):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
return self.mem_file.readline(*args, **kwargs) return self.mem_file.readline(*args, **kwargs)
def close(self): def close(self):
...@@ -123,8 +110,6 @@ class MemoryFile(object): ...@@ -123,8 +110,6 @@ class MemoryFile(object):
@seek_and_lock @seek_and_lock
def read(self, size=None): def read(self, size=None):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
if size is None: if size is None:
size = -1 size = -1
return self.mem_file.read(size) return self.mem_file.read(size)
...@@ -139,14 +124,10 @@ class MemoryFile(object): ...@@ -139,14 +124,10 @@ class MemoryFile(object):
@seek_and_lock @seek_and_lock
def truncate(self, *args, **kwargs): def truncate(self, *args, **kwargs):
if 'r' in self.mode and '+' not in self.mode:
raise IOError("File not open for writing")
return self.mem_file.truncate(*args, **kwargs) return self.mem_file.truncate(*args, **kwargs)
#@seek_and_lock #@seek_and_lock
def write(self, data): def write(self, data):
if 'r' in self.mode and '+' not in self.mode:
raise IOError("File not open for writing")
self.memory_fs._on_modify_memory_file(self.path) self.memory_fs._on_modify_memory_file(self.path)
self._lock.acquire() self._lock.acquire()
try: try:
...@@ -163,7 +144,7 @@ class MemoryFile(object): ...@@ -163,7 +144,7 @@ class MemoryFile(object):
def __enter__(self): def __enter__(self):
return self return self
def __exit__(self, exc_type, exc_value, traceback): def __exit__(self,exc_type,exc_value,traceback):
self.close() self.close()
return False return False
...@@ -218,7 +199,7 @@ class DirEntry(object): ...@@ -218,7 +199,7 @@ class DirEntry(object):
if self.isfile(): if self.isfile():
return "<file %s>" % self.name return "<file %s>" % self.name
elif self.isdir(): elif self.isdir():
return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems()) return "<dir %s>" % "".join( "%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())
def isdir(self): def isdir(self):
return self.type == "dir" return self.type == "dir"
...@@ -248,23 +229,24 @@ class DirEntry(object): ...@@ -248,23 +229,24 @@ class DirEntry(object):
self.mem_file = StringIO() self.mem_file = StringIO()
self.mem_file.write(data) self.mem_file.write(data)
class MemoryFS(FS): class MemoryFS(FS):
"""An in-memory filesystem. """An in-memory filesystem.
""" """
_meta = {'thread_safe': True, _meta = {'thread_safe' : True,
'network': False, 'network' : False,
'virtual': False, 'virtual': False,
'read_only': False, 'read_only' : False,
'unicode_paths': True, 'unicode_paths' : True,
'case_insensitive_paths': False, 'case_insensitive_paths' : False,
'atomic.move': False, 'atomic.move' : False,
'atomic.copy': False, 'atomic.copy' : False,
'atomic.makedir': True, 'atomic.makedir' : True,
'atomic.rename': True, 'atomic.rename' : True,
'atomic.setcontents': False} 'atomic.setcontents' : False,
}
def _make_dir_entry(self, *args, **kwargs): def _make_dir_entry(self, *args, **kwargs):
return self.dir_entry_factory(*args, **kwargs) return self.dir_entry_factory(*args, **kwargs)
...@@ -282,11 +264,10 @@ class MemoryFS(FS): ...@@ -282,11 +264,10 @@ class MemoryFS(FS):
def __str__(self): def __str__(self):
return "<MemoryFS>" return "<MemoryFS>"
def __repr__(self): __repr__ = __str__
return "MemoryFS()"
def __unicode__(self): def __unicode__(self):
return "<MemoryFS>" return unicode(self.__str__())
@synchronize @synchronize
def _get_dir_entry(self, dirpath): def _get_dir_entry(self, dirpath):
...@@ -353,7 +334,7 @@ class MemoryFS(FS): ...@@ -353,7 +334,7 @@ class MemoryFS(FS):
if allow_recreate: if allow_recreate:
return return
raise DestinationExistsError(dirname) raise DestinationExistsError(dirname)
dirpath, dirname = pathsplit(dirname.rstrip('/')) dirpath, dirname = pathsplit(dirname)
if recursive: if recursive:
parent_dir = self._get_dir_entry(dirpath) parent_dir = self._get_dir_entry(dirpath)
...@@ -408,10 +389,8 @@ class MemoryFS(FS): ...@@ -408,10 +389,8 @@ class MemoryFS(FS):
# for f in file_dir_entry.open_files[:]: # for f in file_dir_entry.open_files[:]:
# f.close() # f.close()
@synchronize @synchronize
@iotools.filelike_to_stream def open(self, path, mode="r", **kwargs):
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
path = normpath(path) path = normpath(path)
filepath, filename = pathsplit(path) filepath, filename = pathsplit(path)
parent_dir_entry = self._get_dir_entry(filepath) parent_dir_entry = self._get_dir_entry(filepath)
...@@ -457,7 +436,7 @@ class MemoryFS(FS): ...@@ -457,7 +436,7 @@ class MemoryFS(FS):
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if dir_entry.isdir(): if dir_entry.isdir():
raise ResourceInvalidError(path, msg="That's a directory, not a file: %(path)s") raise ResourceInvalidError(path,msg="That's a directory, not a file: %(path)s")
pathname, dirname = pathsplit(path) pathname, dirname = pathsplit(path)
parent_dir = self._get_dir_entry(pathname) parent_dir = self._get_dir_entry(pathname)
...@@ -466,8 +445,6 @@ class MemoryFS(FS): ...@@ -466,8 +445,6 @@ class MemoryFS(FS):
@synchronize @synchronize
def removedir(self, path, recursive=False, force=False): def removedir(self, path, recursive=False, force=False):
path = normpath(path) path = normpath(path)
if path in ('', '/'):
raise RemoveRootError(path)
dir_entry = self._get_dir_entry(path) dir_entry = self._get_dir_entry(path)
if dir_entry is None: if dir_entry is None:
...@@ -483,17 +460,10 @@ class MemoryFS(FS): ...@@ -483,17 +460,10 @@ class MemoryFS(FS):
while rpathname: while rpathname:
rpathname, dirname = pathsplit(rpathname) rpathname, dirname = pathsplit(rpathname)
parent_dir = self._get_dir_entry(rpathname) parent_dir = self._get_dir_entry(rpathname)
if not dirname:
raise RemoveRootError(path)
del parent_dir.contents[dirname] del parent_dir.contents[dirname]
# stop recursing if the directory has other contents
if parent_dir.contents:
break
else: else:
pathname, dirname = pathsplit(path) pathname, dirname = pathsplit(path)
parent_dir = self._get_dir_entry(pathname) parent_dir = self._get_dir_entry(pathname)
if not dirname:
raise RemoveRootError(path)
del parent_dir.contents[dirname] del parent_dir.contents[dirname]
@synchronize @synchronize
...@@ -580,7 +550,7 @@ class MemoryFS(FS): ...@@ -580,7 +550,7 @@ class MemoryFS(FS):
if dir_entry.isdir(): if dir_entry.isdir():
info['st_mode'] = 0755 | stat.S_IFDIR info['st_mode'] = 0755 | stat.S_IFDIR
else: else:
info['size'] = len(dir_entry.data or b('')) info['size'] = len(dir_entry.data or '')
info['st_mode'] = 0666 | stat.S_IFREG info['st_mode'] = 0666 | stat.S_IFREG
return info return info
...@@ -630,43 +600,27 @@ class MemoryFS(FS): ...@@ -630,43 +600,27 @@ class MemoryFS(FS):
dst_dir_entry.xattrs.update(src_xattrs) dst_dir_entry.xattrs.update(src_xattrs)
@synchronize @synchronize
def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None): def getcontents(self, path):
dir_entry = self._get_dir_entry(path) dir_entry = self._get_dir_entry(path)
if dir_entry is None: if dir_entry is None:
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if not dir_entry.isfile(): if not dir_entry.isfile():
raise ResourceInvalidError(path, msg="not a file: %(path)s") raise ResourceInvalidError(path, msg="not a file: %(path)s")
data = dir_entry.data or b('') return dir_entry.data or ''
if 'b' not in mode:
return iotools.decode_binary(data, encoding=encoding, errors=errors, newline=newline)
return data
@synchronize @synchronize
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64): def setcontents(self, path, data, chunk_size=1024*64):
if isinstance(data, six.binary_type): if not isinstance(data, str):
return super(MemoryFS, self).setcontents(path, data, chunk_size)
if not self.exists(path): if not self.exists(path):
self.open(path, 'wb').close() self.open(path, 'w').close()
dir_entry = self._get_dir_entry(path) dir_entry = self._get_dir_entry(path)
if not dir_entry.isfile(): if not dir_entry.isfile():
raise ResourceInvalidError('Not a directory %(path)s', path) raise ResourceInvalidError('Not a directory %(path)s', path)
new_mem_file = StringIO() new_mem_file = StringIO()
new_mem_file.write(data) new_mem_file.write(data)
dir_entry.mem_file = new_mem_file dir_entry.mem_file = new_mem_file
return len(data)
return super(MemoryFS, self).setcontents(path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size)
# if isinstance(data, six.text_type):
# return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
# if not self.exists(path):
# self.open(path, 'wb').close()
# dir_entry = self._get_dir_entry(path)
# if not dir_entry.isfile():
# raise ResourceInvalidError('Not a directory %(path)s', path)
# new_mem_file = StringIO()
# new_mem_file.write(data)
# dir_entry.mem_file = new_mem_file
@synchronize @synchronize
def setxattr(self, path, key, value): def setxattr(self, path, key, value):
......
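# Editor's usage sketch: the 0.4.0 API on the right stores plain str data, so
# setcontents/getcontents round-trip byte strings directly.
from fs.memoryfs import MemoryFS

mem = MemoryFS()
mem.makedir('docs')
mem.setcontents('docs/readme.txt', 'hello world')
print mem.getcontents('docs/readme.txt')   # -> 'hello world'
print mem.listdir('docs')                  # -> ['readme.txt']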
...@@ -19,7 +19,7 @@ For example, lets say we have two filesystems containing config files and resour ...@@ -19,7 +19,7 @@ For example, lets say we have two filesystems containing config files and resour
We can combine these filesystems in to a single filesystem with the following code:: We can combine these filesystems in to a single filesystem with the following code::
from fs.mountfs import MountFS from fs.mountfs import MountFS
combined_fs = MountFS() combined_fs = MountFS
combined_fs.mountdir('config', config_fs) combined_fs.mountdir('config', config_fs)
combined_fs.mountdir('resources', resources_fs) combined_fs.mountdir('resources', resources_fs)
...@@ -46,7 +46,6 @@ from fs.base import * ...@@ -46,7 +46,6 @@ from fs.base import *
from fs.errors import * from fs.errors import *
from fs.path import * from fs.path import *
from fs import _thread_synchronize_default from fs import _thread_synchronize_default
from fs import iotools
class DirMount(object): class DirMount(object):
...@@ -55,13 +54,10 @@ class DirMount(object): ...@@ -55,13 +54,10 @@ class DirMount(object):
self.fs = fs self.fs = fs
def __str__(self): def __str__(self):
return "<DirMount %s, %s>" % (self.path, self.fs) return "Mount point: <%s,%s>" % (self.path,self.fs,)
__repr__ = __str__
def __repr__(self):
return "<DirMount %s, %s>" % (self.path, self.fs)
def __unicode__(self): def __unicode__(self):
return u"<DirMount %s, %s>" % (self.path, self.fs) return unicode(str(self))
class FileMount(object): class FileMount(object):
...@@ -95,7 +91,7 @@ class MountFS(FS): ...@@ -95,7 +91,7 @@ class MountFS(FS):
__repr__ = __str__ __repr__ = __str__
def __unicode__(self): def __unicode__(self):
return u"<%s [%s]>" % (self.__class__.__name__,self.mount_tree.items(),) return unicode(self.__str__())
def _delegate(self, path): def _delegate(self, path):
path = abspath(normpath(path)) path = abspath(normpath(path))
...@@ -161,19 +157,16 @@ class MountFS(FS): ...@@ -161,19 +157,16 @@ class MountFS(FS):
return "Mount dir" return "Mount dir"
else: else:
return "Mounted file" return "Mounted file"
return "Mounted dir, maps to path %s on %s" % (abspath(delegate_path) or '/', str(fs)) return "Mounted dir, maps to path %s on %s" % (delegate_path, str(fs))
@synchronize @synchronize
def isdir(self, path): def isdir(self, path):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
path = normpath(path)
if path in ("/", ""):
return True
return False return False
if fs is self: if fs is self:
obj = self.mount_tree.get(path, None) object = self.mount_tree.get(path, None)
return not isinstance(obj, MountFS.FileMount) return not isinstance(object,MountFS.FileMount)
return fs.isdir(delegate_path) return fs.isdir(delegate_path)
@synchronize @synchronize
...@@ -182,14 +175,12 @@ class MountFS(FS): ...@@ -182,14 +175,12 @@ class MountFS(FS):
if fs is None: if fs is None:
return False return False
if fs is self: if fs is self:
obj = self.mount_tree.get(path, None) object = self.mount_tree.get(path, None)
return isinstance(obj, MountFS.FileMount) return isinstance(object,MountFS.FileMount)
return fs.isfile(delegate_path) return fs.isfile(delegate_path)
@synchronize @synchronize
def exists(self, path): def exists(self, path):
if path in ("/", ""):
return True
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
return False return False
...@@ -202,11 +193,9 @@ class MountFS(FS): ...@@ -202,11 +193,9 @@ class MountFS(FS):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
if path in ("/", ""): raise ResourceNotFoundError(path)
return []
raise ResourceNotFoundError("path")
elif fs is self: if fs is self:
paths = self.mount_tree.names(path) paths = self.mount_tree.names(path)
return self._listdir_helper(path, return self._listdir_helper(path,
paths, paths,
...@@ -246,8 +235,6 @@ class MountFS(FS): ...@@ -246,8 +235,6 @@ class MountFS(FS):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
if path in ("/", ""):
return
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if fs is self: if fs is self:
...@@ -285,11 +272,12 @@ class MountFS(FS): ...@@ -285,11 +272,12 @@ class MountFS(FS):
else: else:
yield mkpath(p) yield mkpath(p)
@synchronize @synchronize
def makedir(self, path, recursive=False, allow_recreate=False): def makedir(self, path, recursive=False, allow_recreate=False):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise UnsupportedError("make directory", msg="Can only makedir for mounted paths") raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" )
if not delegate_path: if not delegate_path:
if allow_recreate: if allow_recreate:
return return
...@@ -298,10 +286,10 @@ class MountFS(FS): ...@@ -298,10 +286,10 @@ class MountFS(FS):
return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate) return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize @synchronize
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs): def open(self, path, mode="r", **kwargs):
obj = self.mount_tree.get(path, None) object = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(object) is MountFS.FileMount:
callable = obj.open_callable callable = object.open_callable
return callable(path, mode, **kwargs) return callable(path, mode, **kwargs)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
...@@ -312,24 +300,20 @@ class MountFS(FS): ...@@ -312,24 +300,20 @@ class MountFS(FS):
return fs.open(delegate_path, mode, **kwargs) return fs.open(delegate_path, mode, **kwargs)
@synchronize @synchronize
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024): def setcontents(self, path, data, chunk_size=64*1024):
obj = self.mount_tree.get(path, None) object = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(object) is MountFS.FileMount:
return super(MountFS, self).setcontents(path, return super(MountFS,self).setcontents(path, data, chunk_size=chunk_size)
data,
encoding=encoding,
errors=errors,
chunk_size=chunk_size)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise ParentDirectoryMissingError(path) raise ParentDirectoryMissingError(path)
return fs.setcontents(delegate_path, data, encoding=encoding, errors=errors, chunk_size=chunk_size) return fs.setcontents(delegate_path, data, chunk_size)
@synchronize @synchronize
def createfile(self, path, wipe=False): def createfile(self, path, wipe=False):
obj = self.mount_tree.get(path, None) object = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(object) is MountFS.FileMount:
return super(MountFS, self).createfile(path, wipe=wipe) return super(MountFS,self).createfile(path, wipe=wipe)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise ParentDirectoryMissingError(path) raise ParentDirectoryMissingError(path)
...@@ -345,8 +329,6 @@ class MountFS(FS): ...@@ -345,8 +329,6 @@ class MountFS(FS):
@synchronize @synchronize
def removedir(self, path, recursive=False, force=False): def removedir(self, path, recursive=False, force=False):
path = normpath(path) path = normpath(path)
if path in ('', '/'):
raise RemoveRootError(path)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path") raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path")
...@@ -415,7 +397,6 @@ class MountFS(FS): ...@@ -415,7 +397,6 @@ class MountFS(FS):
:param fs: A filesystem object to mount :param fs: A filesystem object to mount
""" """
path = abspath(normpath(path))
self.mount_tree[path] = MountFS.DirMount(path, fs) self.mount_tree[path] = MountFS.DirMount(path, fs)
mount = mountdir mount = mountdir
...@@ -424,29 +405,20 @@ class MountFS(FS): ...@@ -424,29 +405,20 @@ class MountFS(FS):
"""Mounts a single file path. """Mounts a single file path.
:param path: A path within the MountFS :param path: A path within the MountFS
:param open_callable: A callable that returns a file-like object, :param open_callable: A callable that returns a file-like object
`open_callable` should have the same signature as :py:meth:`~fs.base.FS.open` :param info_callable: A callable that returns a dictionary with information regarding the file-like object
:param info_callable: A callable that returns a dictionary with information regarding the file-like object,
`info_callable` should have the same signagture as :py:meth:`~fs.base.FS.getinfo`
""" """
self.mount_tree[path] = MountFS.FileMount(path, open_callable, info_callable) self.mount_tree[path] = MountFS.FileMount(path, callable, info_callable)
@synchronize @synchronize
def unmount(self, path): def unmount(self, path):
"""Unmounts a path. """Unmounts a path.
:param path: Path to unmount :param path: Path to unmount
:return: True if a path was unmounted, False if the path was already unmounted
:rtype: bool
""" """
try:
del self.mount_tree[path] del self.mount_tree[path]
except KeyError:
return False
else:
return True
@synchronize @synchronize
def settimes(self, path, accessed_time=None, modified_time=None): def settimes(self, path, accessed_time=None, modified_time=None):
...@@ -467,8 +439,6 @@ class MountFS(FS): ...@@ -467,8 +439,6 @@ class MountFS(FS):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
if path in ("/", ""):
return {}
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if fs is self: if fs is self:
...@@ -503,8 +473,6 @@ class MountFS(FS): ...@@ -503,8 +473,6 @@ class MountFS(FS):
path = normpath(path) path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is None: if fs is None:
if path in ("/", ""):
return default
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
if fs is self: if fs is self:
return default return default
......
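# Editor's usage sketch: mounting a single synthesized file with mountfile();
# the two callables follow the contract in the docstring above. Names and
# content are illustrative, and this assumes the corrected 0.5.0 signature on
# the left of the hunk (the 0.4.0 body on the right passes the builtin
# `callable` by mistake).
from fs.mountfs import MountFS
from StringIO import StringIO

def open_motd(path, mode='r', **kwargs):
    return StringIO('welcome aboard\n')

def motd_info(path, **kwargs):
    return {'size': 15}

mount_fs = MountFS()
mount_fs.mountfile('motd.txt', open_motd, motd_info)
print mount_fs.open('motd.txt').read()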
...@@ -94,8 +94,7 @@ class MultiFS(FS): ...@@ -94,8 +94,7 @@ class MultiFS(FS):
self.auto_close = auto_close self.auto_close = auto_close
self.fs_sequence = [] self.fs_sequence = []
self.fs_lookup = {} self.fs_lookup = {}
self.fs_priorities = {} self.write_fs = None
self.writefs = None
@synchronize @synchronize
def __str__(self): def __str__(self):
...@@ -107,53 +106,35 @@ class MultiFS(FS): ...@@ -107,53 +106,35 @@ class MultiFS(FS):
def __unicode__(self): def __unicode__(self):
return u"<MultiFS: %s>" % ", ".join(unicode(fs) for fs in self.fs_sequence) return u"<MultiFS: %s>" % ", ".join(unicode(fs) for fs in self.fs_sequence)
def _get_priority(self, name):
return self.fs_priorities[name]
@synchronize @synchronize
def close(self): def close(self):
# Explicitly close if requested # Explicitly close if requested
if self.auto_close: if self.auto_close:
for fs in self.fs_sequence: for fs in self.fs_sequence:
fs.close() fs.close()
if self.writefs is not None: if self.write_fs is not None:
self.writefs.close() self.write_fs.close()
# Discard any references # Discard any references
del self.fs_sequence[:] del self.fs_sequence[:]
self.fs_lookup.clear() self.fs_lookup.clear()
self.fs_priorities.clear() self.write_fs = None
self.writefs = None
super(MultiFS, self).close() super(MultiFS, self).close()
def _priority_sort(self):
"""Sort filesystems by priority order"""
priority_order = sorted(self.fs_lookup.keys(), key=lambda n: self.fs_priorities[n], reverse=True)
self.fs_sequence = [self.fs_lookup[name] for name in priority_order]
@synchronize @synchronize
def addfs(self, name, fs, write=False, priority=0): def addfs(self, name, fs, write=False):
"""Adds a filesystem to the MultiFS. """Adds a filesystem to the MultiFS.
:param name: A unique name to refer to the filesystem being added. :param name: A unique name to refer to the filesystem being added.
The filesystem can later be retrieved by using this name as an index to the MultiFS, i.e. multifs['myfs'] The filesystem can later be retrieved by using this name as an index to the MultiFS, i.e. multifs['myfs']
:param fs: The filesystem to add :param fs: The filesystem to add
:param write: If this value is True, then the `fs` will be used as the writeable FS :param write: If this value is True, then the `fs` will be used as the writeable FS
:param priority: A number that gives the priority of the filesystem being added.
Filesystems will be searched in descending priority order, with ties broken by the reverse of the order they were added,
so by default the most recently added filesystem is searched first (see the ordering sketch below)
""" """
if name in self.fs_lookup: if name in self.fs_lookup:
raise ValueError("Name already exists.") raise ValueError("Name already exists.")
priority = (priority, len(self.fs_sequence))
self.fs_priorities[name] = priority
self.fs_sequence.append(fs) self.fs_sequence.append(fs)
self.fs_lookup[name] = fs self.fs_lookup[name] = fs
self._priority_sort()
if write: if write:
self.setwritefs(fs) self.setwritefs(fs)
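The ordering rule behind _priority_sort is easiest to see in isolation: addfs keys each filesystem by a (priority, insertion_index) tuple and sorts the names in reverse, so higher priorities win and ties go to the most recently added. A small sketch with hypothetical names:

    # fs_priorities as built by addfs: name -> (priority, insertion_index)
    fs_priorities = {'m1': (0, 0), 'm2': (10, 1), 'm3': (0, 2)}
    order = sorted(fs_priorities, key=lambda n: fs_priorities[n], reverse=True)
    print(order)  # -> ['m2', 'm3', 'm1']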
...@@ -181,11 +162,10 @@ class MultiFS(FS): ...@@ -181,11 +162,10 @@ class MultiFS(FS):
""" """
if name not in self.fs_lookup: if name not in self.fs_lookup:
raise ValueError("No filesystem called '%s'" % name) raise ValueError("No filesystem called '%s'"%name)
fs = self.fs_lookup[name] fs = self.fs_lookup[name]
self.fs_sequence.remove(fs) self.fs_sequence.remove(fs)
del self.fs_lookup[name] del self.fs_lookup[name]
self._priority_sort()
@synchronize @synchronize
def __getitem__(self, name): def __getitem__(self, name):
...@@ -193,7 +173,7 @@ class MultiFS(FS): ...@@ -193,7 +173,7 @@ class MultiFS(FS):
@synchronize @synchronize
def __iter__(self): def __iter__(self):
return iter(self.fs_sequence[:]) return reversed(self.fs_sequence[:])
def _delegate_search(self, path): def _delegate_search(self, path):
for fs in self: for fs in self:
...@@ -223,8 +203,6 @@ class MultiFS(FS): ...@@ -223,8 +203,6 @@ class MultiFS(FS):
fs = self._delegate_search(path) fs = self._delegate_search(path)
if fs is not None: if fs is not None:
return fs.getsyspath(path, allow_none=allow_none) return fs.getsyspath(path, allow_none=allow_none)
if allow_none:
return None
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
@synchronize @synchronize
...@@ -235,18 +213,19 @@ class MultiFS(FS): ...@@ -235,18 +213,19 @@ class MultiFS(FS):
name, fs = self.which(path) name, fs = self.which(path)
if name is None: if name is None:
return "" return ""
return "%s (in %s)" % (fs.desc(path), name) return "%s, on %s (%s)" % (fs.desc(path), name, fs)
@synchronize @synchronize
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs): def open(self, path, mode="r", **kwargs):
if 'w' in mode or '+' in mode or 'a' in mode: if 'w' in mode or '+' in mode or 'a' in mode:
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('open', path=path, msg="No writeable FS set") raise OperationFailedError('open', path=path, msg="No writeable FS set")
return self.writefs.open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs) return self.writefs.open(path, mode)
for fs in self: for fs in self:
if fs.exists(path): if fs.exists(path):
fs_file = fs.open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs) fs_file = fs.open(path, mode, **kwargs)
return fs_file return fs_file
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
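In other words, any mode containing 'w', '+' or 'a' is routed to the single writeable FS, while plain reads search the members in priority order. A minimal sketch, assuming two MemoryFS members:

    from fs.multifs import MultiFS
    from fs.memoryfs import MemoryFS

    ro, rw = MemoryFS(), MemoryFS()
    ro.setcontents('readme.txt', b'read-only data')

    multi = MultiFS()
    multi.addfs('ro', ro)
    multi.addfs('rw', rw, write=True)       # becomes the writeable FS

    print(multi.open('readme.txt').read())  # found by searching the members
    f = multi.open('new.txt', 'w')          # routed to the writeable FS
    f.write('data')
    f.close()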
@synchronize @synchronize
...@@ -278,23 +257,16 @@ class MultiFS(FS): ...@@ -278,23 +257,16 @@ class MultiFS(FS):
return list(set(paths)) return list(set(paths))
@synchronize @synchronize
def makedir(self, path, recursive=False, allow_recreate=False):
if self.writefs is None:
raise OperationFailedError('makedir', path=path, msg="No writeable FS set")
self.writefs.makedir(path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize
def remove(self, path): def remove(self, path):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('remove', path=path, msg="No writeable FS set") raise OperationFailedError('remove', path=path, msg="No writeable FS set")
self.writefs.remove(path) self.writefs.remove(path)
raise ResourceNotFoundError(path)
@synchronize @synchronize
def removedir(self, path, recursive=False, force=False): def removedir(self, path, recursive=False, force=False):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('removedir', path=path, msg="No writeable FS set") raise OperationFailedError('removedir', path=path, msg="No writeable FS set")
if normpath(path) in ('', '/'):
raise RemoveRootError(path)
self.writefs.removedir(path, recursive=recursive, force=force) self.writefs.removedir(path, recursive=recursive, force=force)
@synchronize @synchronize
......
...@@ -67,8 +67,6 @@ __all__ = ['OpenerError', ...@@ -67,8 +67,6 @@ __all__ = ['OpenerError',
'HTTPOpener'] 'HTTPOpener']
from fs.path import pathsplit, join, iswildcard, normpath from fs.path import pathsplit, join, iswildcard, normpath
from fs.osfs import OSFS
from fs.filelike import FileWrapper
from os import getcwd from os import getcwd
import os.path import os.path
import re import re
...@@ -85,8 +83,6 @@ class NoOpenerError(OpenerError): ...@@ -85,8 +83,6 @@ class NoOpenerError(OpenerError):
def _expand_syspath(path): def _expand_syspath(path):
if path is None: if path is None:
return path return path
if path.startswith('\\\\?\\'):
path = path[4:]
path = os.path.expanduser(os.path.expandvars(path)) path = os.path.expanduser(os.path.expandvars(path))
path = os.path.normpath(os.path.abspath(path)) path = os.path.normpath(os.path.abspath(path))
return path return path
...@@ -121,14 +117,6 @@ def _split_url_path(url): ...@@ -121,14 +117,6 @@ def _split_url_path(url):
url = '%s://%s' % (scheme, netloc) url = '%s://%s' % (scheme, netloc)
return url, path return url, path
class _FSClosingFile(FileWrapper):
"""A file like object that closes its parent FS when closed itself"""
def close(self):
fs = getattr(self, '_closefs', None)
ret = super(_FSClosingFile, self).close()
if fs is not None:
fs.close()
return ret
class OpenerRegistry(object): class OpenerRegistry(object):
...@@ -250,7 +238,7 @@ class OpenerRegistry(object): ...@@ -250,7 +238,7 @@ class OpenerRegistry(object):
return fs, fs_path or '' return fs, fs_path or ''
def open(self, fs_url, mode='r', **kwargs): def open(self, fs_url, mode='rb'):
"""Opens a file from a given FS url """Opens a file from a given FS url
If you intend to do a lot of file manipulation, it would likely be more If you intend to do a lot of file manipulation, it would likely be more
...@@ -267,18 +255,33 @@ class OpenerRegistry(object): ...@@ -267,18 +255,33 @@ class OpenerRegistry(object):
fs, path = self.parse(fs_url, writeable=writeable) fs, path = self.parse(fs_url, writeable=writeable)
file_object = fs.open(path, mode) file_object = fs.open(path, mode)
file_object = _FSClosingFile(file_object, mode) from fs.filelike import FileWrapper
file_object.fs = fs file_object = FileWrapper(file_object, mode)
# If we just return the file, then fs goes out of scope and closes,
# which may make the file unusable. To get around this, we store a
# reference in the file object to the FS, and patch the file's
# close method to also close the FS.
close = file_object.close
close_fs = fs
def replace_close():
ret = close()
close_fs.close()
return ret
file_object.close = replace_close
return file_object return file_object
def getcontents(self, fs_url, mode='rb', encoding=None, errors=None, newline=None): def getcontents(self, fs_url):
"""Gets the contents from a given FS url (if it references a file) """Gets the contents from a given FS url (if it references a file)
:param fs_url: a FS URL e.g. ftp://ftp.mozilla.org/README :param fs_url: a FS URL e.g. ftp://ftp.mozilla.org/README
""" """
fs, path = self.parse(fs_url) fs, path = self.parse(fs_url)
return fs.getcontents(path, mode, encoding=encoding, errors=errors, newline=newline) return fs.getcontents(path)
def opendir(self, fs_url, writeable=True, create_dir=False): def opendir(self, fs_url, writeable=True, create_dir=False):
"""Opens an FS object from an FS URL """Opens an FS object from an FS URL
...@@ -290,11 +293,8 @@ class OpenerRegistry(object): ...@@ -290,11 +293,8 @@ class OpenerRegistry(object):
""" """
fs, path = self.parse(fs_url, writeable=writeable, create_dir=create_dir) fs, path = self.parse(fs_url, writeable=writeable, create_dir=create_dir)
if path and '://' not in fs_url:
# A shortcut to return an OSFS rather than a SubFS for os paths
return OSFS(fs_url)
if path: if path:
fs = fs.opendir(path) return fs.opendir(path)
return fs return fs
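In practice the registry is used through its module-level instance (named opener in fs.opener); a short sketch, reusing the FTP URL from the docstring above and assuming a local home directory:

    from fs.opener import opener

    # Open an FS (or sub-FS) from a URL; plain paths resolve to the OSFS.
    home = opener.opendir('~/')
    # Fetch a file's contents directly from an FS URL.
    readme = opener.getcontents('ftp://ftp.mozilla.org/README')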
...@@ -434,7 +434,7 @@ examples: ...@@ -434,7 +434,7 @@ examples:
dirpath, resourcepath = pathsplit(path) dirpath, resourcepath = pathsplit(path)
url = netloc url = netloc
ftpfs = FTPFS(url, user=username or '', passwd=password or '', follow_symlinks=(fs_name_params == "symlinks")) ftpfs = FTPFS(url, user=username or '', passwd=password or '')
ftpfs.cache_hint(True) ftpfs.cache_hint(True)
if create_dir and path: if create_dir and path:
......
...@@ -34,8 +34,6 @@ if OSFSWatchMixin is None: ...@@ -34,8 +34,6 @@ if OSFSWatchMixin is None:
# Fall back to raising UnsupportedError # Fall back to raising UnsupportedError
if OSFSWatchMixin is None: if OSFSWatchMixin is None:
class OSFSWatchMixin(object): class OSFSWatchMixin(object):
def __init__(self, *args, **kwargs):
super(OSFSWatchMixin, self).__init__(*args, **kwargs)
def add_watcher(self,*args,**kwds): def add_watcher(self,*args,**kwds):
raise UnsupportedError raise UnsupportedError
def del_watcher(self,watcher_or_callback): def del_watcher(self,watcher_or_callback):
......
...@@ -170,7 +170,7 @@ class OSFSWatchMixin(WatchableFSMixin): ...@@ -170,7 +170,7 @@ class OSFSWatchMixin(WatchableFSMixin):
if inevt.mask & pyinotify.IN_MODIFY: if inevt.mask & pyinotify.IN_MODIFY:
watcher.handle_event(MODIFIED(self,path,True)) watcher.handle_event(MODIFIED(self,path,True))
if inevt.mask & pyinotify.IN_CLOSE_WRITE: if inevt.mask & pyinotify.IN_CLOSE_WRITE:
watcher.handle_event(MODIFIED(self,path,True, closed=True)) watcher.handle_event(MODIFIED(self,path,True))
if inevt.mask & pyinotify.IN_MOVED_FROM: if inevt.mask & pyinotify.IN_MOVED_FROM:
# Sorry folks, I'm not up for decoding the destination path. # Sorry folks, I'm not up for decoding the destination path.
watcher.handle_event(MOVED_SRC(self,path,None)) watcher.handle_event(MOVED_SRC(self,path,None))
...@@ -219,7 +219,7 @@ class SharedThreadedNotifier(threading.Thread): ...@@ -219,7 +219,7 @@ class SharedThreadedNotifier(threading.Thread):
self.watchers[fd] = watcher self.watchers[fd] = watcher
self._poller.register(fd,select.POLLIN) self._poller.register(fd,select.POLLIN)
# Bump the poll object so it recognises the new fd. # Bump the poll object so it recognises the new fd.
os.write(self._pipe_w,b"H") os.write(self._pipe_w,"H")
def del_watcher(self,watcher): def del_watcher(self,watcher):
fd = watcher._pyinotify_WatchManager.get_fd() fd = watcher._pyinotify_WatchManager.get_fd()
......
...@@ -23,12 +23,9 @@ except ImportError: ...@@ -23,12 +23,9 @@ except ImportError:
if xattr is not None: if xattr is not None:
class OSFSXAttrMixin(object): class OSFSXAttrMixin(FS):
"""Mixin providing extended-attribute support via the 'xattr' module""" """Mixin providing extended-attribute support via the 'xattr' module"""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
@convert_os_errors @convert_os_errors
def setxattr(self, path, key, value): def setxattr(self, path, key, value):
xattr.xattr(self.getsyspath(path))[key]=value xattr.xattr(self.getsyspath(path))[key]=value
...@@ -56,9 +53,6 @@ else: ...@@ -56,9 +53,6 @@ else:
class OSFSXAttrMixin(object): class OSFSXAttrMixin(object):
"""Mixin disable extended-attribute support.""" """Mixin disable extended-attribute support."""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
def getxattr(self,path,key,default=None): def getxattr(self,path,key,default=None):
raise UnsupportedError raise UnsupportedError
......
...@@ -39,8 +39,6 @@ from fs import SEEK_SET, SEEK_CUR, SEEK_END ...@@ -39,8 +39,6 @@ from fs import SEEK_SET, SEEK_CUR, SEEK_END
_SENTINAL = object() _SENTINAL = object()
from six import PY3, b
class RemoteFileBuffer(FileWrapper): class RemoteFileBuffer(FileWrapper):
"""File-like object providing buffer for local file operations. """File-like object providing buffer for local file operations.
...@@ -83,7 +81,7 @@ class RemoteFileBuffer(FileWrapper): ...@@ -83,7 +81,7 @@ class RemoteFileBuffer(FileWrapper):
self._readlen = 0 # How many bytes already loaded from rfile self._readlen = 0 # How many bytes already loaded from rfile
self._rfile = None # Reference to remote file object self._rfile = None # Reference to remote file object
self._eof = False # Reached end of rfile? self._eof = False # Reached end of rfile?
if getattr(fs, "_lock", None) is not None: if getattr(fs,"_lock",None) is not None:
self._lock = fs._lock.__class__() self._lock = fs._lock.__class__()
else: else:
self._lock = threading.RLock() self._lock = threading.RLock()
...@@ -95,15 +93,13 @@ class RemoteFileBuffer(FileWrapper): ...@@ -95,15 +93,13 @@ class RemoteFileBuffer(FileWrapper):
self._eof = True self._eof = True
if not hasattr(rfile, "read"): if not hasattr(rfile, "read"):
#rfile = StringIO(unicode(rfile)) rfile = StringIO(unicode(rfile))
rfile = StringIO(rfile)
self._rfile = rfile self._rfile = rfile
else: else:
# Do not use remote file object # Do not use remote file object
self._eof = True self._eof = True
self._rfile = None self._rfile = None
self._changed = True
if rfile is not None and hasattr(rfile,"close"): if rfile is not None and hasattr(rfile,"close"):
rfile.close() rfile.close()
super(RemoteFileBuffer,self).__init__(wrapped_file,mode) super(RemoteFileBuffer,self).__init__(wrapped_file,mode)
...@@ -190,7 +186,7 @@ class RemoteFileBuffer(FileWrapper): ...@@ -190,7 +186,7 @@ class RemoteFileBuffer(FileWrapper):
self.wrapped_file.seek(curpos) self.wrapped_file.seek(curpos)
def _read(self, length=None): def _read(self, length=None):
if length is not None and length < 0: if length < 0:
length = None length = None
with self._lock: with self._lock:
self._fillbuffer(length) self._fillbuffer(length)
...@@ -317,8 +313,8 @@ class ConnectionManagerFS(LazyFS): ...@@ -317,8 +313,8 @@ class ConnectionManagerFS(LazyFS):
self._poll_sleeper = threading.Event() self._poll_sleeper = threading.Event()
self.connected = connected self.connected = connected
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024): def setcontents(self, path, data, chunk_size=64*1024):
return self.wrapped_fs.setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size) return self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size)
def __getstate__(self): def __getstate__(self):
state = super(ConnectionManagerFS,self).__getstate__() state = super(ConnectionManagerFS,self).__getstate__()
...@@ -538,12 +534,12 @@ class CacheFSMixin(FS): ...@@ -538,12 +534,12 @@ class CacheFSMixin(FS):
except KeyError: except KeyError:
pass pass
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs): def open(self,path,mode="r",**kwds):
# Try to validate the entry using the cached info # Try to validate the entry using the cached info
try: try:
ci = self.__get_cached_info(path) ci = self.__get_cached_info(path)
except KeyError: except KeyError:
if path in ("", "/"): if path in ("","/"):
raise ResourceInvalidError(path) raise ResourceInvalidError(path)
try: try:
ppath = dirname(path) ppath = dirname(path)
...@@ -551,38 +547,38 @@ class CacheFSMixin(FS): ...@@ -551,38 +547,38 @@ class CacheFSMixin(FS):
except KeyError: except KeyError:
pass pass
else: else:
if not fs.utils.isdir(super(CacheFSMixin, self), ppath, pci.info): if not fs.utils.isdir(super(CacheFSMixin,self),ppath,pci.info):
raise ResourceInvalidError(path) raise ResourceInvalidError(path)
if pci.has_full_children: if pci.has_full_children:
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
else: else:
if not fs.utils.isfile(super(CacheFSMixin, self), path, ci.info): if not fs.utils.isfile(super(CacheFSMixin,self),path,ci.info):
raise ResourceInvalidError(path) raise ResourceInvalidError(path)
f = super(CacheFSMixin, self).open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs) f = super(CacheFSMixin,self).open(path,mode,**kwds)
if "w" in mode or "a" in mode or "+" in mode: if "w" in mode or "a" in mode or "+" in mode:
with self.__cache_lock: with self.__cache_lock:
self.__cache.clear(path) self.__cache.clear(path)
f = self._CacheInvalidatingFile(self, path, f, mode) f = self._CacheInvalidatingFile(self,path,f,mode)
return f return f
class _CacheInvalidatingFile(FileWrapper): class _CacheInvalidatingFile(FileWrapper):
def __init__(self, owner, path, wrapped_file, mode=None): def __init__(self,owner,path,wrapped_file,mode=None):
self.path = path self.path = path
sup = super(CacheFSMixin._CacheInvalidatingFile, self) sup = super(CacheFSMixin._CacheInvalidatingFile,self)
sup.__init__(wrapped_file, mode) sup.__init__(wrapped_file,mode)
self.owner = owner self.owner = owner
def _write(self, string, flushing=False): def _write(self,string,flushing=False):
with self.owner._CacheFSMixin__cache_lock: with self.owner._CacheFSMixin__cache_lock:
self.owner._CacheFSMixin__cache.clear(self.path) self.owner._CacheFSMixin__cache.clear(self.path)
sup = super(CacheFSMixin._CacheInvalidatingFile, self) sup = super(CacheFSMixin._CacheInvalidatingFile,self)
return sup._write(string, flushing=flushing) return sup._write(string,flushing=flushing)
def _truncate(self, size): def _truncate(self,size):
with self.owner._CacheFSMixin__cache_lock: with self.owner._CacheFSMixin__cache_lock:
self.owner._CacheFSMixin__cache.clear(self.path) self.owner._CacheFSMixin__cache.clear(self.path)
sup = super(CacheFSMixin._CacheInvalidatingFile, self) sup = super(CacheFSMixin._CacheInvalidatingFile,self)
return sup._truncate(size) return sup._truncate(size)
def exists(self, path): def exists(self,path):
try: try:
self.getinfo(path) self.getinfo(path)
except ResourceNotFoundError: except ResourceNotFoundError:
...@@ -590,7 +586,7 @@ class CacheFSMixin(FS): ...@@ -590,7 +586,7 @@ class CacheFSMixin(FS):
else: else:
return True return True
def isdir(self, path): def isdir(self,path):
try: try:
self.__cache.iternames(path).next() self.__cache.iternames(path).next()
return True return True
...@@ -603,9 +599,9 @@ class CacheFSMixin(FS): ...@@ -603,9 +599,9 @@ class CacheFSMixin(FS):
except ResourceNotFoundError: except ResourceNotFoundError:
return False return False
else: else:
return fs.utils.isdir(super(CacheFSMixin, self), path, info) return fs.utils.isdir(super(CacheFSMixin,self),path,info)
def isfile(self, path): def isfile(self,path):
try: try:
self.__cache.iternames(path).next() self.__cache.iternames(path).next()
return False return False
...@@ -618,17 +614,17 @@ class CacheFSMixin(FS): ...@@ -618,17 +614,17 @@ class CacheFSMixin(FS):
except ResourceNotFoundError: except ResourceNotFoundError:
return False return False
else: else:
return fs.utils.isfile(super(CacheFSMixin, self), path, info) return fs.utils.isfile(super(CacheFSMixin,self),path,info)
def getinfo(self, path): def getinfo(self,path):
try: try:
ci = self.__get_cached_info(path) ci = self.__get_cached_info(path)
if not ci.has_full_info: if not ci.has_full_info:
raise KeyError raise KeyError
info = ci.info info = ci.info
except KeyError: except KeyError:
info = super(CacheFSMixin, self).getinfo(path) info = super(CacheFSMixin,self).getinfo(path)
self.__set_cached_info(path, CachedInfo(info)) self.__set_cached_info(path,CachedInfo(info))
return info return info
def listdir(self,path="",*args,**kwds): def listdir(self,path="",*args,**kwds):
...@@ -672,16 +668,16 @@ class CacheFSMixin(FS): ...@@ -672,16 +668,16 @@ class CacheFSMixin(FS):
def getsize(self,path): def getsize(self,path):
return self.getinfo(path)["size"] return self.getinfo(path)["size"]
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024): def setcontents(self, path, contents="", chunk_size=64*1024):
supsc = super(CacheFSMixin, self).setcontents supsc = super(CacheFSMixin,self).setcontents
res = supsc(path, data, encoding=None, errors=None, chunk_size=chunk_size) res = supsc(path, contents, chunk_size=chunk_size)
with self.__cache_lock: with self.__cache_lock:
self.__cache.clear(path) self.__cache.clear(path)
self.__cache[path] = CachedInfo.new_file_stub() self.__cache[path] = CachedInfo.new_file_stub()
return res return res
def createfile(self, path, wipe=False): def createfile(self, path):
super(CacheFSMixin,self).createfile(path, wipe=wipe) super(CacheFSMixin,self).createfile(path)
with self.__cache_lock: with self.__cache_lock:
self.__cache.clear(path) self.__cache.clear(path)
self.__cache[path] = CachedInfo.new_file_stub() self.__cache[path] = CachedInfo.new_file_stub()
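CacheFSMixin is designed to sit first in the MRO of a concrete FS class, so that its overrides (open, getinfo, exists, isdir, ...) intercept calls before they reach the real filesystem. A hypothetical composition, assuming the mixin's __init__ cooperates with the wrapped class via super() as the code above suggests:

    from fs.remote import CacheFSMixin
    from fs.memoryfs import MemoryFS

    class CachedMemoryFS(CacheFSMixin, MemoryFS):
        """MemoryFS fronted by the info cache (illustrative only)."""
        pass

    cfs = CachedMemoryFS()
    cfs.setcontents('a.txt', b'data')
    info = cfs.getinfo('a.txt')  # repeat calls may be served from the cache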
......
# Work in Progress - Do not use
from __future__ import with_statement
from fs.base import FS
from fs.expose.serve import packetstream
from collections import defaultdict
import threading
from threading import Lock, RLock
from json import dumps
import Queue as queue
import socket
from six import b
class PacketHandler(threading.Thread):
def __init__(self, transport, prelude_callback=None):
super(PacketHandler, self).__init__()
self.transport = transport
self.encoder = packetstream.JSONFileEncoder(transport)
self.decoder = packetstream.JSONDecoder(prelude_callback=None)
self.queues = defaultdict(queue.Queue)
self._encoder_lock = threading.Lock()
self._queues_lock = threading.Lock()
self._call_id_lock = threading.Lock()
self.call_id = 0
def run(self):
decoder = self.decoder
read = self.transport.read
on_packet = self.on_packet
while True:
data = read(1024*16)
if not data:
print "No data"
break
print "data", repr(data)
for header, payload in decoder.feed(data):
print repr(header)
print repr(payload)
on_packet(header, payload)
def _new_call_id(self):
with self._call_id_lock:
self.call_id += 1
return self.call_id
def get_thread_queue(self, queue_id=None):
if queue_id is None:
queue_id = threading.current_thread().ident
with self._queues_lock:
return self.queues[queue_id]
def send_packet(self, header, payload=''):
call_id = self._new_call_id()
queue_id = threading.current_thread().ident
client_ref = "%i:%i" % (queue_id, call_id)
header['client_ref'] = client_ref
with self._encoder_lock:
self.encoder.write(header, payload)
return call_id
def get_packet(self, call_id):
if call_id is not None:
queue_id = threading.current_thread().ident
client_ref = "%i:%i" % (queue_id, call_id)
else:
client_ref = None
queue = self.get_thread_queue()
while True:
header, payload = queue.get()
print repr(header)
print repr(payload)
if client_ref is not None and header.get('client_ref') != client_ref:
continue
break
return header, payload
def on_packet(self, header, payload):
client_ref = header.get('client_ref', '')
queue_id, call_id = client_ref.split(':', 1)
queue_id = int(queue_id)
#queue_id = header.get('queue_id', '')
queue = self.get_thread_queue(queue_id)
queue.put((header, payload))
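The routing key is worth spelling out: send_packet tags each request with a client_ref of the form "queue_id:call_id" (the sending thread's id plus a per-handler counter), and on_packet splits that string to deliver the reply to the right per-thread queue. A sketch:

    import threading

    call_id = 7                                  # from _new_call_id()
    queue_id = threading.current_thread().ident  # keys the per-thread queue
    client_ref = "%i:%i" % (queue_id, call_id)

    # on_packet() recovers the queue id from the reply header:
    qid, cid = client_ref.split(':', 1)
    assert int(qid) == queue_id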
class _SocketFile(object):
def __init__(self, socket):
self.socket = socket
def read(self, size):
try:
return self.socket.recv(size)
except:
return b('')
def write(self, data):
self.socket.sendall(data)
def close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
class _RemoteFile(object):
def __init__(self, path, connection):
self.path = path
self.connection = connection
class RemoteFS(FS):
_meta = { 'thread_safe' : True,
'network' : True,
'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
}
def __init__(self, addr='', port=3000, username=None, password=None, resource=None, transport=None):
self.addr = addr
self.port = port
self.username = username
self.password = password
self.resource = resource
self.transport = transport
if self.transport is None:
self.transport = self._open_connection()
self.packet_handler = PacketHandler(self.transport)
self.packet_handler.start()
self._remote_call('auth',
username=username,
password=password,
resource=resource)
def _open_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.addr, self.port))
socket_file = _SocketFile(sock)
socket_file.write(b('pyfs/0.1\n'))
return socket_file
def _make_call(self, method_name, *args, **kwargs):
call = dict(type='rpc',
method=method_name,
args=args,
kwargs=kwargs)
return call
def _remote_call(self, method_name, *args, **kwargs):
call = self._make_call(method_name, *args, **kwargs)
call_id = self.packet_handler.send_packet(call)
header, payload = self.packet_handler.get_packet(call_id)
return header, payload
def ping(self, msg):
call_id = self.packet_handler.send_packet({'type':'rpc', 'method':'ping'}, msg)
header, payload = self.packet_handler.get_packet(call_id)
print "PING"
print header
print payload
def close(self):
self.transport.close()
self.packet_handler.join()
def open(self, path, mode="r", **kwargs):
pass
def exists(self, path):
# _remote_call returns (header, payload); the response is assumed to be in the header
header, payload = self._remote_call('exists', path)
return header.get('response')
if __name__ == "__main__":
rfs = RemoteFS()
rfs.close()
...@@ -2,8 +2,6 @@ ...@@ -2,8 +2,6 @@
fs.s3fs fs.s3fs
======= =======
**Currently only available on Python 2 due to boto not being available for Python 3**
FS subclass accessing files in Amazon S3 FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem This module provides the class 'S3FS', which implements the FS filesystem
...@@ -26,9 +24,7 @@ from fs.path import * ...@@ -26,9 +24,7 @@ from fs.path import *
from fs.errors import * from fs.errors import *
from fs.remote import * from fs.remote import *
from fs.filelike import LimitBytesFile from fs.filelike import LimitBytesFile
from fs import iotools
import six
# Boto is not thread-safe, so we need to use a per-thread S3 connection. # Boto is not thread-safe, so we need to use a per-thread S3 connection.
if hasattr(threading,"local"): if hasattr(threading,"local"):
...@@ -59,17 +55,17 @@ class S3FS(FS): ...@@ -59,17 +55,17 @@ class S3FS(FS):
or flushed. or flushed.
""" """
_meta = {'thread_safe': True, _meta = { 'thread_safe' : True,
'virtual': False, 'virtual': False,
'read_only': False, 'read_only' : False,
'unicode_paths': True, 'unicode_paths' : True,
'case_insensitive_paths': False, 'case_insensitive_paths' : False,
'network': True, 'network' : True,
'atomic.move': True, 'atomic.move' : True,
'atomic.copy': True, 'atomic.copy' : True,
'atomic.makedir': True, 'atomic.makedir' : True,
'atomic.rename': False, 'atomic.rename' : False,
'atomic.setcontent': True 'atomic.setconetns' : True
} }
class meta: class meta:
...@@ -108,6 +104,12 @@ class S3FS(FS): ...@@ -108,6 +104,12 @@ class S3FS(FS):
prefix = prefix + separator prefix = prefix + separator
if isinstance(prefix,unicode): if isinstance(prefix,unicode):
prefix = prefix.encode("utf8") prefix = prefix.encode("utf8")
if aws_access_key is None:
if "AWS_ACCESS_KEY_ID" not in os.environ:
raise CreateFailedError("AWS_ACCESS_KEY_ID not set")
if aws_secret_key is None:
if "AWS_SECRET_ACCESS_KEY" not in os.environ:
raise CreateFailedError("AWS_SECRET_ACCESS_KEY not set")
self._prefix = prefix self._prefix = prefix
self._tlocal = thread_local() self._tlocal = thread_local()
super(S3FS, self).__init__(thread_synchronize=thread_synchronize) super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
...@@ -135,14 +137,7 @@ class S3FS(FS): ...@@ -135,14 +137,7 @@ class S3FS(FS):
return b return b
except AttributeError: except AttributeError:
try: try:
# Validate by listing the bucket if there is no prefix. b = self._s3conn.get_bucket(self._bucket_name, validate=True)
# If there is a prefix, validate by listing only the prefix
# itself, to avoid errors when an IAM policy has been applied.
if self._prefix:
b = self._s3conn.get_bucket(self._bucket_name, validate=0)
b.get_key(self._prefix)
else:
b = self._s3conn.get_bucket(self._bucket_name, validate=1)
except S3ResponseError, e: except S3ResponseError, e:
if "404 Not Found" not in str(e): if "404 Not Found" not in str(e):
raise raise
...@@ -160,11 +155,11 @@ class S3FS(FS): ...@@ -160,11 +155,11 @@ class S3FS(FS):
super(S3FS,self).__setstate__(state) super(S3FS,self).__setstate__(state)
self._tlocal = thread_local() self._tlocal = thread_local()
def __repr__(self): def __str__(self):
args = (self.__class__.__name__,self._bucket_name,self._prefix) args = (self.__class__.__name__,self._bucket_name,self._prefix)
return '<%s: %s:%s>' % args return '<%s: %s:%s>' % args
__str__ = __repr__ __repr__ = __str__
def _s3path(self,path): def _s3path(self,path):
"""Get the absolute path to a file stored in S3.""" """Get the absolute path to a file stored in S3."""
...@@ -249,9 +244,9 @@ class S3FS(FS): ...@@ -249,9 +244,9 @@ class S3FS(FS):
k = self._s3bukt.get_key(s3path) k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions? # Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and is_public = True in [grant.permission == 'READ' and \
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants] for grant in k.get_acl().acl.grants ]
url = k.generate_url(expires, force_http=is_public) url = k.generate_url(expires, force_http=is_public)
...@@ -266,14 +261,11 @@ class S3FS(FS): ...@@ -266,14 +261,11 @@ class S3FS(FS):
return url return url
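The ACL check above reduces to scanning the key's grants for the AllUsers group with READ permission; isolated as a helper (boto 2.x era, illustrative only):

    def is_world_readable(key):
        """True if the S3 key grants READ to the AllUsers group (boto 2.x)."""
        return any(grant.permission == 'READ' and
                   grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
                   for grant in key.get_acl().acl.grants)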
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024): def setcontents(self, path, data, chunk_size=64*1024):
s3path = self._s3path(path) s3path = self._s3path(path)
if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
self._sync_set_contents(s3path, data) self._sync_set_contents(s3path, data)
@iotools.filelike_to_stream def open(self,path,mode="r"):
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
"""Open the named file in the given mode. """Open the named file in the given mode.
This method downloads the file contents into a local temporary file This method downloads the file contents into a local temporary file
...@@ -502,8 +494,6 @@ class S3FS(FS): ...@@ -502,8 +494,6 @@ class S3FS(FS):
def removedir(self,path,recursive=False,force=False): def removedir(self,path,recursive=False,force=False):
"""Remove the directory at the given path.""" """Remove the directory at the given path."""
if normpath(path) in ('', '/'):
raise RemoveRootError(path)
s3path = self._s3path(path) s3path = self._s3path(path)
if s3path != self._prefix: if s3path != self._prefix:
s3path = s3path + self._separator s3path = s3path + self._separator
......
...@@ -11,23 +11,29 @@ import os.path ...@@ -11,23 +11,29 @@ import os.path
import time import time
import tempfile import tempfile
from fs.base import synchronize
from fs.osfs import OSFS from fs.osfs import OSFS
from fs.errors import * from fs.errors import *
from fs import _thread_synchronize_default from fs import _thread_synchronize_default
class TempFS(OSFS): class TempFS(OSFS):
"""Create a Filesystem in a temporary directory (with tempfile.mkdtemp), """Create a Filesystem in a temporary directory (with tempfile.mkdtemp),
and removes it when the TempFS object is cleaned up.""" and removes it when the TempFS object is cleaned up."""
_meta = dict(OSFS._meta) _meta = { 'thread_safe' : True,
_meta['pickle_contents'] = False 'virtual' : False,
_meta['network'] = False 'read_only' : False,
_meta['atomic.move'] = True 'unicode_paths' : os.path.supports_unicode_filenames,
_meta['atomic.copy'] = True 'case_insensitive_paths' : os.path.normcase('Aa') == 'aa',
'pickle_contents': False,
'network' : False,
'atomic.move' : True,
'atomic.copy' : True,
'atomic.makedir' : True,
'atomic.rename' : True,
'atomic.setcontents' : False
}
def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_synchronize_default): def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_synchronize_default):
"""Creates a temporary Filesystem """Creates a temporary Filesystem
...@@ -39,14 +45,14 @@ class TempFS(OSFS): ...@@ -39,14 +45,14 @@ class TempFS(OSFS):
self.identifier = identifier self.identifier = identifier
self.temp_dir = temp_dir self.temp_dir = temp_dir
self.dir_mode = dir_mode self.dir_mode = dir_mode
self._temp_dir = tempfile.mkdtemp(identifier or "TempFS", dir=temp_dir) self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir)
self._cleaned = False self._cleaned = False
super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize) super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
def __repr__(self): def __str__(self):
return '<TempFS: %s>' % self._temp_dir return '<TempFS: %s>' % self._temp_dir
__str__ = __repr__ __repr__ = __str__
def __unicode__(self): def __unicode__(self):
return u'<TempFS: %s>' % self._temp_dir return u'<TempFS: %s>' % self._temp_dir
...@@ -66,7 +72,6 @@ class TempFS(OSFS): ...@@ -66,7 +72,6 @@ class TempFS(OSFS):
# dir_mode=self.dir_mode, # dir_mode=self.dir_mode,
# thread_synchronize=self.thread_synchronize) # thread_synchronize=self.thread_synchronize)
@synchronize
def close(self): def close(self):
"""Removes the temporary directory. """Removes the temporary directory.
...@@ -75,13 +80,13 @@ class TempFS(OSFS): ...@@ -75,13 +80,13 @@ class TempFS(OSFS):
Note that once this method has been called, the FS object may Note that once this method has been called, the FS object may
no longer be used. no longer be used.
""" """
super(TempFS, self).close() super(TempFS,self).close()
# Depending on how resources are freed by the OS, there could # Depending on how resources are freed by the OS, there could
# be some transient errors when freeing a TempFS soon after it # be some transient errors when freeing a TempFS soon after it
# was used. If they occur, do a small sleep and try again. # was used. If they occur, do a small sleep and try again.
try: try:
self._close() self._close()
except (ResourceLockedError, ResourceInvalidError): except (ResourceLockedError,ResourceInvalidError):
time.sleep(0.5) time.sleep(0.5)
self._close() self._close()
...@@ -99,23 +104,20 @@ class TempFS(OSFS): ...@@ -99,23 +104,20 @@ class TempFS(OSFS):
try: try:
# shutil.rmtree doesn't handle long paths on win32, # shutil.rmtree doesn't handle long paths on win32,
# so we walk the tree by hand. # so we walk the tree by hand.
entries = os.walk(self.root_path, topdown=False) entries = os.walk(self.root_path,topdown=False)
for (dir, dirnames, filenames) in entries: for (dir,dirnames,filenames) in entries:
for filename in filenames: for filename in filenames:
try: try:
os_remove(os.path.join(dir, filename)) os_remove(os.path.join(dir,filename))
except ResourceNotFoundError: except ResourceNotFoundError:
pass pass
for dirname in dirnames: for dirname in dirnames:
try: try:
os_rmdir(os.path.join(dir, dirname)) os_rmdir(os.path.join(dir,dirname))
except ResourceNotFoundError: except ResourceNotFoundError:
pass pass
try:
os.rmdir(self.root_path) os.rmdir(self.root_path)
except OSError:
pass
self._cleaned = True self._cleaned = True
finally: finally:
self._lock.release() self._lock.release()
super(TempFS, self).close() super(TempFS,self).close()
"""
fs.tests.test_archivefs: testcases for the ArchiveFS class
"""
import unittest
import os
import random
import zipfile
import tempfile
import shutil
import fs.tests
from fs.path import *
try:
from fs.contrib import archivefs
except ImportError:
libarchive_available = False
else:
libarchive_available = True
from six import PY3, b
class TestReadArchiveFS(unittest.TestCase):
__test__ = libarchive_available
def setUp(self):
self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
self.zf = zipfile.ZipFile(self.temp_filename, "w")
zf = self.zf
zf.writestr("a.txt", b("Hello, World!"))
zf.writestr("b.txt", b("b"))
zf.writestr("1.txt", b("1"))
zf.writestr("foo/bar/baz.txt", b("baz"))
zf.writestr("foo/second.txt", b("hai"))
zf.close()
self.fs = archivefs.ArchiveFS(self.temp_filename, "r")
def tearDown(self):
self.fs.close()
os.remove(self.temp_filename)
def check(self, p):
try:
self.zf.getinfo(p)
return True
except KeyError:
return False
def test_reads(self):
def read_contents(path):
f = self.fs.open(path)
contents = f.read()
return contents
def check_contents(path, expected):
self.assert_(read_contents(path)==expected)
check_contents("a.txt", b("Hello, World!"))
check_contents("1.txt", b("1"))
check_contents("foo/bar/baz.txt", b("baz"))
def test_getcontents(self):
def read_contents(path):
return self.fs.getcontents(path)
def check_contents(path, expected):
self.assert_(read_contents(path)==expected)
check_contents("a.txt", b("Hello, World!"))
check_contents("1.txt", b("1"))
check_contents("foo/bar/baz.txt", b("baz"))
def test_is(self):
self.assert_(self.fs.isfile('a.txt'))
self.assert_(self.fs.isfile('1.txt'))
self.assert_(self.fs.isfile('foo/bar/baz.txt'))
self.assert_(self.fs.isdir('foo'))
self.assert_(self.fs.isdir('foo/bar'))
self.assert_(self.fs.exists('a.txt'))
self.assert_(self.fs.exists('1.txt'))
self.assert_(self.fs.exists('foo/bar/baz.txt'))
self.assert_(self.fs.exists('foo'))
self.assert_(self.fs.exists('foo/bar'))
def test_listdir(self):
def check_listing(path, expected):
dir_list = self.fs.listdir(path)
self.assert_(sorted(dir_list) == sorted(expected))
for item in dir_list:
self.assert_(isinstance(item,unicode))
check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
check_listing('foo', ['second.txt', 'bar'])
check_listing('foo/bar', ['baz.txt'])
class TestWriteArchiveFS(unittest.TestCase):
__test__ = libarchive_available
def setUp(self):
self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
archive_fs = archivefs.ArchiveFS(self.temp_filename, format='zip', mode='w')
def makefile(filename, contents):
if dirname(filename):
archive_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
f = archive_fs.open(filename, 'wb')
f.write(contents)
f.close()
makefile("a.txt", b("Hello, World!"))
makefile("b.txt", b("b"))
makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
makefile("foo/bar/baz.txt", b("baz"))
makefile("foo/second.txt", b("hai"))
archive_fs.close()
def tearDown(self):
os.remove(self.temp_filename)
def test_valid(self):
zf = zipfile.ZipFile(self.temp_filename, "r")
self.assert_(zf.testzip() is None)
zf.close()
def test_creation(self):
zf = zipfile.ZipFile(self.temp_filename, "r")
def check_contents(filename, contents):
if PY3:
zcontents = zf.read(filename)
else:
zcontents = zf.read(filename.encode(archivefs.ENCODING))
self.assertEqual(contents, zcontents)
check_contents("a.txt", b("Hello, World!"))
check_contents("b.txt", b("b"))
check_contents("foo/bar/baz.txt", b("baz"))
check_contents("foo/second.txt", b("hai"))
check_contents(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
#~ class TestAppendArchiveFS(TestWriteArchiveFS):
#~ __test__ = libarchive_available
#~ def setUp(self):
#~ self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
#~ self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
#~ zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
#~ def makefile(filename, contents):
#~ if dirname(filename):
#~ zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
#~ f = zip_fs.open(filename, 'wb')
#~ f.write(contents)
#~ f.close()
#~ makefile("a.txt", b("Hello, World!"))
#~ makefile("b.txt", b("b"))
#~ zip_fs.close()
#~ zip_fs = zipfs.ZipFS(self.temp_filename, 'a')
#~ makefile("foo/bar/baz.txt", b("baz"))
#~ makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
#~ makefile("foo/second.txt", b("hai"))
#~ zip_fs.close()
#~ class TestArchiveFSErrors(unittest.TestCase):
#~ __test__ = libarchive_available
#~ def setUp(self):
#~ self.workdir = tempfile.mkdtemp()
#~ def tearDown(self):
#~ shutil.rmtree(self.workdir)
#~ def test_bogus_zipfile(self):
#~ badzip = os.path.join(self.workdir,"bad.zip")
#~ f = open(badzip,"wb")
#~ f.write(b("I'm not really a zipfile"))
#~ f.close()
#~ self.assertRaises(zipfs.ZipOpenError,zipfs.ZipFS,badzip)
#~ def test_missing_zipfile(self):
#~ missingzip = os.path.join(self.workdir,"missing.zip")
#~ self.assertRaises(zipfs.ZipNotFoundError,zipfs.ZipFS,missingzip)
if __name__ == '__main__':
unittest.main()
# -*- encoding: utf-8 -*-
""" """
fs.tests.test_errors: testcases for the fs error classes and functions fs.tests.test_errors: testcases for the fs error classes and functions
...@@ -25,8 +24,3 @@ class TestErrorPickling(unittest.TestCase): ...@@ -25,8 +24,3 @@ class TestErrorPickling(unittest.TestCase):
assert_dump_load(UnsupportedError("makepony")) assert_dump_load(UnsupportedError("makepony"))
class TestFSError(unittest.TestCase):
def test_unicode_representation_of_error_with_non_ascii_characters(self):
path_error = PathError('/Shïrê/Frødø')
_ = unicode(path_error)
\ No newline at end of file
...@@ -6,8 +6,7 @@ ...@@ -6,8 +6,7 @@
import unittest import unittest
import sys import sys
import os import os, os.path
import os.path
import socket import socket
import threading import threading
import time import time
...@@ -21,27 +20,103 @@ from fs.errors import * ...@@ -21,27 +20,103 @@ from fs.errors import *
from fs import rpcfs from fs import rpcfs
from fs.expose.xmlrpc import RPCFSServer from fs.expose.xmlrpc import RPCFSServer
class TestRPCFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
import six def makeServer(self,fs,addr):
from six import PY3, b return RPCFSServer(fs,addr,logRequests=False)
from fs.tests.test_rpcfs import TestRPCFS def startServer(self):
port = 3000
self.temp_fs = TempFS()
self.server = None
try: self.serve_more_requests = True
from fs import sftpfs self.server_thread = threading.Thread(target=self.runServer)
from fs.expose.sftp import BaseSFTPServer self.server_thread.setDaemon(True)
except ImportError:
if not PY3: self.start_event = threading.Event()
self.end_event = threading.Event()
self.server_thread.start()
self.start_event.wait()
def runServer(self):
"""Run the server, swallowing shutdown-related execptions."""
port = 3000
while not self.server:
try:
self.server = self.makeServer(self.temp_fs,("127.0.0.1",port))
except socket.error, e:
if e.args[1] == "Address already in use":
port += 1
else:
raise raise
self.server_addr = ("127.0.0.1", port)
import logging self.server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logging.getLogger('paramiko').setLevel(logging.ERROR)
logging.getLogger('paramiko.transport').setLevel(logging.ERROR)
# if sys.platform != "win32":
# try:
# self.server.socket.settimeout(1)
# except socket.error:
# pass
#
self.start_event.set()
try:
#self.server.serve_forever()
while self.serve_more_requests:
self.server.handle_request()
except Exception, e:
pass
self.end_event.set()
def setUp(self):
self.startServer()
self.fs = rpcfs.RPCFS("http://%s:%d" % self.server_addr)
def tearDown(self):
self.serve_more_requests = False
#self.server.socket.close()
# self.server.socket.shutdown(socket.SHUT_RDWR)
# self.server.socket.close()
# self.temp_fs.close()
#self.server_thread.join()
#self.end_event.wait()
#return
try:
self.bump()
self.server.server_close()
except Exception:
pass
#self.server_thread.join()
self.temp_fs.close()
def bump(self):
host, port = self.server_addr
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, cn, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(.1)
sock.connect(sa)
sock.send("\n")
except socket.error, e:
pass
finally:
if sock is not None:
sock.close()
class TestSFTPFS(TestRPCFS):
__test__ = not PY3 from fs import sftpfs
from fs.expose.sftp import BaseSFTPServer
class TestSFTPFS(TestRPCFS):
def makeServer(self,fs,addr): def makeServer(self,fs,addr):
return BaseSFTPServer(addr,fs) return BaseSFTPServer(addr,fs)
...@@ -50,6 +125,14 @@ class TestSFTPFS(TestRPCFS): ...@@ -50,6 +125,14 @@ class TestSFTPFS(TestRPCFS):
self.startServer() self.startServer()
self.fs = sftpfs.SFTPFS(self.server_addr, no_auth=True) self.fs = sftpfs.SFTPFS(self.server_addr, no_auth=True)
#def runServer(self):
# self.server.serve_forever()
#
#def tearDown(self):
# self.server.shutdown()
# self.server_thread.join()
# self.temp_fs.close()
def bump(self): def bump(self):
# paramiko doesn't like being bumped, just wait for it to timeout. # paramiko doesn't like being bumped, just wait for it to timeout.
# TODO: do this using a paramiko.Transport() connection # TODO: do this using a paramiko.Transport() connection
...@@ -62,7 +145,7 @@ except ImportError: ...@@ -62,7 +145,7 @@ except ImportError:
pass pass
else: else:
from fs.osfs import OSFS from fs.osfs import OSFS
class TestFUSE(unittest.TestCase, FSTestCases, ThreadingTestCases): class TestFUSE(unittest.TestCase,FSTestCases,ThreadingTestCases):
def setUp(self): def setUp(self):
self.temp_fs = TempFS() self.temp_fs = TempFS()
...@@ -71,7 +154,7 @@ else: ...@@ -71,7 +154,7 @@ else:
self.mounted_fs = self.temp_fs.opendir("root") self.mounted_fs = self.temp_fs.opendir("root")
self.mount_point = self.temp_fs.getsyspath("mount") self.mount_point = self.temp_fs.getsyspath("mount")
self.fs = OSFS(self.temp_fs.getsyspath("mount")) self.fs = OSFS(self.temp_fs.getsyspath("mount"))
self.mount_proc = fuse.mount(self.mounted_fs, self.mount_point) self.mount_proc = fuse.mount(self.mounted_fs,self.mount_point)
def tearDown(self): def tearDown(self):
self.mount_proc.unmount() self.mount_proc.unmount()
...@@ -83,7 +166,7 @@ else: ...@@ -83,7 +166,7 @@ else:
fuse.unmount(self.mount_point) fuse.unmount(self.mount_point)
self.temp_fs.close() self.temp_fs.close()
def check(self, p): def check(self,p):
return self.mounted_fs.exists(p) return self.mounted_fs.exists(p)
...@@ -126,10 +209,10 @@ if dokan.is_available: ...@@ -126,10 +209,10 @@ if dokan.is_available:
def test_safety_wrapper(self): def test_safety_wrapper(self):
rawfs = MemoryFS() rawfs = MemoryFS()
safefs = dokan.Win32SafetyFS(rawfs) safefs = dokan.Win32SafetyFS(rawfs)
rawfs.setcontents("autoRun.inf", b("evilcodeevilcode")) rawfs.setcontents("autoRun.inf","evilcodeevilcode")
self.assertTrue(safefs.exists("_autoRun.inf")) self.assertTrue(safefs.exists("_autoRun.inf"))
self.assertTrue("autoRun.inf" not in safefs.listdir("/")) self.assertTrue("autoRun.inf" not in safefs.listdir("/"))
safefs.setcontents("file:stream",b("test")) safefs.setcontents("file:stream","test")
self.assertFalse(rawfs.exists("file:stream")) self.assertFalse(rawfs.exists("file:stream"))
self.assertTrue(rawfs.exists("file__colon__stream")) self.assertTrue(rawfs.exists("file__colon__stream"))
self.assertTrue("file:stream" in safefs.listdir("/")) self.assertTrue("file:stream" in safefs.listdir("/"))
......
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
""" """
from fs.tests import FSTestCases, ThreadingTestCases from fs.tests import FSTestCases, ThreadingTestCases
from fs.path import *
from fs import errors
import unittest import unittest
...@@ -15,6 +13,8 @@ import sys ...@@ -15,6 +13,8 @@ import sys
import shutil import shutil
import tempfile import tempfile
from fs.path import *
from fs import osfs from fs import osfs
class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases): class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
...@@ -30,14 +30,6 @@ class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases): ...@@ -30,14 +30,6 @@ class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def check(self, p): def check(self, p):
return os.path.exists(os.path.join(self.temp_dir, relpath(p))) return os.path.exists(os.path.join(self.temp_dir, relpath(p)))
def test_invalid_chars(self):
super(TestOSFS, self).test_invalid_chars()
self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
self.assertFalse(self.fs.isvalidpath('invalid\0file'))
self.assert_(self.fs.isvalidpath('validfile'))
self.assert_(self.fs.isvalidpath('completely_valid/path/foo.bar'))
class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases): class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
...@@ -77,7 +69,7 @@ class TestMountFS(unittest.TestCase,FSTestCases,ThreadingTestCases): ...@@ -77,7 +69,7 @@ class TestMountFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.fs.close() self.fs.close()
def check(self, p): def check(self, p):
return self.mount_fs.exists(pathjoin("mounted/memfs", relpath(p))) return self.mount_fs.exists(os.path.join("mounted/memfs", relpath(p)))
class TestMountFS_atroot(unittest.TestCase,FSTestCases,ThreadingTestCases): class TestMountFS_atroot(unittest.TestCase,FSTestCases,ThreadingTestCases):
...@@ -106,7 +98,7 @@ class TestMountFS_stacked(unittest.TestCase,FSTestCases,ThreadingTestCases): ...@@ -106,7 +98,7 @@ class TestMountFS_stacked(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.fs.close() self.fs.close()
def check(self, p): def check(self, p):
return self.mount_fs.exists(pathjoin("mem/two", relpath(p))) return self.mount_fs.exists(os.path.join("mem/two", relpath(p)))
from fs import tempfs from fs import tempfs
...@@ -124,10 +116,3 @@ class TestTempFS(unittest.TestCase,FSTestCases,ThreadingTestCases): ...@@ -124,10 +116,3 @@ class TestTempFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
td = self.fs._temp_dir td = self.fs._temp_dir
return os.path.exists(os.path.join(td, relpath(p))) return os.path.exists(os.path.join(td, relpath(p)))
def test_invalid_chars(self):
super(TestTempFS, self).test_invalid_chars()
self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
self.assertFalse(self.fs.isvalidpath('invalid\0file'))
self.assert_(self.fs.isvalidpath('validfile'))
self.assert_(self.fs.isvalidpath('completely_valid/path/foo.bar'))
...@@ -12,15 +12,9 @@ import time ...@@ -12,15 +12,9 @@ import time
from os.path import abspath from os.path import abspath
import urllib import urllib
from six import PY3
try: try:
from pyftpdlib.authorizers import DummyAuthorizer from pyftpdlib import ftpserver
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
except ImportError: except ImportError:
if not PY3:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>") raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import * from fs.path import *
...@@ -30,8 +24,6 @@ from fs import ftpfs ...@@ -30,8 +24,6 @@ from fs import ftpfs
ftp_port = 30000 ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases): class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
__test__ = not PY3
def setUp(self): def setUp(self):
global ftp_port global ftp_port
ftp_port += 1 ftp_port += 1
...@@ -91,21 +83,21 @@ if __name__ == "__main__": ...@@ -91,21 +83,21 @@ if __name__ == "__main__":
# Run an ftp server that exposes a given directory # Run an ftp server that exposes a given directory
import sys import sys
authorizer = DummyAuthorizer() authorizer = ftpserver.DummyAuthorizer()
authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw") authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
authorizer.add_anonymous(sys.argv[1]) authorizer.add_anonymous(sys.argv[1])
#def nolog(*args): def nolog(*args):
# pass pass
#ftpserver.log = nolog ftpserver.log = nolog
#ftpserver.logline = nolog ftpserver.logline = nolog
handler = FTPHandler handler = ftpserver.FTPHandler
handler.authorizer = authorizer handler.authorizer = authorizer
address = ("127.0.0.1", int(sys.argv[2])) address = ("127.0.0.1", int(sys.argv[2]))
#print address #print address
ftpd = FTPServer(address, handler) ftpd = ftpserver.FTPServer(address, handler)
sys.stdout.write('serving\n') sys.stdout.write('serving\n')
sys.stdout.flush() sys.stdout.flush()
......
...@@ -10,8 +10,6 @@ from fs.expose.importhook import FSImportHook ...@@ -10,8 +10,6 @@ from fs.expose.importhook import FSImportHook
from fs.tempfs import TempFS from fs.tempfs import TempFS
from fs.zipfs import ZipFS from fs.zipfs import ZipFS
from six import b
class TestFSImportHook(unittest.TestCase): class TestFSImportHook(unittest.TestCase):
...@@ -34,23 +32,23 @@ class TestFSImportHook(unittest.TestCase): ...@@ -34,23 +32,23 @@ class TestFSImportHook(unittest.TestCase):
sys.path_importer_cache.clear() sys.path_importer_cache.clear()
def _init_modules(self,fs): def _init_modules(self,fs):
fs.setcontents("fsih_hello.py",b(dedent(""" fs.setcontents("fsih_hello.py",dedent("""
message = 'hello world!' message = 'hello world!'
"""))) """))
fs.makedir("fsih_pkg") fs.makedir("fsih_pkg")
fs.setcontents("fsih_pkg/__init__.py",b(dedent(""" fs.setcontents("fsih_pkg/__init__.py",dedent("""
a = 42 a = 42
"""))) """))
fs.setcontents("fsih_pkg/sub1.py",b(dedent(""" fs.setcontents("fsih_pkg/sub1.py",dedent("""
import fsih_pkg import fsih_pkg
from fsih_hello import message from fsih_hello import message
a = fsih_pkg.a a = fsih_pkg.a
"""))) """))
fs.setcontents("fsih_pkg/sub2.pyc",self._getpyc(b(dedent(""" fs.setcontents("fsih_pkg/sub2.pyc",self._getpyc(dedent("""
import fsih_pkg import fsih_pkg
from fsih_hello import message from fsih_hello import message
a = fsih_pkg.a * 2 a = fsih_pkg.a * 2
""")))) """)))
def _getpyc(self,src): def _getpyc(self,src):
"""Get the .pyc contents to match th given .py source code.""" """Get the .pyc contents to match th given .py source code."""
...@@ -141,3 +139,4 @@ class TestFSImportHook(unittest.TestCase): ...@@ -141,3 +139,4 @@ class TestFSImportHook(unittest.TestCase):
sys.path_hooks.remove(FSImportHook) sys.path_hooks.remove(FSImportHook)
sys.path.pop() sys.path.pop()
t.close() t.close()
from __future__ import unicode_literals
from fs import iotools
import io
import unittest
from os.path import dirname, join, abspath
try:
unicode
except NameError:
unicode = str
class OpenFilelike(object):
def __init__(self, make_f):
self.make_f = make_f
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
return self.make_f()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.f.close()
class TestIOTools(unittest.TestCase):
def get_bin_file(self):
path = join(dirname(abspath(__file__)), 'data/UTF-8-demo.txt')
return io.open(path, 'rb')
def test_make_stream(self):
"""Test make_stream"""
with self.get_bin_file() as f:
text = f.read()
self.assert_(isinstance(text, bytes))
with self.get_bin_file() as f:
with iotools.make_stream("data/UTF-8-demo.txt", f, 'rt') as f2:
text = f2.read()
self.assert_(isinstance(text, unicode))
def test_decorator(self):
"""Test filelike_to_stream decorator"""
o = OpenFilelike(self.get_bin_file)
with o.open('file', 'rb') as f:
text = f.read()
self.assert_(isinstance(text, bytes))
with o.open('file', 'rt') as f:
text = f.read()
self.assert_(isinstance(text, unicode))
from fs.mountfs import MountFS
from fs.memoryfs import MemoryFS
import unittest
class TestMountFS(unittest.TestCase):
def test_auto_close(self):
"""Test MountFS auto close is working"""
multi_fs = MountFS()
m1 = MemoryFS()
m2 = MemoryFS()
multi_fs.mount('/m1', m1)
multi_fs.mount('/m2', m2)
self.assert_(not m1.closed)
self.assert_(not m2.closed)
multi_fs.close()
self.assert_(m1.closed)
self.assert_(m2.closed)
    def test_no_auto_close(self):
        """Test MountFS auto close can be disabled"""
        multi_fs = MountFS(auto_close=False)
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.mount('/m1', m1)
        multi_fs.mount('/m2', m2)
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
        multi_fs.close()
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
    def test_mountfile(self):
        """Test mounting a file"""
        quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
        mem_fs = MemoryFS()
        mem_fs.makedir('foo')
        mem_fs.setcontents('foo/bar.txt', quote)
        foo_dir = mem_fs.opendir('foo')

        mount_fs = MountFS()
        mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)
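        # mountfile() exposes a single file at the given path by delegating
        # to the supplied open/getinfo callables -- here bound methods of the
        # opendir()'d sub-filesystem, so 'bar.txt' proxies foo/bar.txt.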
        self.assert_(mount_fs.isdir('/'))
        self.assert_(mount_fs.isdir('./'))
        self.assert_(mount_fs.isdir(''))

        # Check we can see the mounted file in the dir list
        self.assertEqual(mount_fs.listdir(), ["bar.txt"])
        self.assert_(not mount_fs.exists('nobodyhere.txt'))
        self.assert_(mount_fs.exists('bar.txt'))
        self.assert_(mount_fs.isfile('bar.txt'))
        self.assert_(not mount_fs.isdir('bar.txt'))

        # Check open and getinfo callables
        self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
        self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))

        # Check changes are written back
        mem_fs.setcontents('foo/bar.txt', b'baz')
        self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
        self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))

        # Check changes are written to the original fs
        self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
        self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))

        # Check unmount
        self.assert_(mount_fs.unmount("bar.txt"))
        self.assertEqual(mount_fs.listdir(), [])
        self.assert_(not mount_fs.exists('bar.txt'))

        # Check that unmounting a second time is a no-op, and returns False
        self.assertFalse(mount_fs.unmount("bar.txt"))
    def test_empty(self):
        """Test MountFS with nothing mounted."""
        mount_fs = MountFS()
        self.assertEqual(mount_fs.getinfo(''), {})
        self.assertEqual(mount_fs.getxattr('', 'yo'), None)
        self.assertEqual(mount_fs.listdir(), [])
        self.assertEqual(list(mount_fs.ilistdir()), [])
from fs.multifs import MultiFS
from fs.memoryfs import MemoryFS

import unittest
from six import b


class TestMultiFS(unittest.TestCase):

    def test_auto_close(self):
        """Test MultiFS auto close is working"""
        multi_fs = MultiFS()
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.addfs('m1', m1)
        multi_fs.addfs('m2', m2)
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
        multi_fs.close()
        self.assert_(m1.closed)
        self.assert_(m2.closed)
    def test_no_auto_close(self):
        """Test MultiFS auto close can be disabled"""
        multi_fs = MultiFS(auto_close=False)
        m1 = MemoryFS()
        m2 = MemoryFS()
        multi_fs.addfs('m1', m1)
        multi_fs.addfs('m2', m2)
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
        multi_fs.close()
        self.assert_(not m1.closed)
        self.assert_(not m2.closed)
    def test_priority(self):
        """Test priority order is working"""
        m1 = MemoryFS()
        m2 = MemoryFS()
        m3 = MemoryFS()
        m1.setcontents("name", b("m1"))
        m2.setcontents("name", b("m2"))
        m3.setcontents("name", b("m3"))
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2)
        multi_fs.addfs("m3", m3)
        self.assert_(multi_fs.getcontents("name") == b("m3"))

        m1 = MemoryFS()
        m2 = MemoryFS()
        m3 = MemoryFS()
        m1.setcontents("name", b("m1"))
        m2.setcontents("name", b("m2"))
        m3.setcontents("name", b("m3"))
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3)
        self.assert_(multi_fs.getcontents("name") == b("m2"))

        m1 = MemoryFS()
        m2 = MemoryFS()
        m3 = MemoryFS()
        m1.setcontents("name", b("m1"))
        m2.setcontents("name", b("m2"))
        m3.setcontents("name", b("m3"))
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3, priority=10)
        self.assert_(multi_fs.getcontents("name") == b("m3"))

        m1 = MemoryFS()
        m2 = MemoryFS()
        m3 = MemoryFS()
        m1.setcontents("name", b("m1"))
        m2.setcontents("name", b("m2"))
        m3.setcontents("name", b("m3"))
        multi_fs = MultiFS(auto_close=False)
        multi_fs.addfs("m1", m1, priority=11)
        multi_fs.addfs("m2", m2, priority=10)
        multi_fs.addfs("m3", m3, priority=10)
        self.assert_(multi_fs.getcontents("name") == b("m1"))
"""
fs.tests.test_opener: testcases for FS opener
"""
import unittest
import tempfile
import shutil
from fs.opener import opener
from fs import path
class TestOpener(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp(u"fstest_opener")
def tearDown(self):
shutil.rmtree(self.temp_dir)
def testOpen(self):
filename = path.join(self.temp_dir, 'foo.txt')
file_object = opener.open(filename, 'wb')
file_object.close()
self.assertTrue(file_object.closed)
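Beyond plain paths like the one above, the opener module also resolves FS URLs to filesystem objects. A minimal sketch, assuming the mem:// opener that pyfilesystem registers by default:

from fs.opener import fsopendir

# fsopendir() turns an FS URL into a filesystem object; 'mem://' gives an
# in-memory filesystem, so nothing touches the disk.
mem = fsopendir('mem://')
mem.setcontents('hello.txt', b'hello world')
print(mem.getcontents('hello.txt'))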