Commit 3ea4efe1 by willmcgugan@gmail.com

Change of api (fs.open, fs.setcontent, fs.getcontents) to support io module in Py2.6+ and Py3

parent c6391b6f
include AUTHORS include AUTHORS
...@@ -19,19 +19,19 @@ __version__ = "0.4.1" ...@@ -19,19 +19,19 @@ __version__ = "0.4.1"
__author__ = "Will McGugan (will@willmcgugan.com)" __author__ = "Will McGugan (will@willmcgugan.com)"
# provide these by default so people can use 'fs.path.basename' etc. # provide these by default so people can use 'fs.path.basename' etc.
import errors from fs import errors
import path from fs import path
_thread_synchronize_default = True _thread_synchronize_default = True
def set_thread_synchronize_default(sync): def set_thread_synchronize_default(sync):
"""Sets the default thread synchronisation flag. """Sets the default thread synchronisation flag.
FS objects are made thread-safe through the use of a per-FS threading Lock FS objects are made thread-safe through the use of a per-FS threading Lock
object. Since this can introduce an small overhead it can be disabled with object. Since this can introduce an small overhead it can be disabled with
this function if the code is single-threaded. this function if the code is single-threaded.
:param sync: Set whether to use thread synchronisation for new FS objects :param sync: Set whether to use thread synchronisation for new FS objects
""" """
global _thread_synchronization_default global _thread_synchronization_default
_thread_synchronization_default = sync _thread_synchronization_default = sync
......
...@@ -6,8 +6,8 @@ A collection of filesystems that map to application specific locations. ...@@ -6,8 +6,8 @@ A collection of filesystems that map to application specific locations.
These classes abstract away the different requirements for user data across platforms, These classes abstract away the different requirements for user data across platforms,
which vary in their conventions. They are all subclasses of :class:`fs.osfs.OSFS`, which vary in their conventions. They are all subclasses of :class:`fs.osfs.OSFS`,
all that differs from `OSFS` is the constructor which detects the appropriate all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters. location given the name of the application, author name and other parameters.
Uses `appdirs` (https://github.com/ActiveState/appdirs), written by Trent Mick and Sridhar Ratnakumar <trentm at gmail com; github at srid name> Uses `appdirs` (https://github.com/ActiveState/appdirs), written by Trent Mick and Sridhar Ratnakumar <trentm at gmail com; github at srid name>
...@@ -30,10 +30,10 @@ class UserDataFS(OSFS): ...@@ -30,10 +30,10 @@ class UserDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required :param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx :param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist :param create: if True (the default) the directory will be created if it does not exist
""" """
app_dirs = AppDirs(appname, appauthor, version, roaming) app_dirs = AppDirs(appname, appauthor, version, roaming)
super(self.__class__, self).__init__(app_dirs.user_data_dir, create=create) super(UserDataFS, self).__init__(app_dirs.user_data_dir, create=create)
class SiteDataFS(OSFS): class SiteDataFS(OSFS):
...@@ -45,10 +45,10 @@ class SiteDataFS(OSFS): ...@@ -45,10 +45,10 @@ class SiteDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required :param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx :param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist :param create: if True (the default) the directory will be created if it does not exist
""" """
app_dirs = AppDirs(appname, appauthor, version, roaming) app_dirs = AppDirs(appname, appauthor, version, roaming)
super(self.__class__, self).__init__(app_dirs.site_data_dir, create=create) super(SiteDataFS, self).__init__(app_dirs.site_data_dir, create=create)
class UserCacheFS(OSFS): class UserCacheFS(OSFS):
...@@ -60,10 +60,10 @@ class UserCacheFS(OSFS): ...@@ -60,10 +60,10 @@ class UserCacheFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required :param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx :param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist :param create: if True (the default) the directory will be created if it does not exist
""" """
app_dirs = AppDirs(appname, appauthor, version, roaming) app_dirs = AppDirs(appname, appauthor, version, roaming)
super(self.__class__, self).__init__(app_dirs.user_cache_dir, create=create) super(UserCacheFS, self).__init__(app_dirs.user_cache_dir, create=create)
class UserLogFS(OSFS): class UserLogFS(OSFS):
...@@ -75,13 +75,14 @@ class UserLogFS(OSFS): ...@@ -75,13 +75,14 @@ class UserLogFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required :param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx :param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist :param create: if True (the default) the directory will be created if it does not exist
""" """
app_dirs = AppDirs(appname, appauthor, version, roaming) app_dirs = AppDirs(appname, appauthor, version, roaming)
super(self.__class__, self).__init__(app_dirs.user_log_dir, create=create) super(UserLogFS, self).__init__(app_dirs.user_log_dir, create=create)
if __name__ == "__main__": if __name__ == "__main__":
udfs = UserDataFS('sexytime', appauthor='pyfs') udfs = UserDataFS('exampleapp', appauthor='pyfs')
print udfs print udfs
udfs2 = UserDataFS('sexytime2', appauthor='pyfs', create=False) udfs2 = UserDataFS('exampleapp2', appauthor='pyfs', create=False)
print udfs2 print udfs2
...@@ -8,10 +8,11 @@ Not for general usage, the functionality in this file is exposed elsewhere ...@@ -8,10 +8,11 @@ Not for general usage, the functionality in this file is exposed elsewhere
import six import six
from six import PY3 from six import PY3
def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callback=None, finished_callback=None): def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callback=None, finished_callback=None):
"""Copy data from a string or a file-like object to a given fs/path""" """Copy data from a string or a file-like object to a given fs/path"""
if progress_callback is None: if progress_callback is None:
progress_callback = lambda bytes_written:None progress_callback = lambda bytes_written: None
bytes_written = 0 bytes_written = 0
f = None f = None
try: try:
...@@ -19,7 +20,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb ...@@ -19,7 +20,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb
if hasattr(data, "read"): if hasattr(data, "read"):
read = data.read read = data.read
chunk = read(chunk_size) chunk = read(chunk_size)
if PY3 and isinstance(chunk, six.text_type): if isinstance(chunk, six.text_type):
f = dst_fs.open(dst_path, 'w') f = dst_fs.open(dst_path, 'w')
else: else:
f = dst_fs.open(dst_path, 'wb') f = dst_fs.open(dst_path, 'wb')
...@@ -30,7 +31,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb ...@@ -30,7 +31,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb
progress_callback(bytes_written) progress_callback(bytes_written)
chunk = read(chunk_size) chunk = read(chunk_size)
else: else:
if PY3 and isinstance(data, six.text_type): if isinstance(data, six.text_type):
f = dst_fs.open(dst_path, 'w') f = dst_fs.open(dst_path, 'w')
else: else:
f = dst_fs.open(dst_path, 'wb') f = dst_fs.open(dst_path, 'wb')
......
...@@ -112,11 +112,11 @@ class ArchiveFS(FS): ...@@ -112,11 +112,11 @@ class ArchiveFS(FS):
return SizeUpdater(entry, self.archive.writestream(path)) return SizeUpdater(entry, self.archive.writestream(path))
@synchronize @synchronize
def getcontents(self, path, mode="rb"): def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
if not self.exists(path): if not self.exists(path):
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
f = self.open(path) with self.open(path, mode, encoding=encoding, errors=errors, newline=newline) as f:
return f.read() return f.read()
def desc(self, path): def desc(self, path):
return "%s in zip file" % path return "%s in zip file" % path
......
...@@ -41,11 +41,13 @@ from fs.base import * ...@@ -41,11 +41,13 @@ from fs.base import *
from fs.path import * from fs.path import *
from fs.errors import * from fs.errors import *
from fs.remote import RemoteFileBuffer from fs.remote import RemoteFileBuffer
from fs import iotools
from fs.contrib.davfs.util import * from fs.contrib.davfs.util import *
from fs.contrib.davfs import xmlobj from fs.contrib.davfs import xmlobj
from fs.contrib.davfs.xmlobj import * from fs.contrib.davfs.xmlobj import *
import six
from six import b from six import b
import errno import errno
...@@ -84,12 +86,12 @@ class DAVFS(FS): ...@@ -84,12 +86,12 @@ class DAVFS(FS):
"http": 80, "http": 80,
"https": 443, "https": 443,
} }
_meta = { 'virtual' : False, _meta = { 'virtual' : False,
'read_only' : False, 'read_only' : False,
'unicode_paths' : True, 'unicode_paths' : True,
'case_insensitive_paths' : False, 'case_insensitive_paths' : False,
'network' : True 'network' : True
} }
def __init__(self,url,credentials=None,get_credentials=None,thread_synchronize=True,connection_classes=None,timeout=None): def __init__(self,url,credentials=None,get_credentials=None,thread_synchronize=True,connection_classes=None,timeout=None):
...@@ -121,7 +123,7 @@ class DAVFS(FS): ...@@ -121,7 +123,7 @@ class DAVFS(FS):
self.url = url self.url = url
pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>") pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
resp = self._request("/","PROPFIND",pf.render(),{"Depth":"0"}) resp = self._request("/","PROPFIND",pf.render(),{"Depth":"0"})
try: try:
if resp.status == 404: if resp.status == 404:
raise ResourceNotFoundError("/",msg="root url gives 404") raise ResourceNotFoundError("/",msg="root url gives 404")
if resp.status in (401,403): if resp.status in (401,403):
...@@ -147,9 +149,9 @@ class DAVFS(FS): ...@@ -147,9 +149,9 @@ class DAVFS(FS):
if not port: if not port:
try: try:
port = self._DEFAULT_PORT_NUMBERS[scheme] port = self._DEFAULT_PORT_NUMBERS[scheme]
except KeyError: except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,) msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg) raise RemoteConnectionError(msg=msg)
# Can we re-use an existing connection? # Can we re-use an existing connection?
with self._connection_lock: with self._connection_lock:
now = time.time() now = time.time()
...@@ -165,12 +167,12 @@ class DAVFS(FS): ...@@ -165,12 +167,12 @@ class DAVFS(FS):
return (False,con) return (False,con)
self._discard_connection(con) self._discard_connection(con)
# Nope, we need to make a fresh one. # Nope, we need to make a fresh one.
try: try:
ConClass = self.connection_classes[scheme] ConClass = self.connection_classes[scheme]
except KeyError: except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,) msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg) raise RemoteConnectionError(msg=msg)
con = ConClass(url.hostname,url.port,timeout=self.timeout) con = ConClass(url.hostname,url.port,timeout=self.timeout)
self._connections.append(con) self._connections.append(con)
return (True,con) return (True,con)
...@@ -182,9 +184,9 @@ class DAVFS(FS): ...@@ -182,9 +184,9 @@ class DAVFS(FS):
if not port: if not port:
try: try:
port = self._DEFAULT_PORT_NUMBERS[scheme] port = self._DEFAULT_PORT_NUMBERS[scheme]
except KeyError: except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,) msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg) raise RemoteConnectionError(msg=msg)
with self._connection_lock: with self._connection_lock:
now = time.time() now = time.time()
try: try:
...@@ -256,7 +258,7 @@ class DAVFS(FS): ...@@ -256,7 +258,7 @@ class DAVFS(FS):
resp = None resp = None
try: try:
resp = self._raw_request(url,method,body,headers) resp = self._raw_request(url,method,body,headers)
# Loop to retry for redirects and authentication responses. # Loop to retry for redirects and authentication responses.
while resp.status in (301,302,401,403): while resp.status in (301,302,401,403):
resp.close() resp.close()
if resp.status in (301,302,): if resp.status in (301,302,):
...@@ -268,7 +270,7 @@ class DAVFS(FS): ...@@ -268,7 +270,7 @@ class DAVFS(FS):
raise OperationFailedError(msg="redirection seems to be looping") raise OperationFailedError(msg="redirection seems to be looping")
if len(visited) > 10: if len(visited) > 10:
raise OperationFailedError("too much redirection") raise OperationFailedError("too much redirection")
elif resp.status in (401,403): elif resp.status in (401,403):
if self.get_credentials is None: if self.get_credentials is None:
break break
else: else:
...@@ -276,7 +278,7 @@ class DAVFS(FS): ...@@ -276,7 +278,7 @@ class DAVFS(FS):
if creds is None: if creds is None:
break break
else: else:
self.credentials = creds self.credentials = creds
resp = self._raw_request(url,method,body,headers) resp = self._raw_request(url,method,body,headers)
except Exception: except Exception:
if resp is not None: if resp is not None:
...@@ -343,8 +345,10 @@ class DAVFS(FS): ...@@ -343,8 +345,10 @@ class DAVFS(FS):
msg = str(e) msg = str(e)
raise RemoteConnectionError("",msg=msg,details=e) raise RemoteConnectionError("",msg=msg,details=e)
def setcontents(self,path, contents, chunk_size=1024*64): def setcontents(self,path, data=b'', encoding=None, errors=None, chunk_size=1024 * 64):
resp = self._request(path,"PUT",contents) if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
resp = self._request(path, "PUT", data)
resp.close() resp.close()
if resp.status == 405: if resp.status == 405:
raise ResourceInvalidError(path) raise ResourceInvalidError(path)
...@@ -353,7 +357,8 @@ class DAVFS(FS): ...@@ -353,7 +357,8 @@ class DAVFS(FS):
if resp.status not in (200,201,204): if resp.status not in (200,201,204):
raise_generic_error(resp,"setcontents",path) raise_generic_error(resp,"setcontents",path)
def open(self,path,mode="r"): @iotools.filelike_to_stream
def open(self,path,mode="r", **kwargs):
mode = mode.replace("b","").replace("t","") mode = mode.replace("b","").replace("t","")
# Truncate the file if requested # Truncate the file if requested
contents = b("") contents = b("")
...@@ -417,7 +422,7 @@ class DAVFS(FS): ...@@ -417,7 +422,7 @@ class DAVFS(FS):
if self._isurl(path,res.href): if self._isurl(path,res.href):
for ps in res.propstats: for ps in res.propstats:
if ps.props.getElementsByTagNameNS("DAV:","collection"): if ps.props.getElementsByTagNameNS("DAV:","collection"):
return True return True
return False return False
finally: finally:
response.close() response.close()
...@@ -437,11 +442,11 @@ class DAVFS(FS): ...@@ -437,11 +442,11 @@ class DAVFS(FS):
rt = ps.props.getElementsByTagNameNS("DAV:","resourcetype") rt = ps.props.getElementsByTagNameNS("DAV:","resourcetype")
cl = ps.props.getElementsByTagNameNS("DAV:","collection") cl = ps.props.getElementsByTagNameNS("DAV:","collection")
if rt and not cl: if rt and not cl:
return True return True
return False return False
finally: finally:
response.close() response.close()
def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False): def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
return list(self.ilistdir(path=path,wildcard=wildcard,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only)) return list(self.ilistdir(path=path,wildcard=wildcard,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only))
......
...@@ -26,6 +26,7 @@ from pyftpdlib import ftpserver ...@@ -26,6 +26,7 @@ from pyftpdlib import ftpserver
from fs.path import * from fs.path import *
from fs.osfs import OSFS from fs.osfs import OSFS
from fs.errors import convert_fs_errors from fs.errors import convert_fs_errors
from fs import iotools
# Get these once so we can reuse them: # Get these once so we can reuse them:
...@@ -96,8 +97,9 @@ class FTPFS(ftpserver.AbstractedFS): ...@@ -96,8 +97,9 @@ class FTPFS(ftpserver.AbstractedFS):
@convert_fs_errors @convert_fs_errors
@decode_args @decode_args
def open(self, path, mode): @iotools.filelike_to_stream
return self.fs.open(path, mode) def open(self, path, mode, **kwargs):
return self.fs.open(path, mode, **kwargs)
@convert_fs_errors @convert_fs_errors
def chdir(self, path): def chdir(self, path):
......
...@@ -122,7 +122,7 @@ class SFTPServerInterface(paramiko.SFTPServerInterface): ...@@ -122,7 +122,7 @@ class SFTPServerInterface(paramiko.SFTPServerInterface):
if not isinstance(stat, int): if not isinstance(stat, int):
stats.append(stat) stats.append(stat)
return stats return stats
@report_sftp_errors @report_sftp_errors
def stat(self, path): def stat(self, path):
if not isinstance(path, unicode): if not isinstance(path, unicode):
...@@ -221,8 +221,8 @@ class SFTPHandle(paramiko.SFTPHandle): ...@@ -221,8 +221,8 @@ class SFTPHandle(paramiko.SFTPHandle):
""" """
def __init__(self, owner, path, flags): def __init__(self, owner, path, flags):
super(SFTPHandle,self).__init__(flags) super(SFTPHandle, self).__init__(flags)
mode = flags_to_mode(flags) + "b" mode = flags_to_mode(flags)
self.owner = owner self.owner = owner
if not isinstance(path, unicode): if not isinstance(path, unicode):
path = path.decode(self.owner.encoding) path = path.decode(self.owner.encoding)
......
...@@ -18,55 +18,55 @@ class Request(object): ...@@ -18,55 +18,55 @@ class Request(object):
"""Very simple request object""" """Very simple request object"""
def __init__(self, environ, start_response): def __init__(self, environ, start_response):
self.environ = environ self.environ = environ
self.start_response = start_response self.start_response = start_response
self.path = environ.get('PATH_INFO') self.path = environ.get('PATH_INFO')
class WSGIServer(object): class WSGIServer(object):
"""Light-weight WSGI server that exposes an FS""" """Light-weight WSGI server that exposes an FS"""
def __init__(self, serve_fs, indexes=True, dir_template=None, chunk_size=16*1024*1024): def __init__(self, serve_fs, indexes=True, dir_template=None, chunk_size=16*1024*1024):
if dir_template is None: if dir_template is None:
from dirtemplate import template as dir_template from dirtemplate import template as dir_template
self.serve_fs = serve_fs self.serve_fs = serve_fs
self.indexes = indexes self.indexes = indexes
self.chunk_size = chunk_size self.chunk_size = chunk_size
self.dir_template = Template(dir_template) self.dir_template = Template(dir_template)
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
request = Request(environ, start_response) request = Request(environ, start_response)
if not self.serve_fs.exists(request.path): if not self.serve_fs.exists(request.path):
return self.serve_404(request) return self.serve_404(request)
if self.serve_fs.isdir(request.path): if self.serve_fs.isdir(request.path):
if not self.indexes: if not self.indexes:
return self.serve_404(request) return self.serve_404(request)
return self.serve_dir(request) return self.serve_dir(request)
else: else:
return self.serve_file(request) return self.serve_file(request)
def serve_file(self, request): def serve_file(self, request):
"""Serve a file, guessing a mime-type""" """Serve a file, guessing a mime-type"""
path = request.path path = request.path
serving_file = None serving_file = None
try: try:
serving_file = self.serve_fs.open(path, 'rb') serving_file = self.serve_fs.open(path, 'rb')
except Exception, e: except Exception, e:
if serving_file is not None: if serving_file is not None:
serving_file.close() serving_file.close()
return self.serve_500(request, str(e)) return self.serve_500(request, str(e))
mime_type = mimetypes.guess_type(basename(path)) mime_type = mimetypes.guess_type(basename(path))
file_size = self.serve_fs.getsize(path) file_size = self.serve_fs.getsize(path)
headers = [('Content-Type', mime_type), headers = [('Content-Type', mime_type),
('Content-Length', str(file_size))] ('Content-Length', str(file_size))]
def gen_file(): def gen_file():
try: try:
while True: while True:
...@@ -76,36 +76,36 @@ class WSGIServer(object): ...@@ -76,36 +76,36 @@ class WSGIServer(object):
yield data yield data
finally: finally:
serving_file.close() serving_file.close()
request.start_response('200 OK', request.start_response('200 OK',
headers) headers)
return gen_file() return gen_file()
def serve_dir(self, request): def serve_dir(self, request):
"""Serve an index page""" """Serve an index page"""
fs = self.serve_fs fs = self.serve_fs
isdir = fs.isdir isdir = fs.isdir
path = request.path path = request.path
dirinfo = fs.listdirinfo(path, full=True, absolute=True) dirinfo = fs.listdirinfo(path, full=True, absolute=True)
entries = [] entries = []
for p, info in dirinfo: for p, info in dirinfo:
entry = {} entry = {}
entry['path'] = p entry['path'] = p
entry['name'] = basename(p) entry['name'] = basename(p)
entry['size'] = info.get('size', 'unknown') entry['size'] = info.get('size', 'unknown')
entry['created_time'] = info.get('created_time') entry['created_time'] = info.get('created_time')
if isdir(p): if isdir(p):
entry['type'] = 'dir' entry['type'] = 'dir'
else: else:
entry['type'] = 'file' entry['type'] = 'file'
entries.append(entry) entries.append(entry)
# Put dirs first, and sort by reverse created time order # Put dirs first, and sort by reverse created time order
no_time = datetime(1970, 1, 1, 1, 0) no_time = datetime(1970, 1, 1, 1, 0)
entries.sort(key=lambda k:(k['type'] == 'dir', k.get('created_time') or no_time), reverse=True) entries.sort(key=lambda k:(k['type'] == 'dir', k.get('created_time') or no_time), reverse=True)
# Turn datetime to text and tweak names # Turn datetime to text and tweak names
for entry in entries: for entry in entries:
t = entry.get('created_time') t = entry.get('created_time')
...@@ -113,35 +113,35 @@ class WSGIServer(object): ...@@ -113,35 +113,35 @@ class WSGIServer(object):
entry['created_time'] = t.ctime() entry['created_time'] = t.ctime()
if entry['type'] == 'dir': if entry['type'] == 'dir':
entry['name'] += '/' entry['name'] += '/'
# Add an up dir link for non-root # Add an up dir link for non-root
if path not in ('', '/'): if path not in ('', '/'):
entries.insert(0, dict(name='../', path='../', type="dir", size='', created_time='..')) entries.insert(0, dict(name='../', path='../', type="dir", size='', created_time='..'))
# Render the mako template # Render the mako template
html = self.dir_template.render(**dict(fs=self.serve_fs, html = self.dir_template.render(**dict(fs=self.serve_fs,
path=path, path=path,
dirlist=entries)) dirlist=entries))
request.start_response('200 OK', [('Content-Type', 'text/html'), request.start_response('200 OK', [('Content-Type', 'text/html'),
('Content-Length', '%i' % len(html))]) ('Content-Length', '%i' % len(html))])
return [html] return [html]
def serve_404(self, request, msg='Not found'): def serve_404(self, request, msg='Not found'):
"""Serves a Not found page""" """Serves a Not found page"""
request.start_response('404 NOT FOUND', [('Content-Type', 'text/html')]) request.start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
return [msg] return [msg]
def serve_500(self, request, msg='Unable to complete request'): def serve_500(self, request, msg='Unable to complete request'):
"""Serves an internal server error page""" """Serves an internal server error page"""
request.start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) request.start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [msg] return [msg]
def serve_fs(fs, indexes=True): def serve_fs(fs, indexes=True):
"""Serves an FS object via WSGI""" """Serves an FS object via WSGI"""
application = WSGIServer(fs, indexes) application = WSGIServer(fs, indexes)
return application return application
...@@ -18,9 +18,11 @@ an FS object, which can then be exposed using whatever server you choose ...@@ -18,9 +18,11 @@ an FS object, which can then be exposed using whatever server you choose
import xmlrpclib import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer from SimpleXMLRPCServer import SimpleXMLRPCServer
from datetime import datetime from datetime import datetime
import base64
import six import six
from six import PY3, b from six import PY3
class RPCFSInterface(object): class RPCFSInterface(object):
"""Wrapper to expose an FS via a XML-RPC compatible interface. """Wrapper to expose an FS via a XML-RPC compatible interface.
...@@ -40,26 +42,23 @@ class RPCFSInterface(object): ...@@ -40,26 +42,23 @@ class RPCFSInterface(object):
must return something that can be represented in ASCII. The default must return something that can be represented in ASCII. The default
is base64-encoded UTF-8. is base64-encoded UTF-8.
""" """
if PY3: #return path
return path return six.text_type(base64.b64encode(path.encode("utf8")), 'ascii')
return path.encode("utf8").encode("base64")
def decode_path(self, path): def decode_path(self, path):
"""Decode paths arriving over the wire.""" """Decode paths arriving over the wire."""
if PY3: return six.text_type(base64.b64decode(path.encode('ascii')), 'utf8')
return path
return path.decode("base64").decode("utf8")
def getmeta(self, meta_name): def getmeta(self, meta_name):
meta = self.fs.getmeta(meta_name) meta = self.fs.getmeta(meta_name)
if isinstance(meta, basestring): if isinstance(meta, basestring):
meta = meta.decode('base64') meta = self.decode_path(meta)
return meta return meta
def getmeta_default(self, meta_name, default): def getmeta_default(self, meta_name, default):
meta = self.fs.getmeta(meta_name, default) meta = self.fs.getmeta(meta_name, default)
if isinstance(meta, basestring): if isinstance(meta, basestring):
meta = meta.decode('base64') meta = self.decode_path(meta)
return meta return meta
def hasmeta(self, meta_name): def hasmeta(self, meta_name):
...@@ -72,7 +71,7 @@ class RPCFSInterface(object): ...@@ -72,7 +71,7 @@ class RPCFSInterface(object):
def set_contents(self, path, data): def set_contents(self, path, data):
path = self.decode_path(path) path = self.decode_path(path)
self.fs.setcontents(path,data.data) self.fs.setcontents(path, data.data)
def exists(self, path): def exists(self, path):
path = self.decode_path(path) path = self.decode_path(path)
...@@ -88,7 +87,7 @@ class RPCFSInterface(object): ...@@ -88,7 +87,7 @@ class RPCFSInterface(object):
def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
path = self.decode_path(path) path = self.decode_path(path)
entries = self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only) entries = self.fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)
return [self.encode_path(e) for e in entries] return [self.encode_path(e) for e in entries]
def makedir(self, path, recursive=False, allow_recreate=False): def makedir(self, path, recursive=False, allow_recreate=False):
...@@ -149,7 +148,7 @@ class RPCFSInterface(object): ...@@ -149,7 +148,7 @@ class RPCFSInterface(object):
dst = self.decode_path(dst) dst = self.decode_path(dst)
return self.fs.copy(src, dst, overwrite, chunk_size) return self.fs.copy(src, dst, overwrite, chunk_size)
def move(self,src,dst,overwrite=False,chunk_size=16384): def move(self, src, dst, overwrite=False, chunk_size=16384):
src = self.decode_path(src) src = self.decode_path(src)
dst = self.decode_path(dst) dst = self.decode_path(dst)
return self.fs.move(src, dst, overwrite, chunk_size) return self.fs.move(src, dst, overwrite, chunk_size)
...@@ -187,11 +186,10 @@ class RPCFSServer(SimpleXMLRPCServer): ...@@ -187,11 +186,10 @@ class RPCFSServer(SimpleXMLRPCServer):
if logRequests is not None: if logRequests is not None:
kwds['logRequests'] = logRequests kwds['logRequests'] = logRequests
self.serve_more_requests = True self.serve_more_requests = True
SimpleXMLRPCServer.__init__(self,addr,**kwds) SimpleXMLRPCServer.__init__(self, addr, **kwds)
self.register_instance(RPCFSInterface(fs)) self.register_instance(RPCFSInterface(fs))
def serve_forever(self): def serve_forever(self):
"""Override serve_forever to allow graceful shutdown.""" """Override serve_forever to allow graceful shutdown."""
while self.serve_more_requests: while self.serve_more_requests:
self.handle_request() self.handle_request()
...@@ -8,41 +8,45 @@ fs.httpfs ...@@ -8,41 +8,45 @@ fs.httpfs
from fs.base import FS from fs.base import FS
from fs.path import normpath from fs.path import normpath
from fs.errors import ResourceNotFoundError, UnsupportedError from fs.errors import ResourceNotFoundError, UnsupportedError
from fs.filelike import FileWrapper
from fs import iotools
from urllib2 import urlopen, URLError from urllib2 import urlopen, URLError
from datetime import datetime from datetime import datetime
from fs.filelike import FileWrapper
class HTTPFS(FS): class HTTPFS(FS):
"""Can barely be called a filesystem, because HTTP servers generally don't support """Can barely be called a filesystem, because HTTP servers generally don't support
typical filesystem functionality. This class exists to allow the :doc:`opener` system typical filesystem functionality. This class exists to allow the :doc:`opener` system
to read files over HTTP. to read files over HTTP.
If you do need filesystem like functionality over HTTP, see :mod:`fs.contrib.davfs`. If you do need filesystem like functionality over HTTP, see :mod:`fs.contrib.davfs`.
""" """
_meta = {'read_only':True, _meta = {'read_only': True,
'network':True,} 'network': True}
def __init__(self, url): def __init__(self, url):
""" """
:param url: The base URL :param url: The base URL
""" """
self.root_url = url self.root_url = url
def _make_url(self, path): def _make_url(self, path):
path = normpath(path) path = normpath(path)
url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/')) url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/'))
return url return url
def open(self, path, mode="r"): @iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
if '+' in mode or 'w' in mode or 'a' in mode: if '+' in mode or 'w' in mode or 'a' in mode:
raise UnsupportedError('write') raise UnsupportedError('write')
url = self._make_url(path) url = self._make_url(path)
try: try:
f = urlopen(url) f = urlopen(url)
...@@ -50,15 +54,15 @@ class HTTPFS(FS): ...@@ -50,15 +54,15 @@ class HTTPFS(FS):
raise ResourceNotFoundError(path, details=e) raise ResourceNotFoundError(path, details=e)
except OSError, e: except OSError, e:
raise ResourceNotFoundError(path, details=e) raise ResourceNotFoundError(path, details=e)
return FileWrapper(f) return FileWrapper(f)
def exists(self, path): def exists(self, path):
return self.isfile(path) return self.isfile(path)
def isdir(self, path): def isdir(self, path):
return False return False
def isfile(self, path): def isfile(self, path):
url = self._make_url(path) url = self._make_url(path)
f = None f = None
...@@ -70,9 +74,9 @@ class HTTPFS(FS): ...@@ -70,9 +74,9 @@ class HTTPFS(FS):
finally: finally:
if f is not None: if f is not None:
f.close() f.close()
return True return True
def listdir(self, path="./", def listdir(self, path="./",
wildcard=None, wildcard=None,
full=False, full=False,
......
from __future__ import unicode_literals from __future__ import unicode_literals
from __future__ import print_function from __future__ import print_function
import io import io
from functools import wraps
import six
class RawWrapper(object): class RawWrapper(object):
"""Convert a Python 2 style file-like object in to a IO object""" """Convert a Python 2 style file-like object in to a IO object"""
def __init__(self, f, mode=None, name=None): def __init__(self, f, mode=None, name=None):
self._f = f self._f = f
self.is_io = isinstance(f, io.IOBase)
if mode is None and hasattr(f, 'mode'): if mode is None and hasattr(f, 'mode'):
mode = f.mode mode = f.mode
self.mode = mode self.mode = mode
self.name = name self.name = name
self.closed = False self.closed = False
super(RawWrapper, self).__init__() super(RawWrapper, self).__init__()
def __repr__(self): def __repr__(self):
...@@ -35,12 +39,18 @@ class RawWrapper(object): ...@@ -35,12 +39,18 @@ class RawWrapper(object):
return self._f.seek(offset, whence) return self._f.seek(offset, whence)
def readable(self): def readable(self):
if hasattr(self._f, 'readable'):
return self._f.readable()
return 'r' in self.mode return 'r' in self.mode
def writable(self):
    """Return True if the wrapped file can be written to.

    Prefers the wrapped object's own ``writable()`` method when it has
    one (io objects do); otherwise falls back to inspecting the mode
    string.
    """
    # Bug fix: the original tested hasattr(self._f, 'writeable') and then
    # called self._fs.writeable() -- 'writeable' is a misspelling of the
    # io-module method 'writable', and '_fs' a typo for '_f', so the
    # branch either never fired or raised AttributeError.
    if hasattr(self._f, 'writable'):
        return self._f.writable()
    return 'w' in self.mode
def seekable(self): def seekable(self):
if hasattr(self._f, 'seekable'):
return self._f.seekable()
try: try:
self.seek(0, io.SEEK_CUR) self.seek(0, io.SEEK_CUR)
except IOError: except IOError:
...@@ -51,11 +61,14 @@ class RawWrapper(object): ...@@ -51,11 +61,14 @@ class RawWrapper(object):
def tell(self): def tell(self):
return self._f.tell() return self._f.tell()
def truncate(self, size): def truncate(self, size=None):
return self._f.truncate(size) return self._f.truncate(size)
def write(self, data): def write(self, data):
return self._f.write(data) if self.is_io:
return self._f.write(data)
self._f.write(data)
return len(data)
def read(self, n=-1): def read(self, n=-1):
if n == -1: if n == -1:
...@@ -63,21 +76,21 @@ class RawWrapper(object): ...@@ -63,21 +76,21 @@ class RawWrapper(object):
return self._f.read(n) return self._f.read(n)
def read1(self, n=-1):
    """Read and return up to ``n`` bytes with at most one call to the
    underlying object's read method.

    Delegates to the wrapped io object's ``read1`` when available;
    otherwise falls back to a plain ``read``.
    """
    if self.is_io:
        # Bug fix: the original returned self.read1(n), which recurses
        # into this very method forever; delegate to the wrapped object.
        return self._f.read1(n)
    return self.read(n)
def readall(self): def readall(self):
return self._f.read() return self._f.read()
def readinto(self, b): def readinto(self, b):
if self.is_io:
return self._f.readinto(b)
data = self._f.read(len(b)) data = self._f.read(len(b))
bytes_read = len(data) bytes_read = len(data)
b[:len(data)] = data b[:len(data)] = data
return bytes_read return bytes_read
def write(self, b):
bytes_written = self._f.write(b)
return bytes_written
def writelines(self, sequence): def writelines(self, sequence):
return self._f.writelines(sequence) return self._f.writelines(sequence)
...@@ -87,6 +100,32 @@ class RawWrapper(object): ...@@ -87,6 +100,32 @@ class RawWrapper(object):
def __exit__(self, *args, **kwargs): def __exit__(self, *args, **kwargs):
self.close() self.close()
def __iter__(self):
    # Delegate iteration (e.g. line-by-line reads) to the wrapped
    # file-like object.
    return iter(self._f)
def filelike_to_stream(f):
    """Decorator for an FS ``open`` method that returns a Python 2 style
    file-like object: the returned object is wrapped with ``make_stream``
    (defined later in this module) so callers always receive an io-module
    stream with the requested text/binary, encoding and buffering
    behaviour.

    :param f: the undecorated ``open`` method; it is called with the full
        io-style signature and only needs to return a raw file-like object
    """
    @wraps(f)
    def wrapper(self, path, mode='rt', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        # Call the wrapped open() to get the raw file-like object.
        file_like = f(self,
                      path,
                      mode=mode,
                      buffering=buffering,
                      encoding=encoding,
                      errors=errors,
                      newline=newline,
                      line_buffering=line_buffering,
                      **kwargs)
        # Adapt it to an io stream honouring the same open() arguments.
        return make_stream(path,
                           file_like,
                           mode=mode,
                           buffering=buffering,
                           encoding=encoding,
                           errors=errors,
                           newline=newline,
                           line_buffering=line_buffering)
    return wrapper
def make_stream(name, def make_stream(name,
f, f,
...@@ -95,9 +134,8 @@ def make_stream(name, ...@@ -95,9 +134,8 @@ def make_stream(name,
encoding=None, encoding=None,
errors=None, errors=None,
newline=None, newline=None,
closefd=True,
line_buffering=False, line_buffering=False,
**params): **kwargs):
"""Take a Python 2.x binary file and returns an IO Stream""" """Take a Python 2.x binary file and returns an IO Stream"""
r, w, a, binary = 'r' in mode, 'w' in mode, 'a' in mode, 'b' in mode r, w, a, binary = 'r' in mode, 'w' in mode, 'a' in mode, 'b' in mode
if '+' in mode: if '+' in mode:
...@@ -122,6 +160,51 @@ def make_stream(name, ...@@ -122,6 +160,51 @@ def make_stream(name,
return io_object return io_object
def decode_binary(data, encoding=None, errors=None, newline=None):
    """Decode a bytes string exactly as if it had been read from a text
    file opened with the given ``encoding``, ``errors`` and ``newline``
    settings (including universal newline translation).
    """
    binary_stream = io.BytesIO(data)
    text_stream = io.TextIOWrapper(binary_stream,
                                   encoding=encoding,
                                   errors=errors,
                                   newline=newline)
    return text_stream.read()
def make_bytes_io(data, encoding=None, errors=None):
    """Make a bytes IO object from either a string (text or bytes) or an
    open file.

    :param data: bytes, text, or a file-like object to read from
    :param encoding: encoding used if text must be encoded to bytes
    :param errors: error handling scheme for that encoding step
    :returns: a binary file-like object positioned at the data
    """
    if hasattr(data, 'mode') and 'b' in data.mode:
        # It's already a binary file
        return data
    # Bug fix: the original tested isinstance(data, basestring), which is
    # Python 2 only (NameError on Py3) -- contrary to this module's goal
    # of supporting Py3. (text_type, bytes) covers unicode+str on Py2 and
    # str+bytes on Py3.
    if not isinstance(data, (six.text_type, bytes)):
        # It's a file, but we don't know if its binary
        # TODO: Is there a better way than reading the entire file?
        data = data.read() or b''
    if isinstance(data, six.text_type):
        # If its text, encoding in to bytes
        data = data.encode(encoding=encoding, errors=errors)
    return io.BytesIO(data)
def copy_file_to_fs(f, fs, path, encoding=None, errors=None, progress_callback=None, chunk_size=64 * 1024):
    """Copy an open file to a path on an FS, chunk by chunk.

    The first chunk read from ``f`` decides the destination mode: a text
    chunk opens the target in ``'wt'`` (with ``encoding``/``errors``),
    anything else in ``'wb'``.

    :param f: an open file-like object to read from
    :param fs: destination FS object (its ``open`` method is used)
    :param path: destination path on ``fs``
    :param encoding: text encoding used when writing text data
    :param errors: encoding error scheme used when writing text data
    :param progress_callback: optional callable invoked with the running
        total of bytes written after each chunk
    :param chunk_size: number of bytes/characters read per iteration
    :returns: total number of bytes (or characters, for text) written
    """
    if progress_callback is None:
        progress_callback = lambda bytes_written: None
    # Bind f.read before rebinding ``f`` to the destination file below.
    read = f.read
    chunk = read(chunk_size)
    if isinstance(chunk, six.text_type):
        f = fs.open(path, 'wt', encoding=encoding, errors=errors)
    else:
        f = fs.open(path, 'wb')
    write = f.write
    bytes_written = 0
    try:
        while chunk:
            write(chunk)
            bytes_written += len(chunk)
            progress_callback(bytes_written)
            chunk = read(chunk_size)
    finally:
        # Always close the destination, even if a read/write fails.
        f.close()
    return bytes_written
if __name__ == "__main__": if __name__ == "__main__":
print("Reading a binary file") print("Reading a binary file")
bin_file = open('tests/data/UTF-8-demo.txt', 'rb') bin_file = open('tests/data/UTF-8-demo.txt', 'rb')
......
...@@ -46,6 +46,7 @@ from fs.base import * ...@@ -46,6 +46,7 @@ from fs.base import *
from fs.errors import * from fs.errors import *
from fs.path import * from fs.path import *
from fs import _thread_synchronize_default from fs import _thread_synchronize_default
from fs import iotools
class DirMount(object): class DirMount(object):
...@@ -286,7 +287,7 @@ class MountFS(FS): ...@@ -286,7 +287,7 @@ class MountFS(FS):
def makedir(self, path, recursive=False, allow_recreate=False): def makedir(self, path, recursive=False, allow_recreate=False):
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" ) raise UnsupportedError("make directory", msg="Can only makedir for mounted paths")
if not delegate_path: if not delegate_path:
if allow_recreate: if allow_recreate:
return return
...@@ -295,7 +296,7 @@ class MountFS(FS): ...@@ -295,7 +296,7 @@ class MountFS(FS):
return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate) return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize @synchronize
def open(self, path, mode="r", **kwargs): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
obj = self.mount_tree.get(path, None) obj = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(obj) is MountFS.FileMount:
callable = obj.open_callable callable = obj.open_callable
...@@ -309,20 +310,24 @@ class MountFS(FS): ...@@ -309,20 +310,24 @@ class MountFS(FS):
return fs.open(delegate_path, mode, **kwargs) return fs.open(delegate_path, mode, **kwargs)
@synchronize @synchronize
def setcontents(self, path, data, chunk_size=64*1024): def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
obj = self.mount_tree.get(path, None) obj = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(obj) is MountFS.FileMount:
return super(MountFS,self).setcontents(path, data, chunk_size=chunk_size) return super(MountFS, self).setcontents(path,
data,
encoding=encoding,
errors=errors,
chunk_size=chunk_size)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise ParentDirectoryMissingError(path) raise ParentDirectoryMissingError(path)
return fs.setcontents(delegate_path, data, chunk_size) return fs.setcontents(delegate_path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
@synchronize @synchronize
def createfile(self, path, wipe=False): def createfile(self, path, wipe=False):
obj = self.mount_tree.get(path, None) obj = self.mount_tree.get(path, None)
if type(obj) is MountFS.FileMount: if type(obj) is MountFS.FileMount:
return super(MountFS,self).createfile(path, wipe=wipe) return super(MountFS, self).createfile(path, wipe=wipe)
fs, _mount_path, delegate_path = self._delegate(path) fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None: if fs is self or fs is None:
raise ParentDirectoryMissingError(path) raise ParentDirectoryMissingError(path)
...@@ -430,7 +435,7 @@ class MountFS(FS): ...@@ -430,7 +435,7 @@ class MountFS(FS):
"""Unmounts a path. """Unmounts a path.
:param path: Path to unmount :param path: Path to unmount
:return: True if a dir was unmounted, False if the path was already unmounted :return: True if a path was unmounted, False if the path was already unmounted
:rtype: bool :rtype: bool
""" """
......
...@@ -15,17 +15,17 @@ to *theme* a web application. We start with the following directories:: ...@@ -15,17 +15,17 @@ to *theme* a web application. We start with the following directories::
`-- templates `-- templates
|-- snippets |-- snippets
| `-- panel.html | `-- panel.html
|-- index.html |-- index.html
|-- profile.html |-- profile.html
`-- base.html `-- base.html
`-- theme `-- theme
|-- snippets |-- snippets
| |-- widget.html | |-- widget.html
| `-- extra.html | `-- extra.html
|-- index.html |-- index.html
`-- theme.html `-- theme.html
And we want to create a single filesystem that looks for files in `templates` if And we want to create a single filesystem that looks for files in `templates` if
they don't exist in `theme`. We can do this with the following code:: they don't exist in `theme`. We can do this with the following code::
...@@ -36,29 +36,29 @@ they don't exist in `theme`. We can do this with the following code:: ...@@ -36,29 +36,29 @@ they don't exist in `theme`. We can do this with the following code::
themed_template_fs.addfs('templates', OSFS('templates')) themed_template_fs.addfs('templates', OSFS('templates'))
themed_template_fs.addfs('theme', OSFS('themes')) themed_template_fs.addfs('theme', OSFS('themes'))
Now we have a `themed_template_fs` FS object presents a single view of both
directories:: Now we have a `themed_template_fs` FS object presents a single view of both
directories::
|-- snippets |-- snippets
| |-- panel.html | |-- panel.html
| |-- widget.html | |-- widget.html
| `-- extra.html | `-- extra.html
|-- index.html |-- index.html
|-- profile.html |-- profile.html
|-- base.html |-- base.html
`-- theme.html `-- theme.html
A MultiFS is generally read-only, and any operation that may modify data A MultiFS is generally read-only, and any operation that may modify data
(including opening files for writing) will fail. However, you can set a (including opening files for writing) will fail. However, you can set a
writeable fs with the `setwritefs` method -- which does not have to be writeable fs with the `setwritefs` method -- which does not have to be
one of the FS objects set with `addfs`. one of the FS objects set with `addfs`.
The reason that only one FS object is ever considered for write access is The reason that only one FS object is ever considered for write access is
that otherwise it would be ambiguous as to which filesystem you would want that otherwise it would be ambiguous as to which filesystem you would want
to modify. If you need to be able to modify more than one FS in the MultiFS, to modify. If you need to be able to modify more than one FS in the MultiFS,
you can always access them directly. you can always access them directly.
""" """
...@@ -76,7 +76,7 @@ class MultiFS(FS): ...@@ -76,7 +76,7 @@ class MultiFS(FS):
it succeeds. In effect, creating a filesystem that combines the files and it succeeds. In effect, creating a filesystem that combines the files and
dirs of its children. dirs of its children.
""" """
_meta = { 'virtual': True, _meta = { 'virtual': True,
'read_only' : False, 'read_only' : False,
'unicode_paths' : True, 'unicode_paths' : True,
...@@ -85,9 +85,9 @@ class MultiFS(FS): ...@@ -85,9 +85,9 @@ class MultiFS(FS):
def __init__(self, auto_close=True): def __init__(self, auto_close=True):
""" """
:param auto_close: If True the child filesystems will be closed when the MultiFS is closed :param auto_close: If True the child filesystems will be closed when the MultiFS is closed
""" """
super(MultiFS, self).__init__(thread_synchronize=_thread_synchronize_default) super(MultiFS, self).__init__(thread_synchronize=_thread_synchronize_default)
...@@ -95,7 +95,7 @@ class MultiFS(FS): ...@@ -95,7 +95,7 @@ class MultiFS(FS):
self.fs_sequence = [] self.fs_sequence = []
self.fs_lookup = {} self.fs_lookup = {}
self.fs_priorities = {} self.fs_priorities = {}
self.writefs = None self.writefs = None
@synchronize @synchronize
def __str__(self): def __str__(self):
...@@ -117,19 +117,19 @@ class MultiFS(FS): ...@@ -117,19 +117,19 @@ class MultiFS(FS):
for fs in self.fs_sequence: for fs in self.fs_sequence:
fs.close() fs.close()
if self.writefs is not None: if self.writefs is not None:
self.writefs.close() self.writefs.close()
# Discard any references # Discard any references
del self.fs_sequence[:] del self.fs_sequence[:]
self.fs_lookup.clear() self.fs_lookup.clear()
self.fs_priorities.clear() self.fs_priorities.clear()
self.writefs = None self.writefs = None
super(MultiFS, self).close() super(MultiFS, self).close()
def _priority_sort(self): def _priority_sort(self):
"""Sort filesystems by priority order""" """Sort filesystems by priority order"""
priority_order = sorted(self.fs_lookup.keys(), key=lambda n:self.fs_priorities[n], reverse=True) priority_order = sorted(self.fs_lookup.keys(), key=lambda n:self.fs_priorities[n], reverse=True)
self.fs_sequence = [self.fs_lookup[name] for name in priority_order] self.fs_sequence = [self.fs_lookup[name] for name in priority_order]
@synchronize @synchronize
def addfs(self, name, fs, write=False, priority=0): def addfs(self, name, fs, write=False, priority=0):
"""Adds a filesystem to the MultiFS. """Adds a filesystem to the MultiFS.
...@@ -141,19 +141,19 @@ class MultiFS(FS): ...@@ -141,19 +141,19 @@ class MultiFS(FS):
:param priority: A number that gives the priorty of the filesystem being added. :param priority: A number that gives the priorty of the filesystem being added.
Filesystems will be searched in descending priority order and then by the reverse order they were added. Filesystems will be searched in descending priority order and then by the reverse order they were added.
So by default, the most recently added filesystem will be looked at first So by default, the most recently added filesystem will be looked at first
""" """
if name in self.fs_lookup: if name in self.fs_lookup:
raise ValueError("Name already exists.") raise ValueError("Name already exists.")
priority = (priority, len(self.fs_sequence)) priority = (priority, len(self.fs_sequence))
self.fs_priorities[name] = priority self.fs_priorities[name] = priority
self.fs_sequence.append(fs) self.fs_sequence.append(fs)
self.fs_lookup[name] = fs self.fs_lookup[name] = fs
self._priority_sort() self._priority_sort()
if write: if write:
self.setwritefs(fs) self.setwritefs(fs)
...@@ -162,16 +162,16 @@ class MultiFS(FS): ...@@ -162,16 +162,16 @@ class MultiFS(FS):
"""Sets the filesystem to use when write access is required. Without a writeable FS, """Sets the filesystem to use when write access is required. Without a writeable FS,
any operations that could modify data (including opening files for writing / appending) any operations that could modify data (including opening files for writing / appending)
will fail. will fail.
:param fs: An FS object that will be used to open writeable files :param fs: An FS object that will be used to open writeable files
""" """
self.writefs = fs self.writefs = fs
@synchronize @synchronize
def clearwritefs(self): def clearwritefs(self):
"""Clears the writeable filesystem (operations that modify the multifs will fail)""" """Clears the writeable filesystem (operations that modify the multifs will fail)"""
self.writefs = None self.writefs = None
@synchronize @synchronize
def removefs(self, name): def removefs(self, name):
...@@ -209,7 +209,7 @@ class MultiFS(FS): ...@@ -209,7 +209,7 @@ class MultiFS(FS):
:param path: A path in MultiFS :param path: A path in MultiFS
""" """
if 'w' in mode or '+' in mode or 'a' in mode: if 'w' in mode or '+' in mode or 'a' in mode:
return self.writefs return self.writefs
for fs in self: for fs in self:
if fs.exists(path): if fs.exists(path):
...@@ -238,14 +238,14 @@ class MultiFS(FS): ...@@ -238,14 +238,14 @@ class MultiFS(FS):
return "%s, on %s (%s)" % (fs.desc(path), name, fs) return "%s, on %s (%s)" % (fs.desc(path), name, fs)
@synchronize @synchronize
def open(self, path, mode="r", **kwargs): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
if 'w' in mode or '+' in mode or 'a' in mode: if 'w' in mode or '+' in mode or 'a' in mode:
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('open', path=path, msg="No writeable FS set") raise OperationFailedError('open', path=path, msg="No writeable FS set")
return self.writefs.open(path, mode) return self.writefs.open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs)
for fs in self: for fs in self:
if fs.exists(path): if fs.exists(path):
fs_file = fs.open(path, mode, **kwargs) fs_file = fs.open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs)
return fs_file return fs_file
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
...@@ -280,34 +280,34 @@ class MultiFS(FS): ...@@ -280,34 +280,34 @@ class MultiFS(FS):
@synchronize @synchronize
def makedir(self, path, recursive=False, allow_recreate=False): def makedir(self, path, recursive=False, allow_recreate=False):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('makedir', path=path, msg="No writeable FS set") raise OperationFailedError('makedir', path=path, msg="No writeable FS set")
self.writefs.makedir(path, recursive=recursive, allow_recreate=allow_recreate) self.writefs.makedir(path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize @synchronize
def remove(self, path): def remove(self, path):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('remove', path=path, msg="No writeable FS set") raise OperationFailedError('remove', path=path, msg="No writeable FS set")
self.writefs.remove(path) self.writefs.remove(path)
@synchronize @synchronize
def removedir(self, path, recursive=False, force=False): def removedir(self, path, recursive=False, force=False):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('removedir', path=path, msg="No writeable FS set") raise OperationFailedError('removedir', path=path, msg="No writeable FS set")
if normpath(path) in ('', '/'): if normpath(path) in ('', '/'):
raise RemoveRootError(path) raise RemoveRootError(path)
self.writefs.removedir(path, recursive=recursive, force=force) self.writefs.removedir(path, recursive=recursive, force=force)
@synchronize @synchronize
def rename(self, src, dst): def rename(self, src, dst):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('rename', path=src, msg="No writeable FS set") raise OperationFailedError('rename', path=src, msg="No writeable FS set")
self.writefs.rename(src, dst) self.writefs.rename(src, dst)
@synchronize @synchronize
def settimes(self, path, accessed_time=None, modified_time=None): def settimes(self, path, accessed_time=None, modified_time=None):
if self.writefs is None: if self.writefs is None:
raise OperationFailedError('settimes', path=path, msg="No writeable FS set") raise OperationFailedError('settimes', path=path, msg="No writeable FS set")
self.writefs.settimes(path, accessed_time, modified_time) self.writefs.settimes(path, accessed_time, modified_time)
@synchronize @synchronize
def getinfo(self, path): def getinfo(self, path):
......
...@@ -20,6 +20,7 @@ import sys ...@@ -20,6 +20,7 @@ import sys
import errno import errno
import datetime import datetime
import platform import platform
import io
from fs.base import * from fs.base import *
from fs.path import * from fs.path import *
...@@ -76,16 +77,15 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS): ...@@ -76,16 +77,15 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
methods in the os and os.path modules. methods in the os and os.path modules.
""" """
_meta = { 'thread_safe' : True, _meta = {'thread_safe': True,
'network' : False, 'network': False,
'virtual' : False, 'virtual': False,
'read_only' : False, 'read_only': False,
'unicode_paths' : os.path.supports_unicode_filenames, 'unicode_paths': os.path.supports_unicode_filenames,
'case_insensitive_paths' : os.path.normcase('Aa') == 'aa', 'case_insensitive_paths': os.path.normcase('Aa') == 'aa',
'atomic.makedir' : True, 'atomic.makedir': True,
'atomic.rename' : True, 'atomic.rename': True,
'atomic.setcontents' : False, 'atomic.setcontents': False}
}
if platform.system() == 'Windows': if platform.system() == 'Windows':
_meta["invalid_path_chars"] = ''.join(chr(n) for n in xrange(31)) + '\\:*?"<>|' _meta["invalid_path_chars"] = ''.join(chr(n) for n in xrange(31)) + '\\:*?"<>|'
...@@ -215,11 +215,11 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS): ...@@ -215,11 +215,11 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
return super(OSFS, self).getmeta(meta_name, default) return super(OSFS, self).getmeta(meta_name, default)
@convert_os_errors @convert_os_errors
def open(self, path, mode="r", **kwargs): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
mode = ''.join(c for c in mode if c in 'rwabt+') mode = ''.join(c for c in mode if c in 'rwabt+')
sys_path = self.getsyspath(path) sys_path = self.getsyspath(path)
try: try:
return open(sys_path, mode, kwargs.get("buffering", -1)) return io.open(sys_path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline)
except EnvironmentError, e: except EnvironmentError, e:
# Win32 gives EACCES when opening a directory. # Win32 gives EACCES when opening a directory.
if sys.platform == "win32" and e.errno in (errno.EACCES,): if sys.platform == "win32" and e.errno in (errno.EACCES,):
...@@ -228,8 +228,8 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS): ...@@ -228,8 +228,8 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
raise raise
@convert_os_errors @convert_os_errors
def setcontents(self, path, contents, chunk_size=64 * 1024): def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64 * 1024):
return super(OSFS, self).setcontents(path, contents, chunk_size) return super(OSFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
@convert_os_errors @convert_os_errors
def exists(self, path): def exists(self, path):
......
...@@ -26,7 +26,9 @@ from fs.path import * ...@@ -26,7 +26,9 @@ from fs.path import *
from fs.errors import * from fs.errors import *
from fs.remote import * from fs.remote import *
from fs.filelike import LimitBytesFile from fs.filelike import LimitBytesFile
from fs import iotools
import six
# Boto is not thread-safe, so we need to use a per-thread S3 connection. # Boto is not thread-safe, so we need to use a per-thread S3 connection.
if hasattr(threading,"local"): if hasattr(threading,"local"):
...@@ -246,19 +248,19 @@ class S3FS(FS): ...@@ -246,19 +248,19 @@ class S3FS(FS):
s3path = self._s3path(path) s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path) k = self._s3bukt.get_key(s3path)
k.make_public() k.make_public()
def getpathurl(self, path, allow_none=False, expires=3600): def getpathurl(self, path, allow_none=False, expires=3600):
"""Returns a url that corresponds to the given path.""" """Returns a url that corresponds to the given path."""
s3path = self._s3path(path) s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path) k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions? # Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and \ is_public = True in [grant.permission == 'READ' and
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants ] for grant in k.get_acl().acl.grants]
url = k.generate_url(expires, force_http=is_public) url = k.generate_url(expires, force_http=is_public)
if url == None: if url == None:
if not allow_none: if not allow_none:
raise NoPathURLError(path=path) raise NoPathURLError(path=path)
...@@ -267,14 +269,17 @@ class S3FS(FS): ...@@ -267,14 +269,17 @@ class S3FS(FS):
if is_public: if is_public:
# Strip time token; it has no sense for public resource # Strip time token; it has no sense for public resource
url = url.split('?')[0] url = url.split('?')[0]
return url return url
def setcontents(self, path, data, chunk_size=64*1024): def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
s3path = self._s3path(path) s3path = self._s3path(path)
if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
self._sync_set_contents(s3path, data) self._sync_set_contents(s3path, data)
def open(self,path,mode="r"): @iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
"""Open the named file in the given mode. """Open the named file in the given mode.
This method downloads the file contents into a local temporary file This method downloads the file contents into a local temporary file
...@@ -504,7 +509,7 @@ class S3FS(FS): ...@@ -504,7 +509,7 @@ class S3FS(FS):
def removedir(self,path,recursive=False,force=False): def removedir(self,path,recursive=False,force=False):
"""Remove the directory at the given path.""" """Remove the directory at the given path."""
if normpath(path) in ('', '/'): if normpath(path) in ('', '/'):
raise RemoveRootError(path) raise RemoveRootError(path)
s3path = self._s3path(path) s3path = self._s3path(path)
if s3path != self._prefix: if s3path != self._prefix:
s3path = s3path + self._separator s3path = s3path + self._separator
...@@ -654,7 +659,7 @@ class S3FS(FS): ...@@ -654,7 +659,7 @@ class S3FS(FS):
yield item yield item
else: else:
prefix = self._s3path(path) prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix): for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix)) name = relpath(self._uns3path(k.name,prefix))
if name != "": if name != "":
if not isinstance(name,unicode): if not isinstance(name,unicode):
...@@ -682,7 +687,7 @@ class S3FS(FS): ...@@ -682,7 +687,7 @@ class S3FS(FS):
yield (item,self.getinfo(item)) yield (item,self.getinfo(item))
else: else:
prefix = self._s3path(path) prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix): for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix)) name = relpath(self._uns3path(k.name,prefix))
if name != "": if name != "":
if not isinstance(name,unicode): if not isinstance(name,unicode):
...@@ -709,7 +714,7 @@ class S3FS(FS): ...@@ -709,7 +714,7 @@ class S3FS(FS):
yield (item,self.getinfo(item)) yield (item,self.getinfo(item))
else: else:
prefix = self._s3path(path) prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix): for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix)) name = relpath(self._uns3path(k.name,prefix))
if name != "": if name != "":
if not isinstance(name,unicode): if not isinstance(name,unicode):
......
...@@ -19,6 +19,8 @@ from fs.base import * ...@@ -19,6 +19,8 @@ from fs.base import *
from fs.path import * from fs.path import *
from fs.errors import * from fs.errors import *
from fs.utils import isdir, isfile from fs.utils import isdir, isfile
from fs import iotools
class WrongHostKeyError(RemoteConnectionError): class WrongHostKeyError(RemoteConnectionError):
pass pass
...@@ -108,7 +110,6 @@ class SFTPFS(FS): ...@@ -108,7 +110,6 @@ class SFTPFS(FS):
if other authentication is not succesful if other authentication is not succesful
""" """
credentials = dict(username=username, credentials = dict(username=username,
password=password, password=password,
pkey=pkey) pkey=pkey)
...@@ -300,12 +301,12 @@ class SFTPFS(FS): ...@@ -300,12 +301,12 @@ class SFTPFS(FS):
self._transport.close() self._transport.close()
self.closed = True self.closed = True
def _normpath(self,path): def _normpath(self, path):
if not isinstance(path,unicode): if not isinstance(path, unicode):
path = path.decode(self.encoding) path = path.decode(self.encoding)
npath = pathjoin(self.root_path,relpath(normpath(path))) npath = pathjoin(self.root_path, relpath(normpath(path)))
if not isprefix(self.root_path,npath): if not isprefix(self.root_path, npath):
raise PathError(path,msg="Path is outside root: %(path)s") raise PathError(path, msg="Path is outside root: %(path)s")
return npath return npath
def getpathurl(self, path, allow_none=False): def getpathurl(self, path, allow_none=False):
...@@ -325,17 +326,19 @@ class SFTPFS(FS): ...@@ -325,17 +326,19 @@ class SFTPFS(FS):
@synchronize @synchronize
@convert_os_errors @convert_os_errors
def open(self,path,mode="rb",bufsize=-1): @iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, bufsize=-1, **kwargs):
npath = self._normpath(path) npath = self._normpath(path)
if self.isdir(path): if self.isdir(path):
msg = "that's a directory: %(path)s" msg = "that's a directory: %(path)s"
raise ResourceInvalidError(path,msg=msg) raise ResourceInvalidError(path, msg=msg)
# paramiko implements its own buffering and write-back logic, # paramiko implements its own buffering and write-back logic,
# so we don't need to use a RemoteFileBuffer here. # so we don't need to use a RemoteFileBuffer here.
f = self.client.open(npath,mode,bufsize) f = self.client.open(npath, mode, bufsize)
# Unfortunately it has a broken truncate() method. # Unfortunately it has a broken truncate() method.
# TODO: implement this as a wrapper # TODO: implement this as a wrapper
old_truncate = f.truncate old_truncate = f.truncate
def new_truncate(size=None): def new_truncate(size=None):
if size is None: if size is None:
size = f.tell() size = f.tell()
...@@ -354,7 +357,7 @@ class SFTPFS(FS): ...@@ -354,7 +357,7 @@ class SFTPFS(FS):
@synchronize @synchronize
@convert_os_errors @convert_os_errors
def exists(self,path): def exists(self, path):
if path in ('', '/'): if path in ('', '/'):
return True return True
npath = self._normpath(path) npath = self._normpath(path)
...@@ -369,7 +372,7 @@ class SFTPFS(FS): ...@@ -369,7 +372,7 @@ class SFTPFS(FS):
@synchronize @synchronize
@convert_os_errors @convert_os_errors
def isdir(self,path): def isdir(self,path):
if path in ('', '/'): if normpath(path) in ('', '/'):
return True return True
npath = self._normpath(path) npath = self._normpath(path)
try: try:
...@@ -378,7 +381,7 @@ class SFTPFS(FS): ...@@ -378,7 +381,7 @@ class SFTPFS(FS):
if getattr(e,"errno",None) == 2: if getattr(e,"errno",None) == 2:
return False return False
raise raise
return statinfo.S_ISDIR(stat.st_mode) return statinfo.S_ISDIR(stat.st_mode) != 0
@synchronize @synchronize
@convert_os_errors @convert_os_errors
...@@ -390,7 +393,7 @@ class SFTPFS(FS): ...@@ -390,7 +393,7 @@ class SFTPFS(FS):
if getattr(e,"errno",None) == 2: if getattr(e,"errno",None) == 2:
return False return False
raise raise
return statinfo.S_ISREG(stat.st_mode) return statinfo.S_ISREG(stat.st_mode) != 0
@synchronize @synchronize
@convert_os_errors @convert_os_errors
......
...@@ -10,13 +10,14 @@ import os ...@@ -10,13 +10,14 @@ import os
import os.path import os.path
import time import time
import tempfile import tempfile
import platform
from fs.base import synchronize
from fs.osfs import OSFS from fs.osfs import OSFS
from fs.errors import * from fs.errors import *
from fs import _thread_synchronize_default from fs import _thread_synchronize_default
class TempFS(OSFS): class TempFS(OSFS):
"""Create a Filesystem in a temporary directory (with tempfile.mkdtemp), """Create a Filesystem in a temporary directory (with tempfile.mkdtemp),
...@@ -38,7 +39,7 @@ class TempFS(OSFS): ...@@ -38,7 +39,7 @@ class TempFS(OSFS):
self.identifier = identifier self.identifier = identifier
self.temp_dir = temp_dir self.temp_dir = temp_dir
self.dir_mode = dir_mode self.dir_mode = dir_mode
self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir) self._temp_dir = tempfile.mkdtemp(identifier or "TempFS", dir=temp_dir)
self._cleaned = False self._cleaned = False
super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize) super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
...@@ -65,6 +66,7 @@ class TempFS(OSFS): ...@@ -65,6 +66,7 @@ class TempFS(OSFS):
# dir_mode=self.dir_mode, # dir_mode=self.dir_mode,
# thread_synchronize=self.thread_synchronize) # thread_synchronize=self.thread_synchronize)
@synchronize
def close(self): def close(self):
"""Removes the temporary directory. """Removes the temporary directory.
...@@ -73,13 +75,13 @@ class TempFS(OSFS): ...@@ -73,13 +75,13 @@ class TempFS(OSFS):
Note that once this method has been called, the FS object may Note that once this method has been called, the FS object may
no longer be used. no longer be used.
""" """
super(TempFS,self).close() super(TempFS, self).close()
# Depending on how resources are freed by the OS, there could # Depending on how resources are freed by the OS, there could
# be some transient errors when freeing a TempFS soon after it # be some transient errors when freeing a TempFS soon after it
# was used. If they occur, do a small sleep and try again. # was used. If they occur, do a small sleep and try again.
try: try:
self._close() self._close()
except (ResourceLockedError,ResourceInvalidError): except (ResourceLockedError, ResourceInvalidError):
time.sleep(0.5) time.sleep(0.5)
self._close() self._close()
...@@ -97,20 +99,23 @@ class TempFS(OSFS): ...@@ -97,20 +99,23 @@ class TempFS(OSFS):
try: try:
# shutil.rmtree doesn't handle long paths on win32, # shutil.rmtree doesn't handle long paths on win32,
# so we walk the tree by hand. # so we walk the tree by hand.
entries = os.walk(self.root_path,topdown=False) entries = os.walk(self.root_path, topdown=False)
for (dir,dirnames,filenames) in entries: for (dir, dirnames, filenames) in entries:
for filename in filenames: for filename in filenames:
try: try:
os_remove(os.path.join(dir,filename)) os_remove(os.path.join(dir, filename))
except ResourceNotFoundError: except ResourceNotFoundError:
pass pass
for dirname in dirnames: for dirname in dirnames:
try: try:
os_rmdir(os.path.join(dir,dirname)) os_rmdir(os.path.join(dir, dirname))
except ResourceNotFoundError: except ResourceNotFoundError:
pass pass
os.rmdir(self.root_path) try:
os.rmdir(self.root_path)
except OSError:
pass
self._cleaned = True self._cleaned = True
finally: finally:
self._lock.release() self._lock.release()
super(TempFS,self).close() super(TempFS, self).close()
...@@ -6,7 +6,8 @@ ...@@ -6,7 +6,8 @@
import unittest import unittest
import sys import sys
import os, os.path import os
import os.path
import socket import socket
import threading import threading
import time import time
...@@ -32,6 +33,12 @@ try: ...@@ -32,6 +33,12 @@ try:
except ImportError: except ImportError:
if not PY3: if not PY3:
raise raise
import logging
logging.getLogger('paramiko').setLevel(logging.ERROR)
logging.getLogger('paramiko.transport').setLevel(logging.ERROR)
class TestSFTPFS(TestRPCFS): class TestSFTPFS(TestRPCFS):
__test__ = not PY3 __test__ = not PY3
...@@ -55,7 +62,7 @@ except ImportError: ...@@ -55,7 +62,7 @@ except ImportError:
pass pass
else: else:
from fs.osfs import OSFS from fs.osfs import OSFS
class TestFUSE(unittest.TestCase,FSTestCases,ThreadingTestCases): class TestFUSE(unittest.TestCase, FSTestCases, ThreadingTestCases):
def setUp(self): def setUp(self):
self.temp_fs = TempFS() self.temp_fs = TempFS()
...@@ -64,7 +71,7 @@ else: ...@@ -64,7 +71,7 @@ else:
self.mounted_fs = self.temp_fs.opendir("root") self.mounted_fs = self.temp_fs.opendir("root")
self.mount_point = self.temp_fs.getsyspath("mount") self.mount_point = self.temp_fs.getsyspath("mount")
self.fs = OSFS(self.temp_fs.getsyspath("mount")) self.fs = OSFS(self.temp_fs.getsyspath("mount"))
self.mount_proc = fuse.mount(self.mounted_fs,self.mount_point) self.mount_proc = fuse.mount(self.mounted_fs, self.mount_point)
def tearDown(self): def tearDown(self):
self.mount_proc.unmount() self.mount_proc.unmount()
...@@ -76,7 +83,7 @@ else: ...@@ -76,7 +83,7 @@ else:
fuse.unmount(self.mount_point) fuse.unmount(self.mount_point)
self.temp_fs.close() self.temp_fs.close()
def check(self,p): def check(self, p):
return self.mounted_fs.exists(p) return self.mounted_fs.exists(p)
......
...@@ -12,6 +12,7 @@ from fs.zipfs import ZipFS ...@@ -12,6 +12,7 @@ from fs.zipfs import ZipFS
from six import b from six import b
class TestFSImportHook(unittest.TestCase): class TestFSImportHook(unittest.TestCase):
def setUp(self): def setUp(self):
...@@ -140,4 +141,3 @@ class TestFSImportHook(unittest.TestCase): ...@@ -140,4 +141,3 @@ class TestFSImportHook(unittest.TestCase):
sys.path_hooks.remove(FSImportHook) sys.path_hooks.remove(FSImportHook)
sys.path.pop() sys.path.pop()
t.close() t.close()
from __future__ import unicode_literals
from fs import iotools
import io
import unittest
from os.path import dirname, join, abspath
# Python 3 removed the ``unicode`` builtin; alias it to ``str`` so the
# isinstance() checks in the tests below work on both Python 2 and 3.
try:
    unicode
except NameError:
    unicode = str
class OpenFilelike(object):
    """Helper exposing an ``open`` method that returns a raw file-like
    object, used to exercise the ``iotools.filelike_to_stream`` decorator.

    :param make_f: a callable returning a fresh binary file-like object
    """

    def __init__(self, make_f):
        self.make_f = make_f

    @iotools.filelike_to_stream
    def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        # The decorator wraps the returned raw file in an io stream that
        # honours *mode*, *encoding*, etc.; we only supply the raw file.
        return self.make_f()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Fix: the original closed ``self.f``, an attribute that is never
        # assigned (only ``make_f`` is stored), so context-manager use
        # raised AttributeError.  Close a recorded file only if present.
        f = getattr(self, 'f', None)
        if f is not None:
            f.close()
class TestIOTools(unittest.TestCase):
    """Tests for ``fs.iotools`` stream-wrapping helpers."""

    def get_bin_file(self):
        """Open the bundled UTF-8 sample document in binary mode."""
        data_path = join(dirname(abspath(__file__)), 'data/UTF-8-demo.txt')
        return io.open(data_path, 'rb')

    def test_make_stream(self):
        """Test make_stream"""
        # A raw binary read yields bytes...
        with self.get_bin_file() as raw:
            payload = raw.read()
            self.assert_(isinstance(payload, bytes))
        # ...while wrapping the same file in a text-mode stream yields unicode.
        with self.get_bin_file() as raw:
            with iotools.make_stream("data/UTF-8-demo.txt", raw, 'rt') as stream:
                payload = stream.read()
                self.assert_(isinstance(payload, unicode))

    def test_decorator(self):
        """Test filelike_to_stream decorator"""
        opener = OpenFilelike(self.get_bin_file)
        # Binary mode passes the raw bytes through untouched.
        with opener.open('file', 'rb') as stream:
            payload = stream.read()
            self.assert_(isinstance(payload, bytes))
        # Text mode decodes, so the result is a unicode string.
        with opener.open('file', 'rt') as stream:
            payload = stream.read()
            self.assert_(isinstance(payload, unicode))
...@@ -2,10 +2,11 @@ from fs.mountfs import MountFS ...@@ -2,10 +2,11 @@ from fs.mountfs import MountFS
from fs.memoryfs import MemoryFS from fs.memoryfs import MemoryFS
import unittest import unittest
class TestMultiFS(unittest.TestCase):
class TestMountFS(unittest.TestCase):
def test_auto_close(self): def test_auto_close(self):
"""Test MultiFS auto close is working""" """Test MountFS auto close is working"""
multi_fs = MountFS() multi_fs = MountFS()
m1 = MemoryFS() m1 = MemoryFS()
m2 = MemoryFS() m2 = MemoryFS()
...@@ -18,7 +19,7 @@ class TestMultiFS(unittest.TestCase): ...@@ -18,7 +19,7 @@ class TestMultiFS(unittest.TestCase):
self.assert_(m2.closed) self.assert_(m2.closed)
def test_no_auto_close(self): def test_no_auto_close(self):
"""Test MultiFS auto close can be disabled""" """Test MountFS auto close can be disabled"""
multi_fs = MountFS(auto_close=False) multi_fs = MountFS(auto_close=False)
m1 = MemoryFS() m1 = MemoryFS()
m2 = MemoryFS() m2 = MemoryFS()
...@@ -32,7 +33,7 @@ class TestMultiFS(unittest.TestCase): ...@@ -32,7 +33,7 @@ class TestMultiFS(unittest.TestCase):
def test_mountfile(self): def test_mountfile(self):
"""Test mounting a file""" """Test mounting a file"""
quote = """If you wish to make an apple pie from scratch, you must first invent the universe.""" quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
mem_fs = MemoryFS() mem_fs = MemoryFS()
mem_fs.makedir('foo') mem_fs.makedir('foo')
mem_fs.setcontents('foo/bar.txt', quote) mem_fs.setcontents('foo/bar.txt', quote)
...@@ -58,11 +59,11 @@ class TestMultiFS(unittest.TestCase): ...@@ -58,11 +59,11 @@ class TestMultiFS(unittest.TestCase):
# Check changes are written back # Check changes are written back
mem_fs.setcontents('foo/bar.txt', 'baz') mem_fs.setcontents('foo/bar.txt', 'baz')
self.assertEqual(mount_fs.getcontents('bar.txt'), 'baz') self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
self.assertEqual(mount_fs.getsize('bar.txt'), len('baz')) self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))
# Check changes are written to the original fs # Check changes are written to the original fs
self.assertEqual(mem_fs.getcontents('foo/bar.txt'), 'baz') self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz')) self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))
# Check unmount # Check unmount
......
...@@ -24,23 +24,27 @@ from fs.local_functools import wraps ...@@ -24,23 +24,27 @@ from fs.local_functools import wraps
from six import PY3, b from six import PY3, b
class RemoteTempFS(TempFS): class RemoteTempFS(TempFS):
""" """
Simple filesystem implementing setfilecontents Simple filesystem implementing setfilecontents
for RemoteFileBuffer tests for RemoteFileBuffer tests
""" """
def open(self, path, mode='rb', write_on_flush=True): def open(self, path, mode='rb', write_on_flush=True, **kwargs):
if 'a' in mode or 'r' in mode or '+' in mode: if 'a' in mode or 'r' in mode or '+' in mode:
f = super(RemoteTempFS, self).open(path, 'rb') f = super(RemoteTempFS, self).open(path, mode='rb', **kwargs)
f = TellAfterCloseFile(f) f = TellAfterCloseFile(f)
else: else:
f = None f = None
return RemoteFileBuffer(self, path, mode, f, return RemoteFileBuffer(self,
write_on_flush=write_on_flush) path,
mode,
def setcontents(self, path, data, chunk_size=64*1024): f,
f = super(RemoteTempFS, self).open(path, 'wb') write_on_flush=write_on_flush)
def setcontents(self, path, data, encoding=None, errors=None, chunk_size=64*1024):
f = super(RemoteTempFS, self).open(path, 'wb', encoding=encoding, errors=errors, chunk_size=chunk_size)
if getattr(data, 'read', False): if getattr(data, 'read', False):
f.write(data.read()) f.write(data.read())
else: else:
...@@ -51,7 +55,7 @@ class RemoteTempFS(TempFS): ...@@ -51,7 +55,7 @@ class RemoteTempFS(TempFS):
class TellAfterCloseFile(object): class TellAfterCloseFile(object):
"""File-like object that allows calling tell() after it's been closed.""" """File-like object that allows calling tell() after it's been closed."""
def __init__(self,file): def __init__(self, file):
self._finalpos = None self._finalpos = None
self.file = file self.file = file
...@@ -65,49 +69,49 @@ class TellAfterCloseFile(object): ...@@ -65,49 +69,49 @@ class TellAfterCloseFile(object):
return self._finalpos return self._finalpos
return self.file.tell() return self.file.tell()
def __getattr__(self,attr): def __getattr__(self, attr):
return getattr(self.file,attr) return getattr(self.file, attr)
class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
class FakeException(Exception): pass class FakeException(Exception): pass
def setUp(self): def setUp(self):
self.fs = RemoteTempFS() self.fs = RemoteTempFS()
self.original_setcontents = self.fs.setcontents self.original_setcontents = self.fs.setcontents
def tearDown(self): def tearDown(self):
self.fs.close() self.fs.close()
self.fakeOff() self.fakeOff()
def fake_setcontents(self, path, content=b(''), chunk_size=16*1024): def fake_setcontents(self, path, content=b(''), chunk_size=16*1024):
''' Fake replacement for RemoteTempFS setcontents() ''' ''' Fake replacement for RemoteTempFS setcontents() '''
raise self.FakeException("setcontents should not be called here!") raise self.FakeException("setcontents should not be called here!")
def fakeOn(self): def fakeOn(self):
''' '''
Turn on fake_setcontents(). When setcontents on RemoteTempFS Turn on fake_setcontents(). When setcontents on RemoteTempFS
is called, FakeException is raised and nothing is stored. is called, FakeException is raised and nothing is stored.
''' '''
self.fs.setcontents = self.fake_setcontents self.fs.setcontents = self.fake_setcontents
def fakeOff(self): def fakeOff(self):
''' Switch off fake_setcontents(). ''' ''' Switch off fake_setcontents(). '''
self.fs.setcontents = self.original_setcontents self.fs.setcontents = self.original_setcontents
def test_ondemand(self): def test_ondemand(self):
''' '''
Tests on-demand loading of remote content in RemoteFileBuffer Tests on-demand loading of remote content in RemoteFileBuffer
''' '''
contents = b("Tristatricettri stribrnych strikacek strikalo") + \ contents = b("Tristatricettri stribrnych strikacek strikalo") + \
b("pres tristatricettri stribrnych strech.") b("pres tristatricettri stribrnych strech.")
f = self.fs.open('test.txt', 'wb') f = self.fs.open('test.txt', 'wb')
f.write(contents) f.write(contents)
f.close() f.close()
# During following tests, no setcontents() should be called. # During following tests, no setcontents() should be called.
self.fakeOn() self.fakeOn()
f = self.fs.open('test.txt', 'rb') f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(10), contents[:10]) self.assertEquals(f.read(10), contents[:10])
f.wrapped_file.seek(0, SEEK_END) f.wrapped_file.seek(0, SEEK_END)
...@@ -118,18 +122,18 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -118,18 +122,18 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f.seek(0, SEEK_END) f.seek(0, SEEK_END)
self.assertEquals(f._rfile.tell(), len(contents)) self.assertEquals(f._rfile.tell(), len(contents))
f.close() f.close()
f = self.fs.open('test.txt', 'ab') f = self.fs.open('test.txt', 'ab')
self.assertEquals(f.tell(), len(contents)) self.assertEquals(f.tell(), len(contents))
f.close() f.close()
self.fakeOff() self.fakeOff()
# Writing over the rfile edge # Writing over the rfile edge
f = self.fs.open('test.txt', 'wb+') f = self.fs.open('test.txt', 'wb+')
self.assertEquals(f.tell(), 0) self.assertEquals(f.tell(), 0)
f.seek(len(contents) - 5) f.seek(len(contents) - 5)
# Last 5 characters not loaded from remote file # Last 5 characters not loaded from remote file
self.assertEquals(f._rfile.tell(), len(contents) - 5) self.assertEquals(f._rfile.tell(), len(contents) - 5)
# Confirm that last 5 characters are still in rfile buffer # Confirm that last 5 characters are still in rfile buffer
self.assertEquals(f._rfile.read(), contents[-5:]) self.assertEquals(f._rfile.read(), contents[-5:])
...@@ -141,9 +145,9 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -141,9 +145,9 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# We are on the end of file (and buffer not serve anything anymore) # We are on the end of file (and buffer not serve anything anymore)
self.assertEquals(f.read(), b('')) self.assertEquals(f.read(), b(''))
f.close() f.close()
self.fakeOn() self.fakeOn()
# Check if we wrote everything OK from # Check if we wrote everything OK from
# previous writing over the remote buffer edge # previous writing over the remote buffer edge
f = self.fs.open('test.txt', 'rb') f = self.fs.open('test.txt', 'rb')
...@@ -151,7 +155,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -151,7 +155,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f.close() f.close()
self.fakeOff() self.fakeOff()
def test_writeonflush(self): def test_writeonflush(self):
''' '''
Test 'write_on_flush' switch of RemoteFileBuffer. Test 'write_on_flush' switch of RemoteFileBuffer.
...@@ -168,7 +172,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -168,7 +172,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
self.fakeOff() self.fakeOff()
f.close() f.close()
self.fakeOn() self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=False) f = self.fs.open('test.txt', 'wb', write_on_flush=False)
f.write(b('Sample text')) f.write(b('Sample text'))
# FakeException is not raised, because setcontents is not called # FakeException is not raised, because setcontents is not called
...@@ -176,16 +180,16 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -176,16 +180,16 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f.write(b('Second sample text')) f.write(b('Second sample text'))
self.assertRaises(self.FakeException, f.close) self.assertRaises(self.FakeException, f.close)
self.fakeOff() self.fakeOff()
def test_flush_and_continue(self): def test_flush_and_continue(self):
''' '''
This tests if partially loaded remote buffer can be flushed This tests if partially loaded remote buffer can be flushed
back to remote destination and opened file is still back to remote destination and opened file is still
in good condition. in good condition.
''' '''
contents = b("Zlutoucky kun upel dabelske ody.") contents = b("Zlutoucky kun upel dabelske ody.")
contents2 = b('Ententyky dva spaliky cert vyletel z elektriky') contents2 = b('Ententyky dva spaliky cert vyletel z elektriky')
f = self.fs.open('test.txt', 'wb') f = self.fs.open('test.txt', 'wb')
f.write(contents) f.write(contents)
f.close() f.close()
...@@ -202,12 +206,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -202,12 +206,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Try if we have unocrrupted file locally... # Try if we have unocrrupted file locally...
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:]) self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close() f.close()
# And if we have uncorrupted file also on storage # And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb') f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:]) self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close() f.close()
# Now try it again, but write garbage behind edge of remote file # Now try it again, but write garbage behind edge of remote file
f = self.fs.open('test.txt', 'rb+') f = self.fs.open('test.txt', 'rb+')
self.assertEquals(f.read(10), contents[:10]) self.assertEquals(f.read(10), contents[:10])
...@@ -218,12 +222,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases): ...@@ -218,12 +222,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Try if we have unocrrupted file locally... # Try if we have unocrrupted file locally...
self.assertEquals(f.read(), contents[:10] + contents2) self.assertEquals(f.read(), contents[:10] + contents2)
f.close() f.close()
# And if we have uncorrupted file also on storage # And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb') f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + contents2) self.assertEquals(f.read(), contents[:10] + contents2)
f.close() f.close()
class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases): class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
"""Test simple operation of CacheFS""" """Test simple operation of CacheFS"""
...@@ -267,7 +271,7 @@ class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases): ...@@ -267,7 +271,7 @@ class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self.assertFalse(self.fs.isfile("hello")) self.assertFalse(self.fs.isfile("hello"))
finally: finally:
self.fs.cache_timeout = old_timeout self.fs.cache_timeout = old_timeout
class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases): class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases):
...@@ -286,7 +290,7 @@ class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCase ...@@ -286,7 +290,7 @@ class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCase
class DisconnectingFS(WrapFS): class DisconnectingFS(WrapFS):
"""FS subclass that raises lots of RemoteConnectionErrors.""" """FS subclass that raises lots of RemoteConnectionErrors."""
def __init__(self,fs=None): def __init__(self,fs=None):
if fs is None: if fs is None:
fs = TempFS() fs = TempFS()
self._connected = True self._connected = True
...@@ -315,8 +319,8 @@ class DisconnectingFS(WrapFS): ...@@ -315,8 +319,8 @@ class DisconnectingFS(WrapFS):
time.sleep(random.random()*0.1) time.sleep(random.random()*0.1)
self._connected = not self._connected self._connected = not self._connected
def setcontents(self, path, contents=b(''), chunk_size=64*1024): def setcontents(self, path, data=b(''), encoding=None, errors=None, chunk_size=64*1024):
return self.wrapped_fs.setcontents(path, contents) return self.wrapped_fs.setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
def close(self): def close(self):
if not self.closed: if not self.closed:
......
...@@ -29,6 +29,10 @@ if sys.platform == "win32": ...@@ -29,6 +29,10 @@ if sys.platform == "win32":
else: else:
watch_win32 = None watch_win32 = None
import logging
logging.getLogger('pyinotify').setLevel(logging.ERROR)
import six import six
from six import PY3, b from six import PY3, b
...@@ -53,7 +57,7 @@ class WatcherTestCases: ...@@ -53,7 +57,7 @@ class WatcherTestCases:
self.watchfs._poll_cond.wait() self.watchfs._poll_cond.wait()
self.watchfs._poll_cond.release() self.watchfs._poll_cond.release()
else: else:
time.sleep(2)#0.5) time.sleep(2)
def assertEventOccurred(self,cls,path=None,event_list=None,**attrs): def assertEventOccurred(self,cls,path=None,event_list=None,**attrs):
if not self.checkEventOccurred(cls,path,event_list,**attrs): if not self.checkEventOccurred(cls,path,event_list,**attrs):
...@@ -222,4 +226,3 @@ class TestWatchers_MemoryFS_polling(TestWatchers_MemoryFS): ...@@ -222,4 +226,3 @@ class TestWatchers_MemoryFS_polling(TestWatchers_MemoryFS):
def setUp(self): def setUp(self):
self.fs = memoryfs.MemoryFS() self.fs = memoryfs.MemoryFS()
self.watchfs = ensure_watchable(self.fs,poll_interval=0.1) self.watchfs = ensure_watchable(self.fs,poll_interval=0.1)
...@@ -17,6 +17,7 @@ from fs import zipfs ...@@ -17,6 +17,7 @@ from fs import zipfs
from six import PY3, b from six import PY3, b
class TestReadZipFS(unittest.TestCase): class TestReadZipFS(unittest.TestCase):
def setUp(self): def setUp(self):
...@@ -46,20 +47,22 @@ class TestReadZipFS(unittest.TestCase): ...@@ -46,20 +47,22 @@ class TestReadZipFS(unittest.TestCase):
def test_reads(self): def test_reads(self):
def read_contents(path): def read_contents(path):
f = self.fs.open(path) f = self.fs.open(path, 'rb')
contents = f.read() contents = f.read()
return contents return contents
def check_contents(path, expected): def check_contents(path, expected):
self.assert_(read_contents(path)==expected) self.assert_(read_contents(path) == expected)
check_contents("a.txt", b("Hello, World!")) check_contents("a.txt", b("Hello, World!"))
check_contents("1.txt", b("1")) check_contents("1.txt", b("1"))
check_contents("foo/bar/baz.txt", b("baz")) check_contents("foo/bar/baz.txt", b("baz"))
def test_getcontents(self): def test_getcontents(self):
def read_contents(path): def read_contents(path):
return self.fs.getcontents(path) return self.fs.getcontents(path, 'rb')
def check_contents(path, expected): def check_contents(path, expected):
self.assert_(read_contents(path)==expected) self.assert_(read_contents(path) == expected)
check_contents("a.txt", b("Hello, World!")) check_contents("a.txt", b("Hello, World!"))
check_contents("1.txt", b("1")) check_contents("1.txt", b("1"))
check_contents("foo/bar/baz.txt", b("baz")) check_contents("foo/bar/baz.txt", b("baz"))
...@@ -82,7 +85,7 @@ class TestReadZipFS(unittest.TestCase): ...@@ -82,7 +85,7 @@ class TestReadZipFS(unittest.TestCase):
dir_list = self.fs.listdir(path) dir_list = self.fs.listdir(path)
self.assert_(sorted(dir_list) == sorted(expected)) self.assert_(sorted(dir_list) == sorted(expected))
for item in dir_list: for item in dir_list:
self.assert_(isinstance(item,unicode)) self.assert_(isinstance(item, unicode))
check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt']) check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
check_listing('foo', ['second.txt', 'bar']) check_listing('foo', ['second.txt', 'bar'])
check_listing('foo/bar', ['baz.txt']) check_listing('foo/bar', ['baz.txt'])
......
...@@ -72,6 +72,7 @@ def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1 ...@@ -72,6 +72,7 @@ def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1
if src_lock is not None: if src_lock is not None:
src_lock.release() src_lock.release()
def copyfile_non_atomic(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024): def copyfile_non_atomic(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024):
"""A non atomic version of copyfile (will not block other threads using src_fs or dst_fst) """A non atomic version of copyfile (will not block other threads using src_fs or dst_fst)
......
...@@ -5,14 +5,14 @@ fs.watch ...@@ -5,14 +5,14 @@ fs.watch
Change notification support for FS. Change notification support for FS.
This module defines a standard interface for FS subclasses that support change This module defines a standard interface for FS subclasses that support change
notification callbacks. It also offers some WrapFS subclasses that can notification callbacks. It also offers some WrapFS subclasses that can
simulate such an ability on top of an ordinary FS object. simulate such an ability on top of an ordinary FS object.
An FS object that wants to be "watchable" must provide the following methods: An FS object that wants to be "watchable" must provide the following methods:
* ``add_watcher(callback,path="/",events=None,recursive=True)`` * ``add_watcher(callback,path="/",events=None,recursive=True)``
Request that the given callback be executed in response to changes Request that the given callback be executed in response to changes
to the given path. A specific set of change events can be specified. to the given path. A specific set of change events can be specified.
This method returns a Watcher object. This method returns a Watcher object.
...@@ -31,7 +31,7 @@ an iterator over the change events. ...@@ -31,7 +31,7 @@ an iterator over the change events.
import sys import sys
import weakref import weakref
import threading import threading
import Queue import Queue
import traceback import traceback
...@@ -291,29 +291,36 @@ class WatchableFS(WatchableFSMixin,WrapFS): ...@@ -291,29 +291,36 @@ class WatchableFS(WatchableFSMixin,WrapFS):
that might be made through other interfaces to the same filesystem. that might be made through other interfaces to the same filesystem.
""" """
def __init__(self,*args,**kwds): def __init__(self, *args, **kwds):
super(WatchableFS,self).__init__(*args,**kwds) super(WatchableFS, self).__init__(*args, **kwds)
def close(self): def close(self):
super(WatchableFS,self).close() super(WatchableFS, self).close()
self.notify_watchers(CLOSED) self.notify_watchers(CLOSED)
def open(self,path,mode="r",**kwargs): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
existed = self.wrapped_fs.isfile(path) existed = self.wrapped_fs.isfile(path)
f = super(WatchableFS,self).open(path,mode,**kwargs) f = super(WatchableFS, self).open(path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
**kwargs)
if not existed: if not existed:
self.notify_watchers(CREATED,path) self.notify_watchers(CREATED, path)
self.notify_watchers(ACCESSED,path) self.notify_watchers(ACCESSED, path)
return WatchedFile(f,self,path,mode) return WatchedFile(f, self, path, mode)
def setcontents(self, path, data=b(''), chunk_size=64*1024): def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
existed = self.wrapped_fs.isfile(path) existed = self.wrapped_fs.isfile(path)
ret = super(WatchableFS, self).setcontents(path, data, chunk_size=chunk_size) ret = super(WatchableFS, self).setcontents(path, data, chunk_size=chunk_size)
if not existed: if not existed:
self.notify_watchers(CREATED,path) self.notify_watchers(CREATED, path)
self.notify_watchers(ACCESSED,path) self.notify_watchers(ACCESSED, path)
if data: if data:
self.notify_watchers(MODIFIED,path,True) self.notify_watchers(MODIFIED, path, True)
return ret return ret
def createfile(self, path): def createfile(self, path):
...@@ -550,18 +557,18 @@ class PollingWatchableFS(WatchableFS): ...@@ -550,18 +557,18 @@ class PollingWatchableFS(WatchableFS):
for (k,v) in new_info.iteritems(): for (k,v) in new_info.iteritems():
if k not in old_info: if k not in old_info:
was_modified = True was_modified = True
break break
elif old_info[k] != v: elif old_info[k] != v:
if k in ("accessed_time","st_atime",): if k in ("accessed_time","st_atime",):
was_accessed = True was_accessed = True
elif k: elif k:
was_modified = True was_modified = True
break break
else: else:
for k in old_info: for k in old_info:
if k not in new_info: if k not in new_info:
was_modified = True was_modified = True
break break
if was_modified: if was_modified:
self.notify_watchers(MODIFIED,fpath,True) self.notify_watchers(MODIFIED,fpath,True)
elif was_accessed: elif was_accessed:
......
...@@ -150,21 +150,21 @@ class WrapFS(FS): ...@@ -150,21 +150,21 @@ class WrapFS(FS):
return self.wrapped_fs.hassyspath(self._encode(path)) return self.wrapped_fs.hassyspath(self._encode(path))
@rewrite_errors @rewrite_errors
def open(self, path, mode="r", **kwargs): def open(self, path, mode='r', **kwargs):
(mode, wmode) = self._adjust_mode(mode) (mode, wmode) = self._adjust_mode(mode)
f = self.wrapped_fs.open(self._encode(path), wmode, **kwargs) f = self.wrapped_fs.open(self._encode(path), wmode, **kwargs)
return self._file_wrap(f, mode) return self._file_wrap(f, mode)
@rewrite_errors @rewrite_errors
def setcontents(self, path, data, chunk_size=64*1024): def setcontents(self, path, data, encoding=None, errors=None, chunk_size=64*1024):
# We can't pass setcontents() through to the wrapped FS if the # We can't pass setcontents() through to the wrapped FS if the
# wrapper has defined a _file_wrap method, as it would bypass # wrapper has defined a _file_wrap method, as it would bypass
# the file contents wrapping. # the file contents wrapping.
#if self._file_wrap.im_func is WrapFS._file_wrap.im_func: #if self._file_wrap.im_func is WrapFS._file_wrap.im_func:
if getattr(self.__class__, '_file_wrap', None) is getattr(WrapFS, '_file_wrap', None): if getattr(self.__class__, '_file_wrap', None) is getattr(WrapFS, '_file_wrap', None):
return self.wrapped_fs.setcontents(self._encode(path), data, chunk_size=chunk_size) return self.wrapped_fs.setcontents(self._encode(path), data, encoding=encoding, errors=errors, chunk_size=chunk_size)
else: else:
return super(WrapFS,self).setcontents(path, data, chunk_size=chunk_size) return super(WrapFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
@rewrite_errors @rewrite_errors
def createfile(self, path): def createfile(self, path):
......
...@@ -58,14 +58,20 @@ class LimitSizeFS(WrapFS): ...@@ -58,14 +58,20 @@ class LimitSizeFS(WrapFS):
raise NoSysPathError(path) raise NoSysPathError(path)
return None return None
def open(self, path, mode="r"): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
path = relpath(normpath(path)) path = relpath(normpath(path))
with self._size_lock: with self._size_lock:
try: try:
size = self.getsize(path) size = self.getsize(path)
except ResourceNotFoundError: except ResourceNotFoundError:
size = 0 size = 0
f = super(LimitSizeFS,self).open(path,mode) f = super(LimitSizeFS,self).open(path,
mode=mode,
buffering=buffering,
errors=errors,
newline=newline,
line_buffering=line_buffering,
**kwargs)
if "w" not in mode: if "w" not in mode:
self._set_file_size(path,None,1) self._set_file_size(path,None,1)
else: else:
...@@ -92,12 +98,12 @@ class LimitSizeFS(WrapFS): ...@@ -92,12 +98,12 @@ class LimitSizeFS(WrapFS):
else: else:
self._file_sizes[path] = (size,count) self._file_sizes[path] = (size,count)
def setcontents(self, path, data, chunk_size=64*1024): def setcontents(self, path, data, chunk_size=64*1024):
f = None f = None
try: try:
f = self.open(path, 'wb') f = self.open(path, 'wb')
if hasattr(data, 'read'): if hasattr(data, 'read'):
chunk = data.read(chunk_size) chunk = data.read(chunk_size)
while chunk: while chunk:
f.write(chunk) f.write(chunk)
chunk = data.read(chunk_size) chunk = data.read(chunk_size)
...@@ -106,7 +112,7 @@ class LimitSizeFS(WrapFS): ...@@ -106,7 +112,7 @@ class LimitSizeFS(WrapFS):
finally: finally:
if f is not None: if f is not None:
f.close() f.close()
def _file_closed(self, path): def _file_closed(self, path):
self._set_file_size(path,None,-1) self._set_file_size(path,None,-1)
...@@ -135,7 +141,7 @@ class LimitSizeFS(WrapFS): ...@@ -135,7 +141,7 @@ class LimitSizeFS(WrapFS):
return cur_size return cur_size
# We force use of several base FS methods, # We force use of several base FS methods,
# since they will fall back to writing out each file # since they will fall back to writing out each file
# and thus will route through our size checking logic. # and thus will route through our size checking logic.
def copy(self, src, dst, **kwds): def copy(self, src, dst, **kwds):
FS.copy(self,src,dst,**kwds) FS.copy(self,src,dst,**kwds)
...@@ -233,7 +239,7 @@ class LimitSizeFile(FileWrapper): ...@@ -233,7 +239,7 @@ class LimitSizeFile(FileWrapper):
self.fs = fs self.fs = fs
self.path = path self.path = path
self._lock = fs._lock self._lock = fs._lock
@synchronize @synchronize
def _write(self, data, flushing=False): def _write(self, data, flushing=False):
pos = self.wrapped_file.tell() pos = self.wrapped_file.tell()
......
...@@ -10,44 +10,52 @@ from fs.base import NoDefaultMeta ...@@ -10,44 +10,52 @@ from fs.base import NoDefaultMeta
from fs.wrapfs import WrapFS from fs.wrapfs import WrapFS
from fs.errors import UnsupportedError, NoSysPathError from fs.errors import UnsupportedError, NoSysPathError
class ReadOnlyFS(WrapFS): class ReadOnlyFS(WrapFS):
""" Makes a FS object read only. Any operation that could potentially modify """ Makes a FS object read only. Any operation that could potentially modify
the underlying file system will throw an UnsupportedError the underlying file system will throw an UnsupportedError
Note that this isn't a secure sandbox, untrusted code could work around the Note that this isn't a secure sandbox, untrusted code could work around the
read-only restrictions by getting the base class. Its main purpose is to read-only restrictions by getting the base class. Its main purpose is to
provide a degree of safety if you want to protect an FS object from provide a degree of safety if you want to protect an FS object from
accidental modification. accidental modification.
""" """
def getmeta(self, meta_name, default=NoDefaultMeta): def getmeta(self, meta_name, default=NoDefaultMeta):
if meta_name == 'read_only': if meta_name == 'read_only':
return True return True
return self.wrapped_fs.getmeta(meta_name, default) return self.wrapped_fs.getmeta(meta_name, default)
def hasmeta(self, meta_name): def hasmeta(self, meta_name):
if meta_name == 'read_only': if meta_name == 'read_only':
return True return True
return self.wrapped_fs.hasmeta(meta_name) return self.wrapped_fs.hasmeta(meta_name)
def getsyspath(self, path, allow_none=False): def getsyspath(self, path, allow_none=False):
""" Doesn't technically modify the filesystem but could be used to work """ Doesn't technically modify the filesystem but could be used to work
around read-only restrictions. """ around read-only restrictions. """
if allow_none: if allow_none:
return None return None
raise NoSysPathError(path) raise NoSysPathError(path)
def open(self, path, mode='r', **kwargs): def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
""" Only permit read access """ """ Only permit read access """
if 'w' in mode or 'a' in mode or '+' in mode: if 'w' in mode or 'a' in mode or '+' in mode:
raise UnsupportedError('write') raise UnsupportedError('write')
return super(ReadOnlyFS, self).open(path, mode, **kwargs) return super(ReadOnlyFS, self).open(path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
**kwargs)
def _no_can_do(self, *args, **kwargs): def _no_can_do(self, *args, **kwargs):
""" Replacement method for methods that can modify the file system """ """ Replacement method for methods that can modify the file system """
raise UnsupportedError('write') raise UnsupportedError('write')
move = _no_can_do move = _no_can_do
movedir = _no_can_do movedir = _no_can_do
copy = _no_can_do copy = _no_can_do
......
...@@ -21,7 +21,7 @@ class SubFS(WrapFS): ...@@ -21,7 +21,7 @@ class SubFS(WrapFS):
def __init__(self, wrapped_fs, sub_dir): def __init__(self, wrapped_fs, sub_dir):
self.sub_dir = abspath(normpath(sub_dir)) self.sub_dir = abspath(normpath(sub_dir))
super(SubFS,self).__init__(wrapped_fs) super(SubFS, self).__init__(wrapped_fs)
def _encode(self, path): def _encode(self, path):
return pathjoin(self.sub_dir, relpath(normpath(path))) return pathjoin(self.sub_dir, relpath(normpath(path)))
...@@ -34,17 +34,17 @@ class SubFS(WrapFS): ...@@ -34,17 +34,17 @@ class SubFS(WrapFS):
return '<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/')) return '<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
def __unicode__(self): def __unicode__(self):
return u'<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/')) return u'<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
def __repr__(self): def __repr__(self):
return "SubFS(%r, %r)" % (self.wrapped_fs, self.sub_dir) return "SubFS(%r, %r)" % (self.wrapped_fs, self.sub_dir)
def desc(self, path): def desc(self, path):
if path in ('', '/'): if path in ('', '/'):
return self.wrapped_fs.desc(self.sub_dir) return self.wrapped_fs.desc(self.sub_dir)
return '%s!%s' % (self.wrapped_fs.desc(self.sub_dir), path) return '%s!%s' % (self.wrapped_fs.desc(self.sub_dir), path)
def setcontents(self, path, data, chunk_size=64*1024): def setcontents(self, path, data, encoding=None, errors=None, chunk_size=64*1024):
path = self._encode(path) path = self._encode(path)
return self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size) return self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size)
...@@ -62,14 +62,14 @@ class SubFS(WrapFS): ...@@ -62,14 +62,14 @@ class SubFS(WrapFS):
path = normpath(path) path = normpath(path)
if path in ('', '/'): if path in ('', '/'):
raise RemoveRootError(path) raise RemoveRootError(path)
super(SubFS,self).removedir(path,force=force) super(SubFS, self).removedir(path, force=force)
if recursive: if recursive:
try: try:
if dirname(path) not in ('', '/'): if dirname(path) not in ('', '/'):
self.removedir(dirname(path),recursive=True) self.removedir(dirname(path), recursive=True)
except DirectoryNotEmptyError: except DirectoryNotEmptyError:
pass pass
# if path in ("","/"): # if path in ("","/"):
# if not force: # if not force:
# for path2 in self.listdir(path): # for path2 in self.listdir(path):
......
...@@ -13,6 +13,7 @@ from fs.base import * ...@@ -13,6 +13,7 @@ from fs.base import *
from fs.path import * from fs.path import *
from fs.errors import * from fs.errors import *
from fs.filelike import StringIO from fs.filelike import StringIO
from fs import iotools
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED, BadZipfile, LargeZipFile from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED, BadZipfile, LargeZipFile
from memoryfs import MemoryFS from memoryfs import MemoryFS
...@@ -21,6 +22,7 @@ import tempfs ...@@ -21,6 +22,7 @@ import tempfs
from six import PY3 from six import PY3
class ZipOpenError(CreateFailedError): class ZipOpenError(CreateFailedError):
"""Thrown when the zip file could not be opened""" """Thrown when the zip file could not be opened"""
pass pass
...@@ -76,13 +78,13 @@ class _ExceptionProxy(object): ...@@ -76,13 +78,13 @@ class _ExceptionProxy(object):
class ZipFS(FS): class ZipFS(FS):
"""A FileSystem that represents a zip file.""" """A FileSystem that represents a zip file."""
_meta = { 'thread_safe' : True, _meta = {'thread_safe': True,
'virtual' : False, 'virtual': False,
'read_only' : False, 'read_only': False,
'unicode_paths' : True, 'unicode_paths': True,
'case_insensitive_paths' : False, 'case_insensitive_paths': False,
'network' : False, 'network': False,
'atomic.setcontents' : False 'atomic.setcontents': False
} }
def __init__(self, zip_file, mode="r", compression="deflated", allow_zip_64=False, encoding="CP437", thread_synchronize=True): def __init__(self, zip_file, mode="r", compression="deflated", allow_zip_64=False, encoding="CP437", thread_synchronize=True):
...@@ -129,7 +131,7 @@ class ZipFS(FS): ...@@ -129,7 +131,7 @@ class ZipFS(FS):
raise ZipOpenError("Not a zip file or corrupt (%s)" % str(zip_file), raise ZipOpenError("Not a zip file or corrupt (%s)" % str(zip_file),
details=ioe) details=ioe)
raise ZipNotFoundError("Zip file not found (%s)" % str(zip_file), raise ZipNotFoundError("Zip file not found (%s)" % str(zip_file),
details=ioe) details=ioe)
self.zip_path = str(zip_file) self.zip_path = str(zip_file)
self.temp_fs = None self.temp_fs = None
...@@ -189,7 +191,8 @@ class ZipFS(FS): ...@@ -189,7 +191,8 @@ class ZipFS(FS):
self.zf = _ExceptionProxy() self.zf = _ExceptionProxy()
@synchronize @synchronize
def open(self, path, mode="r", **kwargs): @iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
path = normpath(relpath(path)) path = normpath(relpath(path))
if 'r' in mode: if 'r' in mode:
...@@ -222,7 +225,7 @@ class ZipFS(FS): ...@@ -222,7 +225,7 @@ class ZipFS(FS):
raise ValueError("Mode must contain be 'r' or 'w'") raise ValueError("Mode must contain be 'r' or 'w'")
@synchronize @synchronize
def getcontents(self, path, mode="rb"): def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
if not self.exists(path): if not self.exists(path):
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
path = normpath(relpath(path)) path = normpath(relpath(path))
...@@ -232,7 +235,9 @@ class ZipFS(FS): ...@@ -232,7 +235,9 @@ class ZipFS(FS):
raise ResourceNotFoundError(path) raise ResourceNotFoundError(path)
except RuntimeError: except RuntimeError:
raise OperationFailedError("read file", path=path, msg="3 Zip file must be opened with 'r' or 'a' to read") raise OperationFailedError("read file", path=path, msg="3 Zip file must be opened with 'r' or 'a' to read")
return contents if 'b' in mode:
return contents
return iotools.decode_binary(contents, encoding=encoding, errors=errors, newline=newline)
@synchronize @synchronize
def _on_write_close(self, filename): def _on_write_close(self, filename):
......
...@@ -28,7 +28,6 @@ classifiers = [ ...@@ -28,7 +28,6 @@ classifiers = [
'License :: OSI Approved :: BSD License', 'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent', 'Operating System :: OS Independent',
'Programming Language :: Python', 'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3',
...@@ -41,7 +40,7 @@ Even if you only need to work with file and directories on the local hard-drive, ...@@ -41,7 +40,7 @@ Even if you only need to work with file and directories on the local hard-drive,
""" """
extra = {} extra = {}
if PY3: if PY3:
extra["use_2to3"] = True extra["use_2to3"] = True
setup(install_requires=['distribute', 'six'], setup(install_requires=['distribute', 'six'],
...@@ -49,12 +48,12 @@ setup(install_requires=['distribute', 'six'], ...@@ -49,12 +48,12 @@ setup(install_requires=['distribute', 'six'],
version=VERSION, version=VERSION,
description="Filesystem abstraction", description="Filesystem abstraction",
long_description=long_desc, long_description=long_desc,
license = "BSD", license="BSD",
author="Will McGugan", author="Will McGugan",
author_email="will@willmcgugan.com", author_email="will@willmcgugan.com",
url="http://code.google.com/p/pyfilesystem/", url="http://code.google.com/p/pyfilesystem/",
download_url="http://code.google.com/p/pyfilesystem/downloads/list", download_url="http://code.google.com/p/pyfilesystem/downloads/list",
platforms = ['any'], platforms=['any'],
packages=['fs', packages=['fs',
'fs.expose', 'fs.expose',
'fs.expose.dokan', 'fs.expose.dokan',
...@@ -66,10 +65,10 @@ setup(install_requires=['distribute', 'six'], ...@@ -66,10 +65,10 @@ setup(install_requires=['distribute', 'six'],
'fs.contrib', 'fs.contrib',
'fs.contrib.bigfs', 'fs.contrib.bigfs',
'fs.contrib.davfs', 'fs.contrib.davfs',
'fs.contrib.tahoelafs', 'fs.contrib.tahoelafs',
'fs.commands'], 'fs.commands'],
package_data={'fs': ['tests/data/*.txt']},
scripts=['fs/commands/%s' % command for command in COMMANDS], scripts=['fs/commands/%s' % command for command in COMMANDS],
classifiers=classifiers, classifiers=classifiers,
**extra **extra
) )
[tox] [tox]
envlist = py25,py26,py27,py31,py32,pypy envlist = py26,py27,py31,py32,pypy
sitepackages = False sitepackages = False
[testenv] [testenv]
...@@ -10,30 +10,17 @@ deps = distribute ...@@ -10,30 +10,17 @@ deps = distribute
boto boto
nose nose
mako mako
python-libarchive pyftpdlib
pyftpdlib
changedir=.tox changedir=.tox
commands = nosetests fs.tests -v \ commands = nosetests fs.tests -v \
[] []
[testenv:py25]
deps = distribute
six
dexml
paramiko
boto
nose
mako
python-libarchive
pyftpdlib
simplejson
[testenv:py32] [testenv:py32]
commands = nosetests fs.tests -v \ commands = nosetests fs.tests -v \
[] []
deps = distribute deps = distribute
six six
dexml dexml
nose nose
winpdb winpdb
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment