Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
P
pyfs
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
OpenEdx
pyfs
Commits
3ea4efe1
Commit
3ea4efe1
authored
Mar 31, 2013
by
willmcgugan@gmail.com
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Change of api (fs.open, fs.setcontent, fs.getcontents) to support io module in Py2.6+ and Py3
parent
c6391b6f
Hide whitespace changes
Inline
Side-by-side
Showing
45 changed files
with
1705 additions
and
1333 deletions
+1705
-1333
MANIFEST.in
+0
-1
fs/__init__.py
+5
-5
fs/appdirfs.py
+13
-12
fs/base.py
+136
-68
fs/compatibility.py
+4
-3
fs/contrib/archivefs.py
+3
-3
fs/contrib/davfs/__init__.py
+29
-24
fs/expose/dokan/__init__.py
+63
-63
fs/expose/ftp.py
+4
-2
fs/expose/fuse/__init__.py
+84
-76
fs/expose/sftp.py
+3
-3
fs/expose/wsgi/wsgi.py
+46
-46
fs/expose/xmlrpc.py
+12
-14
fs/filelike.py
+38
-36
fs/ftpfs.py
+109
-105
fs/httpfs.py
+26
-22
fs/iotools.py
+92
-9
fs/memoryfs.py
+107
-88
fs/mountfs.py
+12
-7
fs/multifs.py
+42
-42
fs/opener.py
+214
-215
fs/osfs/__init__.py
+14
-14
fs/remote.py
+53
-52
fs/rpcfs.py
+59
-47
fs/s3fs.py
+17
-12
fs/sftpfs.py
+16
-13
fs/tempfs.py
+15
-10
fs/tests/__init__.py
+263
-202
fs/tests/data/__init__.py
+0
-0
fs/tests/test_expose.py
+11
-4
fs/tests/test_importhook.py
+1
-1
fs/tests/test_iotools.py
+56
-0
fs/tests/test_mountfs.py
+7
-6
fs/tests/test_remote.py
+42
-38
fs/tests/test_watch.py
+5
-2
fs/tests/test_zipfs.py
+8
-5
fs/utils.py
+1
-0
fs/watch.py
+25
-18
fs/wrapfs/__init__.py
+4
-4
fs/wrapfs/limitsizefs.py
+14
-8
fs/wrapfs/readonlyfs.py
+18
-10
fs/wrapfs/subfs.py
+9
-9
fs/zipfs.py
+16
-11
setup.py
+5
-6
tox.ini
+4
-17
No files found.
MANIFEST.in
View file @
3ea4efe1
include AUTHORS
fs/__init__.py
View file @
3ea4efe1
...
...
@@ -19,19 +19,19 @@ __version__ = "0.4.1"
__author__
=
"Will McGugan (will@willmcgugan.com)"
# provide these by default so people can use 'fs.path.basename' etc.
import
errors
import
path
from
fs
import
errors
from
fs
import
path
_thread_synchronize_default
=
True
def
set_thread_synchronize_default
(
sync
):
"""Sets the default thread synchronisation flag.
FS objects are made thread-safe through the use of a per-FS threading Lock
object. Since this can introduce an small overhead it can be disabled with
this function if the code is single-threaded.
:param sync: Set whether to use thread synchronisation for new FS objects
"""
global
_thread_synchronization_default
_thread_synchronization_default
=
sync
...
...
fs/appdirfs.py
View file @
3ea4efe1
...
...
@@ -6,8 +6,8 @@ A collection of filesystems that map to application specific locations.
These classes abstract away the different requirements for user data across platforms,
which vary in their conventions. They are all subclasses of :class:`fs.osfs.OSFS`,
all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters.
all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters.
Uses `appdirs` (https://github.com/ActiveState/appdirs), written by Trent Mick and Sridhar Ratnakumar <trentm at gmail com; github at srid name>
...
...
@@ -30,10 +30,10 @@ class UserDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs
=
AppDirs
(
appname
,
appauthor
,
version
,
roaming
)
super
(
self
.
__class__
,
self
)
.
__init__
(
app_dirs
.
user_data_dir
,
create
=
create
)
super
(
UserDataFS
,
self
)
.
__init__
(
app_dirs
.
user_data_dir
,
create
=
create
)
class
SiteDataFS
(
OSFS
):
...
...
@@ -45,10 +45,10 @@ class SiteDataFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs
=
AppDirs
(
appname
,
appauthor
,
version
,
roaming
)
super
(
self
.
__class__
,
self
)
.
__init__
(
app_dirs
.
site_data_dir
,
create
=
create
)
super
(
SiteDataFS
,
self
)
.
__init__
(
app_dirs
.
site_data_dir
,
create
=
create
)
class
UserCacheFS
(
OSFS
):
...
...
@@ -60,10 +60,10 @@ class UserCacheFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs
=
AppDirs
(
appname
,
appauthor
,
version
,
roaming
)
super
(
self
.
__class__
,
self
)
.
__init__
(
app_dirs
.
user_cache_dir
,
create
=
create
)
super
(
UserCacheFS
,
self
)
.
__init__
(
app_dirs
.
user_cache_dir
,
create
=
create
)
class
UserLogFS
(
OSFS
):
...
...
@@ -75,13 +75,14 @@ class UserLogFS(OSFS):
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs
=
AppDirs
(
appname
,
appauthor
,
version
,
roaming
)
super
(
self
.
__class__
,
self
)
.
__init__
(
app_dirs
.
user_log_dir
,
create
=
create
)
super
(
UserLogFS
,
self
)
.
__init__
(
app_dirs
.
user_log_dir
,
create
=
create
)
if
__name__
==
"__main__"
:
udfs
=
UserDataFS
(
'
sexytime
'
,
appauthor
=
'pyfs'
)
udfs
=
UserDataFS
(
'
exampleapp
'
,
appauthor
=
'pyfs'
)
print
udfs
udfs2
=
UserDataFS
(
'
sexytime
2'
,
appauthor
=
'pyfs'
,
create
=
False
)
udfs2
=
UserDataFS
(
'
exampleapp
2'
,
appauthor
=
'pyfs'
,
create
=
False
)
print
udfs2
fs/base.py
View file @
3ea4efe1
...
...
@@ -37,9 +37,10 @@ from fs.path import *
from
fs.errors
import
*
from
fs.local_functools
import
wraps
import
compatibility
import
six
from
six
import
b
class
DummyLock
(
object
):
"""A dummy lock object that doesn't do anything.
...
...
@@ -373,7 +374,7 @@ class FS(object):
"""
return
self
.
getpathurl
(
path
,
allow_none
=
True
)
is
not
None
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
"""Open a the given path as a file-like object.
:param path: a path to file that should be opened
...
...
@@ -394,7 +395,7 @@ class FS(object):
"""
raise
UnsupportedError
(
"open file"
)
def
safeopen
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
safeopen
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
"""Like :py:meth:`~fs.base.FS.open`, but returns a
:py:class:`~fs.base.NullFile` if the file could not be opened.
...
...
@@ -414,7 +415,7 @@ class FS(object):
"""
try
:
f
=
self
.
open
(
path
,
mode
,
**
kwargs
)
f
=
self
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
except
ResourceNotFoundError
:
return
NullFile
()
return
f
...
...
@@ -457,12 +458,13 @@ class FS(object):
for
f
in
self
.
listdir
():
yield
f
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
"""Lists the the files and directories under a given path.
The directory contents are returned as a list of unicode paths.
...
...
@@ -489,12 +491,13 @@ class FS(object):
"""
raise
UnsupportedError
(
"list directory"
)
def
listdirinfo
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
def
listdirinfo
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
"""Retrieves a list of paths and path info under a given path.
This method behaves like listdir() but instead of just returning
...
...
@@ -517,6 +520,7 @@ class FS(object):
"""
path
=
normpath
(
path
)
def
getinfo
(
p
):
try
:
if
full
or
absolute
:
...
...
@@ -527,20 +531,21 @@ class FS(object):
return
{}
return
[(
p
,
getinfo
(
p
))
for
p
in
self
.
listdir
(
path
,
wildcard
=
wildcard
,
full
=
full
,
absolute
=
absolute
,
dirs_only
=
dirs_only
,
files_only
=
files_only
)]
def
_listdir_helper
(
self
,
path
,
entries
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
for
p
in
self
.
listdir
(
path
,
wildcard
=
wildcard
,
full
=
full
,
absolute
=
absolute
,
dirs_only
=
dirs_only
,
files_only
=
files_only
)]
def
_listdir_helper
(
self
,
path
,
entries
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
"""A helper method called by listdir method that applies filtering.
Given the path to a directory and a list of the names of entries within
...
...
@@ -556,7 +561,7 @@ class FS(object):
if
wildcard
is
not
None
:
if
not
callable
(
wildcard
):
wildcard_re
=
re
.
compile
(
fnmatch
.
translate
(
wildcard
))
wildcard
=
lambda
fn
:
bool
(
wildcard_re
.
match
(
fn
))
wildcard
=
lambda
fn
:
bool
(
wildcard_re
.
match
(
fn
))
entries
=
[
p
for
p
in
entries
if
wildcard
(
p
)]
if
dirs_only
:
...
...
@@ -574,12 +579,13 @@ class FS(object):
return
entries
def
ilistdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
def
ilistdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
"""Generator yielding the files and directories under a given path.
This method behaves identically to :py:meth:`fs.base.FS.listdir` but returns an generator
...
...
@@ -594,12 +600,13 @@ class FS(object):
dirs_only
=
dirs_only
,
files_only
=
files_only
))
def
ilistdirinfo
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
def
ilistdirinfo
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
"""Generator yielding paths and path info under a given path.
This method behaves identically to :py:meth:`~fs.base.listdirinfo` but returns an generator
...
...
@@ -748,40 +755,94 @@ class FS(object):
return
"No description available"
return
sys_path
def
getcontents
(
self
,
path
,
mode
=
"rb"
):
def
getcontents
(
self
,
path
,
mode
=
'rb'
,
encoding
=
None
,
errors
=
None
,
newline
=
None
):
"""Returns the contents of a file as a string.
:param path: A path of file to read
:rtype: str
:returns: file contents
"""
if
'r'
not
in
mode
:
raise
ValueError
(
"mode must contain 'r' to be readable"
)
f
=
None
try
:
f
=
self
.
open
(
path
,
mode
)
f
=
self
.
open
(
path
,
mode
=
mode
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
contents
=
f
.
read
()
return
contents
finally
:
if
f
is
not
None
:
f
.
close
()
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
1024
*
64
):
def
_setcontents
(
self
,
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
,
progress_callback
=
None
,
finished_callback
=
None
):
"""Does the work of setcontents. Factored out, so that `setcontents_async` can use it"""
if
progress_callback
is
None
:
progress_callback
=
lambda
bytes_written
:
None
if
finished_callback
is
None
:
finished_callback
=
lambda
:
None
if
not
data
:
progress_callback
(
0
)
self
.
createfile
(
path
)
finished_callback
()
return
0
bytes_written
=
0
progress_callback
(
0
)
if
hasattr
(
data
,
'read'
):
read
=
data
.
read
chunk
=
read
(
chunk_size
)
if
isinstance
(
chunk
,
six
.
text_type
):
f
=
self
.
open
(
path
,
'wt'
,
encoding
=
encoding
,
errors
=
errors
)
else
:
f
=
self
.
open
(
path
,
'wb'
)
write
=
f
.
write
try
:
while
chunk
:
write
(
chunk
)
bytes_written
+=
len
(
chunk
)
progress_callback
(
bytes_written
)
chunk
=
read
(
chunk_size
)
finally
:
f
.
close
()
else
:
if
isinstance
(
data
,
six
.
text_type
):
with
self
.
open
(
path
,
'wt'
,
encoding
=
encoding
,
errors
=
errors
)
as
f
:
f
.
write
(
data
)
bytes_written
+=
len
(
data
)
else
:
with
self
.
open
(
path
,
'wb'
)
as
f
:
f
.
write
(
data
)
bytes_written
+=
len
(
data
)
progress_callback
(
bytes_written
)
finished_callback
()
return
bytes_written
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
):
"""A convenience method to create a new file from a string or file-like object
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param data: a string or bytes object containing the contents for the new file
:param encoding: if `data` is a file open in text mode, or a text string, then use this `encoding` to write to the destination file
:param errors: if `data` is a file open in text mode or a text string, then use `errors` when opening the destination file
:param chunk_size: Number of bytes to read in a chunk, if the implementation has to resort to a read / copy loop
"""
if
not
data
:
self
.
createfile
(
path
)
else
:
compatibility
.
copy_file_to_fs
(
data
,
self
,
path
,
chunk_size
=
chunk_size
)
return
self
.
_setcontents
(
path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
1024
*
64
)
def
setcontents_async
(
self
,
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
,
progress_callback
=
None
,
finished_callback
=
None
,
...
...
@@ -793,6 +854,8 @@ class FS(object):
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param encoding: if `data` is a file open in text mode, or a text string, then use this `encoding` to write to the destination file
:param errors: if `data` is a file open in text mode or a text string, then use `errors` when opening the destination file
:param chunk_size: Number of bytes to read and write in a chunk
:param progress_callback: A function that is called periodically
with the number of bytes written.
...
...
@@ -805,9 +868,16 @@ class FS(object):
"""
finished_event
=
threading
.
Event
()
def
do_setcontents
():
try
:
compatibility
.
copy_file_to_fs
(
data
,
self
,
path
,
chunk_size
=
chunk_size
,
progress_callback
=
progress_callback
,
finished_callback
=
finished_callback
)
self
.
_setcontents
(
path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
1024
*
64
,
progress_callback
=
progress_callback
,
finished_callback
=
finished_callback
)
except
Exception
,
e
:
if
error_callback
is
not
None
:
error_callback
(
e
)
...
...
@@ -817,7 +887,6 @@ class FS(object):
threading
.
Thread
(
target
=
do_setcontents
)
.
start
()
return
finished_event
def
createfile
(
self
,
path
,
wipe
=
False
):
"""Creates an empty file if it doesn't exist
...
...
@@ -835,7 +904,6 @@ class FS(object):
if
f
is
not
None
:
f
.
close
()
def
opendir
(
self
,
path
):
"""Opens a directory and returns a FS object representing its contents.
...
...
@@ -897,19 +965,18 @@ class FS(object):
return
self
.
listdir
(
path
,
*
args
,
**
kwargs
)
if
wildcard
is
None
:
wildcard
=
lambda
f
:
True
wildcard
=
lambda
f
:
True
elif
not
callable
(
wildcard
):
wildcard_re
=
re
.
compile
(
fnmatch
.
translate
(
wildcard
))
wildcard
=
lambda
fn
:
bool
(
wildcard_re
.
match
(
fn
))
wildcard
=
lambda
fn
:
bool
(
wildcard_re
.
match
(
fn
))
if
dir_wildcard
is
None
:
dir_wildcard
=
lambda
f
:
True
dir_wildcard
=
lambda
f
:
True
elif
not
callable
(
dir_wildcard
):
dir_wildcard_re
=
re
.
compile
(
fnmatch
.
translate
(
dir_wildcard
))
dir_wildcard
=
lambda
fn
:
bool
(
dir_wildcard_re
.
match
(
fn
))
dir_wildcard
=
lambda
fn
:
bool
(
dir_wildcard_re
.
match
(
fn
))
if
search
==
"breadth"
:
dirs
=
[
path
]
dirs_append
=
dirs
.
append
dirs_pop
=
dirs
.
pop
...
...
@@ -1005,7 +1072,6 @@ class FS(object):
for
p
,
_files
in
self
.
walk
(
path
,
dir_wildcard
=
wildcard
,
search
=
search
,
ignore_errors
=
ignore_errors
):
yield
p
def
getsize
(
self
,
path
):
"""Returns the size (in bytes) of a resource.
...
...
@@ -1207,6 +1273,7 @@ class FS(object):
with
self
.
_lock
:
if
not
self
.
isdir
(
src
):
raise
ResourceInvalidError
(
src
,
msg
=
"Source is not a directory:
%(path)
s"
)
def
copyfile_noerrors
(
src
,
dst
,
**
kwargs
):
try
:
return
self
.
copy
(
src
,
dst
,
**
kwargs
)
...
...
@@ -1227,13 +1294,10 @@ class FS(object):
self
.
makedir
(
dst
,
allow_recreate
=
True
)
for
dirname
,
filenames
in
self
.
walk
(
src
):
dst_dirname
=
relpath
(
frombase
(
src
,
abspath
(
dirname
)))
dst_dirpath
=
pathjoin
(
dst
,
dst_dirname
)
self
.
makedir
(
dst_dirpath
,
allow_recreate
=
True
,
recursive
=
True
)
for
filename
in
filenames
:
src_filename
=
pathjoin
(
dirname
,
filename
)
dst_filename
=
pathjoin
(
dst_dirpath
,
filename
)
copyfile
(
src_filename
,
dst_filename
,
overwrite
=
overwrite
,
chunk_size
=
chunk_size
)
...
...
@@ -1248,9 +1312,9 @@ class FS(object):
"""
with
self
.
_lock
:
path
=
normpath
(
path
)
iter_dir
=
iter
(
self
.
listdir
(
path
))
iter_dir
=
iter
(
self
.
i
listdir
(
path
))
try
:
iter_dir
.
next
(
)
next
(
iter_dir
)
except
StopIteration
:
return
True
return
False
...
...
@@ -1326,7 +1390,7 @@ class FS(object):
return
m
def
flags_to_mode
(
flags
):
def
flags_to_mode
(
flags
,
binary
=
True
):
"""Convert an os.O_* flag bitmask into an FS mode string."""
if
flags
&
os
.
O_WRONLY
:
if
flags
&
os
.
O_TRUNC
:
...
...
@@ -1346,6 +1410,10 @@ def flags_to_mode(flags):
mode
=
"r"
if
flags
&
os
.
O_EXCL
:
mode
+=
"x"
if
binary
:
mode
+=
'b'
else
:
mode
+=
't'
return
mode
fs/compatibility.py
View file @
3ea4efe1
...
...
@@ -8,10 +8,11 @@ Not for general usage, the functionality in this file is exposed elsewhere
import
six
from
six
import
PY3
def
copy_file_to_fs
(
data
,
dst_fs
,
dst_path
,
chunk_size
=
64
*
1024
,
progress_callback
=
None
,
finished_callback
=
None
):
"""Copy data from a string or a file-like object to a given fs/path"""
if
progress_callback
is
None
:
progress_callback
=
lambda
bytes_written
:
None
progress_callback
=
lambda
bytes_written
:
None
bytes_written
=
0
f
=
None
try
:
...
...
@@ -19,7 +20,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb
if
hasattr
(
data
,
"read"
):
read
=
data
.
read
chunk
=
read
(
chunk_size
)
if
PY3
and
isinstance
(
chunk
,
six
.
text_type
):
if
isinstance
(
chunk
,
six
.
text_type
):
f
=
dst_fs
.
open
(
dst_path
,
'w'
)
else
:
f
=
dst_fs
.
open
(
dst_path
,
'wb'
)
...
...
@@ -30,7 +31,7 @@ def copy_file_to_fs(data, dst_fs, dst_path, chunk_size=64 * 1024, progress_callb
progress_callback
(
bytes_written
)
chunk
=
read
(
chunk_size
)
else
:
if
PY3
and
isinstance
(
data
,
six
.
text_type
):
if
isinstance
(
data
,
six
.
text_type
):
f
=
dst_fs
.
open
(
dst_path
,
'w'
)
else
:
f
=
dst_fs
.
open
(
dst_path
,
'wb'
)
...
...
fs/contrib/archivefs.py
View file @
3ea4efe1
...
...
@@ -112,11 +112,11 @@ class ArchiveFS(FS):
return
SizeUpdater
(
entry
,
self
.
archive
.
writestream
(
path
))
@synchronize
def
getcontents
(
self
,
path
,
mode
=
"rb"
):
def
getcontents
(
self
,
path
,
mode
=
"rb"
,
encoding
=
None
,
errors
=
None
,
newline
=
None
):
if
not
self
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
f
=
self
.
open
(
path
)
return
f
.
read
()
with
self
.
open
(
path
,
mode
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
as
f
:
return
f
.
read
()
def
desc
(
self
,
path
):
return
"
%
s in zip file"
%
path
...
...
fs/contrib/davfs/__init__.py
View file @
3ea4efe1
...
...
@@ -41,11 +41,13 @@ from fs.base import *
from
fs.path
import
*
from
fs.errors
import
*
from
fs.remote
import
RemoteFileBuffer
from
fs
import
iotools
from
fs.contrib.davfs.util
import
*
from
fs.contrib.davfs
import
xmlobj
from
fs.contrib.davfs.xmlobj
import
*
import
six
from
six
import
b
import
errno
...
...
@@ -84,12 +86,12 @@ class DAVFS(FS):
"http"
:
80
,
"https"
:
443
,
}
_meta
=
{
'virtual'
:
False
,
'read_only'
:
False
,
'unicode_paths'
:
True
,
'case_insensitive_paths'
:
False
,
'network'
:
True
'network'
:
True
}
def
__init__
(
self
,
url
,
credentials
=
None
,
get_credentials
=
None
,
thread_synchronize
=
True
,
connection_classes
=
None
,
timeout
=
None
):
...
...
@@ -121,7 +123,7 @@ class DAVFS(FS):
self
.
url
=
url
pf
=
propfind
(
prop
=
"<prop xmlns='DAV:'><resourcetype /></prop>"
)
resp
=
self
.
_request
(
"/"
,
"PROPFIND"
,
pf
.
render
(),{
"Depth"
:
"0"
})
try
:
try
:
if
resp
.
status
==
404
:
raise
ResourceNotFoundError
(
"/"
,
msg
=
"root url gives 404"
)
if
resp
.
status
in
(
401
,
403
):
...
...
@@ -147,9 +149,9 @@ class DAVFS(FS):
if
not
port
:
try
:
port
=
self
.
_DEFAULT_PORT_NUMBERS
[
scheme
]
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
# Can we re-use an existing connection?
with
self
.
_connection_lock
:
now
=
time
.
time
()
...
...
@@ -165,12 +167,12 @@ class DAVFS(FS):
return
(
False
,
con
)
self
.
_discard_connection
(
con
)
# Nope, we need to make a fresh one.
try
:
ConClass
=
self
.
connection_classes
[
scheme
]
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
con
=
ConClass
(
url
.
hostname
,
url
.
port
,
timeout
=
self
.
timeout
)
try
:
ConClass
=
self
.
connection_classes
[
scheme
]
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
con
=
ConClass
(
url
.
hostname
,
url
.
port
,
timeout
=
self
.
timeout
)
self
.
_connections
.
append
(
con
)
return
(
True
,
con
)
...
...
@@ -182,9 +184,9 @@ class DAVFS(FS):
if
not
port
:
try
:
port
=
self
.
_DEFAULT_PORT_NUMBERS
[
scheme
]
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
except
KeyError
:
msg
=
"unsupported protocol: '
%
s'"
%
(
url
.
scheme
,)
raise
RemoteConnectionError
(
msg
=
msg
)
with
self
.
_connection_lock
:
now
=
time
.
time
()
try
:
...
...
@@ -256,7 +258,7 @@ class DAVFS(FS):
resp
=
None
try
:
resp
=
self
.
_raw_request
(
url
,
method
,
body
,
headers
)
# Loop to retry for redirects and authentication responses.
# Loop to retry for redirects and authentication responses.
while
resp
.
status
in
(
301
,
302
,
401
,
403
):
resp
.
close
()
if
resp
.
status
in
(
301
,
302
,):
...
...
@@ -268,7 +270,7 @@ class DAVFS(FS):
raise
OperationFailedError
(
msg
=
"redirection seems to be looping"
)
if
len
(
visited
)
>
10
:
raise
OperationFailedError
(
"too much redirection"
)
elif
resp
.
status
in
(
401
,
403
):
elif
resp
.
status
in
(
401
,
403
):
if
self
.
get_credentials
is
None
:
break
else
:
...
...
@@ -276,7 +278,7 @@ class DAVFS(FS):
if
creds
is
None
:
break
else
:
self
.
credentials
=
creds
self
.
credentials
=
creds
resp
=
self
.
_raw_request
(
url
,
method
,
body
,
headers
)
except
Exception
:
if
resp
is
not
None
:
...
...
@@ -343,8 +345,10 @@ class DAVFS(FS):
msg
=
str
(
e
)
raise
RemoteConnectionError
(
""
,
msg
=
msg
,
details
=
e
)
def
setcontents
(
self
,
path
,
contents
,
chunk_size
=
1024
*
64
):
resp
=
self
.
_request
(
path
,
"PUT"
,
contents
)
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
):
if
isinstance
(
data
,
six
.
text_type
):
data
=
data
.
encode
(
encoding
=
encoding
,
errors
=
errors
)
resp
=
self
.
_request
(
path
,
"PUT"
,
data
)
resp
.
close
()
if
resp
.
status
==
405
:
raise
ResourceInvalidError
(
path
)
...
...
@@ -353,7 +357,8 @@ class DAVFS(FS):
if
resp
.
status
not
in
(
200
,
201
,
204
):
raise_generic_error
(
resp
,
"setcontents"
,
path
)
def
open
(
self
,
path
,
mode
=
"r"
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
mode
=
mode
.
replace
(
"b"
,
""
)
.
replace
(
"t"
,
""
)
# Truncate the file if requested
contents
=
b
(
""
)
...
...
@@ -417,7 +422,7 @@ class DAVFS(FS):
if
self
.
_isurl
(
path
,
res
.
href
):
for
ps
in
res
.
propstats
:
if
ps
.
props
.
getElementsByTagNameNS
(
"DAV:"
,
"collection"
):
return
True
return
True
return
False
finally
:
response
.
close
()
...
...
@@ -437,11 +442,11 @@ class DAVFS(FS):
rt
=
ps
.
props
.
getElementsByTagNameNS
(
"DAV:"
,
"resourcetype"
)
cl
=
ps
.
props
.
getElementsByTagNameNS
(
"DAV:"
,
"collection"
)
if
rt
and
not
cl
:
return
True
return
True
return
False
finally
:
response
.
close
()
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
return
list
(
self
.
ilistdir
(
path
=
path
,
wildcard
=
wildcard
,
full
=
full
,
absolute
=
absolute
,
dirs_only
=
dirs_only
,
files_only
=
files_only
))
...
...
fs/expose/dokan/__init__.py
View file @
3ea4efe1
...
...
@@ -43,7 +43,7 @@ to subprocess.Popen::
If you are exposing an untrusted filesystem, you may like to apply the
wrapper class Win32SafetyFS before passing it into dokan. This will take
a number of steps to avoid suspicious operations on windows, such as
a number of steps to avoid suspicious operations on windows, such as
hiding autorun files.
The binding to Dokan is created via ctypes. Due to the very stable ABI of
...
...
@@ -77,9 +77,9 @@ from fs.wrapfs import WrapFS
try
:
import
libdokan
except
(
NotImplementedError
,
EnvironmentError
,
ImportError
,
NameError
,):
except
(
NotImplementedError
,
EnvironmentError
,
ImportError
,
NameError
,):
is_available
=
False
sys
.
modules
.
pop
(
"fs.expose.dokan.libdokan"
,
None
)
sys
.
modules
.
pop
(
"fs.expose.dokan.libdokan"
,
None
)
libdokan
=
None
else
:
is_available
=
True
...
...
@@ -168,7 +168,7 @@ def handle_fs_errors(func):
name
=
func
.
__name__
func
=
convert_fs_errors
(
func
)
@wraps
(
func
)
def
wrapper
(
*
args
,
**
kwds
):
def
wrapper
(
*
args
,
**
kwds
):
try
:
res
=
func
(
*
args
,
**
kwds
)
except
OSError
,
e
:
...
...
@@ -183,7 +183,7 @@ def handle_fs_errors(func):
res
=
0
return
res
return
wrapper
# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate the progress is still being
...
...
@@ -204,7 +204,7 @@ _TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000
def
_start_timeout_protect_thread
():
"""Start the background thread used to protect dokan from timeouts.
This function starts the background thread that monitors calls into the
dokan API and resets their timeouts. It's safe to call this more than
once, only a single thread will be started.
...
...
@@ -287,7 +287,7 @@ class FSOperations(object):
# We explicitly keep track of the size Dokan expects a file to be.
# This dict is indexed by path, then file handle.
self
.
_files_size_written
=
PathMap
()
def
get_ops_struct
(
self
):
"""Get a DOKAN_OPERATIONS struct mapping to our methods."""
struct
=
libdokan
.
DOKAN_OPERATIONS
()
...
...
@@ -325,9 +325,9 @@ class FSOperations(object):
"""
self
.
_files_lock
.
acquire
()
try
:
(
f2
,
path
,
lock
)
=
self
.
_files_by_handle
[
fh
]
(
f2
,
path
,
lock
)
=
self
.
_files_by_handle
[
fh
]
assert
f2
.
closed
self
.
_files_by_handle
[
fh
]
=
(
f
,
path
,
lock
)
self
.
_files_by_handle
[
fh
]
=
(
f
,
path
,
lock
)
return
fh
finally
:
self
.
_files_lock
.
release
()
...
...
@@ -336,10 +336,10 @@ class FSOperations(object):
"""Unregister the given file handle."""
self
.
_files_lock
.
acquire
()
try
:
(
f
,
path
,
lock
)
=
self
.
_files_by_handle
.
pop
(
fh
)
(
f
,
path
,
lock
)
=
self
.
_files_by_handle
.
pop
(
fh
)
del
self
.
_files_size_written
[
path
][
fh
]
if
not
self
.
_files_size_written
[
path
]:
del
self
.
_files_size_written
[
path
]
del
self
.
_files_size_written
[
path
]
finally
:
self
.
_files_lock
.
release
()
...
...
@@ -368,7 +368,7 @@ class FSOperations(object):
locks
=
self
.
_active_locks
[
path
]
except
KeyError
:
return
0
for
(
lh
,
lstart
,
lend
)
in
locks
:
for
(
lh
,
lstart
,
lend
)
in
locks
:
if
info
is
not
None
and
info
.
contents
.
Context
==
lh
:
continue
if
lstart
>=
offset
+
length
:
...
...
@@ -423,7 +423,8 @@ class FSOperations(object):
# Try to open the requested file. It may actually be a directory.
info
.
contents
.
Context
=
1
try
:
f
=
self
.
fs
.
open
(
path
,
mode
)
f
=
self
.
fs
.
open
(
path
,
mode
)
print
path
,
mode
,
repr
(
f
)
except
ResourceInvalidError
:
info
.
contents
.
IsDirectory
=
True
except
FSError
:
...
...
@@ -434,7 +435,7 @@ class FSOperations(object):
else
:
raise
else
:
info
.
contents
.
Context
=
self
.
_reg_file
(
f
,
path
)
info
.
contents
.
Context
=
self
.
_reg_file
(
f
,
path
)
return
retcode
@timeout_protect
...
...
@@ -444,10 +445,10 @@ class FSOperations(object):
if
self
.
_is_pending_delete
(
path
):
raise
ResourceNotFoundError
(
path
)
if
not
self
.
fs
.
isdir
(
path
):
if
not
self
.
fs
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
else
:
raise
ResourceInvalidError
(
path
)
if
not
self
.
fs
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
else
:
raise
ResourceInvalidError
(
path
)
info
.
contents
.
IsDirectory
=
True
@timeout_protect
...
...
@@ -468,7 +469,7 @@ class FSOperations(object):
self
.
fs
.
removedir
(
path
)
self
.
_pending_delete
.
remove
(
path
)
else
:
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
file
.
close
()
...
...
@@ -484,7 +485,7 @@ class FSOperations(object):
@handle_fs_errors
def
CloseFile
(
self
,
path
,
info
):
if
info
.
contents
.
Context
>=
MIN_FH
:
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
file
.
close
()
...
...
@@ -497,20 +498,20 @@ class FSOperations(object):
@handle_fs_errors
def
ReadFile
(
self
,
path
,
buffer
,
nBytesToRead
,
nBytesRead
,
offset
,
info
):
path
=
normpath
(
path
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
errno
=
self
.
_check_lock
(
path
,
offset
,
nBytesToRead
,
info
)
errno
=
self
.
_check_lock
(
path
,
offset
,
nBytesToRead
,
info
)
if
errno
:
return
errno
# This may be called after Cleanup, meaning we
# need to re-open the file.
if
file
.
closed
:
file
=
self
.
fs
.
open
(
path
,
file
.
mode
)
self
.
_rereg_file
(
info
.
contents
.
Context
,
file
)
file
=
self
.
fs
.
open
(
path
,
file
.
mode
)
self
.
_rereg_file
(
info
.
contents
.
Context
,
file
)
file
.
seek
(
offset
)
data
=
file
.
read
(
nBytesToRead
)
ctypes
.
memmove
(
buffer
,
ctypes
.
create_string_buffer
(
data
),
len
(
data
))
ctypes
.
memmove
(
buffer
,
ctypes
.
create_string_buffer
(
data
),
len
(
data
))
nBytesRead
[
0
]
=
len
(
data
)
finally
:
lock
.
release
()
...
...
@@ -520,23 +521,23 @@ class FSOperations(object):
def
WriteFile
(
self
,
path
,
buffer
,
nBytesToWrite
,
nBytesWritten
,
offset
,
info
):
path
=
normpath
(
path
)
fh
=
info
.
contents
.
Context
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
errno
=
self
.
_check_lock
(
path
,
offset
,
nBytesToWrite
,
info
)
errno
=
self
.
_check_lock
(
path
,
offset
,
nBytesToWrite
,
info
)
if
errno
:
return
errno
# This may be called after Cleanup, meaning we
# need to re-open the file.
if
file
.
closed
:
file
=
self
.
fs
.
open
(
path
,
file
.
mode
)
self
.
_rereg_file
(
info
.
contents
.
Context
,
file
)
file
=
self
.
fs
.
open
(
path
,
file
.
mode
)
self
.
_rereg_file
(
info
.
contents
.
Context
,
file
)
if
info
.
contents
.
WriteToEndOfFile
:
file
.
seek
(
0
,
os
.
SEEK_END
)
file
.
seek
(
0
,
os
.
SEEK_END
)
else
:
file
.
seek
(
offset
)
data
=
ctypes
.
create_string_buffer
(
nBytesToWrite
)
ctypes
.
memmove
(
data
,
buffer
,
nBytesToWrite
)
ctypes
.
memmove
(
data
,
buffer
,
nBytesToWrite
)
file
.
write
(
data
.
raw
)
nBytesWritten
[
0
]
=
len
(
data
.
raw
)
try
:
...
...
@@ -554,7 +555,7 @@ class FSOperations(object):
@handle_fs_errors
def
FlushFileBuffers
(
self
,
path
,
info
):
path
=
normpath
(
path
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
file
.
flush
()
...
...
@@ -567,7 +568,7 @@ class FSOperations(object):
path
=
normpath
(
path
)
finfo
=
self
.
fs
.
getinfo
(
path
)
data
=
buffer
.
contents
self
.
_info2finddataw
(
path
,
finfo
,
data
,
info
)
self
.
_info2finddataw
(
path
,
finfo
,
data
,
info
)
try
:
written_size
=
max
(
self
.
_files_size_written
[
path
]
.
values
())
except
KeyError
:
...
...
@@ -583,26 +584,25 @@ class FSOperations(object):
@handle_fs_errors
def
FindFiles
(
self
,
path
,
fillFindData
,
info
):
path
=
normpath
(
path
)
for
(
nm
,
finfo
)
in
self
.
fs
.
listdirinfo
(
path
):
fpath
=
pathjoin
(
path
,
nm
)
for
(
nm
,
finfo
)
in
self
.
fs
.
listdirinfo
(
path
):
fpath
=
pathjoin
(
path
,
nm
)
if
self
.
_is_pending_delete
(
fpath
):
continue
data
=
self
.
_info2finddataw
(
fpath
,
finfo
)
fillFindData
(
ctypes
.
byref
(
data
),
info
)
data
=
self
.
_info2finddataw
(
fpath
,
finfo
)
fillFindData
(
ctypes
.
byref
(
data
),
info
)
@timeout_protect
@handle_fs_errors
def
FindFilesWithPattern
(
self
,
path
,
pattern
,
fillFindData
,
info
):
path
=
normpath
(
path
)
infolist
=
[]
for
(
nm
,
finfo
)
in
self
.
fs
.
listdirinfo
(
path
):
fpath
=
pathjoin
(
path
,
nm
)
for
(
nm
,
finfo
)
in
self
.
fs
.
listdirinfo
(
path
):
fpath
=
pathjoin
(
path
,
nm
)
if
self
.
_is_pending_delete
(
fpath
):
continue
if
not
libdokan
.
DokanIsNameInExpression
(
pattern
,
nm
,
True
):
if
not
libdokan
.
DokanIsNameInExpression
(
pattern
,
nm
,
True
):
continue
data
=
self
.
_info2finddataw
(
fpath
,
finfo
,
None
)
fillFindData
(
ctypes
.
byref
(
data
),
info
)
data
=
self
.
_info2finddataw
(
fpath
,
finfo
,
None
)
fillFindData
(
ctypes
.
byref
(
data
),
info
)
@timeout_protect
@handle_fs_errors
...
...
@@ -620,7 +620,7 @@ class FSOperations(object):
atime
=
_filetime2datetime
(
atime
.
contents
)
except
ValueError
:
atime
=
None
if
mtime
is
not
None
:
if
mtime
is
not
None
:
try
:
mtime
=
_filetime2datetime
(
mtime
.
contents
)
except
ValueError
:
...
...
@@ -648,7 +648,7 @@ class FSOperations(object):
def
DeleteDirectory
(
self
,
path
,
info
):
path
=
normpath
(
path
)
for
nm
in
self
.
fs
.
listdir
(
path
):
if
not
self
.
_is_pending_delete
(
pathjoin
(
path
,
nm
)):
if
not
self
.
_is_pending_delete
(
pathjoin
(
path
,
nm
)):
raise
DirectoryNotEmptyError
(
path
)
self
.
_pending_delete
.
add
(
path
)
# the actual delete takes place in self.CloseFile()
...
...
@@ -658,7 +658,7 @@ class FSOperations(object):
def
MoveFile
(
self
,
src
,
dst
,
overwrite
,
info
):
# Close the file if we have an open handle to it.
if
info
.
contents
.
Context
>=
MIN_FH
:
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
file
.
close
()
...
...
@@ -668,15 +668,15 @@ class FSOperations(object):
src
=
normpath
(
src
)
dst
=
normpath
(
dst
)
if
info
.
contents
.
IsDirectory
:
self
.
fs
.
movedir
(
src
,
dst
,
overwrite
=
overwrite
)
self
.
fs
.
movedir
(
src
,
dst
,
overwrite
=
overwrite
)
else
:
self
.
fs
.
move
(
src
,
dst
,
overwrite
=
overwrite
)
self
.
fs
.
move
(
src
,
dst
,
overwrite
=
overwrite
)
@timeout_protect
@handle_fs_errors
def
SetEndOfFile
(
self
,
path
,
length
,
info
):
path
=
normpath
(
path
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
info
.
contents
.
Context
)
lock
.
acquire
()
try
:
pos
=
file
.
tell
()
...
...
@@ -684,7 +684,7 @@ class FSOperations(object):
file
.
seek
(
length
)
file
.
truncate
()
if
pos
<
length
:
file
.
seek
(
min
(
pos
,
length
))
file
.
seek
(
min
(
pos
,
length
))
finally
:
lock
.
release
()
...
...
@@ -694,15 +694,15 @@ class FSOperations(object):
# It's better to pretend an operation is possible and have it fail
# than to pretend an operation will fail when it's actually possible.
large_amount
=
100
*
1024
*
1024
*
1024
nBytesFree
[
0
]
=
self
.
fs
.
getmeta
(
"free_space"
,
large_amount
)
nBytesTotal
[
0
]
=
self
.
fs
.
getmeta
(
"total_space"
,
2
*
large_amount
)
nBytesFree
[
0
]
=
self
.
fs
.
getmeta
(
"free_space"
,
large_amount
)
nBytesTotal
[
0
]
=
self
.
fs
.
getmeta
(
"total_space"
,
2
*
large_amount
)
nBytesAvail
[
0
]
=
nBytesFree
[
0
]
@handle_fs_errors
def
GetVolumeInformation
(
self
,
vnmBuf
,
vnmSz
,
sNum
,
maxLen
,
flags
,
fnmBuf
,
fnmSz
,
info
):
nm
=
ctypes
.
create_unicode_buffer
(
self
.
volname
[:
vnmSz
-
1
])
sz
=
(
len
(
nm
.
value
)
+
1
)
*
ctypes
.
sizeof
(
ctypes
.
c_wchar
)
ctypes
.
memmove
(
vnmBuf
,
nm
,
sz
)
sz
=
(
len
(
nm
.
value
)
+
1
)
*
ctypes
.
sizeof
(
ctypes
.
c_wchar
)
ctypes
.
memmove
(
vnmBuf
,
nm
,
sz
)
if
sNum
:
sNum
[
0
]
=
0
if
maxLen
:
...
...
@@ -710,8 +710,8 @@ class FSOperations(object):
if
flags
:
flags
[
0
]
=
0
nm
=
ctypes
.
create_unicode_buffer
(
self
.
fsname
[:
fnmSz
-
1
])
sz
=
(
len
(
nm
.
value
)
+
1
)
*
ctypes
.
sizeof
(
ctypes
.
c_wchar
)
ctypes
.
memmove
(
fnmBuf
,
nm
,
sz
)
sz
=
(
len
(
nm
.
value
)
+
1
)
*
ctypes
.
sizeof
(
ctypes
.
c_wchar
)
ctypes
.
memmove
(
fnmBuf
,
nm
,
sz
)
@timeout_protect
@handle_fs_errors
...
...
@@ -731,10 +731,10 @@ class FSOperations(object):
except
KeyError
:
locks
=
self
.
_active_locks
[
path
]
=
[]
else
:
errno
=
self
.
_check_lock
(
path
,
offset
,
length
,
None
,
locks
)
errno
=
self
.
_check_lock
(
path
,
offset
,
length
,
None
,
locks
)
if
errno
:
return
errno
locks
.
append
((
info
.
contents
.
Context
,
offset
,
end
))
locks
.
append
((
info
.
contents
.
Context
,
offset
,
end
))
return
0
@timeout_protect
...
...
@@ -747,7 +747,7 @@ class FSOperations(object):
except
KeyError
:
return
-
ERROR_NOT_LOCKED
todel
=
[]
for
i
,
(
lh
,
lstart
,
lend
)
in
enumerate
(
locks
):
for
i
,
(
lh
,
lstart
,
lend
)
in
enumerate
(
locks
):
if
info
.
contents
.
Context
==
lh
:
if
lstart
==
offset
:
if
lend
==
offset
+
length
:
...
...
@@ -755,17 +755,17 @@ class FSOperations(object):
if
not
todel
:
return
-
ERROR_NOT_LOCKED
for
i
in
reversed
(
todel
):
del
locks
[
i
]
del
locks
[
i
]
return
0
@handle_fs_errors
def
Unmount
(
self
,
info
):
pass
def
_info2attrmask
(
self
,
path
,
info
,
hinfo
=
None
):
def
_info2attrmask
(
self
,
path
,
info
,
hinfo
=
None
):
"""Convert a file/directory info dict to a win32 file attribute mask."""
attrs
=
0
st_mode
=
info
.
get
(
"st_mode"
,
None
)
st_mode
=
info
.
get
(
"st_mode"
,
None
)
if
st_mode
:
if
statinfo
.
S_ISDIR
(
st_mode
):
attrs
|=
FILE_ATTRIBUTE_DIRECTORY
...
...
@@ -859,7 +859,7 @@ def _normalise_drive_string(drive):
if
not
":
\\
"
.
startswith
(
drive
[
1
:]):
raise
ValueError
(
"invalid drive letter:
%
r"
%
(
drive
,))
return
drive
[
0
]
.
upper
()
def
mount
(
fs
,
drive
,
foreground
=
False
,
ready_callback
=
None
,
unmount_callback
=
None
,
**
kwds
):
"""Mount the given FS at the given drive letter, using Dokan.
...
...
fs/expose/ftp.py
View file @
3ea4efe1
...
...
@@ -26,6 +26,7 @@ from pyftpdlib import ftpserver
from
fs.path
import
*
from
fs.osfs
import
OSFS
from
fs.errors
import
convert_fs_errors
from
fs
import
iotools
# Get these once so we can reuse them:
...
...
@@ -96,8 +97,9 @@ class FTPFS(ftpserver.AbstractedFS):
@convert_fs_errors
@decode_args
def
open
(
self
,
path
,
mode
):
return
self
.
fs
.
open
(
path
,
mode
)
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
,
**
kwargs
):
return
self
.
fs
.
open
(
path
,
mode
,
**
kwargs
)
@convert_fs_errors
def
chdir
(
self
,
path
):
...
...
fs/expose/fuse/__init__.py
View file @
3ea4efe1
...
...
@@ -70,11 +70,11 @@ from six import PY3
from
six
import
b
try
:
#
if PY3:
# import fuse3
as fuse
#
else:
# import
fuse
import
fuse_ctypes
as
fuse
if
PY3
:
from
fs.expose.fuse
import
fuse_ctypes
as
fuse
else
:
from
fs.expose.fuse
import
fuse3
as
fuse
except
NotImplementedError
:
raise
ImportError
(
"FUSE found but not usable"
)
try
:
...
...
@@ -101,7 +101,7 @@ def handle_fs_errors(func):
name
=
func
.
__name__
func
=
convert_fs_errors
(
func
)
@wraps
(
func
)
def
wrapper
(
*
args
,
**
kwds
):
def
wrapper
(
*
args
,
**
kwds
):
#logger.debug("CALL %r %s",name,repr(args))
try
:
res
=
func
(
*
args
,
**
kwds
)
...
...
@@ -114,7 +114,6 @@ def handle_fs_errors(func):
return
0
return
res
return
wrapper
class
FSOperations
(
Operations
):
...
...
@@ -159,7 +158,7 @@ class FSOperations(Operations):
(
f
,
path
,
lock
)
=
self
.
_files_by_handle
.
pop
(
fh
.
fh
)
del
self
.
_files_size_written
[
path
][
fh
.
fh
]
if
not
self
.
_files_size_written
[
path
]:
del
self
.
_files_size_written
[
path
]
del
self
.
_files_size_written
[
path
]
finally
:
self
.
_files_lock
.
release
()
...
...
@@ -170,11 +169,11 @@ class FSOperations(Operations):
def
destroy
(
self
,
data
):
if
self
.
_on_destroy
:
self
.
_on_destroy
()
@handle_fs_errors
def
chmod
(
self
,
path
,
mode
):
raise
UnsupportedError
(
"chmod"
)
@handle_fs_errors
def
chown
(
self
,
path
,
uid
,
gid
):
raise
UnsupportedError
(
"chown"
)
...
...
@@ -186,13 +185,13 @@ class FSOperations(Operations):
# I haven't figured out how to distinguish between "w" and "w+".
# Go with the most permissive option.
mode
=
flags_to_mode
(
fi
.
flags
)
fh
=
self
.
_reg_file
(
self
.
fs
.
open
(
path
,
mode
),
path
)
fh
=
self
.
_reg_file
(
self
.
fs
.
open
(
path
,
mode
),
path
)
fi
.
fh
=
fh
fi
.
keep_cache
=
0
@handle_fs_errors
def
flush
(
self
,
path
,
fh
):
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
file
.
flush
()
...
...
@@ -201,7 +200,7 @@ class FSOperations(Operations):
@handle_fs_errors
def
getattr
(
self
,
path
,
fh
=
None
):
attrs
=
self
.
_get_stat_dict
(
path
.
decode
(
NATIVE_ENCODING
))
attrs
=
self
.
_get_stat_dict
(
path
.
decode
(
NATIVE_ENCODING
))
return
attrs
@handle_fs_errors
...
...
@@ -209,12 +208,12 @@ class FSOperations(Operations):
path
=
path
.
decode
(
NATIVE_ENCODING
)
name
=
name
.
decode
(
NATIVE_ENCODING
)
try
:
value
=
self
.
fs
.
getxattr
(
path
,
name
)
value
=
self
.
fs
.
getxattr
(
path
,
name
)
except
AttributeError
:
raise
UnsupportedError
(
"getxattr"
)
else
:
if
value
is
None
:
raise
OSError
(
errno
.
ENODATA
,
"no attribute '
%
s'"
%
(
name
,))
raise
OSError
(
errno
.
ENODATA
,
"no attribute '
%
s'"
%
(
name
,))
return
value
@handle_fs_errors
...
...
@@ -245,13 +244,13 @@ class FSOperations(Operations):
def
open
(
self
,
path
,
fi
):
path
=
path
.
decode
(
NATIVE_ENCODING
)
mode
=
flags_to_mode
(
fi
.
flags
)
fi
.
fh
=
self
.
_reg_file
(
self
.
fs
.
open
(
path
,
mode
),
path
)
fi
.
fh
=
self
.
_reg_file
(
self
.
fs
.
open
(
path
,
mode
),
path
)
fi
.
keep_cache
=
0
return
0
@handle_fs_errors
def
read
(
self
,
path
,
size
,
offset
,
fh
):
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
file
.
seek
(
offset
)
...
...
@@ -263,10 +262,10 @@ class FSOperations(Operations):
@handle_fs_errors
def
readdir
(
self
,
path
,
fh
=
None
):
path
=
path
.
decode
(
NATIVE_ENCODING
)
entries
=
[
'.'
,
'..'
]
for
(
nm
,
info
)
in
self
.
fs
.
listdirinfo
(
path
):
self
.
_fill_stat_dict
(
pathjoin
(
path
,
nm
),
info
)
entries
.
append
((
nm
.
encode
(
NATIVE_ENCODING
),
info
,
0
))
entries
=
[
'.'
,
'..'
]
for
(
nm
,
info
)
in
self
.
fs
.
listdirinfo
(
path
):
self
.
_fill_stat_dict
(
pathjoin
(
path
,
nm
),
info
)
entries
.
append
((
nm
.
encode
(
NATIVE_ENCODING
),
info
,
0
))
return
entries
@handle_fs_errors
...
...
@@ -275,7 +274,7 @@ class FSOperations(Operations):
@handle_fs_errors
def
release
(
self
,
path
,
fh
):
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
file
.
close
()
...
...
@@ -288,7 +287,7 @@ class FSOperations(Operations):
path
=
path
.
decode
(
NATIVE_ENCODING
)
name
=
name
.
decode
(
NATIVE_ENCODING
)
try
:
return
self
.
fs
.
delxattr
(
path
,
name
)
return
self
.
fs
.
delxattr
(
path
,
name
)
except
AttributeError
:
raise
UnsupportedError
(
"removexattr"
)
...
...
@@ -297,12 +296,12 @@ class FSOperations(Operations):
old
=
old
.
decode
(
NATIVE_ENCODING
)
new
=
new
.
decode
(
NATIVE_ENCODING
)
try
:
self
.
fs
.
rename
(
old
,
new
)
self
.
fs
.
rename
(
old
,
new
)
except
FSError
:
if
self
.
fs
.
isdir
(
old
):
self
.
fs
.
movedir
(
old
,
new
)
self
.
fs
.
movedir
(
old
,
new
)
else
:
self
.
fs
.
move
(
old
,
new
)
self
.
fs
.
move
(
old
,
new
)
@handle_fs_errors
def
rmdir
(
self
,
path
):
...
...
@@ -314,7 +313,7 @@ class FSOperations(Operations):
path
=
path
.
decode
(
NATIVE_ENCODING
)
name
=
name
.
decode
(
NATIVE_ENCODING
)
try
:
return
self
.
fs
.
setxattr
(
path
,
name
,
value
)
return
self
.
fs
.
setxattr
(
path
,
name
,
value
)
except
AttributeError
:
raise
UnsupportedError
(
"setxattr"
)
...
...
@@ -326,18 +325,18 @@ class FSOperations(Operations):
def
truncate
(
self
,
path
,
length
,
fh
=
None
):
path
=
path
.
decode
(
NATIVE_ENCODING
)
if
fh
is
None
and
length
==
0
:
self
.
fs
.
open
(
path
,
"wb"
)
.
close
()
self
.
fs
.
open
(
path
,
"wb"
)
.
close
()
else
:
if
fh
is
None
:
f
=
self
.
fs
.
open
(
path
,
"rb+"
)
if
not
hasattr
(
f
,
"truncate"
):
f
=
self
.
fs
.
open
(
path
,
"rb+"
)
if
not
hasattr
(
f
,
"truncate"
):
raise
UnsupportedError
(
"truncate"
)
f
.
truncate
(
length
)
else
:
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
_
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
if
not
hasattr
(
file
,
"truncate"
):
if
not
hasattr
(
file
,
"truncate"
):
raise
UnsupportedError
(
"truncate"
)
file
.
truncate
(
length
)
finally
:
...
...
@@ -360,18 +359,18 @@ class FSOperations(Operations):
self
.
fs
.
remove
(
path
)
@handle_fs_errors
def
utimens
(
self
,
path
,
times
=
None
):
def
utimens
(
self
,
path
,
times
=
None
):
path
=
path
.
decode
(
NATIVE_ENCODING
)
accessed_time
,
modified_time
=
times
if
accessed_time
is
not
None
:
accessed_time
=
datetime
.
datetime
.
fromtimestamp
(
accessed_time
)
if
modified_time
is
not
None
:
modified_time
=
datetime
.
datetime
.
fromtimestamp
(
modified_time
)
self
.
fs
.
settimes
(
path
,
accessed_time
,
modified_time
)
self
.
fs
.
settimes
(
path
,
accessed_time
,
modified_time
)
@handle_fs_errors
def
write
(
self
,
path
,
data
,
offset
,
fh
):
(
file
,
path
,
lock
)
=
self
.
_get_file
(
fh
)
(
file
,
path
,
lock
)
=
self
.
_get_file
(
fh
)
lock
.
acquire
()
try
:
file
.
seek
(
offset
)
...
...
@@ -385,7 +384,7 @@ class FSOperations(Operations):
def
_get_stat_dict
(
self
,
path
):
"""Build a 'stat' dictionary for the given file."""
info
=
self
.
fs
.
getinfo
(
path
)
self
.
_fill_stat_dict
(
path
,
info
)
self
.
_fill_stat_dict
(
path
,
info
)
return
info
def
_fill_stat_dict
(
self
,
path
,
info
):
...
...
@@ -395,28 +394,28 @@ class FSOperations(Operations):
for
k
in
private_keys
:
del
info
[
k
]
# Basic stuff that is constant for all paths
info
.
setdefault
(
"st_ino"
,
0
)
info
.
setdefault
(
"st_dev"
,
0
)
info
.
setdefault
(
"st_uid"
,
uid
)
info
.
setdefault
(
"st_gid"
,
gid
)
info
.
setdefault
(
"st_rdev"
,
0
)
info
.
setdefault
(
"st_blksize"
,
1024
)
info
.
setdefault
(
"st_blocks"
,
1
)
info
.
setdefault
(
"st_ino"
,
0
)
info
.
setdefault
(
"st_dev"
,
0
)
info
.
setdefault
(
"st_uid"
,
uid
)
info
.
setdefault
(
"st_gid"
,
gid
)
info
.
setdefault
(
"st_rdev"
,
0
)
info
.
setdefault
(
"st_blksize"
,
1024
)
info
.
setdefault
(
"st_blocks"
,
1
)
# The interesting stuff
if
'st_mode'
not
in
info
:
if
self
.
fs
.
isdir
(
path
):
info
[
'st_mode'
]
=
0755
else
:
info
[
'st_mode'
]
=
0666
mode
=
info
[
'st_mode'
]
info
[
'st_mode'
]
=
0666
mode
=
info
[
'st_mode'
]
if
not
statinfo
.
S_ISDIR
(
mode
)
and
not
statinfo
.
S_ISREG
(
mode
):
if
self
.
fs
.
isdir
(
path
):
info
[
"st_mode"
]
=
mode
|
statinfo
.
S_IFDIR
info
.
setdefault
(
"st_nlink"
,
2
)
info
.
setdefault
(
"st_nlink"
,
2
)
else
:
info
[
"st_mode"
]
=
mode
|
statinfo
.
S_IFREG
info
.
setdefault
(
"st_nlink"
,
1
)
for
(
key1
,
key2
)
in
[(
"st_atime"
,
"accessed_time"
),(
"st_mtime"
,
"modified_time"
),(
"st_ctime"
,
"created_time"
)]:
info
.
setdefault
(
"st_nlink"
,
1
)
for
(
key1
,
key2
)
in
[(
"st_atime"
,
"accessed_time"
),
(
"st_mtime"
,
"modified_time"
),
(
"st_ctime"
,
"created_time"
)]:
if
key1
not
in
info
:
if
key2
in
info
:
info
[
key1
]
=
time
.
mktime
(
info
[
key2
]
.
timetuple
())
...
...
@@ -467,6 +466,7 @@ def mount(fs, path, foreground=False, ready_callback=None, unmount_callback=None
ready_callback
()
if
unmount_callback
:
orig_unmount
=
mp
.
unmount
def
new_unmount
():
orig_unmount
()
unmount_callback
()
...
...
@@ -492,8 +492,9 @@ def unmount(path):
args
=
[
"fusermount"
,
"-u"
,
path
]
for
num_tries
in
xrange
(
3
):
p
=
subprocess
.
Popen
(
args
,
stderr
=
subprocess
.
PIPE
,
stdout
=
subprocess
.
PIPE
)
p
=
subprocess
.
Popen
(
args
,
stderr
=
subprocess
.
PIPE
,
stdout
=
subprocess
.
PIPE
)
(
stdout
,
stderr
)
=
p
.
communicate
()
if
p
.
returncode
==
0
:
return
...
...
@@ -544,7 +545,7 @@ class MountProcess(subprocess.Popen):
def
__init__
(
self
,
fs
,
path
,
fuse_opts
=
{},
nowait
=
False
,
**
kwds
):
self
.
path
=
path
if
nowait
or
kwds
.
get
(
"close_fds"
,
False
):
if
nowait
or
kwds
.
get
(
"close_fds"
,
False
):
if
PY3
:
cmd
=
"from pickle import loads;"
else
:
...
...
@@ -553,11 +554,11 @@ class MountProcess(subprocess.Popen):
cmd
=
cmd
+
'data = loads(
%
s); '
cmd
=
cmd
+
'from fs.expose.fuse import MountProcess; '
cmd
=
cmd
+
'MountProcess._do_mount_nowait(data)'
cmd
=
cmd
%
(
repr
(
cPickle
.
dumps
((
fs
,
path
,
fuse_opts
),
-
1
)),)
cmd
=
[
sys
.
executable
,
"-c"
,
cmd
]
super
(
MountProcess
,
self
)
.
__init__
(
cmd
,
**
kwds
)
cmd
=
cmd
%
(
repr
(
cPickle
.
dumps
((
fs
,
path
,
fuse_opts
),
-
1
)),)
cmd
=
[
sys
.
executable
,
"-c"
,
cmd
]
super
(
MountProcess
,
self
)
.
__init__
(
cmd
,
**
kwds
)
else
:
(
r
,
w
)
=
os
.
pipe
()
(
r
,
w
)
=
os
.
pipe
()
if
PY3
:
cmd
=
"from pickle import loads;"
else
:
...
...
@@ -566,15 +567,18 @@ class MountProcess(subprocess.Popen):
cmd
=
cmd
+
'data = loads(
%
s); '
cmd
=
cmd
+
'from fs.expose.fuse import MountProcess; '
cmd
=
cmd
+
'MountProcess._do_mount_wait(data)'
cmd
=
cmd
%
(
repr
(
cPickle
.
dumps
((
fs
,
path
,
fuse_opts
,
r
,
w
),
-
1
)),)
cmd
=
[
sys
.
executable
,
"-c"
,
cmd
]
super
(
MountProcess
,
self
)
.
__init__
(
cmd
,
**
kwds
)
cmd
=
cmd
%
(
repr
(
cPickle
.
dumps
((
fs
,
path
,
fuse_opts
,
r
,
w
),
-
1
)),)
cmd
=
[
sys
.
executable
,
"-c"
,
cmd
]
super
(
MountProcess
,
self
)
.
__init__
(
cmd
,
**
kwds
)
os
.
close
(
w
)
byte
=
os
.
read
(
r
,
1
)
byte
=
os
.
read
(
r
,
1
)
if
byte
!=
b
(
"S"
):
err_text
=
os
.
read
(
r
,
20
)
self
.
terminate
()
raise
RuntimeError
(
"FUSE error: "
+
os
.
read
(
r
,
20
)
.
decode
(
NATIVE_ENCODING
))
if
hasattr
(
err_text
,
'decode'
):
err_text
=
err_text
.
decode
(
NATIVE_ENCODING
)
raise
RuntimeError
(
"FUSE error: "
+
err_text
)
def
unmount
(
self
):
"""Cleanly unmount the FUSE filesystem, terminating this subprocess."""
...
...
@@ -586,7 +590,7 @@ class MountProcess(subprocess.Popen):
unmount
(
self
.
path
)
except
OSError
:
pass
tmr
=
threading
.
Timer
(
self
.
unmount_timeout
,
killme
)
tmr
=
threading
.
Timer
(
self
.
unmount_timeout
,
killme
)
tmr
.
start
()
self
.
wait
()
tmr
.
cancel
()
...
...
@@ -594,56 +598,60 @@ class MountProcess(subprocess.Popen):
if
not
hasattr
(
subprocess
.
Popen
,
"terminate"
):
def
terminate
(
self
):
"""Gracefully terminate the subprocess."""
os
.
kill
(
self
.
pid
,
signal
.
SIGTERM
)
os
.
kill
(
self
.
pid
,
signal
.
SIGTERM
)
if
not
hasattr
(
subprocess
.
Popen
,
"kill"
):
def
kill
(
self
):
"""Forcibly terminate the subprocess."""
os
.
kill
(
self
.
pid
,
signal
.
SIGKILL
)
os
.
kill
(
self
.
pid
,
signal
.
SIGKILL
)
@staticmethod
def
_do_mount_nowait
(
data
):
"""Perform the specified mount, return without waiting."""
(
fs
,
path
,
opts
)
=
data
fs
,
path
,
opts
=
data
opts
[
"foreground"
]
=
True
def
unmount_callback
():
fs
.
close
()
opts
[
"unmount_callback"
]
=
unmount_callback
mount
(
fs
,
path
,
*
opts
)
mount
(
fs
,
path
,
*
opts
)
@staticmethod
def
_do_mount_wait
(
data
):
"""Perform the specified mount, signalling when ready."""
(
fs
,
path
,
opts
,
r
,
w
)
=
data
fs
,
path
,
opts
,
r
,
w
=
data
os
.
close
(
r
)
opts
[
"foreground"
]
=
True
successful
=
[]
def
ready_callback
():
successful
.
append
(
True
)
os
.
write
(
w
,
b
(
"S"
))
os
.
close
(
w
)
opts
[
"ready_callback"
]
=
ready_callback
def
unmount_callback
():
fs
.
close
()
opts
[
"unmount_callback"
]
=
unmount_callback
try
:
mount
(
fs
,
path
,
**
opts
)
except
Exception
,
e
:
os
.
write
(
w
,
b
(
"E"
)
+
b
(
e
))
mount
(
fs
,
path
,
**
opts
)
except
Exception
,
e
:
os
.
write
(
w
,
b
(
"E"
)
+
unicode
(
e
)
.
encode
(
'ascii'
,
errors
=
'replace'
))
os
.
close
(
w
)
if
not
successful
:
os
.
write
(
w
,
b
(
"EMount unsuccessful"
))
os
.
close
(
w
)
else
:
if
not
successful
:
os
.
write
(
w
,
b
(
"E"
))
os
.
close
(
w
)
if
__name__
==
"__main__"
:
import
os
,
os
.
path
import
os
import
os.path
from
fs.tempfs
import
TempFS
mount_point
=
os
.
path
.
join
(
os
.
environ
[
"HOME"
],
"fs.expose.fuse"
)
if
not
os
.
path
.
exists
(
mount_point
):
os
.
makedirs
(
mount_point
)
def
ready_callback
():
print
"READY"
mount
(
TempFS
(),
mount_point
,
foreground
=
True
,
ready_callback
=
ready_callback
)
fs/expose/sftp.py
View file @
3ea4efe1
...
...
@@ -122,7 +122,7 @@ class SFTPServerInterface(paramiko.SFTPServerInterface):
if
not
isinstance
(
stat
,
int
):
stats
.
append
(
stat
)
return
stats
@report_sftp_errors
def
stat
(
self
,
path
):
if
not
isinstance
(
path
,
unicode
):
...
...
@@ -221,8 +221,8 @@ class SFTPHandle(paramiko.SFTPHandle):
"""
def
__init__
(
self
,
owner
,
path
,
flags
):
super
(
SFTPHandle
,
self
)
.
__init__
(
flags
)
mode
=
flags_to_mode
(
flags
)
+
"b"
super
(
SFTPHandle
,
self
)
.
__init__
(
flags
)
mode
=
flags_to_mode
(
flags
)
self
.
owner
=
owner
if
not
isinstance
(
path
,
unicode
):
path
=
path
.
decode
(
self
.
owner
.
encoding
)
...
...
fs/expose/wsgi/wsgi.py
View file @
3ea4efe1
...
...
@@ -18,55 +18,55 @@ class Request(object):
"""Very simple request object"""
def
__init__
(
self
,
environ
,
start_response
):
self
.
environ
=
environ
self
.
start_response
=
start_response
self
.
start_response
=
start_response
self
.
path
=
environ
.
get
(
'PATH_INFO'
)
class
WSGIServer
(
object
):
"""Light-weight WSGI server that exposes an FS"""
def
__init__
(
self
,
serve_fs
,
indexes
=
True
,
dir_template
=
None
,
chunk_size
=
16
*
1024
*
1024
):
if
dir_template
is
None
:
from
dirtemplate
import
template
as
dir_template
from
dirtemplate
import
template
as
dir_template
self
.
serve_fs
=
serve_fs
self
.
indexes
=
indexes
self
.
indexes
=
indexes
self
.
chunk_size
=
chunk_size
self
.
dir_template
=
Template
(
dir_template
)
self
.
dir_template
=
Template
(
dir_template
)
def
__call__
(
self
,
environ
,
start_response
):
request
=
Request
(
environ
,
start_response
)
if
not
self
.
serve_fs
.
exists
(
request
.
path
):
return
self
.
serve_404
(
request
)
return
self
.
serve_404
(
request
)
if
self
.
serve_fs
.
isdir
(
request
.
path
):
if
not
self
.
indexes
:
return
self
.
serve_404
(
request
)
return
self
.
serve_dir
(
request
)
else
:
return
self
.
serve_file
(
request
)
def
serve_file
(
self
,
request
):
def
serve_file
(
self
,
request
):
"""Serve a file, guessing a mime-type"""
path
=
request
.
path
path
=
request
.
path
serving_file
=
None
try
:
try
:
serving_file
=
self
.
serve_fs
.
open
(
path
,
'rb'
)
except
Exception
,
e
:
if
serving_file
is
not
None
:
serving_file
.
close
()
return
self
.
serve_500
(
request
,
str
(
e
))
mime_type
=
mimetypes
.
guess_type
(
basename
(
path
))
mime_type
=
mimetypes
.
guess_type
(
basename
(
path
))
file_size
=
self
.
serve_fs
.
getsize
(
path
)
headers
=
[(
'Content-Type'
,
mime_type
),
(
'Content-Length'
,
str
(
file_size
))]
def
gen_file
():
try
:
while
True
:
...
...
@@ -76,36 +76,36 @@ class WSGIServer(object):
yield
data
finally
:
serving_file
.
close
()
request
.
start_response
(
'200 OK'
,
headers
)
return
gen_file
()
return
gen_file
()
def
serve_dir
(
self
,
request
):
"""Serve an index page"""
fs
=
self
.
serve_fs
isdir
=
fs
.
isdir
path
=
request
.
path
dirinfo
=
fs
.
listdirinfo
(
path
,
full
=
True
,
absolute
=
True
)
isdir
=
fs
.
isdir
path
=
request
.
path
dirinfo
=
fs
.
listdirinfo
(
path
,
full
=
True
,
absolute
=
True
)
entries
=
[]
for
p
,
info
in
dirinfo
:
entry
=
{}
entry
[
'path'
]
=
p
entry
[
'name'
]
=
basename
(
p
)
entry
[
'size'
]
=
info
.
get
(
'size'
,
'unknown'
)
entry
[
'created_time'
]
=
info
.
get
(
'created_time'
)
entry
[
'created_time'
]
=
info
.
get
(
'created_time'
)
if
isdir
(
p
):
entry
[
'type'
]
=
'dir'
else
:
entry
[
'type'
]
=
'file'
entry
[
'type'
]
=
'file'
entries
.
append
(
entry
)
# Put dirs first, and sort by reverse created time order
no_time
=
datetime
(
1970
,
1
,
1
,
1
,
0
)
entries
.
sort
(
key
=
lambda
k
:(
k
[
'type'
]
==
'dir'
,
k
.
get
(
'created_time'
)
or
no_time
),
reverse
=
True
)
# Turn datetime to text and tweak names
for
entry
in
entries
:
t
=
entry
.
get
(
'created_time'
)
...
...
@@ -113,35 +113,35 @@ class WSGIServer(object):
entry
[
'created_time'
]
=
t
.
ctime
()
if
entry
[
'type'
]
==
'dir'
:
entry
[
'name'
]
+=
'/'
# Add an up dir link for non-root
if
path
not
in
(
''
,
'/'
):
entries
.
insert
(
0
,
dict
(
name
=
'../'
,
path
=
'../'
,
type
=
"dir"
,
size
=
''
,
created_time
=
'..'
))
# Render the mako template
html
=
self
.
dir_template
.
render
(
**
dict
(
fs
=
self
.
serve_fs
,
path
=
path
,
dirlist
=
entries
))
request
.
start_response
(
'200 OK'
,
[(
'Content-Type'
,
'text/html'
),
(
'Content-Length'
,
'
%
i'
%
len
(
html
))])
return
[
html
]
def
serve_404
(
self
,
request
,
msg
=
'Not found'
):
"""Serves a Not found page"""
request
.
start_response
(
'404 NOT FOUND'
,
[(
'Content-Type'
,
'text/html'
)])
return
[
msg
]
def
serve_500
(
self
,
request
,
msg
=
'Unable to complete request'
):
"""Serves an internal server error page"""
"""Serves an internal server error page"""
request
.
start_response
(
'500 INTERNAL SERVER ERROR'
,
[(
'Content-Type'
,
'text/html'
)])
return
[
msg
]
def
serve_fs
(
fs
,
indexes
=
True
):
"""Serves an FS object via WSGI"""
application
=
WSGIServer
(
fs
,
indexes
)
return
application
return
application
fs/expose/xmlrpc.py
View file @
3ea4efe1
...
...
@@ -18,9 +18,11 @@ an FS object, which can then be exposed using whatever server you choose
import
xmlrpclib
from
SimpleXMLRPCServer
import
SimpleXMLRPCServer
from
datetime
import
datetime
import
base64
import
six
from
six
import
PY3
,
b
from
six
import
PY3
class
RPCFSInterface
(
object
):
"""Wrapper to expose an FS via a XML-RPC compatible interface.
...
...
@@ -40,26 +42,23 @@ class RPCFSInterface(object):
must return something that can be represented in ASCII. The default
is base64-encoded UTF-8.
"""
if
PY3
:
return
path
return
path
.
encode
(
"utf8"
)
.
encode
(
"base64"
)
#return path
return
six
.
text_type
(
base64
.
b64encode
(
path
.
encode
(
"utf8"
)),
'ascii'
)
def
decode_path
(
self
,
path
):
"""Decode paths arriving over the wire."""
if
PY3
:
return
path
return
path
.
decode
(
"base64"
)
.
decode
(
"utf8"
)
return
six
.
text_type
(
base64
.
b64decode
(
path
.
encode
(
'ascii'
)),
'utf8'
)
def
getmeta
(
self
,
meta_name
):
meta
=
self
.
fs
.
getmeta
(
meta_name
)
if
isinstance
(
meta
,
basestring
):
meta
=
meta
.
decode
(
'base64'
)
meta
=
self
.
decode_path
(
meta
)
return
meta
def
getmeta_default
(
self
,
meta_name
,
default
):
meta
=
self
.
fs
.
getmeta
(
meta_name
,
default
)
if
isinstance
(
meta
,
basestring
):
meta
=
meta
.
decode
(
'base64'
)
meta
=
self
.
decode_path
(
meta
)
return
meta
def
hasmeta
(
self
,
meta_name
):
...
...
@@ -72,7 +71,7 @@ class RPCFSInterface(object):
def
set_contents
(
self
,
path
,
data
):
path
=
self
.
decode_path
(
path
)
self
.
fs
.
setcontents
(
path
,
data
.
data
)
self
.
fs
.
setcontents
(
path
,
data
.
data
)
def
exists
(
self
,
path
):
path
=
self
.
decode_path
(
path
)
...
...
@@ -88,7 +87,7 @@ class RPCFSInterface(object):
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
path
=
self
.
decode_path
(
path
)
entries
=
self
.
fs
.
listdir
(
path
,
wildcard
,
full
,
absolute
,
dirs_only
,
files_only
)
entries
=
self
.
fs
.
listdir
(
path
,
wildcard
,
full
,
absolute
,
dirs_only
,
files_only
)
return
[
self
.
encode_path
(
e
)
for
e
in
entries
]
def
makedir
(
self
,
path
,
recursive
=
False
,
allow_recreate
=
False
):
...
...
@@ -149,7 +148,7 @@ class RPCFSInterface(object):
dst
=
self
.
decode_path
(
dst
)
return
self
.
fs
.
copy
(
src
,
dst
,
overwrite
,
chunk_size
)
def
move
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
16384
):
def
move
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
16384
):
src
=
self
.
decode_path
(
src
)
dst
=
self
.
decode_path
(
dst
)
return
self
.
fs
.
move
(
src
,
dst
,
overwrite
,
chunk_size
)
...
...
@@ -187,11 +186,10 @@ class RPCFSServer(SimpleXMLRPCServer):
if
logRequests
is
not
None
:
kwds
[
'logRequests'
]
=
logRequests
self
.
serve_more_requests
=
True
SimpleXMLRPCServer
.
__init__
(
self
,
addr
,
**
kwds
)
SimpleXMLRPCServer
.
__init__
(
self
,
addr
,
**
kwds
)
self
.
register_instance
(
RPCFSInterface
(
fs
))
def
serve_forever
(
self
):
"""Override serve_forever to allow graceful shutdown."""
while
self
.
serve_more_requests
:
self
.
handle_request
()
fs/filelike.py
View file @
3ea4efe1
...
...
@@ -20,14 +20,14 @@ Other useful classes include:
(think e.g. compression or decryption).
* SpooledTemporaryFile: a version of the builtin SpooledTemporaryFile
class, patched to more closely preserve the
class, patched to more closely preserve the
semantics of a standard file.
* LimitBytesFile: a filelike wrapper that limits the total bytes read
from a file; useful for turning a socket into a file
without reading past end-of-data.
"""
"""
# Copyright (C) 2006-2009, Ryan Kelly
# All rights reserved; available under the terms of the MIT License.
...
...
@@ -55,24 +55,24 @@ else:
from
cStringIO
import
StringIO
as
_StringIO
except
ImportError
:
from
StringIO
import
StringIO
as
_StringIO
class
FileLikeBase
(
object
):
"""Base class for implementing file-like objects.
This class takes a lot of the legwork out of writing file-like objects
with a rich interface. It implements the higher-level file-like methods
on top of five primitive methods: _read, _write, _seek, _tell and
_truncate. See their docstrings for precise details on how these methods
behave.
Subclasses then need only implement some subset of these methods for
rich file-like interface compatibility. They may of course override
other methods as desired.
The class is missing the following attributes and methods, which don't
really make sense for anything but real files:
* fileno()
* isatty()
* encoding
...
...
@@ -104,7 +104,7 @@ class FileLikeBase(object):
expensive to simulate (e.g. compressed files). Note that any file
opened for both reading and writing must also support seeking.
"""
def
__init__
(
self
,
bufsize
=
1024
*
64
):
"""FileLikeBase Constructor.
...
...
@@ -121,8 +121,8 @@ class FileLikeBase(object):
self
.
_rbuffer
=
None
# data that's been read but not returned
self
.
_wbuffer
=
None
# data that's been given but not written
self
.
_sbuffer
=
None
# data between real & apparent file pos
self
.
_soffset
=
0
# internal offset of file pointer
self
.
_soffset
=
0
# internal offset of file pointer
#
# The following five methods are the ones that subclasses are expected
# to implement. Carefully check their docstrings.
...
...
@@ -130,33 +130,33 @@ class FileLikeBase(object):
def
_read
(
self
,
sizehint
=-
1
):
"""Read approximately <sizehint> bytes from the file-like object.
This method is to be implemented by subclasses that wish to be
readable. It should read approximately <sizehint> bytes from the
file and return them as a string. If <sizehint> is missing or
less than or equal to zero, try to read all the remaining contents.
The method need not guarantee any particular number of bytes -
it may return more bytes than requested, or fewer. If needed the
size hint may be completely ignored. It may even return an empty
string if no data is yet available.
Because of this, the method must return None to signify that EOF
has been reached. The higher-level methods will never indicate EOF
until None has been read from _read(). Once EOF is reached, it
should be safe to call _read() again, immediately returning None.
"""
raise
NotReadableError
(
"Object not readable"
)
def
_write
(
self
,
string
,
flushing
=
False
):
"""Write the given string to the file-like object.
This method must be implemented by subclasses wishing to be writable.
It must attempt to write as much of the given data as possible to the
file, but need not guarantee that it is all written. It may return
None to indicate that all data was written, or return as a string any
data that could not be written.
If the keyword argument 'flushing' is true, it indicates that the
internal write buffers are being flushed, and *all* the given data
is expected to be written to the file. If unwritten data is returned
...
...
@@ -166,7 +166,7 @@ class FileLikeBase(object):
def
_seek
(
self
,
offset
,
whence
):
"""Set the file's internal position pointer, approximately.
This method should set the file's position to approximately 'offset'
bytes relative to the position specified by 'whence'. If it is
not possible to position the pointer exactly at the given offset,
...
...
@@ -234,15 +234,15 @@ class FileLikeBase(object):
if
"+"
in
mstr
:
return
True
if
"-"
in
mstr
and
"-"
not
in
mode
:
return
False
return
False
if
"r"
in
mode
:
if
"r"
not
in
mstr
:
return
False
return
False
if
"w"
in
mode
:
if
"w"
not
in
mstr
and
"a"
not
in
mstr
:
return
False
return
True
def
_assert_mode
(
self
,
mode
,
mstr
=
None
):
"""Check whether the file may be accessed in the given mode.
...
...
@@ -265,7 +265,7 @@ class FileLikeBase(object):
if
"w"
not
in
mstr
and
"a"
not
in
mstr
:
raise
NotWritableError
(
"File not opened for writing"
)
return
True
def
flush
(
self
):
"""Flush internal write buffer, if necessary."""
if
self
.
closed
:
...
...
@@ -280,7 +280,7 @@ class FileLikeBase(object):
leftover
=
self
.
_write
(
buffered
,
flushing
=
True
)
if
leftover
and
not
isinstance
(
leftover
,
int
):
raise
IOError
(
"Could not flush write buffer."
)
def
close
(
self
):
"""Flush write buffers and close the file.
...
...
@@ -304,7 +304,7 @@ class FileLikeBase(object):
def
__exit__
(
self
,
exc_type
,
exc_val
,
exc_tb
):
self
.
close
()
return
False
def
next
(
self
):
"""next() method complying with the iterator protocol.
...
...
@@ -315,7 +315,7 @@ class FileLikeBase(object):
if
ln
==
b
(
""
):
raise
StopIteration
()
return
ln
def
__iter__
(
self
):
return
self
...
...
@@ -402,7 +402,7 @@ class FileLikeBase(object):
if
self
.
_soffset
:
pos
=
pos
+
self
.
_soffset
return
pos
def
read
(
self
,
size
=-
1
):
"""Read at most 'size' bytes from the file.
...
...
@@ -484,7 +484,7 @@ class FileLikeBase(object):
data
=
self
.
_do_read
(
self
.
_bufsize
)
while
data
!=
b
(
""
):
data
=
self
.
_do_read
(
self
.
_bufsize
)
def
readline
(
self
,
size
=-
1
):
"""Read a line from the file, or at most <size> bytes."""
bits
=
[]
...
...
@@ -515,11 +515,11 @@ class FileLikeBase(object):
bits
[
-
1
]
=
bits
[
-
1
][:
indx
]
self
.
_rbuffer
=
extra
+
self
.
_rbuffer
return
b
(
""
)
.
join
(
bits
)
def
readlines
(
self
,
sizehint
=-
1
):
"""Return a list of all lines in the file."""
return
[
ln
for
ln
in
self
]
def
xreadlines
(
self
):
"""Iterator over lines in the file - equivalent to iter(self)."""
return
iter
(
self
)
...
...
@@ -531,7 +531,7 @@ class FileLikeBase(object):
self
.
_assert_mode
(
"w-"
)
# If we were previously reading, ensure position is correct
if
self
.
_rbuffer
is
not
None
:
self
.
seek
(
0
,
1
)
self
.
seek
(
0
,
1
)
# If we're actually behind the apparent position, we must also
# write the data in the gap.
if
self
.
_sbuffer
:
...
...
@@ -544,15 +544,17 @@ class FileLikeBase(object):
string
=
self
.
_do_read
(
s
)
+
string
except
NotReadableError
:
raise
NotSeekableError
(
"File not readable, could not complete simulation of seek"
)
self
.
seek
(
0
,
0
)
self
.
seek
(
0
,
0
)
if
self
.
_wbuffer
:
string
=
self
.
_wbuffer
+
string
leftover
=
self
.
_write
(
string
)
if
leftover
is
None
or
isinstance
(
leftover
,
int
):
self
.
_wbuffer
=
b
(
""
)
return
len
(
string
)
-
(
leftover
or
0
)
else
:
self
.
_wbuffer
=
leftover
return
len
(
string
)
-
len
(
leftover
)
def
writelines
(
self
,
seq
):
"""Write a sequence of lines to the file."""
for
ln
in
seq
:
...
...
@@ -561,7 +563,7 @@ class FileLikeBase(object):
class
FileWrapper
(
FileLikeBase
):
"""Base class for objects that wrap a file-like object.
This class provides basic functionality for implementing file-like
objects that wrap another file-like object to alter its functionality
in some way. It takes care of house-keeping duties such as flushing
...
...
@@ -571,7 +573,7 @@ class FileWrapper(FileLikeBase):
By convention, the subclass's constructor should accept this as its
first argument and pass it to its superclass's constructor in the
same position.
This class provides a basic implementation of _read() and _write()
which just calls read() and write() on the wrapped object. Subclasses
will probably want to override these.
...
...
@@ -581,10 +583,10 @@ class FileWrapper(FileLikeBase):
def
__init__
(
self
,
wrapped_file
,
mode
=
None
):
"""FileWrapper constructor.
'wrapped_file' must be a file-like object, which is to be wrapped
in another file-like object to provide additional functionality.
If given, 'mode' must be the access mode string under which
the wrapped file is to be accessed. If not given or None, it
is looked up on the wrapped file if possible. Otherwise, it
...
...
@@ -660,7 +662,7 @@ class FileWrapper(FileLikeBase):
return
data
def
_write
(
self
,
string
,
flushing
=
False
):
return
self
.
wrapped_file
.
write
(
string
)
self
.
wrapped_file
.
write
(
string
)
def
_seek
(
self
,
offset
,
whence
):
self
.
wrapped_file
.
seek
(
offset
,
whence
)
...
...
@@ -669,7 +671,7 @@ class FileWrapper(FileLikeBase):
return
self
.
wrapped_file
.
tell
()
def
_truncate
(
self
,
size
):
return
self
.
wrapped_file
.
truncate
(
size
)
return
self
.
wrapped_file
.
truncate
(
size
)
class
StringIO
(
FileWrapper
):
...
...
fs/ftpfs.py
View file @
3ea4efe1
...
...
@@ -8,19 +8,20 @@ FTPFS is a filesystem for accessing an FTP server (uses ftplib in standard libra
__all__
=
[
'FTPFS'
]
import
sys
import
sys
import
fs
from
fs.base
import
*
from
fs.errors
import
*
from
fs.path
import
pathsplit
,
abspath
,
dirname
,
recursepath
,
normpath
,
pathjoin
,
isbase
from
fs
import
iotools
from
ftplib
import
FTP
,
error_perm
,
error_temp
,
error_proto
,
error_reply
try
:
from
ftplib
import
_GLOBAL_DEFAULT_TIMEOUT
from
ftplib
import
_GLOBAL_DEFAULT_TIMEOUT
except
ImportError
:
_GLOBAL_DEFAULT_TIMEOUT
=
object
()
_GLOBAL_DEFAULT_TIMEOUT
=
object
()
import
threading
import
datetime
...
...
@@ -596,15 +597,15 @@ def _skip(s, i, c):
return
i
def
fileftperrors
(
f
):
def
fileftperrors
(
f
):
@wraps
(
f
)
def
deco
(
self
,
*
args
,
**
kwargs
):
self
.
_lock
.
acquire
()
try
:
try
:
try
:
ret
=
f
(
self
,
*
args
,
**
kwargs
)
except
Exception
,
e
:
self
.
ftpfs
.
_translate_exception
(
args
[
0
]
if
args
else
''
,
e
)
except
Exception
,
e
:
self
.
ftpfs
.
_translate_exception
(
args
[
0
]
if
args
else
''
,
e
)
finally
:
self
.
_lock
.
release
()
return
ret
...
...
@@ -620,7 +621,7 @@ class _FTPFile(object):
def
__init__
(
self
,
ftpfs
,
ftp
,
path
,
mode
):
if
not
hasattr
(
self
,
'_lock'
):
self
.
_lock
=
threading
.
RLock
()
self
.
_lock
=
threading
.
RLock
()
self
.
ftpfs
=
ftpfs
self
.
ftp
=
ftp
self
.
path
=
normpath
(
path
)
...
...
@@ -760,29 +761,29 @@ class _FTPFile(object):
def
truncate
(
self
,
size
=
None
):
self
.
ftpfs
.
_on_file_written
(
self
.
path
)
# Inefficient, but I don't know how else to implement this
if
size
is
None
:
if
size
is
None
:
size
=
self
.
tell
()
if
self
.
conn
is
not
None
:
if
self
.
conn
is
not
None
:
self
.
conn
.
close
()
self
.
close
()
read_f
=
None
read_f
=
None
try
:
read_f
=
self
.
ftpfs
.
open
(
self
.
path
,
'rb'
)
data
=
read_f
.
read
(
size
)
finally
:
if
read_f
is
not
None
:
read_f
.
close
()
self
.
ftp
=
self
.
ftpfs
.
_open_ftp
()
read_f
.
close
()
self
.
ftp
=
self
.
ftpfs
.
_open_ftp
()
self
.
mode
=
'w'
self
.
__init__
(
self
.
ftpfs
,
self
.
ftp
,
_encode
(
self
.
path
),
self
.
mode
)
#self._start_file(self.mode, self.path)
#self._start_file(self.mode, self.path)
self
.
write
(
data
)
if
len
(
data
)
<
size
:
self
.
write
(
'
\0
'
*
(
size
-
len
(
data
)))
@fileftperrors
def
close
(
self
):
...
...
@@ -800,7 +801,7 @@ class _FTPFile(object):
self
.
ftp
.
close
()
except
error_temp
,
error_perm
:
pass
self
.
closed
=
True
self
.
closed
=
True
def
__iter__
(
self
):
return
self
.
next
()
...
...
@@ -837,7 +838,7 @@ class _FTPFile(object):
yield
line
append
(
c
)
def
ftperrors
(
f
):
def
ftperrors
(
f
):
@wraps
(
f
)
def
deco
(
self
,
*
args
,
**
kwargs
):
self
.
_lock
.
acquire
()
...
...
@@ -849,33 +850,33 @@ def ftperrors(f):
except
Exception
,
e
:
self
.
_translate_exception
(
args
[
0
]
if
args
else
''
,
e
)
finally
:
self
.
_leave_dircache
()
self
.
_leave_dircache
()
finally
:
self
.
_lock
.
release
()
return
ret
return
deco
def _encode(s):
    """Return *s* as UTF-8 bytes when it is a unicode path, unchanged otherwise."""
    return s.encode('utf-8') if isinstance(s, unicode) else s
class
_DirCache
(
dict
):
def
__init__
(
self
):
super
(
_DirCache
,
self
)
.
__init__
()
def
__init__
(
self
):
super
(
_DirCache
,
self
)
.
__init__
()
self
.
count
=
0
def
addref
(
self
):
self
.
count
+=
1
return
self
.
count
def
decref
(
self
):
self
.
count
-=
1
return
self
.
count
class
FTPFS
(
FS
):
class
FTPFS
(
FS
):
_meta
=
{
'thread_safe'
:
True
,
'network'
:
True
,
'virtual'
:
False
,
...
...
@@ -883,7 +884,7 @@ class FTPFS(FS):
'unicode_paths'
:
True
,
'case_insensitive_paths'
:
False
,
'atomic.move'
:
True
,
'atomic.copy'
:
True
,
'atomic.copy'
:
True
,
'atomic.makedir'
:
True
,
'atomic.rename'
:
True
,
'atomic.setcontents'
:
False
,
...
...
@@ -892,7 +893,7 @@ class FTPFS(FS):
def
__init__
(
self
,
host
=
''
,
user
=
''
,
passwd
=
''
,
acct
=
''
,
timeout
=
_GLOBAL_DEFAULT_TIMEOUT
,
port
=
21
,
dircache
=
True
):
"""Connect to a FTP server.
:param host: Host to connect to
:param user: Username, or a blank string for anonymous
:param passwd: Password, if required
...
...
@@ -902,7 +903,7 @@ class FTPFS(FS):
:param dircache: If True then directory information will be cached,
speeding up operations such as `getinfo`, `isdir`, `isfile`, but
changes to the ftp file structure will not be visible until
:meth:`~fs.ftpfs.FTPFS.clear_dircache` is called
:meth:`~fs.ftpfs.FTPFS.clear_dircache` is called
"""
...
...
@@ -915,28 +916,28 @@ class FTPFS(FS):
self
.
acct
=
acct
self
.
timeout
=
timeout
self
.
default_timeout
=
timeout
is
_GLOBAL_DEFAULT_TIMEOUT
self
.
use_dircache
=
dircache
self
.
use_dircache
=
dircache
self
.
use_mlst
=
False
self
.
_lock
=
threading
.
RLock
()
self
.
_init_dircache
()
self
.
_cache_hint
=
False
try
:
try
:
self
.
ftp
except
FSError
:
self
.
closed
=
True
raise
def
_init_dircache
(
self
):
def
_init_dircache
(
self
):
self
.
dircache
=
_DirCache
()
@synchronize
def
cache_hint
(
self
,
enabled
):
def
cache_hint
(
self
,
enabled
):
self
.
_cache_hint
=
bool
(
enabled
)
def
_enter_dircache
(
self
):
self
.
dircache
.
addref
()
self
.
dircache
.
addref
()
def
_leave_dircache
(
self
):
self
.
dircache
.
decref
()
...
...
@@ -945,8 +946,8 @@ class FTPFS(FS):
self
.
clear_dircache
()
else
:
self
.
clear_dircache
()
assert
self
.
dircache
.
count
>=
0
,
"dircache count should never be negative"
assert
self
.
dircache
.
count
>=
0
,
"dircache count should never be negative"
@synchronize
def
_on_file_written
(
self
,
path
):
self
.
refresh_dircache
(
dirname
(
path
))
...
...
@@ -979,7 +980,7 @@ class FTPFS(FS):
def
on_line
(
line
):
if
not
isinstance
(
line
,
unicode
):
line
=
line
.
decode
(
'utf-8'
)
line
=
line
.
decode
(
'utf-8'
)
info
=
parse_ftp_list_line
(
line
,
self
.
use_mlst
)
if
info
:
info
=
info
.
__dict__
...
...
@@ -1027,24 +1028,24 @@ class FTPFS(FS):
:param path: Path of directory to clear cache for, or all directories if
None (the default)
"""
"""
if
not
paths
:
self
.
dircache
.
clear
()
else
:
else
:
dircache
=
self
.
dircache
paths
=
[
normpath
(
abspath
(
path
))
for
path
in
paths
]
for
cached_path
in
dircache
.
keys
():
for
cached_path
in
dircache
.
keys
():
for
path
in
paths
:
if
isbase
(
cached_path
,
path
):
dircache
.
pop
(
cached_path
,
None
)
break
@synchronize
def
refresh_dircache
(
self
,
*
paths
):
def
refresh_dircache
(
self
,
*
paths
):
for
path
in
paths
:
path
=
abspath
(
normpath
(
path
))
self
.
dircache
.
pop
(
path
,
None
)
self
.
dircache
.
pop
(
path
,
None
)
@synchronize
def
_check_path
(
self
,
path
):
...
...
@@ -1065,20 +1066,20 @@ class FTPFS(FS):
@ftperrors
def
get_ftp
(
self
):
if
self
.
closed
:
return
None
return
None
if
not
getattr
(
self
,
'_ftp'
,
None
):
self
.
_ftp
=
self
.
_open_ftp
()
return
self
.
_ftp
return
self
.
_ftp
ftp
=
property
(
get_ftp
)
@ftperrors
def
_open_ftp
(
self
):
try
:
ftp
=
FTP
()
ftp
=
FTP
()
if
self
.
default_timeout
or
sys
.
version_info
<
(
2
,
6
,):
ftp
.
connect
(
self
.
host
,
self
.
port
)
else
:
else
:
ftp
.
connect
(
self
.
host
,
self
.
port
,
self
.
timeout
)
ftp
.
login
(
self
.
user
,
self
.
passwd
,
self
.
acct
)
except
socket_error
,
e
:
...
...
@@ -1086,13 +1087,13 @@ class FTPFS(FS):
return
ftp
def
__getstate__
(
self
):
state
=
super
(
FTPFS
,
self
)
.
__getstate__
()
state
=
super
(
FTPFS
,
self
)
.
__getstate__
()
del
state
[
'_lock'
]
state
.
pop
(
'_ftp'
,
None
)
return
state
def
__setstate__
(
self
,
state
):
super
(
FTPFS
,
self
)
.
__setstate__
(
state
)
super
(
FTPFS
,
self
)
.
__setstate__
(
state
)
self
.
_init_dircache
()
self
.
_lock
=
threading
.
RLock
()
#self._ftp = None
...
...
@@ -1127,7 +1128,7 @@ class FTPFS(FS):
code
,
message
=
str
(
exception
)
.
split
(
' '
,
1
)
code
=
int
(
code
)
if
code
==
550
:
pass
pass
if
code
==
552
:
raise
StorageSpaceError
raise
PermissionDeniedError
(
str
(
exception
),
path
=
path
,
msg
=
"FTP error:
%
s"
%
str
(
exception
),
details
=
exception
)
...
...
@@ -1140,53 +1141,56 @@ class FTPFS(FS):
try
:
self
.
ftp
.
close
()
except
FSError
:
pass
pass
self
.
closed
=
True
def
getpathurl
(
self
,
path
,
allow_none
=
False
):
path
=
normpath
(
path
)
credentials
=
'
%
s:
%
s'
%
(
self
.
user
,
self
.
passwd
)
if
credentials
==
':'
:
credentials
=
'
%
s:
%
s'
%
(
self
.
user
,
self
.
passwd
)
if
credentials
==
':'
:
url
=
'ftp://
%
s
%
s'
%
(
self
.
host
.
rstrip
(
'/'
),
abspath
(
path
))
else
:
url
=
'ftp://
%
s@
%
s
%
s'
%
(
credentials
,
self
.
host
.
rstrip
(
'/'
),
abspath
(
path
))
return
url
url
=
'ftp://
%
s@
%
s
%
s'
%
(
credentials
,
self
.
host
.
rstrip
(
'/'
),
abspath
(
path
))
return
url
@iotools.filelike_to_stream
@ftperrors
def
open
(
self
,
path
,
mode
=
'r'
):
def
open
(
self
,
path
,
mode
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
path
=
normpath
(
path
)
mode
=
mode
.
lower
()
mode
=
mode
.
lower
()
if
self
.
isdir
(
path
):
raise
ResourceInvalidError
(
path
)
raise
ResourceInvalidError
(
path
)
if
'r'
in
mode
or
'a'
in
mode
:
if
not
self
.
isfile
(
path
):
raise
ResourceNotFoundError
(
path
)
if
'w'
in
mode
or
'a'
in
mode
or
'+'
in
mode
:
self
.
refresh_dircache
(
dirname
(
path
))
ftp
=
self
.
_open_ftp
()
f
=
_FTPFile
(
self
,
ftp
,
normpath
(
path
),
mode
)
return
f
f
=
_FTPFile
(
self
,
ftp
,
normpath
(
path
),
mode
)
return
f
@ftperrors
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
1024
*
64
):
path
=
normpath
(
path
)
if
isinstance
(
data
,
basestring
):
data
=
StringIO
(
data
)
self
.
refresh_dircache
(
dirname
(
path
))
self
.
ftp
.
storbinary
(
'STOR
%
s'
%
_encode
(
path
),
data
,
blocksize
=
chunk_size
)
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
):
path
=
normpath
(
path
)
data
=
iotools
.
make_bytes_io
(
data
,
encoding
=
encoding
,
errors
=
errors
)
self
.
refresh_dircache
(
dirname
(
path
))
self
.
ftp
.
storbinary
(
'STOR
%
s'
%
_encode
(
path
),
data
,
blocksize
=
chunk_size
)
@ftperrors
def
getcontents
(
self
,
path
,
mode
=
"rb"
):
path
=
normpath
(
path
)
contents
=
StringIO
()
self
.
ftp
.
retrbinary
(
'RETR
%
s'
%
_encode
(
path
),
contents
.
write
,
blocksize
=
1024
*
64
)
return
contents
.
getvalue
()
def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
    """Download the remote file at *path* and return its contents.

    :param path: path of the file on the FTP server
    :param mode: 'rb' (default) returns raw bytes; a mode without 'b'
        decodes the downloaded bytes to text
    :param encoding: encoding used when decoding text (text mode only)
    :param errors: codec error handling used when decoding (text mode only)
    :param newline: accepted for interface compatibility; not used here
    """
    path = normpath(path)
    contents = StringIO()
    self.ftp.retrbinary('RETR %s' % _encode(path), contents.write, blocksize=1024 * 64)
    data = contents.getvalue()
    # Bug fix: the binary check must inspect *mode*, not the downloaded
    # payload -- the original tested `'b' in data`, which skipped text
    # decoding whenever the file contents happened to contain a "b".
    if 'b' in mode:
        return data
    return iotools.decode_binary(data, encoding=encoding, errors=errors)
@ftperrors
def
exists
(
self
,
path
):
path
=
normpath
(
path
)
if
path
in
(
''
,
'/'
):
return
True
return
True
dirlist
,
fname
=
self
.
_get_dirlist
(
path
)
return
fname
in
dirlist
...
...
@@ -1213,7 +1217,7 @@ class FTPFS(FS):
return
not
info
[
'try_cwd'
]
@ftperrors
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
path
=
normpath
(
path
)
#self.clear_dircache(path)
if
not
self
.
exists
(
path
):
...
...
@@ -1242,7 +1246,7 @@ class FTPFS(FS):
return
{}
return
[(
p
,
getinfo
(
p
))
for
p
in
self
.
listdir
(
path
,
for
p
in
self
.
listdir
(
path
,
wildcard
=
wildcard
,
full
=
full
,
absolute
=
absolute
,
...
...
@@ -1281,20 +1285,20 @@ class FTPFS(FS):
if
self
.
isfile
(
path
):
raise
ResourceInvalidError
(
path
)
raise
DestinationExistsError
(
path
)
checkdir
(
path
)
checkdir
(
path
)
@ftperrors
def
remove
(
self
,
path
):
def
remove
(
self
,
path
):
if
not
self
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
if
not
self
.
isfile
(
path
):
raise
ResourceInvalidError
(
path
)
self
.
refresh_dircache
(
dirname
(
path
))
self
.
ftp
.
delete
(
_encode
(
path
))
self
.
refresh_dircache
(
dirname
(
path
))
self
.
ftp
.
delete
(
_encode
(
path
))
@ftperrors
def
removedir
(
self
,
path
,
recursive
=
False
,
force
=
False
):
path
=
abspath
(
normpath
(
path
))
path
=
abspath
(
normpath
(
path
))
if
not
self
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
if
self
.
isfile
(
path
):
...
...
@@ -1328,10 +1332,10 @@ class FTPFS(FS):
self
.
clear_dircache
(
dirname
(
path
),
path
)
@ftperrors
def
rename
(
self
,
src
,
dst
):
def
rename
(
self
,
src
,
dst
):
try
:
self
.
refresh_dircache
(
dirname
(
src
),
dirname
(
dst
))
self
.
ftp
.
rename
(
_encode
(
src
),
_encode
(
dst
))
self
.
ftp
.
rename
(
_encode
(
src
),
_encode
(
dst
))
except
error_perm
,
exception
:
code
,
message
=
str
(
exception
)
.
split
(
' '
,
1
)
if
code
==
"550"
:
...
...
@@ -1339,7 +1343,7 @@ class FTPFS(FS):
raise
ParentDirectoryMissingError
(
dst
)
raise
except
error_reply
:
pass
pass
@ftperrors
def
getinfo
(
self
,
path
):
...
...
@@ -1386,46 +1390,46 @@ class FTPFS(FS):
def
move
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
16384
):
if
not
overwrite
and
self
.
exists
(
dst
):
raise
DestinationExistsError
(
dst
)
#self.refresh_dircache(dirname(src), dirname(dst))
#self.refresh_dircache(dirname(src), dirname(dst))
try
:
self
.
rename
(
src
,
dst
)
except
:
self
.
copy
(
src
,
dst
,
overwrite
=
overwrite
)
self
.
remove
(
src
)
finally
:
self
.
refresh_dircache
(
src
,
dirname
(
src
),
dst
,
dirname
(
dst
))
@ftperrors
def
copy
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
1024
*
64
):
self
.
refresh_dircache
(
src
,
dirname
(
src
),
dst
,
dirname
(
dst
))
@ftperrors
def
copy
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
1024
*
64
):
if
not
self
.
isfile
(
src
):
if
self
.
isdir
(
src
):
raise
ResourceInvalidError
(
src
,
msg
=
"Source is not a file:
%(path)
s"
)
raise
ResourceNotFoundError
(
src
)
if
not
overwrite
and
self
.
exists
(
dst
):
raise
DestinationExistsError
(
dst
)
dst
=
normpath
(
dst
)
src_file
=
None
src_file
=
None
try
:
src_file
=
self
.
open
(
src
,
"rb"
)
ftp
=
self
.
_open_ftp
()
ftp
.
voidcmd
(
'TYPE I'
)
ftp
.
storbinary
(
'STOR
%
s'
%
_encode
(
normpath
(
dst
)),
src_file
,
blocksize
=
chunk_size
)
ftp
=
self
.
_open_ftp
()
ftp
.
voidcmd
(
'TYPE I'
)
ftp
.
storbinary
(
'STOR
%
s'
%
_encode
(
normpath
(
dst
)),
src_file
,
blocksize
=
chunk_size
)
finally
:
self
.
refresh_dircache
(
dirname
(
dst
))
self
.
refresh_dircache
(
dirname
(
dst
))
if
src_file
is
not
None
:
src_file
.
close
()
src_file
.
close
()
@ftperrors
def
movedir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
16384
):
self
.
clear_dircache
(
dirname
(
src
),
dirname
(
dst
))
super
(
FTPFS
,
self
)
.
movedir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
self
.
clear_dircache
(
dirname
(
src
),
dirname
(
dst
))
super
(
FTPFS
,
self
)
.
movedir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
@ftperrors
def
copydir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
16384
):
self
.
clear_dircache
(
dirname
(
dst
))
super
(
FTPFS
,
self
)
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
self
.
clear_dircache
(
dirname
(
dst
))
super
(
FTPFS
,
self
)
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
if
__name__
==
"__main__"
:
...
...
fs/httpfs.py
View file @
3ea4efe1
...
...
@@ -8,41 +8,45 @@ fs.httpfs
from
fs.base
import
FS
from
fs.path
import
normpath
from
fs.errors
import
ResourceNotFoundError
,
UnsupportedError
from
fs.filelike
import
FileWrapper
from
fs
import
iotools
from
urllib2
import
urlopen
,
URLError
from
datetime
import
datetime
from
fs.filelike
import
FileWrapper
class
HTTPFS
(
FS
):
"""Can barely be called a filesystem, because HTTP servers generally don't support
"""Can barely be called a filesystem, because HTTP servers generally don't support
typical filesystem functionality. This class exists to allow the :doc:`opener` system
to read files over HTTP.
to read files over HTTP.
If you do need filesystem like functionality over HTTP, see :mod:`fs.contrib.davfs`.
"""
_meta
=
{
'read_only'
:
True
,
'network'
:
True
,
}
_meta
=
{
'read_only'
:
True
,
'network'
:
True
}
def
__init__
(
self
,
url
):
"""
:param url: The base URL
"""
self
.
root_url
=
url
def
_make_url
(
self
,
path
):
path
=
normpath
(
path
)
url
=
'
%
s/
%
s'
%
(
self
.
root_url
.
rstrip
(
'/'
),
path
.
lstrip
(
'/'
))
return
url
def
open
(
self
,
path
,
mode
=
"r"
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
if
'+'
in
mode
or
'w'
in
mode
or
'a'
in
mode
:
raise
UnsupportedError
(
'write'
)
url
=
self
.
_make_url
(
path
)
try
:
f
=
urlopen
(
url
)
...
...
@@ -50,15 +54,15 @@ class HTTPFS(FS):
raise
ResourceNotFoundError
(
path
,
details
=
e
)
except
OSError
,
e
:
raise
ResourceNotFoundError
(
path
,
details
=
e
)
return
FileWrapper
(
f
)
def
exists
(
self
,
path
):
return
self
.
isfile
(
path
)
def
isdir
(
self
,
path
):
return
False
def
isfile
(
self
,
path
):
url
=
self
.
_make_url
(
path
)
f
=
None
...
...
@@ -70,9 +74,9 @@ class HTTPFS(FS):
finally
:
if
f
is
not
None
:
f
.
close
()
return
True
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
...
...
fs/iotools.py
View file @
3ea4efe1
from
__future__
import
unicode_literals
from
__future__
import
print_function
import
io
from
functools
import
wraps
import
six
class
RawWrapper
(
object
):
"""Convert a Python 2 style file-like object in to a IO object"""
def
__init__
(
self
,
f
,
mode
=
None
,
name
=
None
):
self
.
_f
=
f
self
.
is_io
=
isinstance
(
f
,
io
.
IOBase
)
if
mode
is
None
and
hasattr
(
f
,
'mode'
):
mode
=
f
.
mode
self
.
mode
=
mode
self
.
name
=
name
self
.
closed
=
False
super
(
RawWrapper
,
self
)
.
__init__
()
def
__repr__
(
self
):
...
...
@@ -35,12 +39,18 @@ class RawWrapper(object):
return
self
.
_f
.
seek
(
offset
,
whence
)
def
readable
(
self
):
if
hasattr
(
self
.
_f
,
'readable'
):
return
self
.
_f
.
readable
()
return
'r'
in
self
.
mode
def writable(self):
    """Return True if the wrapped file supports writing.

    Mirrors readable(): delegate to the wrapped object's ``writable()``
    when it provides one, otherwise fall back to inspecting the mode string.
    """
    # Bug fix: the original probed the misspelled attribute 'writeable'
    # and then called it on the non-existent `self._fs`, which would raise
    # AttributeError whenever the branch was taken.
    if hasattr(self._f, 'writable'):
        return self._f.writable()
    return 'w' in self.mode
def
seekable
(
self
):
if
hasattr
(
self
.
_f
,
'seekable'
):
return
self
.
_f
.
seekable
()
try
:
self
.
seek
(
0
,
io
.
SEEK_CUR
)
except
IOError
:
...
...
@@ -51,11 +61,14 @@ class RawWrapper(object):
def
tell
(
self
):
return
self
.
_f
.
tell
()
def
truncate
(
self
,
size
):
def
truncate
(
self
,
size
=
None
):
return
self
.
_f
.
truncate
(
size
)
def
write
(
self
,
data
):
return
self
.
_f
.
write
(
data
)
if
self
.
is_io
:
return
self
.
_f
.
write
(
data
)
self
.
_f
.
write
(
data
)
return
len
(
data
)
def
read
(
self
,
n
=-
1
):
if
n
==
-
1
:
...
...
@@ -63,21 +76,21 @@ class RawWrapper(object):
return
self
.
_f
.
read
(
n
)
def read1(self, n=-1):
    """Read and return up to *n* bytes, using at most one raw read.

    Delegates to the wrapped io object's ``read1()`` when available;
    otherwise emulates it with an ordinary ``read()``.
    """
    if self.is_io:
        # Bug fix: must delegate to the wrapped file; the original called
        # self.read1(n) here, recursing infinitely.
        return self._f.read1(n)
    return self.read(n)
def
readall
(
self
):
return
self
.
_f
.
read
()
def readinto(self, b):
    """Read bytes into the pre-allocated buffer *b*; return the number read."""
    if self.is_io:
        # True io objects implement readinto natively.
        return self._f.readinto(b)
    # Emulate readinto for plain Python 2 file-like objects.
    data = self._f.read(len(b))
    count = len(data)
    b[:count] = data
    return count
def
write
(
self
,
b
):
bytes_written
=
self
.
_f
.
write
(
b
)
return
bytes_written
def
writelines
(
self
,
sequence
):
return
self
.
_f
.
writelines
(
sequence
)
...
...
@@ -87,6 +100,32 @@ class RawWrapper(object):
def
__exit__
(
self
,
*
args
,
**
kwargs
):
self
.
close
()
def
__iter__
(
self
):
return
iter
(
self
.
_f
)
def filelike_to_stream(f):
    """Decorator for ``open``-style methods that return a Python 2 file-like
    object: wraps the returned object with ``make_stream`` so callers get an
    ``io``-module style stream instead."""
    @wraps(f)
    def wrapper(self, path, mode='rt', buffering=-1, encoding=None,
                errors=None, newline=None, line_buffering=False, **kwargs):
        file_like = f(self,
                      path,
                      mode=mode,
                      buffering=buffering,
                      encoding=encoding,
                      errors=errors,
                      newline=newline,
                      line_buffering=line_buffering,
                      **kwargs)
        return make_stream(path,
                           file_like,
                           mode=mode,
                           buffering=buffering,
                           encoding=encoding,
                           errors=errors,
                           newline=newline,
                           line_buffering=line_buffering)
    return wrapper
def
make_stream
(
name
,
f
,
...
...
@@ -95,9 +134,8 @@ def make_stream(name,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
closefd
=
True
,
line_buffering
=
False
,
**
param
s
):
**
kwarg
s
):
"""Take a Python 2.x binary file and returns an IO Stream"""
r
,
w
,
a
,
binary
=
'r'
in
mode
,
'w'
in
mode
,
'a'
in
mode
,
'b'
in
mode
if
'+'
in
mode
:
...
...
@@ -122,6 +160,51 @@ def make_stream(name,
return
io_object
def decode_binary(data, encoding=None, errors=None, newline=None):
    """Decode bytes as though read from a text file."""
    buffer = io.BytesIO(data)
    reader = io.TextIOWrapper(buffer, encoding=encoding, errors=errors, newline=newline)
    return reader.read()
def make_bytes_io(data, encoding=None, errors=None):
    """Make a bytes IO object from either a string or an open file."""
    is_binary_file = hasattr(data, 'mode') and 'b' in data.mode
    if is_binary_file:
        # Already an open binary file; use it as-is.
        return data
    if not isinstance(data, basestring):
        # An open file of unknown mode; slurp its contents.
        # TODO: Is there a better way than reading the entire file?
        data = data.read() or b''
    if isinstance(data, six.text_type):
        # Text must be encoded to bytes before wrapping.
        data = data.encode(encoding=encoding, errors=errors)
    return io.BytesIO(data)
def copy_file_to_fs(f, fs, path, encoding=None, errors=None,
                    progress_callback=None, chunk_size=64 * 1024):
    """Copy an open file to a path on an FS.

    The first chunk is read before the destination is opened so that its
    type (text vs bytes) can select the destination mode.  Returns the
    total number of bytes written.
    """
    report_progress = progress_callback if progress_callback is not None else (lambda bytes_written: None)
    read = f.read
    chunk = read(chunk_size)
    if isinstance(chunk, six.text_type):
        dst = fs.open(path, 'wt', encoding=encoding, errors=errors)
    else:
        dst = fs.open(path, 'wb')
    bytes_written = 0
    try:
        while chunk:
            dst.write(chunk)
            bytes_written += len(chunk)
            report_progress(bytes_written)
            chunk = read(chunk_size)
    finally:
        dst.close()
    return bytes_written
if
__name__
==
"__main__"
:
print
(
"Reading a binary file"
)
bin_file
=
open
(
'tests/data/UTF-8-demo.txt'
,
'rb'
)
...
...
fs/memoryfs.py
View file @
3ea4efe1
...
...
@@ -17,6 +17,7 @@ from fs.base import *
from
fs.errors
import
*
from
fs
import
_thread_synchronize_default
from
fs.filelike
import
StringIO
from
fs
import
iotools
from
os
import
SEEK_END
import
threading
...
...
@@ -31,7 +32,7 @@ def _check_mode(mode, mode_chars):
return
True
class
MemoryFile
(
object
):
def
seek_and_lock
(
f
):
def
deco
(
self
,
*
args
,
**
kwargs
):
try
:
...
...
@@ -40,7 +41,7 @@ class MemoryFile(object):
ret
=
f
(
self
,
*
args
,
**
kwargs
)
self
.
pos
=
self
.
mem_file
.
tell
()
return
ret
finally
:
finally
:
self
.
_lock
.
release
()
return
deco
...
...
@@ -48,12 +49,12 @@ class MemoryFile(object):
self
.
closed
=
False
self
.
path
=
path
self
.
memory_fs
=
memory_fs
self
.
mem_file
=
mem_file
self
.
mode
=
mode
self
.
mem_file
=
mem_file
self
.
mode
=
mode
self
.
_lock
=
lock
self
.
pos
=
0
self
.
pos
=
0
if
_check_mode
(
mode
,
'a'
):
lock
.
acquire
()
try
:
...
...
@@ -61,7 +62,7 @@ class MemoryFile(object):
self
.
pos
=
self
.
mem_file
.
tell
()
finally
:
lock
.
release
()
elif
_check_mode
(
mode
,
'w'
):
lock
.
acquire
()
try
:
...
...
@@ -69,7 +70,7 @@ class MemoryFile(object):
self
.
mem_file
.
truncate
()
finally
:
lock
.
release
()
assert
self
.
mem_file
is
not
None
,
"self.mem_file should have a value"
...
...
@@ -77,7 +78,7 @@ class MemoryFile(object):
return
"<MemoryFile in
%
s
%
s>"
%
(
self
.
memory_fs
,
self
.
path
)
def
__repr__
(
self
):
return
u"<MemoryFile in
%
s
%
s>"
%
(
self
.
memory_fs
,
self
.
path
)
return
u"<MemoryFile in
%
s
%
s>"
%
(
self
.
memory_fs
,
self
.
path
)
def
__unicode__
(
self
):
return
u"<MemoryFile in
%
s
%
s>"
%
(
self
.
memory_fs
,
self
.
path
)
...
...
@@ -94,10 +95,10 @@ class MemoryFile(object):
raise
IOError
(
"File not open for reading"
)
self
.
mem_file
.
seek
(
self
.
pos
)
for
line
in
self
.
mem_file
:
yield
line
yield
line
@seek_and_lock
def
next
(
self
):
def
next
(
self
):
if
'r'
not
in
self
.
mode
and
'+'
not
in
self
.
mode
:
raise
IOError
(
"File not open for reading"
)
return
self
.
mem_file
.
next
()
...
...
@@ -116,9 +117,9 @@ class MemoryFile(object):
if
do_close
:
self
.
closed
=
True
finally
:
self
.
_lock
.
release
()
self
.
_lock
.
release
()
if
do_close
:
self
.
memory_fs
.
_on_close_memory_file
(
self
,
self
.
path
)
self
.
memory_fs
.
_on_close_memory_file
(
self
,
self
.
path
)
@seek_and_lock
def
read
(
self
,
size
=
None
):
...
...
@@ -129,7 +130,7 @@ class MemoryFile(object):
return
self
.
mem_file
.
read
(
size
)
@seek_and_lock
def
seek
(
self
,
*
args
,
**
kwargs
):
def
seek
(
self
,
*
args
,
**
kwargs
):
return
self
.
mem_file
.
seek
(
*
args
,
**
kwargs
)
@seek_and_lock
...
...
@@ -143,7 +144,7 @@ class MemoryFile(object):
return
self
.
mem_file
.
truncate
(
*
args
,
**
kwargs
)
#@seek_and_lock
def
write
(
self
,
data
):
def
write
(
self
,
data
):
if
'r'
in
self
.
mode
and
'+'
not
in
self
.
mode
:
raise
IOError
(
"File not open for writing"
)
self
.
memory_fs
.
_on_modify_memory_file
(
self
.
path
)
...
...
@@ -156,7 +157,7 @@ class MemoryFile(object):
self
.
_lock
.
release
()
@seek_and_lock
def
writelines
(
self
,
*
args
,
**
kwargs
):
def
writelines
(
self
,
*
args
,
**
kwargs
):
return
self
.
mem_file
.
writelines
(
*
args
,
**
kwargs
)
def
__enter__
(
self
):
...
...
@@ -192,22 +193,22 @@ class DirEntry(object):
contents
=
{}
self
.
open_files
=
[]
self
.
contents
=
contents
self
.
mem_file
=
None
self
.
contents
=
contents
self
.
mem_file
=
None
self
.
created_time
=
datetime
.
datetime
.
now
()
self
.
modified_time
=
self
.
created_time
self
.
accessed_time
=
self
.
created_time
self
.
xattrs
=
{}
self
.
lock
=
None
if
self
.
type
==
'file'
:
self
.
mem_file
=
StringIO
()
self
.
mem_file
=
StringIO
()
self
.
lock
=
threading
.
RLock
()
def
get_value
(
self
):
self
.
lock
.
acquire
()
try
:
try
:
return
self
.
mem_file
.
getvalue
()
finally
:
self
.
lock
.
release
()
...
...
@@ -227,15 +228,15 @@ class DirEntry(object):
def
__str__
(
self
):
return
"
%
s:
%
s"
%
(
self
.
name
,
self
.
desc_contents
())
@sync
def
__getstate__
(
self
):
state
=
self
.
__dict__
.
copy
()
state
=
self
.
__dict__
.
copy
()
state
.
pop
(
'lock'
)
if
self
.
mem_file
is
not
None
:
state
[
'mem_file'
]
=
self
.
data
return
state
def
__setstate__
(
self
,
state
):
self
.
__dict__
.
update
(
state
)
if
self
.
type
==
'file'
:
...
...
@@ -244,16 +245,16 @@ class DirEntry(object):
self
.
lock
=
None
if
self
.
mem_file
is
not
None
:
data
=
self
.
mem_file
self
.
mem_file
=
StringIO
()
self
.
mem_file
.
write
(
data
)
self
.
mem_file
=
StringIO
()
self
.
mem_file
.
write
(
data
)
class
MemoryFS
(
FS
):
"""An in-memory filesystem.
"""
_meta
=
{
'thread_safe'
:
True
,
_meta
=
{
'thread_safe'
:
True
,
'network'
:
False
,
'virtual'
:
False
,
'read_only'
:
False
,
...
...
@@ -263,7 +264,7 @@ class MemoryFS(FS):
'atomic.copy'
:
False
,
'atomic.makedir'
:
True
,
'atomic.rename'
:
True
,
'atomic.setcontents'
:
False
,
'atomic.setcontents'
:
False
,
}
def
_make_dir_entry
(
self
,
*
args
,
**
kwargs
):
...
...
@@ -277,13 +278,13 @@ class MemoryFS(FS):
if
not
callable
(
self
.
file_factory
):
raise
ValueError
(
"file_factory should be callable"
)
self
.
root
=
self
.
_make_dir_entry
(
'dir'
,
'root'
)
self
.
root
=
self
.
_make_dir_entry
(
'dir'
,
'root'
)
def
__str__
(
self
):
return
"<MemoryFS>"
def
__repr__
(
self
):
return
"MemoryFS()"
return
"MemoryFS()"
def
__unicode__
(
self
):
return
"<MemoryFS>"
...
...
@@ -300,7 +301,7 @@ class MemoryFS(FS):
return
None
current_dir
=
dir_entry
return
current_dir
@synchronize
def
_dir_entry
(
self
,
path
):
dir_entry
=
self
.
_get_dir_entry
(
path
)
...
...
@@ -338,7 +339,7 @@ class MemoryFS(FS):
return
dir_item
.
isfile
()
@synchronize
def
exists
(
self
,
path
):
def
exists
(
self
,
path
):
path
=
normpath
(
path
)
if
path
in
(
''
,
'/'
):
return
True
...
...
@@ -401,22 +402,24 @@ class MemoryFS(FS):
if
dir_item
is
None
:
parent_dir
.
contents
[
dirname
]
=
self
.
_make_dir_entry
(
"dir"
,
dirname
)
#@synchronize
#def _orphan_files(self, file_dir_entry):
# for f in file_dir_entry.open_files[:]:
# f.close()
@synchronize
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
path
=
normpath
(
path
)
filepath
,
filename
=
pathsplit
(
path
)
parent_dir_entry
=
self
.
_get_dir_entry
(
filepath
)
if
parent_dir_entry
is
None
or
not
parent_dir_entry
.
isdir
():
raise
ResourceNotFoundError
(
path
)
if
'r'
in
mode
or
'a'
in
mode
:
if
filename
not
in
parent_dir_entry
.
contents
:
raise
ResourceNotFoundError
(
path
)
...
...
@@ -424,7 +427,7 @@ class MemoryFS(FS):
file_dir_entry
=
parent_dir_entry
.
contents
[
filename
]
if
file_dir_entry
.
isdir
():
raise
ResourceInvalidError
(
path
)
file_dir_entry
.
accessed_time
=
datetime
.
datetime
.
now
()
mem_file
=
self
.
file_factory
(
path
,
self
,
file_dir_entry
.
mem_file
,
mode
,
file_dir_entry
.
lock
)
...
...
@@ -438,8 +441,8 @@ class MemoryFS(FS):
else
:
file_dir_entry
=
parent_dir_entry
.
contents
[
filename
]
file_dir_entry
.
accessed_time
=
datetime
.
datetime
.
now
()
file_dir_entry
.
accessed_time
=
datetime
.
datetime
.
now
()
mem_file
=
self
.
file_factory
(
path
,
self
,
file_dir_entry
.
mem_file
,
mode
,
file_dir_entry
.
lock
)
file_dir_entry
.
open_files
.
append
(
mem_file
)
return
mem_file
...
...
@@ -455,7 +458,7 @@ class MemoryFS(FS):
raise
ResourceNotFoundError
(
path
)
if
dir_entry
.
isdir
():
raise
ResourceInvalidError
(
path
,
msg
=
"That's a directory, not a file:
%(path)
s"
)
raise
ResourceInvalidError
(
path
,
msg
=
"That's a directory, not a file:
%(path)
s"
)
pathname
,
dirname
=
pathsplit
(
path
)
parent_dir
=
self
.
_get_dir_entry
(
pathname
)
...
...
@@ -465,7 +468,7 @@ class MemoryFS(FS):
def
removedir
(
self
,
path
,
recursive
=
False
,
force
=
False
):
path
=
normpath
(
path
)
if
path
in
(
''
,
'/'
):
raise
RemoveRootError
(
path
)
raise
RemoveRootError
(
path
)
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
dir_entry
is
None
:
...
...
@@ -491,7 +494,7 @@ class MemoryFS(FS):
pathname
,
dirname
=
pathsplit
(
path
)
parent_dir
=
self
.
_get_dir_entry
(
pathname
)
if
not
dirname
:
raise
RemoveRootError
(
path
)
raise
RemoveRootError
(
path
)
del
parent_dir
.
contents
[
dirname
]
@synchronize
...
...
@@ -528,27 +531,27 @@ class MemoryFS(FS):
if
accessed_time
is
None
:
accessed_time
=
now
if
modified_time
is
None
:
modified_time
=
now
modified_time
=
now
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
dir_entry
is
not
None
:
dir_entry
.
accessed_time
=
accessed_time
dir_entry
.
modified_time
=
modified_time
return
True
return
False
@synchronize
def
_on_close_memory_file
(
self
,
open_file
,
path
):
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
dir_entry
is
not
None
:
dir_entry
.
open_files
.
remove
(
open_file
)
dir_entry
.
open_files
.
remove
(
open_file
)
@synchronize
def
_on_modify_memory_file
(
self
,
path
):
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
dir_entry
is
not
None
:
dir_entry
.
modified_time
=
datetime
.
datetime
.
now
()
dir_entry
.
modified_time
=
datetime
.
datetime
.
now
()
@synchronize
def
listdir
(
self
,
path
=
"/"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
...
...
@@ -573,38 +576,38 @@ class MemoryFS(FS):
info
=
{}
info
[
'created_time'
]
=
dir_entry
.
created_time
info
[
'modified_time'
]
=
dir_entry
.
modified_time
info
[
'accessed_time'
]
=
dir_entry
.
accessed_time
info
[
'accessed_time'
]
=
dir_entry
.
accessed_time
if
dir_entry
.
isdir
():
info
[
'st_mode'
]
=
0755
|
stat
.
S_IFDIR
else
:
info
[
'size'
]
=
len
(
dir_entry
.
data
or
b
(
''
))
info
[
'st_mode'
]
=
0666
|
stat
.
S_IFREG
return
info
return
info
@synchronize
def
copydir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
1024
*
64
):
src_dir_entry
=
self
.
_get_dir_entry
(
src
)
if
src_dir_entry
is
None
:
raise
ResourceNotFoundError
(
src
)
src_xattrs
=
src_dir_entry
.
xattrs
.
copy
()
super
(
MemoryFS
,
self
)
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
=
ignore_errors
,
chunk_size
=
chunk_size
)
super
(
MemoryFS
,
self
)
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
=
ignore_errors
,
chunk_size
=
chunk_size
)
dst_dir_entry
=
self
.
_get_dir_entry
(
dst
)
if
dst_dir_entry
is
not
None
:
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
@synchronize
def
movedir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
1024
*
64
):
src_dir_entry
=
self
.
_get_dir_entry
(
src
)
if
src_dir_entry
is
None
:
raise
ResourceNotFoundError
(
src
)
src_xattrs
=
src_dir_entry
.
xattrs
.
copy
()
super
(
MemoryFS
,
self
)
.
movedir
(
src
,
dst
,
overwrite
,
ignore_errors
=
ignore_errors
,
chunk_size
=
chunk_size
)
super
(
MemoryFS
,
self
)
.
movedir
(
src
,
dst
,
overwrite
,
ignore_errors
=
ignore_errors
,
chunk_size
=
chunk_size
)
dst_dir_entry
=
self
.
_get_dir_entry
(
dst
)
if
dst_dir_entry
is
not
None
:
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
@synchronize
def
copy
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
1024
*
64
):
src_dir_entry
=
self
.
_get_dir_entry
(
src
)
...
...
@@ -615,53 +618,69 @@ class MemoryFS(FS):
dst_dir_entry
=
self
.
_get_dir_entry
(
dst
)
if
dst_dir_entry
is
not
None
:
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
@synchronize
def
move
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
1024
*
64
):
src_dir_entry
=
self
.
_get_dir_entry
(
src
)
if
src_dir_entry
is
None
:
raise
ResourceNotFoundError
(
src
)
src_xattrs
=
src_dir_entry
.
xattrs
.
copy
()
super
(
MemoryFS
,
self
)
.
move
(
src
,
dst
,
overwrite
,
chunk_size
)
super
(
MemoryFS
,
self
)
.
move
(
src
,
dst
,
overwrite
,
chunk_size
)
dst_dir_entry
=
self
.
_get_dir_entry
(
dst
)
if
dst_dir_entry
is
not
None
:
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
dst_dir_entry
.
xattrs
.
update
(
src_xattrs
)
@synchronize
def
getcontents
(
self
,
path
,
mode
=
"rb"
):
def
getcontents
(
self
,
path
,
mode
=
"rb"
,
encoding
=
None
,
errors
=
None
,
newline
=
None
):
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
dir_entry
is
None
:
raise
ResourceNotFoundError
(
path
)
if
not
dir_entry
.
isfile
():
raise
ResourceInvalidError
(
path
,
msg
=
"not a file:
%(path)
s"
)
return
dir_entry
.
data
or
b
(
''
)
data
=
dir_entry
.
data
or
b
(
''
)
if
'b'
not
in
mode
:
return
iotools
.
decode_binary
(
data
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
return
data
@synchronize
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
1024
*
64
):
if
not
isinstance
(
data
,
six
.
binary_type
):
return
super
(
MemoryFS
,
self
)
.
setcontents
(
path
,
data
,
chunk_size
)
if
not
self
.
exists
(
path
):
self
.
open
(
path
,
'wb'
)
.
close
()
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
not
dir_entry
.
isfile
():
raise
ResourceInvalidError
(
'Not a directory
%(path)
s'
,
path
)
new_mem_file
=
StringIO
()
new_mem_file
.
write
(
data
)
dir_entry
.
mem_file
=
new_mem_file
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
1024
*
64
):
if
isinstance
(
data
,
six
.
binary_type
):
if
not
self
.
exists
(
path
):
self
.
open
(
path
,
'wb'
)
.
close
()
dir_entry
=
self
.
_get_dir_entry
(
path
)
if
not
dir_entry
.
isfile
():
raise
ResourceInvalidError
(
'Not a directory
%(path)
s'
,
path
)
new_mem_file
=
StringIO
()
new_mem_file
.
write
(
data
)
dir_entry
.
mem_file
=
new_mem_file
return
len
(
data
)
return
super
(
MemoryFS
,
self
)
.
setcontents
(
path
,
data
=
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
# if isinstance(data, six.text_type):
# return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
# if not self.exists(path):
# self.open(path, 'wb').close()
# dir_entry = self._get_dir_entry(path)
# if not dir_entry.isfile():
# raise ResourceInvalidError('Not a directory %(path)s', path)
# new_mem_file = StringIO()
# new_mem_file.write(data)
# dir_entry.mem_file = new_mem_file
@synchronize
def
setxattr
(
self
,
path
,
key
,
value
):
def
setxattr
(
self
,
path
,
key
,
value
):
dir_entry
=
self
.
_dir_entry
(
path
)
key
=
unicode
(
key
)
key
=
unicode
(
key
)
dir_entry
.
xattrs
[
key
]
=
value
@synchronize
@synchronize
def
getxattr
(
self
,
path
,
key
,
default
=
None
):
key
=
unicode
(
key
)
dir_entry
=
self
.
_dir_entry
(
path
)
dir_entry
=
self
.
_dir_entry
(
path
)
return
dir_entry
.
xattrs
.
get
(
key
,
default
)
@synchronize
def
delxattr
(
self
,
path
,
key
):
dir_entry
=
self
.
_dir_entry
(
path
)
...
...
@@ -669,7 +688,7 @@ class MemoryFS(FS):
del
dir_entry
.
xattrs
[
key
]
except
KeyError
:
pass
@synchronize
def
listxattrs
(
self
,
path
):
dir_entry
=
self
.
_dir_entry
(
path
)
...
...
fs/mountfs.py
View file @
3ea4efe1
...
...
@@ -46,6 +46,7 @@ from fs.base import *
from
fs.errors
import
*
from
fs.path
import
*
from
fs
import
_thread_synchronize_default
from
fs
import
iotools
class
DirMount
(
object
):
...
...
@@ -286,7 +287,7 @@ class MountFS(FS):
def
makedir
(
self
,
path
,
recursive
=
False
,
allow_recreate
=
False
):
fs
,
_mount_path
,
delegate_path
=
self
.
_delegate
(
path
)
if
fs
is
self
or
fs
is
None
:
raise
UnsupportedError
(
"make directory"
,
msg
=
"Can only makedir for mounted paths"
)
raise
UnsupportedError
(
"make directory"
,
msg
=
"Can only makedir for mounted paths"
)
if
not
delegate_path
:
if
allow_recreate
:
return
...
...
@@ -295,7 +296,7 @@ class MountFS(FS):
return
fs
.
makedir
(
delegate_path
,
recursive
=
recursive
,
allow_recreate
=
allow_recreate
)
@synchronize
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
obj
=
self
.
mount_tree
.
get
(
path
,
None
)
if
type
(
obj
)
is
MountFS
.
FileMount
:
callable
=
obj
.
open_callable
...
...
@@ -309,20 +310,24 @@ class MountFS(FS):
return
fs
.
open
(
delegate_path
,
mode
,
**
kwargs
)
@synchronize
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
obj
=
self
.
mount_tree
.
get
(
path
,
None
)
if
type
(
obj
)
is
MountFS
.
FileMount
:
return
super
(
MountFS
,
self
)
.
setcontents
(
path
,
data
,
chunk_size
=
chunk_size
)
return
super
(
MountFS
,
self
)
.
setcontents
(
path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
fs
,
_mount_path
,
delegate_path
=
self
.
_delegate
(
path
)
if
fs
is
self
or
fs
is
None
:
raise
ParentDirectoryMissingError
(
path
)
return
fs
.
setcontents
(
delegate_path
,
data
,
chunk_size
)
return
fs
.
setcontents
(
delegate_path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
@synchronize
def
createfile
(
self
,
path
,
wipe
=
False
):
obj
=
self
.
mount_tree
.
get
(
path
,
None
)
if
type
(
obj
)
is
MountFS
.
FileMount
:
return
super
(
MountFS
,
self
)
.
createfile
(
path
,
wipe
=
wipe
)
return
super
(
MountFS
,
self
)
.
createfile
(
path
,
wipe
=
wipe
)
fs
,
_mount_path
,
delegate_path
=
self
.
_delegate
(
path
)
if
fs
is
self
or
fs
is
None
:
raise
ParentDirectoryMissingError
(
path
)
...
...
@@ -430,7 +435,7 @@ class MountFS(FS):
"""Unmounts a path.
:param path: Path to unmount
:return: True if a
dir
was unmounted, False if the path was already unmounted
:return: True if a
path
was unmounted, False if the path was already unmounted
:rtype: bool
"""
...
...
fs/multifs.py
View file @
3ea4efe1
...
...
@@ -15,17 +15,17 @@ to *theme* a web application. We start with the following directories::
`-- templates
|-- snippets
| `-- panel.html
| `-- panel.html
|-- index.html
|-- profile.html
`-- base.html
`-- theme
|-- snippets
| |-- widget.html
| `-- extra.html
|-- index.html
`-- theme.html
`-- theme.html
And we want to create a single filesystem that looks for files in `templates` if
they don't exist in `theme`. We can do this with the following code::
...
...
@@ -36,29 +36,29 @@ they don't exist in `theme`. We can do this with the following code::
themed_template_fs.addfs('templates', OSFS('templates'))
themed_template_fs.addfs('theme', OSFS('themes'))
Now we have a `themed_template_fs` FS object presents a single view of both
directories::
Now we have a `themed_template_fs` FS object presents a single view of both
directories::
|-- snippets
| |-- panel.html
| |-- widget.html
| `-- extra.html
|-- index.html
|-- index.html
|-- profile.html
|-- base.html
`-- theme.html
A MultiFS is generally read-only, and any operation that may modify data
(including opening files for writing) will fail. However, you can set a
(including opening files for writing) will fail. However, you can set a
writeable fs with the `setwritefs` method -- which does not have to be
one of the FS objects set with `addfs`.
The reason that only one FS object is ever considered for write access is
that otherwise it would be ambiguous as to which filesystem you would want
to modify. If you need to be able to modify more than one FS in the MultiFS,
you can always access them directly.
you can always access them directly.
"""
...
...
@@ -76,7 +76,7 @@ class MultiFS(FS):
it succeeds. In effect, creating a filesystem that combines the files and
dirs of its children.
"""
_meta
=
{
'virtual'
:
True
,
'read_only'
:
False
,
'unicode_paths'
:
True
,
...
...
@@ -85,9 +85,9 @@ class MultiFS(FS):
def
__init__
(
self
,
auto_close
=
True
):
"""
:param auto_close: If True the child filesystems will be closed when the MultiFS is closed
:param auto_close: If True the child filesystems will be closed when the MultiFS is closed
"""
super
(
MultiFS
,
self
)
.
__init__
(
thread_synchronize
=
_thread_synchronize_default
)
...
...
@@ -95,7 +95,7 @@ class MultiFS(FS):
self
.
fs_sequence
=
[]
self
.
fs_lookup
=
{}
self
.
fs_priorities
=
{}
self
.
writefs
=
None
self
.
writefs
=
None
@synchronize
def
__str__
(
self
):
...
...
@@ -117,19 +117,19 @@ class MultiFS(FS):
for
fs
in
self
.
fs_sequence
:
fs
.
close
()
if
self
.
writefs
is
not
None
:
self
.
writefs
.
close
()
self
.
writefs
.
close
()
# Discard any references
del
self
.
fs_sequence
[:]
self
.
fs_lookup
.
clear
()
self
.
fs_priorities
.
clear
()
self
.
writefs
=
None
super
(
MultiFS
,
self
)
.
close
()
def
_priority_sort
(
self
):
"""Sort filesystems by priority order"""
priority_order
=
sorted
(
self
.
fs_lookup
.
keys
(),
key
=
lambda
n
:
self
.
fs_priorities
[
n
],
reverse
=
True
)
self
.
fs_sequence
=
[
self
.
fs_lookup
[
name
]
for
name
in
priority_order
]
self
.
fs_sequence
=
[
self
.
fs_lookup
[
name
]
for
name
in
priority_order
]
@synchronize
def
addfs
(
self
,
name
,
fs
,
write
=
False
,
priority
=
0
):
"""Adds a filesystem to the MultiFS.
...
...
@@ -141,19 +141,19 @@ class MultiFS(FS):
:param priority: A number that gives the priorty of the filesystem being added.
Filesystems will be searched in descending priority order and then by the reverse order they were added.
So by default, the most recently added filesystem will be looked at first
"""
if
name
in
self
.
fs_lookup
:
raise
ValueError
(
"Name already exists."
)
priority
=
(
priority
,
len
(
self
.
fs_sequence
))
self
.
fs_priorities
[
name
]
=
priority
self
.
fs_sequence
.
append
(
fs
)
self
.
fs_lookup
[
name
]
=
fs
self
.
_priority_sort
()
self
.
_priority_sort
()
if
write
:
self
.
setwritefs
(
fs
)
...
...
@@ -162,16 +162,16 @@ class MultiFS(FS):
"""Sets the filesystem to use when write access is required. Without a writeable FS,
any operations that could modify data (including opening files for writing / appending)
will fail.
:param fs: An FS object that will be used to open writeable files
"""
self
.
writefs
=
fs
@synchronize
self
.
writefs
=
fs
@synchronize
def
clearwritefs
(
self
):
"""Clears the writeable filesystem (operations that modify the multifs will fail)"""
self
.
writefs
=
None
self
.
writefs
=
None
@synchronize
def
removefs
(
self
,
name
):
...
...
@@ -209,7 +209,7 @@ class MultiFS(FS):
:param path: A path in MultiFS
"""
if
'w'
in
mode
or
'+'
in
mode
or
'a'
in
mode
:
if
'w'
in
mode
or
'+'
in
mode
or
'a'
in
mode
:
return
self
.
writefs
for
fs
in
self
:
if
fs
.
exists
(
path
):
...
...
@@ -238,14 +238,14 @@ class MultiFS(FS):
return
"
%
s, on
%
s (
%
s)"
%
(
fs
.
desc
(
path
),
name
,
fs
)
@synchronize
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
if
'w'
in
mode
or
'+'
in
mode
or
'a'
in
mode
:
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'open'
,
path
=
path
,
msg
=
"No writeable FS set"
)
return
self
.
writefs
.
open
(
path
,
mode
)
return
self
.
writefs
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
for
fs
in
self
:
if
fs
.
exists
(
path
):
fs_file
=
fs
.
open
(
path
,
mode
,
**
kwargs
)
fs_file
=
fs
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
return
fs_file
raise
ResourceNotFoundError
(
path
)
...
...
@@ -280,34 +280,34 @@ class MultiFS(FS):
@synchronize
def
makedir
(
self
,
path
,
recursive
=
False
,
allow_recreate
=
False
):
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'makedir'
,
path
=
path
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
makedir
(
path
,
recursive
=
recursive
,
allow_recreate
=
allow_recreate
)
raise
OperationFailedError
(
'makedir'
,
path
=
path
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
makedir
(
path
,
recursive
=
recursive
,
allow_recreate
=
allow_recreate
)
@synchronize
def
remove
(
self
,
path
):
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'remove'
,
path
=
path
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
remove
(
path
)
raise
OperationFailedError
(
'remove'
,
path
=
path
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
remove
(
path
)
@synchronize
def
removedir
(
self
,
path
,
recursive
=
False
,
force
=
False
):
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'removedir'
,
path
=
path
,
msg
=
"No writeable FS set"
)
if
normpath
(
path
)
in
(
''
,
'/'
):
raise
RemoveRootError
(
path
)
self
.
writefs
.
removedir
(
path
,
recursive
=
recursive
,
force
=
force
)
raise
RemoveRootError
(
path
)
self
.
writefs
.
removedir
(
path
,
recursive
=
recursive
,
force
=
force
)
@synchronize
def
rename
(
self
,
src
,
dst
):
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'rename'
,
path
=
src
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
rename
(
src
,
dst
)
self
.
writefs
.
rename
(
src
,
dst
)
@synchronize
def
settimes
(
self
,
path
,
accessed_time
=
None
,
modified_time
=
None
):
if
self
.
writefs
is
None
:
raise
OperationFailedError
(
'settimes'
,
path
=
path
,
msg
=
"No writeable FS set"
)
self
.
writefs
.
settimes
(
path
,
accessed_time
,
modified_time
)
self
.
writefs
.
settimes
(
path
,
accessed_time
,
modified_time
)
@synchronize
def
getinfo
(
self
,
path
):
...
...
fs/opener.py
View file @
3ea4efe1
...
...
@@ -11,23 +11,23 @@ the syntax of http://commons.apache.org/vfs/filesystems.html).
The `OpenerRegistry` class maps the protocol (file, ftp etc.) on to an Opener
object, which returns an appropriate filesystem object and path. You can
create a custom opener registry that opens just the filesystems you require, or
create a custom opener registry that opens just the filesystems you require, or
use the opener registry defined here (also called `opener`) that can open any
supported filesystem.
The `parse` method of an `OpenerRegsitry` object returns a tuple of an FS
object a path. Here's an example of how to use the default opener registry::
object a path. Here's an example of how to use the default opener registry::
>>> from fs.opener import opener
>>> opener.parse('ftp://ftp.mozilla.org/pub')
(<fs.ftpfs.FTPFS object at 0x96e66ec>, u'pub')
You can use use the `opendir` method, which just returns an FS object. In the
You can use use the `opendir` method, which just returns an FS object. In the
example above, `opendir` will return a FS object for the directory `pub`::
>>> opener.opendir('ftp://ftp.mozilla.org/pub')
<SubFS: <FTPFS ftp.mozilla.org>/pub>
If you are just interested in a single file, use the `open` method of a registry
which returns a file-like object, and has the same signature as FS objects and
the `open` builtin::
...
...
@@ -50,9 +50,9 @@ __all__ = ['OpenerError',
'OpenerRegistry'
,
'opener'
,
'fsopen'
,
'fsopendir'
,
'fsopendir'
,
'OpenerRegistry'
,
'Opener'
,
'Opener'
,
'OSFSOpener'
,
'ZipOpener'
,
'RPCOpener'
,
...
...
@@ -90,13 +90,13 @@ def _expand_syspath(path):
path
=
os
.
path
.
expanduser
(
os
.
path
.
expandvars
(
path
))
path
=
os
.
path
.
normpath
(
os
.
path
.
abspath
(
path
))
return
path
def
_parse_credentials
(
url
):
scheme
=
None
if
'://'
in
url
:
scheme
,
url
=
url
.
split
(
'://'
,
1
)
scheme
,
url
=
url
.
split
(
'://'
,
1
)
username
=
None
password
=
None
password
=
None
if
'@'
in
url
:
credentials
,
url
=
url
.
split
(
'@'
,
1
)
if
':'
in
credentials
:
...
...
@@ -113,7 +113,7 @@ def _parse_name(fs_name):
return
fs_name
,
fs_name_params
else
:
return
fs_name
,
None
def
_split_url_path
(
url
):
if
'://'
not
in
url
:
url
=
'http://'
+
url
...
...
@@ -131,7 +131,7 @@ class _FSClosingFile(FileWrapper):
return
ret
class
OpenerRegistry
(
object
):
"""An opener registry that stores a number of opener objects used to parse FS URIs"""
re_fs_url
=
re
.
compile
(
r'''
...
...
@@ -147,60 +147,60 @@ class OpenerRegistry(object):
(?:
!(.*?)$
)*$
'''
,
re
.
VERBOSE
)
'''
,
re
.
VERBOSE
)
def
__init__
(
self
,
openers
=
[]):
self
.
registry
=
{}
self
.
openers
=
{}
self
.
default_opener
=
'osfs'
for
opener
in
openers
:
self
.
add
(
opener
)
@classmethod
def
split_segments
(
self
,
fs_url
):
match
=
self
.
re_fs_url
.
match
(
fs_url
)
return
match
def
split_segments
(
self
,
fs_url
):
match
=
self
.
re_fs_url
.
match
(
fs_url
)
return
match
def
get_opener
(
self
,
name
):
"""Retrieve an opener for the given protocol
:param name: name of the opener to open
:raises NoOpenerError: if no opener has been registered of that name
"""
if
name
not
in
self
.
registry
:
raise
NoOpenerError
(
"No opener for
%
s"
%
name
)
index
=
self
.
registry
[
name
]
return
self
.
openers
[
index
]
return
self
.
openers
[
index
]
def
add
(
self
,
opener
):
"""Adds an opener to the registry
:param opener: a class derived from fs.opener.Opener
"""
index
=
len
(
self
.
openers
)
self
.
openers
[
index
]
=
opener
for
name
in
opener
.
names
:
self
.
registry
[
name
]
=
index
def
parse
(
self
,
fs_url
,
default_fs_name
=
None
,
writeable
=
False
,
create_dir
=
False
,
cache_hint
=
True
):
"""Parses a FS url and returns an fs object a path within that FS object
(if indicated in the path). A tuple of (<FS instance>, <path>) is returned.
:param fs_url: an FS url
:param default_fs_name: the default FS to use if none is indicated (defaults is OSFS)
:param writeable: if True, a writeable FS will be returned
:param create_dir: if True, then the directory in the FS will be created
"""
orig_url
=
fs_url
orig_url
=
fs_url
match
=
self
.
split_segments
(
fs_url
)
if
match
:
if
match
:
fs_name
,
credentials
,
url1
,
url2
,
path
=
match
.
groups
()
if
credentials
:
fs_url
=
'
%
s@
%
s'
%
(
credentials
,
url1
)
...
...
@@ -215,149 +215,148 @@ class OpenerRegistry(object):
paths
=
path
.
split
(
'!'
)
path
=
paths
.
pop
()
fs_url
=
'
%
s!
%
s'
%
(
fs_url
,
'!'
.
join
(
paths
))
fs_name
=
fs_name
or
self
.
default_opener
else
:
fs_name
=
default_fs_name
or
self
.
default_opener
fs_url
=
_expand_syspath
(
fs_url
)
path
=
''
fs_name
,
fs_name_params
=
_parse_name
(
fs_name
)
fs_url
=
_expand_syspath
(
fs_url
)
path
=
''
fs_name
,
fs_name_params
=
_parse_name
(
fs_name
)
opener
=
self
.
get_opener
(
fs_name
)
if
fs_url
is
None
:
raise
OpenerError
(
"Unable to parse '
%
s'"
%
orig_url
)
raise
OpenerError
(
"Unable to parse '
%
s'"
%
orig_url
)
fs
,
fs_path
=
opener
.
get_fs
(
self
,
fs_name
,
fs_name_params
,
fs_url
,
writeable
,
create_dir
)
fs
,
fs_path
=
opener
.
get_fs
(
self
,
fs_name
,
fs_name_params
,
fs_url
,
writeable
,
create_dir
)
fs
.
cache_hint
(
cache_hint
)
if
fs_path
and
iswildcard
(
fs_path
):
pathname
,
resourcename
=
pathsplit
(
fs_path
or
''
)
if
pathname
:
fs
=
fs
.
opendir
(
pathname
)
return
fs
,
resourcename
fs_path
=
join
(
fs_path
,
path
)
if
create_dir
and
fs_path
:
if
not
fs
.
getmeta
(
'read_only'
,
False
):
fs
.
makedir
(
fs_path
,
allow_recreate
=
True
)
pathname
,
resourcename
=
pathsplit
(
fs_path
or
''
)
fs
.
makedir
(
fs_path
,
allow_recreate
=
True
)
pathname
,
resourcename
=
pathsplit
(
fs_path
or
''
)
if
pathname
and
resourcename
:
fs
=
fs
.
opendir
(
pathname
)
fs_path
=
resourcename
return
fs
,
fs_path
or
''
def
open
(
self
,
fs_url
,
mode
=
'rb'
):
return
fs
,
fs_path
or
''
def
open
(
self
,
fs_url
,
mode
=
'r'
,
**
kwargs
):
"""Opens a file from a given FS url
If you intend to do a lot of file manipulation, it would likely be more
efficient to do it directly through the an FS instance (from `parse` or
efficient to do it directly through the an FS instance (from `parse` or
`opendir`). This method is fine for one-offs though.
:param fs_url: a FS URL, e.g. ftp://ftp.mozilla.org/README
:param mode: mode to open file file
:rtype: a file
"""
:rtype: a file
"""
writeable
=
'w'
in
mode
or
'a'
in
mode
or
'+'
in
mode
fs
,
path
=
self
.
parse
(
fs_url
,
writeable
=
writeable
)
fs
,
path
=
self
.
parse
(
fs_url
,
writeable
=
writeable
)
file_object
=
fs
.
open
(
path
,
mode
)
file_object
=
_FSClosingFile
(
file_object
,
mode
)
file_object
.
fs
=
fs
return
file_object
def
getcontents
(
self
,
fs_url
,
mode
=
"rb"
):
return
file_object
def
getcontents
(
self
,
fs_url
,
node
=
'rb'
,
encoding
=
None
,
errors
=
None
,
newline
=
None
):
"""Gets the contents from a given FS url (if it references a file)
:param fs_url: a FS URL e.g. ftp://ftp.mozilla.org/README
"""
fs
,
path
=
self
.
parse
(
fs_url
)
return
fs
.
getcontents
(
path
,
mode
)
return
fs
.
getcontents
(
path
,
mode
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
def
opendir
(
self
,
fs_url
,
writeable
=
True
,
create_dir
=
False
):
"""Opens an FS object from an FS URL
:param fs_url: an FS URL e.g. ftp://ftp.mozilla.org
:param writeable: set to True (the default) if the FS must be writeable
:param create_dir: create the directory references by the FS URL, if
it doesn't already exist
"""
fs
,
path
=
self
.
parse
(
fs_url
,
writeable
=
writeable
,
create_dir
=
create_dir
)
it doesn't already exist
"""
fs
,
path
=
self
.
parse
(
fs_url
,
writeable
=
writeable
,
create_dir
=
create_dir
)
if
path
and
'://'
not
in
fs_url
:
# A shortcut to return an OSFS rather than a SubFS for os paths
return
OSFS
(
fs_url
)
if
path
:
fs
=
fs
.
opendir
(
path
)
return
fs
class
Opener
(
object
):
"""The base class for openers
Opener follow a very simple protocol. To create an opener, derive a class
from `Opener` and define a classmethod called `get_fs`, which should have the following signature::
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
The parameters of `get_fs` are as follows:
* `fs_name` the name of the opener, as extracted from the protocol part of the url,
* `fs_name_params` reserved for future use
* `fs_path` the path part of the url
* `writeable` if True, then `get_fs` must return an FS that can be written to
* `create_dir` if True then `get_fs` should attempt to silently create the directory references in path
In addition to `get_fs` an opener class should contain
In addition to `get_fs` an opener class should contain
two class attributes: names and desc. `names` is a list of protocols that
list opener will opener. `desc` is an English description of the individual opener syntax.
"""
"""
pass
class OSFSOpener(Opener):
    names = ['osfs', 'file']
    desc = """OS filesystem opener, works with any valid system path. This is the default opener and will be used if you don't indicate which opener to use.

examples:
* file://relative/foo/bar/baz.txt (opens a relative file)
* file:///home/user (opens a directory from a absolute path)
* osfs://~/ (open the user's home directory)
* foo/bar.baz (file:// is the default opener)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Return an OSFS rooted at the directory containing *fs_path*.

        The resource name (final path component) is returned alongside the
        filesystem so the registry can open the file within it.
        """
        from fs.osfs import OSFS

        normalized = os.path.normpath(fs_path)
        if create_dir and not os.path.exists(normalized):
            # Silently create any missing directories on the way to the target.
            from fs.osfs import _os_makedirs
            _os_makedirs(normalized)
        dirname, resourcename = os.path.split(fs_path)
        osfs = OSFS(dirname)
        return osfs, resourcename
class
ZipOpener
(
Opener
):
names
=
[
'zip'
,
'zip64'
]
names
=
[
'zip'
,
'zip64'
]
desc
=
"""Opens zip files. Use zip64 for > 2 gigabyte zip files, if you have a 64 bit processor.
examples:
* zip://myzip.zip (open a local zip file)
* zip://myzip.zip!foo/bar/insidezip.txt (reference a file insize myzip.zip)
* zip:ftp://ftp.example.org/myzip.zip (open a zip file stored on a ftp server)"""
@classmethod
def
get_fs
(
cls
,
registry
,
fs_name
,
fs_name_params
,
fs_path
,
writeable
,
create_dir
):
def
get_fs
(
cls
,
registry
,
fs_name
,
fs_name_params
,
fs_path
,
writeable
,
create_dir
):
zip_fs
,
zip_path
=
registry
.
parse
(
fs_path
)
if
zip_path
is
None
:
raise
OpenerError
(
'File required for zip opener'
)
...
...
@@ -371,46 +370,46 @@ class ZipOpener(Opener):
if
zip_fs
.
hassyspath
(
zip_path
):
zip_file
=
zip_fs
.
getsyspath
(
zip_path
)
else
:
zip_file
=
zip_fs
.
open
(
zip_path
,
mode
=
open_mode
)
zip_file
=
zip_fs
.
open
(
zip_path
,
mode
=
open_mode
)
_username
,
_password
,
fs_path
=
_parse_credentials
(
fs_path
)
from
fs.zipfs
import
ZipFS
if
zip_file
is
None
:
if
zip_file
is
None
:
zip_file
=
fs_path
mode
=
'r'
if
writeable
:
mode
=
'a'
allow_zip_64
=
fs_name
.
endswith
(
'64'
)
mode
=
'a'
allow_zip_64
=
fs_name
.
endswith
(
'64'
)
zipfs
=
ZipFS
(
zip_file
,
mode
=
mode
,
allow_zip_64
=
allow_zip_64
)
return
zipfs
,
None
class RPCOpener(Opener):
    names = ['rpc']
    desc = """An opener for filesystems server over RPC (see the fsserve command).

examples:
rpc://127.0.0.1:8000 (opens a RPC server running on local host, port 80)
rpc://www.example.org (opens an RPC server on www.example.org, default port 80)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Connect to an XML-RPC filesystem server and return an RPCFS."""
        from fs.rpcfs import RPCFS

        # Credentials are parsed out but not currently used by RPCFS.
        _username, _password, fs_path = _parse_credentials(fs_path)
        # Default to http if the url has no explicit scheme.
        if '://' not in fs_path:
            fs_path = 'http://' + fs_path
        scheme, netloc, path, _params, _query, _fragment = urlparse(fs_path)

        rpcfs = RPCFS('%s://%s' % (scheme, netloc))
        if create_dir and path:
            rpcfs.makedir(path, recursive=True, allow_recreate=True)
        return rpcfs, path or None
...
...
@@ -421,31 +420,31 @@ class FTPOpener(Opener):
examples:
* ftp://ftp.mozilla.org (opens the root of ftp.mozilla.org)
* ftp://ftp.example.org/foo/bar (opens /foo/bar on ftp.mozilla.org)"""
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Open an FTPFS for an ftp:// url, returning it with the resource name."""
    from fs.ftpfs import FTPFS

    username, password, fs_path = _parse_credentials(fs_path)

    # Ensure the url carries a scheme so urlparse produces a proper netloc.
    scheme, _netloc, _path, _params, _query, _fragment = urlparse(fs_path)
    if not scheme:
        fs_path = 'ftp://' + fs_path
    scheme, netloc, path, _params, _query, _fragment = urlparse(fs_path)

    dirpath, resourcepath = pathsplit(path)
    host = netloc

    ftpfs = FTPFS(host, user=username or '', passwd=password or '')
    # FTP round-trips are expensive; allow aggressive caching.
    ftpfs.cache_hint(True)

    if create_dir and path:
        ftpfs.makedir(path, recursive=True, allow_recreate=True)

    if dirpath:
        ftpfs = ftpfs.opendir(dirpath)

    return ftpfs, (resourcepath if resourcepath else None)
...
...
@@ -453,31 +452,31 @@ examples:
class
SFTPOpener
(
Opener
):
names
=
[
'sftp'
]
desc
=
"""An opener for SFTP (Secure File Transfer Protocol) servers
examples:
* sftp://username:password@example.org (opens sftp server example.org with username and password
* sftp://example.org (opens example.org with public key authentication)"""
@classmethod
def
get_fs
(
cls
,
registry
,
fs_name
,
fs_name_params
,
fs_path
,
writeable
,
create_dir
):
username
,
password
,
fs_path
=
_parse_credentials
(
fs_path
)
username
,
password
,
fs_path
=
_parse_credentials
(
fs_path
)
from
fs.sftpfs
import
SFTPFS
credentials
=
{}
if
username
is
not
None
:
credentials
[
'username'
]
=
username
if
password
is
not
None
:
credentials
[
'password'
]
=
password
if
'/'
in
fs_path
:
addr
,
fs_path
=
fs_path
.
split
(
'/'
,
1
)
else
:
addr
=
fs_path
fs_path
=
'/'
fs_path
,
resourcename
=
pathsplit
(
fs_path
)
host
=
addr
port
=
None
if
':'
in
host
:
...
...
@@ -488,7 +487,7 @@ examples:
pass
else
:
host
=
(
addr
,
port
)
if
create_dir
:
sftpfs
=
SFTPFS
(
host
,
root_path
=
'/'
,
**
credentials
)
if
not
sftpfs
.
_transport
.
is_authenticated
():
...
...
@@ -496,15 +495,15 @@ examples:
raise
OpenerError
(
'SFTP requires authentication'
)
sftpfs
=
sftpfs
.
makeopendir
(
fs_path
)
return
sftpfs
,
None
sftpfs
=
SFTPFS
(
host
,
root_path
=
fs_path
,
**
credentials
)
if
not
sftpfs
.
_transport
.
is_authenticated
():
sftpfs
.
close
()
raise
OpenerError
(
'SFTP requires authentication'
)
raise
OpenerError
(
'SFTP requires authentication'
)
return
sftpfs
,
resourcename
class
MemOpener
(
Opener
):
names
=
[
'mem'
,
'ram'
]
desc
=
"""Creates an in-memory filesystem (very fast but contents will disappear on exit).
...
...
@@ -514,7 +513,7 @@ NB: If you user fscp or fsmv to copy/move files here, you are effectively deleti
examples:
* mem:// (opens a new memory filesystem)
* mem://foo/bar (opens a new memory filesystem with subdirectory /foo/bar) """
@classmethod
def
get_fs
(
cls
,
registry
,
fs_name
,
fs_name_params
,
fs_path
,
writeable
,
create_dir
):
from
fs.memoryfs
import
MemoryFS
...
...
@@ -522,29 +521,29 @@ examples:
if
create_dir
:
memfs
=
memfs
.
makeopendir
(
fs_path
)
return
memfs
,
None
class DebugOpener(Opener):
    names = ['debug']
    desc = """For developers -- adds debugging information to output.

example:
* debug:ftp://ftp.mozilla.org (displays details of calls made to a ftp filesystem)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Wrap another filesystem (or a fresh memory/temp FS) in a DebugFS."""
        from fs.wrapfs.debugfs import DebugFS

        if fs_path:
            # Wrap whatever filesystem the rest of the url describes.
            wrapped, _path = registry.parse(fs_path, writeable=writeable, create_dir=create_dir)
            return DebugFS(wrapped, verbose=False), None

        # No sub-url given: fall back to an in-memory or temporary filesystem.
        if fs_name_params == 'ram':
            from fs.memoryfs import MemoryFS
            backing = MemoryFS()
        else:
            from fs.tempfs import TempFS
            backing = TempFS()
        return DebugFS(backing, identifier=fs_name_params, verbose=False), None
class
TempOpener
(
Opener
):
names
=
[
'temp'
]
desc
=
"""Creates a temporary filesystem, that is erased on exit.
...
...
@@ -553,12 +552,12 @@ NB: If you user fscp or fsmv to copy/move files here, you are effectively deleti
example:
* temp://"""
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Create a lazily-instantiated temporary filesystem.

    LazyFS defers construction of the TempFS until it is first used, so
    no temp directory is created unless the filesystem is touched.
    """
    from fs.tempfs import TempFS
    from fs.wrapfs.lazyfs import LazyFS

    factory = (TempFS, (), {"identifier": fs_name_params})
    return LazyFS(factory), fs_path
class
S3Opener
(
Opener
):
...
...
@@ -568,43 +567,43 @@ class S3Opener(Opener):
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Open an S3FS rooted at the bucket named in the url.

    Any path beyond the bucket is opened as a subdirectory, and the final
    component is returned as the resource name.
    """
    from fs.s3fs import S3FS

    if '/' in fs_path:
        bucket, path = fs_path.split('/', 1)
    else:
        bucket, path = fs_path, ''

    fs = S3FS(bucket)

    if path:
        dirpath, resourcepath = pathsplit(path)
        if dirpath:
            fs = fs.opendir(dirpath)
        path = resourcepath

    return fs, path
class
TahoeOpener
(
Opener
):
names
=
[
'tahoe'
]
desc
=
"""Opens a Tahoe-LAFS filesystem
example:
* tahoe://http://pubgrid.tahoe-lafs.org/uri/URI:DIR2:h5bkxelehowscijdb [...]"""
@classmethod
def
get_fs
(
cls
,
registry
,
fs_name
,
fs_name_params
,
fs_path
,
writeable
,
create_dir
):
from
fs.contrib.tahoelafs
import
TahoeLAFS
if
'/uri/'
not
in
fs_path
:
raise
OpenerError
(
"""Tahoe-LAFS url should be in the form <url>/uri/<dicap>"""
)
url
,
dircap
=
fs_path
.
split
(
'/uri/'
)
path
=
''
if
'/'
in
dircap
:
dircap
,
path
=
dircap
.
split
(
'/'
,
1
)
fs
=
TahoeLAFS
(
dircap
,
webapi
=
url
)
if
'/'
in
path
:
dirname
,
_resourcename
=
pathsplit
(
path
)
if
create_dir
:
...
...
@@ -612,48 +611,48 @@ class TahoeOpener(Opener):
else
:
fs
=
fs
.
opendir
(
dirname
)
path
=
''
return
fs
,
path
return
fs
,
path
class DavOpener(Opener):
    names = ['dav']
    desc = """Opens a WebDAV server

example:
* dav://example.org/dav"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open a DAVFS for a WebDAV url, extracting any embedded credentials."""
        from fs.contrib.davfs import DAVFS

        url = fs_path
        if '://' not in url:
            url = 'http://' + url
        scheme, url = url.split('://', 1)
        username, password, url = _parse_credentials(url)

        # Build a credentials dict only if the url actually carried any.
        credentials = {}
        if username:
            credentials['username'] = username
        if password:
            credentials['password'] = password
        if not credentials:
            credentials = None

        url = '%s://%s' % (scheme, url)
        fs = DAVFS(url, credentials=credentials)
        return fs, ''
class
HTTPOpener
(
Opener
):
names
=
[
'http'
]
desc
=
"""HTTP file opener. HTTP only supports reading files, and not much else.
desc
=
"""HTTP file opener. HTTP only supports reading files, and not much else.
example:
* http://www.example.org/index.html"""
...
...
@@ -667,18 +666,18 @@ example:
resourcename
=
''
fs
=
HTTPFS
(
'http://'
+
dirname
)
return
fs
,
resourcename
class
UserDataOpener
(
Opener
):
names
=
[
'appuserdata'
,
'appuser'
]
desc
=
"""Opens a filesystem for a per-user application directory.
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
example:
* appuserdata://myapplication
* appuserdata://examplesoft:myapplication
* appuserdata://anotherapp.1.1
* appuserdata://examplesoft:anotherapp.1.3"""
* appuserdata://examplesoft:anotherapp.1.3"""
FSClass
=
'UserDataFS'
...
...
@@ -691,35 +690,35 @@ example:
else
:
appauthor
=
None
appname
=
fs_path
if
'/'
in
appname
:
appname
,
path
=
appname
.
split
(
'/'
,
1
)
else
:
path
=
''
if
'.'
in
appname
:
appname
,
appversion
=
appname
.
split
(
'.'
,
1
)
else
:
appversion
=
None
fs
=
fs_class
(
appname
,
appauthor
=
appauthor
,
version
=
appversion
,
create
=
create_dir
)
if
'/'
in
path
:
subdir
,
path
=
path
.
rsplit
(
'/'
,
1
)
if
create_dir
:
fs
=
fs
.
makeopendir
(
subdir
,
recursive
=
True
)
else
:
fs
=
fs
.
opendir
(
subdir
)
return
fs
,
path
class
SiteDataOpener
(
UserDataOpener
):
names
=
[
'appsitedata'
,
'appsite'
]
desc
=
"""Opens a filesystem for an application site data directory.
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
example:
* appsitedata://myapplication
* appsitedata://examplesoft:myapplication
...
...
@@ -727,14 +726,14 @@ example:
* appsitedata://examplesoft:anotherapp.1.3"""
FSClass
=
'SiteDataFS'
class
UserCacheOpener
(
UserDataOpener
):
names
=
[
'appusercache'
,
'appcache'
]
desc
=
"""Opens a filesystem for an per-user application cache directory.
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
example:
* appusercache://myapplication
* appusercache://examplesoft:myapplication
...
...
@@ -742,15 +741,15 @@ example:
* appusercache://examplesoft:anotherapp.1.3"""
FSClass
=
'UserCacheFS'
class
UserLogOpener
(
UserDataOpener
):
names
=
[
'appuserlog'
,
'applog'
]
desc
=
"""Opens a filesystem for an application site data directory.
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
example:
* appuserlog://myapplication
* appuserlog://examplesoft:myapplication
...
...
@@ -763,7 +762,7 @@ example:
class
MountOpener
(
Opener
):
names
=
[
'mount'
]
desc
=
"""Mounts other filesystems on a 'virtual' filesystem
The path portion of the FS URL should be a path to an ini file, where the keys are the mount point, and the values are FS URLs to mount.
The following is an example of such an ini file:
...
...
@@ -780,24 +779,24 @@ example:
* mount://fs.ini
* mount://fs.ini!resources
* mount://fs.ini:fs2"""
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Build a MountFS from an ini file of mount-point -> fs-url entries."""
    from fs.mountfs import MountFS
    from ConfigParser import ConfigParser

    cfg = ConfigParser()

    # A '#' suffix selects an ini section; default to the 'fs' section.
    path, hash_sep, section = fs_path.partition('#')
    if not hash_sep:
        section = 'fs'

    cfg.readfp(registry.open(path))

    mount_fs = MountFS()
    for mount_point, mount_path in cfg.items(section):
        mount_fs.mount(mount_point, registry.opendir(mount_path, create_dir=create_dir))
    return mount_fs, ''
...
...
@@ -805,7 +804,7 @@ example:
class
MultiOpener
(
Opener
):
names
=
[
'multi'
]
desc
=
"""Combines other filesystems in to a single filesystem.
The path portion of the FS URL should be a path to an ini file, where the keys are the mount point, and the values are FS URLs to mount.
The following is an example of such an ini file:
...
...
@@ -816,24 +815,24 @@ The following is an example of such an ini file:
example:
* multi://fs.ini"""
@classmethod
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Build a MultiFS from an ini file of name -> fs-url entries."""
    from fs.multifs import MultiFS
    from ConfigParser import ConfigParser

    cfg = ConfigParser()

    # A '#' suffix selects an ini section; default to the 'fs' section.
    path, hash_sep, section = fs_path.partition('#')
    if not hash_sep:
        section = 'fs'

    cfg.readfp(registry.open(path))

    multi_fs = MultiFS()
    for name, fs_url in cfg.items(section):
        multi_fs.addfs(name, registry.opendir(fs_url, create_dir=create_dir))
    return multi_fs, ''
...
...
fs/osfs/__init__.py
View file @
3ea4efe1
...
...
@@ -20,6 +20,7 @@ import sys
import
errno
import
datetime
import
platform
import
io
from
fs.base
import
*
from
fs.path
import
*
...
...
@@ -76,16 +77,15 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
methods in the os and os.path modules.
"""
_meta
=
{
'thread_safe'
:
True
,
'network'
:
False
,
'virtual'
:
False
,
'read_only'
:
False
,
'unicode_paths'
:
os
.
path
.
supports_unicode_filenames
,
'case_insensitive_paths'
:
os
.
path
.
normcase
(
'Aa'
)
==
'aa'
,
'atomic.makedir'
:
True
,
'atomic.rename'
:
True
,
'atomic.setcontents'
:
False
,
}
_meta
=
{
'thread_safe'
:
True
,
'network'
:
False
,
'virtual'
:
False
,
'read_only'
:
False
,
'unicode_paths'
:
os
.
path
.
supports_unicode_filenames
,
'case_insensitive_paths'
:
os
.
path
.
normcase
(
'Aa'
)
==
'aa'
,
'atomic.makedir'
:
True
,
'atomic.rename'
:
True
,
'atomic.setcontents'
:
False
}
if
platform
.
system
()
==
'Windows'
:
_meta
[
"invalid_path_chars"
]
=
''
.
join
(
chr
(
n
)
for
n
in
xrange
(
31
))
+
'
\\
:*?"<>|'
...
...
@@ -215,11 +215,11 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
return
super
(
OSFS
,
self
)
.
getmeta
(
meta_name
,
default
)
@convert_os_errors
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
mode
=
''
.
join
(
c
for
c
in
mode
if
c
in
'rwabt+'
)
sys_path
=
self
.
getsyspath
(
path
)
try
:
return
open
(
sys_path
,
mode
,
kwargs
.
get
(
"buffering"
,
-
1
)
)
return
io
.
open
(
sys_path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
except
EnvironmentError
,
e
:
# Win32 gives EACCES when opening a directory.
if
sys
.
platform
==
"win32"
and
e
.
errno
in
(
errno
.
EACCES
,):
...
...
@@ -228,8 +228,8 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
raise
@convert_os_errors
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64 * 1024):
    """Write *data* to the file at *path*.

    Delegates to the base FS implementation; the decorator converts any
    OS-level errors into FS exceptions.
    """
    parent = super(OSFS, self)
    return parent.setcontents(path, data,
                              encoding=encoding,
                              errors=errors,
                              chunk_size=chunk_size)
@convert_os_errors
def
exists
(
self
,
path
):
...
...
fs/remote.py
View file @
3ea4efe1
...
...
@@ -41,6 +41,7 @@ _SENTINAL = object()
from
six
import
PY3
,
b
class
RemoteFileBuffer
(
FileWrapper
):
"""File-like object providing buffer for local file operations.
...
...
@@ -79,24 +80,24 @@ class RemoteFileBuffer(FileWrapper):
self
.
path
=
path
self
.
write_on_flush
=
write_on_flush
self
.
_changed
=
False
self
.
_readlen
=
0
# How many bytes already loaded from rfile
self
.
_rfile
=
None
# Reference to remote file object
self
.
_eof
=
False
# Reached end of rfile?
if
getattr
(
fs
,
"_lock"
,
None
)
is
not
None
:
self
.
_readlen
=
0
# How many bytes already loaded from rfile
self
.
_rfile
=
None
# Reference to remote file object
self
.
_eof
=
False
# Reached end of rfile?
if
getattr
(
fs
,
"_lock"
,
None
)
is
not
None
:
self
.
_lock
=
fs
.
_lock
.
__class__
()
else
:
self
.
_lock
=
threading
.
RLock
()
if
"r"
in
mode
or
"+"
in
mode
or
"a"
in
mode
:
if
rfile
is
None
:
# File was just created, force to write anything
self
.
_changed
=
True
self
.
_eof
=
True
if
not
hasattr
(
rfile
,
"read"
):
#rfile = StringIO(unicode(rfile))
rfile
=
StringIO
(
rfile
)
self
.
_rfile
=
rfile
else
:
# Do not use remote file object
...
...
@@ -141,27 +142,27 @@ class RemoteFileBuffer(FileWrapper):
toread
=
length
-
bytes_read
if
not
toread
:
break
data
=
self
.
_rfile
.
read
(
toread
)
datalen
=
len
(
data
)
if
not
datalen
:
self
.
_eof
=
True
break
bytes_read
+=
datalen
bytes_read
+=
datalen
self
.
wrapped_file
.
write
(
data
)
if
datalen
<
toread
:
# We reached EOF,
# no more reads needed
self
.
_eof
=
True
break
if
self
.
_eof
and
self
.
_rfile
is
not
None
:
self
.
_rfile
.
close
()
self
.
_readlen
+=
bytes_read
def
_fillbuffer
(
self
,
length
=
None
):
def
_fillbuffer
(
self
,
length
=
None
):
"""Fill the local buffer, leaving file position unchanged.
This method is used for on-demand loading of data from the remote file
...
...
@@ -177,7 +178,7 @@ class RemoteFileBuffer(FileWrapper):
self
.
_read_remote
()
self
.
_eof
=
True
self
.
wrapped_file
.
seek
(
curpos
)
elif
not
self
.
_eof
:
if
curpos
+
length
>
self
.
_readlen
:
# Read all data and we didn't reached EOF
...
...
@@ -186,7 +187,7 @@ class RemoteFileBuffer(FileWrapper):
self
.
wrapped_file
.
seek
(
0
,
SEEK_END
)
self
.
_read_remote
(
toload
)
self
.
wrapped_file
.
seek
(
curpos
)
def
_read
(
self
,
length
=
None
):
if
length
is
not
None
and
length
<
0
:
length
=
None
...
...
@@ -209,7 +210,7 @@ class RemoteFileBuffer(FileWrapper):
abspos
=
None
else
:
raise
IOError
(
EINVAL
,
'Invalid whence'
)
if
abspos
!=
None
:
toread
=
abspos
-
self
.
_readlen
if
toread
>
0
:
...
...
@@ -218,7 +219,7 @@ class RemoteFileBuffer(FileWrapper):
else
:
self
.
wrapped_file
.
seek
(
self
.
_readlen
)
self
.
_fillbuffer
()
self
.
wrapped_file
.
seek
(
offset
,
whence
)
def
_truncate
(
self
,
size
):
...
...
@@ -227,7 +228,7 @@ class RemoteFileBuffer(FileWrapper):
# Read the rest of file
self
.
_fillbuffer
(
size
-
self
.
_readlen
)
# Lock rfile
self
.
_eof
=
True
self
.
_eof
=
True
elif
self
.
_readlen
>=
size
:
# Crop rfile metadata
self
.
_readlen
=
size
if
size
!=
None
else
0
...
...
@@ -236,7 +237,7 @@ class RemoteFileBuffer(FileWrapper):
self
.
wrapped_file
.
truncate
(
size
)
self
.
_changed
=
True
self
.
flush
()
if
self
.
_rfile
is
not
None
:
self
.
_rfile
.
close
()
...
...
@@ -251,17 +252,17 @@ class RemoteFileBuffer(FileWrapper):
if
not
self
.
_changed
:
# Nothing changed, no need to write data back
return
# If not all data loaded, load until eof
if
not
self
.
_eof
:
self
.
_fillbuffer
()
if
"w"
in
self
.
mode
or
"a"
in
self
.
mode
or
"+"
in
self
.
mode
:
pos
=
self
.
wrapped_file
.
tell
()
self
.
wrapped_file
.
seek
(
0
)
self
.
fs
.
setcontents
(
self
.
path
,
self
.
wrapped_file
)
self
.
wrapped_file
.
seek
(
pos
)
def
close
(
self
):
with
self
.
_lock
:
if
not
self
.
closed
:
...
...
@@ -315,8 +316,8 @@ class ConnectionManagerFS(LazyFS):
self
.
_poll_sleeper
=
threading
.
Event
()
self
.
connected
=
connected
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64 * 1024):
    """Forward a setcontents call to the wrapped filesystem unchanged."""
    target = self.wrapped_fs
    return target.setcontents(path, data,
                              encoding=encoding,
                              errors=errors,
                              chunk_size=chunk_size)
def
__getstate__
(
self
):
state
=
super
(
ConnectionManagerFS
,
self
)
.
__getstate__
()
...
...
@@ -329,7 +330,7 @@ class ConnectionManagerFS(LazyFS):
super
(
ConnectionManagerFS
,
self
)
.
__setstate__
(
state
)
self
.
_connection_cond
=
threading
.
Condition
()
self
.
_poll_sleeper
=
threading
.
Event
()
def
wait_for_connection
(
self
,
timeout
=
None
,
force_wait
=
False
):
self
.
_connection_cond
.
acquire
()
try
:
...
...
@@ -397,7 +398,7 @@ def _ConnectionManagerFS_method_wrapper(func):
self
.
connected
=
True
return
result
return
wrapper
wrap_fs_methods
(
_ConnectionManagerFS_method_wrapper
,
ConnectionManagerFS
)
...
...
@@ -536,12 +537,12 @@ class CacheFSMixin(FS):
except
KeyError
:
pass
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwd
s
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwarg
s
):
# Try to validate the entry using the cached info
try
:
ci
=
self
.
__get_cached_info
(
path
)
except
KeyError
:
if
path
in
(
""
,
"/"
):
if
path
in
(
""
,
"/"
):
raise
ResourceInvalidError
(
path
)
try
:
ppath
=
dirname
(
path
)
...
...
@@ -549,38 +550,38 @@ class CacheFSMixin(FS):
except
KeyError
:
pass
else
:
if
not
fs
.
utils
.
isdir
(
super
(
CacheFSMixin
,
self
),
ppath
,
pci
.
info
):
if
not
fs
.
utils
.
isdir
(
super
(
CacheFSMixin
,
self
),
ppath
,
pci
.
info
):
raise
ResourceInvalidError
(
path
)
if
pci
.
has_full_children
:
raise
ResourceNotFoundError
(
path
)
else
:
if
not
fs
.
utils
.
isfile
(
super
(
CacheFSMixin
,
self
),
path
,
ci
.
info
):
if
not
fs
.
utils
.
isfile
(
super
(
CacheFSMixin
,
self
),
path
,
ci
.
info
):
raise
ResourceInvalidError
(
path
)
f
=
super
(
CacheFSMixin
,
self
)
.
open
(
path
,
mode
,
**
kwd
s
)
f
=
super
(
CacheFSMixin
,
self
)
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwarg
s
)
if
"w"
in
mode
or
"a"
in
mode
or
"+"
in
mode
:
with
self
.
__cache_lock
:
self
.
__cache
.
clear
(
path
)
f
=
self
.
_CacheInvalidatingFile
(
self
,
path
,
f
,
mode
)
f
=
self
.
_CacheInvalidatingFile
(
self
,
path
,
f
,
mode
)
return
f
class
_CacheInvalidatingFile
(
FileWrapper
):
def
__init__
(
self
,
owner
,
path
,
wrapped_file
,
mode
=
None
):
def
__init__
(
self
,
owner
,
path
,
wrapped_file
,
mode
=
None
):
self
.
path
=
path
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
sup
.
__init__
(
wrapped_file
,
mode
)
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
sup
.
__init__
(
wrapped_file
,
mode
)
self
.
owner
=
owner
def
_write
(
self
,
string
,
flushing
=
False
):
def
_write
(
self
,
string
,
flushing
=
False
):
with
self
.
owner
.
_CacheFSMixin__cache_lock
:
self
.
owner
.
_CacheFSMixin__cache
.
clear
(
self
.
path
)
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
return
sup
.
_write
(
string
,
flushing
=
flushing
)
def
_truncate
(
self
,
size
):
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
return
sup
.
_write
(
string
,
flushing
=
flushing
)
def
_truncate
(
self
,
size
):
with
self
.
owner
.
_CacheFSMixin__cache_lock
:
self
.
owner
.
_CacheFSMixin__cache
.
clear
(
self
.
path
)
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
sup
=
super
(
CacheFSMixin
.
_CacheInvalidatingFile
,
self
)
return
sup
.
_truncate
(
size
)
def
exists
(
self
,
path
):
def
exists
(
self
,
path
):
try
:
self
.
getinfo
(
path
)
except
ResourceNotFoundError
:
...
...
@@ -588,7 +589,7 @@ class CacheFSMixin(FS):
else
:
return
True
def
isdir
(
self
,
path
):
def
isdir
(
self
,
path
):
try
:
self
.
__cache
.
iternames
(
path
)
.
next
()
return
True
...
...
@@ -601,9 +602,9 @@ class CacheFSMixin(FS):
except
ResourceNotFoundError
:
return
False
else
:
return
fs
.
utils
.
isdir
(
super
(
CacheFSMixin
,
self
),
path
,
info
)
return
fs
.
utils
.
isdir
(
super
(
CacheFSMixin
,
self
),
path
,
info
)
def
isfile
(
self
,
path
):
def
isfile
(
self
,
path
):
try
:
self
.
__cache
.
iternames
(
path
)
.
next
()
return
False
...
...
@@ -616,17 +617,17 @@ class CacheFSMixin(FS):
except
ResourceNotFoundError
:
return
False
else
:
return
fs
.
utils
.
isfile
(
super
(
CacheFSMixin
,
self
),
path
,
info
)
return
fs
.
utils
.
isfile
(
super
(
CacheFSMixin
,
self
),
path
,
info
)
def
getinfo
(
self
,
path
):
def
getinfo
(
self
,
path
):
try
:
ci
=
self
.
__get_cached_info
(
path
)
if
not
ci
.
has_full_info
:
raise
KeyError
info
=
ci
.
info
except
KeyError
:
info
=
super
(
CacheFSMixin
,
self
)
.
getinfo
(
path
)
self
.
__set_cached_info
(
path
,
CachedInfo
(
info
))
info
=
super
(
CacheFSMixin
,
self
)
.
getinfo
(
path
)
self
.
__set_cached_info
(
path
,
CachedInfo
(
info
))
return
info
def
listdir
(
self
,
path
=
""
,
*
args
,
**
kwds
):
...
...
@@ -670,9 +671,9 @@ class CacheFSMixin(FS):
def
getsize
(
self
,
path
):
return
self
.
getinfo
(
path
)[
"size"
]
def
setcontents
(
self
,
path
,
contents
=
b
(
""
)
,
chunk_size
=
64
*
1024
):
supsc
=
super
(
CacheFSMixin
,
self
)
.
setcontents
res
=
supsc
(
path
,
contents
,
chunk_size
=
chunk_size
)
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
supsc
=
super
(
CacheFSMixin
,
self
)
.
setcontents
res
=
supsc
(
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
chunk_size
)
with
self
.
__cache_lock
:
self
.
__cache
.
clear
(
path
)
self
.
__cache
[
path
]
=
CachedInfo
.
new_file_stub
()
...
...
fs/rpcfs.py
View file @
3ea4efe1
...
...
@@ -10,36 +10,42 @@ class from the :mod:`fs.expose.xmlrpc` module.
import
xmlrpclib
import
socket
import
threading
import
base64
from
fs.base
import
*
from
fs.errors
import
*
from
fs.path
import
*
from
fs
import
iotools
from
fs.filelike
import
StringIO
import
six
from
six
import
PY3
,
b
def
re_raise_faults
(
func
):
"""Decorator to re-raise XML-RPC faults as proper exceptions."""
def
wrapper
(
*
args
,
**
kwds
):
def
wrapper
(
*
args
,
**
kwds
):
try
:
return
func
(
*
args
,
**
kwds
)
return
func
(
*
args
,
**
kwds
)
except
(
xmlrpclib
.
Fault
),
f
:
#raise
# Make sure it's in a form we can handle
print
f
.
faultString
bits
=
f
.
faultString
.
split
(
" "
)
if
bits
[
0
]
not
in
[
"<type"
,
"<class"
]:
if
bits
[
0
]
not
in
[
"<type"
,
"<class"
]:
raise
f
# Find the class/type object
bits
=
" "
.
join
(
bits
[
1
:])
.
split
(
">:"
)
cls
=
bits
[
0
]
msg
=
">:"
.
join
(
bits
[
1
:])
cls
=
cls
.
strip
(
'
\'
'
)
print
"-"
+
cls
cls
=
_object_by_name
(
cls
)
# Re-raise using the remainder of the fault code as message
if
cls
:
if
issubclass
(
cls
,
FSError
):
if
issubclass
(
cls
,
FSError
):
raise
cls
(
''
,
msg
=
msg
)
else
:
raise
cls
(
msg
)
...
...
@@ -49,7 +55,7 @@ def re_raise_faults(func):
return
wrapper
def
_object_by_name
(
name
,
root
=
None
):
def
_object_by_name
(
name
,
root
=
None
):
"""Look up an object by dotted-name notation."""
bits
=
name
.
split
(
"."
)
if
root
is
None
:
...
...
@@ -59,11 +65,11 @@ def _object_by_name(name,root=None):
try
:
obj
=
__builtins__
[
bits
[
0
]]
except
KeyError
:
obj
=
__import__
(
bits
[
0
],
globals
())
obj
=
__import__
(
bits
[
0
],
globals
())
else
:
obj
=
getattr
(
root
,
bits
[
0
])
obj
=
getattr
(
root
,
bits
[
0
])
if
len
(
bits
)
>
1
:
return
_object_by_name
(
"."
.
join
(
bits
[
1
:]),
obj
)
return
_object_by_name
(
"."
.
join
(
bits
[
1
:]),
obj
)
else
:
return
obj
...
...
@@ -71,11 +77,11 @@ def _object_by_name(name,root=None):
class
ReRaiseFaults
:
"""XML-RPC proxy wrapper that re-raises Faults as proper Exceptions."""
def
__init__
(
self
,
obj
):
def
__init__
(
self
,
obj
):
self
.
_obj
=
obj
def
__getattr__
(
self
,
attr
):
val
=
getattr
(
self
.
_obj
,
attr
)
def
__getattr__
(
self
,
attr
):
val
=
getattr
(
self
.
_obj
,
attr
)
if
callable
(
val
):
val
=
re_raise_faults
(
val
)
self
.
__dict__
[
attr
]
=
val
...
...
@@ -120,9 +126,9 @@ class RPCFS(FS):
kwds
=
dict
(
allow_none
=
True
,
use_datetime
=
True
)
if
self
.
_transport
is
not
None
:
proxy
=
xmlrpclib
.
ServerProxy
(
self
.
uri
,
self
.
_transport
,
**
kwds
)
proxy
=
xmlrpclib
.
ServerProxy
(
self
.
uri
,
self
.
_transport
,
**
kwds
)
else
:
proxy
=
xmlrpclib
.
ServerProxy
(
self
.
uri
,
**
kwds
)
proxy
=
xmlrpclib
.
ServerProxy
(
self
.
uri
,
**
kwds
)
return
ReRaiseFaults
(
proxy
)
...
...
@@ -134,7 +140,7 @@ class RPCFS(FS):
@synchronize
def
__getstate__
(
self
):
state
=
super
(
RPCFS
,
self
)
.
__getstate__
()
state
=
super
(
RPCFS
,
self
)
.
__getstate__
()
try
:
del
state
[
'proxy'
]
except
KeyError
:
...
...
@@ -152,15 +158,11 @@ class RPCFS(FS):
must return something that can be represented in ASCII. The default
is base64-encoded UTF8.
"""
if
PY3
:
return
path
return
path
.
encode
(
"utf8"
)
.
encode
(
"base64"
)
return
six
.
text_type
(
base64
.
b64encode
(
path
.
encode
(
"utf8"
)),
'ascii'
)
def decode_path(self, path):
    """Decode paths arriving over the wire."""
    if not PY3:
        # Python 2: paths travel base64-encoded UTF-8 (see encode_path).
        return six.text_type(base64.b64decode(path.encode('ascii')), 'utf8')
    return path
@synchronize
def
getmeta
(
self
,
meta_name
,
default
=
NoDefaultMeta
):
...
...
@@ -170,7 +172,7 @@ class RPCFS(FS):
meta
=
self
.
proxy
.
getmeta_default
(
meta_name
,
default
)
if
isinstance
(
meta
,
basestring
):
# To allow transport of meta with invalid xml chars (like null)
meta
=
meta
.
encode
(
'base64'
)
meta
=
self
.
encode_path
(
meta
)
return
meta
@synchronize
...
...
@@ -178,37 +180,40 @@ class RPCFS(FS):
return
self
.
proxy
.
hasmeta
(
meta_name
)
@synchronize
def
open
(
self
,
path
,
mode
=
"r"
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
# TODO: chunked transport of large files
path
=
self
.
encode_path
(
path
)
e
path
=
self
.
encode_path
(
path
)
if
"w"
in
mode
:
self
.
proxy
.
set_contents
(
path
,
xmlrpclib
.
Binary
(
b
(
""
)))
self
.
proxy
.
set_contents
(
epath
,
xmlrpclib
.
Binary
(
b
(
""
)))
if
"r"
in
mode
or
"a"
in
mode
or
"+"
in
mode
:
try
:
data
=
self
.
proxy
.
get_contents
(
path
,
"rb"
)
.
data
data
=
self
.
proxy
.
get_contents
(
e
path
,
"rb"
)
.
data
except
IOError
:
if
"w"
not
in
mode
and
"a"
not
in
mode
:
raise
ResourceNotFoundError
(
path
)
if
not
self
.
isdir
(
dirname
(
path
)):
raise
ParentDirectoryMissingError
(
path
)
self
.
proxy
.
set_contents
(
path
,
xmlrpclib
.
Binary
(
b
(
""
)))
self
.
proxy
.
set_contents
(
path
,
xmlrpclib
.
Binary
(
b
(
""
)))
else
:
data
=
b
(
""
)
f
=
StringIO
(
data
)
if
"a"
not
in
mode
:
f
.
seek
(
0
,
0
)
f
.
seek
(
0
,
0
)
else
:
f
.
seek
(
0
,
2
)
f
.
seek
(
0
,
2
)
oldflush
=
f
.
flush
oldclose
=
f
.
close
oldtruncate
=
f
.
truncate
def
newflush
():
self
.
_lock
.
acquire
()
try
:
oldflush
()
self
.
proxy
.
set_contents
(
path
,
xmlrpclib
.
Binary
(
f
.
getvalue
()))
self
.
proxy
.
set_contents
(
epath
,
xmlrpclib
.
Binary
(
f
.
getvalue
()))
finally
:
self
.
_lock
.
release
()
def
newclose
():
self
.
_lock
.
acquire
()
try
:
...
...
@@ -216,6 +221,7 @@ class RPCFS(FS):
oldclose
()
finally
:
self
.
_lock
.
release
()
def
newtruncate
(
size
=
None
):
self
.
_lock
.
acquire
()
try
:
...
...
@@ -248,24 +254,32 @@ class RPCFS(FS):
def
listdir
(
self
,
path
=
"./"
,
wildcard
=
None
,
full
=
False
,
absolute
=
False
,
dirs_only
=
False
,
files_only
=
False
):
enc_path
=
self
.
encode_path
(
path
)
if
not
callable
(
wildcard
):
entries
=
self
.
proxy
.
listdir
(
enc_path
,
wildcard
,
full
,
absolute
,
dirs_only
,
files_only
)
entries
=
self
.
proxy
.
listdir
(
enc_path
,
wildcard
,
full
,
absolute
,
dirs_only
,
files_only
)
entries
=
[
self
.
decode_path
(
e
)
for
e
in
entries
]
else
:
entries
=
self
.
proxy
.
listdir
(
enc_path
,
None
,
False
,
False
,
dirs_only
,
files_only
)
entries
=
self
.
proxy
.
listdir
(
enc_path
,
None
,
False
,
False
,
dirs_only
,
files_only
)
entries
=
[
self
.
decode_path
(
e
)
for
e
in
entries
]
entries
=
[
e
for
e
in
entries
if
wildcard
(
e
)]
if
full
:
entries
=
[
relpath
(
pathjoin
(
path
,
e
))
for
e
in
entries
]
entries
=
[
relpath
(
pathjoin
(
path
,
e
))
for
e
in
entries
]
elif
absolute
:
entries
=
[
abspath
(
pathjoin
(
path
,
e
))
for
e
in
entries
]
entries
=
[
abspath
(
pathjoin
(
path
,
e
))
for
e
in
entries
]
return
entries
@synchronize
def
makedir
(
self
,
path
,
recursive
=
False
,
allow_recreate
=
False
):
path
=
self
.
encode_path
(
path
)
return
self
.
proxy
.
makedir
(
path
,
recursive
,
allow_recreate
)
return
self
.
proxy
.
makedir
(
path
,
recursive
,
allow_recreate
)
@synchronize
def
remove
(
self
,
path
):
...
...
@@ -275,13 +289,13 @@ class RPCFS(FS):
@synchronize
def
removedir
(
self
,
path
,
recursive
=
False
,
force
=
False
):
path
=
self
.
encode_path
(
path
)
return
self
.
proxy
.
removedir
(
path
,
recursive
,
force
)
return
self
.
proxy
.
removedir
(
path
,
recursive
,
force
)
@synchronize
def
rename
(
self
,
src
,
dst
):
src
=
self
.
encode_path
(
src
)
dst
=
self
.
encode_path
(
dst
)
return
self
.
proxy
.
rename
(
src
,
dst
)
return
self
.
proxy
.
rename
(
src
,
dst
)
@synchronize
def
settimes
(
self
,
path
,
accessed_time
,
modified_time
):
...
...
@@ -302,19 +316,19 @@ class RPCFS(FS):
def
getxattr
(
self
,
path
,
attr
,
default
=
None
):
path
=
self
.
encode_path
(
path
)
attr
=
self
.
encode_path
(
attr
)
return
self
.
fs
.
getxattr
(
path
,
attr
,
default
)
return
self
.
fs
.
getxattr
(
path
,
attr
,
default
)
@synchronize
def
setxattr
(
self
,
path
,
attr
,
value
):
path
=
self
.
encode_path
(
path
)
attr
=
self
.
encode_path
(
attr
)
return
self
.
fs
.
setxattr
(
path
,
attr
,
value
)
return
self
.
fs
.
setxattr
(
path
,
attr
,
value
)
@synchronize
def
delxattr
(
self
,
path
,
attr
):
path
=
self
.
encode_path
(
path
)
attr
=
self
.
encode_path
(
attr
)
return
self
.
fs
.
delxattr
(
path
,
attr
)
return
self
.
fs
.
delxattr
(
path
,
attr
)
@synchronize
def
listxattrs
(
self
,
path
):
...
...
@@ -325,13 +339,13 @@ class RPCFS(FS):
def
copy
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
16384
):
src
=
self
.
encode_path
(
src
)
dst
=
self
.
encode_path
(
dst
)
return
self
.
proxy
.
copy
(
src
,
dst
,
overwrite
,
chunk_size
)
return
self
.
proxy
.
copy
(
src
,
dst
,
overwrite
,
chunk_size
)
@synchronize
def
move
(
self
,
src
,
dst
,
overwrite
=
False
,
chunk_size
=
16384
):
src
=
self
.
encode_path
(
src
)
dst
=
self
.
encode_path
(
dst
)
return
self
.
proxy
.
move
(
src
,
dst
,
overwrite
,
chunk_size
)
return
self
.
proxy
.
move
(
src
,
dst
,
overwrite
,
chunk_size
)
@synchronize
def
movedir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
16384
):
...
...
@@ -343,6 +357,4 @@ class RPCFS(FS):
def
copydir
(
self
,
src
,
dst
,
overwrite
=
False
,
ignore_errors
=
False
,
chunk_size
=
16384
):
src
=
self
.
encode_path
(
src
)
dst
=
self
.
encode_path
(
dst
)
return
self
.
proxy
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
return
self
.
proxy
.
copydir
(
src
,
dst
,
overwrite
,
ignore_errors
,
chunk_size
)
fs/s3fs.py
View file @
3ea4efe1
...
...
@@ -26,7 +26,9 @@ from fs.path import *
from
fs.errors
import
*
from
fs.remote
import
*
from
fs.filelike
import
LimitBytesFile
from
fs
import
iotools
import
six
# Boto is not thread-safe, so we need to use a per-thread S3 connection.
if
hasattr
(
threading
,
"local"
):
...
...
@@ -246,19 +248,19 @@ class S3FS(FS):
s3path
=
self
.
_s3path
(
path
)
k
=
self
.
_s3bukt
.
get_key
(
s3path
)
k
.
make_public
()
def
getpathurl
(
self
,
path
,
allow_none
=
False
,
expires
=
3600
):
"""Returns a url that corresponds to the given path."""
s3path
=
self
.
_s3path
(
path
)
k
=
self
.
_s3bukt
.
get_key
(
s3path
)
# Is there AllUsers group with READ permissions?
is_public
=
True
in
[
grant
.
permission
==
'READ'
and
\
is_public
=
True
in
[
grant
.
permission
==
'READ'
and
grant
.
uri
==
'http://acs.amazonaws.com/groups/global/AllUsers'
for
grant
in
k
.
get_acl
()
.
acl
.
grants
]
for
grant
in
k
.
get_acl
()
.
acl
.
grants
]
url
=
k
.
generate_url
(
expires
,
force_http
=
is_public
)
if
url
==
None
:
if
not
allow_none
:
raise
NoPathURLError
(
path
=
path
)
...
...
@@ -267,14 +269,17 @@ class S3FS(FS):
if
is_public
:
# Strip time token; it has no sense for public resource
url
=
url
.
split
(
'?'
)[
0
]
return
url
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
s3path
=
self
.
_s3path
(
path
)
if
isinstance
(
data
,
six
.
text_type
):
data
=
data
.
encode
(
encoding
=
encoding
,
errors
=
errors
)
self
.
_sync_set_contents
(
s3path
,
data
)
def
open
(
self
,
path
,
mode
=
"r"
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
"""Open the named file in the given mode.
This method downloads the file contents into a local temporary file
...
...
@@ -504,7 +509,7 @@ class S3FS(FS):
def
removedir
(
self
,
path
,
recursive
=
False
,
force
=
False
):
"""Remove the directory at the given path."""
if
normpath
(
path
)
in
(
''
,
'/'
):
raise
RemoveRootError
(
path
)
raise
RemoveRootError
(
path
)
s3path
=
self
.
_s3path
(
path
)
if
s3path
!=
self
.
_prefix
:
s3path
=
s3path
+
self
.
_separator
...
...
@@ -654,7 +659,7 @@ class S3FS(FS):
yield
item
else
:
prefix
=
self
.
_s3path
(
path
)
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
name
=
relpath
(
self
.
_uns3path
(
k
.
name
,
prefix
))
if
name
!=
""
:
if
not
isinstance
(
name
,
unicode
):
...
...
@@ -682,7 +687,7 @@ class S3FS(FS):
yield
(
item
,
self
.
getinfo
(
item
))
else
:
prefix
=
self
.
_s3path
(
path
)
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
name
=
relpath
(
self
.
_uns3path
(
k
.
name
,
prefix
))
if
name
!=
""
:
if
not
isinstance
(
name
,
unicode
):
...
...
@@ -709,7 +714,7 @@ class S3FS(FS):
yield
(
item
,
self
.
getinfo
(
item
))
else
:
prefix
=
self
.
_s3path
(
path
)
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
for
k
in
self
.
_s3bukt
.
list
(
prefix
=
prefix
):
name
=
relpath
(
self
.
_uns3path
(
k
.
name
,
prefix
))
if
name
!=
""
:
if
not
isinstance
(
name
,
unicode
):
...
...
fs/sftpfs.py
View file @
3ea4efe1
...
...
@@ -19,6 +19,8 @@ from fs.base import *
from
fs.path
import
*
from
fs.errors
import
*
from
fs.utils
import
isdir
,
isfile
from
fs
import
iotools
class
WrongHostKeyError
(
RemoteConnectionError
):
pass
...
...
@@ -108,7 +110,6 @@ class SFTPFS(FS):
if other authentication is not succesful
"""
credentials
=
dict
(
username
=
username
,
password
=
password
,
pkey
=
pkey
)
...
...
@@ -300,12 +301,12 @@ class SFTPFS(FS):
self
.
_transport
.
close
()
self
.
closed
=
True
def
_normpath
(
self
,
path
):
if
not
isinstance
(
path
,
unicode
):
def
_normpath
(
self
,
path
):
if
not
isinstance
(
path
,
unicode
):
path
=
path
.
decode
(
self
.
encoding
)
npath
=
pathjoin
(
self
.
root_path
,
relpath
(
normpath
(
path
)))
if
not
isprefix
(
self
.
root_path
,
npath
):
raise
PathError
(
path
,
msg
=
"Path is outside root:
%(path)
s"
)
npath
=
pathjoin
(
self
.
root_path
,
relpath
(
normpath
(
path
)))
if
not
isprefix
(
self
.
root_path
,
npath
):
raise
PathError
(
path
,
msg
=
"Path is outside root:
%(path)
s"
)
return
npath
def
getpathurl
(
self
,
path
,
allow_none
=
False
):
...
...
@@ -325,17 +326,19 @@ class SFTPFS(FS):
@synchronize
@convert_os_errors
def
open
(
self
,
path
,
mode
=
"rb"
,
bufsize
=-
1
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
bufsize
=-
1
,
**
kwargs
):
npath
=
self
.
_normpath
(
path
)
if
self
.
isdir
(
path
):
msg
=
"that's a directory:
%(path)
s"
raise
ResourceInvalidError
(
path
,
msg
=
msg
)
raise
ResourceInvalidError
(
path
,
msg
=
msg
)
# paramiko implements its own buffering and write-back logic,
# so we don't need to use a RemoteFileBuffer here.
f
=
self
.
client
.
open
(
npath
,
mode
,
bufsize
)
f
=
self
.
client
.
open
(
npath
,
mode
,
bufsize
)
# Unfortunately it has a broken truncate() method.
# TODO: implement this as a wrapper
old_truncate
=
f
.
truncate
def
new_truncate
(
size
=
None
):
if
size
is
None
:
size
=
f
.
tell
()
...
...
@@ -354,7 +357,7 @@ class SFTPFS(FS):
@synchronize
@convert_os_errors
def
exists
(
self
,
path
):
def
exists
(
self
,
path
):
if
path
in
(
''
,
'/'
):
return
True
npath
=
self
.
_normpath
(
path
)
...
...
@@ -369,7 +372,7 @@ class SFTPFS(FS):
@synchronize
@convert_os_errors
def
isdir
(
self
,
path
):
if
path
in
(
''
,
'/'
):
if
normpath
(
path
)
in
(
''
,
'/'
):
return
True
npath
=
self
.
_normpath
(
path
)
try
:
...
...
@@ -378,7 +381,7 @@ class SFTPFS(FS):
if
getattr
(
e
,
"errno"
,
None
)
==
2
:
return
False
raise
return
statinfo
.
S_ISDIR
(
stat
.
st_mode
)
return
statinfo
.
S_ISDIR
(
stat
.
st_mode
)
!=
0
@synchronize
@convert_os_errors
...
...
@@ -390,7 +393,7 @@ class SFTPFS(FS):
if
getattr
(
e
,
"errno"
,
None
)
==
2
:
return
False
raise
return
statinfo
.
S_ISREG
(
stat
.
st_mode
)
return
statinfo
.
S_ISREG
(
stat
.
st_mode
)
!=
0
@synchronize
@convert_os_errors
...
...
fs/tempfs.py
View file @
3ea4efe1
...
...
@@ -10,13 +10,14 @@ import os
import
os.path
import
time
import
tempfile
import
platform
from
fs.base
import
synchronize
from
fs.osfs
import
OSFS
from
fs.errors
import
*
from
fs
import
_thread_synchronize_default
class
TempFS
(
OSFS
):
"""Create a Filesystem in a temporary directory (with tempfile.mkdtemp),
...
...
@@ -38,7 +39,7 @@ class TempFS(OSFS):
self
.
identifier
=
identifier
self
.
temp_dir
=
temp_dir
self
.
dir_mode
=
dir_mode
self
.
_temp_dir
=
tempfile
.
mkdtemp
(
identifier
or
"TempFS"
,
dir
=
temp_dir
)
self
.
_temp_dir
=
tempfile
.
mkdtemp
(
identifier
or
"TempFS"
,
dir
=
temp_dir
)
self
.
_cleaned
=
False
super
(
TempFS
,
self
)
.
__init__
(
self
.
_temp_dir
,
dir_mode
=
dir_mode
,
thread_synchronize
=
thread_synchronize
)
...
...
@@ -65,6 +66,7 @@ class TempFS(OSFS):
# dir_mode=self.dir_mode,
# thread_synchronize=self.thread_synchronize)
@synchronize
def
close
(
self
):
"""Removes the temporary directory.
...
...
@@ -73,13 +75,13 @@ class TempFS(OSFS):
Note that once this method has been called, the FS object may
no longer be used.
"""
super
(
TempFS
,
self
)
.
close
()
super
(
TempFS
,
self
)
.
close
()
# Depending on how resources are freed by the OS, there could
# be some transient errors when freeing a TempFS soon after it
# was used. If they occur, do a small sleep and try again.
try
:
self
.
_close
()
except
(
ResourceLockedError
,
ResourceInvalidError
):
except
(
ResourceLockedError
,
ResourceInvalidError
):
time
.
sleep
(
0.5
)
self
.
_close
()
...
...
@@ -97,20 +99,23 @@ class TempFS(OSFS):
try
:
# shutil.rmtree doesn't handle long paths on win32,
# so we walk the tree by hand.
entries
=
os
.
walk
(
self
.
root_path
,
topdown
=
False
)
for
(
dir
,
dirnames
,
filenames
)
in
entries
:
entries
=
os
.
walk
(
self
.
root_path
,
topdown
=
False
)
for
(
dir
,
dirnames
,
filenames
)
in
entries
:
for
filename
in
filenames
:
try
:
os_remove
(
os
.
path
.
join
(
dir
,
filename
))
os_remove
(
os
.
path
.
join
(
dir
,
filename
))
except
ResourceNotFoundError
:
pass
for
dirname
in
dirnames
:
try
:
os_rmdir
(
os
.
path
.
join
(
dir
,
dirname
))
os_rmdir
(
os
.
path
.
join
(
dir
,
dirname
))
except
ResourceNotFoundError
:
pass
os
.
rmdir
(
self
.
root_path
)
try
:
os
.
rmdir
(
self
.
root_path
)
except
OSError
:
pass
self
.
_cleaned
=
True
finally
:
self
.
_lock
.
release
()
super
(
TempFS
,
self
)
.
close
()
super
(
TempFS
,
self
)
.
close
()
fs/tests/__init__.py
View file @
3ea4efe1
...
...
@@ -11,7 +11,7 @@ from __future__ import with_statement
# be captured by nose and reported appropriately
import
sys
import
logging
#
logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
logging
.
basicConfig
(
level
=
logging
.
ERROR
,
stream
=
sys
.
stdout
)
from
fs.base
import
*
from
fs.path
import
*
...
...
@@ -20,7 +20,8 @@ from fs.filelike import StringIO
import
datetime
import
unittest
import
os
,
os
.
path
import
os
import
os.path
import
pickle
import
random
import
copy
...
...
@@ -34,6 +35,7 @@ except ImportError:
import
six
from
six
import
PY3
,
b
class
FSTestCases
(
object
):
"""Base suite of testcases for filesystem implementations.
...
...
@@ -80,7 +82,6 @@ class FSTestCases(object):
except
NoMetaError
:
self
.
assertFalse
(
self
.
fs
.
hasmeta
(
meta_name
))
def
test_root_dir
(
self
):
self
.
assertTrue
(
self
.
fs
.
isdir
(
""
))
self
.
assertTrue
(
self
.
fs
.
isdir
(
"/"
))
...
...
@@ -94,10 +95,10 @@ class FSTestCases(object):
except
NoSysPathError
:
pass
else
:
self
.
assertTrue
(
isinstance
(
syspath
,
unicode
))
syspath
=
self
.
fs
.
getsyspath
(
"/"
,
allow_none
=
True
)
self
.
assertTrue
(
isinstance
(
syspath
,
unicode
))
syspath
=
self
.
fs
.
getsyspath
(
"/"
,
allow_none
=
True
)
if
syspath
is
not
None
:
self
.
assertTrue
(
isinstance
(
syspath
,
unicode
))
self
.
assertTrue
(
isinstance
(
syspath
,
unicode
))
def
test_debug
(
self
):
str
(
self
.
fs
)
...
...
@@ -119,49 +120,54 @@ class FSTestCases(object):
assert
False
,
"ResourceInvalidError was not raised"
def
test_writefile
(
self
):
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
open
,
"test1.txt"
)
f
=
self
.
fs
.
open
(
"test1.txt"
,
"wb"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
open
,
"test1.txt"
)
f
=
self
.
fs
.
open
(
"test1.txt"
,
"wb"
)
f
.
write
(
b
(
"testing"
))
f
.
close
()
self
.
assertTrue
(
self
.
check
(
"test1.txt"
))
f
=
self
.
fs
.
open
(
"test1.txt"
,
"rb"
)
self
.
assertEquals
(
f
.
read
(),
b
(
"testing"
))
f
=
self
.
fs
.
open
(
"test1.txt"
,
"rb"
)
self
.
assertEquals
(
f
.
read
(),
b
(
"testing"
))
f
.
close
()
f
=
self
.
fs
.
open
(
"test1.txt"
,
"wb"
)
f
=
self
.
fs
.
open
(
"test1.txt"
,
"wb"
)
f
.
write
(
b
(
"test file overwrite"
))
f
.
close
()
self
.
assertTrue
(
self
.
check
(
"test1.txt"
))
f
=
self
.
fs
.
open
(
"test1.txt"
,
"rb"
)
self
.
assertEquals
(
f
.
read
(),
b
(
"test file overwrite"
))
f
=
self
.
fs
.
open
(
"test1.txt"
,
"rb"
)
self
.
assertEquals
(
f
.
read
(),
b
(
"test file overwrite"
))
f
.
close
()
def
test_setcontents
(
self
):
# setcontents() should accept both a string...
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
# ...and a file-like object
self
.
fs
.
setcontents
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
self
.
fs
.
setcontents
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
# setcontents() should accept both a string...
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
),
chunk_size
=
2
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
),
chunk_size
=
2
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
# ...and a file-like object
self
.
fs
.
setcontents
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)),
chunk_size
=
2
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
self
.
fs
.
setcontents
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)),
chunk_size
=
2
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
def
test_setcontents_async
(
self
):
# setcontents() should accept both a string...
self
.
fs
.
setcontents_async
(
"hello"
,
b
(
"world"
))
.
wait
()
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
# ...and a file-like object
self
.
fs
.
setcontents_async
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)))
.
wait
()
self
.
fs
.
setcontents_async
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)))
.
wait
()
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
),
b
(
"to you, good sir!"
))
self
.
fs
.
setcontents_async
(
"hello"
,
b
(
"world"
),
chunk_size
=
2
)
.
wait
()
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"world"
))
# ...and a file-like object
self
.
fs
.
setcontents_async
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)),
chunk_size
=
2
)
.
wait
()
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
self
.
fs
.
setcontents_async
(
"hello"
,
StringIO
(
b
(
"to you, good sir!"
)),
chunk_size
=
2
)
.
wait
()
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"hello"
,
"rb"
),
b
(
"to you, good sir!"
))
def
test_isdir_isfile
(
self
):
self
.
assertFalse
(
self
.
fs
.
exists
(
"dir1"
))
...
...
@@ -182,7 +188,7 @@ class FSTestCases(object):
def
test_listdir
(
self
):
def
check_unicode
(
items
):
for
item
in
items
:
self
.
assertTrue
(
isinstance
(
item
,
unicode
))
self
.
assertTrue
(
isinstance
(
item
,
unicode
))
self
.
fs
.
setcontents
(
u"a"
,
b
(
''
))
self
.
fs
.
setcontents
(
"b"
,
b
(
''
))
self
.
fs
.
setcontents
(
"foo"
,
b
(
''
))
...
...
@@ -206,7 +212,7 @@ class FSTestCases(object):
check_unicode
(
d2
)
# Create some deeper subdirectories, to make sure their
# contents are not inadvertantly included
self
.
fs
.
makedir
(
"p/1/2/3"
,
recursive
=
True
)
self
.
fs
.
makedir
(
"p/1/2/3"
,
recursive
=
True
)
self
.
fs
.
setcontents
(
"p/1/2/3/a"
,
b
(
''
))
self
.
fs
.
setcontents
(
"p/1/2/3/b"
,
b
(
''
))
self
.
fs
.
setcontents
(
"p/1/2/3/foo"
,
b
(
''
))
...
...
@@ -218,7 +224,7 @@ class FSTestCases(object):
contains_a
=
self
.
fs
.
listdir
(
wildcard
=
"*a*"
)
self
.
assertEqual
(
sorted
(
dirs_only
),
[
u"p"
,
u"q"
])
self
.
assertEqual
(
sorted
(
files_only
),
[
u"a"
,
u"b"
,
u"bar"
,
u"foo"
])
self
.
assertEqual
(
sorted
(
contains_a
),
[
u"a"
,
u"bar"
])
self
.
assertEqual
(
sorted
(
contains_a
),
[
u"a"
,
u"bar"
])
check_unicode
(
dirs_only
)
check_unicode
(
files_only
)
check_unicode
(
contains_a
)
...
...
@@ -237,16 +243,17 @@ class FSTestCases(object):
self
.
assertEqual
(
sorted
(
d4
),
[
u"p/1/2/3/a"
,
u"p/1/2/3/b"
,
u"p/1/2/3/bar"
,
u"p/1/2/3/foo"
])
check_unicode
(
d4
)
# Test that appropriate errors are raised
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
listdir
,
"zebra"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
listdir
,
"foo"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
listdir
,
"zebra"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
listdir
,
"foo"
)
def
test_listdirinfo
(
self
):
def
check_unicode
(
items
):
for
(
nm
,
info
)
in
items
:
self
.
assertTrue
(
isinstance
(
nm
,
unicode
))
def
check_equal
(
items
,
target
):
names
=
[
nm
for
(
nm
,
info
)
in
items
]
self
.
assertEqual
(
sorted
(
names
),
sorted
(
target
))
for
(
nm
,
info
)
in
items
:
self
.
assertTrue
(
isinstance
(
nm
,
unicode
))
def
check_equal
(
items
,
target
):
names
=
[
nm
for
(
nm
,
info
)
in
items
]
self
.
assertEqual
(
sorted
(
names
),
sorted
(
target
))
self
.
fs
.
setcontents
(
u"a"
,
b
(
''
))
self
.
fs
.
setcontents
(
"b"
,
b
(
''
))
self
.
fs
.
setcontents
(
"foo"
,
b
(
''
))
...
...
@@ -271,7 +278,7 @@ class FSTestCases(object):
check_unicode
(
d2
)
# Create some deeper subdirectories, to make sure their
# contents are not inadvertantly included
self
.
fs
.
makedir
(
"p/1/2/3"
,
recursive
=
True
)
self
.
fs
.
makedir
(
"p/1/2/3"
,
recursive
=
True
)
self
.
fs
.
setcontents
(
"p/1/2/3/a"
,
b
(
''
))
self
.
fs
.
setcontents
(
"p/1/2/3/b"
,
b
(
''
))
self
.
fs
.
setcontents
(
"p/1/2/3/foo"
,
b
(
''
))
...
...
@@ -283,7 +290,7 @@ class FSTestCases(object):
contains_a
=
self
.
fs
.
listdirinfo
(
wildcard
=
"*a*"
)
check_equal
(
dirs_only
,
[
u"p"
,
u"q"
])
check_equal
(
files_only
,
[
u"a"
,
u"b"
,
u"bar"
,
u"foo"
])
check_equal
(
contains_a
,
[
u"a"
,
u"bar"
])
check_equal
(
contains_a
,
[
u"a"
,
u"bar"
])
check_unicode
(
dirs_only
)
check_unicode
(
files_only
)
check_unicode
(
contains_a
)
...
...
@@ -302,20 +309,20 @@ class FSTestCases(object):
check_equal
(
d4
,
[
u"p/1/2/3/a"
,
u"p/1/2/3/b"
,
u"p/1/2/3/bar"
,
u"p/1/2/3/foo"
])
check_unicode
(
d4
)
# Test that appropriate errors are raised
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
listdirinfo
,
"zebra"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
listdirinfo
,
"foo"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
listdirinfo
,
"zebra"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
listdirinfo
,
"foo"
)
def
test_walk
(
self
):
self
.
fs
.
setcontents
(
'a.txt'
,
b
(
'hello'
))
self
.
fs
.
setcontents
(
'b.txt'
,
b
(
'world'
))
self
.
fs
.
makeopendir
(
'foo'
)
.
setcontents
(
'c'
,
b
(
'123'
))
sorted_walk
=
sorted
([(
d
,
sorted
(
fs
))
for
(
d
,
fs
)
in
self
.
fs
.
walk
()])
sorted_walk
=
sorted
([(
d
,
sorted
(
fs
))
for
(
d
,
fs
)
in
self
.
fs
.
walk
()])
self
.
assertEquals
(
sorted_walk
,
[(
"/"
,
[
"a.txt"
,
"b.txt"
]),
(
"/foo"
,[
"c"
])])
[(
"/"
,
[
"a.txt"
,
"b.txt"
]),
(
"/foo"
,
[
"c"
])])
# When searching breadth-first, shallow entries come first
found_a
=
False
for
_
,
files
in
self
.
fs
.
walk
(
search
=
"breadth"
):
for
_
,
files
in
self
.
fs
.
walk
(
search
=
"breadth"
):
if
"a.txt"
in
files
:
found_a
=
True
if
"c"
in
files
:
...
...
@@ -323,12 +330,13 @@ class FSTestCases(object):
assert
found_a
,
"breadth search order was wrong"
# When searching depth-first, deep entries come first
found_c
=
False
for
_
,
files
in
self
.
fs
.
walk
(
search
=
"depth"
):
for
_
,
files
in
self
.
fs
.
walk
(
search
=
"depth"
):
if
"c"
in
files
:
found_c
=
True
if
"a.txt"
in
files
:
break
assert
found_c
,
"depth search order was wrong: "
+
str
(
list
(
self
.
fs
.
walk
(
search
=
"depth"
)))
assert
found_c
,
"depth search order was wrong: "
+
\
str
(
list
(
self
.
fs
.
walk
(
search
=
"depth"
)))
def
test_walk_wildcard
(
self
):
self
.
fs
.
setcontents
(
'a.txt'
,
b
(
'hello'
))
...
...
@@ -338,7 +346,7 @@ class FSTestCases(object):
for
dir_path
,
paths
in
self
.
fs
.
walk
(
wildcard
=
'*.txt'
):
for
path
in
paths
:
self
.
assert_
(
path
.
endswith
(
'.txt'
))
for
dir_path
,
paths
in
self
.
fs
.
walk
(
wildcard
=
lambda
fn
:
fn
.
endswith
(
'.txt'
)):
for
dir_path
,
paths
in
self
.
fs
.
walk
(
wildcard
=
lambda
fn
:
fn
.
endswith
(
'.txt'
)):
for
path
in
paths
:
self
.
assert_
(
path
.
endswith
(
'.txt'
))
...
...
@@ -347,22 +355,28 @@ class FSTestCases(object):
self
.
fs
.
setcontents
(
'b.txt'
,
b
(
'world'
))
self
.
fs
.
makeopendir
(
'foo'
)
.
setcontents
(
'c'
,
b
(
'123'
))
self
.
fs
.
makeopendir
(
'.svn'
)
.
setcontents
(
'ignored'
,
b
(
''
))
for
dir_path
,
paths
in
self
.
fs
.
walk
(
dir_wildcard
=
lambda
fn
:
not
fn
.
endswith
(
'.svn'
)):
for
dir_path
,
paths
in
self
.
fs
.
walk
(
dir_wildcard
=
lambda
fn
:
not
fn
.
endswith
(
'.svn'
)):
for
path
in
paths
:
self
.
assert_
(
'.svn'
not
in
path
)
def
test_walkfiles
(
self
):
self
.
fs
.
makeopendir
(
'bar'
)
.
setcontents
(
'a.txt'
,
b
(
'123'
))
self
.
fs
.
makeopendir
(
'foo'
)
.
setcontents
(
'b'
,
b
(
'123'
))
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
()),[
"/bar/a.txt"
,
"/foo/b"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
(
dir_wildcard
=
"*foo*"
)),[
"/foo/b"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
(
wildcard
=
"*.txt"
)),[
"/bar/a.txt"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
()),
[
"/bar/a.txt"
,
"/foo/b"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
(
dir_wildcard
=
"*foo*"
)),
[
"/foo/b"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkfiles
(
wildcard
=
"*.txt"
)),
[
"/bar/a.txt"
])
def
test_walkdirs
(
self
):
self
.
fs
.
makeopendir
(
'bar'
)
.
setcontents
(
'a.txt'
,
b
(
'123'
))
self
.
fs
.
makeopendir
(
'foo'
)
.
makeopendir
(
"baz"
)
.
setcontents
(
'b'
,
b
(
'123'
))
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkdirs
()),[
"/"
,
"/bar"
,
"/foo"
,
"/foo/baz"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkdirs
(
wildcard
=
"*foo*"
)),[
"/"
,
"/foo"
,
"/foo/baz"
])
self
.
fs
.
makeopendir
(
'foo'
)
.
makeopendir
(
"baz"
)
.
setcontents
(
'b'
,
b
(
'123'
))
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkdirs
()),
[
"/"
,
"/bar"
,
"/foo"
,
"/foo/baz"
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
walkdirs
(
wildcard
=
"*foo*"
)),
[
"/"
,
"/foo"
,
"/foo/baz"
])
def
test_unicode
(
self
):
alpha
=
u"
\N{GREEK SMALL LETTER ALPHA}
"
...
...
@@ -371,32 +385,33 @@ class FSTestCases(object):
self
.
fs
.
setcontents
(
alpha
+
"/a"
,
b
(
''
))
self
.
fs
.
setcontents
(
alpha
+
"/"
+
beta
,
b
(
''
))
self
.
assertTrue
(
self
.
check
(
alpha
))
self
.
assertEquals
(
sorted
(
self
.
fs
.
listdir
(
alpha
)),
[
"a"
,
beta
])
self
.
assertEquals
(
sorted
(
self
.
fs
.
listdir
(
alpha
)),
[
"a"
,
beta
])
def
test_makedir
(
self
):
check
=
self
.
check
self
.
fs
.
makedir
(
"a"
)
self
.
assertTrue
(
check
(
"a"
))
self
.
assertRaises
(
ParentDirectoryMissingError
,
self
.
fs
.
makedir
,
"a/b/c"
)
self
.
assertRaises
(
ParentDirectoryMissingError
,
self
.
fs
.
makedir
,
"a/b/c"
)
self
.
fs
.
makedir
(
"a/b/c"
,
recursive
=
True
)
self
.
assert_
(
check
(
"a/b/c"
))
self
.
fs
.
makedir
(
"foo/bar/baz"
,
recursive
=
True
)
self
.
assert_
(
check
(
"foo/bar/baz"
))
self
.
fs
.
makedir
(
"a/b/child"
)
self
.
assert_
(
check
(
"a/b/child"
))
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
makedir
,
"/a/b"
)
self
.
fs
.
makedir
(
"/a/b"
,
allow_recreate
=
True
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
makedir
,
"/a/b"
)
self
.
fs
.
makedir
(
"/a/b"
,
allow_recreate
=
True
)
self
.
fs
.
setcontents
(
"/a/file"
,
b
(
''
))
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
makedir
,
"a/file"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
makedir
,
"a/file"
)
def
test_remove
(
self
):
self
.
fs
.
setcontents
(
"a.txt"
,
b
(
''
))
self
.
assertTrue
(
self
.
check
(
"a.txt"
))
self
.
fs
.
remove
(
"a.txt"
)
self
.
assertFalse
(
self
.
check
(
"a.txt"
))
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
remove
,
"a.txt"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
remove
,
"a.txt"
)
self
.
fs
.
makedir
(
"dir1"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
remove
,
"dir1"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
remove
,
"dir1"
)
self
.
fs
.
setcontents
(
"/dir1/a.txt"
,
b
(
''
))
self
.
assertTrue
(
self
.
check
(
"dir1/a.txt"
))
self
.
fs
.
remove
(
"dir1/a.txt"
)
...
...
@@ -431,10 +446,11 @@ class FSTestCases(object):
self
.
assert_
(
check
(
"foo/file.txt"
))
# Ensure that force=True works as expected
self
.
fs
.
makedir
(
"frollic/waggle"
,
recursive
=
True
)
self
.
fs
.
setcontents
(
"frollic/waddle.txt"
,
b
(
"waddlewaddlewaddle"
))
self
.
assertRaises
(
DirectoryNotEmptyError
,
self
.
fs
.
removedir
,
"frollic"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
removedir
,
"frollic/waddle.txt"
)
self
.
fs
.
removedir
(
"frollic"
,
force
=
True
)
self
.
fs
.
setcontents
(
"frollic/waddle.txt"
,
b
(
"waddlewaddlewaddle"
))
self
.
assertRaises
(
DirectoryNotEmptyError
,
self
.
fs
.
removedir
,
"frollic"
)
self
.
assertRaises
(
ResourceInvalidError
,
self
.
fs
.
removedir
,
"frollic/waddle.txt"
)
self
.
fs
.
removedir
(
"frollic"
,
force
=
True
)
self
.
assert_
(
not
check
(
"frollic"
))
# Test removing unicode dirs
kappa
=
u"
\N{GREEK CAPITAL LETTER KAPPA}
"
...
...
@@ -443,59 +459,64 @@ class FSTestCases(object):
self
.
fs
.
removedir
(
kappa
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
removedir
,
kappa
)
self
.
assert_
(
not
self
.
fs
.
isdir
(
kappa
))
self
.
fs
.
makedir
(
pathjoin
(
"test"
,
kappa
),
recursive
=
True
)
self
.
assert_
(
check
(
pathjoin
(
"test"
,
kappa
)))
self
.
fs
.
removedir
(
"test"
,
force
=
True
)
self
.
fs
.
makedir
(
pathjoin
(
"test"
,
kappa
),
recursive
=
True
)
self
.
assert_
(
check
(
pathjoin
(
"test"
,
kappa
)))
self
.
fs
.
removedir
(
"test"
,
force
=
True
)
self
.
assert_
(
not
check
(
"test"
))
def
test_rename
(
self
):
check
=
self
.
check
# test renaming a file in the same directory
self
.
fs
.
setcontents
(
"foo.txt"
,
b
(
"Hello, World!"
))
self
.
fs
.
setcontents
(
"foo.txt"
,
b
(
"Hello, World!"
))
self
.
assert_
(
check
(
"foo.txt"
))
self
.
fs
.
rename
(
"foo.txt"
,
"bar.txt"
)
self
.
assert_
(
check
(
"bar.txt"
))
self
.
assert_
(
not
check
(
"foo.txt"
))
# test renaming a directory in the same directory
self
.
fs
.
makedir
(
"dir_a"
)
self
.
fs
.
setcontents
(
"dir_a/test.txt"
,
b
(
"testerific"
))
self
.
fs
.
setcontents
(
"dir_a/test.txt"
,
b
(
"testerific"
))
self
.
assert_
(
check
(
"dir_a"
))
self
.
fs
.
rename
(
"dir_a"
,
"dir_b"
)
self
.
fs
.
rename
(
"dir_a"
,
"dir_b"
)
self
.
assert_
(
check
(
"dir_b"
))
self
.
assert_
(
check
(
"dir_b/test.txt"
))
self
.
assert_
(
not
check
(
"dir_a/test.txt"
))
self
.
assert_
(
not
check
(
"dir_a"
))
# test renaming a file into a different directory
self
.
fs
.
makedir
(
"dir_a"
)
self
.
fs
.
rename
(
"dir_b/test.txt"
,
"dir_a/test.txt"
)
self
.
fs
.
rename
(
"dir_b/test.txt"
,
"dir_a/test.txt"
)
self
.
assert_
(
not
check
(
"dir_b/test.txt"
))
self
.
assert_
(
check
(
"dir_a/test.txt"
))
# test renaming a file into a non-existent directory
self
.
assertRaises
(
ParentDirectoryMissingError
,
self
.
fs
.
rename
,
"dir_a/test.txt"
,
"nonexistent/test.txt"
)
self
.
assertRaises
(
ParentDirectoryMissingError
,
self
.
fs
.
rename
,
"dir_a/test.txt"
,
"nonexistent/test.txt"
)
def
test_info
(
self
):
test_str
=
b
(
"Hello, World!"
)
self
.
fs
.
setcontents
(
"info.txt"
,
test_str
)
self
.
fs
.
setcontents
(
"info.txt"
,
test_str
)
info
=
self
.
fs
.
getinfo
(
"info.txt"
)
self
.
assertEqual
(
info
[
'size'
],
len
(
test_str
))
self
.
fs
.
desc
(
"info.txt"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
getinfo
,
"notafile"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
getinfo
,
"info.txt/inval"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
getinfo
,
"notafile"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
getinfo
,
"info.txt/inval"
)
def
test_getsize
(
self
):
test_str
=
b
(
"*"
)
*
23
self
.
fs
.
setcontents
(
"info.txt"
,
test_str
)
self
.
fs
.
setcontents
(
"info.txt"
,
test_str
)
size
=
self
.
fs
.
getsize
(
"info.txt"
)
self
.
assertEqual
(
size
,
len
(
test_str
))
def
test_movefile
(
self
):
check
=
self
.
check
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
):
self
.
fs
.
setcontents
(
path
,
contents
)
self
.
fs
.
setcontents
(
path
,
contents
)
def
checkcontents
(
path
):
check_contents
=
self
.
fs
.
getcontents
(
path
,
"rb"
)
self
.
assertEqual
(
check_contents
,
contents
)
self
.
assertEqual
(
check_contents
,
contents
)
return
contents
==
check_contents
self
.
fs
.
makedir
(
"foo/bar"
,
recursive
=
True
)
...
...
@@ -513,21 +534,23 @@ class FSTestCases(object):
self
.
assert_
(
checkcontents
(
"/c.txt"
))
makefile
(
"foo/bar/a.txt"
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
move
,
"foo/bar/a.txt"
,
"/c.txt"
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
move
,
"foo/bar/a.txt"
,
"/c.txt"
)
self
.
assert_
(
check
(
"foo/bar/a.txt"
))
self
.
assert_
(
check
(
"/c.txt"
))
self
.
fs
.
move
(
"foo/bar/a.txt"
,
"/c.txt"
,
overwrite
=
True
)
self
.
fs
.
move
(
"foo/bar/a.txt"
,
"/c.txt"
,
overwrite
=
True
)
self
.
assert_
(
not
check
(
"foo/bar/a.txt"
))
self
.
assert_
(
check
(
"/c.txt"
))
def
test_movedir
(
self
):
check
=
self
.
check
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
):
self
.
fs
.
setcontents
(
path
,
contents
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
movedir
,
"a"
,
"b"
)
self
.
assertRaises
(
ResourceNotFoundError
,
self
.
fs
.
movedir
,
"a"
,
"b"
)
self
.
fs
.
makedir
(
"a"
)
self
.
fs
.
makedir
(
"b"
)
makefile
(
"a/1.txt"
)
...
...
@@ -553,34 +576,37 @@ class FSTestCases(object):
self
.
assert_
(
not
check
(
"a"
))
self
.
fs
.
makedir
(
"a"
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
movedir
,
"copy of a"
,
"a"
)
self
.
fs
.
movedir
(
"copy of a"
,
"a"
,
overwrite
=
True
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
movedir
,
"copy of a"
,
"a"
)
self
.
fs
.
movedir
(
"copy of a"
,
"a"
,
overwrite
=
True
)
self
.
assert_
(
not
check
(
"copy of a"
))
self
.
assert_
(
check
(
"a/1.txt"
))
self
.
assert_
(
check
(
"a/2.txt"
))
self
.
assert_
(
check
(
"a/3.txt"
))
self
.
assert_
(
check
(
"a/foo/bar/baz.txt"
))
def
test_cant_copy_from_os
(
self
):
sys_executable
=
os
.
path
.
abspath
(
os
.
path
.
realpath
(
sys
.
executable
))
self
.
assertRaises
(
FSError
,
self
.
fs
.
copy
,
sys_executable
,
"py.exe"
)
self
.
assertRaises
(
FSError
,
self
.
fs
.
copy
,
sys_executable
,
"py.exe"
)
def
test_copyfile
(
self
):
check
=
self
.
check
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
,
contents
=
contents
):
self
.
fs
.
setcontents
(
path
,
contents
)
def
checkcontents
(
path
,
contents
=
contents
):
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
,
contents
=
contents
):
self
.
fs
.
setcontents
(
path
,
contents
)
def
checkcontents
(
path
,
contents
=
contents
):
check_contents
=
self
.
fs
.
getcontents
(
path
,
"rb"
)
self
.
assertEqual
(
check_contents
,
contents
)
self
.
assertEqual
(
check_contents
,
contents
)
return
contents
==
check_contents
self
.
fs
.
makedir
(
"foo/bar"
,
recursive
=
True
)
makefile
(
"foo/bar/a.txt"
)
self
.
assert_
(
check
(
"foo/bar/a.txt"
))
self
.
assert_
(
checkcontents
(
"foo/bar/a.txt"
))
#import rpdb2; rpdb2.start_embedded_debugger('password');
#
import rpdb2; rpdb2.start_embedded_debugger('password');
self
.
fs
.
copy
(
"foo/bar/a.txt"
,
"foo/b.txt"
)
self
.
assert_
(
check
(
"foo/bar/a.txt"
))
self
.
assert_
(
check
(
"foo/b.txt"
))
...
...
@@ -592,23 +618,26 @@ class FSTestCases(object):
self
.
assert_
(
check
(
"/c.txt"
))
self
.
assert_
(
checkcontents
(
"/c.txt"
))
makefile
(
"foo/bar/a.txt"
,
b
(
"different contents"
))
self
.
assert_
(
checkcontents
(
"foo/bar/a.txt"
,
b
(
"different contents"
)))
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
copy
,
"foo/bar/a.txt"
,
"/c.txt"
)
makefile
(
"foo/bar/a.txt"
,
b
(
"different contents"
))
self
.
assert_
(
checkcontents
(
"foo/bar/a.txt"
,
b
(
"different contents"
)))
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
copy
,
"foo/bar/a.txt"
,
"/c.txt"
)
self
.
assert_
(
checkcontents
(
"/c.txt"
))
self
.
fs
.
copy
(
"foo/bar/a.txt"
,
"/c.txt"
,
overwrite
=
True
)
self
.
assert_
(
checkcontents
(
"foo/bar/a.txt"
,
b
(
"different contents"
)))
self
.
assert_
(
checkcontents
(
"/c.txt"
,
b
(
"different contents"
)))
self
.
fs
.
copy
(
"foo/bar/a.txt"
,
"/c.txt"
,
overwrite
=
True
)
self
.
assert_
(
checkcontents
(
"foo/bar/a.txt"
,
b
(
"different contents"
)))
self
.
assert_
(
checkcontents
(
"/c.txt"
,
b
(
"different contents"
)))
def
test_copydir
(
self
):
check
=
self
.
check
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
):
self
.
fs
.
setcontents
(
path
,
contents
)
self
.
fs
.
setcontents
(
path
,
contents
)
def
checkcontents
(
path
):
check_contents
=
self
.
fs
.
getcontents
(
path
)
self
.
assertEqual
(
check_contents
,
contents
)
self
.
assertEqual
(
check_contents
,
contents
)
return
contents
==
check_contents
self
.
fs
.
makedir
(
"a"
)
...
...
@@ -632,8 +661,8 @@ class FSTestCases(object):
self
.
assert_
(
check
(
"a/foo/bar/baz.txt"
))
checkcontents
(
"a/1.txt"
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
copydir
,
"a"
,
"b"
)
self
.
fs
.
copydir
(
"a"
,
"b"
,
overwrite
=
True
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
fs
.
copydir
,
"a"
,
"b"
)
self
.
fs
.
copydir
(
"a"
,
"b"
,
overwrite
=
True
)
self
.
assert_
(
check
(
"b/1.txt"
))
self
.
assert_
(
check
(
"b/2.txt"
))
self
.
assert_
(
check
(
"b/3.txt"
))
...
...
@@ -642,9 +671,11 @@ class FSTestCases(object):
def
test_copydir_with_dotfile
(
self
):
check
=
self
.
check
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
contents
=
b
(
"If the implementation is hard to explain, it's a bad idea."
)
def
makefile
(
path
):
self
.
fs
.
setcontents
(
path
,
contents
)
self
.
fs
.
setcontents
(
path
,
contents
)
self
.
fs
.
makedir
(
"a"
)
makefile
(
"a/1.txt"
)
...
...
@@ -663,7 +694,7 @@ class FSTestCases(object):
def
test_readwriteappendseek
(
self
):
def
checkcontents
(
path
,
check_contents
):
read_contents
=
self
.
fs
.
getcontents
(
path
,
"rb"
)
self
.
assertEqual
(
read_contents
,
check_contents
)
self
.
assertEqual
(
read_contents
,
check_contents
)
return
read_contents
==
check_contents
test_strings
=
[
b
(
"Beautiful is better than ugly."
),
b
(
"Explicit is better than implicit."
),
...
...
@@ -688,11 +719,11 @@ class FSTestCases(object):
self
.
assert_
(
checkcontents
(
"b.txt"
,
test_strings
[
0
]))
f3
=
self
.
fs
.
open
(
"b.txt"
,
"ab"
)
# On win32, tell() gives zero until you actually write to the file
#self.assertEquals(f3.tell(),len(test_strings[0]))
#
self.assertEquals(f3.tell(),len(test_strings[0]))
f3
.
write
(
test_strings
[
1
])
self
.
assertEquals
(
f3
.
tell
(),
len
(
test_strings
[
0
])
+
len
(
test_strings
[
1
]))
self
.
assertEquals
(
f3
.
tell
(),
len
(
test_strings
[
0
])
+
len
(
test_strings
[
1
]))
f3
.
write
(
test_strings
[
2
])
self
.
assertEquals
(
f3
.
tell
(),
len
(
all_strings
))
self
.
assertEquals
(
f3
.
tell
(),
len
(
all_strings
))
f3
.
close
()
self
.
assert_
(
checkcontents
(
"b.txt"
,
all_strings
))
f4
=
self
.
fs
.
open
(
"b.txt"
,
"wb"
)
...
...
@@ -723,46 +754,45 @@ class FSTestCases(object):
def
test_truncate
(
self
):
def
checkcontents
(
path
,
check_contents
):
read_contents
=
self
.
fs
.
getcontents
(
path
,
"rb"
)
self
.
assertEqual
(
read_contents
,
check_contents
)
self
.
assertEqual
(
read_contents
,
check_contents
)
return
read_contents
==
check_contents
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
))
checkcontents
(
"hello"
,
b
(
"world"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"hi"
))
checkcontents
(
"hello"
,
b
(
"hi"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"1234567890"
))
checkcontents
(
"hello"
,
b
(
"1234567890"
))
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
self
.
fs
.
setcontents
(
"hello"
,
b
(
"world"
))
checkcontents
(
"hello"
,
b
(
"world"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"hi"
))
checkcontents
(
"hello"
,
b
(
"hi"
))
self
.
fs
.
setcontents
(
"hello"
,
b
(
"1234567890"
))
checkcontents
(
"hello"
,
b
(
"1234567890"
))
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
f
.
truncate
(
7
)
checkcontents
(
"hello"
,
b
(
"1234567"
))
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
checkcontents
(
"hello"
,
b
(
"1234567"
))
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
f
.
seek
(
5
)
f
.
truncate
()
checkcontents
(
"hello"
,
b
(
"12345"
))
checkcontents
(
"hello"
,
b
(
"12345"
))
def
test_truncate_to_larger_size
(
self
):
with
self
.
fs
.
open
(
"hello"
,
"wb"
)
as
f
:
with
self
.
fs
.
open
(
"hello"
,
"wb"
)
as
f
:
f
.
truncate
(
30
)
self
.
assertEquals
(
self
.
fs
.
getsize
(
"hello"
),
30
)
# Some file systems (FTPFS) don't support both reading and writing
if
self
.
fs
.
getmeta
(
'file.read_and_write'
,
True
):
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
with
self
.
fs
.
open
(
"hello"
,
"rb+"
)
as
f
:
f
.
seek
(
25
)
f
.
write
(
b
(
"123456"
))
with
self
.
fs
.
open
(
"hello"
,
"rb"
)
as
f
:
with
self
.
fs
.
open
(
"hello"
,
"rb"
)
as
f
:
f
.
seek
(
25
)
self
.
assertEquals
(
f
.
read
(),
b
(
"123456"
))
self
.
assertEquals
(
f
.
read
(),
b
(
"123456"
))
def
test_write_past_end_of_file
(
self
):
if
self
.
fs
.
getmeta
(
'file.read_and_write'
,
True
):
with
self
.
fs
.
open
(
"write_at_end"
,
"wb"
)
as
f
:
with
self
.
fs
.
open
(
"write_at_end"
,
"wb"
)
as
f
:
f
.
seek
(
25
)
f
.
write
(
b
(
"EOF"
))
with
self
.
fs
.
open
(
"write_at_end"
,
"rb"
)
as
f
:
self
.
assertEquals
(
f
.
read
(),
b
(
"
\x00
"
)
*
25
+
b
(
"EOF"
))
with
self
.
fs
.
open
(
"write_at_end"
,
"rb"
)
as
f
:
self
.
assertEquals
(
f
.
read
(),
b
(
"
\x00
"
)
*
25
+
b
(
"EOF"
))
def
test_with_statement
(
self
):
# This is a little tricky since 'with' is actually new syntax.
...
...
@@ -775,7 +805,7 @@ class FSTestCases(object):
code
+=
"with self.fs.open('f.txt','wb-') as testfile:
\n
"
code
+=
" testfile.write(contents)
\n
"
code
+=
"self.assertEquals(self.fs.getcontents('f.txt', 'rb'),contents)"
code
=
compile
(
code
,
"<string>"
,
'exec'
)
code
=
compile
(
code
,
"<string>"
,
'exec'
)
eval
(
code
)
# A 'with' statement raising an error
contents
=
"testing the with statement"
...
...
@@ -783,42 +813,43 @@ class FSTestCases(object):
code
+=
"with self.fs.open('f.txt','wb-') as testfile:
\n
"
code
+=
" testfile.write(contents)
\n
"
code
+=
" raise ValueError
\n
"
code
=
compile
(
code
,
"<string>"
,
'exec'
)
self
.
assertRaises
(
ValueError
,
eval
,
code
,
globals
(),
locals
())
self
.
assertEquals
(
self
.
fs
.
getcontents
(
'f.txt'
,
'rb'
),
contents
)
code
=
compile
(
code
,
"<string>"
,
'exec'
)
self
.
assertRaises
(
ValueError
,
eval
,
code
,
globals
(),
locals
())
self
.
assertEquals
(
self
.
fs
.
getcontents
(
'f.txt'
,
'rb'
),
contents
)
def
test_pickling
(
self
):
if
self
.
fs
.
getmeta
(
'pickle_contents'
,
True
):
self
.
fs
.
setcontents
(
"test1"
,
b
(
"hello world"
))
self
.
fs
.
setcontents
(
"test1"
,
b
(
"hello world"
))
fs2
=
pickle
.
loads
(
pickle
.
dumps
(
self
.
fs
))
self
.
assert_
(
fs2
.
isfile
(
"test1"
))
fs3
=
pickle
.
loads
(
pickle
.
dumps
(
self
.
fs
,
-
1
))
fs3
=
pickle
.
loads
(
pickle
.
dumps
(
self
.
fs
,
-
1
))
self
.
assert_
(
fs3
.
isfile
(
"test1"
))
else
:
# Just make sure it doesn't throw an exception
fs2
=
pickle
.
loads
(
pickle
.
dumps
(
self
.
fs
))
def
test_big_file
(
self
):
"""Test handling of a big file (1MB)"""
chunk_size
=
1024
*
256
num_chunks
=
4
def
chunk_stream
():
"""Generate predictable-but-randomy binary content."""
r
=
random
.
Random
(
0
)
randint
=
r
.
randint
int2byte
=
six
.
int2byte
for
_i
in
xrange
(
num_chunks
):
c
=
b
(
""
)
.
join
(
int2byte
(
randint
(
0
,
255
))
for
_j
in
xrange
(
chunk_size
//
8
))
c
=
b
(
""
)
.
join
(
int2byte
(
randint
(
0
,
255
))
for
_j
in
xrange
(
chunk_size
//
8
))
yield
c
*
8
f
=
self
.
fs
.
open
(
"bigfile"
,
"wb"
)
f
=
self
.
fs
.
open
(
"bigfile"
,
"wb"
)
try
:
for
chunk
in
chunk_stream
():
f
.
write
(
chunk
)
finally
:
f
.
close
()
chunks
=
chunk_stream
()
f
=
self
.
fs
.
open
(
"bigfile"
,
"rb"
)
f
=
self
.
fs
.
open
(
"bigfile"
,
"rb"
)
try
:
try
:
while
True
:
...
...
@@ -854,17 +885,19 @@ class FSTestCases(object):
self
.
assertRaises
(
RemoveRootError
,
self
.
fs
.
removedir
,
"/"
)
# May be disabled - see end of file
class
ThreadingTestCases
(
object
):
"""Testcases for thread-safety of FS implementations."""
# These are either too slow to be worth repeating,
# or cannot possibly break cross-thread.
_dont_retest
=
(
"test_pickling"
,
"test_multiple_overwrite"
,)
_dont_retest
=
(
"test_pickling"
,
"test_multiple_overwrite"
,)
__lock
=
threading
.
RLock
()
def
_yield
(
self
):
#time.sleep(0.001)
#
time.sleep(0.001)
# Yields without a delay
time
.
sleep
(
0
)
...
...
@@ -874,7 +907,7 @@ class ThreadingTestCases(object):
def
_unlock
(
self
):
self
.
__lock
.
release
()
def
_makeThread
(
self
,
func
,
errors
):
def
_makeThread
(
self
,
func
,
errors
):
def
runThread
():
try
:
func
()
...
...
@@ -884,74 +917,79 @@ class ThreadingTestCases(object):
thread
.
daemon
=
True
return
thread
def
_runThreads
(
self
,
*
funcs
):
def
_runThreads
(
self
,
*
funcs
):
check_interval
=
sys
.
getcheckinterval
()
sys
.
setcheckinterval
(
1
)
try
:
errors
=
[]
threads
=
[
self
.
_makeThread
(
f
,
errors
)
for
f
in
funcs
]
threads
=
[
self
.
_makeThread
(
f
,
errors
)
for
f
in
funcs
]
for
t
in
threads
:
t
.
start
()
for
t
in
threads
:
t
.
join
()
for
(
c
,
e
,
t
)
in
errors
:
raise
c
,
e
,
t
for
(
c
,
e
,
t
)
in
errors
:
raise
e
,
None
,
t
finally
:
sys
.
setcheckinterval
(
check_interval
)
def
test_setcontents_threaded
(
self
):
def
setcontents
(
name
,
contents
):
f
=
self
.
fs
.
open
(
name
,
"wb"
)
def
setcontents
(
name
,
contents
):
f
=
self
.
fs
.
open
(
name
,
"wb"
)
self
.
_yield
()
try
:
f
.
write
(
contents
)
self
.
_yield
()
finally
:
f
.
close
()
def
thread1
():
c
=
b
(
"thread1 was 'ere"
)
setcontents
(
"thread1.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread1.txt"
,
'rb'
),
c
)
setcontents
(
"thread1.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread1.txt"
,
'rb'
),
c
)
def
thread2
():
c
=
b
(
"thread2 was 'ere"
)
setcontents
(
"thread2.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread2.txt"
,
'rb'
),
c
)
self
.
_runThreads
(
thread1
,
thread2
)
setcontents
(
"thread2.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread2.txt"
,
'rb'
),
c
)
self
.
_runThreads
(
thread1
,
thread2
)
def
test_setcontents_threaded_samefile
(
self
):
def
setcontents
(
name
,
contents
):
f
=
self
.
fs
.
open
(
name
,
"wb"
)
def
setcontents
(
name
,
contents
):
f
=
self
.
fs
.
open
(
name
,
"wb"
)
self
.
_yield
()
try
:
f
.
write
(
contents
)
self
.
_yield
()
finally
:
f
.
close
()
def
thread1
():
c
=
b
(
"thread1 was 'ere"
)
setcontents
(
"threads.txt"
,
c
)
setcontents
(
"threads.txt"
,
c
)
self
.
_yield
()
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),[
"threads.txt"
])
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),
[
"threads.txt"
])
def
thread2
():
c
=
b
(
"thread2 was 'ere"
)
setcontents
(
"threads.txt"
,
c
)
setcontents
(
"threads.txt"
,
c
)
self
.
_yield
()
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),[
"threads.txt"
])
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),
[
"threads.txt"
])
def
thread3
():
c
=
b
(
"thread3 was 'ere"
)
setcontents
(
"threads.txt"
,
c
)
setcontents
(
"threads.txt"
,
c
)
self
.
_yield
()
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),[
"threads.txt"
])
self
.
assertEquals
(
self
.
fs
.
listdir
(
"/"
),
[
"threads.txt"
])
try
:
self
.
_runThreads
(
thread1
,
thread2
,
thread3
)
self
.
_runThreads
(
thread1
,
thread2
,
thread3
)
except
ResourceLockedError
:
# that's ok, some implementations don't support concurrent writes
pass
def
test_cases_in_separate_dirs
(
self
):
class
TestCases_in_subdir
(
self
.
__class__
,
unittest
.
TestCase
):
class
TestCases_in_subdir
(
self
.
__class__
,
unittest
.
TestCase
):
"""Run all testcases against a subdir of self.fs"""
def
__init__
(
this
,
subdir
):
def
__init__
(
this
,
subdir
):
super
(
TestCases_in_subdir
,
this
)
.
__init__
(
"test_listdir"
)
this
.
subdir
=
subdir
for
meth
in
dir
(
this
):
...
...
@@ -959,113 +997,136 @@ class ThreadingTestCases(object):
continue
if
meth
in
self
.
_dont_retest
:
continue
if
not
hasattr
(
FSTestCases
,
meth
):
if
not
hasattr
(
FSTestCases
,
meth
):
continue
if
self
.
fs
.
exists
(
subdir
):
self
.
fs
.
removedir
(
subdir
,
force
=
True
)
self
.
fs
.
removedir
(
subdir
,
force
=
True
)
self
.
assertFalse
(
self
.
fs
.
isdir
(
subdir
))
self
.
assertTrue
(
self
.
fs
.
isdir
(
"/"
))
self
.
fs
.
makedir
(
subdir
)
self
.
_yield
()
getattr
(
this
,
meth
)()
getattr
(
this
,
meth
)()
@property
def
fs
(
this
):
return
self
.
fs
.
opendir
(
this
.
subdir
)
def
check
(
this
,
p
):
return
self
.
check
(
pathjoin
(
this
.
subdir
,
relpath
(
p
)))
def
check
(
this
,
p
):
return
self
.
check
(
pathjoin
(
this
.
subdir
,
relpath
(
p
)))
def
thread1
():
TestCases_in_subdir
(
"thread1"
)
def
thread2
():
TestCases_in_subdir
(
"thread2"
)
def
thread3
():
TestCases_in_subdir
(
"thread3"
)
self
.
_runThreads
(
thread1
,
thread2
,
thread3
)
self
.
_runThreads
(
thread1
,
thread2
,
thread3
)
def
test_makedir_winner
(
self
):
errors
=
[]
def
makedir
():
try
:
self
.
fs
.
makedir
(
"testdir"
)
except
DestinationExistsError
,
e
:
errors
.
append
(
e
)
def
makedir_noerror
():
try
:
self
.
fs
.
makedir
(
"testdir"
,
allow_recreate
=
True
)
self
.
fs
.
makedir
(
"testdir"
,
allow_recreate
=
True
)
except
DestinationExistsError
,
e
:
errors
.
append
(
e
)
def
removedir
():
try
:
self
.
fs
.
removedir
(
"testdir"
)
except
(
ResourceNotFoundError
,
ResourceLockedError
),
e
:
except
(
ResourceNotFoundError
,
ResourceLockedError
),
e
:
errors
.
append
(
e
)
# One thread should succeed, one should error
self
.
_runThreads
(
makedir
,
makedir
)
self
.
assertEquals
(
len
(
errors
),
1
)
self
.
_runThreads
(
makedir
,
makedir
)
self
.
assertEquals
(
len
(
errors
),
1
)
self
.
fs
.
removedir
(
"testdir"
)
# One thread should succeed, two should error
errors
=
[]
self
.
_runThreads
(
makedir
,
makedir
,
makedir
)
self
.
_runThreads
(
makedir
,
makedir
,
makedir
)
if
len
(
errors
)
!=
2
:
raise
AssertionError
(
errors
)
self
.
fs
.
removedir
(
"testdir"
)
# All threads should succeed
errors
=
[]
self
.
_runThreads
(
makedir_noerror
,
makedir_noerror
,
makedir_noerror
)
self
.
assertEquals
(
len
(
errors
),
0
)
self
.
_runThreads
(
makedir_noerror
,
makedir_noerror
,
makedir_noerror
)
self
.
assertEquals
(
len
(
errors
),
0
)
self
.
assertTrue
(
self
.
fs
.
isdir
(
"testdir"
))
self
.
fs
.
removedir
(
"testdir"
)
# makedir() can beat removedir() and vice-versa
errors
=
[]
self
.
_runThreads
(
makedir
,
removedir
)
self
.
_runThreads
(
makedir
,
removedir
)
if
self
.
fs
.
isdir
(
"testdir"
):
self
.
assertEquals
(
len
(
errors
),
1
)
self
.
assertFalse
(
isinstance
(
errors
[
0
],
DestinationExistsError
))
self
.
assertEquals
(
len
(
errors
),
1
)
self
.
assertFalse
(
isinstance
(
errors
[
0
],
DestinationExistsError
))
self
.
fs
.
removedir
(
"testdir"
)
else
:
self
.
assertEquals
(
len
(
errors
),
0
)
self
.
assertEquals
(
len
(
errors
),
0
)
def
test_concurrent_copydir
(
self
):
self
.
fs
.
makedir
(
"a"
)
self
.
fs
.
makedir
(
"a/b"
)
self
.
fs
.
setcontents
(
"a/hello.txt"
,
b
(
"hello world"
))
self
.
fs
.
setcontents
(
"a/guido.txt"
,
b
(
"is a space alien"
))
self
.
fs
.
setcontents
(
"a/b/parrot.txt"
,
b
(
"pining for the fiords"
))
self
.
fs
.
setcontents
(
"a/hello.txt"
,
b
(
"hello world"
))
self
.
fs
.
setcontents
(
"a/guido.txt"
,
b
(
"is a space alien"
))
self
.
fs
.
setcontents
(
"a/b/parrot.txt"
,
b
(
"pining for the fiords"
))
def
copydir
():
self
.
_yield
()
self
.
fs
.
copydir
(
"a"
,
"copy of a"
)
self
.
fs
.
copydir
(
"a"
,
"copy of a"
)
def
copydir_overwrite
():
self
.
_yield
()
self
.
fs
.
copydir
(
"a"
,
"copy of a"
,
overwrite
=
True
)
self
.
fs
.
copydir
(
"a"
,
"copy of a"
,
overwrite
=
True
)
# This should error out since we're not overwriting
self
.
assertRaises
(
DestinationExistsError
,
self
.
_runThreads
,
copydir
,
copydir
)
self
.
assertRaises
(
DestinationExistsError
,
self
.
_runThreads
,
copydir
,
copydir
)
self
.
assert_
(
self
.
fs
.
isdir
(
'a'
))
self
.
assert_
(
self
.
fs
.
isdir
(
'a'
))
copydir_overwrite
()
self
.
assert_
(
self
.
fs
.
isdir
(
'a'
))
# This should run to completion and give a valid state, unless
# files get locked when written to.
try
:
self
.
_runThreads
(
copydir_overwrite
,
copydir_overwrite
)
self
.
_runThreads
(
copydir_overwrite
,
copydir_overwrite
)
except
ResourceLockedError
:
pass
self
.
assertTrue
(
self
.
fs
.
isdir
(
"copy of a"
))
self
.
assertTrue
(
self
.
fs
.
isdir
(
"copy of a/b"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/b/parrot.txt"
,
'rb'
),
b
(
"pining for the fiords"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/hello.txt"
,
'rb'
),
b
(
"hello world"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/guido.txt"
,
'rb'
),
b
(
"is a space alien"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/b/parrot.txt"
,
'rb'
),
b
(
"pining for the fiords"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/hello.txt"
,
'rb'
),
b
(
"hello world"
))
self
.
assertEqual
(
self
.
fs
.
getcontents
(
"copy of a/guido.txt"
,
'rb'
),
b
(
"is a space alien"
))
def
test_multiple_overwrite
(
self
):
contents
=
[
b
(
"contents one"
),
b
(
"contents the second"
),
b
(
"number three"
)]
contents
=
[
b
(
"contents one"
),
b
(
"contents the second"
),
b
(
"number three"
)]
def
thread1
():
for
i
in
xrange
(
30
):
for
c
in
contents
:
self
.
fs
.
setcontents
(
"thread1.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getsize
(
"thread1.txt"
),
len
(
c
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread1.txt"
,
'rb'
),
c
)
self
.
fs
.
setcontents
(
"thread1.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getsize
(
"thread1.txt"
),
len
(
c
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread1.txt"
,
'rb'
),
c
)
def
thread2
():
for
i
in
xrange
(
30
):
for
c
in
contents
:
self
.
fs
.
setcontents
(
"thread2.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getsize
(
"thread2.txt"
),
len
(
c
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread2.txt"
,
'rb'
),
c
)
self
.
_runThreads
(
thread1
,
thread2
)
self
.
fs
.
setcontents
(
"thread2.txt"
,
c
)
self
.
assertEquals
(
self
.
fs
.
getsize
(
"thread2.txt"
),
len
(
c
))
self
.
assertEquals
(
self
.
fs
.
getcontents
(
"thread2.txt"
,
'rb'
),
c
)
self
.
_runThreads
(
thread1
,
thread2
)
# Uncomment to temporarily disable threading tests
#class ThreadingTestCases(object):
#
class ThreadingTestCases(object):
# _dont_retest = ()
fs/tests/data/__init__.py
0 → 100644
View file @
3ea4efe1
fs/tests/test_expose.py
View file @
3ea4efe1
...
...
@@ -6,7 +6,8 @@
import
unittest
import
sys
import
os
,
os
.
path
import
os
import
os.path
import
socket
import
threading
import
time
...
...
@@ -32,6 +33,12 @@ try:
except
ImportError
:
if
not
PY3
:
raise
import
logging
logging
.
getLogger
(
'paramiko'
)
.
setLevel
(
logging
.
ERROR
)
logging
.
getLogger
(
'paramiko.transport'
)
.
setLevel
(
logging
.
ERROR
)
class
TestSFTPFS
(
TestRPCFS
):
__test__
=
not
PY3
...
...
@@ -55,7 +62,7 @@ except ImportError:
pass
else
:
from
fs.osfs
import
OSFS
class
TestFUSE
(
unittest
.
TestCase
,
FSTestCases
,
ThreadingTestCases
):
class
TestFUSE
(
unittest
.
TestCase
,
FSTestCases
,
ThreadingTestCases
):
def
setUp
(
self
):
self
.
temp_fs
=
TempFS
()
...
...
@@ -64,7 +71,7 @@ else:
self
.
mounted_fs
=
self
.
temp_fs
.
opendir
(
"root"
)
self
.
mount_point
=
self
.
temp_fs
.
getsyspath
(
"mount"
)
self
.
fs
=
OSFS
(
self
.
temp_fs
.
getsyspath
(
"mount"
))
self
.
mount_proc
=
fuse
.
mount
(
self
.
mounted_fs
,
self
.
mount_point
)
self
.
mount_proc
=
fuse
.
mount
(
self
.
mounted_fs
,
self
.
mount_point
)
def
tearDown
(
self
):
self
.
mount_proc
.
unmount
()
...
...
@@ -76,7 +83,7 @@ else:
fuse
.
unmount
(
self
.
mount_point
)
self
.
temp_fs
.
close
()
def
check
(
self
,
p
):
def
check
(
self
,
p
):
return
self
.
mounted_fs
.
exists
(
p
)
...
...
fs/tests/test_importhook.py
View file @
3ea4efe1
...
...
@@ -12,6 +12,7 @@ from fs.zipfs import ZipFS
from
six
import
b
class
TestFSImportHook
(
unittest
.
TestCase
):
def
setUp
(
self
):
...
...
@@ -140,4 +141,3 @@ class TestFSImportHook(unittest.TestCase):
sys
.
path_hooks
.
remove
(
FSImportHook
)
sys
.
path
.
pop
()
t
.
close
()
fs/tests/test_iotools.py
0 → 100644
View file @
3ea4efe1
from
__future__
import
unicode_literals
from
fs
import
iotools
import
io
import
unittest
from
os.path
import
dirname
,
join
,
abspath
try
:
unicode
except
NameError
:
unicode
=
str
class
OpenFilelike
(
object
):
def
__init__
(
self
,
make_f
):
self
.
make_f
=
make_f
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
return
self
.
make_f
()
def
__enter__
(
self
):
return
self
def
__exit__
(
self
,
*
args
,
**
kwargs
):
self
.
f
.
close
()
class
TestIOTools
(
unittest
.
TestCase
):
def
get_bin_file
(
self
):
path
=
join
(
dirname
(
abspath
(
__file__
)),
'data/UTF-8-demo.txt'
)
return
io
.
open
(
path
,
'rb'
)
def
test_make_stream
(
self
):
"""Test make_stream"""
with
self
.
get_bin_file
()
as
f
:
text
=
f
.
read
()
self
.
assert_
(
isinstance
(
text
,
bytes
))
with
self
.
get_bin_file
()
as
f
:
with
iotools
.
make_stream
(
"data/UTF-8-demo.txt"
,
f
,
'rt'
)
as
f2
:
text
=
f2
.
read
()
self
.
assert_
(
isinstance
(
text
,
unicode
))
def
test_decorator
(
self
):
"""Test filelike_to_stream decorator"""
o
=
OpenFilelike
(
self
.
get_bin_file
)
with
o
.
open
(
'file'
,
'rb'
)
as
f
:
text
=
f
.
read
()
self
.
assert_
(
isinstance
(
text
,
bytes
))
with
o
.
open
(
'file'
,
'rt'
)
as
f
:
text
=
f
.
read
()
self
.
assert_
(
isinstance
(
text
,
unicode
))
fs/tests/test_mountfs.py
View file @
3ea4efe1
...
...
@@ -2,10 +2,11 @@ from fs.mountfs import MountFS
from
fs.memoryfs
import
MemoryFS
import
unittest
class
TestMultiFS
(
unittest
.
TestCase
):
class
TestMountFS
(
unittest
.
TestCase
):
def
test_auto_close
(
self
):
"""Test M
ulti
FS auto close is working"""
"""Test M
ount
FS auto close is working"""
multi_fs
=
MountFS
()
m1
=
MemoryFS
()
m2
=
MemoryFS
()
...
...
@@ -18,7 +19,7 @@ class TestMultiFS(unittest.TestCase):
self
.
assert_
(
m2
.
closed
)
def
test_no_auto_close
(
self
):
"""Test M
ulti
FS auto close can be disabled"""
"""Test M
ount
FS auto close can be disabled"""
multi_fs
=
MountFS
(
auto_close
=
False
)
m1
=
MemoryFS
()
m2
=
MemoryFS
()
...
...
@@ -32,7 +33,7 @@ class TestMultiFS(unittest.TestCase):
def
test_mountfile
(
self
):
"""Test mounting a file"""
quote
=
"""If you wish to make an apple pie from scratch, you must first invent the universe."""
quote
=
b
"""If you wish to make an apple pie from scratch, you must first invent the universe."""
mem_fs
=
MemoryFS
()
mem_fs
.
makedir
(
'foo'
)
mem_fs
.
setcontents
(
'foo/bar.txt'
,
quote
)
...
...
@@ -58,11 +59,11 @@ class TestMultiFS(unittest.TestCase):
# Check changes are written back
mem_fs
.
setcontents
(
'foo/bar.txt'
,
'baz'
)
self
.
assertEqual
(
mount_fs
.
getcontents
(
'bar.txt'
),
'baz'
)
self
.
assertEqual
(
mount_fs
.
getcontents
(
'bar.txt'
),
b
'baz'
)
self
.
assertEqual
(
mount_fs
.
getsize
(
'bar.txt'
),
len
(
'baz'
))
# Check changes are written to the original fs
self
.
assertEqual
(
mem_fs
.
getcontents
(
'foo/bar.txt'
),
'baz'
)
self
.
assertEqual
(
mem_fs
.
getcontents
(
'foo/bar.txt'
),
b
'baz'
)
self
.
assertEqual
(
mem_fs
.
getsize
(
'foo/bar.txt'
),
len
(
'baz'
))
# Check unmount
...
...
fs/tests/test_remote.py
View file @
3ea4efe1
...
...
@@ -24,23 +24,27 @@ from fs.local_functools import wraps
from
six
import
PY3
,
b
class
RemoteTempFS
(
TempFS
):
"""
Simple filesystem implementing setfilecontents
for RemoteFileBuffer tests
"""
def
open
(
self
,
path
,
mode
=
'rb'
,
write_on_flush
=
True
):
def
open
(
self
,
path
,
mode
=
'rb'
,
write_on_flush
=
True
,
**
kwargs
):
if
'a'
in
mode
or
'r'
in
mode
or
'+'
in
mode
:
f
=
super
(
RemoteTempFS
,
self
)
.
open
(
path
,
'rb'
)
f
=
super
(
RemoteTempFS
,
self
)
.
open
(
path
,
mode
=
'rb'
,
**
kwargs
)
f
=
TellAfterCloseFile
(
f
)
else
:
f
=
None
return
RemoteFileBuffer
(
self
,
path
,
mode
,
f
,
write_on_flush
=
write_on_flush
)
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
f
=
super
(
RemoteTempFS
,
self
)
.
open
(
path
,
'wb'
)
return
RemoteFileBuffer
(
self
,
path
,
mode
,
f
,
write_on_flush
=
write_on_flush
)
def
setcontents
(
self
,
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
f
=
super
(
RemoteTempFS
,
self
)
.
open
(
path
,
'wb'
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
if
getattr
(
data
,
'read'
,
False
):
f
.
write
(
data
.
read
())
else
:
...
...
@@ -51,7 +55,7 @@ class RemoteTempFS(TempFS):
class
TellAfterCloseFile
(
object
):
"""File-like object that allows calling tell() after it's been closed."""
def
__init__
(
self
,
file
):
def
__init__
(
self
,
file
):
self
.
_finalpos
=
None
self
.
file
=
file
...
...
@@ -65,49 +69,49 @@ class TellAfterCloseFile(object):
return
self
.
_finalpos
return
self
.
file
.
tell
()
def
__getattr__
(
self
,
attr
):
return
getattr
(
self
.
file
,
attr
)
def
__getattr__
(
self
,
attr
):
return
getattr
(
self
.
file
,
attr
)
class
TestRemoteFileBuffer
(
unittest
.
TestCase
,
FSTestCases
,
ThreadingTestCases
):
class
FakeException
(
Exception
):
pass
def
setUp
(
self
):
self
.
fs
=
RemoteTempFS
()
self
.
original_setcontents
=
self
.
fs
.
setcontents
def
tearDown
(
self
):
def
tearDown
(
self
):
self
.
fs
.
close
()
self
.
fakeOff
()
def
fake_setcontents
(
self
,
path
,
content
=
b
(
''
),
chunk_size
=
16
*
1024
):
''' Fake replacement for RemoteTempFS setcontents() '''
raise
self
.
FakeException
(
"setcontents should not be called here!"
)
def
fakeOn
(
self
):
'''
Turn on fake_setcontents(). When setcontents on RemoteTempFS
is called, FakeException is raised and nothing is stored.
'''
self
.
fs
.
setcontents
=
self
.
fake_setcontents
def
fakeOff
(
self
):
''' Switch off fake_setcontents(). '''
self
.
fs
.
setcontents
=
self
.
original_setcontents
def
test_ondemand
(
self
):
'''
Tests on-demand loading of remote content in RemoteFileBuffer
'''
'''
contents
=
b
(
"Tristatricettri stribrnych strikacek strikalo"
)
+
\
b
(
"pres tristatricettri stribrnych strech."
)
f
=
self
.
fs
.
open
(
'test.txt'
,
'wb'
)
f
.
write
(
contents
)
f
.
close
()
# During following tests, no setcontents() should be called.
self
.
fakeOn
()
f
=
self
.
fs
.
open
(
'test.txt'
,
'rb'
)
self
.
assertEquals
(
f
.
read
(
10
),
contents
[:
10
])
f
.
wrapped_file
.
seek
(
0
,
SEEK_END
)
...
...
@@ -118,18 +122,18 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f
.
seek
(
0
,
SEEK_END
)
self
.
assertEquals
(
f
.
_rfile
.
tell
(),
len
(
contents
))
f
.
close
()
f
=
self
.
fs
.
open
(
'test.txt'
,
'ab'
)
self
.
assertEquals
(
f
.
tell
(),
len
(
contents
))
f
.
close
()
self
.
fakeOff
()
# Writing over the rfile edge
f
=
self
.
fs
.
open
(
'test.txt'
,
'wb+'
)
self
.
assertEquals
(
f
.
tell
(),
0
)
f
.
seek
(
len
(
contents
)
-
5
)
# Last 5 characters not loaded from remote file
# Last 5 characters not loaded from remote file
self
.
assertEquals
(
f
.
_rfile
.
tell
(),
len
(
contents
)
-
5
)
# Confirm that last 5 characters are still in rfile buffer
self
.
assertEquals
(
f
.
_rfile
.
read
(),
contents
[
-
5
:])
...
...
@@ -141,9 +145,9 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# We are on the end of file (and buffer not serve anything anymore)
self
.
assertEquals
(
f
.
read
(),
b
(
''
))
f
.
close
()
self
.
fakeOn
()
# Check if we wrote everything OK from
# previous writing over the remote buffer edge
f
=
self
.
fs
.
open
(
'test.txt'
,
'rb'
)
...
...
@@ -151,7 +155,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f
.
close
()
self
.
fakeOff
()
def
test_writeonflush
(
self
):
'''
Test 'write_on_flush' switch of RemoteFileBuffer.
...
...
@@ -168,7 +172,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
self
.
fakeOff
()
f
.
close
()
self
.
fakeOn
()
f
=
self
.
fs
.
open
(
'test.txt'
,
'wb'
,
write_on_flush
=
False
)
f
.
write
(
b
(
'Sample text'
))
# FakeException is not raised, because setcontents is not called
...
...
@@ -176,16 +180,16 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
f
.
write
(
b
(
'Second sample text'
))
self
.
assertRaises
(
self
.
FakeException
,
f
.
close
)
self
.
fakeOff
()
def
test_flush_and_continue
(
self
):
'''
This tests if partially loaded remote buffer can be flushed
back to remote destination and opened file is still
in good condition.
in good condition.
'''
contents
=
b
(
"Zlutoucky kun upel dabelske ody."
)
contents2
=
b
(
'Ententyky dva spaliky cert vyletel z elektriky'
)
f
=
self
.
fs
.
open
(
'test.txt'
,
'wb'
)
f
.
write
(
contents
)
f
.
close
()
...
...
@@ -202,12 +206,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Try if we have unocrrupted file locally...
self
.
assertEquals
(
f
.
read
(),
contents
[:
10
]
+
b
(
'x'
)
+
contents
[
11
:])
f
.
close
()
# And if we have uncorrupted file also on storage
f
=
self
.
fs
.
open
(
'test.txt'
,
'rb'
)
self
.
assertEquals
(
f
.
read
(),
contents
[:
10
]
+
b
(
'x'
)
+
contents
[
11
:])
f
.
close
()
# Now try it again, but write garbage behind edge of remote file
f
=
self
.
fs
.
open
(
'test.txt'
,
'rb+'
)
self
.
assertEquals
(
f
.
read
(
10
),
contents
[:
10
])
...
...
@@ -218,12 +222,12 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
# Try if we have unocrrupted file locally...
self
.
assertEquals
(
f
.
read
(),
contents
[:
10
]
+
contents2
)
f
.
close
()
# And if we have uncorrupted file also on storage
f
=
self
.
fs
.
open
(
'test.txt'
,
'rb'
)
self
.
assertEquals
(
f
.
read
(),
contents
[:
10
]
+
contents2
)
f
.
close
()
class
TestCacheFS
(
unittest
.
TestCase
,
FSTestCases
,
ThreadingTestCases
):
"""Test simple operation of CacheFS"""
...
...
@@ -267,7 +271,7 @@ class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
self
.
assertFalse
(
self
.
fs
.
isfile
(
"hello"
))
finally
:
self
.
fs
.
cache_timeout
=
old_timeout
class
TestConnectionManagerFS
(
unittest
.
TestCase
,
FSTestCases
):
#,ThreadingTestCases):
...
...
@@ -286,7 +290,7 @@ class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCase
class
DisconnectingFS
(
WrapFS
):
"""FS subclass that raises lots of RemoteConnectionErrors."""
def
__init__
(
self
,
fs
=
None
):
def
__init__
(
self
,
fs
=
None
):
if
fs
is
None
:
fs
=
TempFS
()
self
.
_connected
=
True
...
...
@@ -315,8 +319,8 @@ class DisconnectingFS(WrapFS):
time
.
sleep
(
random
.
random
()
*
0.1
)
self
.
_connected
=
not
self
.
_connected
def
setcontents
(
self
,
path
,
contents
=
b
(
''
)
,
chunk_size
=
64
*
1024
):
return
self
.
wrapped_fs
.
setcontents
(
path
,
contents
)
def
setcontents
(
self
,
path
,
data
=
b
(
''
),
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
return
self
.
wrapped_fs
.
setcontents
(
path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
def
close
(
self
):
if
not
self
.
closed
:
...
...
fs/tests/test_watch.py
View file @
3ea4efe1
...
...
@@ -29,6 +29,10 @@ if sys.platform == "win32":
else
:
watch_win32
=
None
import
logging
logging
.
getLogger
(
'pyinotify'
)
.
setLevel
(
logging
.
ERROR
)
import
six
from
six
import
PY3
,
b
...
...
@@ -53,7 +57,7 @@ class WatcherTestCases:
self
.
watchfs
.
_poll_cond
.
wait
()
self
.
watchfs
.
_poll_cond
.
release
()
else
:
time
.
sleep
(
2
)
#0.5)
time
.
sleep
(
2
)
def
assertEventOccurred
(
self
,
cls
,
path
=
None
,
event_list
=
None
,
**
attrs
):
if
not
self
.
checkEventOccurred
(
cls
,
path
,
event_list
,
**
attrs
):
...
...
@@ -222,4 +226,3 @@ class TestWatchers_MemoryFS_polling(TestWatchers_MemoryFS):
def
setUp
(
self
):
self
.
fs
=
memoryfs
.
MemoryFS
()
self
.
watchfs
=
ensure_watchable
(
self
.
fs
,
poll_interval
=
0.1
)
fs/tests/test_zipfs.py
View file @
3ea4efe1
...
...
@@ -17,6 +17,7 @@ from fs import zipfs
from
six
import
PY3
,
b
class
TestReadZipFS
(
unittest
.
TestCase
):
def
setUp
(
self
):
...
...
@@ -46,20 +47,22 @@ class TestReadZipFS(unittest.TestCase):
def
test_reads
(
self
):
def
read_contents
(
path
):
f
=
self
.
fs
.
open
(
path
)
f
=
self
.
fs
.
open
(
path
,
'rb'
)
contents
=
f
.
read
()
return
contents
def
check_contents
(
path
,
expected
):
self
.
assert_
(
read_contents
(
path
)
==
expected
)
self
.
assert_
(
read_contents
(
path
)
==
expected
)
check_contents
(
"a.txt"
,
b
(
"Hello, World!"
))
check_contents
(
"1.txt"
,
b
(
"1"
))
check_contents
(
"foo/bar/baz.txt"
,
b
(
"baz"
))
def
test_getcontents
(
self
):
def
read_contents
(
path
):
return
self
.
fs
.
getcontents
(
path
)
return
self
.
fs
.
getcontents
(
path
,
'rb'
)
def
check_contents
(
path
,
expected
):
self
.
assert_
(
read_contents
(
path
)
==
expected
)
self
.
assert_
(
read_contents
(
path
)
==
expected
)
check_contents
(
"a.txt"
,
b
(
"Hello, World!"
))
check_contents
(
"1.txt"
,
b
(
"1"
))
check_contents
(
"foo/bar/baz.txt"
,
b
(
"baz"
))
...
...
@@ -82,7 +85,7 @@ class TestReadZipFS(unittest.TestCase):
dir_list
=
self
.
fs
.
listdir
(
path
)
self
.
assert_
(
sorted
(
dir_list
)
==
sorted
(
expected
))
for
item
in
dir_list
:
self
.
assert_
(
isinstance
(
item
,
unicode
))
self
.
assert_
(
isinstance
(
item
,
unicode
))
check_listing
(
'/'
,
[
'a.txt'
,
'1.txt'
,
'foo'
,
'b.txt'
])
check_listing
(
'foo'
,
[
'second.txt'
,
'bar'
])
check_listing
(
'foo/bar'
,
[
'baz.txt'
])
...
...
fs/utils.py
View file @
3ea4efe1
...
...
@@ -72,6 +72,7 @@ def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1
if
src_lock
is
not
None
:
src_lock
.
release
()
def
copyfile_non_atomic
(
src_fs
,
src_path
,
dst_fs
,
dst_path
,
overwrite
=
True
,
chunk_size
=
64
*
1024
):
"""A non atomic version of copyfile (will not block other threads using src_fs or dst_fst)
...
...
fs/watch.py
View file @
3ea4efe1
...
...
@@ -5,14 +5,14 @@ fs.watch
Change notification support for FS.
This module defines a standard interface for FS subclasses that support change
notification callbacks. It also offers some WrapFS subclasses that can
notification callbacks. It also offers some WrapFS subclasses that can
simulate such an ability on top of an ordinary FS object.
An FS object that wants to be "watchable" must provide the following methods:
* ``add_watcher(callback,path="/",events=None,recursive=True)``
Request that the given callback be executed in response to changes
Request that the given callback be executed in response to changes
to the given path. A specific set of change events can be specified.
This method returns a Watcher object.
...
...
@@ -31,7 +31,7 @@ an iterator over the change events.
import
sys
import
weakref
import
threading
import
threading
import
Queue
import
traceback
...
...
@@ -291,29 +291,36 @@ class WatchableFS(WatchableFSMixin,WrapFS):
that might be made through other interfaces to the same filesystem.
"""
def
__init__
(
self
,
*
args
,
**
kwds
):
super
(
WatchableFS
,
self
)
.
__init__
(
*
args
,
**
kwds
)
def
__init__
(
self
,
*
args
,
**
kwds
):
super
(
WatchableFS
,
self
)
.
__init__
(
*
args
,
**
kwds
)
def
close
(
self
):
super
(
WatchableFS
,
self
)
.
close
()
super
(
WatchableFS
,
self
)
.
close
()
self
.
notify_watchers
(
CLOSED
)
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
existed
=
self
.
wrapped_fs
.
isfile
(
path
)
f
=
super
(
WatchableFS
,
self
)
.
open
(
path
,
mode
,
**
kwargs
)
f
=
super
(
WatchableFS
,
self
)
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
if
not
existed
:
self
.
notify_watchers
(
CREATED
,
path
)
self
.
notify_watchers
(
ACCESSED
,
path
)
return
WatchedFile
(
f
,
self
,
path
,
mode
)
self
.
notify_watchers
(
CREATED
,
path
)
self
.
notify_watchers
(
ACCESSED
,
path
)
return
WatchedFile
(
f
,
self
,
path
,
mode
)
def
setcontents
(
self
,
path
,
data
=
b
(
''
)
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
=
b
''
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
existed
=
self
.
wrapped_fs
.
isfile
(
path
)
ret
=
super
(
WatchableFS
,
self
)
.
setcontents
(
path
,
data
,
chunk_size
=
chunk_size
)
if
not
existed
:
self
.
notify_watchers
(
CREATED
,
path
)
self
.
notify_watchers
(
ACCESSED
,
path
)
self
.
notify_watchers
(
CREATED
,
path
)
self
.
notify_watchers
(
ACCESSED
,
path
)
if
data
:
self
.
notify_watchers
(
MODIFIED
,
path
,
True
)
self
.
notify_watchers
(
MODIFIED
,
path
,
True
)
return
ret
def
createfile
(
self
,
path
):
...
...
@@ -550,18 +557,18 @@ class PollingWatchableFS(WatchableFS):
for
(
k
,
v
)
in
new_info
.
iteritems
():
if
k
not
in
old_info
:
was_modified
=
True
break
break
elif
old_info
[
k
]
!=
v
:
if
k
in
(
"accessed_time"
,
"st_atime"
,):
was_accessed
=
True
elif
k
:
was_modified
=
True
break
break
else
:
for
k
in
old_info
:
if
k
not
in
new_info
:
was_modified
=
True
break
break
if
was_modified
:
self
.
notify_watchers
(
MODIFIED
,
fpath
,
True
)
elif
was_accessed
:
...
...
fs/wrapfs/__init__.py
View file @
3ea4efe1
...
...
@@ -150,21 +150,21 @@ class WrapFS(FS):
return
self
.
wrapped_fs
.
hassyspath
(
self
.
_encode
(
path
))
@rewrite_errors
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
**
kwargs
):
(
mode
,
wmode
)
=
self
.
_adjust_mode
(
mode
)
f
=
self
.
wrapped_fs
.
open
(
self
.
_encode
(
path
),
wmode
,
**
kwargs
)
return
self
.
_file_wrap
(
f
,
mode
)
@rewrite_errors
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
# We can't pass setcontents() through to the wrapped FS if the
# wrapper has defined a _file_wrap method, as it would bypass
# the file contents wrapping.
#if self._file_wrap.im_func is WrapFS._file_wrap.im_func:
if
getattr
(
self
.
__class__
,
'_file_wrap'
,
None
)
is
getattr
(
WrapFS
,
'_file_wrap'
,
None
):
return
self
.
wrapped_fs
.
setcontents
(
self
.
_encode
(
path
),
data
,
chunk_size
=
chunk_size
)
return
self
.
wrapped_fs
.
setcontents
(
self
.
_encode
(
path
),
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
else
:
return
super
(
WrapFS
,
self
)
.
setcontents
(
path
,
data
,
chunk_size
=
chunk_size
)
return
super
(
WrapFS
,
self
)
.
setcontents
(
path
,
data
,
encoding
=
encoding
,
errors
=
errors
,
chunk_size
=
chunk_size
)
@rewrite_errors
def
createfile
(
self
,
path
):
...
...
fs/wrapfs/limitsizefs.py
View file @
3ea4efe1
...
...
@@ -58,14 +58,20 @@ class LimitSizeFS(WrapFS):
raise
NoSysPathError
(
path
)
return
None
def
open
(
self
,
path
,
mode
=
"r"
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
path
=
relpath
(
normpath
(
path
))
with
self
.
_size_lock
:
try
:
size
=
self
.
getsize
(
path
)
except
ResourceNotFoundError
:
size
=
0
f
=
super
(
LimitSizeFS
,
self
)
.
open
(
path
,
mode
)
f
=
super
(
LimitSizeFS
,
self
)
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
if
"w"
not
in
mode
:
self
.
_set_file_size
(
path
,
None
,
1
)
else
:
...
...
@@ -92,12 +98,12 @@ class LimitSizeFS(WrapFS):
else
:
self
.
_file_sizes
[
path
]
=
(
size
,
count
)
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
f
=
None
try
:
try
:
f
=
self
.
open
(
path
,
'wb'
)
if
hasattr
(
data
,
'read'
):
chunk
=
data
.
read
(
chunk_size
)
chunk
=
data
.
read
(
chunk_size
)
while
chunk
:
f
.
write
(
chunk
)
chunk
=
data
.
read
(
chunk_size
)
...
...
@@ -106,7 +112,7 @@ class LimitSizeFS(WrapFS):
finally
:
if
f
is
not
None
:
f
.
close
()
def
_file_closed
(
self
,
path
):
self
.
_set_file_size
(
path
,
None
,
-
1
)
...
...
@@ -135,7 +141,7 @@ class LimitSizeFS(WrapFS):
return
cur_size
# We force use of several base FS methods,
# since they will fall back to writing out each file
# since they will fall back to writing out each file
# and thus will route through our size checking logic.
def
copy
(
self
,
src
,
dst
,
**
kwds
):
FS
.
copy
(
self
,
src
,
dst
,
**
kwds
)
...
...
@@ -233,7 +239,7 @@ class LimitSizeFile(FileWrapper):
self
.
fs
=
fs
self
.
path
=
path
self
.
_lock
=
fs
.
_lock
@synchronize
def
_write
(
self
,
data
,
flushing
=
False
):
pos
=
self
.
wrapped_file
.
tell
()
...
...
fs/wrapfs/readonlyfs.py
View file @
3ea4efe1
...
...
@@ -10,44 +10,52 @@ from fs.base import NoDefaultMeta
from
fs.wrapfs
import
WrapFS
from
fs.errors
import
UnsupportedError
,
NoSysPathError
class
ReadOnlyFS
(
WrapFS
):
""" Makes a FS object read only. Any operation that could potentially modify
the underlying file system will throw an UnsupportedError
Note that this isn't a secure sandbox, untrusted code could work around the
read-only restrictions by getting the base class. Its main purpose is to
provide a degree of safety if you want to protect an FS object from
accidental modification.
"""
def
getmeta
(
self
,
meta_name
,
default
=
NoDefaultMeta
):
if
meta_name
==
'read_only'
:
return
True
return
self
.
wrapped_fs
.
getmeta
(
meta_name
,
default
)
def
hasmeta
(
self
,
meta_name
):
if
meta_name
==
'read_only'
:
return
True
return
self
.
wrapped_fs
.
hasmeta
(
meta_name
)
def
getsyspath
(
self
,
path
,
allow_none
=
False
):
""" Doesn't technically modify the filesystem but could be used to work
around read-only restrictions. """
if
allow_none
:
return
None
raise
NoSysPathError
(
path
)
def
open
(
self
,
path
,
mode
=
'r'
,
**
kwargs
):
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
""" Only permit read access """
if
'w'
in
mode
or
'a'
in
mode
or
'+'
in
mode
:
raise
UnsupportedError
(
'write'
)
return
super
(
ReadOnlyFS
,
self
)
.
open
(
path
,
mode
,
**
kwargs
)
return
super
(
ReadOnlyFS
,
self
)
.
open
(
path
,
mode
=
mode
,
buffering
=
buffering
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
,
line_buffering
=
line_buffering
,
**
kwargs
)
def
_no_can_do
(
self
,
*
args
,
**
kwargs
):
""" Replacement method for methods that can modify the file system """
raise
UnsupportedError
(
'write'
)
move
=
_no_can_do
movedir
=
_no_can_do
copy
=
_no_can_do
...
...
fs/wrapfs/subfs.py
View file @
3ea4efe1
...
...
@@ -21,7 +21,7 @@ class SubFS(WrapFS):
def
__init__
(
self
,
wrapped_fs
,
sub_dir
):
self
.
sub_dir
=
abspath
(
normpath
(
sub_dir
))
super
(
SubFS
,
self
)
.
__init__
(
wrapped_fs
)
super
(
SubFS
,
self
)
.
__init__
(
wrapped_fs
)
def
_encode
(
self
,
path
):
return
pathjoin
(
self
.
sub_dir
,
relpath
(
normpath
(
path
)))
...
...
@@ -34,17 +34,17 @@ class SubFS(WrapFS):
return
'<SubFS:
%
s/
%
s>'
%
(
self
.
wrapped_fs
,
self
.
sub_dir
.
lstrip
(
'/'
))
def
__unicode__
(
self
):
return
u'<SubFS:
%
s/
%
s>'
%
(
self
.
wrapped_fs
,
self
.
sub_dir
.
lstrip
(
'/'
))
return
u'<SubFS:
%
s/
%
s>'
%
(
self
.
wrapped_fs
,
self
.
sub_dir
.
lstrip
(
'/'
))
def
__repr__
(
self
):
return
"SubFS(
%
r,
%
r)"
%
(
self
.
wrapped_fs
,
self
.
sub_dir
)
return
"SubFS(
%
r,
%
r)"
%
(
self
.
wrapped_fs
,
self
.
sub_dir
)
def
desc
(
self
,
path
):
def
desc
(
self
,
path
):
if
path
in
(
''
,
'/'
):
return
self
.
wrapped_fs
.
desc
(
self
.
sub_dir
)
return
'
%
s!
%
s'
%
(
self
.
wrapped_fs
.
desc
(
self
.
sub_dir
),
path
)
def
setcontents
(
self
,
path
,
data
,
chunk_size
=
64
*
1024
):
def
setcontents
(
self
,
path
,
data
,
encoding
=
None
,
errors
=
None
,
chunk_size
=
64
*
1024
):
path
=
self
.
_encode
(
path
)
return
self
.
wrapped_fs
.
setcontents
(
path
,
data
,
chunk_size
=
chunk_size
)
...
...
@@ -62,14 +62,14 @@ class SubFS(WrapFS):
path
=
normpath
(
path
)
if
path
in
(
''
,
'/'
):
raise
RemoveRootError
(
path
)
super
(
SubFS
,
self
)
.
removedir
(
path
,
force
=
force
)
super
(
SubFS
,
self
)
.
removedir
(
path
,
force
=
force
)
if
recursive
:
try
:
if
dirname
(
path
)
not
in
(
''
,
'/'
):
self
.
removedir
(
dirname
(
path
),
recursive
=
True
)
self
.
removedir
(
dirname
(
path
),
recursive
=
True
)
except
DirectoryNotEmptyError
:
pass
# if path in ("","/"):
# if not force:
# for path2 in self.listdir(path):
...
...
fs/zipfs.py
View file @
3ea4efe1
...
...
@@ -13,6 +13,7 @@ from fs.base import *
from
fs.path
import
*
from
fs.errors
import
*
from
fs.filelike
import
StringIO
from
fs
import
iotools
from
zipfile
import
ZipFile
,
ZIP_DEFLATED
,
ZIP_STORED
,
BadZipfile
,
LargeZipFile
from
memoryfs
import
MemoryFS
...
...
@@ -21,6 +22,7 @@ import tempfs
from
six
import
PY3
class
ZipOpenError
(
CreateFailedError
):
"""Thrown when the zip file could not be opened"""
pass
...
...
@@ -76,13 +78,13 @@ class _ExceptionProxy(object):
class
ZipFS
(
FS
):
"""A FileSystem that represents a zip file."""
_meta
=
{
'thread_safe'
:
True
,
'virtual'
:
False
,
'read_only'
:
False
,
'unicode_paths'
:
True
,
'case_insensitive_paths'
:
False
,
'network'
:
False
,
'atomic.setcontents'
:
False
_meta
=
{
'thread_safe'
:
True
,
'virtual'
:
False
,
'read_only'
:
False
,
'unicode_paths'
:
True
,
'case_insensitive_paths'
:
False
,
'network'
:
False
,
'atomic.setcontents'
:
False
}
def
__init__
(
self
,
zip_file
,
mode
=
"r"
,
compression
=
"deflated"
,
allow_zip_64
=
False
,
encoding
=
"CP437"
,
thread_synchronize
=
True
):
...
...
@@ -129,7 +131,7 @@ class ZipFS(FS):
raise
ZipOpenError
(
"Not a zip file or corrupt (
%
s)"
%
str
(
zip_file
),
details
=
ioe
)
raise
ZipNotFoundError
(
"Zip file not found (
%
s)"
%
str
(
zip_file
),
details
=
ioe
)
details
=
ioe
)
self
.
zip_path
=
str
(
zip_file
)
self
.
temp_fs
=
None
...
...
@@ -189,7 +191,8 @@ class ZipFS(FS):
self
.
zf
=
_ExceptionProxy
()
@synchronize
def
open
(
self
,
path
,
mode
=
"r"
,
**
kwargs
):
@iotools.filelike_to_stream
def
open
(
self
,
path
,
mode
=
'r'
,
buffering
=-
1
,
encoding
=
None
,
errors
=
None
,
newline
=
None
,
line_buffering
=
False
,
**
kwargs
):
path
=
normpath
(
relpath
(
path
))
if
'r'
in
mode
:
...
...
@@ -222,7 +225,7 @@ class ZipFS(FS):
raise
ValueError
(
"Mode must contain be 'r' or 'w'"
)
@synchronize
def
getcontents
(
self
,
path
,
mode
=
"rb"
):
def
getcontents
(
self
,
path
,
mode
=
"rb"
,
encoding
=
None
,
errors
=
None
,
newline
=
None
):
if
not
self
.
exists
(
path
):
raise
ResourceNotFoundError
(
path
)
path
=
normpath
(
relpath
(
path
))
...
...
@@ -232,7 +235,9 @@ class ZipFS(FS):
raise
ResourceNotFoundError
(
path
)
except
RuntimeError
:
raise
OperationFailedError
(
"read file"
,
path
=
path
,
msg
=
"3 Zip file must be opened with 'r' or 'a' to read"
)
return
contents
if
'b'
in
mode
:
return
contents
return
iotools
.
decode_binary
(
contents
,
encoding
=
encoding
,
errors
=
errors
,
newline
=
newline
)
@synchronize
def
_on_write_close
(
self
,
filename
):
...
...
setup.py
View file @
3ea4efe1
...
...
@@ -28,7 +28,6 @@ classifiers = [
'License :: OSI Approved :: BSD License'
,
'Operating System :: OS Independent'
,
'Programming Language :: Python'
,
'Programming Language :: Python :: 2.5'
,
'Programming Language :: Python :: 2.6'
,
'Programming Language :: Python :: 2.7'
,
'Programming Language :: Python :: 3'
,
...
...
@@ -41,7 +40,7 @@ Even if you only need to work with file and directories on the local hard-drive,
"""
extra
=
{}
if
PY3
:
if
PY3
:
extra
[
"use_2to3"
]
=
True
setup
(
install_requires
=
[
'distribute'
,
'six'
],
...
...
@@ -49,12 +48,12 @@ setup(install_requires=['distribute', 'six'],
version
=
VERSION
,
description
=
"Filesystem abstraction"
,
long_description
=
long_desc
,
license
=
"BSD"
,
license
=
"BSD"
,
author
=
"Will McGugan"
,
author_email
=
"will@willmcgugan.com"
,
url
=
"http://code.google.com/p/pyfilesystem/"
,
download_url
=
"http://code.google.com/p/pyfilesystem/downloads/list"
,
platforms
=
[
'any'
],
platforms
=
[
'any'
],
packages
=
[
'fs'
,
'fs.expose'
,
'fs.expose.dokan'
,
...
...
@@ -66,10 +65,10 @@ setup(install_requires=['distribute', 'six'],
'fs.contrib'
,
'fs.contrib.bigfs'
,
'fs.contrib.davfs'
,
'fs.contrib.tahoelafs'
,
'fs.contrib.tahoelafs'
,
'fs.commands'
],
package_data
=
{
'fs'
:
[
'tests/data/*.txt'
]},
scripts
=
[
'fs/commands/
%
s'
%
command
for
command
in
COMMANDS
],
classifiers
=
classifiers
,
**
extra
)
tox.ini
View file @
3ea4efe1
[tox]
envlist
=
py2
5,py2
6,py27,py31,py32,pypy
envlist
=
py26,py27,py31,py32,pypy
sitepackages
=
False
[testenv]
...
...
@@ -10,30 +10,17 @@ deps = distribute
boto
nose
mako
python-libarchive
pyftpdlib
pyftpdlib
changedir
=
.tox
commands
=
nosetests fs.tests -v
\
[]
[testenv:py25]
deps
=
distribute
six
dexml
paramiko
boto
nose
mako
python-libarchive
pyftpdlib
simplejson
[testenv:py32]
commands
=
nosetests fs.tests -v
\
[]
deps
=
distribute
six
six
dexml
nose
winpdb
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment