OpenEdx / pyfs / Commits

Commit 3f3138fe authored Dec 18, 2010 by willmcgugan

Added openers for Tahoe and Dav

parent dc2e9a34

Showing 10 changed files with 247 additions and 34 deletions (+247 -34)

ChangeLog                          +2    -1
fs/base.py                         +2    -0
fs/commands/fscp.py                +19   -13
fs/commands/fsmv.py                +4    -1
fs/commands/runner.py              +28   -5
fs/contrib/davfs/__init__.py       +2    -1
fs/contrib/tahoefs/__init__.py     +6    -6
fs/opener.py                       +110  -4
fs/s3fs.py                         +1    -1
fs/utils.py                        +73   -2

ChangeLog
@@ -64,7 +64,8 @@
   * Added a getmmap to base
   * Added command line scripts fsls, fstree, fscat, fscp, fsmv
   * Added command line scripts fsmkdir, fsmount
-  * Made automatically pick up keys if no other authentication is available
+  * Made SFTP automatically pick up keys if no other authentication is available
   * Optimized listdir and listdirinfo in SFTPFS
   * Made memoryfs work with threads
+  * Added copyfile_non_atomic and movefile_non_atomic for improved performance of multi-threaded copies

fs/base.py
@@ -845,6 +845,8 @@ class FS(object):
         :param overwrite: if True, then an existing file at the destination path
                           will be silently overwritten; if False then an exception
                           will be raised in this case.
+        :param overwrite: When True the destination will be overwritten (if it exists),
+                          otherwise a DestinationExistsError will be thrown
         :type overwrite: bool
         :param chunk_size: Size of chunks to use when copying, if a simple copy
                           is required

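The hunk above documents the overwrite and chunk_size parameters; assuming it belongs to FS.copy (the enclosing method at line 845 is not shown in the diff), a minimal sketch of the documented behaviour, using a MemoryFS so it runs without external resources:

from fs.memoryfs import MemoryFS

m = MemoryFS()
m.setcontents('a.txt', 'hello')

# Copy with an explicit chunk size; overwrite=True replaces any existing file.
m.copy('a.txt', 'b.txt', overwrite=True, chunk_size=64*1024)

# With overwrite=False, a second copy onto 'b.txt' would raise
# DestinationExistsError, per the new docstring lines above.
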
fs/commands/fscp.py
 from fs.opener import opener
-from fs.utils import copyfile, copystructure
+from fs.utils import copyfile, copyfile_non_atomic, copystructure
 from fs.path import pathjoin, iswildcard
 from fs.errors import FSError
 from fs.commands.runner import Command

@@ -22,7 +22,6 @@ class FileOpThread(threading.Thread):
     def run(self):
-        try:
         while not self.finish_event.isSet():
             try:
                 path_type, fs, path, dest_path = self.queue.get(timeout=0.1)

@@ -34,14 +33,15 @@ class FileOpThread(threading.Thread):
             else:
                 self.action(fs, path, self.dest_fs, dest_path, overwrite=True)
             except Exception, e:
-                print e
+                self.on_error(e)
                 self.queue.task_done()
-                raise
+                break
             else:
                 self.queue.task_done()
                 self.on_done(path_type, fs, path, self.dest_fs, dest_path)
-        except Exception, e:
-            self.on_error(e)

 class FScp(Command):

@@ -51,6 +51,9 @@ class FScp(Command):
     Copy SOURCE to DESTINATION"""

     def get_action(self):
+        if self.options.threads > 1:
+            return copyfile_non_atomic
+        else:
             return copyfile

     def get_verb(self):

@@ -147,10 +150,11 @@ Copy SOURCE to DESTINATION"""
                                  self.on_done,
                                  self.on_error)
                    for i in xrange(options.threads)]

         for thread in threads:
             thread.start()

-        self.action_error = None
+        self.action_errors = []

         complete = False
         try:
             enqueue = file_queue.put

@@ -167,15 +171,10 @@ Copy SOURCE to DESTINATION"""
         except KeyboardInterrupt:
             options.progress = False
-            if self.action_error:
-                self.error(self.wrap_error(unicode(self.action_error)) + '\n')
-            else:
             self.output("\nCancelling...\n")
         except SystemExit:
             options.progress = False
-            if self.action_error:
-                self.error(self.wrap_error(unicode(self.action_error)) + '\n')
         finally:
             sys.stdout.flush()

@@ -188,6 +187,12 @@ Copy SOURCE to DESTINATION"""
             dst_fs.close()

+        if self.action_errors:
+            for error in self.action_errors:
+                self.error(self.wrap_error(unicode(error)) + '\n')
+            sys.stdout.write('\n')
+            sys.stdout.flush()
+        else:
             if complete and options.progress:
                 sys.stdout.write(self.progress_bar(self.total_files, self.done_files, ''))
                 sys.stdout.write('\n')

@@ -212,16 +217,17 @@ Copy SOURCE to DESTINATION"""
             self.lock.release()

     def on_error(self, e):
+        print e
         self.lock.acquire()
         try:
-            self.action_error = e
+            self.action_errors.append(e)
         finally:
             self.lock.release()

     def any_error(self):
         self.lock.acquire()
         try:
-            return bool(self.action_error)
+            return bool(self.action_errors)
         finally:
             self.lock.release()

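In effect, FScp.get_action now picks the copy routine from the requested thread count, and worker errors are collected in self.action_errors under a lock and reported once the queue drains. A standalone sketch of that dispatch (pick_copy_action and options are illustrative names, not part of the module):

from fs.utils import copyfile, copyfile_non_atomic

def pick_copy_action(options):
    # Mirrors FScp.get_action: with more than one worker thread, prefer the
    # non-atomic copy so threads don't serialize on a filesystem-wide lock.
    if getattr(options, 'threads', 1) > 1:
        return copyfile_non_atomic
    return copyfile
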
fs/commands/fsmv.py
-from fs.utils import movefile, contains_files
+from fs.utils import movefile, movefile_non_atomic, contains_files
 from fs.commands import fscp
 import sys

@@ -11,6 +11,9 @@ Move files from SOURCE to DESTINATION"""
         return 'moving...'

     def get_action(self):
+        if self.options.threads > 1:
+            return movefile_non_atomic
+        else:
             return movefile

     def post_actions(self):

fs/commands/runner.py
 import sys
 from optparse import OptionParser
-from fs.opener import opener, OpenerError
+from fs.opener import opener, OpenerError, Opener
 from fs.errors import FSError
 from fs.path import splitext, pathsplit, isdotfile, iswildcard
 import platform

@@ -118,11 +118,7 @@ class Command(object):
         return re.sub(re_fs, repl, text)

     def open_fs(self, fs_url, writeable=False, create_dir=False):
-        try:
             fs, path = opener.parse(fs_url, writeable=writeable, create_dir=create_dir)
-        except OpenerError, e:
-            self.error(str(e), '\n')
-            sys.exit(1)
         fs.cache_hint(True)
         return fs, path

@@ -238,6 +234,8 @@ class Command(object):
                              help="make output verbose", metavar="VERBOSE")
         optparse.add_option('--listopeners', dest='listopeners', action="store_true", default=False,
                             help="list all FS openers", metavar="LISTOPENERS")
+        optparse.add_option('--fs', dest='fs', action='append', type="string",
+                            help="import an FS opener e.g --fs foo.bar.MyOpener", metavar="OPENER")

         return optparse

     def list_openers(self):

@@ -288,6 +286,31 @@ class Command(object):
             self.list_openers()
             return 0

+        ilocals = {}
+        if options.fs:
+            for import_opener in options.fs:
+                module_name, opener_class = import_opener.rsplit('.', 1)
+                try:
+                    opener_module = __import__(module_name, globals(), ilocals, [opener_class], -1)
+                except ImportError:
+                    self.error("Unable to import opener %s\n" % import_opener)
+                    return 0
+                new_opener = getattr(opener_module, opener_class)
+                try:
+                    if not issubclass(new_opener, Opener):
+                        self.error('%s is not an fs.opener.Opener\n' % import_opener)
+                        return 0
+                except TypeError:
+                    self.error('%s is not an opener class\n' % import_opener)
+                    return 0
+                if options.verbose:
+                    self.output('Imported opener %s\n' % import_opener)
+                opener.add(new_opener)
+
         args = [unicode(arg, sys.getfilesystemencoding()) for arg in args]
         self.verbose = options.verbose
         try:

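The new --fs option imports an opener class by dotted path and registers it with opener.add before the command runs. A hypothetical opener module that could be passed this way (module and class names are made up; the class shape follows the openers added in fs/opener.py below):

# mypkg/myopener.py (hypothetical)
from fs.opener import Opener

class RamOpener(Opener):
    names = ['ram2']   # URL scheme(s) this opener should answer to
    desc = """Opens a throwaway in-memory filesystem"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        from fs.memoryfs import MemoryFS
        # Openers return a (filesystem, path) pair.
        return MemoryFS(), fs_path

It could then be used as, say, fsls --fs mypkg.myopener.RamOpener ram2://.
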
fs/contrib/davfs/__init__.py
@@ -110,7 +110,7 @@ class DAVFS(FS):
         if resp.status == 404:
             raise ResourceNotFoundError("/", msg="root url gives 404")
         if resp.status in (401, 403):
-            raise PermissionDeniedError("listdir ")
+            raise PermissionDeniedError("listdir (http %s)" % resp.status)
         if resp.status != 207:
             msg = "server at %s doesn't speak WebDAV" % (self.url,)
             raise RemoteConnectionError("", msg=msg, details=resp.read())

@@ -494,6 +494,7 @@ class DAVFS(FS):
         if response.status == 405:
             raise ResourceInvalidError(path)
         if response.status < 200 or response.status >= 300:
+            print response.read()
             raise_generic_error(response, "remove", path)
         return True

fs/contrib/tahoefs/__init__.py
 '''
 Example (it will use publicly available, but slow-as-hell Tahoe-LAFS cloud):

-from fs.tahoefs import TahoeFS, Connection
+from fs.contrib.tahoefs import TahoeFS, Connection
 dircap = TahoeFS.createdircap(webapi='http://pubgrid.tahoe-lafs.org')
 print "Your dircap (unique key to your storage directory) is", dircap
 print "Keep it safe!"

@@ -86,13 +86,13 @@ class TahoeFS(CacheFS):
     def __init__(self, dircap, timeout=60, autorun=True, largefilesize=10*1024*1024, webapi='http://127.0.0.1:3456'):
         '''
         Creates instance of TahoeFS.

-        dircap - special hash allowing user to work with TahoeLAFS directory.
-        timeout - how long should underlying CacheFS keep information about files
+        :param dircap: special hash allowing user to work with TahoeLAFS directory.
+        :param timeout: how long should underlying CacheFS keep information about files
             before asking TahoeLAFS node again.
-        autorun - Allow listing autorun files? Can be very dangerous on Windows!.
+        :param autorun: Allow listing autorun files? Can be very dangerous on Windows!.
             This is temporary hack, as it should be part of Windows-specific middleware,
             not Tahoe itself.
-        largefilesize - Create placeholder file for files larger than this tresholf.
+        :param largefilesize: - Create placeholder file for files larger than this treshold.
             Uploading and processing of large files can last extremely long (many hours),
             so placing this placeholder can help you to remember that upload is processing.
             Setting this to None will skip creating placeholder files for any uploads.

@@ -384,7 +384,7 @@ class _TahoeFS(FS):
                     offset=offset, length=length)

     @_fix_path
-    def setcontents(self, path, file):
+    def setcontents(self, path, file, chunk_size=64*1024):
         self._log(INFO, 'Uploading file %s' % path)
         path = self.tahoeutil.fixwinpath(path, False)
         size = None

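Putting the reworked constructor docstring together with the module-level example, a usage sketch might look like this (the pubgrid node is public but slow, as the docstring warns):

from fs.contrib.tahoefs import TahoeFS

dircap = TahoeFS.createdircap(webapi='http://pubgrid.tahoe-lafs.org')
print "Your dircap (unique key to your storage directory) is", dircap

fs = TahoeFS(dircap,
             timeout=60,                   # how long CacheFS may cache file info
             largefilesize=10*1024*1024,   # placeholder files above this size
             webapi='http://pubgrid.tahoe-lafs.org')
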
fs/opener.py
@@ -32,6 +32,9 @@ def _expand_syspath(path):

 def _parse_credentials(url):
+    scheme = None
+    if '://' in url:
+        scheme, url = url.split('://', 1)
     username = None
     password = None
     if '@' in url:

@@ -40,6 +43,8 @@ def _parse_credentials(url):
             username, password = credentials.split(':', 1)
         else:
             username = credentials
+    if scheme is not None:
+        url = '%s://%s' % (scheme, url)
     return username, password, url

 def _parse_name(fs_name):

@@ -49,6 +54,13 @@ def _parse_name(fs_name):
     else:
         return fs_name, None

+def _split_url_path(url):
+    if '://' not in url:
+        url = 'http://' + url
+    scheme, netloc, path, params, query, fragment = urlparse(url)
+    url = '%s://%s' % (scheme, netloc)
+    return url, path
+
 class OpenerRegistry(object):

@@ -134,6 +146,9 @@ class OpenerRegistry(object):
             fs_path = join(fs_path, path)

+        if create_dir and fs_path:
+            fs.makedir(fs_path, allow_recreate=True)
+
         pathname, resourcename = pathsplit(fs_path or '')
         if pathname and resourcename:
             fs = fs.opendir(pathname)

@@ -418,7 +433,7 @@ example:
     def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
         from fs.wrapfs.debugfs import DebugFS
         if fs_path:
-            fs, path = registry.parse(fs_path, writeable=writeable, create=create_dir)
+            fs, path = registry.parse(fs_path, writeable=writeable, create_dir=create_dir)
             return DebugFS(fs, verbose=False), None
         if fs_name_params == 'ram':
             from fs.memoryfs import MemoryFS

@@ -441,11 +456,99 @@ example:
     def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
         from fs.tempfs import TempFS
         fs = TempFS(identifier=fs_name_params)
+        if create_dir and fs_path:
+            fs = fs.makeopendir(fs_path)
+            fs_path = pathsplit(fs_path)
         return fs, fs_path

+
+class S3Opener(Opener):
+    names = ['s3']
+    desc = """Opens a filesystem stored on Amazon S3 storage
+    The environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY should be set"""
+    @classmethod
+    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
+        from fs.s3fs import S3FS
+
+        bucket = fs_path
+        path = ''
+        if '/' in fs_path:
+            bucket, path = fs_path.split('/', 1)
+
+        fs = S3FS(bucket)
+
+        if path:
+            dirpath, resourcepath = pathsplit(path)
+            if dirpath:
+                fs = fs.opendir(dirpath)
+            path = resourcepath
+
+        return fs, path
+
+
+class TahoeOpener(Opener):
+    names = ['tahoe']
+    desc = """Opens a Tahoe-LAFS filesystem
+
+    example:
+    * tahoe://http://pubgrid.tahoe-lafs.org/uri/URI:DIR2:h5bkxelehowscijdb [...]"""
+
+    @classmethod
+    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
+        from fs.contrib.tahoefs import TahoeFS
+
+        if '/uri/' not in fs_path:
+            raise OpenerError("""Tahoe url should be in the form <url>/uri/<dicap>""")
+
+        url, dircap = fs_path.split('/uri/')
+        path = ''
+        if '/' in dircap:
+            dircap, path = dircap.split('/', 1)
+
+        fs = TahoeFS(dircap, webapi=url)
+
+        if '/' in path:
+            dirname, resourcename = pathsplit(path)
+            if createdir:
+                fs = fs.makeopendir(dirname)
+            else:
+                fs = fs.opendir(dirname)
+            path = ''
+
+        return fs, path
+
+
+class DavOpener(Opener):
+    names = ['dav']
+    desc = """Opens a WebDAV server
+
+    example:
+    * dav://example.org/dav"""
+
+    @classmethod
+    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
+        from fs.contrib.davfs import DAVFS
+
+        url = fs_path
+
+        if '://' not in url:
+            url = 'http://' + url
+
+        scheme, url = url.split('://', 1)
+        username, password, url = _parse_credentials(url)
+
+        credentials = None
+        if username or password:
+            credentials = {}
+            if username:
+                credentials['username'] = username
+            if password:
+                credentials['password'] = password
+
+        url = '%s://%s' % (scheme, url)
+
+        fs = DAVFS(url, credentials=credentials)
+
+        return fs, ''
+

 opener = OpenerRegistry([OSFSOpener,
                          ZipOpener,

@@ -455,6 +558,9 @@ opener = OpenerRegistry([OSFSOpener,
                          MemOpener,
                          DebugOpener,
                          TempOpener,
+                         S3Opener,
+                         TahoeOpener,
+                         DavOpener,
                          ])

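With S3Opener, TahoeOpener and DavOpener registered, the new backends become reachable through ordinary FS URLs via the same registry the command line tools use. A minimal sketch (the URLs follow the opener descriptions above; assuming the existing memory opener accepts mem:// URLs, that is the only line that runs without network access or credentials):

from fs.opener import opener

# Runs offline via the existing memory opener.
mem_fs, path = opener.parse('mem://')

# The openers added by this commit (network access / credentials required):
# dav_fs, path   = opener.parse('dav://user:password@example.org/dav')
# tahoe_fs, path = opener.parse('tahoe://http://pubgrid.tahoe-lafs.org/uri/<dircap>')
# s3_fs, path    = opener.parse('s3://mybucket/path')  # needs AWS_* environment variables
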
fs/s3fs.py
@@ -72,7 +72,7 @@ class S3FS(FS):
     PATH_MAX = None
     NAME_MAX = None

-    def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True, key_sync_timeout=1):
+    def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True, key_sync_timeout=1):
         """Constructor for S3FS objects.

         S3FS objects require the name of the S3 bucket in which to store

fs/utils.py
@@ -53,6 +53,37 @@ def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024):
             FS._shutil_copyfile(src_syspath, dst_syspath)
             return

+    src_lock = getattr(src_fs, '_lock', None)
+
+    if src_lock is not None:
+        src_lock.acquire()
+
+    try:
+        src = None
+        try:
+            src = src_fs.open(src_path, 'rb')
+            dst_fs.setcontents(dst_path, src, chunk_size=chunk_size)
+        finally:
+            if src is not None:
+                src.close()
+    finally:
+        if src_lock is not None:
+            src_lock.release()
+
+
+def copyfile_non_atomic(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024):
+    """A non atomic version of copyfile (will not block other threads using src_fs or dst_fst)
+
+    :param src_fs: Source filesystem object
+    :param src_path: -- Source path
+    :param dst_fs: Destination filesystem object
+    :param dst_path: Destination filesystem object
+    :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K)
+
+    """
+
+    if not overwrite and dst_fs.exists(dst_path):
+        raise DestinationExistsError(dst_path)
+
     src = None
     dst = None
     try:

@@ -97,24 +128,64 @@ def movefile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024):
             FS._shutil_movefile(src_syspath, dst_syspath)
             return

+    src_lock = getattr(src_fs, '_lock', None)
+
+    if src_lock is not None:
+        src_lock.acquire()
+
+    try:
+        src = None
+        try:
+            # Chunk copy
+            src = src_fs.open(src_path, 'rb')
+            dst_fs.setcontents(dst_path, src, chunk_size=chunk_size)
+        except:
+            raise
+        else:
+            src_fs.remove(src_path)
+        finally:
+            if src is not None:
+                src.close()
+    finally:
+        if src_lock is not None:
+            src_lock.release()
+
+
+def movefile_non_atomic(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=64*1024):
+    """A non atomic version of movefile (wont block other threads using src_fs or dst_fs)
+
+    :param src_fs: Source filesystem object
+    :param src_path: Source path
+    :param dst_fs: Destination filesystem object
+    :param dst_path: Destination filesystem object
+    :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K)
+
+    """
+
+    if not overwrite and dst_fs.exists(dst_path):
+        raise DestinationExistsError(dst_path)
+
     src = None
     dst = None

     try:
         # Chunk copy
         src = src_fs.open(src_path, 'rb')
-        dst = src_fs.open(dst_path, 'wb')
+        dst = dst_fs.open(dst_path, 'wb')
         write = dst.write
         read = src.read

         chunk = read(chunk_size)
         while chunk:
             write(chunk)
             chunk = read(chunk_size)
+    except:
+        raise
+    else:
+        src_fs.remove(src_path)
     finally:
         if src is not None:
             src.close()
         if dst is not None:
             dst.close()
-    src_fs.remove(src_path)

 def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=64*1024):

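The non-atomic variants share the signature of copyfile/movefile but avoid holding a filesystem-wide lock for the whole transfer, which is what makes the threaded fscp/fsmv paths above worthwhile. A minimal sketch using in-memory filesystems so nothing touches disk or the network (assuming MemoryFS.setcontents accepts a string, as elsewhere in the library):

from fs.memoryfs import MemoryFS
from fs.utils import copyfile_non_atomic, movefile_non_atomic

src = MemoryFS()
dst = MemoryFS()
src.setcontents('report.txt', 'contents')

# Chunk-copies without locking src for the duration of the transfer.
copyfile_non_atomic(src, 'report.txt', dst, 'report.txt',
                    overwrite=True, chunk_size=64*1024)

# Same idea for a move: chunk-copy, then remove the source on success.
movefile_non_atomic(src, 'report.txt', dst, 'report-moved.txt')
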