OpenEdx / ansible

Commit d05c61d1
Authored Sep 02, 2013 by James Cammarata
Parents: d69d5c29, 50f54f6b

    Merge branch 'devel' into fireball2

    Conflicts:
        lib/ansible/playbook/__init__.py
        lib/ansible/playbook/play.py

Showing 11 changed files with 138 additions and 59 deletions (+138, -59)
Changed files:

    docsite/latest/rst/playbooks.rst            +13  -5
    docsite/latest/rst/playbooks2.rst            +1  -1
    lib/ansible/playbook/__init__.py             +3  -2
    lib/ansible/playbook/play.py                +77 -23
    lib/ansible/playbook/task.py                 +5  -4
    lib/ansible/runner/__init__.py               +5  -2
    lib/ansible/runner/action_plugins/fetch.py   +1  -1
    library/cloud/quantum_network               +14  -9
    library/system/user                         +15 -11
    plugins/inventory/ec2.ini                    +1  -0
    plugins/inventory/ec2.py                     +3  -1
docsite/latest/rst/playbooks.rst

@@ -568,10 +568,10 @@ Role dependencies can also be specified as a full path, just like top level roles:

     dependencies:
        - { role: '/path/to/common/roles/foo', x: 1 }

-Roles dependencies are always executed before the role that includes them, and are recursive.
-Role dependencies may be included more than once. Continuing the above example, the 'car' role could
-add 'wheel' dependencies as follows::
+Roles dependencies are always executed before the role that includes them, and are recursive. By default,
+roles can also only be added as a dependency once - if another role also lists it as a dependency it will
+not be run again. This behavior can be overridden by adding `allow_duplicates: yes` to the `meta/main.yml` file.
+For example, a role named 'car' could add a role named 'wheel' to its dependencies as follows::

     ---
     dependencies:

@@ -580,7 +580,15 @@ add 'wheel' dependencies as follows::

     - { role: wheel, n: 3 }
     - { role: wheel, n: 4 }

-If the wheel role required tire and brake in turn, this would result in the following execution order::
+And the `meta/main.yml` for wheel contained the following::
+
+    ---
+    allow_duplicates: yes
+    dependencies:
+    - { role: tire }
+    - { role: brake }
+
+The resulting order of execution would be as follows::

     tire(n=1)
     brake(n=1)
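The de-duplication rule documented above is implemented later in this diff, in lib/ansible/playbook/play.py. As a rough standalone illustration of that decision (hypothetical data; a plain bool() stands in for Ansible's utils.boolean):

    # Sketch of the duplicate-dependency check introduced by this commit.
    # included_roles plays the part of Play.included_roles; each meta dict
    # stands in for a dependency's parsed meta/main.yml.
    included_roles = []

    def should_include(dep, meta_data):
        allow_dupes = bool(meta_data.get('allow_duplicates', False))
        if not allow_dupes:
            if dep in included_roles:
                return False              # already included once, skip the duplicate
            included_roles.append(dep)
        return True

    print(should_include('wheel', {}))                          # True  (first inclusion)
    print(should_include('wheel', {}))                          # False (duplicate suppressed)
    print(should_include('wheel', {'allow_duplicates': True}))  # True  (explicitly allowed)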
docsite/latest/rst/playbooks2.rst

@@ -285,7 +285,7 @@ Example::

 As of Ansible 1.2, you can also pass in extra vars as quoted JSON, like so::

-    --extra-vars "{'pacman':'mrs','ghosts':['inky','pinky','clyde','sue']}"
+    --extra-vars '{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}'

 The key=value form is obviously simpler, but it's there if you need it!
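The documentation fix above swaps the quoting because JSON strings must use double quotes; the old example was not parseable JSON at all. A quick illustrative check in Python:

    import json

    # The corrected form: double-quoted JSON, wrapped in single quotes for the shell.
    print(json.loads('{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}'))

    # The old form used single-quoted strings, which JSON rejects.
    try:
        json.loads("{'pacman':'mrs','ghosts':['inky','pinky','clyde','sue']}")
    except ValueError as err:
        print("not valid JSON:", err)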
lib/ansible/playbook/__init__.py

@@ -307,7 +307,7 @@ class PlayBook(object):
             remote_pass=self.remote_pass, module_path=self.module_path,
             timeout=self.timeout, remote_user=task.play.remote_user,
             remote_port=task.play.remote_port, module_vars=task.module_vars,
-            private_key_file=self.private_key_file,
+            default_vars=task.default_vars, private_key_file=self.private_key_file,
             setup_cache=self.SETUP_CACHE, basedir=task.play.basedir,
             conditional=task.only_if, callbacks=self.runner_callbacks,
             sudo=task.sudo, sudo_user=task.sudo_user,

@@ -448,7 +448,8 @@ class PlayBook(object):
             remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
             setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
             transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True, module_vars=play.vars,
-            check=self.check, diff=self.diff, accelerate=play.accelerate, accelerate_port=play.accelerate_port
+            default_vars=play.default_vars, check=self.check, diff=self.diff,
+            accelerate=play.accelerate, accelerate_port=play.accelerate_port
         ).run()

         self.stats.compute(setup_results, setup=True)
lib/ansible/playbook/play.py

@@ -28,9 +28,9 @@ import os
 class Play(object):

     __slots__ = [
-       'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
-       'handlers', 'remote_user', 'remote_port', 'accelerate', 'accelerate_port',
-       'sudo', 'sudo_user', 'transport', 'playbook',
+       'hosts', 'name', 'vars', 'default_vars', 'vars_prompt', 'vars_files',
+       'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate', 'accelerate_port',
+       'sudo', 'sudo_user', 'transport', 'playbook',
        'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks',
        'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct'
     ]

@@ -69,9 +69,18 @@ class Play(object):
         elif type(self.tags) != list:
             self.tags = []

-        ds = self._load_roles(self.roles, ds)
-        self.vars_files = ds.get('vars_files', [])
+        # We first load the vars files from the datastructure
+        # so we have the default variables to pass into the roles
+        self.vars_files = ds.get('vars_files', [])
+        self._update_vars_files_for_host(None)
+
+        # now we load the roles into the datastructure
+        self.included_roles = []
+        ds = self._load_roles(self.roles, ds)
+
+        # and finally re-process the vars files as they may have
+        # been updated by the included roles
+        self.vars_files = ds.get('vars_files', [])
         self._update_vars_files_for_host(None)

         # template everything to be efficient, but do not pre-mature template

@@ -155,6 +164,17 @@ class Play(object):
             raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
         for role in roles:
             role_path,role_vars = self._get_role_path(role)
             role_vars = utils.combine_vars(role_vars, passed_vars)
+
+            vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
+            vars_data = {}
+            if os.path.isfile(vars):
+                vars_data = utils.parse_yaml_from_file(vars)
+                if vars_data:
+                    role_vars = utils.combine_vars(vars_data, role_vars)
+
+            defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
+            defaults_data = {}
+            if os.path.isfile(defaults):
+                defaults_data = utils.parse_yaml_from_file(defaults)
+
             # the meta directory contains the yaml that should
             # hold the list of dependencies (if any)
             meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))

@@ -164,27 +184,52 @@ class Play(object):
                 dependencies = data.get('dependencies',[])
                 for dep in dependencies:
                     (dep_path,dep_vars) = self._get_role_path(dep)
+                    meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
+                    if os.path.isfile(meta):
+                        meta_data = utils.parse_yaml_from_file(meta)
+                        if meta_data:
+                            allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
+                            if not allow_dupes:
+                                if dep in self.included_roles:
+                                    continue
+                                else:
+                                    self.included_roles.append(dep)
+                    dep_vars = utils.combine_vars(passed_vars, dep_vars)
+                    dep_vars = utils.combine_vars(role_vars, dep_vars)
                     vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
                     vars_data = {}
                     if os.path.isfile(vars):
                         vars_data = utils.parse_yaml_from_file(vars)
-                    dep_vars.update(role_vars)
-                    for k in passed_vars.keys():
-                        if not k in dep_vars:
-                            dep_vars[k] = passed_vars[k]
-                    for k in vars_data.keys():
-                        if not k in dep_vars:
-                            dep_vars[k] = vars_data[k]
+                        if vars_data:
+                            dep_vars = utils.combine_vars(vars_data, dep_vars)
+                    defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
+                    dep_defaults_data = {}
+                    if os.path.isfile(defaults):
+                        dep_defaults_data = utils.parse_yaml_from_file(defaults)
                     if 'role' in dep_vars:
                         del dep_vars['role']
                     self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
-                    dep_stack.append([dep,dep_path,dep_vars])
+                    dep_stack.append([dep,dep_path,dep_vars,dep_defaults_data])
             # only add the current role when we're at the top level,
             # otherwise we'll end up in a recursive loop
             if level == 0:
-                dep_stack.append([role,role_path,role_vars])
+                dep_stack.append([role,role_path,role_vars,defaults_data])
         return dep_stack

+    def _load_role_defaults(self, defaults_files):
+        # process default variables
+        default_vars = {}
+        for filename in defaults_files:
+            if os.path.exists(filename):
+                new_default_vars = utils.parse_yaml_from_file(filename)
+                if new_default_vars:
+                    if type(new_default_vars) != dict:
+                        raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % (filename, type(new_default_vars)))
+                    default_vars = utils.combine_vars(default_vars, new_default_vars)
+        return default_vars
+
     def _load_roles(self, roles, ds):
         # a role is a name that auto-includes the following if they exist
         # <rolename>/tasks/main.yml

@@ -201,6 +246,7 @@ class Play(object):
         new_tasks = []
         new_handlers = []
         new_vars_files = []
+        defaults_files = []

         pre_tasks = ds.get('pre_tasks', None)
         if type(pre_tasks) != list:

@@ -213,7 +259,7 @@ class Play(object):
         roles = self._build_role_dependencies(roles, [], self.vars)

-        for role,role_path,role_vars in roles:
+        for (role, role_path, role_vars, default_vars) in roles:
             # special vars must be extracted from the dict to the included tasks
             special_keys = [ "sudo", "sudo_user", "when", "with_items" ]
             special_vars = {}

@@ -221,19 +267,22 @@ class Play(object):
                 if k in role_vars:
                     special_vars[k] = role_vars[k]

-            task_basepath    = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
-            handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
-            vars_basepath    = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
+            task_basepath     = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
+            handler_basepath  = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
+            vars_basepath     = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
+            defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))

             task      = self._resolve_main(task_basepath)
             handler   = self._resolve_main(handler_basepath)
             vars_file = self._resolve_main(vars_basepath)
+            defaults_file = self._resolve_main(defaults_basepath)

             library   = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))

             if not os.path.isfile(task) and not os.path.isfile(handler) and not os.path.isfile(vars_file) and not os.path.isdir(library):
                 raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s" % (role_path, task, handler, vars_file, library))
             if os.path.isfile(task):
-                nt = dict(include=pipes.quote(task), vars=role_vars)
+                nt = dict(include=pipes.quote(task), vars=role_vars, default_vars=default_vars)
                 for k in special_keys:
                     if k in special_vars:
                         nt[k] = special_vars[k]

@@ -246,6 +295,8 @@ class Play(object):
                 new_handlers.append(nt)
             if os.path.isfile(vars_file):
                 new_vars_files.append(vars_file)
+            if os.path.isfile(defaults_file):
+                defaults_files.append(defaults_file)
             if os.path.isdir(library):
                 utils.plugins.module_finder.add_directory(library)

@@ -277,6 +328,8 @@ class Play(object):
         ds['handlers'] = new_handlers
         ds['vars_files'] = new_vars_files

+        self.default_vars = self._load_role_defaults(defaults_files)
+
         return ds

     # *************************************************

@@ -299,7 +352,7 @@ class Play(object):
     # *************************************************

-    def _load_tasks(self, tasks, vars={}, sudo_vars={}, additional_conditions=[], original_file=None):
+    def _load_tasks(self, tasks, vars={}, default_vars={}, sudo_vars={}, additional_conditions=[], original_file=None):
         ''' handle task and handler include statements '''

         results = []

@@ -345,11 +398,12 @@ class Play(object):
                         included_additional_conditions.insert(0, utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
                     elif k == 'when':
                         included_additional_conditions.insert(0, utils.compile_when_to_only_if("jinja2_compare %s" % x[k]))
-                    elif k in ("include", "vars", "only_if", "sudo", "sudo_user"):
+                    elif k in ("include", "vars", "default_vars", "only_if", "sudo", "sudo_user"):
                         pass
                     else:
                         raise errors.AnsibleError("parse error: task includes cannot be used with other directives: %s" % k)

+                default_vars = utils.combine_vars(self.default_vars, x.get('default_vars', {}))
                 if 'vars' in x:
                     task_vars = utils.combine_vars(task_vars, x['vars'])
                 if 'only_if' in x:

@@ -367,9 +421,9 @@ class Play(object):
                 include_file = template(dirname, tokens[0], mv)
                 include_filename = utils.path_dwim(dirname, include_file)
                 data = utils.parse_yaml_from_file(include_filename)
-                results += self._load_tasks(data, mv, included_sudo_vars, included_additional_conditions, original_file=include_filename)
+                results += self._load_tasks(data, mv, default_vars, included_sudo_vars, included_additional_conditions, original_file=include_filename)
             elif type(x) == dict:
-                results.append(Task(self,x,module_vars=task_vars,additional_conditions=additional_conditions))
+                results.append(Task(self,x,module_vars=task_vars,default_vars=default_vars,additional_conditions=additional_conditions))
             else:
                 raise Exception("unexpected task type")
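The new _load_role_defaults above folds every role's defaults/main.yml into a single dictionary via utils.combine_vars. A rough standalone sketch of that merge (a plain dict merge stands in for combine_vars under the default hash_behaviour, and the file contents are made up):

    # Illustrative stand-in for the defaults merge in Play._load_role_defaults.
    # parsed_defaults_files mimics the parsed contents of each role's
    # defaults/main.yml (hypothetical data, not read from disk here).
    def combine_vars(a, b):
        merged = dict(a)
        merged.update(b)        # later sources win on key conflicts
        return merged

    parsed_defaults_files = [
        {'http_port': 80, 'max_clients': 200},   # e.g. roles/common/defaults/main.yml
        {'max_clients': 500},                    # e.g. roles/web/defaults/main.yml
    ]

    default_vars = {}
    for data in parsed_defaults_files:
        if not isinstance(data, dict):
            raise TypeError("defaults must be stored as a dictionary/hash")
        default_vars = combine_vars(default_vars, data)

    print(default_vars)   # {'http_port': 80, 'max_clients': 500}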
lib/ansible/playbook/task.py

@@ -24,7 +24,7 @@ class Task(object):

     __slots__ = [
         'name', 'meta', 'action', 'only_if', 'when', 'async_seconds', 'async_poll_interval',
-        'notify', 'module_name', 'module_args', 'module_vars',
+        'notify', 'module_name', 'module_args', 'module_vars', 'default_vars',
         'play', 'notified_by', 'tags', 'register',
         'delegate_to', 'first_available_file', 'ignore_errors',
         'local_action', 'transport', 'sudo', 'sudo_user', 'sudo_pass',

@@ -41,7 +41,7 @@ class Task(object):
         'any_errors_fatal', 'changed_when', 'always_run'
     ]

-    def __init__(self, play, ds, module_vars=None, additional_conditions=None):
+    def __init__(self, play, ds, module_vars=None, default_vars=None, additional_conditions=None):
         ''' constructor loads from a task or handler datastructure '''

         # meta directives are used to tell things like ansible/playbook to run

@@ -100,8 +100,9 @@ class Task(object):
             elif not x in Task.VALID_KEYS:
                 raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)

-        self.module_vars = module_vars
-        self.play        = play
+        self.module_vars  = module_vars
+        self.default_vars = default_vars
+        self.play         = play

         # load various attributes
         self.name = ds.get('name', None)
lib/ansible/runner/__init__.py

@@ -55,7 +55,6 @@ multiprocessing_runner = None

 OUTPUT_LOCKFILE  = tempfile.TemporaryFile()
 PROCESS_LOCKFILE = tempfile.TemporaryFile()
-MULTIPROCESSING_MANAGER = multiprocessing.Manager()

 ################################################

@@ -131,6 +130,7 @@ class Runner(object):
         sudo=False,                         # whether to run sudo or not
         sudo_user=C.DEFAULT_SUDO_USER,      # ex: 'root'
         module_vars=None,                   # a playbooks internals thing
+        default_vars=None,                  # ditto
         is_playbook=False,                  # running from playbook or not?
         inventory=None,                     # reference to Inventory object
         subset=None,                        # subset pattern

@@ -161,6 +161,7 @@ class Runner(object):
         self.inventory    = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
         self.module_vars  = utils.default(module_vars, lambda: {})
+        self.default_vars = utils.default(default_vars, lambda: {})
         self.always_run   = None
         self.connector    = connection.Connection(self)
         self.conditional  = conditional

@@ -415,6 +416,7 @@ class Runner(object):
         port = self.remote_port

         inject = {}
+        inject = utils.combine_vars(inject, self.default_vars)
         inject = utils.combine_vars(inject, host_variables)
         inject = utils.combine_vars(inject, self.module_vars)
         inject = utils.combine_vars(inject, self.setup_cache[host])

@@ -423,6 +425,7 @@ class Runner(object):
         inject['group_names'] = host_variables.get('group_names', [])
         inject['groups']      = self.inventory.groups_list()
         inject['vars']        = self.module_vars
+        inject['defaults']    = self.default_vars
         inject['environment'] = self.environment

         if self.inventory.basedir() is not None:

@@ -845,7 +848,7 @@ class Runner(object):
     def _parallel_exec(self, hosts):
         ''' handles mulitprocessing when more than 1 fork is required '''

-        manager = MULTIPROCESSING_MANAGER
+        manager = multiprocessing.Manager()
         job_queue = manager.Queue()
         for host in hosts:
             job_queue.put(host)
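The combine order above is what gives role defaults their low precedence: they are merged into the inject dictionary first, so host variables, module vars, and gathered facts all override them. A minimal sketch of that layering (a plain dict merge stands in for utils.combine_vars, and the values are made up):

    # Illustrative precedence sketch for the inject construction shown above.
    def combine_vars(a, b):
        merged = dict(a)
        merged.update(b)        # the right-hand side wins on conflicts
        return merged

    default_vars   = {'http_port': 80}                        # weakest: role defaults
    host_variables = {'http_port': 8080}                      # inventory host vars
    module_vars    = {}                                       # play/task vars
    setup_cache    = {'ansible_os_family': 'Debian'}          # gathered facts

    inject = {}
    for layer in (default_vars, host_variables, module_vars, setup_cache):
        inject = combine_vars(inject, layer)

    print(inject['http_port'])   # 8080 -- the host var overrides the role default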
lib/ansible/runner/action_plugins/fetch.py

@@ -73,7 +73,7 @@ class ActionModule(object):

         # use slurp if sudo and permissions are lacking
         remote_data = None
-        if remote_md5 in ('1', '2') and self.runner.sudo:
+        if remote_md5 in ('1', '2') or self.runner.sudo:
             slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
             if slurpres.is_successful():
                 if slurpres.result['encoding'] == 'base64':
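The one-character change above widens the slurp fallback: previously the md5 probe had to report a problem and sudo had to be active, whereas now either condition alone is enough. A small illustration of the two guards, treating the md5 codes as opaque flags exactly as the diff does:

    # Old and new guards from the fetch action plugin, side by side.
    def old_guard(remote_md5, sudo):
        return remote_md5 in ('1', '2') and sudo

    def new_guard(remote_md5, sudo):
        return remote_md5 in ('1', '2') or sudo

    print(old_guard('0', True),  new_guard('0', True))   # False True  (sudo alone now triggers slurp)
    print(old_guard('1', False), new_guard('1', False))  # False True  (a failed probe alone now triggers slurp)
    print(old_guard('1', True),  new_guard('1', True))   # True  True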
library/cloud/quantum_network

@@ -44,6 +44,11 @@ options:
       - The tenant name of the login user
     required: true
     default: 'yes'
+  tenant_name:
+    description:
+      - The name of the tenant for whom the network is created
+    required: false
+    default: None
   auth_url:
     description:
       - The keystone url for authentication

@@ -99,15 +104,15 @@ requirements: ["quantumclient", "keystoneclient"]
 '''

 EXAMPLES = '''
-# Creates an external,public network
-- quantum_network: state=present login_username=admin login_password=admin
-                   provider_network_type=gre login_tenant_name=admin
-                   provider_segmentation_id=1 tenant_name=tenant1 name=t1network"
+# Create a GRE backed Quantum network with tunnel id 1 for tenant1
+- quantum_network: name=t1network tenant_name=tenant1 state=present
+                   provider_network_type=gre provider_segmentation_id=1
+                   login_username=admin login_password=admin login_tenant_name=admin

-# Creates a GRE nework with tunnel id of 1 for tenant 1
-- quantum_network: state=present login_username=admin login_password=admin
-                   provider_network_type=local login_tenant_name=admin
-                   provider_segmentation_id=1 router_external=yes name=external_network
+# Create an external network
+- quantum_network: name=external_network state=present
+                   provider_network_type=local router_external=yes
+                   login_username=admin login_password=admin login_tenant_name=admin
 '''

 _os_keystone = None

@@ -130,7 +135,7 @@ def _get_endpoint(module, ksclient):
     try:
         endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
     except Exception as e:
-        module.fail_json(msg = "Error getting endpoint for glance: %s " % e.message)
+        module.fail_json(msg = "Error getting endpoint for Quantum: %s " % e.message)
     return endpoint

 def _get_quantum_client(module, kwargs):
library/system/user

@@ -1273,20 +1273,24 @@ class AIX(User):
         if self.groups is not None:
             current_groups = self.user_group_membership()
-            groups = self.get_groups_set()
-            group_diff = set(current_groups).symmetric_difference(groups)
             groups_need_mod = False
             groups = []

-            if group_diff:
-                if self.append:
-                    for g in groups:
-                        if g in group_diff:
-                            groups.extend(current_groups)
-                            set(groups)
-                            groups_need_mod = True
-                            break
-                else:
-                    groups_need_mod = True
+            if self.groups == '':
+                if current_groups and not self.append:
+                    groups_need_mod = True
+            else:
+                groups = self.get_groups_set()
+                group_diff = set(current_groups).symmetric_difference(groups)
+
+                if group_diff:
+                    if self.append:
+                        for g in groups:
+                            if g in group_diff:
+                                groups_need_mod = True
+                                break
+                    else:
+                        groups_need_mod = True

             if groups_need_mod:
                 cmd.append('-G')
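The reworked block above uses a symmetric difference to decide whether the user's groups need modification, and in append mode it only reacts when a desired group is missing. A compact illustration of that decision with made-up group sets:

    # Illustrative only: how the symmetric difference drives groups_need_mod.
    current_groups = {'staff', 'docker'}
    desired_groups = {'staff', 'wheel'}

    group_diff = set(current_groups).symmetric_difference(desired_groups)
    print(group_diff)                      # {'docker', 'wheel'} -- something differs

    append = True
    groups_need_mod = False
    if group_diff:
        if append:
            # only desired-but-missing groups matter when appending
            groups_need_mod = any(g in group_diff for g in desired_groups)
        else:
            # exact membership is enforced, so any difference counts
            groups_need_mod = True

    print(groups_need_mod)                 # True: 'wheel' still has to be added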
plugins/inventory/ec2.ini

@@ -12,6 +12,7 @@
 # in AWS and merge the results together. Alternatively, set this to a comma
 # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
 regions = all
+regions_exclude = us-gov-west-1

 # When generating inventory, Ansible needs to know how to address a server.
 # Each EC2 instance has a lot of variables associated with it. Here is the list:
plugins/inventory/ec2.py

@@ -189,12 +189,14 @@ class Ec2Inventory(object):
         # Regions
         self.regions = []
         configRegions = config.get('ec2', 'regions')
+        configRegions_exclude = config.get('ec2', 'regions_exclude')
         if (configRegions == 'all'):
             if self.eucalyptus_host:
                 self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
             else:
                 for regionInfo in ec2.regions():
-                    self.regions.append(regionInfo.name)
+                    if regionInfo.name not in configRegions_exclude:
+                        self.regions.append(regionInfo.name)
         else:
             self.regions = configRegions.split(",")
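Note that the exclusion test above runs against the raw regions_exclude string rather than a split list, so it is effectively a substring match. A small sketch of the behaviour with sample region names:

    # Illustrative sketch of the region filtering added above.
    config_regions_exclude = "us-gov-west-1"          # raw string from ec2.ini
    all_region_names = ["us-east-1", "us-west-1", "us-gov-west-1"]

    regions = []
    for name in all_region_names:
        # `name not in config_regions_exclude` is a substring test on the raw
        # string, mirroring the plugin's check; it also works if the setting
        # holds a comma-separated list of names
        if name not in config_regions_exclude:
            regions.append(name)

    print(regions)   # ['us-east-1', 'us-west-1'] -- the GovCloud region is skipped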