Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
A
ansible
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
OpenEdx
ansible
Commits
64673cc9
Commit
64673cc9
authored
May 11, 2012
by
John Kleint
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add test for large output; fix indentation.
parent
0956aa96
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
203 additions
and
199 deletions
+203
-199
test/TestRunner.py
+203
-199
No files found.
test/TestRunner.py
View file @
64673cc9
...
@@ -10,9 +10,9 @@ import os
...
@@ -10,9 +10,9 @@ import os
import
shutil
import
shutil
import
time
import
time
try
:
try
:
import
json
import
json
except
:
except
:
import
simplejson
as
json
import
simplejson
as
json
from
nose.plugins.skip
import
SkipTest
from
nose.plugins.skip
import
SkipTest
...
@@ -25,202 +25,206 @@ def get_binary(name):
...
@@ -25,202 +25,206 @@ def get_binary(name):
class TestRunner(unittest.TestCase):
    """Integration tests driving ansible.runner.Runner against localhost.

    Every test pushes a single ansible module (ping, copy, template,
    command, setup, fetch, ...) through one shared Runner configured in
    setUp, then inspects the per-host result dict for '127.0.0.2'.
    Requires the test inventory 'test/ansible_hosts' and SSH access to
    127.0.0.2 as the current user.
    """

    def setUp(self):
        # Run everything as the invoking user so no credentials are needed.
        self.user = getpass.getuser()
        self.runner = ansible.runner.Runner(
            module_name='ping',
            module_path='library/',
            module_args='',
            remote_user=self.user,
            remote_pass=None,
            host_list='test/ansible_hosts',
            timeout=5,
            forks=1,
            background=0,
            pattern='all',
        )
        self.cwd = os.getcwd()
        self.test_dir = os.path.join(self.cwd, 'test')
        self.stage_dir = self._prepare_stage_dir()

    def _prepare_stage_dir(self):
        # Recreate a pristine output directory for files the tests generate.
        stage_path = os.path.join(self.test_dir, 'test_data')
        if os.path.exists(stage_path):
            shutil.rmtree(stage_path, ignore_errors=False)
            assert not os.path.exists(stage_path)
        os.makedirs(stage_path)
        assert os.path.exists(stage_path)
        return stage_path

    def _get_test_file(self, filename):
        # get a file inside the test input directory; it must already exist
        filename = os.path.join(self.test_dir, filename)
        assert os.path.exists(filename)
        return filename

    def _get_stage_file(self, filename):
        # get a file inside the test output directory
        filename = os.path.join(self.stage_dir, filename)
        return filename

    def _run(self, module_name, module_args, background=0):
        ''' run a module and get the localhost results '''
        self.runner.module_name = module_name
        args = ' '.join(module_args)
        print("DEBUG: using args=%s" % args)
        self.runner.module_args = args
        self.runner.background = background
        results = self.runner.run()
        # when using nosetests this will only show up on failure
        # which is pretty useful
        print("RESULTS=%s" % results)
        assert "127.0.0.2" in results['contacted']
        return results['contacted']['127.0.0.2']

    def test_ping(self):
        result = self._run('ping', [])
        assert "ping" in result

    def test_facter(self):
        if not get_binary("facter"):
            raise SkipTest
        result = self._run('facter', [])
        assert "hostname" in result

    # temporarily disabled since it occasionally hangs
    # ohai's fault, setup module doesn't actually run this
    # to get ohai's "facts" anyway
    #
    #def test_ohai(self):
    #    if not get_binary("facter"):
    #        raise SkipTest
    #    result = self._run('ohai', [])
    #    assert "hostname" in result

    def test_copy(self):
        # test copy module, change trigger, etc
        input_ = self._get_test_file('sample.j2')
        output = self._get_stage_file('sample.out')
        assert not os.path.exists(output)
        result = self._run('copy', [
            "src=%s" % input_,
            "dest=%s" % output,
        ])
        assert os.path.exists(output)
        data_in = file(input_).read()
        data_out = file(output).read()
        assert data_in == data_out
        assert 'failed' not in result
        assert result['changed'] == True
        assert 'md5sum' in result
        # copying again over an identical destination must report no change
        result = self._run('copy', [
            "src=%s" % input_,
            "dest=%s" % output,
        ])
        assert result['changed'] == False

    def test_template(self):
        input_ = self._get_test_file('sample.j2')
        metadata = self._get_test_file('metadata.json')
        output = self._get_stage_file('sample.out')
        result = self._run('template', [
            "src=%s" % input_,
            "dest=%s" % output,
            "metadata=%s" % metadata
        ])
        assert os.path.exists(output)
        out = file(output).read()
        assert out.find("duck") != -1
        assert result['changed'] == True
        assert 'md5sum' in result
        assert 'failed' not in result
        # templating again with the same inputs must report no change
        result = self._run('template', [
            "src=%s" % input_,
            "dest=%s" % output,
            "metadata=%s" % metadata
        ])
        assert result['changed'] == False

    def test_command(self):
        # test command module, change trigger, etc
        result = self._run('command', ["/bin/echo", "hi"])
        assert "failed" not in result
        assert "msg" not in result
        assert result['rc'] == 0
        assert result['stdout'] == 'hi'
        assert result['stderr'] == ''

        # a failing command reports its nonzero exit code, not a failure key
        result = self._run('command', ["/bin/false"])
        assert result['rc'] == 1
        assert 'failed' not in result

        # a nonexistent binary is a module failure with no rc at all
        result = self._run('command', ["/usr/bin/this_does_not_exist", "splat"])
        assert 'msg' in result
        assert 'failed' in result
        assert 'rc' not in result

        # 'shell' goes through a shell, so $HOME gets expanded
        result = self._run('shell', ["/bin/echo", "$HOME"])
        assert 'failed' not in result
        assert result['rc'] == 0

    def test_large_output(self):
        # Ensure reading a large amount of output from a command doesn't hang.
        result = self._run('command', ["/bin/cat", "/usr/share/dict/words"])
        assert "failed" not in result
        assert "msg" not in result
        assert result['rc'] == 0
        assert len(result['stdout']) > 100000
        assert result['stderr'] == ''

    def test_setup(self):
        output = self._get_stage_file('output.json')
        result = self._run('setup', ["metadata=%s" % output, "a=2", "b=3", "c=4"])
        assert 'failed' not in result
        assert 'md5sum' in result
        assert result['changed'] == True
        outds = json.loads(file(output).read())
        assert outds['c'] == '4'
        # not bothering to test change hooks here since ohai/facter results change
        # almost every time so changed is always true, this just tests that
        # rewriting the file is ok
        result = self._run('setup', ["metadata=%s" % output, "a=2", "b=3", "c=4"])
        print("RAW RESULT=%s" % result)
        assert 'md5sum' in result

    def test_async(self):
        # test async launch and job status
        # of any particular module
        result = self._run('command', [get_binary("sleep"), "3"], background=20)
        assert 'ansible_job_id' in result
        assert 'started' in result
        jid = result['ansible_job_id']
        # no real chance of this op taking a while, but whatever
        time.sleep(5)
        # CLI will abstract this (when polling), but this is how it works internally
        result = self._run('async_status', ["jid=%s" % jid])
        # TODO: would be nice to have tests for supervisory process
        # killing job after X seconds
        assert 'finished' in result
        assert 'failed' not in result
        assert 'rc' in result
        assert 'stdout' in result
        assert result['ansible_job_id'] == jid

    def test_fetch(self):
        input_ = self._get_test_file('sample.j2')
        # fetch saves under <dest>/<hostname>/<full remote path>
        output = os.path.join(self.stage_dir, '127.0.0.2', input_)
        result = self._run('fetch', ["src=%s" % input_, "dest=%s" % self.stage_dir])
        assert os.path.exists(output)
        assert open(input_).read() == open(output).read()

    def test_yum(self):
        if not get_binary("yum"):
            raise SkipTest
        result = self._run('yum', ["list=repos"])
        assert 'failed' not in result

    def test_git(self):
        # TODO: tests for the git module
        pass

    def test_service(self):
        # TODO: tests for the service module
        pass
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment