OpenEdx / ansible / Commits / 630367eb
Commit 630367eb authored Apr 22, 2014 by Richard Hoop
Commit message: Creation complete
Parent: 3e75e504
Showing 1 changed file with 546 additions and 136 deletions:
  library/cloud/vsphere_guest (+546, -136)
library/cloud/vsphere_guest (view file @ 630367eb)
...
@@ -10,124 +10,520 @@ try:
except ImportError:
    import simplejson as json
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Creates a virtual guest on vsphere.
description:
  - Communicates with vsphere, creating a new virtual guest OS based on
    the specifications you supply to the module.
version_added: "1.1"
options:
  vcenter_hostname:
    description:
      - The hostname of the vcenter server the module will connect to, to create the guest.
    required: true
    default: null
    aliases: []
  user:
    description:
      - Username of the user to connect to vcenter as.
    required: true
    default: null
  password:
    description:
      - Password of the user to connect to vcenter as.
    required: true
    default: null
  resource_pool:
    description:
      - The name of the resource_pool to create the VM in.
    required: false
    default: None
  cluster:
    description:
      - The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
    required: false
    default: None
  datacenter:
    description:
      - The name of the datacenter to create the VM in.
    required: true
    default: null
  datastore:
    description:
      - The datastore to store the VM's config files in. (Hard-disk locations are specified separately.)
    required: true
    default: null
  esxi_hostname:
    description:
      - The hostname of the esxi host you want the VM to be created on.
    required: true
    default: null
  power_on:
    description:
      - Whether or not to power on the VM after creation.
    required: false
    default: no
    choices: [yes, no]
  vm_name:
    description:
      - The name you want to call the VM.
    required: true
    default: null
  vm_memory_mb:
    description:
      - How much memory in MB to give the VM.
    required: false
    default: 1024
  vm_num_cpus:
    description:
      - How many vCPUs to give the VM.
    required: false
    default: 1
  vm_scsi:
    description:
      - The type of scsi controller to add to the VM.
    required: false
    default: "paravirtual"
    choices: [paravirtual, lsi, lsi_sas, bus_logic]
  vm_disk:
    description:
      - A key/value list of disks, their sizes, and which datastore to keep each in.
    required: false
    default: null
  vm_nic:
    description:
      - A key/value list of nics, their types, and what network to put each on.
    required: false
    default: null
    choices: [vmxnet3, vmxnet2, vmxnet, e1000, e1000e, pcnet32]
  vm_notes:
    description:
      - Any notes that you want to show up in the VM's Annotations field.
    required: false
    default: null
  vm_cdrom:
    description:
      - A path, including datastore, to an ISO you want the CDROM device on the VM to have.
    required: false
    default: null
  vm_extra_config:
    description:
      - A key/value pair of any extra values you want set or changed in the vmx file of the VM. Useful for setting advanced options on the VM.
    required: false
    default: null
  guestosid:
    description:
      - "A vmware guest needs to have a specific OS identifier set on it
        during creation. You can find your guestosid at the following URL:
        http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html"
    required: true
    default: null
# informational: requirements for nodes
requirements: [ pysphere ]
author: Romeo Theriault
'''
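As an illustration (not part of this commit), the parameters this revision actually consumes are the nested dictionaries read in main() and passed to create_vm below (esxi, vm_hardware, vm_disk, vm_nic, plus guest, vcenter_hostname, user, password, resource_pool, cluster, vm_boot_state). The sketch that follows is expressed as a Python string so it can sit alongside the module source; the variable name EXAMPLES_SKETCH, the host names, and all values are placeholders.

# Hypothetical usage sketch, not part of this commit. Parameter names follow
# main()/create_vm below; every value is a placeholder.
EXAMPLES_SKETCH = '''
- vsphere_guest:
    vcenter_hostname: vcenter.example.com
    user: admin
    password: secret
    guest: newvm001
    vm_boot_state: powered_on
    esxi:
      datacenter: datacenter1
      hostname: esx-01.example.com
      datastore: datastore1
    vm_hardware:
      memory_mb: 1024
      num_cpus: 1
      osid: rhel6_64Guest
      scsi: paravirtual
    vm_disk:
      disk1:
        size_gb: 20
        type: thin
        datastore: datastore1
    vm_nic:
      nic1:
        type: vmxnet3
        network: VM Network
        network_type: standard
'''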
HAS_PYSPHERE = False
try:
    from pysphere import VIServer, VIProperty, MORTypes
    from pysphere.resources import VimService_services as VI
    from pysphere.vi_task import VITask
    from pysphere import VIException, VIApiException, FaultTypes
    HAS_PYSPHERE = True
except ImportError:
    pass
def add_scsi_controller(module, s, config, devices, type="paravirtual",
                        bus_num=0, disk_ctrl_key=1):
    # add a scsi controller
    scsi_ctrl_spec = config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')

    if type == "lsi":
        # For RHEL5
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
    elif type == "paravirtual":
        # For RHEL6
        scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
    elif type == "lsi_sas":
        scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def(
            "scsi_ctrl").pyclass()
    elif type == "bus_logic":
        scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass()
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding scsi controller to vm spec. No scsi controller"
            " type of: %s" % (type))

    scsi_ctrl.set_element_busNumber(bus_num)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")
    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    # Add the scsi controller to the VM spec.
    devices.append(scsi_ctrl_spec)
    return disk_ctrl_key
def add_disk(module, s, config_target, config, devices, datastore,
             type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0):
    # add a vmdk disk
    # Verify the datastore exists
    datastore_name, ds = find_datastore(module, s, datastore, config_target)
    # create a new disk - file based - for the vm
    disk_spec = config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
        "disk_backing").pyclass()
    disk_backing.set_element_fileName(datastore_name)
    disk_backing.set_element_diskMode("persistent")
    if type != "thick":
        disk_backing.set_element_thinProvisioned(1)
    disk_ctlr.set_element_key(key)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(disk_number)
    disk_ctlr.set_element_backing(disk_backing)
    disk_ctlr.set_element_capacityInKB(size)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)
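add_disk receives its size in kilobytes (it feeds set_element_capacityInKB directly); create_vm, further down in this file, converts each disk's size_gb before calling it. A minimal standalone sketch of that conversion, with placeholder values:

# Illustrative sketch, not part of the module: the GB-to-KB conversion that
# create_vm performs before calling add_disk.
size_gb = 20
size_kb = size_gb * 1024 * 1024   # matches disksize * 1024 * 1024 in create_vm
print(size_kb)                    # 20971520 KB, i.e. a 20 GB disk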
def add_cdrom(module, s, config_target, config, devices, default_devs,
              type="client", vm_cd_iso_path=None):
    # Add a cd-rom
    # Make sure the datastore exists.
    if vm_cd_iso_path:
        iso_location = vm_cd_iso_path.split('/', 1)
        datastore, ds = find_datastore(
            module, s, iso_location[0], config_target)
        iso_path = iso_location[1]

    # find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev

    # add a cdrom based on a physical device
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()

        if type == "iso":
            iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass()
            ds_ref = iso.new_datastore(ds)
            ds_ref.set_attribute_type(ds.get_attribute_type())
            iso.set_element_datastore(ds_ref)
            iso.set_element_fileName("%s %s" % (datastore, iso_path))
            cd_ctrl.set_element_backing(iso)
            cd_ctrl.set_element_key(20)
            cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
            cd_ctrl.set_element_unitNumber(0)
            cd_spec.set_element_device(cd_ctrl)
        elif type == "client":
            client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def(
                "client").pyclass()
            client.set_element_deviceName("")
            cd_ctrl.set_element_backing(client)
            cd_ctrl.set_element_key(20)
            cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
            cd_ctrl.set_element_unitNumber(0)
            cd_spec.set_element_device(cd_ctrl)
        else:
            s.disconnect()
            module.fail_json(
                msg="Error adding cdrom of type %s to vm spec. "
                " cdrom type can either be iso or client" % (type))

        devices.append(cd_spec)
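add_cdrom expects vm_cd_iso_path in the form datastore/path/to.iso and splits it once on the first slash, so only the leading component is treated as the datastore name. A small runnable illustration with a made-up path:

# Illustrative sketch, not part of the module: how add_cdrom splits
# vm_cd_iso_path; the path is a made-up placeholder.
vm_cd_iso_path = "datastore1/isos/rhel6.iso"
iso_location = vm_cd_iso_path.split('/', 1)
print(iso_location[0])   # 'datastore1'      -> resolved via find_datastore
print(iso_location[1])   # 'isos/rhel6.iso'  -> joined with the bracketed datastore name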
def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3",
            network_name="VM Network", network_type="standard"):
    # add a NIC
    # Different network card types are: "VirtualE1000",
    # "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet2",
    # "VirtualVmxnet3"
    nic_spec = config.new_deviceChange()
    nic_spec.set_element_operation("add")

    if nic_type == "e1000":
        nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
    elif nic_type == "e1000e":
        nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
    elif nic_type == "pcnet32":
        nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet":
        nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet2":
        nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet3":
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding nic to vm spec. No nic type of: %s" % (nic_type))

    if network_type == "standard":
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
    elif network_type == "dvs":
        # Get the portgroup key
        portgroupKey = find_portgroup_key(module, s, nfmor, network_name)
        # Get the dvswitch uuid
        dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey)

        nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
            "nic_backing_port").pyclass()
        nic_backing_port.set_element_switchUuid(dvswitch_uuid)
        nic_backing_port.set_element_portgroupKey(portgroupKey)

        nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_port(nic_backing_port)
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding nic backing to vm spec. No network type of:"
            " %s" % (network_type))

    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    nic_spec.set_element_device(nic_ctlr)
    devices.append(nic_spec)
def find_datastore(module, s, datastore, config_target):
    # Verify the datastore exists and put it in brackets if it does.
    ds = None
    for d in config_target.Datastore:
        if (d.Datastore.Accessible and
                (datastore and d.Datastore.Name == datastore)
                or (not datastore)):
            ds = d.Datastore.Datastore
            datastore = d.Datastore.Name
            break
    if not ds:
        s.disconnect()
        module.fail_json(
            msg="Datastore: %s does not appear to exist" % (datastore))

    datastore_name = "[%s]" % datastore
    return datastore_name, ds
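find_datastore returns the datastore name wrapped in the square-bracket notation vSphere uses for datastore paths, along with the datastore managed object. A quick runnable illustration of the returned name format, with a placeholder datastore:

# Illustrative sketch, not part of the module: the bracketed name that
# find_datastore builds; 'datastore1' is a placeholder.
datastore = "datastore1"
datastore_name = "[%s]" % datastore
print(datastore_name)   # '[datastore1]'; with a path appended it becomes e.g. '[datastore1] isos/rhel6.iso'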
def find_portgroup_key(module, s, nfmor, network_name):
    # Find a portgroup's key given the portgroup name.

    # Grab all the distributed virtual portgroups' names and keys.
    dvpg_mors = s._retrieve_properties_traversal(
        property_names=['name', 'key'],
        from_node=nfmor, obj_type='DistributedVirtualPortgroup')

    # Get the correct portgroup managed object.
    dvpg_mor = None
    for dvpg in dvpg_mors:
        if dvpg_mor:
            break
        for p in dvpg.PropSet:
            if p.Name == "name" and p.Val == network_name:
                dvpg_mor = dvpg
            if dvpg_mor:
                break

    # If dvpg_mor is empty we didn't find the named portgroup.
    if dvpg_mor is None:
        s.disconnect()
        module.fail_json(
            msg="Could not find the distributed virtual portgroup named"
            " %s" % network_name)

    # Get the portgroup key
    portgroupKey = None
    for p in dvpg_mor.PropSet:
        if p.Name == "key":
            portgroupKey = p.Val

    return portgroupKey
def find_dvswitch_uuid(module, s, nfmor, portgroupKey):
    # Find a dvswitch's uuid given a portgroup key.
    # Function searches all dvswitches in the datacenter to find the switch
    # that has the portgroup key.

    # Grab the dvswitch uuid and portgroup properties
    dvswitch_mors = s._retrieve_properties_traversal(
        property_names=['uuid', 'portgroup'],
        from_node=nfmor, obj_type='DistributedVirtualSwitch')

    dvswitch_mor = None
    # Get the dvswitch's managed object
    for dvswitch in dvswitch_mors:
        if dvswitch_mor:
            break
        for p in dvswitch.PropSet:
            if p.Name == "portgroup":
                pg_mors = p.Val.ManagedObjectReference
                for pg_mor in pg_mors:
                    if dvswitch_mor:
                        break
                    key_mor = s._get_object_properties(
                        pg_mor, property_names=['key'])
                    for key in key_mor.PropSet:
                        if key.Val == portgroupKey:
                            dvswitch_mor = dvswitch

    # Get the switch's uuid
    dvswitch_uuid = None
    for p in dvswitch_mor.PropSet:
        if p.Name == "uuid":
            dvswitch_uuid = p.Val

    return dvswitch_uuid
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
              vm_extra_config, vm_hardware, vm_disk, vm_nic, state):

    datacenter = esxi['datacenter']
    esxi_hostname = esxi['hostname']

    # Datacenter managed object reference
    dcmor = [k for k, v in vsphere_client.get_datacenters().items()
             if v == datacenter][0]
    if dcmor is None:
        vsphere_client.disconnect()
        module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)

    dcprops = VIProperty(vsphere_client, dcmor)

    # hostFolder managed reference
    hfmor = dcprops.hostFolder._obj

    # virtualmachineFolder managed object reference
    vmfmor = dcprops.vmFolder._obj

    # networkFolder managed object reference
    nfmor = dcprops.networkFolder._obj

    # Grab the computerResource name and host properties
    crmors = vsphere_client._retrieve_properties_traversal(
        property_names=['name', 'host'],
        from_node=hfmor,
        obj_type='ComputeResource')

    # Grab the host managed object reference of the esxi_hostname
    try:
        hostmor = [k for k, v in vsphere_client.get_hosts().items()
                   if v == esxi_hostname][0]
    except IndexError, e:
        vsphere_client.disconnect()
        module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)

    # Grab the computerResource managed object reference of the host we are
    # creating the VM on.
    crmor = None
    for cr in crmors:
        if crmor:
            break
        for p in cr.PropSet:
            if p.Name == "host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h == hostmor:
                        crmor = cr.Obj
                        break
                if crmor:
                    break
    crprops = VIProperty(vsphere_client, crmor)

    # Get resource pool managed reference
    # Requires that a cluster name be specified.
    if resource_pool:
        try:
            cluster = [k for k, v in vsphere_client.get_clusters().items()
                       if v == cluster_name][0]
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find Cluster named: %s" % cluster_name)

        try:
            rpmor = [k for k, v in vsphere_client.get_resource_pools(
                from_mor=cluster).items()
                if v == resource_pool][0]
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(
                msg="Cannot find Resource Pool named: %s" % resource_pool)
    else:
        rpmor = crprops.resourcePool._obj

    # CREATE VM CONFIGURATION
    # get config target
    request = VI.QueryConfigTargetRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(
        crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval

    # get default devices
    request = VI.QueryConfigOptionRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(
        crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval
    default_devs = config_option.DefaultDevice

    # add parameters to the create vm task
    create_vm_request = VI.CreateVM_TaskRequestMsg()
    config = create_vm_request.new_config()
    vmfiles = config.new_files()
    datastore_name, ds = find_datastore(
        module, vsphere_client, vm_disk['disk1']['datastore'], config_target)
    vmfiles.set_element_vmPathName(datastore_name)
    config.set_element_files(vmfiles)
    config.set_element_name(guest)
    if vm_extra_config['notes'] is not None:
        config.set_element_annotation(vm_extra_config['notes'])
    config.set_element_memoryMB(vm_hardware['memory_mb'])
    config.set_element_numCPUs(vm_hardware['num_cpus'])
    config.set_element_guestId(vm_hardware['osid'])
    devices = []

    # Attach all the hardware we want to the VM spec.
    # Add a scsi controller to the VM spec.
    disk_ctrl_key = add_scsi_controller(
        module, vsphere_client, config, devices, vm_hardware['scsi'])
    if vm_disk:
        disk_num = 0
        disk_key = 0
        for disk in sorted(vm_disk.iterkeys()):
            try:
                datastore = vm_disk[disk]['datastore']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. datastore needs to be"
                    " specified." % disk)
            try:
                disksize = vm_disk[disk]['size_gb']
                # Convert the disk size to kilobytes
                disksize = disksize * 1024 * 1024
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. size needs to be"
                    " specified." % disk)
            try:
                disktype = vm_disk[disk]['type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. type needs to be"
                    " specified." % disk)
            # Add the disk to the VM spec.
            add_disk(module, vsphere_client, config_target, config,
                     devices, datastore, disktype, disksize, disk_ctrl_key,
                     disk_num, disk_key)
            disk_num = disk_num + 1
            disk_key = disk_key + 1
    if 'vm_cdrom' in vm_hardware:
        cdrom_iso_path = None
        cdrom_type = None
        try:
            cdrom_type = vm_hardware['vm_cdrom']['type']
        except KeyError:
            vsphere_client.disconnect()
            module.fail_json(
                msg="Error on %s definition. cdrom type needs to be"
                " specified." % vm_hardware['vm_cdrom'])
        if cdrom_type == 'iso':
            try:
                cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. cdrom iso_path needs"
                    " to be specified." % vm_hardware['vm_cdrom'])
        # Add a CD-ROM device to the VM.
        add_cdrom(module, vsphere_client, config_target, config, devices,
                  default_devs, cdrom_type, cdrom_iso_path)
    if vm_nic:
        for nic in sorted(vm_nic.iterkeys()):
            try:
                nictype = vm_nic[nic]['type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. type needs to be "
                    " specified." % nic)
            try:
                network = vm_nic[nic]['network']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. network needs to be "
                    " specified." % nic)
            try:
                network_type = vm_nic[nic]['network_type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. network_type needs to be "
                    " specified." % nic)
            # Add the nic to the VM spec.
            add_nic(module, vsphere_client, nfmor, config, devices,
                    nictype, network, network_type)

    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(
            msg="Error creating vm: %s" % task.get_error_message())
    else:
        vm = None
        if vm_extra_config or state in ['powered_on', 'powered_off']:
            vm = vsphere_client.get_vm_by_name(guest)

        # The VM was created. If any extra config options were specified, set
        # them here, disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested
        power_state(vm, state, True)


def power_state(vm, state, force):
    """
    Correctly set the power status for a VM determined by the current and
    requested states. force is forceful
    """
    power_status = vm.get_status()

    check_status = ' '.join(state.split("_")).upper()
...
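power_state normalizes the requested state string before comparing it with the status pysphere reports. A runnable illustration of that transformation, with a placeholder value:

# Illustrative sketch, not part of the module: the normalization done in
# power_state; 'powered_on' is an example value.
state = 'powered_on'
check_status = ' '.join(state.split("_")).upper()
print(check_status)   # 'POWERED ON'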
@@ -203,6 +599,10 @@ def gather_facts(vm):

class DefaultVMConfig(object):

    """
    Shallow and deep dict comparison for interfaces
    """

    def __init__(self, check_dict, interface_dict):
        self.check_dict, self.interface_dict = check_dict, interface_dict
        self.set_current, self.set_past = set(
...
@@ -230,6 +630,10 @@ class DefaultVMConfig(object):

def config_check(name, passed, default, module):
    """
    Checks that the dict passed for VM configuration matches the required
    interface declared at the top of __main__
    """
    diff = DefaultVMConfig(passed, default)
    if len(diff.shallow_diff()):
...
@@ -251,11 +655,14 @@ def main():
    proto_vm_hardware = {
        'memory_mb': int,
        'num_cpus': int,
        'scsi': basestring,
        'osid': basestring
    }

    proto_vm_disk = {
        'disk1': {
            'datastore': basestring,
            'size_gb': int,
            'type': basestring
        }
...
@@ -270,7 +677,6 @@ def main():
    }

    proto_esxi = {
        'datastore': basestring,
        'datacenter': basestring,
        'hostname': basestring
    }
...
@@ -294,16 +700,11 @@ def main():
            vmware_guest_facts=dict(required=False, choices=BOOLEANS),
            guest=dict(required=True, type='str'),
            vm_disk=dict(required=False, type='dict', default={}),
            vm_boot_state=dict(
                required=False,
                choices=['powered_on', 'powered_off'],
                default='powered_on'),
            vm_nic=dict(required=False, type='dict', default={}),
            vm_hardware=dict(required=False, type='dict', default={}),
            vm_extra_config=dict(required=False, type='dict', default={}),
            resource_pool=dict(required=False, default=None, type='str'),
            cluster=dict(required=False, default=None, type='str'),
            force=dict(required=False, choices=BOOLEANS, default=False),
            esxi=dict(required=False, type='dict', default={}),
...
@@ -316,20 +717,15 @@ def main():
            ['state', 'vm_disk', 'vm_boot_state',
             'vm_nic', 'vm_hardware', 'esxi'],
            ['resource_pool', 'cluster']
        ],
    )

    try:
        from pysphere import VIServer, VIProperty, MORTypes
        from pysphere.resources import VimService_services as VI
        from pysphere.vi_task import VITask
        from pysphere import VIException, VIApiException, FaultTypes
    except ImportError, e:
        if not HAS_PYSPHERE:
            module.fail_json(msg='pysphere module required')

    vcenter_hostname = module.params['vcenter_hostname']
...
@@ -340,11 +736,12 @@ def main():
    guest = module.params['guest']
    force = module.params['force']
    vm_disk = module.params['vm_disk']
    vm_boot_state = module.params['vm_boot_state']
    vm_nic = module.params['vm_nic']
    vm_hardware = module.params['vm_hardware']
    vm_extra_config = module.params['vm_extra_config']
    esxi = module.params['esxi']
    resource_pool = module.params['resource_pool']
    cluster = module.params['cluster']

    # CONNECT TO THE SERVER
    viserver = VIServer()
...
@@ -392,15 +789,15 @@ def main():
            module.fail_json(
                msg="No such VM %s. Fact gathering requires an existing vm"
                    % guest)

        if state in ['restarted', 'reconfigured']:
            module.fail_json(
                msg="No such VM %s. States ["
                "restarted, reconfigured] required an existing VM" % guest)
        elif state == 'absent':
            module.exit_json(changed=False, msg="vm %s not present" % guest)

        # Create the VM
        elif state in ['present', 'powered_off', 'powered_on']:

            # Check the guest_config
            config_check("vm_disk", vm_disk, proto_vm_disk, module)
...
@@ -408,6 +805,19 @@ def main():
            config_check("vm_hardware", vm_hardware, proto_vm_hardware, module)
            config_check("esxi", esxi, proto_esxi, module)

            create_vm(
                vsphere_client=viserver,
                module=module,
                esxi=esxi,
                resource_pool=resource_pool,
                cluster_name=cluster,
                guest=guest,
                vm_extra_config=vm_extra_config,
                vm_hardware=vm_hardware,
                vm_disk=vm_disk,
                vm_nic=vm_nic,
                state=state
            )

    if vm:
        # If the vm already exists, let's get some info from it, pass back the
        # vm's vmware_guest_facts and then exit.
...