mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-03 10:47:08 +00:00
Initial commit
This commit is contained in:
commit
aebc1b03fd
4861 changed files with 812621 additions and 0 deletions
133
plugins/modules/cloud/misc/cloud_init_data_facts.py
Normal file
133
plugins/modules/cloud/misc/cloud_init_data_facts.py
Normal file
|
|
@ -0,0 +1,133 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2018, René Moser <mail@renemoser.net>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloud_init_data_facts
|
||||
short_description: Retrieve facts of cloud-init.
|
||||
description:
|
||||
- Gathers facts by reading the status.json and result.json of cloud-init.
|
||||
author: René Moser (@resmo)
|
||||
options:
|
||||
filter:
|
||||
description:
|
||||
- Filter facts
|
||||
choices: [ status, result ]
|
||||
notes:
|
||||
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather all facts of cloud init
|
||||
cloud_init_data_facts:
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
var: result
|
||||
|
||||
- name: Wait for cloud init to finish
|
||||
cloud_init_data_facts:
|
||||
filter: status
|
||||
register: res
|
||||
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
|
||||
retries: 50
|
||||
delay: 5
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
cloud_init_data_facts:
|
||||
description: Facts of result and status.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{
|
||||
"status": {
|
||||
"v1": {
|
||||
"datasource": "DataSourceCloudStack",
|
||||
"errors": []
|
||||
},
|
||||
"result": {
|
||||
"v1": {
|
||||
"datasource": "DataSourceCloudStack",
|
||||
"init": {
|
||||
"errors": [],
|
||||
"finished": 1522066377.0185432,
|
||||
"start": 1522066375.2648022
|
||||
},
|
||||
"init-local": {
|
||||
"errors": [],
|
||||
"finished": 1522066373.70919,
|
||||
"start": 1522066373.4726632
|
||||
},
|
||||
"modules-config": {
|
||||
"errors": [],
|
||||
"finished": 1522066380.9097016,
|
||||
"start": 1522066379.0011985
|
||||
},
|
||||
"modules-final": {
|
||||
"errors": [],
|
||||
"finished": 1522066383.56594,
|
||||
"start": 1522066382.3449218
|
||||
},
|
||||
"stage": null
|
||||
}
|
||||
}'
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
|
||||
CLOUD_INIT_PATH = "/var/lib/cloud/data/"
|
||||
|
||||
|
||||
def gather_cloud_init_data_facts(module):
    """Read cloud-init's result.json and status.json and return them as facts.

    :param module: AnsibleModule instance; its optional ``filter`` parameter
        restricts gathering to just 'result' or 'status'.
    :returns: dict with a single key ``cloud_init_data_facts`` mapping each
        gathered name to the parsed JSON content (empty dict when the file
        is missing or empty).
    """
    res = {
        'cloud_init_data_facts': dict()
    }

    # The filter is loop-invariant, so look it up once.  Avoid calling the
    # local 'filter', which would shadow the builtin.
    chosen = module.params.get('filter')
    for i in ['result', 'status']:
        if chosen is None or chosen == i:
            res['cloud_init_data_facts'][i] = dict()
            json_file = CLOUD_INIT_PATH + i + '.json'

            if os.path.exists(json_file):
                # Context manager guarantees the handle is closed even if
                # decoding raises.
                with open(json_file, 'rb') as f:
                    contents = to_text(f.read(), errors='surrogate_or_strict')

                if contents:
                    res['cloud_init_data_facts'][i] = module.from_json(contents)
    return res
|
||||
|
||||
|
||||
def main():
    """Entry point: declare the module interface, gather facts and exit."""
    argument_spec = dict(
        filter=dict(choices=['result', 'status']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    facts = gather_cloud_init_data_facts(module)

    # Expose the facts both as ansible_facts and as top-level return values.
    module.exit_json(changed=False, ansible_facts=facts, **facts)


if __name__ == '__main__':
    main()
|
||||
209
plugins/modules/cloud/misc/helm.py
Normal file
209
plugins/modules/cloud/misc/helm.py
Normal file
|
|
@ -0,0 +1,209 @@
|
|||
#!/usr/bin/python
|
||||
# (c) 2016, Flavio Percoco <flavio@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: helm
|
||||
short_description: Manages Kubernetes packages with the Helm package manager
|
||||
author: "Flavio Percoco (@flaper87)"
|
||||
description:
|
||||
- Install, upgrade, delete and list packages with the Helm package manager.
|
||||
requirements:
|
||||
- "pyhelm"
|
||||
- "grpcio"
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- Tiller's server host.
|
||||
default: "localhost"
|
||||
port:
|
||||
description:
|
||||
- Tiller's server port.
|
||||
default: 44134
|
||||
namespace:
|
||||
description:
|
||||
- Kubernetes namespace where the chart should be installed.
|
||||
default: "default"
|
||||
name:
|
||||
description:
|
||||
- Release name to manage.
|
||||
state:
|
||||
description:
|
||||
- Whether to install C(present), remove C(absent), or purge C(purged) a package.
|
||||
choices: ['absent', 'purged', 'present']
|
||||
default: "present"
|
||||
chart:
|
||||
description: |
|
||||
A map describing the chart to install. See examples for available options.
|
||||
default: {}
|
||||
values:
|
||||
description:
|
||||
- A map of value options for the chart.
|
||||
default: {}
|
||||
disable_hooks:
|
||||
description:
|
||||
- Whether to disable hooks during the uninstall process.
|
||||
type: bool
|
||||
default: 'no'
|
||||
'''
|
||||
|
||||
RETURN = ''' # '''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install helm chart
|
||||
helm:
|
||||
host: localhost
|
||||
chart:
|
||||
name: memcached
|
||||
version: 0.4.0
|
||||
source:
|
||||
type: repo
|
||||
location: https://kubernetes-charts.storage.googleapis.com
|
||||
state: present
|
||||
name: my-memcached
|
||||
namespace: default
|
||||
|
||||
- name: Uninstall helm chart
|
||||
helm:
|
||||
host: localhost
|
||||
state: absent
|
||||
name: my-memcached
|
||||
|
||||
- name: Install helm chart from a git repo
|
||||
helm:
|
||||
host: localhost
|
||||
chart:
|
||||
source:
|
||||
type: git
|
||||
location: https://github.com/user/helm-chart.git
|
||||
state: present
|
||||
name: my-example
|
||||
namespace: default
|
||||
values:
|
||||
foo: "bar"
|
||||
|
||||
- name: Install helm chart from a git repo specifying path
|
||||
helm:
|
||||
host: localhost
|
||||
chart:
|
||||
source:
|
||||
type: git
|
||||
location: https://github.com/helm/charts.git
|
||||
path: stable/memcached
|
||||
state: present
|
||||
name: my-memcached
|
||||
namespace: default
|
||||
values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
|
||||
'''
|
||||
|
||||
import traceback
|
||||
HELM_IMPORT_ERR = None
|
||||
try:
|
||||
import grpc
|
||||
from pyhelm import tiller
|
||||
from pyhelm import chartbuilder
|
||||
except ImportError:
|
||||
HELM_IMPORT_ERR = traceback.format_exc()
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
def install(module, tserver):
    """Install the chart as a new release, or upgrade an existing one.

    When a release with the requested name already exists in the target
    namespace it is upgraded only if its chart version differs; otherwise
    nothing is changed.

    :param module: AnsibleModule instance (reads name/values/chart/namespace).
    :param tserver: connected pyhelm Tiller client.
    :returns: dict(changed=bool)
    """
    changed = False
    params = module.params
    name = params['name']
    values = params['values']
    chart = params['chart']
    namespace = params['namespace']

    chartb = chartbuilder.ChartBuilder(chart)
    r_matches = (x for x in tserver.list_releases()
                 if x.name == name and x.namespace == namespace)
    installed_release = next(r_matches, None)
    if installed_release:
        # Use .get(): charts installed from a git source may carry no
        # explicit 'version' key (see EXAMPLES), which previously raised
        # KeyError here.
        if installed_release.chart.metadata.version != chart.get('version'):
            tserver.update_release(chartb.get_helm_chart(), False,
                                   namespace, name=name, values=values)
            changed = True
    else:
        tserver.install_release(chartb.get_helm_chart(), namespace,
                                dry_run=False, name=name,
                                values=values)
        changed = True

    return dict(changed=changed)
|
||||
|
||||
|
||||
def delete(module, tserver, purge=False):
    """Uninstall a release by name.

    :param module: AnsibleModule instance (reads name/disable_hooks).
    :param tserver: connected pyhelm Tiller client.
    :param purge: when True, also purge the release history.
    :returns: dict(changed=bool); unchanged when the release did not exist.
    """
    changed = False
    params = module.params

    name = params['name']
    if not name:
        module.fail_json(msg='Missing required field name')

    disable_hooks = params['disable_hooks']

    try:
        tserver.uninstall_release(name, disable_hooks, purge)
        changed = True
    except grpc._channel._Rendezvous as exc:
        # A missing release is treated as "already absent"; anything else
        # is re-raised bare to keep the original traceback intact.
        if 'not found' not in str(exc):
            raise

    return dict(changed=changed)
|
||||
|
||||
|
||||
def main():
    """The main function: declare the module interface and dispatch on state."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='localhost'),
            port=dict(type='int', default=44134),
            name=dict(type='str', default=''),
            chart=dict(type='dict'),
            state=dict(
                choices=['absent', 'purged', 'present'],
                default='present'
            ),
            # Install options
            values=dict(type='dict'),
            namespace=dict(type='str', default='default'),

            # Uninstall options
            disable_hooks=dict(type='bool', default=False),
        ),
        supports_check_mode=True)

    if HELM_IMPORT_ERR:
        module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)

    host = module.params['host']
    port = module.params['port']
    state = module.params['state']
    tserver = tiller.Tiller(host, port)

    # Dispatch on the (choices-validated) state.  The original used the
    # buggy substring test "state in 'absent'" / "state in 'purged'";
    # equality comparison is what was meant.
    if state == 'present':
        rst = install(module, tserver)
    elif state == 'absent':
        rst = delete(module, tserver)
    else:  # state == 'purged'
        rst = delete(module, tserver, True)

    module.exit_json(**rst)


if __name__ == '__main__':
    main()
|
||||
475
plugins/modules/cloud/misc/ovirt.py
Normal file
475
plugins/modules/cloud/misc/ovirt.py
Normal file
|
|
@ -0,0 +1,475 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ovirt
|
||||
author:
|
||||
- Vincent Van der Kussen (@vincentvdk)
|
||||
short_description: oVirt/RHEV platform management
|
||||
deprecated:
|
||||
removed_in: "2.10"
|
||||
why: This module is for deprecated version of ovirt.
|
||||
alternative: Use M(ovirt_vm) instead
|
||||
description:
|
||||
- This module only supports oVirt/RHEV version 3. A newer module M(ovirt_vm) supports oVirt/RHV version 4.
|
||||
- Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
|
||||
options:
|
||||
user:
|
||||
description:
|
||||
- The user to authenticate with.
|
||||
required: true
|
||||
url:
|
||||
description:
|
||||
- The url of the oVirt instance.
|
||||
required: true
|
||||
instance_name:
|
||||
description:
|
||||
- The name of the instance to use.
|
||||
required: true
|
||||
aliases: [ vmname ]
|
||||
password:
|
||||
description:
|
||||
- Password of the user to authenticate with.
|
||||
required: true
|
||||
image:
|
||||
description:
|
||||
- The template to use for the instance.
|
||||
resource_type:
|
||||
description:
|
||||
- Whether you want to deploy an image or create an instance from scratch.
|
||||
choices: [ new, template ]
|
||||
zone:
|
||||
description:
|
||||
- Deploy the image to this oVirt cluster.
|
||||
instance_disksize:
|
||||
description:
|
||||
- Size of the instance's disk in GB.
|
||||
aliases: [ vm_disksize]
|
||||
instance_cpus:
|
||||
description:
|
||||
- The instance's number of CPUs.
|
||||
default: 1
|
||||
aliases: [ vmcpus ]
|
||||
instance_nic:
|
||||
description:
|
||||
- The name of the network interface in oVirt/RHEV.
|
||||
aliases: [ vmnic ]
|
||||
instance_network:
|
||||
description:
|
||||
- The logical network the machine should belong to.
|
||||
default: rhevm
|
||||
aliases: [ vmnetwork ]
|
||||
instance_mem:
|
||||
description:
|
||||
- The instance's amount of memory in MB.
|
||||
aliases: [ vmmem ]
|
||||
instance_type:
|
||||
description:
|
||||
- Define whether the instance is a server, desktop or high_performance.
|
||||
- I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
|
||||
choices: [ desktop, server, high_performance ]
|
||||
default: server
|
||||
aliases: [ vmtype ]
|
||||
disk_alloc:
|
||||
description:
|
||||
- Define whether disk is thin or preallocated.
|
||||
choices: [ preallocated, thin ]
|
||||
default: thin
|
||||
disk_int:
|
||||
description:
|
||||
- Interface type of the disk.
|
||||
choices: [ ide, virtio ]
|
||||
default: virtio
|
||||
instance_os:
|
||||
description:
|
||||
- Type of Operating System.
|
||||
aliases: [ vmos ]
|
||||
instance_cores:
|
||||
description:
|
||||
- Define the instance's number of cores.
|
||||
default: 1
|
||||
aliases: [ vmcores ]
|
||||
sdomain:
|
||||
description:
|
||||
- The Storage Domain where you want to create the instance's disk on.
|
||||
region:
|
||||
description:
|
||||
- The oVirt/RHEV datacenter where you want to deploy to.
|
||||
instance_dns:
|
||||
description:
|
||||
- Define the instance's Primary DNS server.
|
||||
aliases: [ dns ]
|
||||
instance_domain:
|
||||
description:
|
||||
- Define the instance's Domain.
|
||||
aliases: [ domain ]
|
||||
instance_hostname:
|
||||
description:
|
||||
- Define the instance's Hostname.
|
||||
aliases: [ hostname ]
|
||||
instance_ip:
|
||||
description:
|
||||
- Define the instance's IP.
|
||||
aliases: [ ip ]
|
||||
instance_netmask:
|
||||
description:
|
||||
- Define the instance's Netmask.
|
||||
aliases: [ netmask ]
|
||||
instance_rootpw:
|
||||
description:
|
||||
- Define the instance's Root password.
|
||||
aliases: [ rootpw ]
|
||||
instance_key:
|
||||
description:
|
||||
- Define the instance's Authorized key.
|
||||
aliases: [ key ]
|
||||
state:
|
||||
description:
|
||||
- Create, terminate or remove instances.
|
||||
choices: [ absent, present, restarted, shutdown, started ]
|
||||
default: present
|
||||
requirements:
|
||||
- ovirt-engine-sdk-python
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Basic example to provision from image
|
||||
ovirt:
|
||||
user: admin@internal
|
||||
url: https://ovirt.example.com
|
||||
instance_name: ansiblevm04
|
||||
password: secret
|
||||
image: centos_64
|
||||
zone: cluster01
|
||||
resource_type: template
|
||||
|
||||
- name: Full example to create new instance from scratch
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
resource_type: new
|
||||
instance_type: server
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
instance_disksize: 10
|
||||
zone: cluster01
|
||||
region: datacenter1
|
||||
instance_cpus: 1
|
||||
instance_nic: nic1
|
||||
instance_network: rhevm
|
||||
instance_mem: 1000
|
||||
disk_alloc: thin
|
||||
sdomain: FIBER01
|
||||
instance_cores: 1
|
||||
instance_os: rhel_6x64
|
||||
disk_int: virtio
|
||||
|
||||
- name: Stopping an existing instance
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: stopped
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
|
||||
- name: Start an existing instance
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: started
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
|
||||
- name: Start an instance with cloud init information
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: started
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
hostname: testansible
|
||||
domain: ansible.local
|
||||
ip: 192.0.2.100
|
||||
netmask: 255.255.255.0
|
||||
gateway: 192.0.2.1
|
||||
rootpw: bigsecret
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
from ovirtsdk.api import API
|
||||
from ovirtsdk.xml import params
|
||||
HAS_OVIRTSDK = True
|
||||
except ImportError:
|
||||
HAS_OVIRTSDK = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# create connection with API
|
||||
#
|
||||
def conn(url, user, password):
    """Open a connection to the oVirt 3 API and verify that it works.

    :param url: base API URL.
    :param user: user name to authenticate with.
    :param password: the user's password.
    :raises Exception: when the probe call against the API fails.
    :returns: a connected ovirtsdk ``API`` object.
    """
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        # Probe the connection; the return value itself is not needed
        # (the original bound it to an unused local).
        api.test()
    except Exception:
        raise Exception("error connecting to the oVirt API")
    return api
|
||||
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# Create VM from scratch
|
||||
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    # Create a VM from scratch: build VM, disk and NIC parameter objects for
    # the chosen allocation policy, then add them through the oVirt 3 SDK.
    # 'thin' maps to a sparse 'cow' disk, 'preallocated' to a non-sparse
    # 'raw' disk; the VM parameters themselves are identical in both branches.
    if vmdisk_alloc == 'thin':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
                             format='cow',
                             storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        # NOTE(review): this branch hard-codes the NIC name 'nic1' while the
        # preallocated branch uses the caller-supplied vmnic -- looks like a
        # bug, kept as-is to preserve behavior.
        nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
    elif vmdisk_alloc == 'preallocated':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
                             format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
    # NOTE(review): if vmdisk_alloc is neither value, vmparams/vmdisk/nic_net1
    # are unbound and the code below raises NameError; in practice the
    # argument_spec choices validation prevents that.

    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception("Error creating VM with specified parameters")
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except Exception:
        raise Exception("Error attaching disk")
    try:
        vm.nics.add(nic_net1)
    except Exception:
        raise Exception("Error adding nic")
|
||||
|
||||
|
||||
# create an instance from a template
|
||||
def create_vm_template(conn, vmname, image, zone):
    """Create VM *vmname* in cluster *zone* by cloning template *image*."""
    spec = params.VM(name=vmname,
                     cluster=conn.clusters.get(name=zone),
                     template=conn.templates.get(name=image),
                     disks=params.Disks(clone=True))
    try:
        conn.vms.add(spec)
    except Exception:
        raise Exception('error adding template %s' % image)
|
||||
|
||||
|
||||
# start instance
|
||||
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
             domain=None, dns=None, rootpw=None, key=None):
    """Start a VM, optionally passing cloud-init personalisation data.

    Cloud-init is enabled as soon as any personalisation argument is given;
    a static NIC configuration is built only when ip, netmask and gateway
    are all present.
    """
    vm = conn.vms.get(name=vmname)
    use_cloud_init = False
    nics = None
    if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
        use_cloud_init = True
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
        # The original also created an unused params.Nics() object here that
        # was immediately overwritten; dropped as dead code.
        nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
                                           root_password=rootpw, nic_configurations=nics, dns_servers=dns,
                                           authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
    vm.start(action=action)
|
||||
|
||||
|
||||
# Stop instance
|
||||
def vm_stop(conn, vmname):
    """Power off the VM called *vmname*."""
    conn.vms.get(name=vmname).stop()
|
||||
|
||||
|
||||
# restart instance
|
||||
def vm_restart(conn, vmname):
    """Stop *vmname*, wait until it reports 'down', then start it again.

    NOTE(review): the poll loop has no timeout -- it spins forever if the
    VM never reaches 'down'.
    """
    # The original also fetched vm_status() into an unused local; that pure
    # read has been dropped.
    vm = conn.vms.get(name=vmname)
    vm.stop()
    while conn.vms.get(vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()
|
||||
|
||||
|
||||
# remove an instance
|
||||
def vm_remove(conn, vmname):
    """Delete the VM called *vmname*."""
    conn.vms.get(name=vmname).delete()
|
||||
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# VM statuses
|
||||
#
|
||||
# Get the VMs status
|
||||
def vm_status(conn, vmname):
    """Return the state string of the VM called *vmname*."""
    return conn.vms.get(name=vmname).status.state
|
||||
|
||||
|
||||
# Get VM object and return it's name if object exists
|
||||
def get_vm(conn, vmname):
    """Return the VM's name if it exists, otherwise the literal string "empty"."""
    vm = conn.vms.get(name=vmname)
    return "empty" if vm is None else vm.get_name()
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# Hypervisor operations
|
||||
#
|
||||
# not available yet
|
||||
# ------------------------------------------------------------------- #
|
||||
# Main
|
||||
|
||||
|
||||
def main():
    # Entry point of the (deprecated) ovirt module: declare the argument
    # spec, connect to the oVirt 3 API, then dispatch on the requested
    # state.  Each branch ends in module.exit_json()/fail_json(), which
    # terminates the module.
    #
    # NOTE(review): DOCUMENTATION lists the state choices as
    # [absent, present, restarted, shutdown, started] but the spec below
    # accepts 'restart' and 'shutdown' -- docs and code disagree.
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
            user=dict(type='str', required=True),
            url=dict(type='str', required=True),
            instance_name=dict(type='str', required=True, aliases=['vmname']),
            password=dict(type='str', required=True, no_log=True),
            image=dict(type='str'),
            resource_type=dict(type='str', choices=['new', 'template']),
            zone=dict(type='str'),
            instance_disksize=dict(type='str', aliases=['vm_disksize']),
            instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
            instance_nic=dict(type='str', aliases=['vmnic']),
            instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
            instance_mem=dict(type='str', aliases=['vmmem']),
            instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
            disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
            disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
            instance_os=dict(type='str', aliases=['vmos']),
            instance_cores=dict(type='str', default=1, aliases=['vmcores']),
            instance_hostname=dict(type='str', aliases=['hostname']),
            instance_ip=dict(type='str', aliases=['ip']),
            instance_netmask=dict(type='str', aliases=['netmask']),
            instance_gateway=dict(type='str', aliases=['gateway']),
            instance_domain=dict(type='str', aliases=['domain']),
            instance_dns=dict(type='str', aliases=['dns']),
            instance_rootpw=dict(type='str', aliases=['rootpw']),
            instance_key=dict(type='str', aliases=['key']),
            sdomain=dict(type='str'),
            region=dict(type='str'),
        ),
    )

    if not HAS_OVIRTSDK:
        module.fail_json(msg='ovirtsdk required for this module')

    # Unpack every module parameter into a local for readability below.
    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image']  # name of the image to deploy
    resource_type = module.params['resource_type']  # template or from scratch
    zone = module.params['zone']  # oVirt cluster
    vmdisk_size = module.params['instance_disksize']  # disksize
    vmcpus = module.params['instance_cpus']  # number of cpu
    vmnic = module.params['instance_nic']  # network interface
    vmnetwork = module.params['instance_network']  # logical network
    vmmem = module.params['instance_mem']  # mem size
    vmdisk_alloc = module.params['disk_alloc']  # thin, preallocated
    vmdisk_int = module.params['disk_int']  # disk interface virtio or ide
    vmos = module.params['instance_os']  # Operating System
    vmtype = module.params['instance_type']  # server, desktop or high_performance
    vmcores = module.params['instance_cores']  # number of cores
    sdomain = module.params['sdomain']  # storage domain to store disk on
    region = module.params['region']  # oVirt Datacenter
    hostname = module.params['instance_hostname']
    ip = module.params['instance_ip']
    netmask = module.params['instance_netmask']
    gateway = module.params['instance_gateway']
    domain = module.params['instance_domain']
    dns = module.params['instance_dns']
    rootpw = module.params['instance_rootpw']
    key = module.params['instance_key']
    # initialize connection
    try:
        c = conn(url + "/api", user, password)
    except Exception as e:
        module.fail_json(msg='%s' % e)

    # present: create the VM (from a template or from scratch) unless a VM
    # with the same name already exists.
    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                try:
                    create_vm_template(c, vmname, image, zone)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                try:
                    create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)

    # started: start the VM, passing any cloud-init personalisation data.
    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            # vm_start(c, vmname)
            vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
            module.exit_json(changed=True, msg="VM %s started" % vmname)

    # shutdown: stop the VM unless it is already down.
    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)

    # restart: only meaningful for a running VM.
    if state == 'restart':
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)

    # absent: remove the VM if it exists.
    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)


if __name__ == '__main__':
    # The module is flagged as removed in Ansible 2.10: invoking it reports
    # the removal instead of calling main().
    removed_module("2.10")
|
||||
614
plugins/modules/cloud/misc/proxmox.py
Normal file
614
plugins/modules/cloud/misc/proxmox.py
Normal file
|
|
@ -0,0 +1,614 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox
|
||||
short_description: management of instances in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
vmid:
|
||||
description:
|
||||
- the instance id
|
||||
- if not set, the next available VM ID will be fetched from ProxmoxAPI.
|
||||
- if not set, will be fetched from PromoxAPI based on the hostname
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
type: bool
|
||||
default: 'no'
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, when new VM will be created
|
||||
- required only for C(state=present)
|
||||
- for another states will be autodiscovered
|
||||
pool:
|
||||
description:
|
||||
- Proxmox VE resource pool
|
||||
password:
|
||||
description:
|
||||
- the instance root password
|
||||
- required only for C(state=present)
|
||||
hostname:
|
||||
description:
|
||||
- the instance hostname
|
||||
- required only for C(state=present)
|
||||
- must be unique if vmid is not passed
|
||||
ostemplate:
|
||||
description:
|
||||
- the template for VM creating
|
||||
- required only for C(state=present)
|
||||
disk:
|
||||
description:
|
||||
- hard disk size in GB for instance
|
||||
default: 3
|
||||
cores:
|
||||
description:
|
||||
- Specify number of cores per socket.
|
||||
default: 1
|
||||
cpus:
|
||||
description:
|
||||
- numbers of allocated cpus for instance
|
||||
default: 1
|
||||
memory:
|
||||
description:
|
||||
- memory size in MB for instance
|
||||
default: 512
|
||||
swap:
|
||||
description:
|
||||
- swap memory size in MB for instance
|
||||
default: 0
|
||||
netif:
|
||||
description:
|
||||
- specifies network interfaces for the container. As a hash/dictionary defining interfaces.
|
||||
mounts:
|
||||
description:
|
||||
- specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points
|
||||
ip_address:
|
||||
description:
|
||||
- specifies the address the container will be assigned
|
||||
onboot:
|
||||
description:
|
||||
- specifies whether a VM will be started during system bootup
|
||||
type: bool
|
||||
default: 'no'
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
cpuunits:
|
||||
description:
|
||||
- CPU weight for a VM
|
||||
default: 1000
|
||||
nameserver:
|
||||
description:
|
||||
- sets DNS server IP address for a container
|
||||
searchdomain:
|
||||
description:
|
||||
- sets DNS search domain for a container
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
force:
|
||||
description:
|
||||
- forcing operations
|
||||
- can be used only with states C(present), C(stopped), C(restarted)
|
||||
- with C(state=present) force option allow to overwrite existing container
|
||||
- with states C(stopped) , C(restarted) allow to force stop instance
|
||||
type: bool
|
||||
default: 'no'
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance
|
||||
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
|
||||
default: present
|
||||
pubkey:
|
||||
description:
|
||||
- Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions
|
||||
unprivileged:
|
||||
description:
|
||||
- Indicate if the container should be unprivileged
|
||||
type: bool
|
||||
default: 'no'
|
||||
|
||||
notes:
|
||||
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
|
||||
author: Sergei Antipov (@UnderGreen)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create new container with minimal options
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container automatically selecting the next available vmid.
|
||||
- proxmox:
|
||||
node: 'uk-mc02'
|
||||
api_user: 'root@pam'
|
||||
api_password: '1q2w3e'
|
||||
api_host: 'node1'
|
||||
password: '123456'
|
||||
hostname: 'example.org'
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container with minimal options with force(it will rewrite existing container)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
force: yes
|
||||
|
||||
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container with minimal options defining network interface with dhcp
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
|
||||
|
||||
# Create new container with minimal options defining network interface with static ip
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
|
||||
|
||||
# Create new container with minimal options defining a mount with 8GB
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
|
||||
|
||||
# Create new container with minimal options defining a cpu core limit
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
cores: 2
|
||||
|
||||
# Start container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: started
|
||||
|
||||
# Start container with mount. You should enter a 90-second timeout because servers with additional disks take longer to boot.
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: started
|
||||
timeout: 90
|
||||
|
||||
# Stop container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: stopped
|
||||
|
||||
# Stop container with force
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
force: yes
|
||||
state: stopped
|
||||
|
||||
# Restart container(stopped or mounted container you can't restart)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: restarted
|
||||
|
||||
# Remove container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
VZ_TYPE = None
|
||||
|
||||
|
||||
def get_nextvmid(module, proxmox):
    """Return the next free VM ID reported by the cluster.

    Fails the Ansible module (and therefore does not return) when the
    API call raises.
    """
    try:
        return proxmox.cluster.nextid.get()
    except Exception as e:
        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
                         exception=traceback.format_exc())
|
||||
|
||||
|
||||
def get_vmid(proxmox, hostname):
    """Return the list of VM IDs whose resource name equals *hostname*."""
    matches = []
    for vm in proxmox.cluster.resources.get(type='vm'):
        if 'name' in vm and vm['name'] == hostname:
            matches.append(vm['vmid'])
    return matches
|
||||
|
||||
|
||||
def get_instance(proxmox, vmid):
    """Return the cluster resource entries matching *vmid* (empty list if none)."""
    wanted = int(vmid)
    found = []
    for vm in proxmox.cluster.resources.get(type='vm'):
        if vm['vmid'] == wanted:
            found.append(vm)
    return found
|
||||
|
||||
|
||||
def content_check(proxmox, node, ostemplate, template_store):
    """Return a non-empty (truthy) list when *ostemplate* exists in *template_store* on *node*."""
    hits = []
    for cnt in proxmox.nodes(node).storage(template_store).content.get():
        if cnt['volid'] == ostemplate:
            hits.append(True)
    return hits
|
||||
|
||||
|
||||
def node_check(proxmox, node):
    """Return a non-empty (truthy) list when *node* is a member of the cluster."""
    present = []
    for nd in proxmox.nodes.get():
        if nd['node'] == node:
            present.append(True)
    return present
|
||||
|
||||
|
||||
def proxmox_version(proxmox):
    """Return the Proxmox VE version as a LooseVersion for easy comparison."""
    return LooseVersion(proxmox.version.get()['version'])
|
||||
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
    """Create a new container on *node* and wait up to *timeout* seconds for the task.

    Returns True when the creation task finishes with exit status OK; fails
    the module when the task does not finish within *timeout* seconds.
    Remaining keyword arguments are forwarded to the PVE create call.
    """
    proxmox_node = proxmox.nodes(node)
    # Drop unset options so they are not sent to the API at all.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)

    if VZ_TYPE == 'lxc':
        kwargs['cpulimit'] = cpus
        kwargs['rootfs'] = disk
        # netif/mounts arrive as dicts of raw PVE config entries; flatten them
        # into top-level parameters of the create call.
        if 'netif' in kwargs:
            kwargs.update(kwargs['netif'])
            del kwargs['netif']
        if 'mounts' in kwargs:
            kwargs.update(kwargs['mounts'])
            del kwargs['mounts']
        if 'pubkey' in kwargs:
            if proxmox_version(proxmox) >= LooseVersion('4.2'):
                kwargs['ssh-public-keys'] = kwargs['pubkey']
            # BUGFIX: always remove the raw 'pubkey' key.  Previously it was
            # deleted only on PVE >= 4.2, so older clusters received an
            # unknown 'pubkey' parameter and the API call failed, although
            # the module documentation promises the option is ignored there.
            del kwargs['pubkey']
    else:
        kwargs['cpus'] = cpus
        kwargs['disk'] = disk

    taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)

    while timeout:
        # Fetch the task status once per iteration (was fetched twice before).
        status = proxmox_node.tasks(taskid).status.get()
        if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
                                 proxmox_node.tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
    """Start container *vmid* and wait up to *timeout* seconds for the task to finish.

    Returns True on success; fails the module if the task does not finish in time.
    """
    node = proxmox.nodes(vm[0]['node'])
    taskid = getattr(node, VZ_TYPE)(vmid).status.start.post()
    while timeout:
        task = node.tasks(taskid).status.get()
        if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
                                 node.tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def stop_instance(module, proxmox, vm, vmid, timeout, force):
    """Shut down container *vmid* (forcibly when *force* is truthy) and wait for the task.

    Returns True on success; fails the module if the task does not finish in time.
    """
    node = proxmox.nodes(vm[0]['node'])
    status_api = getattr(node, VZ_TYPE)(vmid).status
    if force:
        taskid = status_api.shutdown.post(forceStop=1)
    else:
        taskid = status_api.shutdown.post()
    while timeout:
        task = node.tasks(taskid).status.get()
        if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
                                 node.tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def umount_instance(module, proxmox, vm, vmid, timeout):
    """Unmount container *vmid* and wait up to *timeout* seconds for the task to finish.

    Returns True on success; fails the module if the task does not finish in time.
    """
    node = proxmox.nodes(vm[0]['node'])
    taskid = getattr(node, VZ_TYPE)(vmid).status.umount.post()
    while timeout:
        task = node.tasks(taskid).status.get()
        if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
                                 node.tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def main():
    """Entry point: dispatch on 'state' to create/start/stop/restart/remove a container."""
    module = AnsibleModule(
        argument_spec=dict(
            api_host=dict(required=True),
            api_user=dict(required=True),
            api_password=dict(no_log=True),
            vmid=dict(required=False),
            validate_certs=dict(type='bool', default='no'),
            node=dict(),
            pool=dict(),
            password=dict(no_log=True),
            hostname=dict(),
            ostemplate=dict(),
            disk=dict(type='str', default='3'),
            cores=dict(type='int', default=1),
            cpus=dict(type='int', default=1),
            memory=dict(type='int', default=512),
            swap=dict(type='int', default=0),
            netif=dict(type='dict'),
            mounts=dict(type='dict'),
            ip_address=dict(),
            onboot=dict(type='bool', default='no'),
            storage=dict(default='local'),
            cpuunits=dict(type='int', default=1000),
            nameserver=dict(),
            searchdomain=dict(),
            timeout=dict(type='int', default=30),
            force=dict(type='bool', default='no'),
            state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
            pubkey=dict(type='str', default=None),
            unprivileged=dict(type='bool', default='no')
        )
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    disk = module.params['disk']
    cpus = module.params['cpus']
    memory = module.params['memory']
    swap = module.params['swap']
    storage = module.params['storage']
    hostname = module.params['hostname']
    if module.params['ostemplate'] is not None:
        # 'local:vztmpl/foo.tar.gz' -> storage name 'local'
        template_store = module.params['ostemplate'].split(":")[0]
    timeout = module.params['timeout']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        # PVE < 4.0 uses OpenVZ containers, newer releases use LXC.
        VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'

    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If hostname is set get the VM id from ProxmoxAPI
    if not vmid and state == 'present':
        vmid = get_nextvmid(module, proxmox)
    elif not vmid and hostname:
        hosts = get_vmid(proxmox, hostname)
        if len(hosts) == 0:
            module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
        vmid = hosts[0]
    elif not vmid:
        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)

    if state == 'present':
        try:
            if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
            # BUGFIX: was "not (node, hostname and ...)", which builds a
            # non-empty tuple that is always truthy, so this mandatory-
            # parameter check could never trigger.
            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
                module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' not exists in cluster" % node)
            elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
                module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
                                 % (module.params['ostemplate'], node, template_store))

            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
                            cores=module.params['cores'],
                            pool=module.params['pool'],
                            password=module.params['password'],
                            hostname=module.params['hostname'],
                            ostemplate=module.params['ostemplate'],
                            netif=module.params['netif'],
                            mounts=module.params['mounts'],
                            ip_address=module.params['ip_address'],
                            onboot=int(module.params['onboot']),
                            cpuunits=module.params['cpuunits'],
                            nameserver=module.params['nameserver'],
                            searchdomain=module.params['searchdomain'],
                            force=int(module.params['force']),
                            pubkey=module.params['pubkey'],
                            unprivileged=int(module.params['unprivileged']))

            module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))

    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)

            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)

            # A 'mounted' container is already stopped; it can only be
            # unmounted, and only when force was requested.
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
                    if umount_instance(module, proxmox, vm, vmid, timeout):
                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
                else:
                    module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
                                                         "You can use force option to umount it.") % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)

            if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
            if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
                    getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)

            if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
                    start_instance(module, proxmox, vm, vmid, timeout)):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'absent':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)

            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
            while timeout:
                if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                        proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])

                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run the module only when executed directly (Ansible executes it this way).
    main()
|
||||
1122
plugins/modules/cloud/misc/proxmox_kvm.py
Normal file
1122
plugins/modules/cloud/misc/proxmox_kvm.py
Normal file
File diff suppressed because it is too large
Load diff
243
plugins/modules/cloud/misc/proxmox_template.py
Normal file
243
plugins/modules/cloud/misc/proxmox_template.py
Normal file
|
|
@ -0,0 +1,243 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright: Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox_template
|
||||
short_description: management of OS templates in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to upload/delete templates in Proxmox VE cluster
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: 'no'
|
||||
type: bool
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, when you will operate with template
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- path to uploaded file
|
||||
- required only for C(state=present)
|
||||
aliases: ['path']
|
||||
template:
|
||||
description:
|
||||
- the template name
|
||||
- required only for states C(absent), C(info)
|
||||
content_type:
|
||||
description:
|
||||
- content type
|
||||
- required only for C(state=present)
|
||||
default: 'vztmpl'
|
||||
choices: ['vztmpl', 'iso']
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
force:
|
||||
description:
|
||||
- can be used only with C(state=present), exists template will be overwritten
|
||||
type: bool
|
||||
default: 'no'
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the template
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
notes:
|
||||
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "requests" ]
|
||||
author: Sergei Antipov (@UnderGreen)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Upload new openvz template with minimal options
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
|
||||
# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_host: node1
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
|
||||
# Upload new openvz template with all options and force overwrite
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
storage: local
|
||||
content_type: vztmpl
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
force: yes
|
||||
|
||||
# Delete template with minimal options
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
template: ubuntu-14.04-x86_64.tar.gz
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def get_template(proxmox, node, storage, content_type, template):
    """Return a non-empty (truthy) list when *template* exists in *storage* on *node*."""
    volid = '%s:%s/%s' % (storage, content_type, template)
    found = []
    for tmpl in proxmox.nodes(node).storage(storage).content.get():
        if tmpl['volid'] == volid:
            found.append(True)
    return found
|
||||
|
||||
|
||||
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
    """Upload the file at *realpath* to *storage* on *node* and wait for the task.

    Returns True when the upload task finishes with exit status OK; fails the
    module when the task does not finish within *timeout* seconds.
    """
    # BUGFIX: the file handle was opened inline and never closed; use a
    # context manager so it is released even if the API call raises.
    with open(realpath, 'rb') as template_file:
        taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=template_file)
    while timeout:
        task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout = timeout - 1
        if timeout == 0:
            # BUGFIX: was proxmox.node(node) — a non-existent '/node/...'
            # endpoint that raised instead of producing this error message.
            module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
                             % proxmox.nodes(node).tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
    """Delete *template* from *storage* on *node* and wait until it disappears.

    Returns True on success; fails the module if the template is still listed
    after *timeout* seconds.
    """
    volid = '%s:%s/%s' % (storage, content_type, template)
    proxmox.nodes(node).storage(storage).content.delete(volid)
    remaining = timeout
    while remaining:
        if not get_template(proxmox, node, storage, content_type, template):
            return True
        remaining -= 1
        if remaining == 0:
            module.fail_json(msg='Reached timeout while waiting for deleting template.')

        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def main():
    """Entry point: upload (state=present) or delete (state=absent) a template."""
    module = AnsibleModule(
        argument_spec=dict(
            api_host=dict(required=True),
            api_user=dict(required=True),
            api_password=dict(no_log=True),
            validate_certs=dict(type='bool', default='no'),
            node=dict(),
            src=dict(type='path'),
            template=dict(),
            content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
            storage=dict(default='local'),
            timeout=dict(type='int', default=30),
            force=dict(type='bool', default='no'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    storage = module.params['storage']
    timeout = module.params['timeout']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    if state == 'present':
        try:
            content_type = module.params['content_type']
            src = module.params['src']

            # BUGFIX: validate src before deriving the template name.
            # Previously os.path.basename(src) ran first, so a missing src
            # raised TypeError instead of the intended fail_json message.
            if not src:
                module.fail_json(msg='src param to uploading template file is mandatory')
            elif not (os.path.exists(src) and os.path.isfile(src)):
                module.fail_json(msg='template file on path %s not exists' % src)

            template = os.path.basename(src)
            if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))

            if upload_template(module, proxmox, api_host, node, storage, content_type, src, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="uploading of template %s failed with exception: %s" % (template, e))

    elif state == 'absent':
        try:
            content_type = module.params['content_type']
            template = module.params['template']

            if not template:
                module.fail_json(msg='template param is mandatory')
            elif not get_template(proxmox, node, storage, content_type, template):
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))

            if delete_template(module, proxmox, node, storage, content_type, template, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run the module only when executed directly (Ansible executes it this way).
    main()
|
||||
1516
plugins/modules/cloud/misc/rhevm.py
Normal file
1516
plugins/modules/cloud/misc/rhevm.py
Normal file
File diff suppressed because it is too large
Load diff
235
plugins/modules/cloud/misc/serverless.py
Normal file
235
plugins/modules/cloud/misc/serverless.py
Normal file
|
|
@ -0,0 +1,235 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: serverless
|
||||
short_description: Manages a Serverless Framework project
|
||||
description:
|
||||
- Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Goal state of given stage/project.
|
||||
type: str
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
serverless_bin_path:
|
||||
description:
|
||||
- The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless
|
||||
type: path
|
||||
service_path:
|
||||
description:
|
||||
- The path to the root of the Serverless Service to be operated on.
|
||||
type: path
|
||||
required: true
|
||||
stage:
|
||||
description:
|
||||
- The name of the serverless framework project stage to deploy to.
|
||||
- This uses the serverless framework default "dev".
|
||||
type: str
|
||||
functions:
|
||||
description:
|
||||
- A list of specific functions to deploy.
|
||||
- If this is not provided, all functions in the service will be deployed.
|
||||
type: list
|
||||
default: []
|
||||
region:
|
||||
description:
|
||||
- AWS region to deploy the service to.
|
||||
- This parameter defaults to C(us-east-1).
|
||||
type: str
|
||||
deploy:
|
||||
description:
|
||||
- Whether or not to deploy artifacts after building them.
|
||||
- When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
|
||||
- This is mostly useful for generating artifacts to be stored/deployed elsewhere.
|
||||
type: bool
|
||||
default: yes
|
||||
force:
|
||||
description:
|
||||
- Whether or not to force full deployment, equivalent to serverless C(--force) option.
|
||||
type: bool
|
||||
default: no
|
||||
verbose:
|
||||
description:
|
||||
- Shows all stack events during deployment, and display any Stack Output.
|
||||
type: bool
|
||||
default: no
|
||||
notes:
|
||||
- Currently, the C(serverless) command must be in the path of the node executing the task.
|
||||
In the future this may be a flag.
|
||||
requirements:
|
||||
- serverless
|
||||
- yaml
|
||||
author:
|
||||
- Ryan Scott Brown (@ryansb)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Basic deploy of a service
|
||||
serverless:
|
||||
service_path: '{{ project_dir }}'
|
||||
state: present
|
||||
|
||||
- name: Deploy specific functions
|
||||
serverless:
|
||||
service_path: '{{ project_dir }}'
|
||||
functions:
|
||||
- my_func_one
|
||||
- my_func_two
|
||||
|
||||
- name: Deploy a project, then pull its resource list back into Ansible
|
||||
serverless:
|
||||
stage: dev
|
||||
region: us-east-1
|
||||
service_path: '{{ project_dir }}'
|
||||
register: sls
|
||||
|
||||
# The cloudformation stack is always named the same as the full service, so the
|
||||
# cloudformation_info module can get a full list of the stack resources, as
|
||||
# well as stack events and outputs
|
||||
- cloudformation_info:
|
||||
region: us-east-1
|
||||
stack_name: '{{ sls.service_name }}'
|
||||
stack_resources: true
|
||||
|
||||
- name: Deploy a project using a locally installed serverless binary
|
||||
serverless:
|
||||
stage: dev
|
||||
region: us-east-1
|
||||
service_path: '{{ project_dir }}'
|
||||
serverless_bin_path: node_modules/.bin/serverless
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
service_name:
|
||||
type: str
|
||||
description: The service name specified in the serverless.yml that was just deployed.
|
||||
returned: always
|
||||
sample: my-fancy-service-dev
|
||||
state:
|
||||
type: str
|
||||
description: Whether the stack for the serverless project is present/absent.
|
||||
returned: always
|
||||
command:
|
||||
type: str
|
||||
description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
|
||||
returned: always
|
||||
sample: serverless deploy --stage production
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
try:
|
||||
import yaml
|
||||
HAS_YAML = True
|
||||
except ImportError:
|
||||
HAS_YAML = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def read_serverless_config(module):
    """Load and parse ``<service_path>/serverless.yml``.

    :param module: AnsibleModule instance (provides ``params`` and ``fail_json``)
    :returns: the parsed YAML document (normally a dict)

    Fails the module run when the file cannot be opened or parsed.
    """
    path = module.params.get('service_path')

    try:
        with open(os.path.join(path, 'serverless.yml')) as sls_config:
            return yaml.safe_load(sls_config.read())
    except IOError as e:
        # Missing or unreadable file: report a clean module failure.
        module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e)))
    except yaml.YAMLError as e:
        # Previously a YAML parse error escaped as a raw traceback; fail
        # cleanly instead.  (The old unconditional fail_json after the
        # try/except was unreachable and has been removed.)
        module.fail_json(msg="Could not parse serverless.yml in {0}. err: {1}".format(path, str(e)))
|
||||
|
||||
|
||||
def get_service_name(module, stage):
    """Return the deployed stack name, ``"<service>-<stage>"``.

    When *stage* is falsy, falls back to the ``stage`` configured in
    serverless.yml (or ``"dev"`` when none is configured).
    """
    config = read_serverless_config(module)
    service = config.get('service')
    if service is None:
        module.fail_json(msg="Could not read `service` key from serverless.yml file")

    effective_stage = stage if stage else config.get('stage', 'dev')
    return "{0}-{1}".format(service, effective_stage)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build and run the ``serverless`` CLI command."""
    module = AnsibleModule(
        argument_spec=dict(
            service_path=dict(type='path', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            functions=dict(type='list'),
            region=dict(type='str', default=''),
            stage=dict(type='str', default=''),
            deploy=dict(type='bool', default=True),
            serverless_bin_path=dict(type='path'),
            force=dict(type='bool', default=False),
            verbose=dict(type='bool', default=False),
        ),
    )

    if not HAS_YAML:
        module.fail_json(msg='yaml is required for this module')

    service_path = module.params.get('service_path')
    state = module.params.get('state')
    # NOTE(review): 'functions' is read but never used when building the
    # command below — confirm whether per-function deploys were intended.
    functions = module.params.get('functions')
    region = module.params.get('region')
    stage = module.params.get('stage')
    deploy = module.params.get('deploy', True)
    force = module.params.get('force', False)
    verbose = module.params.get('verbose', False)
    serverless_bin_path = module.params.get('serverless_bin_path')

    # Prefer a project-local serverless binary when one was supplied;
    # otherwise rely on `serverless` being on the PATH.
    if serverless_bin_path is not None:
        command = serverless_bin_path + " "
    else:
        command = "serverless "

    if state == 'present':
        command += 'deploy '
    elif state == 'absent':
        command += 'remove '
    else:
        module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))

    if state == 'present':
        if not deploy:
            # Build artifacts only; skip the stack update.
            command += '--noDeploy '
        elif force:
            # NOTE(review): --force is only appended when deploy is true —
            # confirm this coupling is intended.
            command += '--force '

    if region:
        command += '--region {0} '.format(region)
    if stage:
        command += '--stage {0} '.format(stage)
    if verbose:
        command += '--verbose '

    rc, out, err = module.run_command(command, cwd=service_path)
    if rc != 0:
        # Removing a stage that does not exist is treated as already-absent.
        if state == 'absent' and "-{0}' does not exist".format(stage) in out:
            module.exit_json(changed=False, state='absent', command=command,
                             out=out, service_name=get_service_name(module, stage))

        module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))

    # gather some facts about the deployment
    module.exit_json(changed=True, state='present', out=out, command=command,
                     service_name=get_service_name(module, stage))
|
||||
|
||||
|
||||
# Run the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
||||
394
plugins/modules/cloud/misc/terraform.py
Normal file
394
plugins/modules/cloud/misc/terraform.py
Normal file
|
|
@ -0,0 +1,394 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: terraform
|
||||
short_description: Manages a Terraform deployment (and plans)
|
||||
description:
|
||||
- Provides support for deploying resources with Terraform and pulling
|
||||
resource information back into Ansible.
|
||||
options:
|
||||
state:
|
||||
choices: ['planned', 'present', 'absent']
|
||||
description:
|
||||
- Goal state of given stage/project
|
||||
required: false
|
||||
default: present
|
||||
binary_path:
|
||||
description:
|
||||
- The path of a terraform binary to use, relative to the 'service_path'
|
||||
unless you supply an absolute path.
|
||||
required: false
|
||||
project_path:
|
||||
description:
|
||||
- The path to the root of the Terraform directory with the
|
||||
vars.tf/main.tf/etc to use.
|
||||
required: true
|
||||
workspace:
|
||||
description:
|
||||
- The terraform workspace to work with.
|
||||
required: false
|
||||
default: default
|
||||
purge_workspace:
|
||||
description:
|
||||
- Only works with state = absent
|
||||
- If true, the workspace will be deleted after the "terraform destroy" action.
|
||||
- The 'default' workspace will not be deleted.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
plan_file:
|
||||
description:
|
||||
- The path to an existing Terraform plan file to apply. If this is not
|
||||
specified, Ansible will build a new TF plan and execute it.
|
||||
Note that this option is required if 'state' has the 'planned' value.
|
||||
required: false
|
||||
state_file:
|
||||
description:
|
||||
- The path to an existing Terraform state file to use when building plan.
|
||||
If this is not specified, the default `terraform.tfstate` will be used.
|
||||
- This option is ignored when plan is specified.
|
||||
required: false
|
||||
variables_file:
|
||||
description:
|
||||
- The path to a variables file for Terraform to fill into the TF
|
||||
configurations.
|
||||
required: false
|
||||
variables:
|
||||
description:
|
||||
- A group of key-values to override template variables or those in
|
||||
variables files.
|
||||
required: false
|
||||
targets:
|
||||
description:
|
||||
- A list of specific resources to target in this plan/application. The
|
||||
resources selected here will also auto-include any dependencies.
|
||||
required: false
|
||||
lock:
|
||||
description:
|
||||
- Enable statefile locking, if you use a service that accepts locks (such
|
||||
as S3+DynamoDB) to store your statefile.
|
||||
required: false
|
||||
type: bool
|
||||
lock_timeout:
|
||||
description:
|
||||
- How long to maintain the lock on the statefile, if you use a service
|
||||
that accepts locks (such as S3+DynamoDB).
|
||||
required: false
|
||||
force_init:
|
||||
description:
|
||||
- To avoid duplicating infra, if a state file can't be found this will
|
||||
force a `terraform init`. Generally, this should be turned off unless
|
||||
you intend to provision an entirely new Terraform deployment.
|
||||
default: false
|
||||
required: false
|
||||
type: bool
|
||||
backend_config:
|
||||
description:
|
||||
- A group of key-values to provide at init stage to the -backend-config parameter.
|
||||
required: false
|
||||
notes:
|
||||
- To just run a `terraform plan`, use check mode.
|
||||
requirements: [ "terraform" ]
|
||||
author: "Ryan Scott Brown (@ryansb)"
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
# Basic deploy of a service
|
||||
- terraform:
|
||||
project_path: '{{ project_dir }}'
|
||||
state: present
|
||||
|
||||
# Define the backend configuration at init
|
||||
- terraform:
|
||||
project_path: 'project/'
|
||||
state: "{{ state }}"
|
||||
force_init: true
|
||||
backend_config:
|
||||
region: "eu-west-1"
|
||||
bucket: "some-bucket"
|
||||
key: "random.tfstate"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
outputs:
|
||||
type: complex
|
||||
description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
|
||||
returned: on success
|
||||
sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
|
||||
contains:
|
||||
sensitive:
|
||||
type: bool
|
||||
returned: always
|
||||
description: Whether Terraform has marked this value as sensitive
|
||||
type:
|
||||
type: str
|
||||
returned: always
|
||||
description: The type of the value (string, int, etc)
|
||||
value:
|
||||
returned: always
|
||||
description: The value of the output as interpolated by Terraform
|
||||
stdout:
|
||||
type: str
|
||||
description: Full `terraform` command stdout, in case you want to display it or examine the event log
|
||||
returned: always
|
||||
sample: ''
|
||||
command:
|
||||
type: str
|
||||
description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
|
||||
returned: always
|
||||
sample: terraform apply ...
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
import traceback
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
DESTROY_ARGS = ('destroy', '-no-color', '-force')
|
||||
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
|
||||
module = None
|
||||
|
||||
|
||||
def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
    """Validate the environment before running Terraform.

    Fails the module when the Terraform binary or project directory is
    missing, or when ``terraform validate`` rejects the configuration.

    :param bin_path: path to the terraform executable
    :param project_path: directory containing the Terraform configuration
    :param variables_args: extra ``-var``/``-var-file`` CLI arguments
    :param plan_file: unused; kept for call compatibility
    """
    # Only an empty/None path is invalid.  The old check additionally
    # required a '/' in the path, which wrongly rejected valid relative
    # paths such as "project" while the error message only claimed
    # None/'' were invalid.
    if not project_path:
        module.fail_json(msg="Path for Terraform project can not be None or ''.")
    if not os.path.exists(bin_path):
        module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
    if not os.path.isdir(project_path):
        module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))

    # NOTE(review): the command is a list, so use_unsafe_shell=True looks
    # unnecessary here — kept as-is to preserve behavior.
    rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, cwd=project_path, use_unsafe_shell=True)
    if rc != 0:
        module.fail_json(msg="Failed to validate Terraform configuration files:\r\n{0}".format(err))
|
||||
|
||||
|
||||
def _state_args(state_file):
|
||||
if state_file and os.path.exists(state_file):
|
||||
return ['-state', state_file]
|
||||
if state_file and not os.path.exists(state_file):
|
||||
module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
|
||||
return []
|
||||
|
||||
|
||||
def init_plugins(bin_path, project_path, backend_config):
    """Run ``terraform init`` in *project_path*, passing backend settings."""
    init_command = [bin_path, 'init', '-input=false']
    # NOTE(review): shlex_quote on a list-form run_command argument inserts
    # literal quote characters rather than shell-protecting the value —
    # reproduced as-is to preserve behavior.
    for key, val in (backend_config or {}).items():
        init_command.extend(['-backend-config', shlex_quote('{0}={1}'.format(key, val))])
    rc, dummy_out, err = module.run_command(init_command, cwd=project_path)
    if rc != 0:
        module.fail_json(msg="Failed to initialize Terraform modules:\r\n{0}".format(err))
|
||||
|
||||
|
||||
def get_workspace_context(bin_path, project_path):
    """Parse ``terraform workspace list`` into a context dict.

    Returns ``{"current": <active workspace>, "all": [other workspaces]}``;
    the active workspace is the line prefixed with ``* `` in the output.
    """
    workspace_ctx = {"current": "default", "all": []}
    rc, out, err = module.run_command([bin_path, 'workspace', 'list', '-no-color'], cwd=project_path)
    if rc != 0:
        # Listing failures are not fatal; fall back to the defaults above.
        module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
    for raw_line in out.split('\n'):
        name = raw_line.strip()
        if not name:
            continue
        if name.startswith('* '):
            workspace_ctx["current"] = name.replace('* ', '')
        else:
            workspace_ctx["all"].append(name)
    return workspace_ctx
|
||||
|
||||
|
||||
def _workspace_cmd(bin_path, project_path, action, workspace):
    """Run ``terraform workspace <action> <workspace>``; fail the module on error."""
    rc, out, err = module.run_command(
        [bin_path, 'workspace', action, workspace, '-no-color'],
        cwd=project_path)
    if rc != 0:
        module.fail_json(msg="Failed to {0} workspace:\r\n{1}".format(action, err))
    return rc, out, err
|
||||
|
||||
|
||||
def create_workspace(bin_path, project_path, workspace):
    """Create a new Terraform workspace (``terraform workspace new``)."""
    _workspace_cmd(bin_path, project_path, 'new', workspace)


def select_workspace(bin_path, project_path, workspace):
    """Switch to an existing workspace (``terraform workspace select``)."""
    _workspace_cmd(bin_path, project_path, 'select', workspace)


def remove_workspace(bin_path, project_path, workspace):
    """Delete a workspace (``terraform workspace delete``)."""
    _workspace_cmd(bin_path, project_path, 'delete', workspace)
|
||||
|
||||
|
||||
def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
    """Create a Terraform plan file and report whether it contains changes.

    :param command: the apply/destroy command list built so far
    :param plan_path: optional pre-existing plan path; a temp file is used otherwise
    :returns: ``(plan_path, changed, out, err, command)`` — *command* is the
        plan command itself when state == 'planned', otherwise the command
        that was passed in.
    """
    if plan_path is None:
        # mkstemp returns an open OS-level file descriptor; close it
        # immediately so it is not leaked — terraform writes to the path.
        fd, plan_path = tempfile.mkstemp(suffix='.tfplan')
        os.close(fd)

    plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]

    for t in (module.params.get('targets') or []):
        plan_command.extend(['-target', t])

    plan_command.extend(_state_args(state_file))

    rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)

    # `terraform plan -detailed-exitcode`: 0 = no changes, 1 = error, 2 = changes.
    if rc == 0:
        # no changes
        return plan_path, False, out, err, plan_command if state == 'planned' else command
    elif rc == 1:
        # failure to plan
        module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
    elif rc == 2:
        # changes, but successful
        return plan_path, True, out, err, plan_command if state == 'planned' else command

    module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
|
||||
|
||||
|
||||
def main():
    """Module entry point: init/workspace handling, plan, apply/destroy, outputs."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            project_path=dict(required=True, type='path'),
            binary_path=dict(type='path'),
            workspace=dict(required=False, type='str', default='default'),
            purge_workspace=dict(type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent', 'planned']),
            variables=dict(type='dict'),
            variables_file=dict(type='path'),
            plan_file=dict(type='path'),
            state_file=dict(type='path'),
            targets=dict(type='list', default=[]),
            lock=dict(type='bool', default=True),
            lock_timeout=dict(type='int',),
            force_init=dict(type='bool', default=False),
            backend_config=dict(type='dict', default=None),
        ),
        required_if=[('state', 'planned', ['plan_file'])],
        supports_check_mode=True,
    )

    project_path = module.params.get('project_path')
    bin_path = module.params.get('binary_path')
    workspace = module.params.get('workspace')
    purge_workspace = module.params.get('purge_workspace')
    state = module.params.get('state')
    variables = module.params.get('variables') or {}
    variables_file = module.params.get('variables_file')
    plan_file = module.params.get('plan_file')
    state_file = module.params.get('state_file')
    force_init = module.params.get('force_init')
    backend_config = module.params.get('backend_config')

    # Resolve the terraform executable: explicit binary_path wins, else PATH.
    if bin_path is not None:
        command = [bin_path]
    else:
        command = [module.get_bin_path('terraform', required=True)]

    if force_init:
        init_plugins(command[0], project_path, backend_config)

    # Switch to the requested workspace, creating it if missing; the
    # workspace that was active before the run is restored at the end.
    workspace_ctx = get_workspace_context(command[0], project_path)
    if workspace_ctx["current"] != workspace:
        if workspace not in workspace_ctx["all"]:
            create_workspace(command[0], project_path, workspace)
        else:
            select_workspace(command[0], project_path, workspace)

    if state == 'present':
        command.extend(APPLY_ARGS)
    elif state == 'absent':
        command.extend(DESTROY_ARGS)

    # Translate the variables dict / variables file into CLI arguments.
    variables_args = []
    for k, v in variables.items():
        variables_args.extend([
            '-var',
            '{0}={1}'.format(k, v)
        ])
    if variables_file:
        variables_args.extend(['-var-file', variables_file])

    preflight_validation(command[0], project_path, variables_args)

    if module.params.get('lock') is not None:
        if module.params.get('lock'):
            command.append('-lock=true')
        else:
            command.append('-lock=false')
    if module.params.get('lock_timeout') is not None:
        command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))

    for t in (module.params.get('targets') or []):
        command.extend(['-target', t])

    # we aren't sure if this plan will result in changes, so assume yes
    needs_application, changed = True, False

    out, err = '', ''

    if state == 'absent':
        command.extend(variables_args)
    elif state == 'present' and plan_file:
        # Re-use a caller-supplied plan file (project-relative or absolute).
        if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
            command.append(plan_file)
        else:
            module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
    else:
        # No usable plan file: build one (this also detects whether there
        # is anything to apply at all).
        plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
                                                                     module.params.get('targets'), state, plan_file)
        command.append(plan_file)

    if needs_application and not module.check_mode and not state == 'planned':
        rc, out, err = module.run_command(command, cwd=project_path)
        # checks out to decide if changes were made during execution
        # NOTE(review): operator precedence makes this condition group as
        # (A and B) or C, i.e. ('0 added, 0 changed' not in out and
        # state != "absent") or '0 destroyed' not in out — confirm that
        # grouping is the intended changed-detection.
        if '0 added, 0 changed' not in out and not state == "absent" or '0 destroyed' not in out:
            changed = True
        if rc != 0:
            module.fail_json(
                msg="Failure when executing Terraform command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
                command=' '.join(command)
            )

    # Collect outputs as JSON; rc == 1 usually just means no outputs exist.
    outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
    rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
    if rc == 1:
        module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
        outputs = {}
    elif rc != 0:
        module.fail_json(
            msg="Failure when getting Terraform outputs. "
                "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
            command=' '.join(outputs_command))
    else:
        outputs = json.loads(outputs_text)

    # Restore the Terraform workspace found when running the module
    if workspace_ctx["current"] != workspace:
        select_workspace(command[0], project_path, workspace_ctx["current"])
    if state == 'absent' and workspace != 'default' and purge_workspace is True:
        remove_workspace(command[0], project_path, workspace)

    module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
|
||||
|
||||
|
||||
# Run the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
||||
602
plugins/modules/cloud/misc/virt.py
Normal file
602
plugins/modules/cloud/misc/virt.py
Normal file
|
|
@ -0,0 +1,602 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2007, 2012 Red Hat, Inc
|
||||
# Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# Seth Vidal <skvidal@fedoraproject.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt
|
||||
short_description: Manages virtual machines supported by libvirt
|
||||
description:
|
||||
- Manages virtual machines supported by I(libvirt).
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- name of the guest VM being managed. Note that VM must be previously
|
||||
defined with xml.
|
||||
- This option is required unless I(command) is C(list_vms) or C(info).
|
||||
state:
|
||||
description:
|
||||
- Note that there may be some lag for state requests like C(shutdown)
|
||||
since these refer only to VM states. After starting a guest, it may not
|
||||
be immediately accessible.
|
||||
state and command are mutually exclusive except when command=list_vms. In
|
||||
this case all VMs in specified state will be listed.
|
||||
choices: [ destroyed, paused, running, shutdown ]
|
||||
command:
|
||||
description:
|
||||
- In addition to state management, various non-idempotent commands are available.
|
||||
choices: [ create, define, destroy, freemem, get_xml, info, list_vms, nodeinfo, pause, shutdown, start, status, stop, undefine, unpause, virttype ]
|
||||
autostart:
|
||||
description:
|
||||
- start VM at host startup.
|
||||
type: bool
|
||||
uri:
|
||||
description:
|
||||
- libvirt connection uri.
|
||||
default: qemu:///system
|
||||
xml:
|
||||
description:
|
||||
- XML document used with the define command.
|
||||
- Must be raw XML content using C(lookup). XML cannot be reference to a file.
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- libvirt-python
|
||||
author:
|
||||
- Ansible Core Team
|
||||
- Michael DeHaan
|
||||
- Seth Vidal (@skvidal)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# a playbook task line:
|
||||
- virt:
|
||||
name: alpha
|
||||
state: running
|
||||
|
||||
# /usr/bin/ansible invocations
|
||||
# ansible host -m virt -a "name=alpha command=status"
|
||||
# ansible host -m virt -a "name=alpha command=get_xml"
|
||||
# ansible host -m virt -a "name=alpha command=create uri=lxc:///"
|
||||
|
||||
# defining and launching an LXC guest
|
||||
- name: define vm
|
||||
virt:
|
||||
command: define
|
||||
xml: "{{ lookup('template', 'container-template.xml.j2') }}"
|
||||
uri: 'lxc:///'
|
||||
- name: start vm
|
||||
virt:
|
||||
name: foo
|
||||
state: running
|
||||
uri: 'lxc:///'
|
||||
|
||||
# setting autostart on a qemu VM (default uri)
|
||||
- name: set autostart for a VM
|
||||
virt:
|
||||
name: foo
|
||||
autostart: yes
|
||||
|
||||
# Defining a VM and making is autostart with host. VM will be off after this task
|
||||
- name: define vm from xml and set autostart
|
||||
virt:
|
||||
command: define
|
||||
xml: "{{ lookup('template', 'vm_template.xml.j2') }}"
|
||||
autostart: yes
|
||||
|
||||
# Listing VMs
|
||||
- name: list all VMs
|
||||
virt:
|
||||
command: list_vms
|
||||
register: all_vms
|
||||
|
||||
- name: list only running VMs
|
||||
virt:
|
||||
command: list_vms
|
||||
state: running
|
||||
register: running_vms
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
# for list_vms command
|
||||
list_vms:
|
||||
description: The list of vms defined on the remote system
|
||||
type: list
|
||||
returned: success
|
||||
sample: [
|
||||
"build.example.org",
|
||||
"dev.example.org"
|
||||
]
|
||||
# for status command
|
||||
status:
|
||||
description: The status of the VM, among running, crashed, paused and shutdown
|
||||
type: str
|
||||
sample: "success"
|
||||
returned: success
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import libvirt
|
||||
from libvirt import libvirtError
|
||||
except ImportError:
|
||||
HAS_VIRT = False
|
||||
else:
|
||||
HAS_VIRT = True
|
||||
|
||||
import re
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Module-level return codes used by the command handlers.
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2

# Commands that operate on a single VM vs. on the hypervisor host itself.
ALL_COMMANDS = []
VM_COMMANDS = ['create', 'define', 'destroy', 'get_xml', 'pause', 'shutdown', 'status', 'start', 'stop', 'undefine', 'unpause']
HOST_COMMANDS = ['freemem', 'info', 'list_vms', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# Map the libvirt domain state constant (vm.info()[0]) to a readable name.
VIRT_STATE_NAME_MAP = {
    0: 'running',
    1: 'running',
    2: 'running',
    3: 'paused',
    4: 'shutdown',
    5: 'shutdown',
    6: 'crashed',
}
|
||||
|
||||
|
||||
class VMNotFound(Exception):
    """Raised when a requested virtual machine is not defined on the host."""
    pass
|
||||
|
||||
|
||||
class LibvirtConnection(object):
|
||||
|
||||
def __init__(self, uri, module):
|
||||
|
||||
self.module = module
|
||||
|
||||
cmd = "uname -r"
|
||||
rc, stdout, stderr = self.module.run_command(cmd)
|
||||
|
||||
if "xen" in stdout:
|
||||
conn = libvirt.open(None)
|
||||
elif "esx" in uri:
|
||||
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
|
||||
conn = libvirt.openAuth(uri, auth)
|
||||
else:
|
||||
conn = libvirt.open(uri)
|
||||
|
||||
if not conn:
|
||||
raise Exception("hypervisor connection failure")
|
||||
|
||||
self.conn = conn
|
||||
|
||||
def find_vm(self, vmid):
|
||||
"""
|
||||
Extra bonus feature: vmid = -1 returns a list of everything
|
||||
"""
|
||||
conn = self.conn
|
||||
|
||||
vms = []
|
||||
|
||||
# this block of code borrowed from virt-manager:
|
||||
# get working domain's name
|
||||
ids = conn.listDomainsID()
|
||||
for id in ids:
|
||||
vm = conn.lookupByID(id)
|
||||
vms.append(vm)
|
||||
# get defined domain
|
||||
names = conn.listDefinedDomains()
|
||||
for name in names:
|
||||
vm = conn.lookupByName(name)
|
||||
vms.append(vm)
|
||||
|
||||
if vmid == -1:
|
||||
return vms
|
||||
|
||||
for vm in vms:
|
||||
if vm.name() == vmid:
|
||||
return vm
|
||||
|
||||
raise VMNotFound("virtual machine %s not found" % vmid)
|
||||
|
||||
def shutdown(self, vmid):
|
||||
return self.find_vm(vmid).shutdown()
|
||||
|
||||
def pause(self, vmid):
|
||||
return self.suspend(vmid)
|
||||
|
||||
def unpause(self, vmid):
|
||||
return self.resume(vmid)
|
||||
|
||||
def suspend(self, vmid):
|
||||
return self.find_vm(vmid).suspend()
|
||||
|
||||
def resume(self, vmid):
|
||||
return self.find_vm(vmid).resume()
|
||||
|
||||
def create(self, vmid):
|
||||
return self.find_vm(vmid).create()
|
||||
|
||||
def destroy(self, vmid):
|
||||
return self.find_vm(vmid).destroy()
|
||||
|
||||
def undefine(self, vmid):
|
||||
return self.find_vm(vmid).undefine()
|
||||
|
||||
def get_status2(self, vm):
|
||||
state = vm.info()[0]
|
||||
return VIRT_STATE_NAME_MAP.get(state, "unknown")
|
||||
|
||||
def get_status(self, vmid):
|
||||
state = self.find_vm(vmid).info()[0]
|
||||
return VIRT_STATE_NAME_MAP.get(state, "unknown")
|
||||
|
||||
def nodeinfo(self):
|
||||
return self.conn.getInfo()
|
||||
|
||||
def get_type(self):
|
||||
return self.conn.getType()
|
||||
|
||||
def get_xml(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.XMLDesc(0)
|
||||
|
||||
def get_maxVcpus(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.maxVcpus()
|
||||
|
||||
def get_maxMemory(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.maxMemory()
|
||||
|
||||
def getFreeMemory(self):
|
||||
return self.conn.getFreeMemory()
|
||||
|
||||
def get_autostart(self, vmid):
|
||||
vm = self.conn.lookupByName(vmid)
|
||||
return vm.autostart()
|
||||
|
||||
def set_autostart(self, vmid, val):
    """Set the autostart flag of the domain named vmid to val."""
    domain = self.conn.lookupByName(vmid)
    return domain.setAutostart(val)
|
||||
|
||||
def define_from_xml(self, xml):
    """Define (persist) a domain from the given XML document."""
    return self.conn.defineXML(xml)
|
||||
|
||||
|
||||
class Virt(object):
    """High-level facade used by the module's core() dispatcher.

    Each public method (re)opens a libvirt connection via __get_conn()
    and delegates to the corresponding LibvirtConnection call.
    """

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri

    def __get_conn(self):
        # A fresh connection per operation keeps each call self-contained.
        self.conn = LibvirtConnection(self.uri, self.module)
        return self.conn

    def get_vm(self, vmid):
        """Return the domain object for vmid (raises VMNotFound if absent)."""
        self.__get_conn()
        return self.conn.find_vm(vmid)

    def state(self):
        """Return a list of '<name> <state>' strings for all domains."""
        vms = self.list_vms()
        state = []
        for vm in vms:
            state_blurb = self.conn.get_status(vm)
            state.append("%s %s" % (vm, state_blurb))
        return state

    def info(self):
        """Return a dict of per-domain info (state, memory, CPUs, autostart)."""
        vms = self.list_vms()
        info = dict()
        for vm in vms:
            data = self.conn.find_vm(vm).info()
            # libvirt returns maxMem, memory, and cpuTime as long()'s, which
            # xmlrpclib tries to convert to regular int's during serialization.
            # This throws exceptions, so convert them to strings here and
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            info[vm] = dict(
                state=VIRT_STATE_NAME_MAP.get(data[0], "unknown"),
                maxMem=str(data[1]),
                memory=str(data[2]),
                nrVirtCpu=data[3],
                cpuTime=str(data[4]),
                autostart=self.conn.get_autostart(vm),
            )

        return info

    def nodeinfo(self):
        """Return a dict describing the hypervisor host."""
        self.__get_conn()
        data = self.conn.nodeinfo()
        info = dict(
            cpumodel=str(data[0]),
            phymemory=str(data[1]),
            cpus=str(data[2]),
            cpumhz=str(data[3]),
            numanodes=str(data[4]),
            sockets=str(data[5]),
            cpucores=str(data[6]),
            cputhreads=str(data[7])
        )
        return info

    def list_vms(self, state=None):
        """List domain names, optionally filtered by readable state."""
        self.conn = self.__get_conn()
        vms = self.conn.find_vm(-1)
        results = []
        for x in vms:
            try:
                if state:
                    vmstate = self.conn.get_status2(x)
                    if vmstate == state:
                        results.append(x.name())
                else:
                    results.append(x.name())
            except Exception:
                # A domain may disappear between listing and inspection;
                # skip it rather than failing the whole listing.
                pass
        return results

    def virttype(self):
        """Return the hypervisor driver name."""
        return self.__get_conn().get_type()

    def autostart(self, vmid, as_flag):
        """Set the autostart flag; return True if it was changed."""
        self.conn = self.__get_conn()
        # Change autostart flag only if needed
        if self.conn.get_autostart(vmid) != as_flag:
            self.conn.set_autostart(vmid, as_flag)
            return True

        return False

    def freemem(self):
        """Return free memory on the hypervisor host."""
        self.conn = self.__get_conn()
        return self.conn.getFreeMemory()

    def shutdown(self, vmid):
        """ Make the machine with the given vmid stop running. Whatever that takes. """
        self.__get_conn()
        self.conn.shutdown(vmid)
        return 0

    def pause(self, vmid):
        """ Pause the machine with the given vmid. """

        self.__get_conn()
        return self.conn.suspend(vmid)

    def unpause(self, vmid):
        """ Unpause the machine with the given vmid. """

        self.__get_conn()
        return self.conn.resume(vmid)

    def create(self, vmid):
        """ Start the machine via the given vmid """

        self.__get_conn()
        return self.conn.create(vmid)

    def start(self, vmid):
        """ Start the machine via the given id/name """

        self.__get_conn()
        return self.conn.create(vmid)

    def destroy(self, vmid):
        """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
        self.__get_conn()
        return self.conn.destroy(vmid)

    def undefine(self, vmid):
        """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """

        self.__get_conn()
        return self.conn.undefine(vmid)

    def status(self, vmid):
        """
        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
        """
        self.__get_conn()
        return self.conn.get_status(vmid)

    def get_xml(self, vmid):
        """
        Receive a Vm id as input
        Return an xml describing vm config returned by a libvirt call
        """

        self.__get_conn()
        return self.conn.get_xml(vmid)

    def get_maxVcpus(self, vmid):
        """
        Gets the max number of VCPUs on a guest
        """

        self.__get_conn()
        return self.conn.get_maxVcpus(vmid)

    def get_max_memory(self, vmid):
        """
        Gets the max memory on a guest
        """

        self.__get_conn()
        # FIX: the connection class method is named get_maxMemory;
        # the previous call to get_MaxMemory raised AttributeError.
        return self.conn.get_maxMemory(vmid)

    def define(self, xml):
        """
        Define a guest with the given xml
        """
        self.__get_conn()
        return self.conn.define_from_xml(xml)
|
||||
|
||||
|
||||
def core(module):
    """Dispatch the requested state change / command for the virt module.

    Returns a (rc, result) tuple; may exit early via module.fail_json.
    """

    state = module.params.get('state', None)
    autostart = module.params.get('autostart', None)
    guest = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)

    v = Virt(uri, module)
    res = dict()

    # 'list_vms' combined with a state filters the listing by that state.
    if state and command == 'list_vms':
        res = v.list_vms(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if autostart is not None and command != 'define':
        if not guest:
            module.fail_json(msg="autostart requires 1 argument: name")
        try:
            v.get_vm(guest)
        except VMNotFound:
            module.fail_json(msg="domain %s not found" % guest)
        res['changed'] = v.autostart(guest, autostart)
        if not command and not state:
            return VIRT_SUCCESS, res

    if state:
        if not guest:
            module.fail_json(msg="state change requires a guest specified")

        if state == 'running':
            if v.status(guest) == 'paused':
                res['changed'] = True
                res['msg'] = v.unpause(guest)
            elif v.status(guest) != 'running':
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        elif state == 'destroyed':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.destroy(guest)
        elif state in ('pause', 'paused'):
            # FIX: the argument_spec offers 'pause', but only 'paused' was
            # handled here, so state=pause always failed with
            # "unexpected state". Accept both spellings.
            if v.status(guest) == 'running':
                res['changed'] = True
                res['msg'] = v.pause(guest)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in VM_COMMANDS:
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                if guest:
                    # there might be a mismatch between quest 'name' in the module and in the xml
                    module.warn("'xml' is given - ignoring 'name'")
                # FIX: guard against a missing <name> element; previously
                # re.search(...).groups() raised AttributeError on no match.
                found_name = re.search('<name>(.*)</name>', xml)
                if found_name:
                    domain_name = found_name.groups()[0]
                else:
                    module.fail_json(msg="Could not find domain 'name' in xml")

                # From libvirt docs (https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainDefineXML):
                # -- A previous definition for this domain would be overridden if it already exists.
                #
                # In real world testing with libvirt versions 1.2.17-13, 2.0.0-10 and 3.9.0-14
                # on qemu and lxc domains results in:
                # operation failed: domain '<name>' already exists with <uuid>
                #
                # In case a domain would be indeed overwritten, we should protect idempotency:
                try:
                    existing_domain = v.get_vm(domain_name)
                except VMNotFound:
                    existing_domain = None
                try:
                    domain = v.define(xml)
                    if existing_domain:
                        # if we are here, then libvirt redefined existing domain as the doc promised
                        if existing_domain.XMLDesc() != domain.XMLDesc():
                            res = {'changed': True, 'change_reason': 'config changed'}
                    else:
                        res = {'changed': True, 'created': domain.name()}
                except libvirtError as e:
                    if e.get_error_code() != 9:  # 9 means 'domain already exists' error
                        # FIX: e.message does not exist on Python 3; use
                        # to_native() for a portable error string.
                        module.fail_json(msg='libvirtError: %s' % to_native(e))
                if autostart is not None and v.autostart(domain_name, autostart):
                    res = {'changed': True, 'change_reason': 'autostart'}

            elif not guest:
                module.fail_json(msg="%s requires 1 argument: guest" % command)
            else:
                res = getattr(v, command)(guest)
                if not isinstance(res, dict):
                    res = {command: res}

            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # Host-wide command with no guest argument.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    module.fail_json(msg="expected state or command parameter to be specified")
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, verify libvirt, run core()."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['guest']),
            # 'paused' is accepted in addition to 'pause': the state name
            # used by the implementation in core() is 'paused'.
            state=dict(type='str', choices=['destroyed', 'pause', 'paused', 'running', 'shutdown']),
            autostart=dict(type='bool'),
            command=dict(type='str', choices=ALL_COMMANDS),
            uri=dict(type='str', default='qemu:///system'),
            xml=dict(type='str'),
        ),
    )

    if not HAS_VIRT:
        module.fail_json(msg='The `libvirt` module is not importable. Check the requirements.')

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
|
||||
638
plugins/modules/cloud/misc/virt_net.py
Normal file
638
plugins/modules/cloud/misc/virt_net.py
Normal file
|
|
@ -0,0 +1,638 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt_net
|
||||
author: "Maciej Delmanowski (@drybjed)"
|
||||
short_description: Manage libvirt network configuration
|
||||
description:
|
||||
- Manage I(libvirt) networks.
|
||||
options:
|
||||
name:
|
||||
required: true
|
||||
aliases: ['network']
|
||||
description:
|
||||
- name of the network being managed. Note that network must be previously
|
||||
defined with xml.
|
||||
state:
|
||||
required: false
|
||||
choices: [ "active", "inactive", "present", "absent" ]
|
||||
description:
|
||||
- specify which state you want a network to be in.
|
||||
If 'active', network will be started.
|
||||
If 'present', ensure that network is present but do not change its
|
||||
state; if it's missing, you need to specify xml argument.
|
||||
If 'inactive', network will be stopped.
|
||||
If 'undefined' or 'absent', network will be removed from I(libvirt) configuration.
|
||||
command:
|
||||
required: false
|
||||
choices: [ "define", "create", "start", "stop", "destroy",
|
||||
"undefine", "get_xml", "list_nets", "facts",
|
||||
"info", "status", "modify"]
|
||||
description:
|
||||
- in addition to state management, various non-idempotent commands are available.
|
||||
See examples.
|
||||
Modify was added in version 2.1
|
||||
autostart:
|
||||
required: false
|
||||
type: bool
|
||||
description:
|
||||
- Specify if a given network should be started automatically on system boot.
|
||||
uri:
|
||||
required: false
|
||||
default: "qemu:///system"
|
||||
description:
|
||||
- libvirt connection uri.
|
||||
xml:
|
||||
required: false
|
||||
description:
|
||||
- XML document used with the define command.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "python-libvirt"
|
||||
- "python-lxml"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Define a new network
|
||||
- virt_net:
|
||||
command: define
|
||||
name: br_nat
|
||||
xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
|
||||
|
||||
# Start a network
|
||||
- virt_net:
|
||||
command: create
|
||||
name: br_nat
|
||||
|
||||
# List available networks
|
||||
- virt_net:
|
||||
command: list_nets
|
||||
|
||||
# Get XML data of a specified network
|
||||
- virt_net:
|
||||
command: get_xml
|
||||
name: br_nat
|
||||
|
||||
# Stop a network
|
||||
- virt_net:
|
||||
command: destroy
|
||||
name: br_nat
|
||||
|
||||
# Undefine a network
|
||||
- virt_net:
|
||||
command: undefine
|
||||
name: br_nat
|
||||
|
||||
# Gather facts about networks
|
||||
# Facts will be available as 'ansible_libvirt_networks'
|
||||
- virt_net:
|
||||
command: facts
|
||||
|
||||
# Gather information about network managed by 'libvirt' remotely using uri
|
||||
- virt_net:
|
||||
command: info
|
||||
uri: '{{ item }}'
|
||||
with_items: '{{ libvirt_uris }}'
|
||||
register: networks
|
||||
|
||||
# Ensure that a network is active (needs to be defined and built first)
|
||||
- virt_net:
|
||||
state: active
|
||||
name: br_nat
|
||||
|
||||
# Ensure that a network is inactive
|
||||
- virt_net:
|
||||
state: inactive
|
||||
name: br_nat
|
||||
|
||||
# Ensure that a given network will be started at boot
|
||||
- virt_net:
|
||||
autostart: yes
|
||||
name: br_nat
|
||||
|
||||
# Disable autostart for a given network
|
||||
- virt_net:
|
||||
autostart: no
|
||||
name: br_nat
|
||||
|
||||
# Add a new host in the dhcp pool
|
||||
- virt_net:
|
||||
name: br_nat
|
||||
command: modify
|
||||
xml: "<host mac='FC:C2:33:00:6c:3c' name='my_vm' ip='192.168.122.30'/>"
|
||||
'''
|
||||
|
||||
try:
|
||||
import libvirt
|
||||
except ImportError:
|
||||
HAS_VIRT = False
|
||||
else:
|
||||
HAS_VIRT = True
|
||||
|
||||
try:
|
||||
from lxml import etree
|
||||
except ImportError:
|
||||
HAS_XML = False
|
||||
else:
|
||||
HAS_XML = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Module return codes.
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2

# Commands that operate on one named network entry vs. host-wide commands.
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
                  'undefine', 'destroy', 'get_xml', 'define',
                  'modify']
HOST_COMMANDS = ['list_nets', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# Mapping of libvirt's isActive() integer result to a readable state name.
ENTRY_STATE_ACTIVE_MAP = {
    0: "inactive",
    1: "active"
}

# Mapping of the autostart() integer flag to yes/no strings.
ENTRY_STATE_AUTOSTART_MAP = {
    0: "no",
    1: "yes"
}

# Mapping of the isPersistent() integer flag to yes/no strings.
ENTRY_STATE_PERSISTENT_MAP = {
    0: "no",
    1: "yes"
}
|
||||
|
||||
|
||||
class EntryNotFound(Exception):
    """Raised when the requested libvirt network entry does not exist."""
|
||||
|
||||
|
||||
class LibvirtConnection(object):
    """Wrapper over a libvirt connection, scoped to network operations.

    Mutating methods honour Ansible check mode: instead of changing
    anything, they call module.exit_json(changed=...) when a change
    would have been made.
    """

    def __init__(self, uri, module):

        self.module = module

        conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_entry(self, entryid):
        """Return the network named entryid, or all networks for entryid == -1.

        Raises EntryNotFound when no network with that name exists.
        """
        if entryid == -1:  # Get active entries
            names = self.conn.listNetworks() + self.conn.listDefinedNetworks()
            return [self.conn.networkLookupByName(n) for n in names]

        try:
            return self.conn.networkLookupByName(entryid)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_NETWORK:
                raise EntryNotFound("network %s not found" % entryid)
            raise

    def create(self, entryid):
        """Start the network; in check mode only report whether it would start."""
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except Exception:
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def modify(self, entryid, xml):
        """Add or modify a DHCP host entry; returns True when a change was made.

        Only <host .../> fragments are supported; anything else fails.
        """
        network = self.find_entry(entryid)
        # identify what type of entry is given in the xml
        new_data = etree.fromstring(xml)
        old_data = etree.fromstring(network.XMLDesc(0))
        if new_data.tag == 'host':
            mac_addr = new_data.get('mac')
            hosts = old_data.xpath('/network/ip/dhcp/host')
            # find the one mac we're looking for
            host = None
            for h in hosts:
                if h.get('mac') == mac_addr:
                    host = h
                    break
            if host is None:
                # add the host
                if not self.module.check_mode:
                    res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
                                         libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                         -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
                else:
                    # pretend there was a change
                    res = 0
                if res == 0:
                    return True
            else:
                # change the host
                if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
                    return False
                else:
                    if not self.module.check_mode:
                        res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
                                             libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                             -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
                    else:
                        # pretend there was a change
                        res = 0
                    if res == 0:
                        return True
        # command, section, parentIndex, xml, flags=0
        self.module.fail_json(msg='updating this is not supported yet %s' % to_native(xml))

    def destroy(self, entryid):
        """Stop the network; in check mode only report whether it would stop."""
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)

    def undefine(self, entryid):
        """Remove the network definition if present.

        FIX: check mode is now evaluated before undefining; previously the
        network was really undefined even when check_mode was enabled.
        """
        try:
            entry = self.find_entry(entryid)
            found = True
        except EntryNotFound:
            entry = None
            found = False

        if self.module.check_mode:
            return self.module.exit_json(changed=found)

        if found:
            return entry.undefine()

    def get_status2(self, entry):
        """Readable state for an already-resolved network object."""
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        """Readable state for the named network; 'inactive' for missing nets in check mode."""
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except Exception:
                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        """Return the network's UUID string."""
        return self.find_entry(entryid).UUIDString()

    def get_xml(self, entryid):
        """Return the network's XML description."""
        return self.find_entry(entryid).XMLDesc(0)

    def get_forward(self, entryid):
        """Return the forward mode; raises ValueError when not specified."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/forward')[0].get('mode')
        except Exception:
            raise ValueError('Forward mode not specified')
        return result

    def get_domain(self, entryid):
        """Return the DNS domain name; raises ValueError when not specified."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/domain')[0].get('name')
        except Exception:
            raise ValueError('Domain not specified')
        return result

    def get_macaddress(self, entryid):
        """Return the bridge MAC address; raises ValueError when not specified."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/mac')[0].get('address')
        except Exception:
            raise ValueError('MAC address not specified')
        return result

    def get_autostart(self, entryid):
        """Return the autostart flag as a yes/no string."""
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        """Return the raw autostart flag; in check mode a missing net reports a change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)

    def set_autostart(self, entryid, val):
        """Set autostart; in check mode only report whether it would change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)

    def get_bridge(self, entryid):
        """Return the name of the bridge device backing the network."""
        return self.find_entry(entryid).bridgeName()

    def get_persistent(self, entryid):
        """Return the persistence flag as a yes/no string."""
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def get_dhcp_leases(self, entryid):
        """Return the network's current DHCP leases."""
        network = self.find_entry(entryid)
        return network.DHCPLeases()

    def define_from_xml(self, entryid, xml):
        """Define the network from xml; in check mode only report the change."""
        if not self.module.check_mode:
            return self.conn.networkDefineXML(xml)
        else:
            try:
                self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
|
||||
|
||||
|
||||
class VirtNetwork(object):
    """High-level network facade used by core().

    NOTE: method names are part of the external interface — core()
    dispatches commands dynamically via getattr(v, command), so every
    public method here maps 1:1 to a module command or state handler.
    Code is kept unchanged; several methods intentionally return None
    (treated as "no change" by callers) or rely on check-mode
    exit_json side effects inside LibvirtConnection.
    """

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)

    def get_net(self, entryid):
        # Raises EntryNotFound when the network does not exist.
        return self.conn.find_entry(entryid)

    def list_nets(self, state=None):
        # List network names, optionally filtered by readable state
        # ("active"/"inactive").
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results

    def state(self):
        # Return "<name> <state>" strings for every known network.
        results = []
        for entry in self.list_nets():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry, state_blurb))
        return results

    def autostart(self, entryid):
        # Command form: unconditionally enables autostart for the network.
        return self.conn.set_autostart(entryid, True)

    def get_autostart(self, entryid):
        # Returns the raw autostart flag (check-mode aware variant).
        return self.conn.get_autostart2(entryid)

    def set_autostart(self, entryid, state):
        return self.conn.set_autostart(entryid, state)

    def create(self, entryid):
        # Idempotent start: returns None (no change) when already active
        # or when libvirt reports the network as already existing.
        if self.conn.get_status(entryid) == "active":
            return
        try:
            return self.conn.create(entryid)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NETWORK_EXIST:
                return None
            raise

    def modify(self, entryid, xml):
        # Returns True/False depending on whether a change was applied.
        return self.conn.modify(entryid, xml)

    def start(self, entryid):
        # 'start' is an alias for 'create'.
        return self.create(entryid)

    def stop(self, entryid):
        # Only destroys the network when it is currently active.
        if self.conn.get_status(entryid) == "active":
            return self.conn.destroy(entryid)

    def destroy(self, entryid):
        # 'destroy' is an alias for 'stop'.
        return self.stop(entryid)

    def undefine(self, entryid):
        return self.conn.undefine(entryid)

    def status(self, entryid):
        return self.conn.get_status(entryid)

    def get_xml(self, entryid):
        return self.conn.get_xml(entryid)

    def define(self, entryid, xml):
        return self.conn.define_from_xml(entryid, xml)

    def info(self):
        # Same data as facts(), but returned under the 'networks' key
        # instead of as ansible_facts.
        return self.facts(facts_mode='info')

    def facts(self, name=None, facts_mode='facts'):
        # Gather per-network details; optional attributes are collected
        # best-effort and simply omitted when libvirt cannot supply them.
        results = dict()
        if name:
            entries = [name]
        else:
            entries = self.list_nets()
        for entry in entries:
            results[entry] = dict()
            results[entry]["autostart"] = self.conn.get_autostart(entry)
            results[entry]["persistent"] = self.conn.get_persistent(entry)
            results[entry]["state"] = self.conn.get_status(entry)
            results[entry]["bridge"] = self.conn.get_bridge(entry)
            results[entry]["uuid"] = self.conn.get_uuid(entry)
            try:
                results[entry]["dhcp_leases"] = self.conn.get_dhcp_leases(entry)
            # not supported on RHEL 6
            except AttributeError:
                pass

            try:
                results[entry]["forward_mode"] = self.conn.get_forward(entry)
            except ValueError:
                pass

            try:
                results[entry]["domain"] = self.conn.get_domain(entry)
            except ValueError:
                pass

            try:
                results[entry]["macaddress"] = self.conn.get_macaddress(entry)
            except ValueError:
                pass

        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_networks"] = results
        elif facts_mode == 'info':
            facts['networks'] = results
        return facts
|
||||
|
||||
|
||||
def core(module):
    """Dispatch the requested state / command / autostart operation.

    Returns a (rc, result) tuple; may also exit early via
    module.fail_json, or — in check mode — via module.exit_json calls
    made inside the connection layer.
    """

    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)

    v = VirtNetwork(uri, module)
    res = {}

    # 'list_nets' combined with a state filters the listing by that state.
    if state and command == 'list_nets':
        res = v.list_nets(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if state in ['active']:
            # Start the network only when it is not already running.
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in ['present']:
            # Ensure the network is defined; xml is required when missing.
            try:
                v.get_net(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="network '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in ['inactive']:
            entries = v.list_nets()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in ['undefined', 'absent']:
            # Stop the network first if running, then remove its definition.
            entries = v.list_nets()
            if name in entries:
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command in ('define', 'modify'):
                if not xml:
                    module.fail_json(msg=command + " requires xml argument")
                try:
                    v.get_net(name)
                except EntryNotFound:
                    # Network not defined yet: define it from the given xml.
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                else:
                    if command == 'modify':
                        mod = v.modify(name, xml)
                        res = {'changed': mod, 'modified': name}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # Host-wide command ('list_nets', 'facts', 'info'); 'facts'
            # optionally restricts the gathering to a single network.
            if command == 'facts' and name:
                res = v.facts(name)
            else:
                res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    if autostart is not None:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        # Toggle autostart only when it differs from the requested value.
        res['changed'] = False
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)

        return VIRT_SUCCESS, res

    module.fail_json(msg="expected state or command parameter to be specified")
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, check requirements, run core()."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['network']),
            # 'undefined' is documented in DOCUMENTATION and handled by
            # core() as a synonym of 'absent'; expose it as a valid choice.
            state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined']),
            command=dict(choices=ALL_COMMANDS),
            uri=dict(default='qemu:///system'),
            xml=dict(),
            autostart=dict(type='bool')
        ),
        supports_check_mode=True
    )

    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )

    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        # Use to_native (already imported) for a portable error string.
        module.fail_json(msg=to_native(e))

    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
|
||||
711
plugins/modules/cloud/misc/virt_pool.py
Normal file
711
plugins/modules/cloud/misc/virt_pool.py
Normal file
|
|
@ -0,0 +1,711 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt_pool
|
||||
author: "Maciej Delmanowski (@drybjed)"
|
||||
short_description: Manage libvirt storage pools
|
||||
description:
|
||||
- Manage I(libvirt) storage pools.
|
||||
options:
|
||||
name:
|
||||
required: false
|
||||
aliases: [ "pool" ]
|
||||
description:
|
||||
- name of the storage pool being managed. Note that pool must be previously
|
||||
defined with xml.
|
||||
state:
|
||||
required: false
|
||||
choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
|
||||
description:
|
||||
- specify which state you want a storage pool to be in.
|
||||
If 'active', pool will be started.
|
||||
If 'present', ensure that pool is present but do not change its
|
||||
state; if it's missing, you need to specify xml argument.
|
||||
If 'inactive', pool will be stopped.
|
||||
If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.
|
||||
If 'deleted', pool contents will be deleted and then pool undefined.
|
||||
command:
|
||||
required: false
|
||||
choices: [ "define", "build", "create", "start", "stop", "destroy",
|
||||
"delete", "undefine", "get_xml", "list_pools", "facts",
|
||||
"info", "status" ]
|
||||
description:
|
||||
- in addition to state management, various non-idempotent commands are available.
|
||||
See examples.
|
||||
autostart:
|
||||
required: false
|
||||
type: bool
|
||||
description:
|
||||
- Specify if a given storage pool should be started automatically on system boot.
|
||||
uri:
|
||||
required: false
|
||||
default: "qemu:///system"
|
||||
description:
|
||||
- I(libvirt) connection uri.
|
||||
xml:
|
||||
required: false
|
||||
description:
|
||||
- XML document used with the define command.
|
||||
mode:
|
||||
required: false
|
||||
choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
|
||||
description:
|
||||
- Pass additional parameters to 'build' or 'delete' commands.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "python-libvirt"
|
||||
- "python-lxml"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Define a new storage pool
|
||||
- virt_pool:
|
||||
command: define
|
||||
name: vms
|
||||
xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
|
||||
|
||||
# Build a storage pool if it does not exist
|
||||
- virt_pool:
|
||||
command: build
|
||||
name: vms
|
||||
|
||||
# Start a storage pool
|
||||
- virt_pool:
|
||||
command: create
|
||||
name: vms
|
||||
|
||||
# List available pools
|
||||
- virt_pool:
|
||||
command: list_pools
|
||||
|
||||
# Get XML data of a specified pool
|
||||
- virt_pool:
|
||||
command: get_xml
|
||||
name: vms
|
||||
|
||||
# Stop a storage pool
|
||||
- virt_pool:
|
||||
command: destroy
|
||||
name: vms
|
||||
|
||||
# Delete a storage pool (destroys contents)
|
||||
- virt_pool:
|
||||
command: delete
|
||||
name: vms
|
||||
|
||||
# Undefine a storage pool
|
||||
- virt_pool:
|
||||
command: undefine
|
||||
name: vms
|
||||
|
||||
# Gather facts about storage pools
|
||||
# Facts will be available as 'ansible_libvirt_pools'
|
||||
- virt_pool:
|
||||
command: facts
|
||||
|
||||
# Gather information about pools managed by 'libvirt' remotely using uri
|
||||
- virt_pool:
|
||||
command: info
|
||||
uri: '{{ item }}'
|
||||
with_items: '{{ libvirt_uris }}'
|
||||
register: storage_pools
|
||||
|
||||
# Ensure that a pool is active (needs to be defined and built first)
|
||||
- virt_pool:
|
||||
state: active
|
||||
name: vms
|
||||
|
||||
# Ensure that a pool is inactive
|
||||
- virt_pool:
|
||||
state: inactive
|
||||
name: vms
|
||||
|
||||
# Ensure that a given pool will be started at boot
|
||||
- virt_pool:
|
||||
autostart: yes
|
||||
name: vms
|
||||
|
||||
# Disable autostart for a given pool
|
||||
- virt_pool:
|
||||
autostart: no
|
||||
name: vms
|
||||
'''
|
||||
|
||||
try:
|
||||
import libvirt
|
||||
except ImportError:
|
||||
HAS_VIRT = False
|
||||
else:
|
||||
HAS_VIRT = True
|
||||
|
||||
try:
|
||||
from lxml import etree
|
||||
except ImportError:
|
||||
HAS_XML = False
|
||||
else:
|
||||
HAS_XML = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
# Return codes used by core(); only VIRT_SUCCESS (0) leads to exit_json.
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2

ALL_COMMANDS = []
# Commands that operate on a single named pool (require the 'name' option).
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
                  'undefine', 'destroy', 'get_xml', 'define', 'refresh']
# Host-wide commands that take no pool name.
HOST_COMMANDS = ['list_pools', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# storagePool.isActive() result (int) -> human-readable state.
ENTRY_STATE_ACTIVE_MAP = {
    0: "inactive",
    1: "active"
}

# storagePool.autostart() result (int) -> human-readable flag.
ENTRY_STATE_AUTOSTART_MAP = {
    0: "no",
    1: "yes"
}

# storagePool.isPersistent() result (int) -> human-readable flag.
ENTRY_STATE_PERSISTENT_MAP = {
    0: "no",
    1: "yes"
}

# First field of storagePool.info() -> human-readable status.
ENTRY_STATE_INFO_MAP = {
    0: "inactive",
    1: "building",
    2: "running",
    3: "degraded",
    4: "inaccessible"
}

# 'mode' option value -> libvirt build() flag bits.
ENTRY_BUILD_FLAGS_MAP = {
    "new": 0,
    "repair": 1,
    "resize": 2,
    "no_overwrite": 4,
    "overwrite": 8
}

# 'mode' option value -> libvirt delete() flag bits.
ENTRY_DELETE_FLAGS_MAP = {
    "normal": 0,
    "zeroed": 1
}

# Union of build and delete modes; used as choices for the 'mode' option.
ALL_MODES = []
ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
|
||||
|
||||
|
||||
class EntryNotFound(Exception):
    """Raised when no storage pool with the requested name exists."""
|
||||
|
||||
|
||||
class LibvirtConnection(object):
    """Low-level wrapper around a libvirt connection exposing the
    storage-pool primitives used by VirtStoragePool.

    In check mode the mutating methods do not touch libvirt; instead they
    call module.exit_json(changed=True) as soon as a change *would* happen.
    """

    def __init__(self, uri, module):
        # module: AnsibleModule; kept for check_mode and early exits.
        self.module = module

        conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_entry(self, entryid):
        """Return the pool named `entryid`; with entryid == -1 return all
        pools (active and inactive). Raises EntryNotFound on a miss."""
        # entryid = -1 returns a list of everything

        results = []

        # Get active entries
        for name in self.conn.listStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)

        # Get inactive entries
        for name in self.conn.listDefinedStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)

        if entryid == -1:
            return results

        for entry in results:
            if entry.name() == entryid:
                return entry

        raise EntryNotFound("storage pool %s not found" % entryid)

    def create(self, entryid):
        """Start the named pool; in check mode exit changed=True when the
        pool is missing or inactive."""
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except Exception:
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def destroy(self, entryid):
        """Stop the named pool; in check mode exit changed=True when it is
        currently active."""
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)

    def undefine(self, entryid):
        """Remove the pool definition from libvirt."""
        if not self.module.check_mode:
            return self.find_entry(entryid).undefine()
        else:
            # NOTE(review): find_entry raises on a missing pool, so this
            # falsy check can only be reached for an existing (truthy)
            # entry -- the check-mode branch never reports a change here.
            if not self.find_entry(entryid):
                return self.module.exit_json(changed=True)

    def get_status2(self, entry):
        # Takes an already-resolved pool object (not a name).
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        """Return 'active'/'inactive' for the named pool, 'unknown' for an
        unrecognized isActive() value."""
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except Exception:
                # NOTE(review): ENTRY_STATE_ACTIVE_MAP is keyed by ints
                # (0/1), so looking up the string "inactive" always falls
                # through to "unknown" -- this likely intends to return
                # "inactive" for a pool missing in check mode.
                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        return self.find_entry(entryid).UUIDString()

    def get_xml(self, entryid):
        # Full XML description of the pool (flags=0: live config).
        return self.find_entry(entryid).XMLDesc(0)

    def get_info(self, entryid):
        # (state, capacity, allocation, available) tuple from libvirt.
        return self.find_entry(entryid).info()

    def get_volume_count(self, entryid):
        return self.find_entry(entryid).numOfVolumes()

    def get_volume_names(self, entryid):
        return self.find_entry(entryid).listVolumes()

    def get_devices(self, entryid):
        """Return the source device paths from the pool XML; raises
        ValueError when the pool defines no source devices."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        if xml.xpath('/pool/source/device'):
            result = []
            for device in xml.xpath('/pool/source/device'):
                result.append(device.get('path'))
        # When the xpath matched nothing, `result` was never bound, so the
        # return raises NameError, which is caught and re-raised as the
        # ValueError below.
        try:
            return result
        except Exception:
            raise ValueError('No devices specified')

    def get_format(self, entryid):
        """Return the source format type, or raise ValueError if absent."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/format')[0].get('type')
        except Exception:
            raise ValueError('Format not specified')
        return result

    def get_host(self, entryid):
        """Return the source host name, or raise ValueError if absent."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/host')[0].get('name')
        except Exception:
            raise ValueError('Host not specified')
        return result

    def get_source_path(self, entryid):
        """Return the source directory path, or raise ValueError if absent."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/dir')[0].get('path')
        except Exception:
            raise ValueError('Source path not specified')
        return result

    def get_path(self, entryid):
        # Target path; assumed present in every pool definition.
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.xpath('/pool/target/path')[0].text

    def get_type(self, entryid):
        # The 'type' attribute of the root <pool> element (e.g. "dir").
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.get('type')

    def build(self, entryid, flags):
        """Build the pool with the given libvirt flag bits; in check mode
        exit changed=True when the pool does not exist."""
        if not self.module.check_mode:
            return self.find_entry(entryid).build(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def delete(self, entryid, flags):
        """Delete the pool contents with the given flag bits; in check mode
        exit changed=True when the pool exists."""
        if not self.module.check_mode:
            return self.find_entry(entryid).delete(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
            if state:
                return self.module.exit_json(changed=True)

    def get_autostart(self, entryid):
        # Human-readable "yes"/"no" form of the autostart flag.
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        # Raw int form of the autostart flag (0/1), used for truthiness
        # checks in core().
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)

    def set_autostart(self, entryid, val):
        """Set the autostart flag; in check mode exit changed=True when the
        flag would actually change (or the pool is missing)."""
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)

    def refresh(self, entryid):
        # No check-mode guard: refresh is treated as a read-style command.
        return self.find_entry(entryid).refresh()

    def get_persistent(self, entryid):
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def define_from_xml(self, entryid, xml):
        """Define a pool from an XML document; in check mode exit
        changed=True when no pool with that name exists yet."""
        if not self.module.check_mode:
            return self.conn.storagePoolDefineXML(xml)
        else:
            try:
                self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
|
||||
|
||||
|
||||
class VirtStoragePool(object):
    """High-level storage-pool API used by core(); mostly a thin delegation
    layer over LibvirtConnection, plus fact gathering."""

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)

    def get_pool(self, entryid):
        """Return the libvirt pool object for `entryid` (raises EntryNotFound)."""
        return self.conn.find_entry(entryid)

    def list_pools(self, state=None):
        """Return pool names, optionally filtered by 'active'/'inactive'."""
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results

    def state(self):
        """Return "<name> <state>" strings for every pool."""
        results = []
        for entry in self.list_pools():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry, state_blurb))
        return results

    def autostart(self, entryid):
        # Convenience: enable autostart.
        return self.conn.set_autostart(entryid, True)

    def get_autostart(self, entryid):
        # Returns the raw int autostart flag (see get_autostart2).
        return self.conn.get_autostart2(entryid)

    def set_autostart(self, entryid, state):
        return self.conn.set_autostart(entryid, state)

    def create(self, entryid):
        return self.conn.create(entryid)

    def start(self, entryid):
        # 'start' and 'create' are aliases at the libvirt level.
        return self.conn.create(entryid)

    def stop(self, entryid):
        # 'stop' and 'destroy' are aliases at the libvirt level.
        return self.conn.destroy(entryid)

    def destroy(self, entryid):
        return self.conn.destroy(entryid)

    def undefine(self, entryid):
        return self.conn.undefine(entryid)

    def status(self, entryid):
        return self.conn.get_status(entryid)

    def get_xml(self, entryid):
        return self.conn.get_xml(entryid)

    def define(self, entryid, xml):
        return self.conn.define_from_xml(entryid, xml)

    def build(self, entryid, flags):
        # `flags` here is the textual mode; unknown modes map to 0.
        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))

    def delete(self, entryid, flags):
        # `flags` here is the textual mode; unknown modes map to 0 (normal).
        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))

    def refresh(self, entryid):
        return self.conn.refresh(entryid)

    def info(self):
        # Same data as facts(), but under the 'pools' key instead of
        # ansible_facts.
        return self.facts(facts_mode='info')

    def facts(self, facts_mode='facts'):
        """Collect per-pool facts; returns either an ansible_facts payload
        (facts_mode='facts') or a plain 'pools' mapping (facts_mode='info')."""
        results = dict()
        for entry in self.list_pools():
            results[entry] = dict()
            if self.conn.find_entry(entry):
                data = self.conn.get_info(entry)
                # libvirt returns maxMem, memory, and cpuTime as long()'s, which
                # xmlrpclib tries to convert to regular int's during serialization.
                # This throws exceptions, so convert them to strings here and
                # assume the other end of the xmlrpc connection can figure things
                # out or doesn't care.
                results[entry] = {
                    "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
                    "size_total": str(data[1]),
                    "size_used": str(data[2]),
                    "size_available": str(data[3]),
                }
                results[entry]["autostart"] = self.conn.get_autostart(entry)
                results[entry]["persistent"] = self.conn.get_persistent(entry)
                results[entry]["state"] = self.conn.get_status(entry)
                results[entry]["path"] = self.conn.get_path(entry)
                results[entry]["type"] = self.conn.get_type(entry)
                results[entry]["uuid"] = self.conn.get_uuid(entry)
                # Volume listing only works on an active pool.
                if self.conn.find_entry(entry).isActive():
                    results[entry]["volume_count"] = self.conn.get_volume_count(entry)
                    results[entry]["volumes"] = list()
                    for volume in self.conn.get_volume_names(entry):
                        results[entry]["volumes"].append(volume)
                else:
                    results[entry]["volume_count"] = -1

                # The following attributes are pool-type dependent; absence
                # raises ValueError and the key is simply omitted.
                try:
                    results[entry]["host"] = self.conn.get_host(entry)
                except ValueError:
                    pass

                try:
                    results[entry]["source_path"] = self.conn.get_source_path(entry)
                except ValueError:
                    pass

                try:
                    results[entry]["format"] = self.conn.get_format(entry)
                except ValueError:
                    pass

                try:
                    devices = self.conn.get_devices(entry)
                    results[entry]["devices"] = devices
                except ValueError:
                    pass

            else:
                results[entry]["state"] = self.conn.get_status(entry)

        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_pools"] = results
        elif facts_mode == 'info':
            facts['pools'] = results
        return facts
|
||||
|
||||
|
||||
def core(module):
    """Dispatch on the module parameters.

    Precedence: state (idempotent management) > command (imperative
    operations) > autostart (flag management). Returns (rc, result-dict);
    invalid parameter combinations fail_json immediately.
    """

    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)
    mode = module.params.get('mode', None)

    v = VirtStoragePool(uri, module)
    res = {}

    # Special case: list_pools can be filtered by the state parameter.
    if state and command == 'list_pools':
        res = v.list_pools(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if state in ['active']:
            # Start the pool only when it is not already active.
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in ['present']:
            # Define the pool from xml only when it does not exist yet.
            try:
                v.get_pool(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in ['inactive']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in ['undefined', 'absent']:
            # Stop first if needed, then drop the definition.
            entries = v.list_pools()
            if name in entries:
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        elif state in ['deleted']:
            # Stop, wipe contents (honoring mode), then drop the definition.
            entries = v.list_pools()
            if name in entries:
                if v.status(name) != 'inactive':
                    v.destroy(name)
                v.delete(name, mode)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                # Defining an already-existing pool is a no-op.
                try:
                    v.get_pool(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                return VIRT_SUCCESS, res
            elif command == 'build':
                res = v.build(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            elif command == 'delete':
                res = v.delete(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            # Remaining entry commands map 1:1 to VirtStoragePool methods.
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # Host-wide commands (list_pools, facts, info) take no name.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    if autostart is not None:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        # get_autostart returns the raw libvirt int flag (via
        # get_autostart2), so the truthiness checks below are correct.
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)

        return VIRT_SUCCESS, res

    module.fail_json(msg="expected state or command parameter to be specified")
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the AnsibleModule, validate bindings, run core()."""
    arg_spec = dict(
        name=dict(aliases=['pool']),
        state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
        command=dict(choices=ALL_COMMANDS),
        uri=dict(default='qemu:///system'),
        xml=dict(),
        autostart=dict(type='bool'),
        mode=dict(choices=ALL_MODES),
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    # Both native bindings are hard requirements; fail early with a clear hint.
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )

    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=str(e))

    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)


if __name__ == '__main__':
    main()
|
||||
207
plugins/modules/cloud/misc/xenserver_facts.py
Normal file
207
plugins/modules/cloud/misc/xenserver_facts.py
Normal file
|
|
@ -0,0 +1,207 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: xenserver_facts
|
||||
short_description: Get facts reported on XenServer
|
||||
description:
|
||||
- Reads data out of XenAPI, can be used instead of multiple xe commands.
|
||||
author:
|
||||
- Andy Hill (@andyhky)
|
||||
- Tim Rupp (@caphrim007)
|
||||
- Robin Lee (@cheese)
|
||||
options: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather facts from xenserver
|
||||
xenserver_facts:
|
||||
|
||||
- name: Print running VMs
|
||||
debug:
|
||||
msg: "{{ item }}"
|
||||
with_items: "{{ xs_vms.keys() }}"
|
||||
when: xs_vms[item]['power_state'] == "Running"
|
||||
|
||||
# Which will print:
|
||||
#
|
||||
# TASK: [Print running VMs] ***********************************************************
|
||||
# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
|
||||
# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
|
||||
# "item": "Control domain on host: 10.0.13.22",
|
||||
# "msg": "Control domain on host: 10.0.13.22"
|
||||
# }
|
||||
'''
|
||||
|
||||
|
||||
HAVE_XENAPI = False
|
||||
try:
|
||||
import XenAPI
|
||||
HAVE_XENAPI = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils import distro
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class XenServerFacts:
    """Maps the local XenServer product version to its release codename."""

    def __init__(self):
        # Known XenServer releases, keyed by product version string.
        self.codes = {
            '5.5.0': 'george',
            '5.6.100': 'oxford',
            '6.0.0': 'boston',
            '6.1.0': 'tampa',
            '6.2.0': 'clearwater'
        }

    @property
    def version(self):
        """Product version as reported by the host's distribution info."""
        return distro.linux_distribution()[1]

    @property
    def codename(self):
        """Release codename for self.version, or None if unrecognized."""
        return self.codes.get(self.version)
|
||||
|
||||
|
||||
def get_xenapi_session():
    """Return an authenticated local XenAPI session."""
    local_session = XenAPI.xapi_local()
    # Empty username/password is sufficient for the local socket.
    local_session.xenapi.login_with_password('', '')
    return local_session
|
||||
|
||||
|
||||
def get_networks(session):
    """Return all network records, keyed by their name_label."""
    records = session.xenapi.network.get_all_records()
    return change_keys(records, key='name_label')
|
||||
|
||||
|
||||
def get_pifs(session):
    """Return PIF records keyed by their device name (ethN or bondN)."""
    records = session.xenapi.PIF.get_all_records()
    pifs = change_keys(records, key='uuid')
    xs_pifs = {}
    # NOTE(review): range(0, 7) only matches devices eth0..eth6 and
    # bond0..bond6 (eth7 is excluded) -- confirm this limit is intended.
    for pif in pifs.values():
        for idx in range(0, 7):
            interface_name = "eth%s" % (idx)
            bond_name = interface_name.replace('eth', 'bond')
            if pif['device'] == interface_name:
                xs_pifs[interface_name] = pif
            elif pif['device'] == bond_name:
                xs_pifs[bond_name] = pif
    return xs_pifs
|
||||
|
||||
|
||||
def get_vlans(session):
    """Return all VLAN records, keyed by their tag."""
    vlan_records = session.xenapi.VLAN.get_all_records()
    return change_keys(vlan_records, key='tag')
|
||||
|
||||
|
||||
def change_keys(recs, key='uuid', filter_func=None):
    """Re-key a xapi record dict by the value of each record's `key` field.

    The original opaque ref is preserved in the record under 'ref'.
    Records rejected by filter_func (when given) are dropped.
    """
    remapped = {}

    for ref, record in recs.items():
        if filter_func is not None and not filter_func(record):
            continue

        # param_value may be of type xmlrpc.client.DateTime, which is not
        # simply convertable to str. Use the 'value' attr to get the str
        # value, following an example in the xmlrpc.client.DateTime docs.
        for field_name, field_value in list(record.items()):
            if hasattr(field_value, "value"):
                record[field_name] = field_value.value

        new_key = record[key]
        remapped[new_key] = record
        remapped[new_key]['ref'] = ref

    return remapped
|
||||
|
||||
|
||||
def get_host(session):
    """Get the host"""
    host_refs = session.xenapi.host.get_all()
    # We only have one host, so just return its entry
    return session.xenapi.host.get_record(host_refs[0])
|
||||
|
||||
|
||||
def get_vms(session):
    """Return VM records keyed by name_label, or None when there are none."""
    records = session.xenapi.VM.get_all_records()
    if not records:
        return None
    return change_keys(records, key='name_label')
|
||||
|
||||
|
||||
def get_srs(session):
    """Return SR records keyed by name_label, or None when there are none."""
    records = session.xenapi.SR.get_all_records()
    if not records:
        return None
    return change_keys(records, key='name_label')
|
||||
|
||||
|
||||
def main():
    """Gather XenServer facts over a local XenAPI session and exit with
    ansible_facts. Empty/missing sections are omitted from the result."""
    module = AnsibleModule({})

    if not HAVE_XENAPI:
        module.fail_json(changed=False, msg="python xen api required for this module")

    obj = XenServerFacts()
    try:
        session = get_xenapi_session()
    except XenAPI.Failure as e:
        module.fail_json(msg='%s' % e)

    data = {
        'xenserver_version': obj.version,
        'xenserver_codename': obj.codename
    }

    xs_networks = get_networks(session)
    xs_pifs = get_pifs(session)
    xs_vlans = get_vlans(session)
    xs_vms = get_vms(session)
    xs_srs = get_srs(session)

    # Only truthy sections are reported as facts.
    if xs_vlans:
        data['xs_vlans'] = xs_vlans
    if xs_pifs:
        data['xs_pifs'] = xs_pifs
    if xs_networks:
        data['xs_networks'] = xs_networks

    if xs_vms:
        data['xs_vms'] = xs_vms

    if xs_srs:
        data['xs_srs'] = xs_srs

    module.exit_json(ansible_facts=data)


if __name__ == '__main__':
    main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue