commit 856246285cb083534e2570a80f42de45538c0380
Author: Nicolas Chauvet <kwizart@gmail.com>
Date: Thu Apr 4 18:08:03 2019 +0200
Add a few Ansible modules
library/_nova_compute.py | 604 ++++++++++++++++++++++++++++++++++++++++++++++
library/virt_boot | 299 +++++++++++++++++++++++
2 files changed, 903 insertions(+), 0 deletions(-)
---
diff --git a/library/_nova_compute.py b/library/_nova_compute.py
new file mode 100644
index 0000000..ea51d67
--- /dev/null
+++ b/library/_nova_compute.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Benno Joy <benno@ansible.com>
+# (c) 2013, John Dewey <john@dewey.ws>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.0',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: nova_compute
+version_added: "1.2"
+author: "Benno Joy (@bennojoy)"
+deprecated: Deprecated in 2.0. Use M(os_server) instead.
+short_description: Create/Delete VMs from OpenStack
+description:
+ - Create or Remove virtual machines from Openstack.
+options:
+ login_username:
+ description:
+ - login username to authenticate to keystone
+ required: true
+ default: admin
+ login_password:
+ description:
+ - Password of login user
+ required: true
+ default: 'yes'
+ login_tenant_name:
+ description:
+ - The tenant name of the login user
+ required: true
+ default: 'yes'
+ auth_url:
+ description:
+ - The keystone url for authentication
+ required: false
+ default: http://127.0.0.1:35357/v2.0/
+ region_name:
+ description:
+ - Name of the region
+ required: false
+ default: None
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ name:
+ description:
+ - Name that has to be given to the instance
+ required: true
+ default: None
+ image_id:
+ description:
+ - The id of the base image to boot. Mutually exclusive with image_name
+ required: true
+ default: None
+ image_name:
+ description:
+ - The name of the base image to boot. Mutually exclusive with image_id
+ required: true
+ default: None
+ version_added: "1.8"
+ image_exclude:
+ description:
+ - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
+ version_added: "1.8"
+ flavor_id:
+ description:
+ - The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram
+ required: false
+ default: 1
+ flavor_ram:
+ description:
+ - The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id
+ required: false
+ default: 1
+ version_added: "1.8"
+ flavor_include:
+ description:
+ - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name.
+ version_added: "1.8"
+ key_name:
+ description:
+ - The key pair name to be used when creating a VM
+ required: false
+ default: None
+ security_groups:
+ description:
+ - The name of the security group to which the VM should be added
+ required: false
+ default: None
+ nics:
+ description:
+ - A list of network IDs to which the VM's interface should be attached
+ required: false
+ default: None
+ auto_floating_ip:
+ description:
+ - Should a floating ip be auto created and assigned
+ required: false
+ default: 'no'
+ version_added: "1.8"
+ floating_ips:
+ description:
+ - list of valid floating IPs that pre-exist to assign to this node
+ required: false
+ default: None
+ version_added: "1.8"
+ floating_ip_pools:
+ description:
+ - list of floating IP pools from which to choose a floating IP
+ required: false
+ default: None
+ version_added: "1.8"
+ availability_zone:
+ description:
+ - Name of the availability zone
+ required: false
+ default: None
+ version_added: "1.8"
+ meta:
+ description:
+ - Key/value pairs that should be provided as metadata to the new VM
+ required: false
+ default: None
+ wait:
+ description:
+ - If the module should wait for the VM to be created.
+ required: false
+ default: 'yes'
+ wait_for:
+ description:
+ - The amount of time the module should wait for the VM to get into active state
+ required: false
+ default: 180
+ config_drive:
+ description:
+ - Whether to boot the server with config drive enabled
+ required: false
+ default: 'no'
+ version_added: "1.8"
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the instance
+ required: false
+ default: None
+ version_added: "1.6"
+ scheduler_hints:
+ description:
+ - Arbitrary key/value pairs to the scheduler for custom use
+ required: false
+ default: None
+ version_added: "1.9"
+requirements:
+ - "python >= 2.6"
+ - "python-novaclient"
+'''
+
+EXAMPLES = '''
+# Creates a new VM and attaches to a network and passes metadata to the instance
+- nova_compute:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ name: vm1
+ image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
+ key_name: ansible_key
+ wait_for: 200
+ flavor_id: 4
+ nics:
+ - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
+ meta:
+ hostname: test1
+ group: uge_master
+
+# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP
+- name: launch a nova instance
+ hosts: localhost
+ tasks:
+ - name: launch an instance
+ nova_compute:
+ state: present
+ login_username: username
+ login_password: Equality7-2521
+ login_tenant_name: username-project1
+ name: vm1
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ region_name: region-b.geo-1
+ availability_zone: az2
+ image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
+ key_name: test
+ wait_for: 200
+ flavor_id: 101
+ security_groups: default
+ auto_floating_ip: yes
+
+# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP
+- name: launch a nova instance
+ hosts: localhost
+ tasks:
+ - name: launch an instance
+ nova_compute:
+ state: present
+ login_username: username
+ login_password: Equality7-2521
+ login_tenant_name: username-project1
+ name: vm1
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ region_name: region-b.geo-1
+ availability_zone: az2
+ image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
+ key_name: test
+ wait_for: 200
+ flavor_id: 101
+ floating_ips:
+ - 12.34.56.79
+
+# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images
+- name: launch a nova instance
+ hosts: localhost
+ tasks:
+ - name: launch an instance
+ nova_compute:
+ name: vm1
+ state: present
+ login_username: username
+ login_password: Equality7-2521
+ login_tenant_name: username-project1
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ region_name: region-b.geo-1
+ image_name: Ubuntu Server 14.04
+ image_exclude: deprecated
+ flavor_ram: 4096
+
+# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW
+- name: launch a nova instance
+ hosts: localhost
+ tasks:
+ - name: launch an instance
+ nova_compute:
+ name: vm1
+ state: present
+ login_username: username
+ login_password: Equality7-2521
+ login_tenant_name: username-project1
+ auth_url: https://identity.api.rackspacecloud.com/v2.0/
+ region_name: DFW
+ image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
+ flavor_ram: 4096
+ flavor_include: Performance
+'''
+
+import operator
+import os
+import time
+
+try:
+ from novaclient.v1_1 import client as nova_client
+ from novaclient.v1_1 import floating_ips
+ from novaclient import exceptions
+ from novaclient import utils
+ HAS_NOVACLIENT = True
+except ImportError:
+ HAS_NOVACLIENT = False
+
+
+def _delete_server(module, nova):
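+ # Look up the instance by exact name and delete it; unless wait is
+ # 'no', poll until the API stops listing it or 'wait_for' seconds pass.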
+ name = None
+ server_list = None
+ try:
+ server_list = nova.servers.list(True, {'name': module.params['name']})
+ if server_list:
+ server = [x for x in server_list if x.name == module.params['name']]
+ nova.servers.delete(server.pop())
+ except Exception as e:
+ module.fail_json( msg = "Error in deleting vm: %s" % e.message)
+ if module.params['wait'] == 'no':
+ module.exit_json(changed = True, result = "deleted")
+ expire = time.time() + int(module.params['wait_for'])
+ while time.time() < expire:
+ name = nova.servers.list(True, {'name': module.params['name']})
+ if not name:
+ module.exit_json(changed = True, result = "deleted")
+ time.sleep(5)
+ module.fail_json(msg = "Timed out waiting for server to get deleted, please
check manually")
+
+
+def _add_floating_ip_from_pool(module, nova, server):
+
+ # instantiate FloatingIPManager object
+ floating_ip_obj = floating_ips.FloatingIPManager(nova)
+
+ # empty dict and list
+ usable_floating_ips = {}
+ pools = []
+
+ # user specified
+ pools = module.params['floating_ip_pools']
+
+ # get the list of all floating IPs. Mileage may
+ # vary according to Nova Compute configuration
+ # per cloud provider
+ all_floating_ips = floating_ip_obj.list()
+
+ # iterate through all pools of IP address. Empty
+ # string means all and is the default value
+ for pool in pools:
+ # temporary list per pool
+ pool_ips = []
+ # loop through all floating IPs
+ for f_ip in all_floating_ips:
+ # if not reserved and the correct pool, add
+ if f_ip.fixed_ip is None and (f_ip.pool == pool):
+ pool_ips.append(f_ip.ip)
+ # only need one
+ break
+
+ # if the list is empty, add for this pool
+ if not pool_ips:
+ try:
+ new_ip = nova.floating_ips.create(pool)
+ except Exception as e:
+ module.fail_json(msg = "Unable to create floating ip: %s" %
(e.message))
+ pool_ips.append(new_ip.ip)
+ # Add to the main list
+ usable_floating_ips[pool] = pool_ips
+
+ # finally, add ip(s) to instance for each pool
+ for pool in usable_floating_ips:
+ for ip in usable_floating_ips[pool]:
+ try:
+ server.add_floating_ip(ip)
+ # We only need to assign one ip - but there is an inherent
+ # race condition and some other cloud operation may have
+ # stolen an available floating ip
+ break
+ except Exception as e:
+ module.fail_json(msg = "Error attaching IP %s to instance %s: %s
" % (ip, server.id, e.message))
+
+
+def _add_floating_ip_list(module, server, ips):
+ # add ip(s) to instance
+ for ip in ips:
+ try:
+ server.add_floating_ip(ip)
+ except Exception as e:
+ module.fail_json(msg = "Error attaching IP %s to instance %s: %s "
% (ip, server.id, e.message))
+
+
+def _add_auto_floating_ip(module, nova, server):
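+ # Allocate a floating IP from the default pool and attach it; the IP
+ # is released again if the attach step fails.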
+
+ try:
+ new_ip = nova.floating_ips.create()
+ except Exception as e:
+ module.fail_json(msg = "Unable to create floating ip: %s" % (e))
+
+ try:
+ server.add_floating_ip(new_ip)
+ except Exception as e:
+ # Clean up - we auto-created this ip, and it's not attached
+ # to the server, so the cloud will not know what to do with it
+ nova.floating_ips.delete(new_ip)
+ module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (new_ip.ip, server.id, e.message))
+
+
+def _add_floating_ip(module, nova, server):
+
+ if module.params['floating_ip_pools']:
+ _add_floating_ip_from_pool(module, nova, server)
+ elif module.params['floating_ips']:
+ _add_floating_ip_list(module, server, module.params['floating_ips'])
+ elif module.params['auto_floating_ip']:
+ _add_auto_floating_ip(module, nova, server)
+ else:
+ return server
+
+ # this may look redundant, but if there is now a
+ # floating IP, then it needs to be obtained from
+ # a recent server object if the above code path exec'd
+ try:
+ server = nova.servers.get(server.id)
+ except Exception as e:
+ module.fail_json(msg = "Error in getting info from instance: %s " %
e.message)
+ return server
+
+
+def _get_image_id(module, nova):
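+ # Resolve image_name to an id by substring match, skipping any image
+ # whose name contains image_exclude; otherwise return image_id as given.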
+ if module.params['image_name']:
+ for image in nova.images.list():
+ if (module.params['image_name'] in image.name and (
+ not module.params['image_exclude']
+ or module.params['image_exclude'] not in image.name)):
+ return image.id
+ module.fail_json(msg = "Error finding image id from name(%s)" %
module.params['image_name'])
+ return module.params['image_id']
+
+
+def _get_flavor_id(module, nova):
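+ # Walk the flavors sorted by RAM and return the first one with at least
+ # flavor_ram MB whose name matches flavor_include; otherwise return
+ # flavor_id as given.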
+ if module.params['flavor_ram']:
+ for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')):
+ if (flavor.ram >= module.params['flavor_ram'] and
+ (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
+ return flavor.id
+ module.fail_json(msg = "Error finding flavor with %sMB of RAM" %
module.params['flavor_ram'])
+ return module.params['flavor_id']
+
+
+def _create_server(module, nova):
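+ # Boot the instance from the resolved image and flavor; when wait is
+ # 'yes', poll until ACTIVE, attach floating IPs and exit with the
+ # instance facts.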
+ image_id = _get_image_id(module, nova)
+ flavor_id = _get_flavor_id(module, nova)
+ bootargs = [module.params['name'], image_id, flavor_id]
+ bootkwargs = {
+ 'nics' : module.params['nics'],
+ 'meta' : module.params['meta'],
+ 'security_groups': module.params['security_groups'].split(','),
+ #userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
+ 'userdata': module.params['user_data'],
+ 'config_drive': module.params['config_drive'],
+ }
+
+ for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'):
+ if module.params[optional_param]:
+ bootkwargs[optional_param] = module.params[optional_param]
+ try:
+ server = nova.servers.create(*bootargs, **bootkwargs)
+ server = nova.servers.get(server.id)
+ except Exception as e:
+ module.fail_json( msg = "Error in creating instance: %s " % e.message)
+ if module.params['wait'] == 'yes':
+ expire = time.time() + int(module.params['wait_for'])
+ while time.time() < expire:
+ try:
+ server = nova.servers.get(server.id)
+ except Exception as e:
+ module.fail_json( msg = "Error in getting info from instance:
%s" % e.message)
+ if server.status == 'ACTIVE':
+ server = _add_floating_ip(module, nova, server)
+
+ private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
+ public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
+
+ # now exit with info
+ module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
+
+ if server.status == 'ERROR':
+ module.fail_json(msg = "Error in creating the server, please check
logs")
+ time.sleep(2)
+
+ module.fail_json(msg = "Timeout waiting for the server to come up.. Please
check manually")
+ if server.status == 'ERROR':
+ module.fail_json(msg = "Error in creating the server.. Please check
manually")
+ private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
+ public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
+
+ module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
+
+
+def _delete_floating_ip_list(module, nova, server, extra_ips):
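+ # Detach floating IPs that are attached but were not requested.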
+ for ip in extra_ips:
+ nova.servers.remove_floating_ip(server=server.id, address=ip)
+
+
+def _check_floating_ips(module, nova, server):
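+ # Reconcile the floating IPs of an existing server with the requested
+ # configuration, adding missing addresses and removing extra ones.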
+ changed = False
+ if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']:
+ ips = openstack_find_nova_addresses(server.addresses, 'floating')
+ if not ips:
+ # If we're configured to have a floating but we don't have one,
+ # let's add one
+ server = _add_floating_ip(module, nova, server)
+ changed = True
+ elif module.params['floating_ips']:
+ # we were configured to have specific ips, let's make sure we have
+ # those
+ missing_ips = []
+ for ip in module.params['floating_ips']:
+ if ip not in ips:
+ missing_ips.append(ip)
+ if missing_ips:
+ _add_floating_ip_list(module, server, missing_ips)
+ changed = True
+ extra_ips = []
+ for ip in ips:
+ if ip not in module.params['floating_ips']:
+ extra_ips.append(ip)
+ if extra_ips:
+ _delete_floating_ip_list(module, nova, server, extra_ips)
+ changed = True
+ return (changed, server)
+
+
+def _get_server_state(module, nova):
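+ # Find an existing server with the exact requested name; for
+ # state=present exit with its facts, otherwise return so the caller
+ # can create or delete it.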
+ server = None
+ try:
+ servers = nova.servers.list(True, {'name': module.params['name']})
+ if servers:
+ # the {'name': module.params['name']} will also return servers
+ # with names that partially match the server name, so we have to
+ # strictly filter here
+ servers = [x for x in servers if x.name == module.params['name']]
+ if servers:
+ server = servers[0]
+ except Exception as e:
+ module.fail_json(msg = "Error in getting the server list: %s" %
e.message)
+ if server and module.params['state'] == 'present':
+ if server.status != 'ACTIVE':
+ module.fail_json( msg="The VM is available but not Active. state:"
+ server.status)
+ (ip_changed, server) = _check_floating_ips(module, nova, server)
+ private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
+ public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
+ module.exit_json(changed = ip_changed, id = server.id, public_ip = public, private_ip = private, info = server._info)
+ if server and module.params['state'] == 'absent':
+ return True
+ if module.params['state'] == 'absent':
+ module.exit_json(changed = False, result = "not present")
+ return True
+
+
+
+def main():
+ argument_spec = openstack_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ image_id = dict(default=None),
+ image_name = dict(default=None),
+ image_exclude = dict(default='(deprecated)'),
+ flavor_id = dict(default=1),
+ flavor_ram = dict(default=None, type='int'),
+ flavor_include = dict(default=None),
+ key_name = dict(default=None),
+ security_groups = dict(default='default'),
+ nics = dict(default=None, type='list'),
+ meta = dict(default=None, type='dict'),
+ wait = dict(default='yes', choices=['yes', 'no']),
+ wait_for = dict(default=180),
+ state = dict(default='present', choices=['absent', 'present']),
+ user_data = dict(default=None),
+ config_drive = dict(default=False, type='bool'),
+ auto_floating_ip = dict(default=False, type='bool'),
+ floating_ips = dict(default=None, type='list'),
+ floating_ip_pools = dict(default=None, type='list'),
+ scheduler_hints = dict(default=None, type='dict'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['auto_floating_ip','floating_ips'],
+ ['auto_floating_ip','floating_ip_pools'],
+ ['floating_ips','floating_ip_pools'],
+ ['image_id','image_name'],
+ ['flavor_id','flavor_ram'],
+ ],
+ )
+
+ if not HAS_NOVACLIENT:
+ module.fail_json(msg='python-novaclient is required for this module')
+
+ nova = nova_client.Client(module.params['login_username'],
+ module.params['login_password'],
+ module.params['login_tenant_name'],
+ module.params['auth_url'],
+ region_name=module.params['region_name'],
+ service_type='compute')
+ try:
+ nova.authenticate()
+ except exceptions.Unauthorized as e:
+ module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" %
e.message)
+ except exceptions.AuthorizationFailure as e:
+ module.fail_json(msg = "Unable to authorize user: %s" % e.message)
+
+ if module.params['state'] == 'present':
+ if not module.params['image_id'] and not module.params['image_name']:
+ module.fail_json( msg = "Parameter 'image_id' or 'image_name' is required if state == 'present'")
+ else:
+ _get_server_state(module, nova)
+ _create_server(module, nova)
+ if module.params['state'] == 'absent':
+ _get_server_state(module, nova)
+ _delete_server(module, nova)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/library/virt_boot b/library/virt_boot
new file mode 100755
index 0000000..6591e56
--- /dev/null
+++ b/library/virt_boot
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+author: Jeroen Hoekx
+module: virt_boot
+short_description: Define libvirt boot parameters
+description:
+ - "This module configures the boot order or boot media of a libvirt virtual
+ machine. A guest can be configured to boot from network, hard disk, floppy,
+ cdrom or a direct kernel boot. Specific media can be attached for cdrom,
+ floppy and direct kernel boot."
+ - This module requires the libvirt module.
+version_added: "0.8"
+options:
+ domain:
+ description:
+ - The name of the libvirt domain.
+ required: true
+ boot:
+ description:
+ - "Specify the boot order of the virtual machine. This is a comma-separated
+ list of: I(fd), I(hd), I(cdrom) and I(network)."
+ required: false
+ bootmenu:
+ choices: [ "yes", "no" ]
+ description:
+ - Enable or disable the boot menu.
+ required: false
+ kernel:
+ description:
+ - The path of the kernel to boot.
+ required: false
+ initrd:
+ description:
+ - The path of the initrd to boot.
+ required: false
+ cmdline:
+ description:
+ - The command line to boot the kernel with.
+ required: false
+ device:
+ default: hdc
+ description:
+ - The libvirt device name of the cdrom/floppy.
+ required: false
+ image:
+ description:
+ - The image to connect to the cdrom/floppy device.
+ required: false
+ start:
+ choices: [ "yes", "no" ]
+ default: yes
+ description:
+ - Start the guest after configuration.
+ required: false
+examples:
+ - description: Boot from a cdrom image.
+ code: virt_boot domain=archrear image=/srv/rear/archrear/rear-archrear.iso boot=cdrom
+ - description: Boot from the local disk.
+ code: virt_boot domain=archrear boot=hd
+ - description: Boot a specific kernel with a special command line.
+ code: virt_boot domain=archrear kernel={{ storage }}/kernel-archrear initrd={{ storage }}/initramfs-archrear.img cmdline="root=/dev/ram0 vga=normal rw"
+ - description: Boot from the harddisk and if that fails from the network.
+ code: virt_boot domain=archrear boot=hd,network
+ - description: Enable the boot menu.
+ code: virt_boot domain=archrear bootmenu=yes
+requirements: [ "libvirt" ]
+notes:
+ - Run this on the libvirt host.
+ - I(kernel) and I(boot) are mutually exclusive.
+ - This module does not change a running system. A shutdown/restart is required.
+'''
+
+import sys
+
+try:
+ import xml.etree.ElementTree as ET
+ from xml.etree.ElementTree import SubElement
+except ImportError:
+ try:
+ import elementtree.ElementTree as ET
+ from elementtree.ElementTree import SubElement
+ except ImportError:
+ print "failed=True msg='ElementTree python module
unavailable'"
+
+try:
+ import libvirt
+except ImportError:
+ print "failed=True msg='libvirt python module unavailable'"
+ sys.exit(1)
+
+from ansible.module_utils.basic import AnsibleModule
+
+def get_disk(doc, device):
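+ # Return the <disk> element whose <target dev="..."/> matches the
+ # requested device name, or None when the domain has no such disk.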
+ for disk in doc.findall('.//disk'):
+ target = disk.find('target')
+ if target is not None:
+ if target.get('dev','') == device:
+ return disk
+
+def attach_disk(domain, doc, device, image):
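+ # Point the cdrom device at the given image; returns False when the
+ # image is already attached, True after updating the persistent config.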
+ disk = get_disk(doc, device)
+ if disk is not None:
+ source = disk.find('source')
+ if source is not None and source.get('file') == image:
+ return False
+
+ xml = '''<disk type="file" device="cdrom">
+ <driver name="qemu" type="raw"/>
+ <source file="{path}"/>
+ <target bus="ide" dev="{dev}"/>
+ </disk>'''.format(path=image, dev=device)
+ domain.updateDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_CONFIG)
+ return True
+
+def detach_disk(domain, doc, device):
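+ # Drop the source file from the device so the guest sees empty media;
+ # returns True only when something was actually detached.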
+ disk = get_disk(doc, device)
+ if disk is not None:
+ source = disk.find('source')
+ if source is not None and 'file' in source.attrib:
+ del source.attrib['file']
+ domain.updateDeviceFlags(ET.tostring(disk), libvirt.VIR_DOMAIN_AFFECT_CONFIG)
+ return True
+ return False
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ domain=dict(required=True, aliases=['guest']),
+ boot=dict(),
+ bootmenu=dict(type='bool'),
+ kernel=dict(),
+ initrd=dict(),
+ cmdline=dict(),
+ device=dict(default='hdc'),
+ image=dict(),
+ start=dict(type='bool', default='yes'),
+ ),
+ required_one_of = [['boot','kernel','image','bootmenu']],
+ mutually_exclusive = [['boot','kernel']]
+ )
+
+ params = module.params
+
+ domain_name = params['domain']
+
+ bootmenu = module.boolean(params['bootmenu'])
+
+ boot = params['boot']
+ kernel = params['kernel']
+ initrd = params['initrd']
+ cmdline = params['cmdline']
+
+ device = params['device']
+ image = params['image']
+
+ start = module.boolean(params['start'])
+
+ changed = False
+
+ conn = libvirt.open("qemu:///system")
+ domain = conn.lookupByName(domain_name)
+
+ doc = ET.fromstring( domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE) )
+
+ ### Connect image
+ if image:
+ changed = changed or attach_disk(domain, doc, device, image)
+ if not boot and not kernel:
+ module.exit_json(changed=changed, image=image, device=device)
+ else:
+ changed = changed or detach_disk(domain, doc, device)
+
+ if changed:
+ doc = ET.fromstring( domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE) )
+
+ ### Boot ordering
+ os = doc.find('os')
+ boot_list = os.findall('boot')
+ kernel_el = os.find('kernel')
+ initrd_el = os.find('initrd')
+ cmdline_el = os.find('cmdline')
+
+ ### traditional boot
+ if boot:
+ if kernel_el is not None:
+ changed = True
+ os.remove(kernel_el)
+ if initrd_el is not None:
+ changed = True
+ os.remove(initrd_el)
+ if cmdline_el is not None:
+ changed = True
+ os.remove(cmdline_el)
+
+ items = boot.split(',')
+ if boot_list:
+ needs_change = False
+ if len(items) == len(boot_list):
+ for (boot_el, dev) in zip(boot_list, items):
+ if boot_el.get('dev') != dev:
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ changed = True
+ for boot_el in boot_list:
+ os.remove(boot_el)
+ for item in items:
+ boot_el = SubElement(os, 'boot')
+ boot_el.set('dev', item)
+ else:
+ changed = True
+ for item in items:
+ boot_el = SubElement(os, 'boot')
+ boot_el.set('dev', item)
+ ### direct kernel boot
+ elif kernel:
+ if boot_list:
+ ### libvirt always adds boot=hd using direct kernel boot
+ if not (len(boot_list)==1 and boot_list[0].get('dev')=='hd'):
+ changed = True
+ for boot_el in boot_list:
+ os.remove(boot_el)
+
+ if kernel_el is not None:
+ if kernel_el.text != kernel:
+ changed = True
+ kernel_el.text = kernel
+ else:
+ changed = True
+ kernel_el = SubElement(os, 'kernel')
+ kernel_el.text = kernel
+
+ if initrd_el is not None:
+ if initrd_el.text != initrd:
+ changed = True
+ initrd_el.text = initrd
+ else:
+ changed = True
+ initrd_el = SubElement(os, 'initrd')
+ initrd_el.text = initrd
+
+ if cmdline_el is not None:
+ if cmdline_el.text != cmdline:
+ changed = True
+ cmdline_el.text = cmdline
+ else:
+ changed = True
+ cmdline_el = SubElement(os, 'cmdline')
+ cmdline_el.text = cmdline
+
+ ### Enable/disable bootmenu
+ bootmenu_el = os.find('bootmenu')
+ if bootmenu and bootmenu_el is not None:
+ bootmenu_enabled = bootmenu_el.get('enable')
+ if bootmenu_enabled != 'yes':
+ changed = True
+ bootmenu_el.set('enable', 'yes')
+ elif bootmenu:
+ bootmenu_el = SubElement(os, 'bootmenu')
+ bootmenu_el.set('enable', 'yes')
+ changed = True
+ elif bootmenu_el is not None:
+ os.remove(bootmenu_el)
+ changed = True
+
+ ### save back
+ conn.defineXML( ET.tostring(doc) )
+
+ if start and not domain.isActive():
+ changed = True
+ domain.create()
+
+ module.exit_json(changed=changed)
+
+if __name__ == '__main__':
+ main()