mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-15 08:21:31 +00:00
Remove deprecated modules scheduled for removal in 3.0.0 (#1924)
* Remove deprecated modules scheduled for removal in 3.0.0. * Update BOTMETA. * Update ignore-2.12.txt. * Next release will be 3.0.0.
This commit is contained in:
parent
98af8161b2
commit
081c534d40
161 changed files with 167 additions and 10434 deletions
|
|
@ -1,203 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2016, Red Hat, Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: gluster_heal_info
|
||||
short_description: Gather information on self-heal or rebalance status
|
||||
deprecated:
|
||||
removed_in: 3.0.0
|
||||
why: The gluster modules have migrated to the gluster.gluster collection.
|
||||
alternative: Use M(gluster.gluster.gluster_heal_info) instead.
|
||||
author: "Devyani Kota (@devyanikota)"
|
||||
description:
|
||||
- Gather facts about either self-heal or rebalance status.
|
||||
- This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)!
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The volume name.
|
||||
required: true
|
||||
aliases: ['volume']
|
||||
status_filter:
|
||||
default: "self-heal"
|
||||
choices: ["self-heal", "rebalance"]
|
||||
description:
|
||||
- Determines which facts are to be returned.
|
||||
- If the C(status_filter) is C(self-heal), status of self-heal, along with the number of files still in process are returned.
|
||||
- If the C(status_filter) is C(rebalance), rebalance status is returned.
|
||||
requirements:
|
||||
- GlusterFS > 3.2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather self-heal facts about all gluster hosts in the cluster
|
||||
community.general.gluster_heal_info:
|
||||
name: test_volume
|
||||
status_filter: self-heal
|
||||
register: self_heal_status
|
||||
- ansible.builtin.debug:
|
||||
var: self_heal_status
|
||||
|
||||
- name: Gather rebalance facts about all gluster hosts in the cluster
|
||||
community.general.gluster_heal_info:
|
||||
name: test_volume
|
||||
status_filter: rebalance
|
||||
register: rebalance_status
|
||||
- ansible.builtin.debug:
|
||||
var: rebalance_status
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
name:
|
||||
description: GlusterFS volume name
|
||||
returned: always
|
||||
type: str
|
||||
status_filter:
|
||||
description: Whether self-heal or rebalance status is to be returned
|
||||
returned: always
|
||||
type: str
|
||||
heal_info:
|
||||
description: List of files that still need healing process
|
||||
returned: On success
|
||||
type: list
|
||||
rebalance_status:
|
||||
description: Status of rebalance operation
|
||||
returned: On success
|
||||
type: list
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
glusterbin = ''
|
||||
|
||||
|
||||
def run_gluster(gargs, **kwargs):
    """Run a gluster CLI command in script mode and return its stdout.

    A non-zero exit status or any raised exception aborts the whole module
    run via ``module.fail_json``.
    """
    global glusterbin
    global module
    args = [glusterbin, '--mode=script'] + list(gargs)
    cmdline = ' '.join(args)
    try:
        rc, out, err = module.run_command(args, **kwargs)
        if rc != 0:
            module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
                             (cmdline, rc, out or err), exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg='error running gluster (%s) command: %s' % (cmdline,
                                                                         to_native(e)), exception=traceback.format_exc())
    return out
|
||||
|
||||
|
||||
def get_self_heal_status(name):
    """Return per-brick self-heal status entries for volume *name*."""
    out = run_gluster(['volume', 'heal', name, 'info'],
                      environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
    return parse_heal_info(out.split("\n"))


def parse_heal_info(raw_out):
    """Parse the lines of 'gluster volume heal <vol> info' output.

    Returns a list of dicts with 'brick', 'status' and 'no_of_entries'
    keys, one per brick section of the output.
    """
    heal_info = []
    # Start with an empty record so that unexpected output (e.g. a Status
    # line before any Brick line) cannot raise NameError — the old code
    # left br_dict unbound until the first 'Brick' line.
    br_dict = {}
    for line in raw_out:
        if 'Brick' in line:
            br_dict = {}
            stripped = line.strip()
            # BUGFIX: the old code used line.strip().strip("Brick"), which
            # strips any of the characters B/r/i/c/k from BOTH ends and so
            # mangled brick paths ending in those letters (e.g. ".../brick").
            # Remove only the literal 'Brick' prefix instead.
            if stripped.startswith('Brick'):
                stripped = stripped[len('Brick'):]
            br_dict['brick'] = stripped
        elif 'Status' in line:
            br_dict['status'] = line.split(":")[1].strip()
        elif 'Number' in line:
            br_dict['no_of_entries'] = line.split(":")[1].strip()
        elif line.startswith('/') or line.startswith('<') or '\n' in line:
            # Individual file entries awaiting heal are not collected here.
            continue
        else:
            # A separator/blank line terminates the current brick record.
            if br_dict:
                heal_info.append(br_dict)
            br_dict = {}
    return heal_info
|
||||
|
||||
|
||||
def get_rebalance_status(name):
    """Return rebalance status entries for volume *name*.

    Parses the whitespace-aligned table printed by
    'gluster volume rebalance <vol> status' and keeps only rows whose
    status is 'in progress' or 'completed'.
    """
    # Force the C locale so column headers/status words are stable English.
    out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
    raw_out = out.split("\n")
    rebalance_status = []
    # return the files that are either still 'in progress' state or 'completed'.
    for line in raw_out:
        # Collapse runs of whitespace so the table columns split cleanly.
        line = " ".join(line.split())
        line_vals = line.split(" ")
        # Skip the header row and the '----' separator rows.
        if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
            continue
        node_dict = {}
        # Rows with too few columns (blank line splits to [''], footer rows)
        # carry no per-node data.  NOTE(review): column positions below
        # assume the classic rebalance table layout — verify against the
        # gluster version in use.
        if len(line_vals) == 1 or len(line_vals) == 4:
            continue
        node_dict['node'] = line_vals[0]
        node_dict['rebalanced_files'] = line_vals[1]
        node_dict['failures'] = line_vals[4]
        if 'in progress' in line:
            # 'in progress' spans two whitespace-split tokens; rejoin them.
            node_dict['status'] = line_vals[5] + line_vals[6]
            rebalance_status.append(node_dict)
        elif 'completed' in line:
            node_dict['status'] = line_vals[5]
            rebalance_status.append(node_dict)
    return rebalance_status
|
||||
|
||||
|
||||
def is_invalid_gluster_version(module, required_version):
    """Return True when the installed GlusterFS is older than *required_version*."""
    gluster_bin = module.get_bin_path('gluster', True)
    rc, stdout, stderr = module.run_command(gluster_bin + ' --version')
    # The first stdout line looks like: "glusterfs 3.8.4 built on ...".
    first_line = stdout.split('\n')[0]
    installed = first_line.split(' ')[1]
    # Anything older than the required version is considered invalid.
    return LooseVersion(installed) < LooseVersion(required_version)
|
||||
|
||||
|
||||
def main():
    """Entry point: gather self-heal or rebalance info for one volume."""
    global module
    global glusterbin
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume']),
            status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
        ),
    )
    # The module used to be shipped as 'gluster_heal_facts'; detect which
    # name we were invoked under to keep the legacy ansible_facts contract.
    is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts')
    if is_old_facts:
        module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    glusterbin = module.get_bin_path('gluster', True)
    required_version = "3.2"
    status_filter = module.params['status_filter']
    volume_name = module.params['name']
    heal_info = ''
    rebalance_status = ''

    # Verify if required GlusterFS version is installed
    if is_invalid_gluster_version(module, required_version):
        module.fail_json(msg="GlusterFS version > %s is required" %
                         required_version)

    # Only one of the two result lists is populated, chosen by status_filter.
    try:
        if status_filter == "self-heal":
            heal_info = get_self_heal_status(volume_name)
        elif status_filter == "rebalance":
            rebalance_status = get_rebalance_status(volume_name)
    except Exception as e:
        module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())

    facts = {}
    facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}

    # Legacy name returns ansible_facts; the renamed module returns the
    # same data as plain module results.
    if is_old_facts:
        module.exit_json(ansible_facts=facts)
    else:
        module.exit_json(**facts)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,176 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
|
||||
# Copyright 2018 Red Hat, Inc.
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: gluster_peer
|
||||
short_description: Attach/Detach peers to/from the cluster
|
||||
deprecated:
|
||||
removed_in: 3.0.0
|
||||
why: The gluster modules have migrated to the gluster.gluster collection.
|
||||
alternative: Use M(gluster.gluster.gluster_peer) instead.
|
||||
description:
|
||||
- Create or diminish a GlusterFS trusted storage pool. A set of nodes can be
|
||||
added into an existing trusted storage pool or a new storage pool can be
|
||||
formed. Or, nodes can be removed from an existing trusted storage pool.
|
||||
author: Sachidananda Urs (@sac)
|
||||
options:
|
||||
state:
|
||||
choices: ["present", "absent"]
|
||||
default: "present"
|
||||
description:
|
||||
- Determines whether the nodes should be attached to the pool or
|
||||
removed from the pool. If the state is present, nodes will be
|
||||
attached to the pool. If state is absent, nodes will be detached
|
||||
from the pool.
|
||||
type: str
|
||||
nodes:
|
||||
description:
|
||||
- List of nodes that have to be probed into the pool.
|
||||
required: true
|
||||
type: list
|
||||
force:
|
||||
type: bool
|
||||
default: false
|
||||
description:
|
||||
- Applicable only while removing the nodes from the pool. gluster
|
||||
will refuse to detach a node from the pool if any one of the node
|
||||
is down, in such cases force can be used.
|
||||
requirements:
|
||||
- GlusterFS > 3.2
|
||||
notes:
|
||||
- This module does not support check mode.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a trusted storage pool
|
||||
community.general.gluster_peer:
|
||||
state: present
|
||||
nodes:
|
||||
- 10.0.1.5
|
||||
- 10.0.1.10
|
||||
|
||||
- name: Delete a node from the trusted storage pool
|
||||
community.general.gluster_peer:
|
||||
state: absent
|
||||
nodes:
|
||||
- 10.0.1.10
|
||||
|
||||
- name: Delete a node from the trusted storage pool by force
|
||||
community.general.gluster_peer:
|
||||
state: absent
|
||||
nodes:
|
||||
- 10.0.0.1
|
||||
force: true
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
|
||||
class Peer(object):
    """Attach ('probe') or detach gluster peers for the trusted pool."""

    def __init__(self, module):
        self.module = module
        self.state = self.module.params['state']
        self.nodes = self.module.params['nodes']
        self.glustercmd = self.module.get_bin_path('gluster', True)
        # Force the C locale so gluster output parsing is stable.
        self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        self.action = ''
        self.force = ''

    def gluster_peer_ops(self):
        """Decide probe vs detach from the requested state and execute it."""
        if not self.nodes:
            self.module.fail_json(msg="nodes list cannot be empty")
        self.force = 'force' if self.module.params.get('force') else ''
        if self.state == 'present':
            # Only probe hosts that are not already in the pool.
            self.nodes = self.get_to_be_probed_hosts(self.nodes)
            self.action = 'probe'
            # In case of peer probe, we do not need `force'
            self.force = ''
        else:
            self.action = 'detach'
        self.call_peer_commands()

    def get_to_be_probed_hosts(self, hosts):
        """Return the subset of *hosts* not yet listed by 'gluster pool list'."""
        peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
        rc, output, err = self.module.run_command(peercmd,
                                                  environ_update=self.lang)
        # 'pool list' prints "UUID\tHostname\tState"; skip the header row
        # and keep the hostname column.
        peers_in_cluster = [line.split('\t')[1].strip() for
                            line in filter(None, output.split('\n')[1:])]
        try:
            peers_in_cluster.remove('localhost')
        except ValueError:
            # It is ok not to have localhost in list
            pass
        hosts_to_be_probed = [host for host in hosts if host not in
                              peers_in_cluster]
        return hosts_to_be_probed

    def call_peer_commands(self):
        """Run 'gluster peer <probe|detach>' for every node; exit the module."""
        result = {}
        result['msg'] = ''
        result['changed'] = False

        for node in self.nodes:
            peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
            if self.force:
                peercmd.append(self.force)
            rc, out, err = self.module.run_command(peercmd,
                                                   environ_update=self.lang)
            if rc:
                result['rc'] = rc
                result['msg'] = err
                # Fail early, do not wait for the loop to finish
                self.module.fail_json(**result)
            else:
                if 'already in peer' in out or \
                        'localhost not needed' in out:
                    # '|= False' is a deliberate no-op: an already-peered
                    # host must not flip 'changed' back once another node
                    # has set it to True.
                    result['changed'] |= False
                else:
                    result['changed'] = True
        self.module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
    """Entry point: validate gluster version and run the peer operation."""
    module = AnsibleModule(
        argument_spec=dict(
            force=dict(type='bool', required=False, default=False),
            nodes=dict(type='list', required=True),
            state=dict(type='str', choices=['absent', 'present'],
                       default='present'),
        ),
        supports_check_mode=False
    )
    pops = Peer(module)
    required_version = "3.2"
    # Verify if required GlusterFS version is installed
    if is_invalid_gluster_version(module, required_version):
        module.fail_json(msg="GlusterFS version > %s is required" %
                         required_version)
    # Peer.call_peer_commands() exits the module via exit_json/fail_json.
    pops.gluster_peer_ops()
|
||||
|
||||
|
||||
def is_invalid_gluster_version(module, required_version):
    """Check whether the installed GlusterFS is older than *required_version*."""
    version_cmd = module.get_bin_path('gluster', True) + ' --version'
    command_result = module.run_command(version_cmd)
    # run_command returns (rc, stdout, stderr); the first stdout line is
    # "glusterfs <version> built on ...".
    banner = command_result[1].split('\n')[0]
    current = banner.split(' ')[1]
    # Older than required -> invalid installation for this module.
    return LooseVersion(current) < LooseVersion(required_version)
|
||||
|
||||
|
||||
# Module entry point when executed by Ansible.
if __name__ == "__main__":
    main()
|
||||
|
|
@ -1,608 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: gluster_volume
|
||||
short_description: Manage GlusterFS volumes
|
||||
deprecated:
|
||||
removed_in: 3.0.0
|
||||
why: The gluster modules have migrated to the gluster.gluster collection.
|
||||
alternative: Use M(gluster.gluster.gluster_volume) instead.
|
||||
description:
|
||||
- Create, remove, start, stop and tune GlusterFS volumes
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The volume name.
|
||||
required: true
|
||||
aliases: ['volume']
|
||||
state:
|
||||
description:
|
||||
- Use present/absent ensure if a volume exists or not.
|
||||
Use started/stopped to control its availability.
|
||||
required: true
|
||||
choices: ['absent', 'present', 'started', 'stopped']
|
||||
cluster:
|
||||
description:
|
||||
- List of hosts to use for probing and brick setup.
|
||||
host:
|
||||
description:
|
||||
- Override local hostname (for peer probing purposes).
|
||||
replicas:
|
||||
description:
|
||||
- Replica count for volume.
|
||||
arbiters:
|
||||
description:
|
||||
- Arbiter count for volume.
|
||||
stripes:
|
||||
description:
|
||||
- Stripe count for volume.
|
||||
disperses:
|
||||
description:
|
||||
- Disperse count for volume.
|
||||
redundancies:
|
||||
description:
|
||||
- Redundancy count for volume.
|
||||
transport:
|
||||
description:
|
||||
- Transport type for volume.
|
||||
default: tcp
|
||||
choices: [ tcp, rdma, 'tcp,rdma' ]
|
||||
bricks:
|
||||
description:
|
||||
- Brick paths on servers. Multiple brick paths can be separated by commas.
|
||||
aliases: [ brick ]
|
||||
start_on_create:
|
||||
description:
|
||||
- Controls whether the volume is started after creation or not.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
rebalance:
|
||||
description:
|
||||
- Controls whether the cluster is rebalanced after changes.
|
||||
type: bool
|
||||
default: 'no'
|
||||
directory:
|
||||
description:
|
||||
- Directory for limit-usage.
|
||||
options:
|
||||
description:
|
||||
- A dictionary/hash with options/settings for the volume.
|
||||
quota:
|
||||
description:
|
||||
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
|
||||
force:
|
||||
description:
|
||||
- If brick is being created in the root partition, module will fail.
|
||||
Set force to true to override this behaviour.
|
||||
type: bool
|
||||
default: false
|
||||
notes:
|
||||
- Requires cli tools for GlusterFS on servers.
|
||||
- Will add new bricks, but not remove them.
|
||||
author:
|
||||
- Taneli Leppä (@rosmo)
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Create gluster volume
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: test1
|
||||
bricks: /bricks/brick1/g1
|
||||
rebalance: yes
|
||||
cluster:
|
||||
- 192.0.2.10
|
||||
- 192.0.2.11
|
||||
run_once: true
|
||||
|
||||
- name: Tune
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: test1
|
||||
options:
|
||||
performance.cache-size: 256MB
|
||||
|
||||
- name: Set multiple options on GlusterFS volume
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: test1
|
||||
options:
|
||||
{ performance.cache-size: 128MB,
|
||||
write-behind: 'off',
|
||||
quick-read: 'on'
|
||||
}
|
||||
|
||||
- name: Start gluster volume
|
||||
community.general.gluster_volume:
|
||||
state: started
|
||||
name: test1
|
||||
|
||||
- name: Limit usage
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: test1
|
||||
directory: /foo
|
||||
quota: 20.0MB
|
||||
|
||||
- name: Stop gluster volume
|
||||
community.general.gluster_volume:
|
||||
state: stopped
|
||||
name: test1
|
||||
|
||||
- name: Remove gluster volume
|
||||
community.general.gluster_volume:
|
||||
state: absent
|
||||
name: test1
|
||||
|
||||
- name: Create gluster volume with multiple bricks
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: test2
|
||||
bricks: /bricks/brick1/g2,/bricks/brick2/g2
|
||||
cluster:
|
||||
- 192.0.2.10
|
||||
- 192.0.2.11
|
||||
run_once: true
|
||||
|
||||
- name: Remove the bricks from gluster volume
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: testvol
|
||||
bricks: /bricks/brick1/b1,/bricks/brick2/b2
|
||||
cluster:
|
||||
- 10.70.42.85
|
||||
force: true
|
||||
run_once: true
|
||||
|
||||
- name: Reduce cluster configuration
|
||||
community.general.gluster_volume:
|
||||
state: present
|
||||
name: testvol
|
||||
bricks: /bricks/brick3/b1,/bricks/brick4/b2
|
||||
replicas: 2
|
||||
cluster:
|
||||
- 10.70.42.85
|
||||
force: true
|
||||
run_once: true
|
||||
"""
|
||||
|
||||
import re
|
||||
import socket
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
glusterbin = ''
|
||||
|
||||
|
||||
def run_gluster(gargs, **kwargs):
    """Run a gluster CLI command (script mode) and return its stdout.

    Fails the whole module run (via fail_json) on a non-zero exit status
    or any exception from run_command.
    """
    global glusterbin
    global module
    # '--mode=script' suppresses interactive confirmation prompts.
    args = [glusterbin, '--mode=script']
    args.extend(gargs)
    try:
        rc, out, err = module.run_command(args, **kwargs)
        if rc != 0:
            module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
                             (' '.join(args), rc, out or err), exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
                                                                         to_native(e)), exception=traceback.format_exc())
    # fail_json exits the process, so reaching here means success.
    return out
|
||||
|
||||
|
||||
def run_gluster_nofail(gargs, **kwargs):
    """Run a gluster CLI command; return stdout, or None on non-zero exit."""
    global glusterbin
    global module
    command = [glusterbin] + list(gargs)
    rc, out, dummy = module.run_command(command, **kwargs)
    return out if rc == 0 else None
|
||||
|
||||
|
||||
def get_peers():
    """Parse 'gluster peer status' into {hostname: [uuid, state]}.

    Alternate hostnames listed under 'Other names:' are added as extra
    keys pointing at the same uuid/state pair.
    """
    out = run_gluster(['peer', 'status'])
    peers = {}
    hostname = None
    uuid = None
    state = None
    # True while we are inside an 'Other names:' section of a peer record.
    shortNames = False
    for row in out.split('\n'):
        if ': ' in row:
            key, value = row.split(': ')
            if key.lower() == 'hostname':
                hostname = value
                shortNames = False
            if key.lower() == 'uuid':
                uuid = value
            if key.lower() == 'state':
                state = value
                # 'State:' is the last field of a record — commit the peer.
                peers[hostname] = [uuid, state]
        elif row.lower() == 'other names:':
            shortNames = True
        elif row != '' and shortNames is True:
            # Aliases inherit the uuid/state of the current peer.
            peers[row] = [uuid, state]
        elif row == '':
            shortNames = False
    return peers
|
||||
|
||||
|
||||
def get_volumes():
    """Return parsed 'gluster volume info' output as {volume_name: info_dict}."""
    out = run_gluster(['volume', 'info'])
    return parse_volume_info(out)


def parse_volume_info(out):
    """Parse the text of 'gluster volume info' into {name: volume_dict}.

    Each volume dict may carry: name, id, status, transport, replicas,
    bricks, arbiters, options and a boolean 'quota' flag.
    """
    volumes = {}
    volume = {}
    for row in out.split('\n'):
        if ': ' in row:
            key, value = row.split(': ')
            if key.lower() == 'volume name':
                volume['name'] = value
                volume['options'] = {}
                volume['quota'] = False
            if key.lower() == 'volume id':
                volume['id'] = value
            if key.lower() == 'status':
                volume['status'] = value
            if key.lower() == 'transport-type':
                volume['transport'] = value
            if value.lower().endswith(' (arbiter)'):
                if 'arbiters' not in volume:
                    volume['arbiters'] = []
                # Drop the ' (arbiter)' suffix (10 characters).
                value = value[:-10]
                volume['arbiters'].append(value)
            elif key.lower() == 'number of bricks':
                # BUGFIX: the old code stored value[-1:], i.e. only the LAST
                # character, which corrupted totals >= 10 ("1 x 12 = 12"
                # became "2").  Keep the full total after the '=' (plain
                # distribute volumes have no '=', so the whole value is used).
                volume['replicas'] = value.split('=')[-1].strip()
            if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
                if 'bricks' not in volume:
                    volume['bricks'] = []
                volume['bricks'].append(value)
            # Volume options carry a dotted key (e.g. performance.cache-size).
            if '.' in key:
                if 'options' not in volume:
                    volume['options'] = {}
                volume['options'][key] = value
                if key == 'features.quota' and value == 'on':
                    volume['quota'] = True
        else:
            # Any other non-header row (typically a blank line) terminates
            # the current volume record.
            if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
                if len(volume) > 0:
                    volumes[volume['name']] = volume
                    volume = {}
    return volumes
|
||||
|
||||
|
||||
def get_quotas(name, nofail):
    """Return {directory: limit} quota entries for volume *name*.

    With nofail=True a failing 'quota list' yields an empty dict instead
    of aborting the module run.
    """
    quotas = {}
    if nofail:
        out = run_gluster_nofail(['volume', 'quota', name, 'list'])
        if not out:
            return quotas
    else:
        out = run_gluster(['volume', 'quota', name, 'list'])
    # Quota rows start with the absolute directory path; the second
    # whitespace-separated column is the configured limit.
    for line in out.split('\n'):
        if line.startswith('/'):
            fields = re.split(r'\s+', line)
            quotas[fields[0]] = fields[1]
    return quotas
|
||||
|
||||
|
||||
def wait_for_peer(host):
    """Poll up to four times for *host* to reach 'Peer in Cluster' state."""
    for dummy in range(4):
        current_peers = get_peers()
        if host in current_peers:
            peer_state = current_peers[host][1].lower()
            if 'peer in cluster' in peer_state:
                return True
        time.sleep(1)
    return False
|
||||
|
||||
|
||||
def probe(host, myhostname):
    """Probe *host* into the pool and wait for it to join; fail otherwise.

    Probing the local host prints 'localhost' and needs no wait.
    """
    global module
    out = run_gluster(['peer', 'probe', host])
    if out.find('localhost') == -1 and not wait_for_peer(host):
        module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
|
||||
|
||||
|
||||
def probe_all_peers(hosts, peers, myhostname):
    """Probe every host that is not already part of the trusted pool."""
    # Strip surrounding whitespace so membership tests compare exact names.
    for candidate in (h.strip() for h in hosts):
        if candidate not in peers:
            probe(candidate, myhostname)
|
||||
|
||||
|
||||
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
    """Create a new gluster volume via 'gluster volume create'."""
    cmd = ['volume', 'create', name]
    # Optional layout counts are emitted as "<keyword> <count>" pairs,
    # in the order the gluster CLI expects them.
    for keyword, count in (('stripe', stripe), ('replica', replica),
                           ('arbiter', arbiter), ('disperse', disperse),
                           ('redundancy', redundancy)):
        if count:
            cmd.extend([keyword, str(count)])
    cmd.extend(['transport', transport])
    # Each brick path is created on every host of the cluster.
    for brick in bricks:
        for host in hosts:
            cmd.append('%s:%s' % (host, brick))
    if force:
        cmd.append('force')
    run_gluster(cmd)
|
||||
|
||||
|
||||
def start_volume(name):
    """Start volume *name* ('gluster volume start')."""
    run_gluster(['volume', 'start', name])
|
||||
|
||||
|
||||
def stop_volume(name):
    """Stop volume *name* ('gluster volume stop')."""
    run_gluster(['volume', 'stop', name])
|
||||
|
||||
|
||||
def set_volume_option(name, option, parameter):
    """Set a single volume option ('gluster volume set <name> <opt> <val>')."""
    run_gluster(['volume', 'set', name, option, parameter])
|
||||
|
||||
|
||||
def add_bricks(name, new_bricks, stripe, replica, force):
    """Add *new_bricks* to an existing volume ('gluster volume add-brick')."""
    cmd = ['volume', 'add-brick', name]
    # Optional layout counts precede the brick list.
    if stripe:
        cmd += ['stripe', str(stripe)]
    if replica:
        cmd += ['replica', str(replica)]
    cmd += list(new_bricks)
    if force:
        cmd.append('force')
    run_gluster(cmd)
|
||||
|
||||
|
||||
def remove_bricks(name, removed_bricks, force):
    """Remove bricks from a volume: 'remove-brick ... start', poll, then commit.

    gluster requires a 'start' followed by a 'commit' once data migration
    completes; this polls the status of the last brick until it reports
    'completed' or the retry budget is exhausted.
    """
    # max-tries=12 with default_interval=10 secs
    max_tries = 12
    retries = 0
    success = False
    args = ['volume', 'remove-brick', name]
    args.extend(removed_bricks)
    # create a copy of args to use for commit operation
    args_c = args[:]
    args.append('start')
    run_gluster(args)
    # remove-brick operation needs to be followed by commit operation.
    if not force:
        module.fail_json(msg="Force option is mandatory.")
    else:
        while retries < max_tries:
            # NOTE(review): only the last brick's status is polled —
            # presumably the slowest/representative one; verify that all
            # bricks finish before commit on the gluster version in use.
            last_brick = removed_bricks[-1]
            out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
            for row in out.split('\n')[1:]:
                if 'completed' in row:
                    # remove-brick successful, call commit operation.
                    args_c.append('commit')
                    out = run_gluster(args_c)
                    success = True
                    break
                else:
                    time.sleep(10)
            if success:
                break
            retries += 1
        if not success:
            # remove-brick still in process, needs to be committed after completion.
            module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
                                 "Commit operation needs to be followed.")
|
||||
|
||||
|
||||
def reduce_config(name, removed_bricks, replicas, force):
    """Shrink the replica count by removing bricks ('remove-brick replica N').

    Aborts if any brick still has self-heal entries pending, since removing
    bricks mid-heal risks data loss.  The 'force' flag is mandatory.
    """
    out = run_gluster(['volume', 'heal', name, 'info'])
    summary = out.split("\n")
    for line in summary:
        # 'Number of entries: N' lines with N != 0 mean heal is in progress.
        if 'Number' in line and int(line.split(":")[1].strip()) != 0:
            module.fail_json(msg="Operation aborted, self-heal in progress.")
    args = ['volume', 'remove-brick', name, 'replica', replicas]
    args.extend(removed_bricks)
    if force:
        args.append('force')
    else:
        module.fail_json(msg="Force option is mandatory")
    run_gluster(args)
|
||||
|
||||
|
||||
def do_rebalance(name):
    """Kick off a rebalance run ('gluster volume rebalance <name> start')."""
    run_gluster(['volume', 'rebalance', name, 'start'])
|
||||
|
||||
|
||||
def enable_quota(name):
    """Enable the quota feature on volume *name*."""
    run_gluster(['volume', 'quota', name, 'enable'])
|
||||
|
||||
|
||||
def set_quota(name, directory, value):
    """Set a limit-usage quota of *value* on *directory* of volume *name*."""
    run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
|
||||
|
||||
|
||||
def main():
    """Entry point: converge the volume to the requested state.

    Handles create/delete, start/stop, brick add/remove, quota and option
    management, and optional rebalancing, then exits with ansible_facts.
    """
    # MAIN

    global module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume']),
            state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
            cluster=dict(type='list'),
            host=dict(type='str'),
            stripes=dict(type='int'),
            replicas=dict(type='int'),
            arbiters=dict(type='int'),
            disperses=dict(type='int'),
            redundancies=dict(type='int'),
            transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
            bricks=dict(type='str', aliases=['brick']),
            start_on_create=dict(type='bool', default=True),
            rebalance=dict(type='bool', default=False),
            options=dict(type='dict', default={}),
            quota=dict(type='str'),
            directory=dict(type='str'),
            force=dict(type='bool', default=False),
        ),
    )

    global glusterbin
    glusterbin = module.get_bin_path('gluster', True)

    changed = False

    action = module.params['state']
    volume_name = module.params['name']
    cluster = module.params['cluster']
    brick_paths = module.params['bricks']
    stripes = module.params['stripes']
    replicas = module.params['replicas']
    arbiters = module.params['arbiters']
    disperses = module.params['disperses']
    redundancies = module.params['redundancies']
    transport = module.params['transport']
    myhostname = module.params['host']
    start_on_create = module.boolean(module.params['start_on_create'])
    rebalance = module.boolean(module.params['rebalance'])
    force = module.boolean(module.params['force'])

    if not myhostname:
        myhostname = socket.gethostname()

    # Clean up if last element is empty. Consider that yml can look like this:
    #   cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
    if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
        cluster = cluster[0:-1]

    if cluster is None:
        cluster = []

    # 'bricks' is a comma-separated string of paths.
    if brick_paths is not None and "," in brick_paths:
        brick_paths = brick_paths.split(",")
    else:
        brick_paths = [brick_paths]

    options = module.params['options']
    quota = module.params['quota']
    directory = module.params['directory']

    # get current state info
    peers = get_peers()
    volumes = get_volumes()
    quotas = {}
    # Quotas are only readable while the volume is started.
    if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
        quotas = get_quotas(volume_name, True)

    # do the work!
    if action == 'absent':
        if volume_name in volumes:
            # A volume must be stopped before it can be deleted.
            if volumes[volume_name]['status'].lower() != 'stopped':
                stop_volume(volume_name)
            run_gluster(['volume', 'delete', volume_name])
            changed = True

    if action == 'present':
        probe_all_peers(cluster, peers, myhostname)

        # create if it doesn't exist
        if volume_name not in volumes:
            create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
            volumes = get_volumes()
            changed = True

        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                start_volume(volume_name)
                changed = True

            # switch bricks
            new_bricks = []
            removed_bricks = []
            all_bricks = []
            bricks_in_volume = volumes[volume_name]['bricks']

            # Build the desired brick set (every path on every cluster node)
            # and collect the ones not yet in the volume.
            for node in cluster:
                for brick_path in brick_paths:
                    brick = '%s:%s' % (node, brick_path)
                    all_bricks.append(brick)
                    if brick not in bricks_in_volume:
                        new_bricks.append(brick)

            # Only shrink when nothing is being added and the desired set is
            # strictly smaller than what the volume currently has.
            if not new_bricks and len(all_bricks) > 0 and \
                    len(all_bricks) < len(bricks_in_volume):
                for brick in bricks_in_volume:
                    if brick not in all_bricks:
                        removed_bricks.append(brick)

            if new_bricks:
                add_bricks(volume_name, new_bricks, stripes, replicas, force)
                changed = True

            if removed_bricks:
                # Lowering the replica count takes the dedicated reduce path.
                if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
                    reduce_config(volume_name, removed_bricks, str(replicas), force)
                else:
                    remove_bricks(volume_name, removed_bricks, force)
                changed = True

            # handle quotas
            if quota:
                if not volumes[volume_name]['quota']:
                    enable_quota(volume_name)
                quotas = get_quotas(volume_name, False)
                if directory not in quotas or quotas[directory] != quota:
                    set_quota(volume_name, directory, quota)
                    changed = True

            # set options
            for option in options.keys():
                if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
                    set_volume_option(volume_name, option, options[option])
                    changed = True

        else:
            module.fail_json(msg='failed to create volume %s' % volume_name)

    if action != 'absent' and volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)

    if action == 'started':
        if volumes[volume_name]['status'].lower() != 'started':
            start_volume(volume_name)
            changed = True

    if action == 'stopped':
        if volumes[volume_name]['status'].lower() != 'stopped':
            stop_volume(volume_name)
            changed = True

    if changed:
        # Refresh facts after any modification; optionally rebalance.
        volumes = get_volumes()
        if rebalance:
            do_rebalance(volume_name)

    facts = {}
    facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}

    module.exit_json(changed=changed, ansible_facts=facts)


if __name__ == '__main__':
    main()
|
||||
|
|
@ -1,613 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
|
||||
# (c) 2018-2019, NetApp, Inc
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: na_ontap_gather_facts
|
||||
deprecated:
|
||||
removed_in: 3.0.0 # was Ansible 2.13
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(netapp.ontap.na_ontap_info) instead.
|
||||
author: Piotr Olczak (@dprts) <polczak@redhat.com>
|
||||
extends_documentation_fragment:
|
||||
- community.general._netapp.na_ontap
|
||||
|
||||
short_description: NetApp information gatherer
|
||||
description:
|
||||
- This module allows you to gather various information about ONTAP configuration
|
||||
requirements:
|
||||
- netapp_lib
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Returns "info"
|
||||
default: info
|
||||
choices: [info]
|
||||
type: str
|
||||
gather_subset:
|
||||
description:
|
||||
- When supplied, this argument will restrict the facts collected
|
||||
to a given subset. Possible values for this argument include
|
||||
C(aggregate_info), C(cluster_node_info), C(igroup_info), C(lun_info), C(net_dns_info),
|
||||
C(net_ifgrp_info),
|
||||
C(net_interface_info), C(net_port_info), C(nvme_info), C(nvme_interface_info),
|
||||
C(nvme_namespace_info), C(nvme_subsystem_info), C(ontap_version),
|
||||
C(qos_adaptive_policy_info), C(qos_policy_info), C(security_key_manager_key_info),
|
||||
C(security_login_account_info), C(storage_failover_info), C(volume_info),
|
||||
C(vserver_info), C(vserver_login_banner_info), C(vserver_motd_info), C(vserver_nfs_info)
|
||||
Can specify a list of values to include a larger subset. Values can also be used
|
||||
with an initial C(M(!)) to specify that a specific subset should
|
||||
not be collected.
|
||||
- nvme is supported with ONTAP 9.4 onwards.
|
||||
- use C(help) to get a list of supported facts for your system.
|
||||
default: all
|
||||
type: list
|
||||
elements: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get NetApp info (Password Authentication)
|
||||
community.general.na_ontap_gather_facts:
|
||||
state: info
|
||||
hostname: "na-vsim"
|
||||
username: "admin"
|
||||
password: "admins_password"
|
||||
- ansible.builtin.debug:
|
||||
var: ontap_facts
|
||||
- name: Limit Fact Gathering to Aggregate Information
|
||||
community.general.na_ontap_gather_facts:
|
||||
state: info
|
||||
hostname: "na-vsim"
|
||||
username: "admin"
|
||||
password: "admins_password"
|
||||
gather_subset: "aggregate_info"
|
||||
- name: Limit Fact Gathering to Volume and Lun Information
|
||||
community.general.na_ontap_gather_facts:
|
||||
state: info
|
||||
hostname: "na-vsim"
|
||||
username: "admin"
|
||||
password: "admins_password"
|
||||
gather_subset:
|
||||
- volume_info
|
||||
- lun_info
|
||||
- name: Gather all facts except for volume and lun information
|
||||
community.general.na_ontap_gather_facts:
|
||||
state: info
|
||||
hostname: "na-vsim"
|
||||
username: "admin"
|
||||
password: "admins_password"
|
||||
gather_subset:
|
||||
- "!volume_info"
|
||||
- "!lun_info"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
ontap_facts:
|
||||
description: Returns various information about NetApp cluster configuration
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"ontap_facts": {
|
||||
"aggregate_info": {...},
|
||||
"cluster_node_info": {...},
|
||||
"net_dns_info": {...},
|
||||
"net_ifgrp_info": {...},
|
||||
"net_interface_info": {...},
|
||||
"net_port_info": {...},
|
||||
"security_key_manager_key_info": {...},
|
||||
"security_login_account_info": {...},
|
||||
"volume_info": {...},
|
||||
"lun_info": {...},
|
||||
"storage_failover_info": {...},
|
||||
"vserver_login_banner_info": {...},
|
||||
"vserver_motd_info": {...},
|
||||
"vserver_info": {...},
|
||||
"vserver_nfs_info": {...},
|
||||
"ontap_version": {...},
|
||||
"igroup_info": {...},
|
||||
"qos_policy_info": {...},
|
||||
"qos_adaptive_policy_info": {...}
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
|
||||
|
||||
try:
|
||||
import xmltodict
|
||||
HAS_XMLTODICT = True
|
||||
except ImportError:
|
||||
HAS_XMLTODICT = False
|
||||
|
||||
try:
|
||||
import json
|
||||
HAS_JSON = True
|
||||
except ImportError:
|
||||
HAS_JSON = False
|
||||
|
||||
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
|
||||
|
||||
|
||||
class NetAppONTAPGatherFacts(object):
    '''Class with gather facts methods.

    Holds a ZAPI connection (self.server) and a registry (self.fact_subsets)
    mapping each supported gather_subset name to the bound method and keyword
    arguments that collect that subset.
    '''

    def __init__(self, module):
        # AnsibleModule instance; provides connection parameters and fail_json.
        self.module = module
        # Gathered facts accumulate here, keyed by subset name.
        self.netapp_info = dict()

        # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
        # for starting this
        # min_version identifies the ontapi version which supports this ZAPI
        # use 0 if it is supported since 9.1
        self.fact_subsets = {
            'net_dns_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'net-dns-get-iter',
                    'attribute': 'net-dns-info',
                    'field': 'vserver-name',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'net_interface_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'net-interface-get-iter',
                    'attribute': 'net-interface-info',
                    'field': 'interface-name',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'net_port_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'net-port-get-iter',
                    'attribute': 'net-port-info',
                    # tuple field -> result keys are 'node:port'
                    'field': ('node', 'port'),
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'cluster_node_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'cluster-node-get-iter',
                    'attribute': 'cluster-node-info',
                    'field': 'node-name',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'security_login_account_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'security-login-get-iter',
                    'attribute': 'security-login-account-info',
                    'field': ('vserver', 'user-name', 'application', 'authentication-method'),
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'aggregate_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'aggr-get-iter',
                    'attribute': 'aggr-attributes',
                    'field': 'aggregate-name',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'volume_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'volume-get-iter',
                    'attribute': 'volume-attributes',
                    'field': ('name', 'owning-vserver-name'),
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'lun_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'lun-get-iter',
                    'attribute': 'lun-info',
                    'field': 'path',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'storage_failover_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'cf-get-iter',
                    'attribute': 'storage-failover-info',
                    'field': 'node',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'vserver_motd_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'vserver-motd-get-iter',
                    'attribute': 'vserver-motd-info',
                    'field': 'vserver',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'vserver_login_banner_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'vserver-login-banner-get-iter',
                    'attribute': 'vserver-login-banner-info',
                    'field': 'vserver',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'security_key_manager_key_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'security-key-manager-key-get-iter',
                    'attribute': 'security-key-manager-key-info',
                    'field': ('node', 'key-id'),
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'vserver_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'vserver-get-iter',
                    'attribute': 'vserver-info',
                    'field': 'vserver-name',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'vserver_nfs_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'nfs-service-get-iter',
                    'attribute': 'nfs-info',
                    'field': 'vserver',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            # ifgrp data has no single get-iter call; see get_ifgrp_info below.
            'net_ifgrp_info': {
                'method': self.get_ifgrp_info,
                'kwargs': {},
                'min_version': '0',
            },
            'ontap_version': {
                'method': self.ontapi,
                'kwargs': {},
                'min_version': '0',
            },
            'system_node_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'system-node-get-iter',
                    'attribute': 'node-details-info',
                    'field': 'node',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'igroup_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'igroup-get-iter',
                    'attribute': 'initiator-group-info',
                    'field': ('vserver', 'initiator-group-name'),
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            'qos_policy_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'qos-policy-group-get-iter',
                    'attribute': 'qos-policy-group-info',
                    'field': 'policy-group',
                    'query': {'max-records': '1024'},
                },
                'min_version': '0',
            },
            # supported in ONTAP 9.3 and onwards
            'qos_adaptive_policy_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'qos-adaptive-policy-group-get-iter',
                    'attribute': 'qos-adaptive-policy-group-info',
                    'field': 'policy-group',
                    'query': {'max-records': '1024'},
                },
                'min_version': '130',
            },
            # supported in ONTAP 9.4 and onwards
            'nvme_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'nvme-get-iter',
                    'attribute': 'nvme-target-service-info',
                    'field': 'vserver',
                    'query': {'max-records': '1024'},
                },
                'min_version': '140',
            },
            'nvme_interface_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'nvme-interface-get-iter',
                    'attribute': 'nvme-interface-info',
                    'field': 'vserver',
                    'query': {'max-records': '1024'},
                },
                'min_version': '140',
            },
            'nvme_subsystem_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'nvme-subsystem-get-iter',
                    'attribute': 'nvme-subsystem-info',
                    'field': 'subsystem',
                    'query': {'max-records': '1024'},
                },
                'min_version': '140',
            },
            'nvme_namespace_info': {
                'method': self.get_generic_get_iter,
                'kwargs': {
                    'call': 'nvme-namespace-get-iter',
                    'attribute': 'nvme-namespace-info',
                    'field': 'path',
                    'query': {'max-records': '1024'},
                },
                'min_version': '140',
            },
        }

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def ontapi(self):
        '''Return the ONTAPI minor version as a string ('0' when not reported).

        Exits via fail_json on any ZAPI error.
        '''

        api = 'system-get-ontapi-version'
        api_call = netapp_utils.zapi.NaElement(api)
        try:
            results = self.server.invoke_successfully(api_call, enable_tunneling=False)
            ontapi_version = results.get_child_content('minor-version')
            return ontapi_version if ontapi_version is not None else '0'
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error calling API %s: %s" %
                                  (api, to_native(error)), exception=traceback.format_exc())

    def call_api(self, call, query=None):
        '''Run one ZAPI call and return its NaElement result.

        Entries of the optional query dict are added as top-level children
        of the request.  A failing 'security-key-manager-key-get-iter' call
        returns None instead of failing the module
        # NOTE(review): presumably tolerated because the key manager may not
        # be configured on every system -- not stated in source, confirm.
        Any other ZAPI error exits via fail_json.
        '''

        api_call = netapp_utils.zapi.NaElement(call)
        result = None

        if query:
            for key, val in query.items():
                # Can val be nested?
                api_call.add_new_child(key, val)
        try:
            result = self.server.invoke_successfully(api_call, enable_tunneling=False)
            return result
        except netapp_utils.zapi.NaApiError as error:
            if call in ['security-key-manager-key-get-iter']:
                return result
            else:
                self.module.fail_json(msg="Error calling API %s: %s"
                                      % (call, to_native(error)), exception=traceback.format_exc())

    def get_ifgrp_info(self):
        '''Return a dict of ifgrp details keyed by 'node:ifgrp-name'.

        Reuses net_port_info if already gathered; otherwise collects it
        first, then queries each port whose port_type is 'if_group'.
        '''

        try:
            net_port_info = self.netapp_info['net_port_info']
        except KeyError:
            # net_port_info not gathered yet; fetch it via its registry entry.
            net_port_info_calls = self.fact_subsets['net_port_info']
            net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
        interfaces = net_port_info.keys()

        ifgrps = []
        for ifn in interfaces:
            if net_port_info[ifn]['port_type'] == 'if_group':
                ifgrps.append(ifn)

        net_ifgrp_info = dict()
        for ifgrp in ifgrps:
            query = dict()
            # net_port_info keys have the form 'node:port' (see fact_subsets).
            query['node'], query['ifgrp-name'] = ifgrp.split(':')

            tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
                                            attribute='net-ifgrp-info', query=query)
            net_ifgrp_info = net_ifgrp_info.copy()
            net_ifgrp_info.update(tmp)
        return net_ifgrp_info

    def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
        '''Run a get-iter style ZAPI call and normalize its records.

        Returns None when the call yields no attributes.  When field is
        None, returns a list of record dicts; when field is a str or tuple,
        returns a dict keyed by that field's value (tuple parts joined
        with ':').  Keys are hyphen-to-underscore converted.
        '''

        generic_call = self.call_api(call, query)

        # net-port-ifgrp-get returns a single record under 'attributes';
        # every other call wraps its records in 'attributes-list'.
        if call == 'net-port-ifgrp-get':
            children = 'attributes'
        else:
            children = 'attributes-list'

        if generic_call is None:
            return None

        if field is None:
            out = []
        else:
            out = {}

        attributes_list = generic_call.get_child_by_name(children)

        if attributes_list is None:
            return None

        for child in attributes_list.get_children():
            dic = xmltodict.parse(child.to_string(), xml_attribs=False)

            if attribute is not None:
                dic = dic[attribute]

            # json round-trip: presumably normalizes xmltodict's OrderedDicts
            # into plain dicts before key conversion -- TODO confirm.
            if isinstance(field, str):
                unique_key = _finditem(dic, field)
                out = out.copy()
                out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
            elif isinstance(field, tuple):
                unique_key = ':'.join([_finditem(dic, el) for el in field])
                out = out.copy()
                out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
            else:
                out.append(convert_keys(json.loads(json.dumps(dic))))

        return out

    def get_all(self, gather_subset):
        '''Gather every requested subset and return the netapp_info dict.

        Also logs an EMS event on the cluster vserver and always records
        'ontap_version'.  When 'help' is requested, only the sorted list of
        supported subsets is returned under the 'help' key.
        '''

        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)

        self.netapp_info['ontap_version'] = self.ontapi()

        run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
        if 'help' in gather_subset:
            self.netapp_info['help'] = sorted(run_subset)
        else:
            for subset in run_subset:
                call = self.fact_subsets[subset]
                self.netapp_info[subset] = call['method'](**call['kwargs'])

        return self.netapp_info

    def get_subset(self, gather_subset, version):
        '''Resolve the requested subset names into the set to actually run.

        Supports 'all', 'help', and '!name' exclusions.  Unknown names or
        names unsupported at this ONTAPI version exit via fail_json.
        '''

        runable_subsets = set()
        exclude_subsets = set()
        # NOTE(review): this is a lexicographic *string* comparison, not
        # numeric -- e.g. '99' >= '140' is True.  It only behaves as a
        # version check while minor versions share a digit count; confirm.
        usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
        if 'help' in gather_subset:
            return usable_subsets
        for subset in gather_subset:
            if subset == 'all':
                runable_subsets.update(usable_subsets)
                return runable_subsets
            if subset.startswith('!'):
                subset = subset[1:]
                if subset == 'all':
                    return set()
                exclude = True
            else:
                exclude = False

            if subset not in usable_subsets:
                if subset not in self.fact_subsets.keys():
                    self.module.fail_json(msg='Bad subset: %s' % subset)
                self.module.fail_json(msg='Remote system at version %s does not support %s' %
                                      (version, subset))

            if exclude:
                exclude_subsets.add(subset)
            else:
                runable_subsets.add(subset)

        # No explicit includes means "everything usable", minus exclusions.
        if not runable_subsets:
            runable_subsets.update(usable_subsets)

        runable_subsets.difference_update(exclude_subsets)

        return runable_subsets
|
||||
|
||||
|
||||
# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
|
||||
def __finditem(obj, key):
    """Depth-first search of a nested dict for *key*; return its value or None.

    A stored value of None is indistinguishable from an absent key; the
    public wrapper _finditem treats both as "not found".
    """
    try:
        return obj[key]
    except KeyError:
        pass
    for child in obj.values():
        if isinstance(child, dict):
            found = __finditem(child, key)
            if found is not None:
                return found
    return None
|
||||
|
||||
|
||||
def _finditem(obj, key):
    """Return the first value for *key* anywhere in the nested dict *obj*.

    Raises KeyError when the key is absent (or maps to None at every level,
    since the underlying search cannot tell those apart).
    """
    found = __finditem(obj, key)
    if found is None:
        raise KeyError(key)
    return found
|
||||
|
||||
|
||||
def convert_keys(d_param):
    '''Recursively replace hyphens with underscores in dict keys.

    ZAPI/xmltodict output uses hyphenated element names, while Ansible
    facts conventionally use underscores.  Dicts are converted recursively.
    Lists are walked as well, so dicts nested inside list values are
    converted too -- the previous implementation returned lists untouched,
    leaving hyphenated keys in any record that contained a repeated
    element.  Any other value is returned unchanged.
    '''
    if isinstance(d_param, dict):
        return dict((key.replace('-', '_'), convert_keys(val))
                    for key, val in d_param.items())
    if isinstance(d_param, list):
        return [convert_keys(item) for item in d_param]
    return d_param
|
||||
|
||||
|
||||
def main():
    '''Build the Ansible module, gather the requested facts and exit.'''
    spec = netapp_utils.na_ontap_host_argument_spec()
    spec.update(dict(
        state=dict(default='info', choices=['info']),
        gather_subset=dict(default=['all'], type='list', elements='str'),
    ))

    ansible_module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )

    # Both optional libraries are needed to decode ZAPI XML payloads.
    if not HAS_XMLTODICT:
        ansible_module.fail_json(msg="xmltodict missing")

    if not HAS_JSON:
        ansible_module.fail_json(msg="json missing")

    subset = ansible_module.params['gather_subset']
    if subset is None:
        # Defensive: the argument spec already defaults this to ['all'].
        subset = ['all']
    gatherer = NetAppONTAPGatherFacts(ansible_module)
    facts = gatherer.get_all(subset)
    ansible_module.exit_json(
        ansible_facts={'ontap_facts': facts},
        state=ansible_module.params['state'],
        changed=False,
    )
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
@ -1,858 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2018, Simon Dodsley (simon@purestorage.com)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: purefa_facts
|
||||
deprecated:
|
||||
removed_in: 3.0.0 # was Ansible 2.13
|
||||
why: Deprecated in favor of C(_info) module.
|
||||
alternative: Use M(purestorage.flasharray.purefa_info) instead.
|
||||
short_description: Collect facts from Pure Storage FlashArray
|
||||
description:
|
||||
- Collect facts information from a Pure Storage Flasharray running the
|
||||
Purity//FA operating system. By default, the module will collect basic
|
||||
fact information including hosts, host groups, protection
|
||||
groups and volume counts. Additional fact information can be collected
|
||||
based on the configured set of arguments.
|
||||
author:
|
||||
- Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
|
||||
options:
|
||||
gather_subset:
|
||||
description:
|
||||
- When supplied, this argument will define the facts to be collected.
|
||||
Possible values for this include all, minimum, config, performance,
|
||||
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
|
||||
admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
|
||||
type: list
|
||||
required: false
|
||||
default: minimum
|
||||
extends_documentation_fragment:
|
||||
- community.general.purestorage.fa
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Collect default set of facts
|
||||
community.general.purefa_facts:
|
||||
fa_url: 10.10.10.2
|
||||
api_token: e31060a7-21fc-e277-6240-25983c6c4592
|
||||
|
||||
- name: Collect configuration and capacity facts
|
||||
community.general.purefa_facts:
|
||||
gather_subset:
|
||||
- config
|
||||
- capacity
|
||||
fa_url: 10.10.10.2
|
||||
api_token: e31060a7-21fc-e277-6240-25983c6c4592
|
||||
|
||||
- name: Collect all facts
|
||||
community.general.purefa_facts:
|
||||
gather_subset:
|
||||
- all
|
||||
fa_url: 10.10.10.2
|
||||
api_token: e31060a7-21fc-e277-6240-25983c6c4592
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
ansible_facts:
|
||||
description: Returns the facts collected from the FlashArray
|
||||
returned: always
|
||||
type: complex
|
||||
sample: {
|
||||
"capacity": {},
|
||||
"config": {
|
||||
"directory_service": {
|
||||
"array_admin_group": null,
|
||||
"base_dn": null,
|
||||
"bind_password": null,
|
||||
"bind_user": null,
|
||||
"check_peer": false,
|
||||
"enabled": false,
|
||||
"group_base": null,
|
||||
"readonly_group": null,
|
||||
"storage_admin_group": null,
|
||||
"uri": []
|
||||
},
|
||||
"dns": {
|
||||
"domain": "domain.com",
|
||||
"nameservers": [
|
||||
"8.8.8.8",
|
||||
"8.8.4.4"
|
||||
]
|
||||
},
|
||||
"ntp": [
|
||||
"0.ntp.pool.org",
|
||||
"1.ntp.pool.org",
|
||||
"2.ntp.pool.org",
|
||||
"3.ntp.pool.org"
|
||||
],
|
||||
"smtp": [
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "alerts@acme.com"
|
||||
},
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "user@acme.com"
|
||||
}
|
||||
],
|
||||
"snmp": [
|
||||
{
|
||||
"auth_passphrase": null,
|
||||
"auth_protocol": null,
|
||||
"community": null,
|
||||
"host": "localhost",
|
||||
"name": "localhost",
|
||||
"privacy_passphrase": null,
|
||||
"privacy_protocol": null,
|
||||
"user": null,
|
||||
"version": "v2c"
|
||||
}
|
||||
],
|
||||
"ssl_certs": {
|
||||
"country": null,
|
||||
"email": null,
|
||||
"issued_by": "",
|
||||
"issued_to": "",
|
||||
"key_size": 2048,
|
||||
"locality": null,
|
||||
"organization": "Acme Storage, Inc.",
|
||||
"organizational_unit": "Acme Storage, Inc.",
|
||||
"state": null,
|
||||
"status": "self-signed",
|
||||
"valid_from": "2017-08-11T23:09:06Z",
|
||||
"valid_to": "2027-08-09T23:09:06Z"
|
||||
},
|
||||
"syslog": []
|
||||
},
|
||||
"default": {
|
||||
"array_name": "flasharray1",
|
||||
"connected_arrays": 1,
|
||||
"hostgroups": 0,
|
||||
"hosts": 10,
|
||||
"pods": 3,
|
||||
"protection_groups": 1,
|
||||
"purity_version": "5.0.4",
|
||||
"snapshots": 1,
|
||||
"volume_groups": 2
|
||||
},
|
||||
"hgroups": {},
|
||||
"hosts": {
|
||||
"host1": {
|
||||
"hgroup": null,
|
||||
"iqn": [
|
||||
"iqn.1994-05.com.redhat:2f6f5715a533"
|
||||
],
|
||||
"wwn": []
|
||||
},
|
||||
"host2": {
|
||||
"hgroup": null,
|
||||
"iqn": [
|
||||
"iqn.1994-05.com.redhat:d17fb13fe0b"
|
||||
],
|
||||
"wwn": []
|
||||
},
|
||||
"host3": {
|
||||
"hgroup": null,
|
||||
"iqn": [
|
||||
"iqn.1994-05.com.redhat:97b1351bfb2"
|
||||
],
|
||||
"wwn": []
|
||||
},
|
||||
"host4": {
|
||||
"hgroup": null,
|
||||
"iqn": [
|
||||
"iqn.1994-05.com.redhat:dd84e9a7b2cb"
|
||||
],
|
||||
"wwn": [
|
||||
"10000000C96C48D1",
|
||||
"10000000C96C48D2"
|
||||
]
|
||||
}
|
||||
},
|
||||
"interfaces": {
|
||||
"CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
|
||||
"CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
|
||||
"CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
|
||||
"CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
|
||||
},
|
||||
"network": {
|
||||
"ct0.eth0": {
|
||||
"address": "10.10.10.10",
|
||||
"gateway": "10.10.10.1",
|
||||
"hwaddr": "ec:f4:bb:c8:8a:04",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"management"
|
||||
],
|
||||
"speed": 1000000000
|
||||
},
|
||||
"ct0.eth2": {
|
||||
"address": "10.10.10.11",
|
||||
"gateway": null,
|
||||
"hwaddr": "ec:f4:bb:c8:8a:00",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"replication"
|
||||
],
|
||||
"speed": 10000000000
|
||||
},
|
||||
"ct0.eth3": {
|
||||
"address": "10.10.10.12",
|
||||
"gateway": null,
|
||||
"hwaddr": "ec:f4:bb:c8:8a:02",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"replication"
|
||||
],
|
||||
"speed": 10000000000
|
||||
},
|
||||
"ct0.eth4": {
|
||||
"address": "10.10.10.13",
|
||||
"gateway": null,
|
||||
"hwaddr": "90:e2:ba:83:79:0c",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"iscsi"
|
||||
],
|
||||
"speed": 10000000000
|
||||
},
|
||||
"ct0.eth5": {
|
||||
"address": "10.10.10.14",
|
||||
"gateway": null,
|
||||
"hwaddr": "90:e2:ba:83:79:0d",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"iscsi"
|
||||
],
|
||||
"speed": 10000000000
|
||||
},
|
||||
"vir0": {
|
||||
"address": "10.10.10.20",
|
||||
"gateway": "10.10.10.1",
|
||||
"hwaddr": "fe:ba:e9:e7:6b:0f",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"management"
|
||||
],
|
||||
"speed": 1000000000
|
||||
}
|
||||
},
|
||||
"offload": {
|
||||
"nfstarget": {
|
||||
"address": "10.0.2.53",
|
||||
"mount_options": null,
|
||||
"mount_point": "/offload",
|
||||
"protocol": "nfs",
|
||||
"status": "scanning"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"input_per_sec": 8191,
|
||||
"output_per_sec": 0,
|
||||
"queue_depth": 1,
|
||||
"reads_per_sec": 0,
|
||||
"san_usec_per_write_op": 15,
|
||||
"usec_per_read_op": 0,
|
||||
"usec_per_write_op": 642,
|
||||
"writes_per_sec": 2
|
||||
},
|
||||
"pgroups": {
|
||||
"consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
|
||||
"hgroups": null,
|
||||
"hosts": null,
|
||||
"source": "host1",
|
||||
"targets": null,
|
||||
"volumes": [
|
||||
"volume-1"
|
||||
]
|
||||
}
|
||||
},
|
||||
"pods": {
|
||||
"srm-pod": {
|
||||
"arrays": [
|
||||
{
|
||||
"array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
|
||||
"mediator_status": "online",
|
||||
"name": "sn1-405-c09-37",
|
||||
"status": "online"
|
||||
},
|
||||
{
|
||||
"array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
|
||||
"mediator_status": "online",
|
||||
"name": "sn1-420-c11-31",
|
||||
"status": "online"
|
||||
}
|
||||
],
|
||||
"source": null
|
||||
}
|
||||
},
|
||||
"snapshots": {
|
||||
"consisgroup.cgsnapshot": {
|
||||
"created": "2018-03-28T09:34:02Z",
|
||||
"size": 13958643712,
|
||||
"source": "volume-1"
|
||||
}
|
||||
},
|
||||
"subnet": {},
|
||||
"vgroups": {
|
||||
"vvol--vSphere-HA-0ffc7dd1-vg": {
|
||||
"volumes": [
|
||||
"vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
|
||||
]
|
||||
}
|
||||
},
|
||||
"volumes": {
|
||||
"ansible_data": {
|
||||
"bandwidth": null,
|
||||
"hosts": [
|
||||
[
|
||||
"host1",
|
||||
1
|
||||
]
|
||||
],
|
||||
"serial": "43BE47C12334399B000114A6",
|
||||
"size": 1099511627776,
|
||||
"source": null
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
|
||||
|
||||
|
||||
ADMIN_API_VERSION = '1.14'
|
||||
S3_REQUIRED_API_VERSION = '1.16'
|
||||
LATENCY_REQUIRED_API_VERSION = '1.16'
|
||||
AC_REQUIRED_API_VERSION = '1.14'
|
||||
CAP_REQUIRED_API_VERSION = '1.6'
|
||||
SAN_REQUIRED_API_VERSION = '1.10'
|
||||
NVME_API_VERSION = '1.16'
|
||||
PREFERRED_API_VERSION = '1.15'
|
||||
CONN_STATUS_API_VERSION = '1.17'
|
||||
|
||||
|
||||
def generate_default_dict(array):
    """Collect the minimal fact set for a FlashArray.

    Always reports counts (hosts, snapshots, protection groups, host groups,
    admins), array name, model and Purity version; on ActiveCluster-capable
    REST versions also reports vgroup/pod/remote-array counts and the
    connection key.
    """
    facts = {}
    array_info = array.get()
    rest_versions = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in rest_versions:
        facts['volume_groups'] = len(array.list_vgroups())
        facts['connected_arrays'] = len(array.list_array_connections())
        facts['pods'] = len(array.list_pods())
        facts['connection_key'] = array.get(connection_key=True)['connection_key']
    host_count = len(array.list_hosts())
    admin_count = len(array.list_admins())
    snap_count = len(array.list_volumes(snap=True, pending=True))
    pgroup_count = len(array.list_pgroups(pending=True))
    hgroup_count = len(array.list_hgroups())
    # Old FA arrays only report model from the primary controller
    model = array.get_hardware('CT0')['model']
    if not model:
        model = array.get_hardware('CT1')['model']
    facts['array_model'] = model
    facts['array_name'] = array_info['array_name']
    facts['purity_version'] = array_info['version']
    facts['hosts'] = host_count
    facts['snapshots'] = snap_count
    facts['protection_groups'] = pgroup_count
    facts['hostgroups'] = hgroup_count
    facts['admins'] = admin_count
    return facts
|
||||
|
||||
|
||||
def generate_perf_dict(array):
    """Collect array-wide performance counters.

    IOPS, bandwidth and basic latency counters are always reported; the
    detailed SAN/queue/QoS latency breakdown is added only when the REST
    API version supports the latency monitor query.
    """
    perf = {}
    rest_versions = array._list_available_rest_versions()
    has_latency = LATENCY_REQUIRED_API_VERSION in rest_versions
    if has_latency:
        latency_sample = array.get(action='monitor', latency=True)[0]
    monitor_sample = array.get(action='monitor')[0]

    # IOPS
    perf['writes_per_sec'] = monitor_sample['writes_per_sec']
    perf['reads_per_sec'] = monitor_sample['reads_per_sec']

    # Bandwidth
    perf['input_per_sec'] = monitor_sample['input_per_sec']
    perf['output_per_sec'] = monitor_sample['output_per_sec']

    # Latency (detailed breakdown requires a newer REST API)
    if has_latency:
        for counter in ('san_usec_per_read_op', 'san_usec_per_write_op',
                        'queue_usec_per_read_op', 'queue_usec_per_write_op',
                        'qos_rate_limit_usec_per_read_op',
                        'qos_rate_limit_usec_per_write_op'):
            perf[counter] = latency_sample[counter]
        perf['local_queue_usec_per_op'] = monitor_sample['local_queue_usec_per_op']
    perf['usec_per_read_op'] = monitor_sample['usec_per_read_op']
    perf['usec_per_write_op'] = monitor_sample['usec_per_write_op']
    perf['queue_depth'] = monitor_sample['queue_depth']
    return perf
|
||||
|
||||
|
||||
def generate_config_dict(array):
    """Collect FlashArray configuration facts.

    Covers DNS, SMTP alert recipients, SNMP, directory services (with
    per-role detail on newer API versions), NTP, syslog, phonehome, proxy,
    relay host, sender domain, timeouts, SSL certificates and — where
    supported — global admin settings.
    """
    config_facts = {}
    api_version = array._list_available_rest_versions()
    # DNS
    config_facts['dns'] = array.get_dns()
    # SMTP
    config_facts['smtp'] = array.list_alert_recipients()
    # SNMP
    config_facts['snmp'] = array.list_snmp_managers()
    config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
    # Directory services
    config_facts['directory_service'] = array.get_directory_service()
    if S3_REQUIRED_API_VERSION in api_version:
        config_facts['directory_service_roles'] = {}
        roles = array.list_directory_service_roles()
        for role in range(0, len(roles)):
            role_name = roles[role]['name']
            config_facts['directory_service_roles'][role_name] = {
                'group': roles[role]['group'],
                'group_base': roles[role]['group_base'],
            }
    else:
        config_facts['directory_service'].update(array.get_directory_service(groups=True))
    # NTP
    config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
    # SYSLOG
    config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
    # Phonehome
    config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
    # Proxy
    config_facts['proxy'] = array.get(proxy=True)['proxy']
    # Relay Host
    config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
    # Sender Domain
    config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
    # NOTE: a second, redundant syslog fetch used to live here; it issued an
    # extra REST call only to overwrite config_facts['syslog'] with the same
    # value, so it has been removed.
    # Idle Timeout
    config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
    # SCSI Timeout
    config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
    # SSL
    config_facts['ssl_certs'] = array.get_certificate()
    # Global Admin settings
    if S3_REQUIRED_API_VERSION in api_version:
        config_facts['global_admin'] = array.get_global_admin_attributes()
    return config_facts
|
||||
|
||||
|
||||
def generate_admin_dict(array):
    """Return a mapping of admin account name to its type and role.

    Empty when the array's REST API is older than ADMIN_API_VERSION.
    """
    admin_facts = {}
    if ADMIN_API_VERSION in array._list_available_rest_versions():
        for admin in array.list_admins():
            admin_facts[admin['name']] = {
                'type': admin['type'],
                'role': admin['role'],
            }
    return admin_facts
|
||||
|
||||
|
||||
def generate_subnet_dict(array):
    """Return facts for every *enabled* subnet, keyed by subnet name."""
    sub_facts = {}
    for subnet in array.list_subnets():
        if not subnet['enabled']:
            continue  # disabled subnets are intentionally omitted
        sub_facts[subnet['name']] = {
            'gateway': subnet['gateway'],
            'mtu': subnet['mtu'],
            'vlan': subnet['vlan'],
            'prefix': subnet['prefix'],
            'interfaces': subnet['interfaces'],
            'services': subnet['services'],
        }
    return sub_facts
|
||||
|
||||
|
||||
def generate_network_dict(array):
    """Return per-interface network facts, keyed by interface name.

    When an interface belongs to an enabled subnet, a nested 'subnet'
    entry with the subnet's name, prefix and VLAN is included.
    """
    net_facts = {}
    for port in array.list_network_interfaces():
        int_name = port['name']
        net_facts[int_name] = {
            'hwaddr': port['hwaddr'],
            'mtu': port['mtu'],
            'enabled': port['enabled'],
            'speed': port['speed'],
            'address': port['address'],
            'slaves': port['slaves'],
            'services': port['services'],
            'gateway': port['gateway'],
            'netmask': port['netmask'],
        }
        if port['subnet']:
            subnet = array.get_subnet(port['subnet'])
            if subnet['enabled']:
                net_facts[int_name]['subnet'] = {
                    'name': subnet['name'],
                    'prefix': subnet['prefix'],
                    'vlan': subnet['vlan'],
                }
    return net_facts
|
||||
|
||||
|
||||
def generate_capacity_dict(array):
    """Return array capacity and space-efficiency facts.

    Empty when the array's REST API is older than CAP_REQUIRED_API_VERSION.
    """
    capacity_facts = {}
    api_version = array._list_available_rest_versions()
    if CAP_REQUIRED_API_VERSION in api_version:
        # Provisioned space includes volumes pending eradication.
        volumes = array.list_volumes(pending=True)
        capacity_facts['provisioned_space'] = sum(vol['size'] for vol in volumes)
        space = array.get(space=True)[0]
        total_capacity = space['capacity']
        used_space = space['total']
        capacity_facts['free_space'] = total_capacity - used_space
        capacity_facts['total_capacity'] = total_capacity
        capacity_facts['data_reduction'] = space['data_reduction']
        capacity_facts['system_space'] = space['system']
        capacity_facts['volume_space'] = space['volumes']
        capacity_facts['shared_space'] = space['shared_space']
        capacity_facts['snapshot_space'] = space['snapshots']
        capacity_facts['thin_provisioning'] = space['thin_provisioning']
        capacity_facts['total_reduction'] = space['total_reduction']
    return capacity_facts
|
||||
|
||||
|
||||
def generate_snap_dict(array):
    """Return facts for every volume snapshot, keyed by snapshot name."""
    snap_facts = {}
    for snap in array.list_volumes(snap=True):
        snap_facts[snap['name']] = {
            'size': snap['size'],
            'source': snap['source'],
            'created': snap['created'],
        }
    return snap_facts
|
||||
|
||||
|
||||
def generate_vol_dict(array):
    """Return per-volume facts: source, size, serial, host connections and
    QoS bandwidth limit (the latter only on ActiveCluster-capable arrays).

    Host connections are appended as [host, lun] pairs.
    """
    volume_facts = {}
    for vol in array.list_volumes():
        volume_facts[vol['name']] = {
            'source': vol['source'],
            'size': vol['size'],
            'serial': vol['serial'],
            'hosts': [],
            'bandwidth': ""
        }
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        # QoS bandwidth limits
        for qvol in array.list_volumes(qos=True):
            volume_facts[qvol['name']]['bandwidth'] = qvol['bandwidth_limit']
        # Protocol-endpoint (VVol) volumes replace their basic entry; they
        # carry no size or bandwidth information.
        for vvol in array.list_volumes(protocol_endpoint=True):
            volume_facts[vvol['name']] = {
                'source': vvol['source'],
                'serial': vvol['serial'],
                'hosts': []
            }
    # Host connections
    for cvol in array.list_volumes(connect=True):
        volume_facts[cvol['name']]['hosts'].append([cvol['host'], cvol['lun']])
    return volume_facts
|
||||
|
||||
|
||||
def generate_host_dict(array):
    """Return per-host facts: host group, IQN/WWN, personality and target
    ports, plus NQN and preferred arrays where the API supports them."""
    api_version = array._list_available_rest_versions()
    host_facts = {}
    for host in array.list_hosts():
        hostname = host['name']
        tports = []
        host_all_info = array.get_host(hostname, all=True)
        if host_all_info:
            tports = host_all_info[0]['target_port']
        host_facts[hostname] = {
            'hgroup': host['hgroup'],
            'iqn': host['iqn'],
            'wwn': host['wwn'],
            'personality': array.get_host(hostname,
                                          personality=True)['personality'],
            'target_port': tports
        }
        if NVME_API_VERSION in api_version:
            host_facts[hostname]['nqn'] = host['nqn']
    if PREFERRED_API_VERSION in api_version:
        # Second pass: preferred-array information requires its own query.
        for host in array.list_hosts(preferred_array=True):
            host_facts[host['name']]['preferred_array'] = host['preferred_array']
    return host_facts
|
||||
|
||||
|
||||
def generate_pgroups_dict(array):
    """Return per-protection-group facts: members, schedule, retention and,
    for replica groups (names containing ':'), snapshot transfer status.

    NOTE: 'snap_freqyency' and 'replicate_freqyency' are historical typos
    kept as-is for backwards compatibility with existing consumers.
    """
    pgroups_facts = {}
    for pgroup in array.list_pgroups():
        protgroup = pgroup['name']
        entry = {
            'hgroups': pgroup['hgroups'],
            'hosts': pgroup['hosts'],
            'source': pgroup['source'],
            'targets': pgroup['targets'],
            'volumes': pgroup['volumes'],
        }
        pgroups_facts[protgroup] = entry
        prot_sched = array.get_pgroup(protgroup, schedule=True)
        prot_reten = array.get_pgroup(protgroup, retention=True)
        if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
            entry['snap_freqyency'] = prot_sched['snap_frequency']
            entry['replicate_freqyency'] = prot_sched['replicate_frequency']
            entry['snap_enabled'] = prot_sched['snap_enabled']
            entry['replicate_enabled'] = prot_sched['replicate_enabled']
            entry['snap_at'] = prot_sched['snap_at']
            entry['replicate_at'] = prot_sched['replicate_at']
            entry['replicate_blackout'] = prot_sched['replicate_blackout']
            entry['per_day'] = prot_reten['per_day']
            entry['target_per_day'] = prot_reten['target_per_day']
            entry['target_days'] = prot_reten['target_days']
            entry['days'] = prot_reten['days']
            entry['all_for'] = prot_reten['all_for']
            entry['target_all_for'] = prot_reten['target_all_for']
        if ":" in protgroup:
            # Remote (replica) protection group: gather transfer status.
            entry['snaps'] = {}
            for snap_transfer in array.get_pgroup(protgroup, snap=True, transfer=True):
                entry['snaps'][snap_transfer['name']] = {
                    'created': snap_transfer['created'],
                    'started': snap_transfer['started'],
                    'completed': snap_transfer['completed'],
                    'physical_bytes_written': snap_transfer['physical_bytes_written'],
                    'data_transferred': snap_transfer['data_transferred'],
                    'progress': snap_transfer['progress'],
                }
    return pgroups_facts
|
||||
|
||||
|
||||
def generate_pods_dict(array):
    """Return ActiveCluster pod facts, keyed by pod name.

    Empty when the array's REST API is older than AC_REQUIRED_API_VERSION.
    """
    pods_facts = {}
    if AC_REQUIRED_API_VERSION in array._list_available_rest_versions():
        for pod in array.list_pods():
            pods_facts[pod['name']] = {
                'source': pod['source'],
                'arrays': pod['arrays'],
            }
    return pods_facts
|
||||
|
||||
|
||||
def generate_conn_array_dict(array):
    """Return facts for each array connected to this one, keyed by name.

    Empty when the REST API is older than CONN_STATUS_API_VERSION.
    NOTE: 'throtled' is the (misspelled) key name used by the facts output
    and is preserved as-is for backwards compatibility.
    """
    conn_array_facts = {}
    api_version = array._list_available_rest_versions()
    if CONN_STATUS_API_VERSION in api_version:
        carrays = array.list_connected_arrays()
        for carray in carrays:
            arrayname = carray['array_name']
            conn_array_facts[arrayname] = {
                'array_id': carray['id'],
                'throtled': carray['throtled'],
                'version': carray['version'],
                'type': carray['type'],
                'mgmt_ip': carray['management_address'],
                'repl_ip': carray['replication_address'],
                # The original inner re-check of CONN_STATUS_API_VERSION was
                # redundant (guaranteed by the enclosing branch), so status
                # is now set unconditionally.
                'status': carray['status'],
            }
    return conn_array_facts
|
||||
|
||||
|
||||
def generate_apps_dict(array):
    """Return installed-application facts, keyed by app name.

    Empty when the array's REST API is older than SAN_REQUIRED_API_VERSION.
    """
    apps_facts = {}
    if SAN_REQUIRED_API_VERSION in array._list_available_rest_versions():
        for app in array.list_apps():
            apps_facts[app['name']] = {
                'version': app['version'],
                'status': app['status'],
                'description': app['description'],
            }
    return apps_facts
|
||||
|
||||
|
||||
def generate_vgroups_dict(array):
    """Return volume-group facts (member volumes), keyed by group name.

    Empty when the array's REST API is older than AC_REQUIRED_API_VERSION.
    """
    vgroups_facts = {}
    if AC_REQUIRED_API_VERSION in array._list_available_rest_versions():
        for vgroup in array.list_vgroups():
            vgroups_facts[vgroup['name']] = {
                'volumes': vgroup['volumes'],
            }
    return vgroups_facts
|
||||
|
||||
|
||||
def generate_nfs_offload_dict(array):
    """Return NFS offload target facts, keyed by target name.

    Empty when the array's REST API is older than AC_REQUIRED_API_VERSION.
    """
    offload_facts = {}
    if AC_REQUIRED_API_VERSION in array._list_available_rest_versions():
        for target in array.list_nfs_offload():
            offload_facts[target['name']] = {
                'status': target['status'],
                'mount_point': target['mount_point'],
                'protocol': target['protocol'],
                'mount_options': target['mount_options'],
                'address': target['address'],
            }
    return offload_facts
|
||||
|
||||
|
||||
def generate_s3_offload_dict(array):
    """Return S3 offload target facts, keyed by target name.

    Empty when the array's REST API is older than S3_REQUIRED_API_VERSION.
    """
    offload_facts = {}
    if S3_REQUIRED_API_VERSION in array._list_available_rest_versions():
        for target in array.list_s3_offload():
            offload_facts[target['name']] = {
                'status': target['status'],
                'bucket': target['bucket'],
                'protocol': target['protocol'],
                'access_key_id': target['access_key_id'],
            }
    return offload_facts
|
||||
|
||||
|
||||
def generate_hgroups_dict(array):
    """Return per-hostgroup facts: member hosts, protection groups ('pgs')
    and connected volumes ('vols', as [volume, lun] pairs)."""
    hgroups_facts = {}
    for hgroup in array.list_hgroups():
        hgroups_facts[hgroup['name']] = {
            'hosts': hgroup['hosts'],
            'pgs': [],
            'vols': [],
        }
    # Protection-group membership is reported by a separate query.
    for protected in array.list_hgroups(protect=True):
        hgroups_facts[protected['name']]['pgs'].append(protected['protection_group'])
    # Volume connections likewise come from their own query.
    for connection in array.list_hgroups(connect=True):
        hgroups_facts[connection['name']]['vols'].append(
            [connection['vol'], connection['lun']])
    return hgroups_facts
|
||||
|
||||
|
||||
def generate_interfaces_dict(array):
    """Return a mapping of port name to its identifier (WWN, IQN or NQN).

    When a port exposes more than one identifier, later checks overwrite
    earlier ones (NQN > IQN > WWN), matching the historical behaviour.
    """
    api_version = array._list_available_rest_versions()
    nvme_capable = NVME_API_VERSION in api_version
    int_facts = {}
    for port in array.list_ports():
        int_name = port['name']
        if port['wwn']:
            int_facts[int_name] = port['wwn']
        if port['iqn']:
            int_facts[int_name] = port['iqn']
        if nvme_capable and port['nqn']:
            int_facts[int_name] = port['nqn']
    return int_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point.

    Validates the requested ``gather_subset`` values, collects the matching
    fact categories from the FlashArray, and returns them under the
    ``ansible_purefa_facts`` key.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        gather_subset=dict(default='minimum', type='list',)
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=False)

    array = get_system(module)

    # Normalise the requested subsets and validate against the known set.
    subset = [test.lower() for test in module.params['gather_subset']]
    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
                     'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
                     'hosts', 'admins', 'volumes', 'snapshots', 'pods',
                     'vgroups', 'offload', 'apps', 'arrays')
    subset_test = (test in valid_subsets for test in subset)
    if not all(subset_test):
        # Fixed garbled error text (was "value must gather_subset must be...").
        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
                             % (",".join(valid_subsets), ",".join(subset)))

    facts = {}

    if 'minimum' in subset or 'all' in subset:
        facts['default'] = generate_default_dict(array)
    if 'performance' in subset or 'all' in subset:
        facts['performance'] = generate_perf_dict(array)
    if 'config' in subset or 'all' in subset:
        facts['config'] = generate_config_dict(array)
    if 'capacity' in subset or 'all' in subset:
        facts['capacity'] = generate_capacity_dict(array)
    if 'network' in subset or 'all' in subset:
        facts['network'] = generate_network_dict(array)
    if 'subnet' in subset or 'all' in subset:
        facts['subnet'] = generate_subnet_dict(array)
    if 'interfaces' in subset or 'all' in subset:
        facts['interfaces'] = generate_interfaces_dict(array)
    if 'hosts' in subset or 'all' in subset:
        facts['hosts'] = generate_host_dict(array)
    if 'volumes' in subset or 'all' in subset:
        facts['volumes'] = generate_vol_dict(array)
    if 'snapshots' in subset or 'all' in subset:
        facts['snapshots'] = generate_snap_dict(array)
    if 'hgroups' in subset or 'all' in subset:
        facts['hgroups'] = generate_hgroups_dict(array)
    if 'pgroups' in subset or 'all' in subset:
        facts['pgroups'] = generate_pgroups_dict(array)
    if 'pods' in subset or 'all' in subset:
        facts['pods'] = generate_pods_dict(array)
    if 'admins' in subset or 'all' in subset:
        facts['admins'] = generate_admin_dict(array)
    if 'vgroups' in subset or 'all' in subset:
        facts['vgroups'] = generate_vgroups_dict(array)
    if 'offload' in subset or 'all' in subset:
        # 'offload' covers both NFS and S3 offload targets.
        facts['nfs_offload'] = generate_nfs_offload_dict(array)
        facts['s3_offload'] = generate_s3_offload_dict(array)
    if 'apps' in subset or 'all' in subset:
        facts['apps'] = generate_apps_dict(array)
    if 'arrays' in subset or 'all' in subset:
        facts['arrays'] = generate_conn_array_dict(array)

    module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
|
||||
|
|
@ -1,652 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2018, Simon Dodsley (simon@purestorage.com)
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: purefb_facts
|
||||
deprecated:
|
||||
removed_in: 3.0.0 # was Ansible 2.13
|
||||
why: Deprecated in favor of C(_info) module.
|
||||
alternative: Use M(purestorage.flashblade.purefb_info) instead.
|
||||
short_description: Collect facts from Pure Storage FlashBlade
|
||||
description:
|
||||
- Collect facts information from a Pure Storage FlashBlade running the
|
||||
Purity//FB operating system. By default, the module will collect basic
|
||||
fact information including hosts, host groups, protection
|
||||
groups and volume counts. Additional fact information can be collected
|
||||
based on the configured set of arguments.
|
||||
author:
|
||||
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
|
||||
options:
|
||||
gather_subset:
|
||||
description:
|
||||
- When supplied, this argument will define the facts to be collected.
|
||||
Possible values for this include all, minimum, config, performance,
|
||||
capacity, network, subnets, lags, filesystems and snapshots.
|
||||
required: false
|
||||
type: list
|
||||
default: minimum
|
||||
extends_documentation_fragment:
|
||||
- community.general.purestorage.fb
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Collect default set of facts
|
||||
community.general.purefb_facts:
|
||||
fb_url: 10.10.10.2
|
||||
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
|
||||
|
||||
- name: Collect configuration and capacity facts
|
||||
community.general.purefb_facts:
|
||||
gather_subset:
|
||||
- config
|
||||
- capacity
|
||||
fb_url: 10.10.10.2
|
||||
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
|
||||
|
||||
- name: Collect all facts
|
||||
community.general.purefb_facts:
|
||||
gather_subset:
|
||||
- all
|
||||
fb_url: 10.10.10.2
|
||||
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
ansible_facts:
|
||||
description: Returns the facts collected from the FlashBlade
|
||||
returned: always
|
||||
type: complex
|
||||
sample: {
|
||||
"capacity": {
|
||||
"aggregate": {
|
||||
"data_reduction": 1.1179228,
|
||||
"snapshots": 0,
|
||||
"total_physical": 17519748439,
|
||||
"unique": 17519748439,
|
||||
"virtual": 19585726464
|
||||
},
|
||||
"file-system": {
|
||||
"data_reduction": 1.3642412,
|
||||
"snapshots": 0,
|
||||
"total_physical": 4748219708,
|
||||
"unique": 4748219708,
|
||||
"virtual": 6477716992
|
||||
},
|
||||
"object-store": {
|
||||
"data_reduction": 1.0263462,
|
||||
"snapshots": 0,
|
||||
"total_physical": 12771528731,
|
||||
"unique": 12771528731,
|
||||
"virtual": 6477716992
|
||||
},
|
||||
"total": 83359896948925
|
||||
},
|
||||
"config": {
|
||||
"alert_watchers": {
|
||||
"enabled": true,
|
||||
"name": "notify@acmestorage.com"
|
||||
},
|
||||
"array_management": {
|
||||
"base_dn": null,
|
||||
"bind_password": null,
|
||||
"bind_user": null,
|
||||
"enabled": false,
|
||||
"name": "management",
|
||||
"services": [
|
||||
"management"
|
||||
],
|
||||
"uris": []
|
||||
},
|
||||
"directory_service_roles": {
|
||||
"array_admin": {
|
||||
"group": null,
|
||||
"group_base": null
|
||||
},
|
||||
"ops_admin": {
|
||||
"group": null,
|
||||
"group_base": null
|
||||
},
|
||||
"readonly": {
|
||||
"group": null,
|
||||
"group_base": null
|
||||
},
|
||||
"storage_admin": {
|
||||
"group": null,
|
||||
"group_base": null
|
||||
}
|
||||
},
|
||||
"dns": {
|
||||
"domain": "demo.acmestorage.com",
|
||||
"name": "demo-fb-1",
|
||||
"nameservers": [
|
||||
"8.8.8.8"
|
||||
],
|
||||
"search": [
|
||||
"demo.acmestorage.com"
|
||||
]
|
||||
},
|
||||
"nfs_directory_service": {
|
||||
"base_dn": null,
|
||||
"bind_password": null,
|
||||
"bind_user": null,
|
||||
"enabled": false,
|
||||
"name": "nfs",
|
||||
"services": [
|
||||
"nfs"
|
||||
],
|
||||
"uris": []
|
||||
},
|
||||
"ntp": [
|
||||
"0.ntp.pool.org"
|
||||
],
|
||||
"smb_directory_service": {
|
||||
"base_dn": null,
|
||||
"bind_password": null,
|
||||
"bind_user": null,
|
||||
"enabled": false,
|
||||
"name": "smb",
|
||||
"services": [
|
||||
"smb"
|
||||
],
|
||||
"uris": []
|
||||
},
|
||||
"smtp": {
|
||||
"name": "demo-fb-1",
|
||||
"relay_host": null,
|
||||
"sender_domain": "acmestorage.com"
|
||||
},
|
||||
"ssl_certs": {
|
||||
"certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
|
||||
"common_name": "Acme Storage",
|
||||
"country": "US",
|
||||
"email": null,
|
||||
"intermediate_certificate": null,
|
||||
"issued_by": "Acme Storage",
|
||||
"issued_to": "Acme Storage",
|
||||
"key_size": 4096,
|
||||
"locality": null,
|
||||
"name": "global",
|
||||
"organization": "Acme Storage",
|
||||
"organizational_unit": "Acme Storage",
|
||||
"passphrase": null,
|
||||
"private_key": null,
|
||||
"state": null,
|
||||
"status": "self-signed",
|
||||
"valid_from": "1508433967000",
|
||||
"valid_to": "2458833967000"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"blades": 15,
|
||||
"buckets": 7,
|
||||
"filesystems": 2,
|
||||
"flashblade_name": "demo-fb-1",
|
||||
"object_store_accounts": 1,
|
||||
"object_store_users": 1,
|
||||
"purity_version": "2.2.0",
|
||||
"snapshots": 1,
|
||||
"total_capacity": 83359896948925
|
||||
},
|
||||
"filesystems": {
|
||||
"k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
|
||||
"destroyed": false,
|
||||
"fast_remove": false,
|
||||
"hard_limit": true,
|
||||
"nfs_rules": "*(rw,no_root_squash)",
|
||||
"provisioned": 21474836480,
|
||||
"snapshot_enabled": false
|
||||
},
|
||||
"z": {
|
||||
"destroyed": false,
|
||||
"fast_remove": false,
|
||||
"hard_limit": false,
|
||||
"provisioned": 1073741824,
|
||||
"snapshot_enabled": false
|
||||
}
|
||||
},
|
||||
"lag": {
|
||||
"uplink": {
|
||||
"lag_speed": 0,
|
||||
"port_speed": 40000000000,
|
||||
"ports": [
|
||||
{
|
||||
"name": "CH1.FM1.ETH1.1"
|
||||
},
|
||||
{
|
||||
"name": "CH1.FM1.ETH1.2"
|
||||
},
|
||||
],
|
||||
"status": "healthy"
|
||||
}
|
||||
},
|
||||
"network": {
|
||||
"fm1.admin0": {
|
||||
"address": "10.10.100.6",
|
||||
"gateway": "10.10.100.1",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"support"
|
||||
],
|
||||
"type": "vip",
|
||||
"vlan": 2200
|
||||
},
|
||||
"fm2.admin0": {
|
||||
"address": "10.10.100.7",
|
||||
"gateway": "10.10.100.1",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"support"
|
||||
],
|
||||
"type": "vip",
|
||||
"vlan": 2200
|
||||
},
|
||||
"nfs1": {
|
||||
"address": "10.10.100.4",
|
||||
"gateway": "10.10.100.1",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"data"
|
||||
],
|
||||
"type": "vip",
|
||||
"vlan": 2200
|
||||
},
|
||||
"vir0": {
|
||||
"address": "10.10.100.5",
|
||||
"gateway": "10.10.100.1",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.255.0",
|
||||
"services": [
|
||||
"management"
|
||||
],
|
||||
"type": "vip",
|
||||
"vlan": 2200
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"aggregate": {
|
||||
"bytes_per_op": 0,
|
||||
"bytes_per_read": 0,
|
||||
"bytes_per_write": 0,
|
||||
"read_bytes_per_sec": 0,
|
||||
"reads_per_sec": 0,
|
||||
"usec_per_other_op": 0,
|
||||
"usec_per_read_op": 0,
|
||||
"usec_per_write_op": 0,
|
||||
"write_bytes_per_sec": 0,
|
||||
"writes_per_sec": 0
|
||||
},
|
||||
"http": {
|
||||
"bytes_per_op": 0,
|
||||
"bytes_per_read": 0,
|
||||
"bytes_per_write": 0,
|
||||
"read_bytes_per_sec": 0,
|
||||
"reads_per_sec": 0,
|
||||
"usec_per_other_op": 0,
|
||||
"usec_per_read_op": 0,
|
||||
"usec_per_write_op": 0,
|
||||
"write_bytes_per_sec": 0,
|
||||
"writes_per_sec": 0
|
||||
},
|
||||
"nfs": {
|
||||
"bytes_per_op": 0,
|
||||
"bytes_per_read": 0,
|
||||
"bytes_per_write": 0,
|
||||
"read_bytes_per_sec": 0,
|
||||
"reads_per_sec": 0,
|
||||
"usec_per_other_op": 0,
|
||||
"usec_per_read_op": 0,
|
||||
"usec_per_write_op": 0,
|
||||
"write_bytes_per_sec": 0,
|
||||
"writes_per_sec": 0
|
||||
},
|
||||
"s3": {
|
||||
"bytes_per_op": 0,
|
||||
"bytes_per_read": 0,
|
||||
"bytes_per_write": 0,
|
||||
"read_bytes_per_sec": 0,
|
||||
"reads_per_sec": 0,
|
||||
"usec_per_other_op": 0,
|
||||
"usec_per_read_op": 0,
|
||||
"usec_per_write_op": 0,
|
||||
"write_bytes_per_sec": 0,
|
||||
"writes_per_sec": 0
|
||||
}
|
||||
},
|
||||
"snapshots": {
|
||||
"z.188": {
|
||||
"destroyed": false,
|
||||
"source": "z",
|
||||
"source_destroyed": false,
|
||||
"suffix": "188"
|
||||
}
|
||||
},
|
||||
"subnet": {
|
||||
"new-mgmt": {
|
||||
"gateway": "10.10.100.1",
|
||||
"interfaces": [
|
||||
{
|
||||
"name": "fm1.admin0"
|
||||
},
|
||||
{
|
||||
"name": "fm2.admin0"
|
||||
},
|
||||
{
|
||||
"name": "nfs1"
|
||||
},
|
||||
{
|
||||
"name": "vir0"
|
||||
}
|
||||
],
|
||||
"lag": "uplink",
|
||||
"mtu": 1500,
|
||||
"prefix": "10.10.100.0/24",
|
||||
"services": [
|
||||
"data",
|
||||
"management",
|
||||
"support"
|
||||
],
|
||||
"vlan": 2200
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
|
||||
|
||||
|
||||
MIN_REQUIRED_API_VERSION = '1.3'
|
||||
HARD_LIMIT_API_VERSION = '1.4'
|
||||
|
||||
|
||||
def generate_default_dict(blade):
    """Return the minimum FlashBlade fact set: array name, Purity//FB
    version, object counts and total usable capacity."""
    array_info = blade.arrays.list_arrays().items[0]
    default_facts = {
        'flashblade_name': array_info.name,
        'purity_version': array_info.version,
        'filesystems': len(blade.file_systems.list_file_systems().items),
        'snapshots': len(blade.file_system_snapshots.list_file_system_snapshots().items),
        'buckets': len(blade.buckets.list_buckets().items),
        'object_store_users': len(blade.object_store_users.list_object_store_users().items),
        'object_store_accounts': len(blade.object_store_accounts.list_object_store_accounts().items),
        'blades': len(blade.blade.list_blades().items),
        'total_capacity': blade.arrays.list_arrays_space().items[0].capacity,
    }
    return default_facts
|
||||
|
||||
|
||||
def generate_perf_dict(blade):
    """Return FlashBlade performance facts: aggregate counters plus a
    per-protocol (http, s3, nfs) breakdown of the same metrics."""
    metrics = ('bytes_per_op', 'bytes_per_read', 'bytes_per_write',
               'read_bytes_per_sec', 'reads_per_sec', 'usec_per_other_op',
               'usec_per_read_op', 'usec_per_write_op',
               'write_bytes_per_sec', 'writes_per_sec')
    perf_facts = {}
    # 'aggregate' uses the protocol-less query; the rest filter by protocol.
    for fact_key, protocol in (('aggregate', None), ('http', 'http'),
                               ('s3', 's3'), ('nfs', 'nfs')):
        if protocol is None:
            stats = blade.arrays.list_arrays_performance().items[0]
        else:
            stats = blade.arrays.list_arrays_performance(protocol=protocol).items[0]
        perf_facts[fact_key] = {metric: getattr(stats, metric) for metric in metrics}

    return perf_facts
|
||||
|
||||
|
||||
def generate_config_dict(blade):
    """Gather array configuration facts (DNS, SMTP, alert watchers,
    directory services, NTP and SSL certificates).

    :param blade: an open FlashBlade REST client
    :return: dict of configuration facts
    """
    dir_svc = blade.directory_services
    config_facts = {
        'dns': blade.dns.list_dns().items[0].to_dict(),
        'smtp': blade.smtp.list_smtp().items[0].to_dict(),
        'alert_watchers': blade.alert_watchers.list_alert_watchers().items[0].to_dict(),
    }
    api_version = blade.api_version.list_versions().versions
    # The management directory service is only exposed by newer REST versions.
    if HARD_LIMIT_API_VERSION in api_version:
        config_facts['array_management'] = \
            dir_svc.list_directory_services(names=['management']).items[0].to_dict()
    role_facts = {}
    for role in dir_svc.list_directory_services_roles().items:
        role_facts[role.name] = {
            'group': role.group,
            'group_base': role.group_base
        }
    config_facts['directory_service_roles'] = role_facts
    config_facts['nfs_directory_service'] = \
        dir_svc.list_directory_services(names=['nfs']).items[0].to_dict()
    config_facts['smb_directory_service'] = \
        dir_svc.list_directory_services(names=['smb']).items[0].to_dict()
    config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
    config_facts['ssl_certs'] = \
        blade.certificates.list_certificates().items[0].to_dict()
    return config_facts
def generate_subnet_dict(blade):
    """Gather facts for every *enabled* subnet on the array.

    :param blade: an open FlashBlade REST client
    :return: dict keyed by subnet name
    """
    sub_facts = {}
    for subnet in blade.subnets.list_subnets().items:
        # Disabled subnets are deliberately omitted from the facts.
        if not subnet.enabled:
            continue
        sub_facts[subnet.name] = {
            'gateway': subnet.gateway,
            'mtu': subnet.mtu,
            'vlan': subnet.vlan,
            'prefix': subnet.prefix,
            'services': subnet.services,
            'lag': subnet.link_aggregation_group.name,
            'interfaces': [{'name': iface.name} for iface in subnet.interfaces],
        }
    return sub_facts
def generate_lag_dict(blade):
    """Gather facts for every link aggregation group on the array.

    :param blade: an open FlashBlade REST client
    :return: dict keyed by LAG name
    """
    lag_facts = {}
    for group in blade.link_aggregation_groups.list_link_aggregation_groups().items:
        lag_facts[group.name] = {
            'lag_speed': group.lag_speed,
            'port_speed': group.port_speed,
            'status': group.status,
            'ports': [{'name': port.name} for port in group.ports],
        }
    return lag_facts
def generate_network_dict(blade):
    """Gather facts for every *enabled* network interface on the array.

    :param blade: an open FlashBlade REST client
    :return: dict keyed by interface name
    """
    net_facts = {}
    for port in blade.network_interfaces.list_network_interfaces().items:
        # Interfaces that are administratively disabled are skipped.
        if not port.enabled:
            continue
        net_facts[port.name] = {
            'type': port.type,
            'mtu': port.mtu,
            'vlan': port.vlan,
            'address': port.address,
            'services': port.services,
            'gateway': port.gateway,
            'netmask': port.netmask,
        }
    return net_facts
def generate_capacity_dict(blade):
    """Gather capacity facts: total usable capacity plus space usage for
    the aggregate, file-system and object-store views.

    Bug fix: the 'object-store' section previously copied its 'virtual'
    value from the file-system space report instead of the object-store
    one (copy/paste error); it now reads from ``object_cap``.

    :param blade: an open FlashBlade REST client
    :return: dict of capacity facts
    """
    def _space_dict(space):
        # Flatten one space report object into the fact layout.
        return {
            'data_reduction': space.data_reduction,
            'snapshots': space.snapshots,
            'total_physical': space.total_physical,
            'unique': space.unique,
            'virtual': space.virtual,
        }

    total_cap = blade.arrays.list_arrays_space()
    file_cap = blade.arrays.list_arrays_space(type='file-system')
    object_cap = blade.arrays.list_arrays_space(type='object-store')
    capacity_facts = {
        'total': total_cap.items[0].capacity,
        'aggregate': _space_dict(total_cap.items[0].space),
        'file-system': _space_dict(file_cap.items[0].space),
        'object-store': _space_dict(object_cap.items[0].space),
    }
    return capacity_facts
def generate_snap_dict(blade):
    """Gather facts for every file-system snapshot on the array.

    :param blade: an open FlashBlade REST client
    :return: dict keyed by snapshot name
    """
    snap_facts = {}
    for snap in blade.file_system_snapshots.list_file_system_snapshots().items:
        snap_facts[snap.name] = {
            'destroyed': snap.destroyed,
            'source': snap.source,
            'suffix': snap.suffix,
            'source_destroyed': snap.source_destroyed,
        }
    return snap_facts
def generate_fs_dict(blade):
    """Gather per-file-system facts (provisioning, state and per-protocol
    settings).

    Improvement: the REST API version lookup is invariant for the
    connection, so it is now fetched once before the loop instead of
    issuing one ``list_versions`` call per file system.

    :param blade: an open FlashBlade REST client
    :return: dict keyed by file-system name
    """
    fs_facts = {}
    api_version = blade.api_version.list_versions().versions
    has_hard_limit = HARD_LIMIT_API_VERSION in api_version
    for fsystem in blade.file_systems.list_file_systems().items:
        share = fsystem.name
        fs_facts[share] = {
            'fast_remove': fsystem.fast_remove_directory_enabled,
            'snapshot_enabled': fsystem.snapshot_directory_enabled,
            'provisioned': fsystem.provisioned,
            'destroyed': fsystem.destroyed,
        }
        # Protocol details are only reported for protocols that are enabled.
        if fsystem.http.enabled:
            fs_facts[share]['http'] = fsystem.http.enabled
        if fsystem.smb.enabled:
            fs_facts[share]['smb_mode'] = fsystem.smb.acl_mode
        if fsystem.nfs.enabled:
            fs_facts[share]['nfs_rules'] = fsystem.nfs.rules
        # Hard limits are only exposed by newer REST versions.
        if has_hard_limit:
            fs_facts[share]['hard_limit'] = fsystem.hard_limit_enabled
    return fs_facts
def main():
    """Module entry point: validate options, connect to the FlashBlade and
    return the requested fact subsets.

    Bug fix: the invalid-subset error message previously read
    "value must gather_subset must be one or more of" (garbled); it now
    reads "value of gather_subset must be one or more of".
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        gather_subset=dict(default='minimum', type='list',)
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions

    # Refuse to run against arrays that predate the supported REST API.
    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    # Subset names are matched case-insensitively.
    subset = [test.lower() for test in module.params['gather_subset']]
    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
                     'network', 'subnets', 'lags',
                     'filesystems', 'snapshots')
    if not all(test in valid_subsets for test in subset):
        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
                         % (",".join(valid_subsets), ",".join(subset)))

    facts = {}

    # 'all' implies every subset; otherwise only the requested ones are built.
    if 'minimum' in subset or 'all' in subset:
        facts['default'] = generate_default_dict(blade)
    if 'performance' in subset or 'all' in subset:
        facts['performance'] = generate_perf_dict(blade)
    if 'config' in subset or 'all' in subset:
        facts['config'] = generate_config_dict(blade)
    if 'capacity' in subset or 'all' in subset:
        facts['capacity'] = generate_capacity_dict(blade)
    if 'lags' in subset or 'all' in subset:
        facts['lag'] = generate_lag_dict(blade)
    if 'network' in subset or 'all' in subset:
        facts['network'] = generate_network_dict(blade)
    if 'subnets' in subset or 'all' in subset:
        facts['subnet'] = generate_subnet_dict(blade)
    if 'filesystems' in subset or 'all' in subset:
        facts['filesystems'] = generate_fs_dict(blade)
    if 'snapshots' in subset or 'all' in subset:
        facts['snapshots'] = generate_snap_dict(blade)

    module.exit_json(ansible_facts={'ansible_purefb_facts': facts})


if __name__ == '__main__':
    main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue