Mirror of https://github.com/ansible-collections/ansible.posix.git

Merge branch 'main' into fix/doc/mount_absent_description

Adam Miller 2024-02-06 09:29:55 -06:00 committed by GitHub
commit a18d180246
GPG key ID: B5690EEEBB952194
155 changed files with 5126 additions and 2439 deletions

plugins/action/synchronize.py

@ -177,7 +177,7 @@ class ActionModule(ActionBase):
# Store remote connection type
self._remote_transport = self._connection.transport
use_ssh_args = _tmp_args.pop('use_ssh_args', None)
use_ssh_args = _tmp_args.pop('use_ssh_args', False)
if use_ssh_args and self._connection.transport == 'ssh':
ssh_args = [
@ -185,7 +185,7 @@ class ActionModule(ActionBase):
self._connection.get_option('ssh_common_args'),
self._connection.get_option('ssh_extra_args'),
]
_tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a])
_tmp_args['_ssh_args'] = ' '.join([a for a in ssh_args if a])
# Handle docker connection options
if self._remote_transport in DOCKER:
@ -225,7 +225,6 @@ class ActionModule(ActionBase):
# Parameter name needed by the ansible module
_tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'
_tmp_args['_local_rsync_password'] = task_vars.get('ansible_ssh_pass') or task_vars.get('ansible_password')
# rsync thinks that one end of the connection is localhost and the
# other is the host we're running the task for (Note: We use
@ -285,9 +284,6 @@ class ActionModule(ActionBase):
# told (via delegate_to) that a different host is the source of the
# rsync
if not use_delegate and remote_transport:
# Create a connection to localhost to run rsync on
new_stdin = self._connection._new_stdin
# Unlike port, there can be only one shell
localhost_shell = None
for host in C.LOCALHOST:
@ -316,7 +312,11 @@ class ActionModule(ActionBase):
localhost_executable = C.DEFAULT_EXECUTABLE
self._play_context.executable = localhost_executable
new_connection = connection_loader.get('local', self._play_context, new_stdin)
try:
new_connection = connection_loader.get('local', self._play_context)
except TypeError:
# Needed for ansible-core < 2.15
new_connection = connection_loader.get('local', self._play_context, self._connection._new_stdin)
self._connection = new_connection
# Override _remote_is_local as an instance attribute specifically for the synchronize use case
# ensuring we set local tmpdir correctly
@ -333,8 +333,9 @@ class ActionModule(ActionBase):
if src is None or dest is None:
return dict(failed=True, msg="synchronize requires both src and dest parameters are set")
# Determine if we need a user@
# Determine if we need a user@ and a password
user = None
password = task_vars.get('ansible_ssh_pass', None) or task_vars.get('ansible_password', None)
if not dest_is_local:
# Src and dest rsync "path" handling
if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False):
@ -344,10 +345,12 @@ class ActionModule(ActionBase):
user = task_vars.get('ansible_user') or self._play_context.remote_user
if not user:
user = C.DEFAULT_REMOTE_USER
else:
user = task_vars.get('ansible_user') or self._play_context.remote_user
if self._templar is not None:
user = self._templar.template(user)
# Private key handling
# Use the private_key parameter if passed else use context private_key_file
_tmp_args['private_key'] = _tmp_args.get('private_key', self._play_context.private_key_file)
@ -361,12 +364,17 @@ class ActionModule(ActionBase):
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports)
password = dest_host_inventory_vars.get('ansible_ssh_pass', None) or dest_host_inventory_vars.get('ansible_password', None)
if self._templar is not None:
password = self._templar.template(password)
else:
# Still need to munge paths (to account for roles) even if we aren't
# copying files between hosts
src = self._get_absolute_path(path=src)
dest = self._get_absolute_path(path=dest)
_tmp_args['_local_rsync_password'] = password
_tmp_args['src'] = src
_tmp_args['dest'] = dest
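As an aside, a minimal usage sketch of what the password plumbing above enables: synchronizing to a host whose inventory authenticates with a password instead of a key, so the destination host's ansible_password is handed to rsync. The host name, address and variable values below are invented for illustration.

    # inventory (illustrative)
    # backup01 ansible_host=192.0.2.10 ansible_user=deploy ansible_password=examplepass

    - name: Push site content to a password-authenticated host
      ansible.posix.synchronize:
        src: /srv/www/
        dest: /var/www/html/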

plugins/callback/cgroup_perf_recap.py

@ -11,8 +11,8 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
DOCUMENTATION = '''
callback: cgroup_perf_recap
callback_type: aggregate
name: cgroup_perf_recap
type: aggregate
requirements:
- whitelist in configuration
- cgroups

plugins/callback/debug.py

@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: debug
name: debug
type: stdout
short_description: formatted stdout/stderr display
description:

plugins/callback/json.py

@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: json
name: json
short_description: Ansible screen output as JSON
description:
- This callback converts all events into JSON output to stdout
@ -25,6 +25,16 @@ DOCUMENTATION = '''
- key: show_custom_stats
section: defaults
type: bool
json_indent:
name: Use indenting for the JSON output
description: 'If specified, use this many spaces for indenting in the JSON output. If <= 0, write to a single line.'
default: 4
env:
- name: ANSIBLE_JSON_INDENT
ini:
- key: json_indent
section: defaults
type: integer
notes:
- When using a strategy such as free, host_pinned, or a custom strategy, host results will
be added to new task results in ``.plays[].tasks[]``. As such, there will exist duplicate
@ -61,12 +71,19 @@ class CallbackModule(CallbackBase):
self._task_map = {}
self._is_lockstep = False
self.set_options()
self._json_indent = self.get_option('json_indent')
if self._json_indent <= 0:
self._json_indent = None
def _new_play(self, play):
self._is_lockstep = play.strategy in LOCKSTEP_CALLBACKS
return {
'play': {
'name': play.get_name(),
'id': to_text(play._uuid),
'path': to_text(play.get_path()),
'duration': {
'start': current_time()
}
@ -79,6 +96,7 @@ class CallbackModule(CallbackBase):
'task': {
'name': task.get_name(),
'id': to_text(task._uuid),
'path': to_text(task.get_path()),
'duration': {
'start': current_time()
}
@ -143,7 +161,7 @@ class CallbackModule(CallbackBase):
'global_custom_stats': global_custom_stats,
}
self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=4, sort_keys=True))
self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=self._json_indent, sort_keys=True))
def _record_task_result(self, on_info, result, **kwargs):
"""This function is used as a partial to add failed/skipped info in a single method"""

plugins/callback/jsonl.py (new file, 214 lines)

@ -0,0 +1,214 @@
# (c) 2016, Matt Martz <matt@sivel.net>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: jsonl
short_description: Ansible screen output as JSONL (lines in json format)
description:
- This callback converts all events into JSON output to stdout
- In contrast with ansible.posix.json, this callback uses less memory because it does not store results.
type: stdout
requirements:
- Set as stdout in config
options:
show_custom_stats:
name: Show custom stats
description: 'This adds the custom stats set via the set_stats plugin to the play recap'
default: False
env:
- name: ANSIBLE_SHOW_CUSTOM_STATS
ini:
- key: show_custom_stats
section: defaults
type: bool
json_indent:
name: Use indenting for the JSON output
description: 'If specified, use this many spaces for indenting in the JSON output. If not specified or <= 0, write to a single line.'
default: 0
env:
- name: ANSIBLE_JSON_INDENT
ini:
- key: json_indent
section: defaults
type: integer
notes:
- When using a strategy such as free, host_pinned, or a custom strategy, host results will
be added to new task results in ``.plays[].tasks[]``. As such, there will exist duplicate
task objects indicated by duplicate task IDs at ``.plays[].tasks[].task.id``, each with an
individual host result for the task.
'''
import datetime
import json
import copy
from functools import partial
from ansible.inventory.host import Host
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
LOCKSTEP_CALLBACKS = frozenset(('linear', 'debug'))
def current_time():
return '%sZ' % datetime.datetime.utcnow().isoformat()
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'ansible.posix.jsonl'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self.results = []
self._task_map = {}
self._is_lockstep = False
self.set_options()
self._json_indent = self.get_option('json_indent')
if self._json_indent <= 0:
self._json_indent = None
def _new_play(self, play):
self._is_lockstep = play.strategy in LOCKSTEP_CALLBACKS
return {
'play': {
'name': play.get_name(),
'id': to_text(play._uuid),
'path': to_text(play.get_path()),
'duration': {
'start': current_time()
}
},
'tasks': []
}
def _new_task(self, task):
return {
'task': {
'name': task.get_name(),
'id': to_text(task._uuid),
'path': to_text(task.get_path()),
'duration': {
'start': current_time()
}
},
'hosts': {}
}
def _find_result_task(self, host, task):
key = (host.get_name(), task._uuid)
return self._task_map.get(
key,
self.results[-1]['tasks'][-1]
)
def v2_playbook_on_play_start(self, play):
play_result = self._new_play(play)
self.results.append(play_result)
self._write_event('v2_playbook_on_play_start', play_result)
def v2_runner_on_start(self, host, task):
if self._is_lockstep:
return
key = (host.get_name(), task._uuid)
task_result = self._new_task(task)
self._task_map[key] = task_result
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_runner_on_start', task_result)
def v2_playbook_on_task_start(self, task, is_conditional):
if not self._is_lockstep:
return
task_result = self._new_task(task)
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_playbook_on_task_start', task_result)
def v2_playbook_on_handler_task_start(self, task):
if not self._is_lockstep:
return
task_result = self._new_task(task)
self.results[-1]['tasks'].append(task_result)
self._write_event('v2_playbook_on_handler_task_start', task_result)
def _convert_host_to_name(self, key):
if isinstance(key, (Host,)):
return key.get_name()
return key
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
summary = {}
for h in hosts:
s = stats.summarize(h)
summary[h] = s
custom_stats = {}
global_custom_stats = {}
if self.get_option('show_custom_stats') and stats.custom:
custom_stats.update(dict((self._convert_host_to_name(k), v) for k, v in stats.custom.items()))
global_custom_stats.update(custom_stats.pop('_run', {}))
output = {
'stats': summary,
'custom_stats': custom_stats,
'global_custom_stats': global_custom_stats,
}
self._write_event('v2_playbook_on_stats', output)
def _write_event(self, event_name, output):
output['_event'] = event_name
output['_timestamp'] = current_time()
self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=self._json_indent, separators=(',', ':'), sort_keys=True))
def _record_task_result(self, event_name, on_info, result, **kwargs):
"""This function is used as a partial to add failed/skipped info in a single method"""
host = result._host
task = result._task
result_copy = result._result.copy()
result_copy.update(on_info)
result_copy['action'] = task.action
task_result = self._find_result_task(host, task)
end_time = current_time()
task_result['task']['duration']['end'] = end_time
self.results[-1]['play']['duration']['end'] = end_time
task_result_copy = copy.deepcopy(task_result)
task_result_copy['hosts'][host.name] = result_copy
if not self._is_lockstep:
key = (host.get_name(), task._uuid)
del self._task_map[key]
self._write_event(event_name, task_result_copy)
def __getattribute__(self, name):
"""Return ``_record_task_result`` partial with a dict containing skipped/failed if necessary"""
if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', 'v2_runner_on_unreachable', 'v2_runner_on_skipped'):
return object.__getattribute__(self, name)
on = name.rsplit('_', 1)[1]
on_info = {}
if on in ('failed', 'skipped'):
on_info[on] = True
return partial(self._record_task_result, name, on_info)
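A short enabling sketch for the new jsonl callback; the stdout_callback key is standard ansible.cfg configuration and the playbook name is illustrative. Each event is then emitted as one JSON object per line, which suits line-oriented tooling.

    # ansible.cfg
    [defaults]
    stdout_callback = ansible.posix.jsonl

    # e.g. ansible-playbook site.yml | jq -c 'select(._event == "v2_playbook_on_stats")'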

plugins/callback/profile_roles.py

@ -7,13 +7,26 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: profile_roles
name: profile_roles
type: aggregate
short_description: adds timing information to roles
description:
- This callback module provides profiling for ansible roles.
requirements:
- whitelisting in configuration
options:
summary_only:
description:
- Only show summary, not individual task profiles.
Especially useful in combination with C(DISPLAY_SKIPPED_HOSTS=false) and/or C(ANSIBLE_DISPLAY_OK_HOSTS=false).
type: bool
default: False
env:
- name: PROFILE_ROLES_SUMMARY_ONLY
ini:
- section: callback_profile_roles
key: summary_only
version_added: 1.5.0
'''
import collections
@ -76,13 +89,26 @@ class CallbackModule(CallbackBase):
self.stats = collections.Counter()
self.totals = collections.Counter()
self.current = None
self.summary_only = None
super(CallbackModule, self).__init__()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.summary_only = self.get_option('summary_only')
def _display_tasktime(self):
if not self.summary_only:
self._display.display(tasktime())
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
self._display_tasktime()
timestamp(self)
if task._role:
@ -99,10 +125,10 @@ class CallbackModule(CallbackBase):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
self._display_tasktime()
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display_tasktime()
self._display.display(filled("", fchar="="))
timestamp(self)
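A configuration sketch for the summary_only option added above; the section and key names mirror the documentation block, and enabling the callback via callbacks_enabled follows current ansible.cfg conventions.

    # ansible.cfg
    [defaults]
    callbacks_enabled = ansible.posix.profile_roles

    [callback_profile_roles]
    summary_only = true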

plugins/callback/profile_tasks.py

@ -10,18 +10,18 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: profile_tasks
name: profile_tasks
type: aggregate
short_description: adds time information to tasks
description:
- Ansible callback plugin for timing individual tasks and overall execution time.
- "Mashup of 2 excellent original works: https://github.com/jlafon/ansible-profile,
https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old"
- "Format: C(<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>)"
- "Format: C(<task start timestamp>) C(<length of previous task>) C(<current elapsed playbook execution time>)"
- It also lists the top/bottom time consuming tasks in the summary (configurable)
- Before 2.4 only the environment variables were available for configuration.
requirements:
- whitelisting in configuration - see examples section below for details.
- enable in configuration - see examples section below for details.
options:
output_limit:
description: Number of tasks to display in the summary
@ -40,13 +40,25 @@ DOCUMENTATION = '''
ini:
- section: callback_profile_tasks
key: sort_order
summary_only:
description:
- Only show summary, not individual task profiles.
Especially useful in combination with C(DISPLAY_SKIPPED_HOSTS=false) and/or C(ANSIBLE_DISPLAY_OK_HOSTS=false).
type: bool
default: False
env:
- name: PROFILE_TASKS_SUMMARY_ONLY
ini:
- section: callback_profile_tasks
key: summary_only
version_added: 1.5.0
'''
EXAMPLES = '''
example: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = ansible.posix.profile_tasks
callbacks_enabled=ansible.posix.profile_tasks
sample output: >
#
# TASK: [ensure messaging security group exists] ********************************
@ -120,6 +132,7 @@ class CallbackModule(CallbackBase):
self.current = None
self.sort_order = None
self.summary_only = None
self.task_output_limit = None
super(CallbackModule, self).__init__()
@ -137,6 +150,8 @@ class CallbackModule(CallbackBase):
elif self.sort_order == 'none':
self.sort_order = None
self.summary_only = self.get_option('summary_only')
self.task_output_limit = self.get_option('output_limit')
if self.task_output_limit is not None:
if self.task_output_limit == 'all':
@ -144,11 +159,15 @@ class CallbackModule(CallbackBase):
else:
self.task_output_limit = int(self.task_output_limit)
def _display_tasktime(self):
if not self.summary_only:
self._display.display(tasktime())
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
self._display_tasktime()
timestamp(self)
# Record the start time of the current task
@ -171,10 +190,10 @@ class CallbackModule(CallbackBase):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
self._display_tasktime()
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display_tasktime()
self._display.display(filled("", fchar="="))
timestamp(self)
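The same option for profile_tasks, shown here via the environment variable names from the documentation block (shell lines are illustrative):

    export ANSIBLE_CALLBACKS_ENABLED=ansible.posix.profile_tasks
    export PROFILE_TASKS_SUMMARY_ONLY=true
    ansible-playbook site.yml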

plugins/callback/skippy.py

@ -7,19 +7,19 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: skippy
callback_type: stdout
name: skippy
type: stdout
requirements:
- set as main display callback
short_description: Ansible screen output that ignores skipped status
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_at_date: 2022-06-01
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
why: The 'default' callback plugin now supports this functionality
removed_at_date: '2022-06-01'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
description:
- This callback does the same as the default except it does not output skipped host/task/item status
- This callback does the same as the default except it does not output skipped host/task/item status
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default

plugins/callback/timer.py

@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: timer
callback_type: aggregate
name: timer
type: aggregate
requirements:
- whitelist in configuration
short_description: Adds time to play stats

plugins/module_utils/_respawn.py (new file, 45 lines)

@ -0,0 +1,45 @@
# Copyright (c) 2023 Maxwell G <maxwell@gtmx.me>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Helpers to respawn a module to run using the system interpreter
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from ansible.module_utils.common import respawn
except ImportError:
HAS_RESPAWN_UTIL = False
else:
HAS_RESPAWN_UTIL = True
SYSTEM_PYTHON_INTERPRETERS = (
"/usr/bin/libexec/platform-python",
"/usr/bin/python3",
"/usr/bin/python2",
"/usr/bin/python",
)
def respawn_module(module):
"""
Respawn an ansible module to use the first interpreter in
SYSTEM_PYTHON_INTERPRETERS that contains `module`.
Args:
module (str): Name of python module to search for
Returns:
Returns None if the module cannot be respawned.
"""
if respawn.has_respawned():
return
interpreter = respawn.probe_interpreters_for_module(
SYSTEM_PYTHON_INTERPRETERS, module
)
if interpreter:
respawn.respawn_module(interpreter)
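A minimal sketch of how a module might use this helper; the guarded import and the "firewall" module name mirror the firewalld code elsewhere in this commit, while the surrounding module body is hypothetical.

    from ansible_collections.ansible.posix.plugins.module_utils._respawn import (
        HAS_RESPAWN_UTIL,
        respawn_module,
    )

    try:
        import firewall.client  # noqa: F401
        HAS_FIREWALLD = True
    except ImportError:
        HAS_FIREWALLD = False

    if not HAS_FIREWALLD and HAS_RESPAWN_UTIL:
        # Re-exec under a system interpreter that provides the bindings;
        # the wrapper simply returns if no suitable interpreter is found.
        respawn_module("firewall")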

plugins/module_utils/_version.py

@ -3,7 +3,7 @@
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
#
"""Provides classes to represent module version numbers (one class for

plugins/module_utils/firewalld.py

@ -5,6 +5,8 @@
from __future__ import absolute_import, division, print_function
from ansible_collections.ansible.posix.plugins.module_utils.version import LooseVersion
from ansible_collections.ansible.posix.plugins.module_utils._respawn import respawn_module, HAS_RESPAWN_UTIL
from ansible.module_utils.basic import missing_required_lib
__metaclass__ = type
@ -313,7 +315,8 @@ class FirewallTransaction(object):
installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
if import_failure:
if HAS_RESPAWN_UTIL:
respawn_module("firewall")
module.fail_json(
msg='Python Module not found: firewalld and its python module are required for this module, \
version 0.2.11 or newer required (0.3.9 or newer for offline operations)'
msg=missing_required_lib('firewall') + '. Version 0.2.11 or newer required (0.3.9 or newer for offline operations)'
)

plugins/module_utils/version.py

@ -16,3 +16,5 @@ __metaclass__ = type
# from ansible.module_utils.compat.version import LooseVersion
from ._version import LooseVersion, StrictVersion
__all__ = ['LooseVersion', 'StrictVersion']

plugins/modules/acl.py

@ -20,7 +20,7 @@ options:
description:
- The full path of the file or object.
type: path
required: yes
required: true
aliases: [ name ]
state:
description:
@ -33,17 +33,18 @@ options:
description:
- Whether to follow symlinks on the path if a symlink is encountered.
type: bool
default: yes
default: true
default:
description:
- If the target is a directory, setting this to C(yes) will make it the default ACL for entities created inside the directory.
- Setting C(default) to C(yes) causes an error if the path is a file.
- If the target is a directory, setting this to C(true) will make it the default ACL for entities created inside the directory.
- Setting C(default) to C(true) causes an error if the path is a file.
type: bool
default: no
default: false
entity:
description:
- The actual user or group that the ACL applies to when matching entity types user or group are selected.
type: str
default: ""
etype:
description:
- The entity type of the ACL to apply, see C(setfacl) documentation for more info.
@ -69,13 +70,13 @@ options:
- Incompatible with C(state=query).
- Alias C(recurse) added in version 1.3.0.
type: bool
default: no
default: false
aliases: [ recurse ]
use_nfsv4_acls:
description:
- Use NFSv4 ACLs instead of POSIX ACLs.
type: bool
default: no
default: false
recalculate_mask:
description:
- Select if and when to recalculate the effective right masks of the files.
@ -115,7 +116,7 @@ EXAMPLES = r'''
entity: joe
etype: user
permissions: rw
default: yes
default: true
state: present
- name: Same as previous but using entry shorthand

plugins/modules/at.py

@ -44,7 +44,7 @@ options:
description:
- If a matching job is present a new job will not be added.
type: bool
default: no
default: false
requirements:
- at
author:
@ -68,7 +68,7 @@ EXAMPLES = r'''
command: ls -d / >/dev/null
count: 20
units: minutes
unique: yes
unique: true
'''
import os

plugins/modules/authorized_key.py

@ -34,13 +34,13 @@ options:
manage_dir:
description:
- Whether this module should manage the directory of the authorized key file.
- If set to C(yes), the module will create the directory, as well as set the owner and permissions
- If set to C(true), the module will create the directory, as well as set the owner and permissions
of an existing directory.
- Be sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys,
- Be sure to set C(manage_dir=false) if you are using an alternate directory for authorized_keys,
as set with C(path), since you could lock yourself out of SSH access.
- See the example below.
type: bool
default: yes
default: true
state:
description:
- Whether the given key (with the given key_options) should or should not be in the file.
@ -58,15 +58,15 @@ options:
- This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration of the loop.
- If you want multiple keys in the file you need to pass them all to C(key) in a single batch as mentioned above.
type: bool
default: no
default: false
validate_certs:
description:
- This only applies if using a https url as the source of the keys.
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
- If set to C(false), the SSL certificates will not be validated.
- This should only be set to C(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(true).
type: bool
default: yes
default: true
comment:
description:
- Change the comment on the public key.
@ -77,7 +77,7 @@ options:
description:
- Follow path symlink instead of replacing it.
type: bool
default: no
default: false
author: Ansible Core Team
'''
@ -106,7 +106,7 @@ EXAMPLES = r'''
state: present
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
path: /etc/ssh/authorized_keys/charlie
manage_dir: False
manage_dir: false
- name: Set up multiple authorized keys
ansible.posix.authorized_key:
@ -129,14 +129,14 @@ EXAMPLES = r'''
user: charlie
state: present
key: https://github.com/user.keys
validate_certs: False
validate_certs: false
- name: Set authorized key, removing all the authorized keys already set
ansible.posix.authorized_key:
user: root
key: "{{ lookup('file', 'public_keys/doe-jane') }}"
state: present
exclusive: True
exclusive: true
- name: Set authorized key for user ubuntu copying it from current user
ansible.posix.authorized_key:
@ -150,7 +150,7 @@ exclusive:
description: If the key has been forced to be exclusive or not.
returned: success
type: bool
sample: False
sample: false
key:
description: The key that the module was running against.
returned: success
@ -170,7 +170,7 @@ manage_dir:
description: Whether this module managed the directory of the authorized key file.
returned: success
type: bool
sample: True
sample: true
path:
description: Alternate path to the authorized_keys file
returned: success
@ -192,7 +192,7 @@ user:
type: str
sample: user
validate_certs:
description: This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated.
description: This only applies if using a https url as the source of the keys. If set to C(false), the SSL certificates will not be validated.
returned: success
type: bool
sample: true
@ -347,6 +347,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True, follow=False)
basedir = os.path.dirname(keysfile)
if not os.path.exists(basedir):
os.makedirs(basedir)
f = None
try:
f = open(keysfile, "w") # touches file so we can set ownership and perms
finally:

plugins/modules/firewalld.py

@ -19,6 +19,10 @@ options:
- Name of a service to add/remove to/from firewalld.
- The service must be listed in output of firewall-cmd --get-services.
type: str
protocol:
description:
- Name of a protocol to add/remove to/from firewalld.
type: str
port:
description:
- Name of a port or port range to add/remove to/from firewalld.
@ -80,15 +84,17 @@ options:
type: str
permanent:
description:
- Should this configuration be in the running firewalld configuration or persist across reboots.
- Whether to apply this change to the permanent firewalld configuration.
- As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 0.3.9).
- Note that if this is C(no), immediate is assumed C(yes).
- Note that if this is C(false), I(immediate) defaults to C(true).
type: bool
default: false
immediate:
description:
- Should this configuration be applied immediately, if set as permanent.
- Whether to apply this change to the runtime firewalld configuration.
- Defaults to C(true) if I(permanent=false).
type: bool
default: no
default: false
state:
description:
- Enable or disable a setting.
@ -108,8 +114,9 @@ options:
type: str
offline:
description:
- Whether to run this module even when firewalld is offline.
- Ignores I(immediate) if I(permanent=true) and firewalld is not running.
type: bool
default: false
target:
description:
- firewalld Zone target
@ -138,32 +145,46 @@ author:
'''
EXAMPLES = r'''
- name: permanently enable https service, also enable it immediately if possible
ansible.posix.firewalld:
service: https
state: enabled
permanent: true
immediate: true
offline: true
- name: permit traffic in default zone for https service
ansible.posix.firewalld:
service: https
permanent: yes
permanent: true
state: enabled
- name: permit ospf traffic
ansible.posix.firewalld:
protocol: ospf
permanent: true
state: enabled
- name: do not permit traffic in default zone on port 8081/tcp
ansible.posix.firewalld:
port: 8081/tcp
permanent: yes
permanent: true
state: disabled
- ansible.posix.firewalld:
port: 161-162/udp
permanent: yes
permanent: true
state: enabled
- ansible.posix.firewalld:
zone: dmz
service: http
permanent: yes
permanent: true
state: enabled
- ansible.posix.firewalld:
rich_rule: rule service name="ftp" audit limit value="1/m" accept
permanent: yes
permanent: true
state: enabled
- ansible.posix.firewalld:
@ -174,44 +195,44 @@ EXAMPLES = r'''
- ansible.posix.firewalld:
zone: trusted
interface: eth2
permanent: yes
permanent: true
state: enabled
- ansible.posix.firewalld:
masquerade: yes
masquerade: true
state: enabled
permanent: yes
permanent: true
zone: dmz
- ansible.posix.firewalld:
zone: custom
state: present
permanent: yes
permanent: true
- ansible.posix.firewalld:
zone: drop
state: enabled
permanent: yes
icmp_block_inversion: yes
permanent: true
icmp_block_inversion: true
- ansible.posix.firewalld:
zone: drop
state: enabled
permanent: yes
permanent: true
icmp_block: echo-request
- ansible.posix.firewalld:
zone: internal
state: present
permanent: yes
permanent: true
target: ACCEPT
- name: Redirect port 443 to 8443 with Rich Rule
ansible.posix.firewalld:
rich_rule: rule family=ipv4 forward-port port=443 protocol=tcp to-port=8443
zone: public
permanent: yes
immediate: yes
permanent: true
immediate: true
state: enabled
'''
@ -343,6 +364,47 @@ class ServiceTransaction(FirewallTransaction):
self.update_fw_settings(fw_zone, fw_settings)
class ProtocolTransaction(FirewallTransaction):
"""
ProtocolTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(ProtocolTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, protocol, timeout):
if protocol in self.fw.getProtocols(self.zone):
return True
else:
return False
def get_enabled_permanent(self, protocol, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
if protocol in fw_settings.getProtocols():
return True
else:
return False
def set_enabled_immediate(self, protocol, timeout):
self.fw.addProtocol(self.zone, protocol, timeout)
def set_enabled_permanent(self, protocol, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addProtocol(protocol)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, protocol, timeout):
self.fw.removeProtocol(self.zone, protocol)
def set_disabled_permanent(self, protocol, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeProtocol(protocol)
self.update_fw_settings(fw_zone, fw_settings)
class MasqueradeTransaction(FirewallTransaction):
"""
MasqueradeTransaction
@ -469,6 +531,7 @@ class InterfaceTransaction(FirewallTransaction):
old_zone_obj = self.fw.config.get_zone(zone)
if interface in old_zone_obj.interfaces:
iface_zone_objs.append(old_zone_obj)
if len(iface_zone_objs) > 1:
# Even it shouldn't happen, it's actually possible that
# the same interface is in several zone XML files
@ -478,18 +541,17 @@ class InterfaceTransaction(FirewallTransaction):
len(iface_zone_objs)
)
)
old_zone_obj = iface_zone_objs[0]
if old_zone_obj.name != self.zone:
old_zone_settings = FirewallClientZoneSettings(
self.fw.config.get_zone_config(old_zone_obj)
)
elif len(iface_zone_objs) == 1 and iface_zone_objs[0].name != self.zone:
old_zone_obj = iface_zone_objs[0]
old_zone_config = self.fw.config.get_zone_config(old_zone_obj)
old_zone_settings = FirewallClientZoneSettings(list(old_zone_config))
old_zone_settings.removeInterface(interface) # remove from old
self.fw.config.set_zone_config(
old_zone_obj,
old_zone_settings.settings
)
fw_settings.addInterface(interface) # add to new
self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
fw_settings.addInterface(interface) # add to new
self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
else:
old_zone_name = self.fw.config().getZoneOfInterface(interface)
if old_zone_name != self.zone:
@ -675,25 +737,33 @@ class ZoneTransaction(FirewallTransaction):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def get_enabled_permanent(self):
zones = self.fw.config().listZones()
zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones]
if self.zone in zone_names:
return True
if self.fw_offline:
zones = self.fw.config.get_zones()
zone_names = [self.fw.config.get_zone(z).name for z in zones]
else:
return False
zones = self.fw.config().listZones()
zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones]
return self.zone in zone_names
def set_enabled_immediate(self):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def set_enabled_permanent(self):
self.fw.config().addZone(self.zone, FirewallClientZoneSettings())
if self.fw_offline:
self.fw.config.new_zone(self.zone, FirewallClientZoneSettings().settings)
else:
self.fw.config().addZone(self.zone, FirewallClientZoneSettings())
def set_disabled_immediate(self):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def set_disabled_permanent(self):
zone_obj = self.fw.config().getZoneByName(self.zone)
zone_obj.remove()
if self.fw_offline:
zone = self.fw.config.get_zone(self.zone)
self.fw.config.remove_zone(zone)
else:
zone_obj = self.fw.config().getZoneByName(self.zone)
zone_obj.remove()
class ForwardPortTransaction(FirewallTransaction):
@ -740,18 +810,19 @@ def main():
icmp_block=dict(type='str'),
icmp_block_inversion=dict(type='str'),
service=dict(type='str'),
protocol=dict(type='str'),
port=dict(type='str'),
port_forward=dict(type='list', elements='dict'),
rich_rule=dict(type='str'),
zone=dict(type='str'),
immediate=dict(type='bool', default=False),
source=dict(type='str'),
permanent=dict(type='bool'),
permanent=dict(type='bool', default=False),
state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']),
timeout=dict(type='int', default=0),
interface=dict(type='str'),
masquerade=dict(type='str'),
offline=dict(type='bool'),
offline=dict(type='bool', default=False),
target=dict(type='str', choices=['default', 'ACCEPT', 'DROP', '%%REJECT%%']),
),
supports_check_mode=True,
@ -761,7 +832,7 @@ def main():
source=('permanent',),
),
mutually_exclusive=[
['icmp_block', 'icmp_block_inversion', 'service', 'port', 'port_forward', 'rich_rule',
['icmp_block', 'icmp_block_inversion', 'service', 'protocol', 'port', 'port_forward', 'rich_rule',
'interface', 'masquerade', 'source', 'target']
],
)
@ -772,38 +843,50 @@ def main():
timeout = module.params['timeout']
interface = module.params['interface']
masquerade = module.params['masquerade']
offline = module.params['offline']
# Sanity checks
FirewallTransaction.sanity_check(module)
# If neither permanent or immediate is provided, assume immediate (as
# written in the module's docs)
# `offline`, `immediate`, and `permanent` have a weird twisty relationship.
if offline:
# specifying offline without permanent makes no sense
if not permanent:
module.fail_json(msg='offline cannot be enabled unless permanent changes are allowed')
# offline overrides immediate to false if firewalld is offline
if fw_offline:
immediate = False
# immediate defaults to true if permanent is not enabled
if not permanent and not immediate:
immediate = True
# Verify required params are provided
if immediate and fw_offline:
module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
# Verify required params are provided
changed = False
msgs = []
icmp_block = module.params['icmp_block']
icmp_block_inversion = module.params['icmp_block_inversion']
service = module.params['service']
protocol = module.params['protocol']
rich_rule = module.params['rich_rule']
source = module.params['source']
zone = module.params['zone']
target = module.params['target']
port = None
if module.params['port'] is not None:
if '/' in module.params['port']:
port, protocol = module.params['port'].strip().split('/')
port, port_protocol = module.params['port'].strip().split('/')
else:
protocol = None
if not protocol:
port_protocol = None
if not port_protocol:
module.fail_json(msg='improper port format (missing protocol?)')
else:
port = None
port_protocol = None
port_forward_toaddr = ''
port_forward = None
@ -821,7 +904,7 @@ def main():
port_forward_toaddr = port_forward['toaddr']
modification = False
if any([icmp_block, icmp_block_inversion, service, port, port_forward, rich_rule,
if any([icmp_block, icmp_block_inversion, service, protocol, port, port_forward, rich_rule,
interface, masquerade, source, target]):
modification = True
if modification and desired_state in ['absent', 'present'] and target is None:
@ -846,12 +929,21 @@ def main():
msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state))
if icmp_block_inversion is not None:
# Type of icmp_block_inversion will be changed to boolean in a future release.
icmp_block_inversion_status = True
try:
icmp_block_inversion_status = boolean(icmp_block_inversion, True)
except TypeError:
module.warn('The value of the icmp_block_inversion option is "%s". '
'The type of the option will be changed from string to boolean in a future release. '
'To avoid unexpected behavior, please change the value to boolean.' % icmp_block_inversion)
expected_state = 'enabled' if (desired_state == 'enabled') == icmp_block_inversion_status else 'disabled'
transaction = IcmpBlockInversionTransaction(
module,
action_args=(),
zone=zone,
desired_state=desired_state,
desired_state=expected_state,
permanent=permanent,
immediate=immediate,
)
@ -861,14 +953,6 @@ def main():
if changed is True:
msgs.append("Changed icmp-block-inversion %s to %s" % (icmp_block_inversion, desired_state))
# Type of icmp_block_inversion will be changed to boolean in a future release.
try:
boolean(icmp_block_inversion, True)
except TypeError:
module.warn('The value of the icmp_block_inversion option is "%s". '
'The type of the option will be changed from string to boolean in a future release. '
'To avoid unexpected behavior, please change the value to boolean.' % icmp_block_inversion)
if service is not None:
transaction = ServiceTransaction(
@ -885,6 +969,22 @@ def main():
if changed is True:
msgs.append("Changed service %s to %s" % (service, desired_state))
if protocol is not None:
transaction = ProtocolTransaction(
module,
action_args=(protocol, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed protocol %s to %s" % (protocol, desired_state))
if source is not None:
transaction = SourceTransaction(
@ -903,7 +1003,7 @@ def main():
transaction = PortTransaction(
module,
action_args=(port, protocol, timeout),
action_args=(port, port_protocol, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
@ -915,7 +1015,7 @@ def main():
if changed is True:
msgs.append(
"Changed port %s to %s" % (
"%s/%s" % (port, protocol), desired_state
"%s/%s" % (port, port_protocol), desired_state
)
)
@ -973,12 +1073,21 @@ def main():
msgs = msgs + transaction_msgs
if masquerade is not None:
# Type of masquerade will be changed to boolean in a future release.
masquerade_status = True
try:
masquerade_status = boolean(masquerade, True)
except TypeError:
module.warn('The value of the masquerade option is "%s". '
'The type of the option will be changed from string to boolean in a future release. '
'To avoid unexpected behavior, please change the value to boolean.' % masquerade)
expected_state = 'enabled' if (desired_state == 'enabled') == masquerade_status else 'disabled'
transaction = MasqueradeTransaction(
module,
action_args=(),
zone=zone,
desired_state=desired_state,
desired_state=expected_state,
permanent=permanent,
immediate=immediate,
)
@ -986,14 +1095,6 @@ def main():
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
# Type of masquerade will be changed to boolean in a future release.
try:
boolean(masquerade, True)
except TypeError:
module.warn('The value of the masquerade option is "%s". '
'The type of the option will be changed from string to boolean in a future release. '
'To avoid unexpected behavior, please change the value to boolean.' % masquerade)
if target is not None:
transaction = ZoneTargetTransaction(

plugins/modules/firewalld_info.py

@ -17,7 +17,7 @@ options:
active_zones:
description: Gather information about active zones.
type: bool
default: no
default: false
zones:
description:
- Gather information about specific zones.
@ -36,7 +36,12 @@ author:
EXAMPLES = r'''
- name: Gather information about active zones
ansible.posix.firewalld_info:
active_zones: yes
active_zones: true
register: result
- name: Print default zone for debugging
ansible.builtin.debug:
var: result.firewalld_info.default_zone
- name: Gather information about specific zones
ansible.posix.firewalld_info:
@ -44,6 +49,7 @@ EXAMPLES = r'''
- public
- external
- internal
register: result
'''
RETURN = r'''
@ -78,7 +84,7 @@ firewalld_info:
returned: success
type: str
sample: 0.8.2
default_zones:
default_zone:
description:
- The zone name of default zone.
returned: success
@ -204,8 +210,8 @@ firewalld_info:
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import raise_from
from ansible.module_utils._text import to_native
from ansible_collections.ansible.posix.plugins.module_utils._respawn import respawn_module, HAS_RESPAWN_UTIL
from ansible_collections.ansible.posix.plugins.module_utils.version import StrictVersion
@ -317,6 +323,12 @@ def main():
)
# Exit with failure message if requirements modules are not installed.
if not HAS_DBUS and not HAS_FIREWALLD and HAS_RESPAWN_UTIL:
# Only respawn the module if both libraries are missing.
# If only one is available, then usage of the "wrong" (i.e. not the system one)
# python interpreter is likely not the problem.
respawn_module("firewall")
if not HAS_DBUS:
module.fail_json(msg=missing_required_lib('python-dbus'))
if not HAS_FIREWALLD:
@ -344,8 +356,9 @@ def main():
specified_zones = module.params['zones']
collect_zones = list(set(specified_zones) & set(all_zones))
ignore_zones = list(set(specified_zones) - set(collect_zones))
warn.append(
'Please note: zone:(%s) have been ignored in the gathering process.' % ','.join(ignore_zones))
if ignore_zones:
warn.append(
'Please note: zone:(%s) have been ignored in the gathering process.' % ','.join(ignore_zones))
else:
collect_zones = get_all_zones(client)

plugins/modules/mount.py

@ -31,13 +31,13 @@ options:
src:
description:
- Device (or NFS volume, or something else) to be mounted on I(path).
- Required when I(state) set to C(present) or C(mounted).
- Required when I(state) set to C(present), C(mounted) or C(ephemeral).
- Ignored when I(state) set to C(absent) or C(unmounted).
type: path
fstype:
description:
- Filesystem type.
- Required when I(state) is C(present) or C(mounted).
- Required when I(state) is C(present), C(mounted) or C(ephemeral).
type: str
opts:
description:
@ -49,18 +49,18 @@ options:
- Note that if set to C(null) and I(state) set to C(present),
it will cease to work and duplicate entries will be made
with subsequent runs.
- Has no effect on Solaris systems.
- Has no effect on Solaris systems or when used with C(ephemeral).
type: str
default: 0
default: '0'
passno:
description:
- Passno (see fstab(5)).
- Note that if set to C(null) and I(state) set to C(present),
it will cease to work and duplicate entries will be made
with subsequent runs.
- Deprecated on Solaris systems.
- Deprecated on Solaris systems. Has no effect when used with C(ephemeral).
type: str
default: 0
default: '0'
state:
description:
- If C(mounted), the device will be actively mounted and appropriately
@ -69,23 +69,35 @@ options:
- If C(unmounted), the device will be unmounted without changing I(fstab).
- C(present) only specifies that the device is to be configured in
I(fstab) and does not trigger or require a mount.
- C(ephemeral) only specifies that the device is to be mounted, without changing
I(fstab). If it is already mounted, a remount will be triggered.
This will always return changed=True. If the mount point I(path)
already has a device mounted on it, and its source is different from I(src),
the module will fail to avoid unexpected unmount or mount point override.
If the mount point is not present, the mount point will be created.
The I(fstab) is completely ignored. This option is added in version 1.5.0.
- C(absent) specifies that the mount point entry I(path) will be removed
from I(fstab) and will also unmount the mounted device and remove the
mount point. A mounted device will be unmounted regardless of I(src) or its
real source. C(absent) does not unmount recursively, and the module will
fail if multiple devices are mounted on the same mount point. Using
C(absent) with a mount point that is not registered in the I(fstab) has
no effect. Use C(unmounted) instead.
- C(remounted) specifies that the device will be remounted for when you
want to force a refresh on the mount itself (added in 2.9). This will
always return changed=true. If I(opts) is set, the options will be
applied to the remount, but will not change I(fstab). Additionally,
if I(opts) is set, and the remount command fails, the module will
error to prevent unexpected mount changes. Try using C(mounted)
instead to work around this issue.
instead to work around this issue. C(remounted) expects the mount point
to be present in the I(fstab). To remount a mount point not registered
in I(fstab), use C(ephemeral) instead, especially with BSD nodes.
- C(absent_from_fstab) specifies that the device mount's entry will be
removed from I(fstab). This option does not unmount it or delete the
mountpoint.
type: str
required: true
choices: [ absent, mounted, present, unmounted, remounted ]
choices: [ absent, absent_from_fstab, mounted, present, unmounted, remounted, ephemeral ]
fstab:
description:
- File to use instead of C(/etc/fstab).
@ -94,6 +106,7 @@ options:
- OpenBSD does not allow specifying alternate fstab files with mount so do not
use this on OpenBSD with any state that operates on the live filesystem.
- This parameter defaults to /etc/fstab or /etc/vfstab on Solaris.
- This parameter is ignored when I(state) is set to C(ephemeral).
type: str
boot:
description:
@ -105,14 +118,15 @@ options:
to mount options in I(/etc/fstab).
- To avoid mount option conflicts, if C(noauto) specified in C(opts),
mount module will ignore C(boot).
- This parameter is ignored when I(state) is set to C(ephemeral).
type: bool
default: yes
default: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
default: false
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as
default, but I(name) still works as well.
@ -186,9 +200,17 @@ EXAMPLES = r'''
src: 192.168.1.100:/nfs/ssd/shared_data
path: /mnt/shared_data
opts: rw,sync,hard
boot: no
boot: false
state: mounted
fstype: nfs
- name: Mount ephemeral SMB volume
ansible.posix.mount:
src: //192.168.1.200/share
path: /mnt/smb_share
opts: "rw,vers=3,file_mode=0600,dir_mode=0700,dom={{ ad_domain }},username={{ ad_username }},password={{ ad_password }}"
fstype: cifs
state: ephemeral
'''
import errno
@ -231,7 +253,7 @@ def _escape_fstab(v):
if isinstance(v, int):
return v
else:
return(
return (
v.
replace('\\', '\\134').
replace(' ', '\\040').
@ -435,6 +457,24 @@ def _set_fstab_args(fstab_file):
return result
def _set_ephemeral_args(args):
result = []
# Set fstype switch according to platform. SunOS/Solaris use -F
if platform.system().lower() == 'sunos':
result.append('-F')
else:
result.append('-t')
result.append(args['fstype'])
# Even if '-o remount' is already set, specifying multiple -o is valid
if args['opts'] != 'defaults':
result += ['-o', args['opts']]
result.append(args['src'])
return result
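A worked illustration of the argument assembly above; the input values are invented and Linux is assumed, so the fstype switch is -t rather than the SunOS -F.

    args = {'fstype': 'nfs', 'opts': 'rw,sync', 'src': '192.168.1.100:/export'}
    # _set_ephemeral_args(args) would return:
    # ['-t', 'nfs', '-o', 'rw,sync', '192.168.1.100:/export']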
def mount(module, args):
"""Mount up a path or remount if needed."""
@ -451,7 +491,11 @@ def mount(module, args):
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
if module.params['state'] != 'ephemeral':
cmd += _set_fstab_args(args['fstab'])
if module.params['state'] == 'ephemeral':
cmd += _set_ephemeral_args(args)
cmd += [name]
@ -503,18 +547,24 @@ def remount(module, args):
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
if module.params['state'] != 'ephemeral':
cmd += _set_fstab_args(args['fstab'])
if module.params['state'] == 'ephemeral':
cmd += _set_ephemeral_args(args)
cmd += [args['name']]
out = err = ''
try:
if platform.system().lower().endswith('bsd'):
if module.params['state'] != 'ephemeral' and platform.system().lower().endswith('bsd'):
# Note: Forcing BSDs to do umount/mount due to BSD remount not
# working as expected (suspect bug in the BSD mount command)
# Interested contributor could rework this to use mount options on
# the CLI instead of relying on fstab
# https://github.com/ansible/ansible-modules-core/issues/5591
# Note: this does not affect ephemeral state as all options
# are set on the CLI and fstab is expected to be ignored.
rc = 1
else:
rc, out, err = module.run_command(cmd)
@ -668,24 +718,66 @@ def get_linux_mounts(module, mntinfo_file="/proc/self/mountinfo"):
return mounts
def _is_same_mount_src(module, src, mountpoint, linux_mounts):
"""Return True if the mounted fs on mountpoint is the same source than src. Return False if mountpoint is not a mountpoint"""
# If the provided mountpoint is not a mountpoint, don't waste time
if (
not ismount(mountpoint) and
not is_bind_mounted(module, linux_mounts, mountpoint)):
return False
# Treat Linux bind mounts
if platform.system() == 'Linux' and linux_mounts is not None:
# For Linux bind mounts only: the mount command does not return
# the actual source for bind mounts, but the device of the source.
# is_bind_mounted() called with the 'src' parameter will return True if
the mountpoint is a bind mount AND the source FS is the same as 'src'.
# is_bind_mounted() is not reliable on Solaris, NetBSD and OpenBSD.
# But we can rely on 'mount -v' on all other platforms, and Linux non-bind mounts.
if is_bind_mounted(module, linux_mounts, mountpoint, src):
return True
# mount with parameter -v has a close behavior on Linux, *BSD, SunOS
# Requires -v with SunOS. Without -v, source and destination are reversed
# Output format differs from a system to another, but field[0:3] are consistent: [src, 'on', dest]
cmd = '%s -v' % module.get_bin_path('mount', required=True)
rc, out, err = module.run_command(cmd)
mounts = []
if len(out):
mounts = to_native(out).strip().split('\n')
else:
module.fail_json(msg="Unable to retrieve mount info with command '%s'" % cmd)
for mnt in mounts:
fields = mnt.split()
mp_src = fields[0]
mp_dst = fields[2]
if mp_src == src and mp_dst == mountpoint:
return True
return False
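To make the field positions concrete, a small sketch; the sample line is an assumed, typical Linux 'mount -v' entry.

    line = '/dev/sda1 on /boot type ext4 (rw,relatime)'
    fields = line.split()
    # fields[0:3] == ['/dev/sda1', 'on', '/boot'], so the loop above compares
    # fields[0] against src and fields[2] against the requested mountpoint.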
def main():
module = AnsibleModule(
argument_spec=dict(
boot=dict(type='bool', default=True),
dump=dict(type='str'),
dump=dict(type='str', default='0'),
fstab=dict(type='str'),
fstype=dict(type='str'),
path=dict(type='path', required=True, aliases=['name']),
opts=dict(type='str'),
passno=dict(type='str', no_log=False),
passno=dict(type='str', no_log=False, default='0'),
src=dict(type='path'),
backup=dict(type='bool', default=False),
state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted', 'remounted']),
state=dict(type='str', required=True, choices=['absent', 'absent_from_fstab', 'mounted', 'present', 'unmounted', 'remounted', 'ephemeral']),
),
supports_check_mode=True,
required_if=(
['state', 'mounted', ['src', 'fstype']],
['state', 'present', ['src', 'fstype']],
['state', 'ephemeral', ['src', 'fstype']]
),
)
@ -744,7 +836,7 @@ def main():
# handle mount on boot. To avoid mount option conflicts, if 'noauto'
# specified in 'opts', mount module will ignore 'boot'.
opts = args['opts'].split(',')
if 'noauto' in opts:
if module.params['boot'] and 'noauto' in opts:
args['warnings'].append("Ignore the 'boot' due to 'opts' contains 'noauto'.")
elif not module.params['boot']:
args['boot'] = 'no'
@ -756,15 +848,17 @@ def main():
# If fstab file does not exist, we first need to create it. This mainly
# happens when fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
try:
open(args['fstab'], 'a').close()
except PermissionError as e:
module.fail_json(msg="Failed to open %s due to permission issue" % args['fstab'])
except Exception as e:
module.fail_json(msg="Failed to open %s due to %s" % (args['fstab'], to_native(e)))
# If state is 'ephemeral', we do not need fstab file
if module.params['state'] != 'ephemeral':
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
try:
open(args['fstab'], 'a').close()
except PermissionError as e:
module.fail_json(msg="Failed to open %s due to permission issue" % args['fstab'])
except Exception as e:
module.fail_json(msg="Failed to open %s due to %s" % (args['fstab'], to_native(e)))
# absent:
# Remove from fstab and unmounted.
@ -775,12 +869,16 @@ def main():
# mounted:
# Add to fstab if not there and make sure it is mounted. If it has
# changed in fstab then remount it.
# ephemeral:
# Do not change fstab state, but mount.
state = module.params['state']
name = module.params['path']
changed = False
if state == 'absent':
if state == 'absent_from_fstab':
name, changed = unset_mount(module, args)
elif state == 'absent':
name, changed = unset_mount(module, args)
if changed and not module.check_mode:
@ -806,7 +904,7 @@ def main():
msg="Error unmounting %s: %s" % (name, msg))
changed = True
elif state == 'mounted':
elif state == 'mounted' or state == 'ephemeral':
dirs_created = []
if not os.path.exists(name) and not module.check_mode:
try:
@ -834,7 +932,11 @@ def main():
module.fail_json(
msg="Error making dir %s: %s" % (name, to_native(e)))
name, backup_lines, changed = _set_mount_save_old(module, args)
# ephemeral: completely ignore fstab
if state != 'ephemeral':
name, backup_lines, changed = _set_mount_save_old(module, args)
else:
name, backup_lines, changed = args['name'], [], False
res = 0
if (
@ -844,7 +946,26 @@ def main():
if changed and not module.check_mode:
res, msg = remount(module, args)
changed = True
# When 'state' == 'ephemeral', we don't know what is in fstab, and 'changed' is always False
if state == 'ephemeral':
# If state == 'ephemeral', check if the mountpoint src == module.params['src']
# If it doesn't, fail to prevent unwanted unmount or unwanted mountpoint override
if _is_same_mount_src(module, args['src'], args['name'], linux_mounts):
changed = True
if not module.check_mode:
res, msg = remount(module, args)
else:
module.fail_json(
msg=(
'Ephemeral mount point is already mounted with a different '
'source than the specified one. Failing in order to prevent an '
'unwanted unmount or override operation. Try replacing this command with '
'a "state: unmounted" followed by a "state: ephemeral", or use '
'a different destination path.'))
else:
# If not already mounted, mount it
changed = True
if not module.check_mode:
@ -856,7 +977,8 @@ def main():
# A non-working fstab entry may break the system at the reboot,
# so undo all the changes if possible.
try:
write_fstab(module, backup_lines, args['fstab'])
if state != 'ephemeral':
write_fstab(module, backup_lines, args['fstab'])
except Exception:
pass
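The new absent_from_fstab state has no example in the EXAMPLES block above, so here is a hedged task sketch (the path is illustrative): it removes the fstab entry while leaving the device mounted and the mount point in place.

    - name: Drop the fstab entry for /mnt/shared_data but keep it mounted
      ansible.posix.mount:
        path: /mnt/shared_data
        state: absent_from_fstab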

plugins/modules/patch.py

@ -37,7 +37,7 @@ options:
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
C(remote_src) is C(false), the patch source file is looked up from the
module's I(files) directory.
type: path
required: true
@@ -50,10 +50,10 @@ options:
default: present
remote_src:
description:
- If C(no), it will search for src at originating/controller machine, if C(yes) it will
- If C(false), it will search for src at originating/controller machine, if C(true) it will
go to the remote/target machine for the C(src).
type: bool
default: no
default: false
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
@@ -65,20 +65,20 @@ options:
description:
- Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies.
type: bool
default: no
default: false
binary:
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
- Setting to C(true) will disable patch's heuristic for transforming CRLF
line endings into LF.
- Line endings of src and dest must match.
- If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX.
- If set to C(false), C(patch) will replace CRLF in C(src) files on POSIX.
type: bool
default: no
default: false
ignore_whitespace:
description:
- Setting to C(yes) will ignore white space changes between patch and input..
- Setting to C(true) will ignore white space changes between patch and input.
type: bool
default: no
default: false
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
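A short usage sketch for the options documented above; file names are placeholders, and C(dest) is assumed from the module's full option list rather than from the excerpt shown here.

- name: Apply a patch shipped with the playbook to a remote file (placeholder paths)
  ansible.posix.patch:
    src: fix-app-conf.patch       # looked up in the role/playbook files/ directory because remote_src is false
    dest: /etc/myapp/app.conf     # 'dest' is assumed from the module's full option list
    remote_src: false
    backup: true
    ignore_whitespace: true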

View file

@@ -0,0 +1,76 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: rhel_facts
version_added: 1.5.0
short_description: Facts module to set or override RHEL specific facts.
description:
- Compatibility layer for using the "package" module on rpm-ostree based systems by setting the "pkg_mgr" fact correctly.
author:
- Adam Miller (@maxamillion)
requirements:
- rpm-ostree
seealso:
- module: ansible.builtin.package
options: {}
'''
EXAMPLES = '''
- name: Playbook to use the package module on all RHEL footprints
vars:
ansible_facts_modules:
- setup # REQUIRED to be run before all custom fact modules
- ansible.posix.rhel_facts
tasks:
- name: Ensure packages are installed
ansible.builtin.package:
name:
- htop
- ansible
state: present
'''
RETURN = """
ansible_facts:
description: Relevant Ansible Facts
returned: when needed
type: complex
contains:
pkg_mgr:
description: System-level package manager override
returned: when needed
type: str
sample: ansible.posix.rhel_rpm_ostree
"""
import os
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
supports_check_mode=True,
)
ansible_facts = {}
# Verify that the platform is an rpm-ostree based system
if os.path.exists("/run/ostree-booted"):
ansible_facts['pkg_mgr'] = 'ansible.posix.rhel_rpm_ostree'
module.exit_json(ansible_facts=ansible_facts, changed=False)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,124 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhel_rpm_ostree
version_added: 1.5.0
short_description: Ensure packages exist in a RHEL for Edge rpm-ostree based system
description:
- Compatibility layer for using the "package" module for RHEL for Edge systems utilizing the RHEL System Roles.
author:
- Adam Miller (@maxamillion)
requirements:
- rpm-ostree
options:
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
type: list
elements: str
default: []
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
type: str
choices: [ absent, installed, latest, present, removed ]
notes:
- This module does not support installing or removing packages to/from an overlay, as this is not supported
by RHEL for Edge; any packages needed should be defined in the osbuild Blueprint and provided to Image Builder
at build time. This module exists only for C(package) module compatibility.
'''
EXAMPLES = '''
- name: Ensure htop and ansible are installed on rpm-ostree based RHEL
ansible.posix.rhel_rpm_ostree:
name:
- htop
- ansible
state: present
'''
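Because the module cannot add packages to or remove them from the booted commit (see the notes above), C(absent)/C(removed) effectively assert that packages are not baked into the image; a sketch with placeholder package names:

- name: Fail if legacy tools ended up in the booted rpm-ostree commit (placeholder names)
  ansible.posix.rhel_rpm_ostree:
    name:
      - telnet
      - rsh
    state: absent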
RETURN = """
msg:
description: status of rpm transaction
returned: always
type: str
sample: "No changes made."
"""
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def locally_installed(module, pkgname):
(rc, out, err) = module.run_command('{0} -q {1}'.format(module.get_bin_path("rpm"), pkgname).split())
return (rc == 0)
def rpm_ostree_transaction(module):
pkgs = []
if module.params['state'] in ['present', 'installed', 'latest']:
for pkg in module.params['name']:
if not locally_installed(module, pkg):
pkgs.append(pkg)
elif module.params['state'] in ['absent', 'removed']:
for pkg in module.params['name']:
if locally_installed(module, pkg):
pkgs.append(pkg)
if not pkgs:
module.exit_json(msg="No changes made.")
else:
if module.params['state'] in ['present', 'installed', 'latest']:
module.fail_json(msg="The following packages are absent in the currently booted rpm-ostree commit: %s" ' '.join(pkgs))
else:
module.fail_json(msg="The following packages are present in the currently booted rpm-ostree commit: %s" ' '.join(pkgs))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', elements='str', aliases=['pkg'], default=[]),
state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']),
),
)
# Verify that the platform is an rpm-ostree based system
if not os.path.exists("/run/ostree-booted"):
module.fail_json(msg="Module rpm_ostree is only applicable for rpm-ostree based systems.")
try:
rpm_ostree_transaction(module)
except Exception as e:
module.fail_json(msg=to_text(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,125 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rpm_ostree_upgrade
short_description: Manage rpm-ostree upgrade transactions
description:
- Manage rpm-ostree upgrade transactions.
version_added: 1.5.0
author:
- Adam Miller (@maxamillion)
requirements:
- rpm-ostree
options:
os:
description:
- The OSNAME upon which to operate.
type: str
default: ""
required: false
cache_only:
description:
- Perform the transaction using only pre-cached data, do not download.
type: bool
default: false
required: false
allow_downgrade:
description:
- Allow for the upgrade to be a chronologically older tree.
type: bool
default: false
required: false
peer:
description:
- Force peer-to-peer connection instead of using a system message bus.
type: bool
default: false
required: false
'''
EXAMPLES = '''
- name: Upgrade the rpm-ostree image without options, accept all defaults
ansible.posix.rpm_ostree_upgrade:
- name: Upgrade the rpm-ostree image allowing downgrades
ansible.posix.rpm_ostree_upgrade:
allow_downgrade: true
'''
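One more sketch covering the remaining options described above; the OSNAME value is a placeholder:

- name: Upgrade a specific OSNAME from already cached data over a peer connection
  ansible.posix.rpm_ostree_upgrade:
    os: rhel-edge        # placeholder OSNAME
    cache_only: true
    peer: true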
RETURN = '''
msg:
description: The command standard output
returned: always
type: str
sample: 'No upgrade available.'
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_text
def rpm_ostree_transaction(module):
cmd = []
cmd.append(module.get_bin_path("rpm-ostree"))
cmd.append('upgrade')
if module.params['os']:
cmd += ['--os', module.params['os']]
if module.params['cache_only']:
cmd += ['--cache-only']
if module.params['allow_downgrade']:
cmd += ['--allow-downgrade']
if module.params['peer']:
cmd += ['--peer']
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
if to_text("No upgrade available.") in to_text(out):
module.exit_json(msg=out, changed=False)
else:
module.exit_json(msg=out, changed=True)
def main():
module = AnsibleModule(
argument_spec=dict(
os=dict(type='str', default=''),
cache_only=dict(type='bool', default=False),
allow_downgrade=dict(type='bool', default=False),
peer=dict(type='bool', default=False),
),
)
# Verify that the platform is an rpm-ostree based system
if not os.path.exists("/run/ostree-booted"):
module.fail_json(msg="Module rpm_ostree_upgrade is only applicable for rpm-ostree based systems.")
try:
rpm_ostree_transaction(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View file

@@ -22,9 +22,9 @@ options:
type: str
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot.
- Set to C(true) if the boolean setting should survive a reboot.
type: bool
default: 'no'
default: false
state:
description:
- Desired boolean value
@@ -49,8 +49,8 @@ EXAMPLES = r'''
- name: Set httpd_can_network_connect flag on and keep it persistent across reboots
ansible.posix.seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
state: true
persistent: true
'''
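For symmetry with the example above, a sketch that turns the boolean back off for the running system only, without persisting it across reboots:

- name: Clear httpd_can_network_connect for the current boot only
  ansible.posix.seboolean:
    name: httpd_can_network_connect
    state: false
    persistent: false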
import os
@@ -73,31 +73,14 @@ except ImportError:
HAVE_SEMANAGE = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import binary_type
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils._text import to_text
from ansible_collections.ansible.posix.plugins.module_utils._respawn import respawn_module, HAS_RESPAWN_UTIL
def get_runtime_status(ignore_selinux_state=False):
return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
# work around for selinux who changed its API, see
# https://github.com/ansible/ansible/issues/25651
if len(bools) > 0:
if isinstance(bools[0], binary_type):
name = to_bytes(name)
if name in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
@@ -173,7 +156,10 @@ def semanage_set_boolean_value(module, handle, name, value):
semanage.semanage_handle_destroy(handle)
module.fail_json(msg="Failed to modify boolean key with semanage")
if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
if (
selinux.is_selinux_enabled()
and semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0
):
semanage.semanage_handle_destroy(handle)
module.fail_json(msg="Failed to set boolean key active with semanage")
@@ -281,6 +267,12 @@ def main():
supports_check_mode=True,
)
if not HAVE_SELINUX and not HAVE_SEMANAGE and HAS_RESPAWN_UTIL:
# Only respawn the module if both libraries are missing.
# If only one is available, then usage of the "wrong" (i.e. not the system one)
# python interpreter is likely not the problem.
respawn_module("selinux")
if not HAVE_SELINUX:
module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)
@@ -308,12 +300,9 @@ def main():
# Feature only available in selinux library since 2012.
name = selinux.selinux_boolean_sub(name)
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
if persistent:
changed = semanage_boolean_value(module, name, state)
else:
elif selinux.is_selinux_enabled():
cur_value = get_boolean_value(module, name)
if cur_value != state:
changed = True

View file

@@ -32,7 +32,7 @@ options:
description:
- If set to I(true), will update also the kernel boot parameters when disabling/enabling SELinux.
- The C(grubby) tool must be present on the target system for this to work.
default: no
default: false
type: bool
version_added: '1.4.0'
configfile:
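A hedged sketch of the C(update_kernel_param) option described above; it assumes C(grubby) is available on the target, as the description requires:

- name: Disable SELinux and keep the kernel boot parameters in sync
  ansible.posix.selinux:
    state: disabled
    update_kernel_param: true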
@@ -107,6 +107,8 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.utils import get_file_lines
from ansible_collections.ansible.posix.plugins.module_utils._respawn import respawn_module, HAS_RESPAWN_UTIL
# getter subroutines
def get_config_state(configfile):
@@ -236,6 +238,8 @@ def main():
)
if not HAS_SELINUX:
if HAS_RESPAWN_UTIL:
respawn_module("selinux")
module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)
# global vars

View file

@@ -26,13 +26,13 @@ options:
description:
- Path on the source host that will be synchronized to the destination.
- The path can be absolute or relative.
type: str
type: path
required: true
dest:
description:
- Path on the destination host that will be synchronized from the source.
- The path can be absolute or relative.
type: str
type: path
required: true
dest_port:
description:
@@ -53,36 +53,36 @@ options:
description:
- Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
type: bool
default: yes
default: true
checksum:
description:
- Skip based on checksum, rather than mod-time & size. Note that the "archive" option is still enabled by default - the "checksum" option will
not disable it.
type: bool
default: no
default: false
compress:
description:
- Compress file data during the transfer.
- In most cases, leave this enabled unless it causes problems.
type: bool
default: yes
default: true
existing_only:
description:
- Skip creating new files on receiver.
type: bool
default: no
default: false
delete:
description:
- Delete files in I(dest) that do not exist (after transfer, not before) in the I(src) path.
- This option requires I(recursive=yes).
- This option requires I(recursive=true).
- This option ignores excluded files and behaves like the rsync opt C(--delete-after).
type: bool
default: no
default: false
dirs:
description:
- Transfer directories without recursing.
type: bool
default: no
default: false
recursive:
description:
- Recurse into directories.
@@ -97,7 +97,7 @@ options:
description:
- Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink.
type: bool
default: no
default: false
perms:
description:
- Preserve permissions.
@@ -132,43 +132,36 @@ options:
description:
- Put user@ for the remote paths.
- If you have a custom ssh config to define the remote user for a host
that does not match the inventory user, you should set this parameter to C(no).
that does not match the inventory user, you should set this parameter to C(false).
type: bool
default: yes
use_ssh_args:
description:
- In Ansible 2.10 and lower, it uses the ssh_args specified in C(ansible.cfg).
- In Ansible 2.11 and onwards, when set to C(true), it uses all SSH connection configurations like
C(ansible_ssh_args), C(ansible_ssh_common_args), and C(ansible_ssh_extra_args).
type: bool
default: no
default: true
ssh_connection_multiplexing:
description:
- SSH connection multiplexing for rsync is disabled by default to prevent misconfigured ControlSockets from resulting in failed SSH connections.
This is accomplished by setting the SSH C(ControlSocket) to C(none).
- Set this option to C(yes) to allow multiplexing and reduce SSH connection overhead.
- Note that simply setting this option to C(yes) is not enough;
- Set this option to C(true) to allow multiplexing and reduce SSH connection overhead.
- Note that simply setting this option to C(true) is not enough;
You must also configure SSH connection multiplexing in your SSH client config by setting values for
C(ControlMaster), C(ControlPersist) and C(ControlPath).
type: bool
default: no
default: false
rsync_opts:
description:
- Specify additional rsync options by passing in an array.
- Note that an empty string in C(rsync_opts) will end up transferring the current working directory.
type: list
default:
default: []
elements: str
partial:
description:
- Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
type: bool
default: no
default: false
verify_host:
description:
- Verify destination host key.
type: bool
default: no
default: false
private_key:
description:
- Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)).
@@ -178,14 +171,38 @@ options:
- Add a destination to hard link against during the rsync.
type: list
default:
elements: str
elements: path
delay_updates:
description:
- This option puts the temporary file from each updated file into a holding directory until the end of the transfer,
at which time all the files are renamed into place in rapid succession.
type: bool
default: yes
default: true
version_added: '1.3.0'
use_ssh_args:
description:
- In Ansible 2.10 and lower, it uses the ssh_args specified in C(ansible.cfg).
- In Ansible 2.11 and onwards, when set to C(true), it uses all SSH connection configurations like
C(ansible_ssh_args), C(ansible_ssh_common_args), and C(ansible_ssh_extra_args).
type: bool
default: false
_local_rsync_path:
description: Internal use only.
type: path
default: 'rsync'
required: false
_local_rsync_password:
description: Internal use only, never logged.
type: str
required: false
_substitute_controller:
description: Internal use only.
type: bool
default: false
_ssh_args:
description: Internal use only. See C(use_ssh_args) for ssh arg settings.
type: str
required: false
notes:
- rsync must be installed on both the local and remote host.
@@ -212,7 +229,7 @@ notes:
- link_dest is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees
of the source and destination are the same. Attempts to hardlink into a directory that is a subdirectory of the source will be prevented.
seealso:
- module: copy
- module: ansible.builtin.copy
- module: community.windows.win_robocopy
author:
- Timothy Appnel (@tima)
@@ -235,7 +252,7 @@ EXAMPLES = r'''
src: rsync://somehost.com/path/
dest: /some/absolute/path/
- name: Synchronization using rsync protocol on delegate host (push)
- name: Synchronization using rsync protocol on delegate host (push)
ansible.posix.synchronize:
src: /some/absolute/path/
dest: rsync://somehost.com/path/
@@ -252,27 +269,27 @@ EXAMPLES = r'''
ansible.posix.synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
archive: false
- name: Synchronization with --archive options enabled except for --recursive
ansible.posix.synchronize:
src: some/relative/path
dest: /some/absolute/path
recursive: no
recursive: false
- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled
ansible.posix.synchronize:
src: some/relative/path
dest: /some/absolute/path
checksum: yes
times: no
checksum: true
times: false
- name: Synchronization without --archive options enabled except use --links
ansible.posix.synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
links: yes
archive: false
links: true
- name: Synchronization of two paths both on the control machine
ansible.posix.synchronize:
@@ -302,8 +319,8 @@ EXAMPLES = r'''
ansible.posix.synchronize:
src: some/relative/path
dest: /some/absolute/path
delete: yes
recursive: yes
delete: true
recursive: true
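Two additional sketches for options documented above but not exercised in the existing examples; all paths are placeholders. The first assumes C(ControlMaster), C(ControlPersist) and C(ControlPath) are already configured in the controller's SSH client config, as the C(ssh_connection_multiplexing) description requires; the second hard-links unchanged files against a previous release directory on the receiver via C(link_dest).

- name: Push using the play's SSH arguments and pre-configured SSH multiplexing
  ansible.posix.synchronize:
    src: some/relative/path
    dest: /some/absolute/path
    use_ssh_args: true
    ssh_connection_multiplexing: true

- name: Sync a new release while hard-linking unchanged files against the previous one
  ansible.posix.synchronize:
    src: /srv/app/build/
    dest: /srv/app/releases/current/
    link_dest:
      - /srv/app/releases/previous/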
# This specific command is granted su privileges on the destination
- name: Synchronize using an alternate rsync command
@@ -362,11 +379,11 @@ def substitute_controller(path):
if not client_addr:
ssh_env_string = os.environ.get('SSH_CLIENT', None)
try:
client_addr, _ = ssh_env_string.split(None, 1)
client_addr, _ = ssh_env_string.split(None, 1) # pylint: disable=disallowed-name
except AttributeError:
ssh_env_string = os.environ.get('SSH_CONNECTION', None)
try:
client_addr, _ = ssh_env_string.split(None, 1)
client_addr, _ = ssh_env_string.split(None, 1) # pylint: disable=disallowed-name
except AttributeError:
pass
if not client_addr:
@@ -388,8 +405,8 @@ def is_rsh_needed(source, dest):
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='str', required=True),
dest=dict(type='str', required=True),
src=dict(type='path', required=True),
dest=dict(type='path', required=True),
dest_port=dict(type='int'),
delete=dict(type='bool', default=False),
private_key=dict(type='path'),
@@ -412,13 +429,14 @@ def main():
set_remote_user=dict(type='bool', default=True),
rsync_timeout=dict(type='int', default=0),
rsync_opts=dict(type='list', default=[], elements='str'),
ssh_args=dict(type='str'),
_ssh_args=dict(type='str'),
use_ssh_args=dict(type='bool', default=False),
ssh_connection_multiplexing=dict(type='bool', default=False),
partial=dict(type='bool', default=False),
verify_host=dict(type='bool', default=False),
delay_updates=dict(type='bool', default=True),
mode=dict(type='str', default='push', choices=['pull', 'push']),
link_dest=dict(type='list', elements='str'),
link_dest=dict(type='list', elements='path'),
),
supports_check_mode=True,
)
@@ -454,7 +472,7 @@ def main():
owner = module.params['owner']
group = module.params['group']
rsync_opts = module.params['rsync_opts']
ssh_args = module.params['ssh_args']
ssh_args = module.params['_ssh_args']
ssh_connection_multiplexing = module.params['ssh_connection_multiplexing']
verify_host = module.params['verify_host']
link_dest = module.params['link_dest']
@@ -572,7 +590,7 @@ def main():
# hardlink is actually a change
cmd.append('-vv')
for x in link_dest:
link_path = os.path.abspath(os.path.expanduser(x))
link_path = os.path.abspath(x)
destination_path = os.path.abspath(os.path.dirname(dest))
if destination_path.find(link_path) == 0:
module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest))
@@ -581,12 +599,6 @@ def main():
changed_marker = '<<CHANGED>>'
cmd.append('--out-format=%s' % shlex_quote(changed_marker + '%i %n%L'))
# expand the paths
if '@' not in source:
source = os.path.expanduser(source)
if '@' not in dest:
dest = os.path.expanduser(dest)
cmd.append(shlex_quote(source))
cmd.append(shlex_quote(dest))
cmdstr = ' '.join(cmd)

View file

@@ -38,14 +38,14 @@ options:
description:
- Use this option to ignore errors about unknown keys.
type: bool
default: 'no'
default: false
reload:
description:
- If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
updated. If C(no), does not reload I(sysctl) even if the
- If C(true), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
updated. If C(false), does not reload I(sysctl) even if the
C(sysctl_file) is updated.
type: bool
default: 'yes'
default: true
sysctl_file:
description:
- Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).
@@ -53,9 +53,9 @@ options:
type: path
sysctl_set:
description:
- Verify token value with the sysctl command and set with -w if necessary
- Verify token value with the sysctl command and set with -w if necessary.
type: bool
default: 'no'
default: false
author:
- David CHANIAL (@davixx)
'''
@@ -78,21 +78,21 @@ EXAMPLES = r'''
name: kernel.panic
value: '3'
sysctl_file: /tmp/test_sysctl.conf
reload: no
reload: false
# Set ip forwarding on in /proc and verify token value with the sysctl command
- ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: yes
sysctl_set: true
# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
- ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: yes
sysctl_set: true
state: present
reload: yes
reload: true
'''
# ==============================================================

View file

@@ -4,13 +4,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell import ShellBase
DOCUMENTATION = '''
name: csh
plugin_type: shell
short_description: C shell (/bin/csh)
description:
- When you have no other option than to use csh
@@ -18,6 +13,10 @@ DOCUMENTATION = '''
- shell_common
'''
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):

View file

@@ -4,13 +4,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell.sh import ShellModule as ShModule
DOCUMENTATION = '''
name: fish
plugin_type: shell
short_description: fish shell (/bin/fish)
description:
- This is here because some people are restricted to fish.
@@ -18,6 +13,10 @@ DOCUMENTATION = '''
- shell_common
'''
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):