Mirror of https://github.com/ansible-collections/community.general.git, synced 2026-03-22 05:09:12 +00:00
modules [t-z]*: use f-strings (#10978)

* modules [t-z]*: use f-strings
* add changelog frag
* remove extraneous file
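The change is mechanical throughout: percent-interpolation and str.format() calls become f-strings, which evaluate the expressions in braces in place. A minimal sketch of the pattern, with variable names borrowed from the taiga_issue and telegram hunks below and illustrative values:

    project_name = 'backlog'
    token, api_method = 'TOKEN', 'sendMessage'

    # before: deferred formatting via % and str.format()
    old_msg = "Unable to find project %s" % project_name
    old_url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method)

    # after: f-strings interpolate the expressions directly
    new_msg = f"Unable to find project {project_name}"
    new_url = f'https://api.telegram.org/bot{token}/{api_method}'

    assert old_msg == new_msg and old_url == new_url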
This commit is contained in:
parent af246f8de3
commit adcc683da7

45 changed files with 514 additions and 536 deletions
changelogs/fragments/10978-mod-fstr-tz.yml (new file)
@@ -0,0 +1,45 @@
+minor_changes:
+  - taiga_issue - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - telegram - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - terraform - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - timezone - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - twilio - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - typetalk - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - udm_dns_record - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - udm_dns_zone - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - udm_group - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - udm_share - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - udm_user - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - ufw - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - urpmi - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - usb_facts - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vdo - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vertica_configuration - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vertica_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vertica_role - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vertica_schema - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vertica_user - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vexata_eg - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vexata_volume - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - vmadm - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - wakeonlan - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - wdc_redfish_command - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - wdc_redfish_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xattr - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xbps - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xcc_redfish_command - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xenserver_facts - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xenserver_guest - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xfconf - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xfs_quota - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - xml - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - yarn - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - yum_versionlock - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zfs - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zfs_delegate_admin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zfs_facts - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zpool - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zpool_facts - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zypper - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zypper_repository - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).
+  - zypper_repository_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10978).

@@ -123,7 +123,6 @@ import traceback
 from os import getenv
 from os.path import isfile
 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native

 TAIGA_IMP_ERR = None
 try:

@@ -169,28 +168,28 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority,
         user_id = api.me().id
         project_list = [x for x in api.projects.list(member=user_id) if x.name == project_name]
         if len(project_list) != 1:
-            return False, changed, "Unable to find project %s" % project_name, {}
+            return False, changed, f"Unable to find project {project_name}", {}
         project = project_list[0]
         project_id = project.id

         priority_list = [x for x in api.priorities.list(project=project_id) if x.name == issue_priority]
         if len(priority_list) != 1:
-            return False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {}
+            return False, changed, f"Unable to find issue priority {issue_priority} for project {project_name}", {}
         priority_id = priority_list[0].id

         status_list = [x for x in api.issue_statuses.list(project=project_id) if x.name == issue_status]
         if len(status_list) != 1:
-            return False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {}
+            return False, changed, f"Unable to find issue status {issue_status} for project {project_name}", {}
         status_id = status_list[0].id

         type_list = [x for x in project.list_issue_types() if x.name == issue_type]
         if len(type_list) != 1:
-            return False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {}
+            return False, changed, f"Unable to find issue type {issue_type} for project {project_name}", {}
         type_id = type_list[0].id

         severity_list = [x for x in project.list_severities() if x.name == issue_severity]
         if len(severity_list) != 1:
-            return False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {}
+            return False, changed, f"Unable to find severity {issue_severity} for project {project_name}", {}
         severity_id = severity_list[0].id

         issue = {

@@ -243,10 +242,10 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority,

             else:
                 # More than 1 matching issue
-                return False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {}
+                return False, changed, f"More than one issue with subject {issue_subject} in project {project_name}", {}

     except TaigaException as exc:
-        msg = "An exception happened: %s" % to_native(exc)
+        msg = f"An exception happened: {exc}"
         return False, changed, msg, {}

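A note on the import dropped in the first hunk: to_native() was only used in the exception message above, and f"{exc}" renders the exception via str(), which is effectively what to_native() did for exceptions on Python 3, so the helper import could go. A minimal check:

    try:
        raise ValueError('boom')
    except ValueError as exc:
        assert f"An exception happened: {exc}" == "An exception happened: boom"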
@@ -284,7 +283,7 @@ def main():
     issue_attachment_description = module.params['attachment_description']
     if issue_attachment:
         if not isfile(issue_attachment):
-            msg = "%s is not a file" % issue_attachment
+            msg = f"{issue_attachment} is not a file"
             module.fail_json(msg=msg)
     issue_tags = module.params['tags']
     state = module.params['state']

@@ -112,7 +112,7 @@ def main():
     if api_args['parse_mode'] == 'plain':
         del api_args['parse_mode']

-    url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method)
+    url = f'https://api.telegram.org/bot{token}/{api_method}'

     if module.check_mode:
         module.exit_json(changed=False)

@@ -127,11 +127,7 @@ def main():
     else:
         body = json.loads(info['body'])
         module.fail_json(
-            msg="Failed to send message, return status = {status}\n"
-                "url = {api_url}\n"
-                "api_args = {api_args}".format(
-                status=info['status'], api_url=url, api_args=api_args
-            ),
+            msg=f"Failed to send message, return status = {info['status']}\nurl = {url}\napi_args = {api_args}",
             telegram_error=body['description'],
         )

@@ -300,9 +300,9 @@ def preflight_validation(bin_path, project_path, version, variables_args=None, p
     if project_path is None or '/' not in project_path:
         module.fail_json(msg="Path for Terraform project can not be None or ''.")
     if not os.path.exists(bin_path):
-        module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
+        module.fail_json(msg=f"Path for Terraform binary '{bin_path}' doesn't exist on this host - check the path and try again please.")
     if not os.path.isdir(project_path):
-        module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+        module.fail_json(msg=f"Path for Terraform project '{project_path}' doesn't exist on this host - check the path and try again please.")
     cmd = [bin_path, 'validate']
     if no_color:
         cmd.append('-no-color')

@@ -316,7 +316,7 @@ def _state_args(state_file):
     if not state_file:
         return []
     if not os.path.exists(state_file):
-        module.warn('Could not find state_file "{0}", the process will not destroy any resources, please check your state file path.'.format(state_file))
+        module.warn(f'Could not find state_file "{state_file}", the process will not destroy any resources, please check your state file path.')
     return ['-state', state_file]

@@ -328,7 +328,7 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
     for key, val in backend_config.items():
         command.extend([
             '-backend-config',
-            '{0}={1}'.format(key, val)
+            f'{key}={val}'
         ])
     if backend_config_files:
         for f in backend_config_files:

@@ -351,7 +351,7 @@ def get_workspace_context(bin_path, project_path, no_color=True):

     rc, out, err = module.run_command(command, cwd=project_path)
     if rc != 0:
-        module.warn("Failed to list Terraform workspaces:\n{0}".format(err))
+        module.warn(f"Failed to list Terraform workspaces:\n{err}")
     for item in out.split('\n'):
         stripped_item = item.strip()
         if not stripped_item:

@@ -424,24 +424,15 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
     elif rc == 1:
         # failure to plan
         module.fail_json(
-            msg='Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
-                out=out,
-                err=err,
-                cmd=' '.join(plan_command),
-                args=' '.join([shlex_quote(arg) for arg in variables_args])
-            )
+            msg=(f"Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\n"
+                 f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}")
         )
     elif rc == 2:
         # changes, but successful
         return plan_path, True, out, err, plan_command if state == 'planned' else command

-    module.fail_json(msg='Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
-        rc=rc,
-        out=out,
-        err=err,
-        cmd=' '.join(plan_command),
-        args=' '.join([shlex_quote(arg) for arg in variables_args])
-    ))
+    module.fail_json(msg=f"Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\n"
+                     f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}")


 def get_diff(diff_output):

@@ -578,7 +569,7 @@ def main():
     command.extend(DESTROY_ARGS)

     if state == 'present' and module.params.get('parallelism') is not None:
-        command.append('-parallelism=%d' % module.params.get('parallelism'))
+        command.append(f"-parallelism={module.params.get('parallelism')}")

     def format_args(vars):
         if isinstance(vars, str):

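One nuance of this hunk, as an observation rather than anything stated in the commit: '%d' formats through the integer protocol while an f-string calls str() on the value. For the int that parallelism is declared as, the two agree:

    n = 4
    assert '-parallelism=%d' % n == f"-parallelism={n}"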
@@ -595,11 +586,11 @@ def main():
         if isinstance(vars, dict):
             for k, v in vars.items():
                 if isinstance(v, dict):
-                    ret_out.append('{0}={{{1}}}'.format(k, process_complex_args(v)))
+                    ret_out.append(f'{k}={{{process_complex_args(v)}}}')
                 elif isinstance(v, list):
-                    ret_out.append("{0}={1}".format(k, process_complex_args(v)))
+                    ret_out.append(f"{k}={process_complex_args(v)}")
                 elif isinstance(v, (int, float, str, bool)):
-                    ret_out.append('{0}={1}'.format(k, format_args(v)))
+                    ret_out.append(f'{k}={format_args(v)}')
                 else:
                     # only to handle anything unforeseen
                     module.fail_json(msg="Supported types are, dictionaries, lists, strings, integers, boolean and float.")

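The nested-brace conversions above are the trickiest in this PR: inside an f-string, {{ and }} are escapes for literal braces, so only the innermost braces interpolate. A standalone sketch with illustrative values:

    k, v = 'tags', 'a=1'
    assert f'{k}={{{v}}}' == 'tags={a=1}'   # {{ and }} render as literal { and }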
@@ -607,16 +598,16 @@ def main():
             l_out = []
             for item in vars:
                 if isinstance(item, dict):
-                    l_out.append("{{{0}}}".format(process_complex_args(item)))
+                    l_out.append(f"{{{process_complex_args(item)}}}")
                 elif isinstance(item, list):
-                    l_out.append("{0}".format(process_complex_args(item)))
+                    l_out.append(f"{process_complex_args(item)}")
                 elif isinstance(item, (str, int, float, bool)):
                     l_out.append(format_args(item))
                 else:
                     # only to handle anything unforeseen
                     module.fail_json(msg="Supported types are, dictionaries, lists, strings, integers, boolean and float.")

-            ret_out.append("[{0}]".format(",".join(l_out)))
+            ret_out.append(f"[{','.join(l_out)}]")
         return ",".join(ret_out)

     variables_args = []

@@ -625,30 +616,30 @@ def main():
             if isinstance(v, dict):
                 variables_args.extend([
                     '-var',
-                    '{0}={{{1}}}'.format(k, process_complex_args(v))
+                    f'{k}={{{process_complex_args(v)}}}'
                 ])
             elif isinstance(v, list):
                 variables_args.extend([
                     '-var',
-                    '{0}={1}'.format(k, process_complex_args(v))
+                    f'{k}={process_complex_args(v)}'
                 ])
             # on the top-level we need to pass just the python string with necessary
             # terraform string escape sequences
             elif isinstance(v, str):
                 variables_args.extend([
                     '-var',
-                    "{0}={1}".format(k, v)
+                    f"{k}={v}"
                 ])
             else:
                 variables_args.extend([
                     '-var',
-                    '{0}={1}'.format(k, format_args(v))
+                    f'{k}={format_args(v)}'
                 ])
     else:
         for k, v in variables.items():
             variables_args.extend([
                 '-var',
-                '{0}={1}'.format(k, v)
+                f'{k}={v}'
             ])

     if variables_files:

@@ -663,7 +654,7 @@ def main():
     else:
         command.append('-lock=false')
     if module.params.get('lock_timeout') is not None:
-        command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+        command.append(f"-lock-timeout={module.params.get('lock_timeout')}s")

     for t in (module.params.get('targets') or []):
         command.extend(['-target', t])

@@ -676,10 +667,10 @@ def main():
     if state == 'absent':
         command.extend(variables_args)
     elif state == 'present' and plan_file:
-        if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+        if any([os.path.isfile(f"{project_path}/{plan_file}"), os.path.isfile(plan_file)]):
             command.append(plan_file)
         else:
-            module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+            module.fail_json(msg=f'Could not find plan_file "{plan_file}", check the path and try again.')
     else:
         plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
                                                                      module.params.get('targets'), state, APPLY_ARGS, plan_file, no_color)

@@ -726,11 +717,10 @@ def main():
         rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
         outputs = {}
         if rc == 1:
-            module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+            module.warn(f"Could not get Terraform outputs. This usually means none have been defined.\nstdout: {outputs_text}\nstderr: {outputs_err}")
         elif rc != 0:
             module.fail_json(
-                msg="Failure when getting Terraform outputs. "
-                    "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+                msg=f"Failure when getting Terraform outputs. Exited {rc}.\nstdout: {outputs_text}\nstderr: {outputs_err}",
                 command=' '.join(outputs_command))
         else:
             outputs = json.loads(outputs_text)

@@ -97,7 +97,7 @@ class Timezone(object):
             if rc == 0:
                 return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
             else:
-                module.debug('timedatectl command was found but not usable: %s. using other method.' % stderr)
+                module.debug(f'timedatectl command was found but not usable: {stderr}. using other method.')
                 return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
         else:
             return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)

@@ -121,7 +121,7 @@ class Timezone(object):
             if AIXoslevel >= 61:
                 return super(Timezone, AIXTimezone).__new__(AIXTimezone)
             else:
-                module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+                module.fail_json(msg=f'AIX os level must be >= 61 for timezone module (Target: {AIXoslevel}).')
         else:
             # Not supported yet
             return super(Timezone, Timezone).__new__(Timezone)

@@ -173,7 +173,7 @@ class Timezone(object):
         """
         (rc, stdout, stderr) = self.module.run_command(list(commands), check_rc=True)
         if kwargs.get('log', False):
-            self.msg.append('executed `%s`' % ' '.join(commands))
+            self.msg.append(f"executed `{' '.join(commands)}`")
         return stdout

     def diff(self, phase1='before', phase2='after'):

@@ -243,9 +243,9 @@ class Timezone(object):

     def _verify_timezone(self):
         tz = self.value['name']['planned']
-        tzfile = '/usr/share/zoneinfo/%s' % tz
+        tzfile = f'/usr/share/zoneinfo/{tz}'
         if not os.path.isfile(tzfile):
-            self.abort('given timezone "%s" is not available' % tz)
+            self.abort(f'given timezone "{tz}" is not available')
         return tzfile

@@ -391,7 +391,7 @@ class NosystemdTimezone(Timezone):
                 self.regexps['name'] = self.dist_regexps['redhat']
                 self.tzline_format = self.dist_tzline_format['redhat']
             else:
-                self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+                self.abort(f'could not read configuration file "{self.conf_files["name"]}"')
         else:
             # The key for timezone might be `ZONE` or `TIMEZONE`
             # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).

@@ -434,7 +434,7 @@ class NosystemdTimezone(Timezone):
             if self._allow_ioerror(err, key):
                 lines = []
             else:
-                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+                self.abort(f'tried to configure {key} using a file "{filename}", but could not read it')
         # Find the all matched lines
         matched_indices = []
         for i, line in enumerate(lines):

@@ -454,8 +454,8 @@ class NosystemdTimezone(Timezone):
             with open(filename, 'w') as file:
                 file.writelines(lines)
         except IOError:
-            self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
-        self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+            self.abort(f'tried to configure {key} using a file "{filename}", but could not write to it')
+        self.msg.append(f'Added 1 line and deleted {len(matched_indices)} line(s) on {filename}')

     def _get_value_from_config(self, key, phase):
         filename = self.conf_files[key]

@@ -471,7 +471,7 @@ class NosystemdTimezone(Timezone):
             elif key == 'name':
                 return 'n/a'
             else:
-                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+                self.abort(f'tried to configure {key} using a file "{filename}", but could not read it')
         else:
             try:
                 value = self.regexps[key].search(status).group(1)

@@ -489,7 +489,7 @@ class NosystemdTimezone(Timezone):
                 # the timezone config file, so we ignore this error.
                 return 'n/a'
             else:
-                self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+                self.abort(f'tried to configure {key} using a file "{filename}", but could not find a valid value in it')
         else:
             if key == 'hwclock':
                 # convert yes/no -> UTC/local

@@ -542,12 +542,12 @@ class NosystemdTimezone(Timezone):
                 # If /etc/localtime is not a symlink best we can do is compare it with
                 # the 'planned' zone info file and return 'n/a' if they are different.
                 try:
-                    if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+                    if not filecmp.cmp('/etc/localtime', f"/usr/share/zoneinfo/{planned}"):
                         return 'n/a'
                 except Exception:
                     return 'n/a'
         else:
-            self.abort('unknown parameter "%s"' % key)
+            self.abort(f'unknown parameter "{key}"')
         return value

     def set_timezone(self, value):

@@ -568,7 +568,7 @@ class NosystemdTimezone(Timezone):
         if self.conf_files['hwclock'] is not None:
             self._edit_file(filename=self.conf_files['hwclock'],
                             regexp=self.regexps['hwclock'],
-                            value='UTC=%s\n' % utc,
+                            value=f'UTC={utc}\n',
                             key='hwclock')
         self.execute(self.update_hwclock, '--systohc', option, log=True)

@@ -578,7 +578,7 @@ class NosystemdTimezone(Timezone):
         elif key == 'hwclock':
             self.set_hwclock(value)
         else:
-            self.abort('unknown parameter "%s"' % key)
+            self.abort(f'unknown parameter "{key}"')


 class SmartOSTimezone(Timezone):

@@ -611,7 +611,7 @@ class SmartOSTimezone(Timezone):
             except Exception:
                 self.module.fail_json(msg='Failed to read /etc/default/init')
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')

     def set(self, key, value):
         """Set the requested timezone through sm-set-timezone, an invalid timezone name

@@ -627,11 +627,11 @@ class SmartOSTimezone(Timezone):

             # sm-set-timezone knows no state and will always set the timezone.
             # XXX: https://github.com/joyent/smtools/pull/2
-            m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+            m = re.match(rf'^\* Changed (to)? timezone (to)? ({value}).*', stdout.splitlines()[1])
             if not (m and m.groups()[-1] == value):
                 self.module.fail_json(msg='Failed to set timezone')
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')


 class DarwinTimezone(Timezone):

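The SmartOS hunk combines the raw and format prefixes: rf'...' keeps the regex backslashes literal while still interpolating {value}, matching the old r'...' % value behavior. A minimal sketch (the sample sm-set-timezone output line is assumed):

    import re
    value = 'UTC'
    pattern = rf'^\* Changed (to)? timezone (to)? ({value}).*'
    assert re.match(pattern, '* Changed to timezone to UTC.')

In both the old and new form the value goes into the pattern unescaped, so behavior is unchanged; re.escape() would only matter if timezone names could contain regex metacharacters.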
@@ -665,7 +665,7 @@ class DarwinTimezone(Timezone):
         out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
         tz_list = list(map(lambda x: x.strip(), out))
         if tz not in tz_list:
-            self.abort('given timezone "%s" is not available' % tz)
+            self.abort(f'given timezone "{tz}" is not available')
         return tz

     def get(self, key, phase):

@@ -674,13 +674,13 @@ class DarwinTimezone(Timezone):
             value = self.regexps[key].search(status).group(1)
             return value
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')

     def set(self, key, value):
         if key == 'name':
             self.execute(self.systemsetup, '-settimezone', value, log=True)
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')


 class BSDTimezone(Timezone):

@@ -733,18 +733,18 @@ class BSDTimezone(Timezone):
         if key == 'name':
             return self.__get_timezone()
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')

     def set(self, key, value):
         if key == 'name':
             # First determine if the requested timezone is valid by looking in
             # the zoneinfo directory.
-            zonefile = '/usr/share/zoneinfo/' + value
+            zonefile = f"/usr/share/zoneinfo/{value}"
             try:
                 if not os.path.isfile(zonefile):
-                    self.module.fail_json(msg='%s is not a recognized timezone' % value)
+                    self.module.fail_json(msg=f'{value} is not a recognized timezone')
             except Exception:
-                self.module.fail_json(msg='Failed to stat %s' % zonefile)
+                self.module.fail_json(msg=f'Failed to stat {zonefile}')

             # Now (somewhat) atomically update the symlink by creating a new
             # symlink and move it into place. Otherwise we have to remove the

@@ -752,7 +752,7 @@ class BSDTimezone(Timezone):
             # create a race condition in case another process tries to read
             # /etc/localtime between removal and creation.
             suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
-            new_localtime = '/etc/localtime.' + suffix
+            new_localtime = f"/etc/localtime.{suffix}"

             try:
                 os.symlink(zonefile, new_localtime)

@@ -761,7 +761,7 @@ class BSDTimezone(Timezone):
                 os.remove(new_localtime)
                 self.module.fail_json(msg='Could not update /etc/localtime')
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')


 class AIXTimezone(Timezone):

@@ -804,7 +804,7 @@ class AIXTimezone(Timezone):
         if key == 'name':
             return self.__get_timezone()
         else:
-            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+            self.module.fail_json(msg=f'{key} is not a supported option on target platform')

     def set(self, key, value):
         """Set the requested timezone through chtz, an invalid timezone name

@@ -826,12 +826,12 @@ class AIXTimezone(Timezone):

         # First determine if the requested timezone is valid by looking in the zoneinfo
         # directory.
-        zonefile = '/usr/share/lib/zoneinfo/' + value
+        zonefile = f"/usr/share/lib/zoneinfo/{value}"
         try:
             if not os.path.isfile(zonefile):
-                self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+                self.module.fail_json(msg=f'{value} is not a recognized timezone.')
         except Exception:
-            self.module.fail_json(msg='Failed to check %s.' % zonefile)
+            self.module.fail_json(msg=f'Failed to check {zonefile}.')

         # Now set the TZ using chtz
         cmd = ['chtz', value]

@@ -844,11 +844,11 @@ class AIXTimezone(Timezone):
         # change.
         TZ = self.__get_timezone()
         if TZ != value:
-            msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+            msg = f'TZ value does not match post-change (Actual: {TZ}, Expected: {value}).'
             self.module.fail_json(msg=msg)

     else:
-        self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+        self.module.fail_json(msg=f'{key} is not a supported option on target platform')


 def main():

@@ -879,8 +879,7 @@ def main():
     # Examine if the current state matches planned state
     (after, planned) = tz.diff('after', 'planned').values()
     if after != planned:
-        tz.abort('still not desired state, though changes have made - '
-                 'planned: %s, after: %s' % (str(planned), str(after)))
+        tz.abort(f'still not desired state, though changes have made - planned: {planned}, after: {after}')
     diff = tz.diff('before', 'after')

     changed = (diff['before'] != diff['after'])

@@ -114,8 +114,7 @@ from ansible.module_utils.urls import fetch_url

 def post_twilio_api(module, account_sid, auth_token, msg, from_number,
                     to_number, media_url=None):
-    URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
-        % (account_sid,)
+    URI = f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Messages.json"
     AGENT = "Ansible"

     data = {'From': from_number, 'To': to_number, 'Body': msg}

@@ -168,7 +167,7 @@ def main():
             if 'body' in info:
                 body = module.from_json(info['body'])
                 body_message = body['message']
-                module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+                module.fail_json(msg=f"unable to send message to {number}: {body_message}")

     module.exit_json(msg=msg, changed=False)

@@ -96,9 +96,9 @@ def send_message(module, client_id, client_secret, topic, msg):
     """
     try:
         access_token = get_access_token(module, client_id, client_secret)
-        url = 'https://typetalk.com/api/v1/topics/%d' % topic
+        url = f'https://typetalk.com/api/v1/topics/{topic}'
         headers = {
-            'Authorization': 'Bearer %s' % access_token,
+            'Authorization': f'Bearer {access_token}',
         }
         do_request(module, url, {'message': msg}, headers)
         return True, {'access_token': access_token}

@@ -128,7 +128,7 @@ def main():

     res, error = send_message(module, client_id, client_secret, topic, msg)
     if not res:
-        module.fail_json(msg='fail to send message with response code %s' % error.code)
+        module.fail_json(msg=f'fail to send message with response code {error.code}')

     module.exit_json(changed=True, topic=topic, msg=msg)

@@ -150,20 +150,20 @@ def main():
             ipaddr_rev = ipaddress.ip_address(name).reverse_pointer
             subnet_offset = ipaddr_rev.find(zone)
             if subnet_offset == -1:
-                raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev))
+                raise Exception(f"reversed IP address {ipaddr_rev} is not part of zone.")
             workname = ipaddr_rev[0:subnet_offset - 1]
         except Exception as e:
             module.fail_json(
-                msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e)
+                msg=f'handling PTR record for {name} in zone {zone} failed: {e}'
             )

     obj = list(ldap_search(
-        '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname),
+        f'(&(objectClass=dNSZone)(zoneName={zone})(relativeDomainName={workname}))',
         attr=['dNSZone']
     ))
     exists = bool(len(obj))
-    container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
-    dn = 'relativeDomainName={0},{1}'.format(workname, container)
+    container = f'zoneName={zone},cn=dns,{base_dn()}'
+    dn = f'relativeDomainName={workname},{container}'

     if state == 'present':
         try:

@@ -171,19 +171,19 @@ def main():
                 so = forward_zone.lookup(
                     config(),
                     uldap(),
-                    '(zone={0})'.format(zone),
+                    f'(zone={zone})',
                     scope='domain',
                 ) or reverse_zone.lookup(
                     config(),
                     uldap(),
-                    '(zoneName={0})'.format(zone),
+                    f'(zoneName={zone})',
                     scope='domain',
                 )
                 if not so == 0:
-                    raise Exception("Did not find zone '{0}' in Univention".format(zone))
-                obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+                    raise Exception(f"Did not find zone '{zone}' in Univention")
+                obj = umc_module_for_add(f'dns/{type}', container, superordinate=so[0])
             else:
-                obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+                obj = umc_module_for_edit(f'dns/{type}', dn)

             if type == 'ptr_record':
                 obj['ip'] = name

@@ -201,18 +201,18 @@ def main():
             obj.modify()
         except Exception as e:
             module.fail_json(
-                msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+                msg=f'Creating/editing dns entry {name} in {container} failed: {e}'
             )

     if state == 'absent' and exists:
         try:
-            obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+            obj = umc_module_for_edit(f'dns/{type}', dn)
             if not module.check_mode:
                 obj.remove()
                 changed = True
         except Exception as e:
             module.fail_json(
-                msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+                msg=f'Removing dns entry {name} in {container} failed: {e}'
             )

     module.exit_json(

@@ -124,7 +124,7 @@ def convert_time(time):
         return ('0', 'seconds')
     for unit in units:
         if time >= unit[0]:
-            return ('{0}'.format(time // unit[0]), unit[1])
+            return (f'{time // unit[0]}', unit[1])


 def main():

@@ -178,22 +178,22 @@ def main():
     diff = None

     obj = list(ldap_search(
-        '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+        f'(&(objectClass=dNSZone)(zoneName={zone}))',
         attr=['dNSZone']
     ))

     exists = bool(len(obj))
-    container = 'cn=dns,{0}'.format(base_dn())
-    dn = 'zoneName={0},{1}'.format(zone, container)
+    container = f'cn=dns,{base_dn()}'
+    dn = f'zoneName={zone},{container}'
     if contact == '':
-        contact = 'root@{0}.'.format(zone)
+        contact = f'root@{zone}.'

     if state == 'present':
         try:
             if not exists:
-                obj = umc_module_for_add('dns/{0}'.format(type), container)
+                obj = umc_module_for_add(f'dns/{type}', container)
             else:
-                obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+                obj = umc_module_for_edit(f'dns/{type}', dn)
             obj['zone'] = zone
             obj['nameserver'] = nameserver
             obj['a'] = interfaces

@@ -217,18 +217,18 @@ def main():
             obj.modify()
         except Exception as e:
             module.fail_json(
-                msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+                msg=f'Creating/editing dns zone {zone} failed: {e}'
             )

     if state == 'absent' and exists:
         try:
-            obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+            obj = umc_module_for_edit(f'dns/{type}', dn)
             if not module.check_mode:
                 obj.remove()
                 changed = True
         except Exception as e:
             module.fail_json(
-                msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+                msg=f'Removing dns zone {zone} failed: {e}'
             )

     module.exit_json(

@@ -122,18 +122,18 @@ def main():
     diff = None

     groups = list(ldap_search(
-        '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+        f'(&(objectClass=posixGroup)(cn={name}))',
         attr=['cn']
     ))
     if position != '':
         container = position
     else:
         if ou != '':
-            ou = 'ou={0},'.format(ou)
+            ou = f'ou={ou},'
         if subpath != '':
-            subpath = '{0},'.format(subpath)
-        container = '{0}{1}{2}'.format(subpath, ou, base_dn())
-    group_dn = 'cn={0},{1}'.format(name, container)
+            subpath = f'{subpath},'
+        container = f'{subpath}{ou}{base_dn()}'
+    group_dn = f'cn={name},{container}'

     exists = bool(len(groups))

@@ -154,7 +154,7 @@ def main():
             grp.modify()
         except Exception:
             module.fail_json(
-                msg="Creating/editing group {0} in {1} failed".format(name, container)
+                msg=f"Creating/editing group {name} in {container} failed"
             )

     if state == 'absent' and exists:

@@ -165,7 +165,7 @@ def main():
             changed = True
         except Exception:
             module.fail_json(
-                msg="Removing group {0} failed".format(name)
+                msg=f"Removing group {name} failed"
             )

     module.exit_json(

@@ -503,13 +503,13 @@ def main():
     diff = None

     obj = list(ldap_search(
-        '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+        f'(&(objectClass=univentionShare)(cn={name}))',
         attr=['cn']
     ))

     exists = bool(len(obj))
-    container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
-    dn = 'cn={0},{1}'.format(name, container)
+    container = f"cn=shares,ou={module.params['ou']},{base_dn()}"
+    dn = f'cn={name},{container}'

     if state == 'present':
         try:

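Note the quote switch on the container line above: until Python 3.12 (PEP 701), an expression inside an f-string may not reuse the f-string's own quote character, so subscripts like module.params['ou'] force a double-quoted f-string. A minimal sketch with illustrative values in place of module.params and base_dn():

    params = {'ou': 'people'}
    container = f"cn=shares,ou={params['ou']},dc=example"  # outer " vs inner '
    assert container == 'cn=shares,ou=people,dc=example'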
@@ -518,7 +518,7 @@ def main():
             else:
                 obj = umc_module_for_edit('shares/share', dn)

-            module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
+            module.params['printablename'] = f"{name} ({module.params['host']})"
             for k in obj.keys():
                 if module.params[k] is True:
                     module.params[k] = '1'

@@ -540,11 +540,7 @@ def main():
             obj.modify()
         except Exception as err:
             module.fail_json(
-                msg='Creating/editing share {0} in {1} failed: {2}'.format(
-                    name,
-                    container,
-                    err,
-                )
+                msg=f'Creating/editing share {name} in {container} failed: {err}'
             )

     if state == 'absent' and exists:

@@ -555,11 +551,7 @@ def main():
             changed = True
         except Exception as err:
             module.fail_json(
-                msg='Removing share {0} in {1} failed: {2}'.format(
-                    name,
-                    container,
-                    err,
-                )
+                msg=f'Removing share {name} in {container} failed: {err}'
             )

     module.exit_json(

@@ -480,18 +480,18 @@ def main():
     diff = None

     users = list(ldap_search(
-        '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+        f'(&(objectClass=posixAccount)(uid={username}))',
         attr=['uid']
     ))
     if position != '':
         container = position
     else:
         if ou != '':
-            ou = 'ou={0},'.format(ou)
+            ou = f'ou={ou},'
         if subpath != '':
-            subpath = '{0},'.format(subpath)
-        container = '{0}{1}{2}'.format(subpath, ou, base_dn())
-    user_dn = 'uid={0},{1}'.format(username, container)
+            subpath = f'{subpath},'
+        container = f'{subpath}{ou}{base_dn()}'
+    user_dn = f'uid={username},{container}'

     exists = bool(len(users))

@@ -503,14 +503,9 @@ def main():
         obj = umc_module_for_edit('users/user', user_dn)

         if module.params['displayName'] is None:
-            module.params['displayName'] = '{0} {1}'.format(
-                module.params['firstname'],
-                module.params['lastname']
-            )
+            module.params['displayName'] = f"{module.params['firstname']} {module.params['lastname']}"
         if module.params['unixhome'] is None:
-            module.params['unixhome'] = '/home/{0}'.format(
-                module.params['username']
-            )
+            module.params['unixhome'] = f"/home/{module.params['username']}"
         for k in obj.keys():
             if (k != 'password' and
                     k != 'groups' and

@@ -546,17 +541,12 @@ def main():
             obj.modify()
         except Exception:
             module.fail_json(
-                msg="Creating/editing user {0} in {1} failed".format(
-                    username,
-                    container
-                )
+                msg=f"Creating/editing user {username} in {container} failed"
             )
         try:
             groups = module.params['groups']
             if groups:
-                filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
-                    ')(cn='.join(groups)
-                )
+                filter = f"(&(objectClass=posixGroup)(|(cn={')(cn='.join(groups)})))"
                 group_dns = list(ldap_search(filter, attr=['dn']))
                 for dn in group_dns:
                     grp = umc_module_for_edit('groups/group', dn[0])

@@ -567,7 +557,7 @@ def main():
                 changed = True
         except Exception:
             module.fail_json(
-                msg="Adding groups to user {0} failed".format(username)
+                msg=f"Adding groups to user {username} failed"
             )

     if state == 'absent' and exists:

@@ -578,7 +568,7 @@ def main():
             changed = True
         except Exception:
             module.fail_json(
-                msg="Removing user {0} failed".format(username)
+                msg=f"Removing user {username} failed"
             )

     module.exit_json(

@@ -542,12 +542,12 @@ def main():
             # ufw does not like it when the insert number is larger than the
             # maximal rule number for IPv4/IPv6.
             insert_to = None
         cmd.append([insert_to is not None, f"insert {insert_to}"])
-        cmd.append([insert_to is not None, "insert %s" % insert_to])
         cmd.append([value])
         cmd.append([params['direction'], params['direction']])
-        cmd.append([params['interface'], "on %s" % params['interface']])
-        cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
-        cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+        cmd.append([params['interface'], f"on {params['interface']}"])
+        cmd.append([params['interface_in'], f"in on {params['interface_in']}"])
+        cmd.append([params['interface_out'], f"out on {params['interface_out']}"])
         cmd.append([module.boolean(params['log']), 'log'])

         for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),

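The '%s' strings in the for (key, template) context line above survive this refactor, presumably because they are templates stored as data and filled in with % only once the value is known; an f-string would have to be evaluated on the spot. A sketch of the deferred use:

    template = "from %s"                       # stored now, rendered later
    assert template % '10.0.0.0/8' == 'from 10.0.0.0/8'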
@@ -559,7 +559,7 @@ def main():
         ufw_major, ufw_minor, dummy = ufw_version()
         # comment is supported only in ufw version after 0.35
         if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
-            cmd.append([params['comment'], "comment '%s'" % params['comment']])
+            cmd.append([params['comment'], f"comment '{params['comment']}'"])

         rules_dry = execute(cmd)

@@ -131,13 +131,13 @@ def remove_packages(module, packages, root):
         rc, stdout, stderr = module.run_command(cmd, check_rc=False)

         if rc != 0:
-            module.fail_json(msg="failed to remove %s" % (package))
+            module.fail_json(msg=f"failed to remove {package}")

         remove_c += 1

     if remove_c > 0:

-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+        module.exit_json(changed=True, msg=f"removed {remove_c} package(s)")

     module.exit_json(changed=False, msg="package(s) already absent")

@@ -147,7 +147,7 @@ def install_packages(module, pkgspec, root, force=True, no_recommends=True):
     packages = ""
     for package in pkgspec:
         if not query_package_provides(module, package, root):
-            packages += "'%s' " % package
+            packages += f"'{package}' "

     if len(packages) != 0:
         if no_recommends:

@@ -167,20 +167,20 @@

         for package in pkgspec:
             if not query_package_provides(module, package, root):
-                module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+                module.fail_json(msg=f"'urpmi {package}' failed: {err}")

         # urpmi always have 0 for exit code if --force is used
         if rc:
-            module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+            module.fail_json(msg=f"'urpmi {packages}' failed: {err}")
         else:
-            module.exit_json(changed=True, msg="%s present(s)" % packages)
+            module.exit_json(changed=True, msg=f"{packages} present(s)")
     else:
         module.exit_json(changed=False)


 def root_option(root):
     if root:
-        return ["--root=%s" % (root)]
+        return [f"--root={root}"]
     else:
         return []

@@ -79,7 +79,7 @@ def parse_lsusb(module, lsusb_path):
     for line in stdout.splitlines():
         match = re.match(regex, line)
         if not match:
-            module.fail_json(msg="failed to parse unknown lsusb output %s" % (line), stdout=stdout, stderr=stderr)
+            module.fail_json(msg=f"failed to parse unknown lsusb output {line}", stdout=stdout, stderr=stderr)
         current_device = {
             'bus': match.group(1),
             'device': match.group(2),

@@ -90,7 +90,7 @@
     return_value = {
         "usb_devices": usb_devices
     }
-    module.exit_json(msg="parsed %s USB devices" % (len(usb_devices)), stdout=stdout, stderr=stderr, ansible_facts=return_value)
+    module.exit_json(msg=f"parsed {len(usb_devices)} USB devices", stdout=stdout, stderr=stderr, ansible_facts=return_value)


 def main():

@@ -254,7 +254,7 @@ def inventory_vdos(module, vdocmd):
         return vdolist

     if rc != 0:
-        module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err)
+        module.fail_json(msg=f"Inventorying VDOs failed: {vdostatusout}", rc=rc, err=err)

     vdostatusyaml = yaml.safe_load(vdostatusout)
     if vdostatusyaml is None:

@@ -283,30 +283,30 @@ def list_running_vdos(module, vdocmd):
 #
 # @return vdocmdoptions A string to be used in a 'vdo <action>' command.
 def start_vdo(module, vdoname, vdocmd):
-    rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname])
+    rc, out, err = module.run_command([vdocmd, "start", f"--name={vdoname}"])
     if rc == 0:
-        module.log("started VDO volume %s" % vdoname)
+        module.log(f"started VDO volume {vdoname}")
     return rc


 def stop_vdo(module, vdoname, vdocmd):
-    rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname])
+    rc, out, err = module.run_command([vdocmd, "stop", f"--name={vdoname}"])
     if rc == 0:
-        module.log("stopped VDO volume %s" % vdoname)
+        module.log(f"stopped VDO volume {vdoname}")
     return rc


 def activate_vdo(module, vdoname, vdocmd):
-    rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname])
+    rc, out, err = module.run_command([vdocmd, "activate", f"--name={vdoname}"])
     if rc == 0:
-        module.log("activated VDO volume %s" % vdoname)
+        module.log(f"activated VDO volume {vdoname}")
     return rc


 def deactivate_vdo(module, vdoname, vdocmd):
-    rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname])
+    rc, out, err = module.run_command([vdocmd, "deactivate", f"--name={vdoname}"])
     if rc == 0:
-        module.log("deactivated VDO volume %s" % vdoname)
+        module.log(f"deactivated VDO volume {vdoname}")
     return rc

@@ -314,25 +314,25 @@ def add_vdooptions(params):
     options = []

     if params.get('logicalsize') is not None:
-        options.append("--vdoLogicalSize=" + params['logicalsize'])
+        options.append(f"--vdoLogicalSize={params['logicalsize']}")

     if params.get('blockmapcachesize') is not None:
-        options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+        options.append(f"--blockMapCacheSize={params['blockmapcachesize']}")

     if params.get('readcache') == 'enabled':
         options.append("--readCache=enabled")

     if params.get('readcachesize') is not None:
-        options.append("--readCacheSize=" + params['readcachesize'])
+        options.append(f"--readCacheSize={params['readcachesize']}")

     if params.get('slabsize') is not None:
-        options.append("--vdoSlabSize=" + params['slabsize'])
+        options.append(f"--vdoSlabSize={params['slabsize']}")

     if params.get('emulate512'):
         options.append("--emulate512=enabled")

     if params.get('indexmem') is not None:
-        options.append("--indexMem=" + params['indexmem'])
+        options.append(f"--indexMem={params['indexmem']}")

     if params.get('indexmode') == 'sparse':
         options.append("--sparseIndex=enabled")

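In these vdo hunks the pattern being replaced is plain string concatenation rather than %-formatting. The f-string form is equivalent for the str values involved, and unlike +, it would also stringify non-str values automatically. A minimal check:

    logicalsize = '10G'
    assert "--vdoLogicalSize=" + logicalsize == f"--vdoLogicalSize={logicalsize}"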
@@ -347,19 +347,19 @@ def add_vdooptions(params):
     # onto that system to read the error. For now, heed the thread
     # limit warnings in the DOCUMENTATION section above.
     if params.get('ackthreads') is not None:
-        options.append("--vdoAckThreads=" + params['ackthreads'])
+        options.append(f"--vdoAckThreads={params['ackthreads']}")

     if params.get('biothreads') is not None:
-        options.append("--vdoBioThreads=" + params['biothreads'])
+        options.append(f"--vdoBioThreads={params['biothreads']}")

     if params.get('cputhreads') is not None:
-        options.append("--vdoCpuThreads=" + params['cputhreads'])
+        options.append(f"--vdoCpuThreads={params['cputhreads']}")

     if params.get('logicalthreads') is not None:
-        options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+        options.append(f"--vdoLogicalThreads={params['logicalthreads']}")

     if params.get('physicalthreads') is not None:
-        options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+        options.append(f"--vdoPhysicalThreads={params['physicalthreads']}")

     return options

@@ -446,17 +446,17 @@ def run_module():
         # assume default values.
         vdocmdoptions = add_vdooptions(module.params)
         rc, out, err = module.run_command(
-            [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions)
+            [vdocmd, "create", f"--name={desiredvdo}", f"--device={device}"] + vdocmdoptions)
         if rc == 0:
             result['changed'] = True
         else:
-            module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err)
+            module.fail_json(msg=f"Creating VDO {desiredvdo} failed.", rc=rc, err=err)

         if module.params['compression'] == 'disabled':
-            rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo])
+            rc, out, err = module.run_command([vdocmd, "disableCompression", f"--name={desiredvdo}"])

         if module.params['deduplication'] == 'disabled':
-            rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo])
+            rc, out, err = module.run_command([vdocmd, "disableDeduplication", f"--name={desiredvdo}"])

         if module.params['activated'] is False:
             deactivate_vdo(module, desiredvdo, vdocmd)

@@ -466,7 +466,7 @@ def run_module():

         # Print a post-run list of VDO volumes in the result object.
         vdolist = inventory_vdos(module, vdocmd)
-        module.log("created VDO volume %s" % desiredvdo)
+        module.log(f"created VDO volume {desiredvdo}")
         module.exit_json(**result)

     # Modify the current parameters of a VDO that exists.

@@ -547,45 +547,44 @@ def run_module():
         if diffparams:
             vdocmdoptions = add_vdooptions(diffparams)
             if vdocmdoptions:
-                rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions)
+                rc, out, err = module.run_command([vdocmd, "modify", f"--name={desiredvdo}"] + vdocmdoptions)
                 if rc == 0:
                     result['changed'] = True
                 else:
-                    module.fail_json(msg="Modifying VDO %s failed."
-                                     % desiredvdo, rc=rc, err=err)
+                    module.fail_json(msg=f"Modifying VDO {desiredvdo} failed.", rc=rc, err=err)

             if 'deduplication' in diffparams.keys():
                 dedupemod = diffparams['deduplication']
                 dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication"
-                rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo])
+                rc, out, err = module.run_command([vdocmd, dedupeparam, f"--name={desiredvdo}"])

                 if rc == 0:
                     result['changed'] = True
                 else:
-                    module.fail_json(msg="Changing deduplication on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+                    module.fail_json(msg=f"Changing deduplication on VDO volume {desiredvdo} failed.", rc=rc, err=err)

             if 'compression' in diffparams.keys():
                 compressmod = diffparams['compression']
                 compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression"
-                rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo])
+                rc, out, err = module.run_command([vdocmd, compressparam, f"--name={desiredvdo}"])
                 if rc == 0:
                     result['changed'] = True
                 else:
-                    module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+                    module.fail_json(msg=f"Changing compression on VDO volume {desiredvdo} failed.", rc=rc, err=err)

             if 'writepolicy' in diffparams.keys():
                 writepolmod = diffparams['writepolicy']
                 rc, out, err = module.run_command([
                     vdocmd,
                     "changeWritePolicy",
-                    "--name=%s" % desiredvdo,
-                    "--writePolicy=%s" % writepolmod,
+                    f"--name={desiredvdo}",
+                    f"--writePolicy={writepolmod}",
                 ])

                 if rc == 0:
                     result['changed'] = True
                 else:
-                    module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+                    module.fail_json(msg=f"Changing write policy on VDO volume {desiredvdo} failed.", rc=rc, err=err)

         # Process the size parameters, to determine of a growPhysical or
         # growLogical operation needs to occur.

@@ -610,7 +609,7 @@ def run_module():
         physdevice = module.params['device']
         rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice])
         devblocks = (int(devsectors) / 8)
-        dmvdoname = ('/dev/mapper/' + desiredvdo)
+        dmvdoname = f"/dev/mapper/{desiredvdo}"
         currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname]
         currentphysblocks = currentvdostats['physical blocks']

@ -623,11 +622,11 @@ def run_module():
|
|||
|
||||
if currentphysblocks > growthresh:
|
||||
result['changed'] = True
|
||||
rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo])
|
||||
rc, out, err = module.run_command([vdocmd, "growPhysical", f"--name={desiredvdo}"])
|
||||
|
||||
if 'logicalsize' in diffsizeparams.keys():
|
||||
result['changed'] = True
|
||||
rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']])
|
||||
rc, out, err = module.run_command([vdocmd, "growLogical", f"--name={desiredvdo}", f"--vdoLogicalSize={diffsizeparams['logicalsize']}"])
|
||||
|
||||
vdoactivatestatus = processedvdos[desiredvdo]['Activate']
|
||||
|
||||
|
|
@ -661,21 +660,21 @@ def run_module():
|
|||
# Print a post-run list of VDO volumes in the result object.
|
||||
vdolist = inventory_vdos(module, vdocmd)
|
||||
if diffparams:
|
||||
module.log("modified parameters of VDO volume %s" % desiredvdo)
|
||||
module.log(f"modified parameters of VDO volume {desiredvdo}")
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
# Remove a desired VDO that currently exists.
|
||||
if desiredvdo in vdolist and state == 'absent':
|
||||
rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo])
|
||||
rc, out, err = module.run_command([vdocmd, "remove", f"--name={desiredvdo}"])
|
||||
if rc == 0:
|
||||
result['changed'] = True
|
||||
else:
|
||||
module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err)
|
||||
module.fail_json(msg=f"Removing VDO {desiredvdo} failed.", rc=rc, err=err)
|
||||
|
||||
# Print a post-run list of VDO volumes in the result object.
|
||||
vdolist = inventory_vdos(module, vdocmd)
|
||||
module.log("removed VDO volume %s" % desiredvdo)
|
||||
module.log(f"removed VDO volume {desiredvdo}")
|
||||
module.exit_json(**result)
|
||||
|
||||
# fall through
|
||||
|
|
@ -683,7 +682,7 @@ def run_module():
|
|||
# not exist. Print a post-run list of VDO volumes in the result
|
||||
# object.
|
||||
vdolist = inventory_vdos(module, vdocmd)
|
||||
module.log("received request to remove non-existent VDO volume %s" % desiredvdo)
|
||||
module.log(f"received request to remove non-existent VDO volume {desiredvdo}")
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
|
|
|||
|
|
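As a quick sanity check on the pattern repeated above, here is a minimal, standalone sketch (the volume name "vdo1" is invented, not taken from the diff) showing that the old %-interpolation and the new f-string build the exact same argv entry for run_command:

    # Hypothetical value; any VDO volume name behaves the same.
    desiredvdo = "vdo1"
    old_style = "--name=%s" % desiredvdo
    new_style = f"--name={desiredvdo}"
    assert old_style == new_style == "--name=vdo1"

Because the two forms are character-for-character identical, the conversion is purely cosmetic and cannot change module behavior.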
@@ -123,7 +123,7 @@ def present(configuration_facts, cursor, parameter_name, current_value):
     parameter_key = parameter_name.lower()
     changed = False
     if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
-        cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+        cursor.execute(f"select set_config_parameter('{parameter_name}', '{current_value}')")
        changed = True
     if changed:
         configuration_facts.update(get_configuration_facts(cursor, parameter_name))

@@ -159,18 +159,17 @@ def main():
    try:
        dsn = (
            "Driver=Vertica;"
-            "Server={0};"
-            "Port={1};"
-            "Database={2};"
-            "User={3};"
-            "Password={4};"
-            "ConnectionLoadBalance={5}"
-        ).format(module.params['cluster'], module.params['port'], db,
-                 module.params['login_user'], module.params['login_password'], 'true')
+            f"Server={module.params['cluster']};"
+            f"Port={module.params['port']};"
+            f"Database={db};"
+            f"User={module.params['login_user']};"
+            f"Password={module.params['login_password']};"
+            f"ConnectionLoadBalance=true"
+        )
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
-        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+        module.fail_json(msg=f"Unable to connect to database: {e}.",
                         exception=traceback.format_exc())
 
    try:
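The DSN rewrite leans on Python's implicit concatenation of adjacent string literals, which works for f-strings too. A standalone sketch with invented connection values:

    params = {"cluster": "vertica01", "port": 5433,
              "login_user": "dbadmin", "login_password": "secret"}  # made-up values
    db = "testdb"
    dsn = (
        "Driver=Vertica;"
        f"Server={params['cluster']};"
        f"Port={params['port']};"
        f"Database={db};"
        f"User={params['login_user']};"
        f"Password={params['login_password']};"
        "ConnectionLoadBalance=true"
    )
    print(dsn)
    # Driver=Vertica;Server=vertica01;Port=5433;Database=testdb;User=dbadmin;Password=secret;ConnectionLoadBalance=true

Note that the final "ConnectionLoadBalance=true" fragment contains no placeholder, so the f prefix the diff keeps on it is redundant but harmless.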
@@ -240,18 +240,17 @@ def main():
    try:
        dsn = (
            "Driver=Vertica;"
-            "Server=%s;"
-            "Port=%s;"
-            "Database=%s;"
-            "User=%s;"
-            "Password=%s;"
-            "ConnectionLoadBalance=%s"
-        ) % (module.params['cluster'], module.params['port'], db,
-             module.params['login_user'], module.params['login_password'], 'true')
+            f"Server={module.params['cluster']};"
+            f"Port={module.params['port']};"
+            f"Database={db};"
+            f"User={module.params['login_user']};"
+            f"Password={module.params['login_password']};"
+            f"ConnectionLoadBalance=true"
+        )
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
-        module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+        module.fail_json(msg=f"Unable to connect to database: {e}.", exception=traceback.format_exc())
 
    try:
        schema_facts = get_schema_facts(cursor)
@@ -127,9 +127,9 @@ def get_role_facts(cursor, role=''):
 def update_roles(role_facts, cursor, role,
                  existing, required):
     for assigned_role in set(existing) - set(required):
-        cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+        cursor.execute(f"revoke {assigned_role} from {role}")
     for assigned_role in set(required) - set(existing):
-        cursor.execute("grant {0} to {1}".format(assigned_role, role))
+        cursor.execute(f"grant {assigned_role} to {role}")
 
 
 def check(role_facts, role, assigned_roles):

@@ -144,7 +144,7 @@ def check(role_facts, role, assigned_roles):
 def present(role_facts, cursor, role, assigned_roles):
     role_key = role.lower()
     if role_key not in role_facts:
-        cursor.execute("create role {0}".format(role))
+        cursor.execute(f"create role {role}")
         update_roles(role_facts, cursor, role, [], assigned_roles)
         role_facts.update(get_role_facts(cursor, role))
         return True

@@ -164,7 +164,7 @@ def absent(role_facts, cursor, role, assigned_roles):
     if role_key in role_facts:
         update_roles(role_facts, cursor, role,
                      role_facts[role_key]['assigned_roles'], [])
-        cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+        cursor.execute(f"drop role {role_facts[role_key]['name']} cascade")
         del role_facts[role_key]
         return True
     else:

@@ -205,18 +205,17 @@ def main():
    try:
        dsn = (
            "Driver=Vertica;"
-            "Server={0};"
-            "Port={1};"
-            "Database={2};"
-            "User={3};"
-            "Password={4};"
-            "ConnectionLoadBalance={5}"
-        ).format(module.params['cluster'], module.params['port'], db,
-                 module.params['login_user'], module.params['login_password'], 'true')
+            f"Server={module.params['cluster']};"
+            f"Port={module.params['port']};"
+            f"Database={db};"
+            f"User={module.params['login_user']};"
+            f"Password={module.params['login_password']};"
+            f"ConnectionLoadBalance=true"
+        )
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
-        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+        module.fail_json(msg=f"Unable to connect to database: {e}.")
 
    try:
        role_facts = get_role_facts(cursor)
@@ -164,14 +164,14 @@ def update_roles(schema_facts, cursor, schema,
                  existing, required,
                  create_existing, create_required):
     for role in set(existing + create_existing) - set(required + create_required):
-        cursor.execute("drop role {0} cascade".format(role))
+        cursor.execute(f"drop role {role} cascade")
     for role in set(create_existing) - set(create_required):
-        cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+        cursor.execute(f"revoke create on schema {schema} from {role}")
     for role in set(required + create_required) - set(existing + create_existing):
-        cursor.execute("create role {0}".format(role))
-        cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+        cursor.execute(f"create role {role}")
+        cursor.execute(f"grant usage on schema {schema} to {role}")
     for role in set(create_required) - set(create_existing):
-        cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+        cursor.execute(f"grant create on schema {schema} to {role}")
 
 
 def check(schema_facts, schema, usage_roles, create_roles, owner):

@@ -190,9 +190,9 @@ def check(schema_facts, schema, usage_roles, create_roles, owner):
 def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
     schema_key = schema.lower()
     if schema_key not in schema_facts:
-        query_fragments = ["create schema {0}".format(schema)]
+        query_fragments = [f"create schema {schema}"]
         if owner:
-            query_fragments.append("authorization {0}".format(owner))
+            query_fragments.append(f"authorization {owner}")
         cursor.execute(' '.join(query_fragments))
         update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
         schema_facts.update(get_schema_facts(cursor, schema))

@@ -200,10 +200,7 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
     else:
         changed = False
         if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
-            raise NotSupportedError((
-                "Changing schema owner is not supported. "
-                "Current owner: {0}."
-            ).format(schema_facts[schema_key]['owner']))
+            raise NotSupportedError(f"Changing schema owner is not supported. Current owner: {schema_facts[schema_key]['owner']}.")
         if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
            sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):

@@ -222,7 +219,7 @@ def absent(schema_facts, cursor, schema, usage_roles, create_roles):
         update_roles(schema_facts, cursor, schema,
                      schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
         try:
-            cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+            cursor.execute(f"drop schema {schema_facts[schema_key]['name']} restrict")
         except pyodbc.Error:
             raise CannotDropError("Dropping schema failed due to dependencies.")
         del schema_facts[schema_key]

@@ -272,18 +269,17 @@ def main():
    try:
        dsn = (
            "Driver=Vertica;"
-            "Server={0};"
-            "Port={1};"
-            "Database={2};"
-            "User={3};"
-            "Password={4};"
-            "ConnectionLoadBalance={5}"
-        ).format(module.params['cluster'], module.params['port'], db,
-                 module.params['login_user'], module.params['login_password'], 'true')
+            f"Server={module.params['cluster']};"
+            f"Port={module.params['port']};"
+            f"Database={db};"
+            f"User={module.params['login_user']};"
+            f"Password={module.params['login_password']};"
+            f"ConnectionLoadBalance=true"
+        )
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
-        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+        module.fail_json(msg=f"Unable to connect to database: {e}.")
 
    try:
        schema_facts = get_schema_facts(cursor)
@@ -166,12 +166,12 @@ def update_roles(user_facts, cursor, user,
                  existing_all, existing_default, required):
     del_roles = list(set(existing_all) - set(required))
     if del_roles:
-        cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+        cursor.execute(f"revoke {','.join(del_roles)} from {user}")
     new_roles = list(set(required) - set(existing_all))
     if new_roles:
-        cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+        cursor.execute(f"grant {','.join(new_roles)} to {user}")
     if required:
-        cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+        cursor.execute(f"alter user {user} default role {','.join(required)}")
 
 
 def check(user_facts, user, profile, resource_pool,

@@ -200,39 +200,38 @@ def present(user_facts, cursor, user, profile, resource_pool,
             locked, password, expired, ldap, roles):
     user_key = user.lower()
     if user_key not in user_facts:
-        query_fragments = ["create user {0}".format(user)]
+        query_fragments = [f"create user {user}"]
         if locked:
             query_fragments.append("account lock")
         if password or ldap:
             if password:
-                query_fragments.append("identified by '{0}'".format(password))
+                query_fragments.append(f"identified by '{password}'")
             else:
                 query_fragments.append("identified by '$ldap$'")
         if expired or ldap:
             query_fragments.append("password expire")
         if profile:
-            query_fragments.append("profile {0}".format(profile))
+            query_fragments.append(f"profile {profile}")
         if resource_pool:
-            query_fragments.append("resource pool {0}".format(resource_pool))
+            query_fragments.append(f"resource pool {resource_pool}")
         cursor.execute(' '.join(query_fragments))
         if resource_pool and resource_pool != 'general':
-            cursor.execute("grant usage on resource pool {0} to {1}".format(
-                resource_pool, user))
+            cursor.execute(f"grant usage on resource pool {resource_pool} to {user}")
         update_roles(user_facts, cursor, user, [], [], roles)
         user_facts.update(get_user_facts(cursor, user))
         return True
     else:
         changed = False
-        query_fragments = ["alter user {0}".format(user)]
+        query_fragments = [f"alter user {user}"]
         if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
             if locked:
                 state = 'lock'
             else:
                 state = 'unlock'
-            query_fragments.append("account {0}".format(state))
+            query_fragments.append(f"account {state}")
             changed = True
         if password and password != user_facts[user_key]['password']:
-            query_fragments.append("identified by '{0}'".format(password))
+            query_fragments.append(f"identified by '{password}'")
             changed = True
         if ldap:
             if ldap != (user_facts[user_key]['expired'] == 'True'):

@@ -245,16 +244,14 @@ def present(user_facts, cursor, user, profile, resource_pool,
             else:
                 raise NotSupportedError("Unexpiring user password is not supported.")
         if profile and profile != user_facts[user_key]['profile']:
-            query_fragments.append("profile {0}".format(profile))
+            query_fragments.append(f"profile {profile}")
             changed = True
         if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
-            query_fragments.append("resource pool {0}".format(resource_pool))
+            query_fragments.append(f"resource pool {resource_pool}")
             if user_facts[user_key]['resource_pool'] != 'general':
-                cursor.execute("revoke usage on resource pool {0} from {1}".format(
-                    user_facts[user_key]['resource_pool'], user))
+                cursor.execute(f"revoke usage on resource pool {user_facts[user_key]['resource_pool']} from {user}")
             if resource_pool != 'general':
-                cursor.execute("grant usage on resource pool {0} to {1}".format(
-                    resource_pool, user))
+                cursor.execute(f"grant usage on resource pool {resource_pool} to {user}")
             changed = True
         if changed:
             cursor.execute(' '.join(query_fragments))

@@ -274,7 +271,7 @@ def absent(user_facts, cursor, user, roles):
         update_roles(user_facts, cursor, user,
                      user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
         try:
-            cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+            cursor.execute(f"drop user {user_facts[user_key]['name']}")
         except pyodbc.Error:
             raise CannotDropError("Dropping user failed due to dependencies.")
         del user_facts[user_key]

@@ -335,18 +332,17 @@ def main():
    try:
        dsn = (
            "Driver=Vertica;"
-            "Server={0};"
-            "Port={1};"
-            "Database={2};"
-            "User={3};"
-            "Password={4};"
-            "ConnectionLoadBalance={5}"
-        ).format(module.params['cluster'], module.params['port'], db,
-                 module.params['login_user'], module.params['login_password'], 'true')
+            f"Server={module.params['cluster']};"
+            f"Port={module.params['port']};"
+            f"Database={db};"
+            f"User={module.params['login_user']};"
+            f"Password={module.params['login_password']};"
+            f"ConnectionLoadBalance=true"
+        )
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
-        module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+        module.fail_json(msg=f"Unable to connect to database: {e}.")
 
    try:
        user_facts = get_user_facts(cursor)
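The query_fragments pattern above builds a SQL statement incrementally from a list of f-string fragments and joins them with spaces. A compressed, hypothetical sketch of the same idea (the helper name and its defaults are invented for illustration, not part of the module):

    def build_create_user(user, locked=False, password=None, profile=None):
        # Mirrors the fragment-list pattern used by vertica_user.present().
        query_fragments = [f"create user {user}"]
        if locked:
            query_fragments.append("account lock")
        if password:
            query_fragments.append(f"identified by '{password}'")
        if profile:
            query_fragments.append(f"profile {profile}")
        return ' '.join(query_fragments)

    print(build_create_user("alice", locked=True, profile="default"))
    # create user alice account lock profile default

As with the original .format() version, the f-strings interpolate values directly into SQL text, so the conversion changes notation only, not the module's existing quoting behavior.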
@@ -102,7 +102,7 @@ def get_vg_id(module, array):
         if len(vg) == 1:
             return vg[0]['id']
         else:
-            module.fail_json(msg='Volume group {0} was not found.'.format(name))
+            module.fail_json(msg=f'Volume group {name} was not found.')
     except Exception:
         module.fail_json(msg='Error while attempting to retrieve volume groups.')

@@ -116,7 +116,7 @@ def get_ig_id(module, array):
         if len(ig) == 1:
             return ig[0]['id']
         else:
-            module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+            module.fail_json(msg=f'Initiator group {name} was not found.')
     except Exception:
         module.fail_json(msg='Error while attempting to retrieve initiator groups.')

@@ -130,7 +130,7 @@ def get_pg_id(module, array):
         if len(pg) == 1:
             return pg[0]['id']
         else:
-            module.fail_json(msg='Port group {0} was not found.'.format(name))
+            module.fail_json(msg=f'Port group {name} was not found.')
     except Exception:
         module.fail_json(msg='Error while attempting to retrieve port groups.')

@@ -151,12 +151,12 @@ def create_eg(module, array):
             'Ansible export group',
             (vg_id, ig_id, pg_id))
         if eg:
-            module.log(msg='Created export group {0}'.format(eg_name))
+            module.log(msg=f'Created export group {eg_name}')
             changed = True
         else:
             raise Exception
     except Exception:
-        module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+        module.fail_json(msg=f'Export group {eg_name} create failed.')
     module.exit_json(changed=changed)

@@ -170,12 +170,12 @@ def delete_eg(module, array, eg):
         ok = array.delete_eg(
             eg['id'])
         if ok:
-            module.log(msg='Export group {0} deleted.'.format(eg_name))
+            module.log(msg=f'Export group {eg_name} deleted.')
             changed = True
         else:
             raise Exception
     except Exception:
-        module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+        module.fail_json(msg=f'Export group {eg_name} delete failed.')
     module.exit_json(changed=changed)
@@ -113,7 +113,7 @@ def create_volume(module, array):
             'Ansible volume',
             size)
         if vol:
-            module.log(msg='Created volume {0}'.format(vol['id']))
+            module.log(msg=f"Created volume {vol['id']}")
             changed = True
         else:
             module.fail_json(msg='Volume create failed.')

@@ -157,7 +157,7 @@ def delete_volume(module, array, volume):
         ok = array.delete_volume(
             volume['id'])
         if ok:
-            module.log(msg='Volume {0} deleted.'.format(vol_name))
+            module.log(msg=f'Volume {vol_name} deleted.')
             changed = True
         else:
             raise Exception
@@ -417,19 +417,19 @@ from ansible.module_utils.common.text.converters import to_native
 def get_vm_prop(module, uuid, prop):
     # Lookup a property for the given VM.
     # Returns the property, or None if not found.
-    cmd = [module.vmadm, 'lookup', '-j', '-o', prop, 'uuid={0}'.format(uuid)]
+    cmd = [module.vmadm, 'lookup', '-j', '-o', prop, f'uuid={uuid}']
 
     (rc, stdout, stderr) = module.run_command(cmd)
 
     if rc != 0:
         module.fail_json(
-            msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+            msg=f'Could not perform lookup of {prop} on {uuid}', exception=stderr)
 
     try:
         stdout_json = json.loads(stdout)
     except Exception as e:
         module.fail_json(
-            msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+            msg=f'Invalid JSON returned by vmadm for uuid lookup of {prop}',
             details=to_native(e), exception=traceback.format_exc())
 
     if stdout_json:

@@ -439,13 +439,13 @@ def get_vm_prop(module, uuid, prop):
 def get_vm_uuid(module, alias):
     # Lookup the uuid that goes with the given alias.
     # Returns the uuid or '' if not found.
-    cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid', 'alias={0}'.format(alias)]
+    cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid', f'alias={alias}']
 
     (rc, stdout, stderr) = module.run_command(cmd)
 
     if rc != 0:
         module.fail_json(
-            msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+            msg=f'Could not retrieve UUID of {alias}', exception=stderr)
 
     # If no VM was found matching the given alias, we get back an empty array.
     # That is not an error condition as we might be explicitly checking for its

@@ -454,7 +454,7 @@ def get_vm_uuid(module, alias):
         stdout_json = json.loads(stdout)
     except Exception as e:
         module.fail_json(
-            msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+            msg=f'Invalid JSON returned by vmadm for uuid lookup of {alias}',
             details=to_native(e), exception=traceback.format_exc())
 
     if stdout_json:

@@ -493,7 +493,7 @@ def new_vm(module, uuid, vm_state):
     if match:
         vm_uuid = match.groups()[0]
         if not is_valid_uuid(vm_uuid):
-            module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+            module.fail_json(msg=f'Invalid UUID for VM {vm_uuid}?')
     else:
         module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')

@@ -501,14 +501,14 @@ def new_vm(module, uuid, vm_state):
     if vm_state != 'running':
         ret = set_vm_state(module, vm_uuid, vm_state)
         if not ret:
-            module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+            module.fail_json(msg=f'Could not set VM {vm_uuid} to state {vm_state}')
 
     try:
         os.unlink(payload_file)
     except Exception as e:
         # Since the payload may contain sensitive information, fail hard
         # if we cannot remove the file so the operator knows about it.
-        module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+        module.fail_json(msg=f'Could not remove temporary JSON payload file {payload_file}: {e}',
                          exception=traceback.format_exc())
 
     return changed, vm_uuid

@@ -577,7 +577,7 @@ def create_payload(module, uuid):
         with open(fname, 'w') as fh:
             fh.write(vmdef_json)
     except Exception as e:
-        module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+        module.fail_json(msg=f'Could not save JSON payload: {e}', exception=traceback.format_exc())
 
     return fname

@@ -591,7 +591,7 @@ def vm_state_transition(module, uuid, vm_state):
     elif ret:
         return True
     else:
-        module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+        module.fail_json(msg=f'Failed to set VM {uuid} to state {vm_state}')
 
 
 def is_valid_uuid(uuid):

@@ -606,7 +606,7 @@ def validate_uuids(module):
     ]
 
     if failed:
-        module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+        module.fail_json(msg=f"No valid UUID(s) found for: {', '.join(failed)}")
 
 
 def manage_all_vms(module, vm_state):
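The validate_uuids change above also flips the outer quotes from single to double. That is deliberate: before Python 3.12 (PEP 701), an f-string could not reuse its own delimiter inside the replacement field, so the single-quoted expression ', '.join(...) forces double quotes outside. A tiny sketch with invented field names:

    failed = ["vm_uuid", "image_uuid"]  # hypothetical parameter names
    msg = f"No valid UUID(s) found for: {', '.join(failed)}"
    assert msg == 'No valid UUID(s) found for: vm_uuid, image_uuid'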
@@ -84,17 +84,17 @@ def wakeonlan(module, mac, broadcast, port):
 
     # If we don't end up with 12 hexadecimal characters, fail
     if len(mac) != 12:
-        module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+        module.fail_json(msg=f"Incorrect MAC address length: {mac_orig}")
 
     # Test if it converts to an integer, otherwise fail
     try:
         int(mac, 16)
     except ValueError:
-        module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+        module.fail_json(msg=f"Incorrect MAC address format: {mac_orig}")
 
     # Create payload for magic packet
     data = b''
-    padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+    padding = f"FFFFFFFFFFFF{mac * 20}"
    for i in range(0, len(padding), 2):
        data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
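For context, a self-contained sketch of the magic-packet construction the hunk above touches, using an invented MAC (colons already stripped, as the module does earlier):

    import struct

    mac = "001122334455"  # hypothetical, pre-validated MAC
    padding = f"FFFFFFFFFFFF{mac * 20}"
    data = b''
    for i in range(0, len(padding), 2):
        data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
    assert len(data) == 6 + 6 * 20  # 6 sync bytes, then the MAC repeated 20 times

The f-string simply replaces ''.join([...]) for concatenating the sync prefix with the repeated MAC; the resulting 126-byte payload is identical.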
@@ -270,20 +270,20 @@ def main():
 
     # Check that Category is valid
     if category not in CATEGORY_COMMANDS_ALL:
-        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys()))))
+        module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}"))
 
     # Check that all commands are valid
     for cmd in command_list:
         # Fail if even one command given is invalid
         if cmd not in CATEGORY_COMMANDS_ALL[category]:
-            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+            module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}"))
 
     # Build root URI(s)
     if module.params.get("baseuri") is not None:
-        root_uris = ["https://" + module.params['baseuri']]
+        root_uris = [f"https://{module.params['baseuri']}"]
     else:
         root_uris = [
-            "https://" + iom for iom in module.params['ioms']
+            f"https://{iom}" for iom in module.params['ioms']
         ]
     rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module,
                                resource_id=resource_id, data_modification=True)
@@ -171,20 +171,20 @@ def main():
 
     # Check that Category is valid
     if category not in CATEGORY_COMMANDS_ALL:
-        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys()))))
+        module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}"))
 
     # Check that all commands are valid
     for cmd in command_list:
         # Fail if even one command given is invalid
         if cmd not in CATEGORY_COMMANDS_ALL[category]:
-            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+            module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}"))
 
     # Build root URI(s)
     if module.params.get("baseuri") is not None:
-        root_uris = ["https://" + module.params['baseuri']]
+        root_uris = [f"https://{module.params['baseuri']}"]
     else:
         root_uris = [
-            "https://" + iom for iom in module.params['ioms']
+            f"https://{iom}" for iom in module.params['ioms']
         ]
     rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module,
                                resource_id=None,
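Both WDC hunks above make the same substitution inside a list comprehension; f-strings are ordinary expressions, so this works anywhere string concatenation does. A standalone sketch with invented IOM addresses:

    ioms = ["10.0.0.10", "10.0.0.11"]  # hypothetical IOM hosts
    root_uris = [f"https://{iom}" for iom in ioms]
    assert root_uris == ["https://10.0.0.10", "https://10.0.0.11"]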
@@ -96,7 +96,6 @@ import os
 
 # import module snippets
 from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
 
 
 def get_xattr_keys(module, path, follow):

@@ -155,7 +154,7 @@ def _run_xattr(module, cmd, check_rc=True):
     try:
         (rc, out, err) = module.run_command(cmd, check_rc=check_rc)
     except Exception as e:
-        module.fail_json(msg="%s!" % to_native(e))
+        module.fail_json(msg=f"{e}!")
 
     # result = {'raw': out}
     result = {}

@@ -197,7 +196,7 @@ def main():
     res = {}
 
     if key is None and state in ['absent', 'present']:
-        module.fail_json(msg="%s needs a key parameter" % state)
+        module.fail_json(msg=f"{state} needs a key parameter")
 
     # Prepend the key with the namespace if defined
     if (

@@ -205,7 +204,7 @@ def main():
             namespace is not None and
             len(namespace) > 0 and
             not (namespace == 'user' and key.startswith('user.'))):
-        key = '%s.%s' % (namespace, key)
+        key = f'{namespace}.{key}'
 
     if state == 'present' or value is not None:
         current = get_xattr(module, path, key, follow)

@@ -214,7 +213,7 @@ def main():
                 res = set_xattr(module, path, key, value, follow)
             changed = True
         res = current
-        msg = "%s set to %s" % (key, value)
+        msg = f"{key} set to {value}"
     elif state == 'absent':
         current = get_xattr(module, path, key, follow)
         if current is not None and key in current:

@@ -222,7 +221,7 @@ def main():
                 res = rm_xattr(module, path, key, follow)
             changed = True
         res = current
-        msg = "%s removed" % (key)
+        msg = f"{key} removed"
     elif state == 'keys':
         res = get_xattr_keys(module, path, follow)
         msg = "returning all keys"

@@ -231,7 +230,7 @@ def main():
         msg = "dumping all"
     else:
         res = get_xattr(module, path, key, follow)
-        msg = "returning %s" % key
+        msg = f"returning {key}"
 
     module.exit_json(changed=changed, msg=msg, xattr=res)
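A quick sketch of the namespace-prefixing logic touched above, with an invented attribute name, showing the f-string produces the usual dotted xattr key:

    namespace, key = "user", "comment"  # hypothetical values
    if namespace and not (namespace == 'user' and key.startswith('user.')):
        key = f'{namespace}.{key}'
    assert key == "user.comment"

Note the first hunk in this file also drops the to_native import: once f"{e}!" formats the exception directly, the helper is no longer referenced.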
@@ -175,7 +175,7 @@ def append_flags(module, xbps_path, cmd, skip_repo=False):
         cmd = cmd + ["-r", module.params["root"]]
     if module.params["repositories"] and cmd[0] != xbps_path["remove"] and not skip_repo:
         for repo in module.params["repositories"]:
-            cmd = cmd + ["--repository=%s" % repo]
+            cmd = cmd + [f"--repository={repo}"]
 
     return cmd

@@ -273,14 +273,13 @@ def remove_packages(module, xbps_path, packages):
         rc, stdout, stderr = module.run_command(cmd, check_rc=False)
 
         if rc != 0:
-            module.fail_json(msg="failed to remove %s" % (package))
+            module.fail_json(msg=f"failed to remove {package}")
 
         changed_packages.append(package)
 
     if len(changed_packages) > 0:
 
-        module.exit_json(changed=True, msg="removed %s package(s)" %
-                         len(changed_packages), packages=changed_packages)
+        module.exit_json(changed=True, msg=f"removed {len(changed_packages)} package(s)", packages=changed_packages)
 
     module.exit_json(changed=False, msg="package(s) already absent")

@@ -311,12 +310,10 @@ def install_packages(module, xbps_path, state, packages):
         module.params['upgrade_xbps'] = False
         install_packages(module, xbps_path, state, packages)
     elif rc != 0 and not (state == 'latest' and rc == 17):
-        module.fail_json(msg="failed to install %s packages(s)"
-                         % (len(toInstall)),
+        module.fail_json(msg=f"failed to install {len(toInstall)} packages(s)",
                          packages=toInstall)
 
-    module.exit_json(changed=True, msg="installed %s package(s)"
-                     % (len(toInstall)),
+    module.exit_json(changed=True, msg=f"installed {len(toInstall)} package(s)",
                      packages=toInstall)

@@ -332,11 +329,9 @@ def check_packages(module, xbps_path, packages, state):
     if would_be_changed:
         if state == "absent":
             state = "removed"
-        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
-            len(would_be_changed), state),
-            packages=would_be_changed)
+        module.exit_json(changed=True, msg=f"{len(would_be_changed)} package(s) would be {state}", packages=would_be_changed)
     else:
-        module.exit_json(changed=False, msg="package(s) already %s" % state,
+        module.exit_json(changed=False, msg=f"package(s) already {state}",
                          packages=[])

@@ -382,8 +377,7 @@ def main():
     xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
 
     if not os.path.exists(xbps_path['install']):
-        module.fail_json(msg="cannot find xbps, in path %s"
-                         % (xbps_path['install']))
+        module.fail_json(msg=f"cannot find xbps, in path {xbps_path['install']}")
 
     p = module.params
@@ -364,8 +364,7 @@ class XCCRedfishUtils(RedfishUtils):
                 if 'PATCH' not in methods:
                     # if Allow header present and PATCH missing, return error
                     return {'ret': False,
-                            'msg': "%s action not found and PATCH not allowed"
-                            % '#VirtualMedia.EjectMedia'}
+                            'msg': "#VirtualMedia.EjectMedia action not found and PATCH not allowed"}
                 return self.virtual_media_eject_via_patch(uri)
         else:
             # POST to the EjectMedia Action

@@ -387,13 +386,11 @@ class XCCRedfishUtils(RedfishUtils):
         elif uri and not eject:
             # already ejected: return success but changed=False
             return {'ret': True, 'changed': False,
-                    'msg': "VirtualMedia image '%s' already ejected" %
-                    image_url}
+                    'msg': f"VirtualMedia image '{image_url}' already ejected"}
         else:
             # return failure (no resources matching image_url found)
             return {'ret': False, 'changed': False,
-                    'msg': "No VirtualMedia resource found with image '%s' "
-                    "inserted" % image_url}
+                    'msg': f"No VirtualMedia resource found with image '{image_url}' inserted"}
 
     def virtual_media_eject(self, options):
         if options:

@@ -441,7 +438,7 @@ class XCCRedfishUtils(RedfishUtils):
                     'msg': "No VirtualMedia image inserted"}
         else:
             return {'ret': True, 'changed': True,
-                    'msg': "VirtualMedia %s ejected" % str(ejected_media_list)}
+                    'msg': f"VirtualMedia {ejected_media_list!s} ejected"}
 
     def virtual_media_insert(self, options):
         param_map = {

@@ -484,7 +481,7 @@ class XCCRedfishUtils(RedfishUtils):
         # see if image already inserted; if so, nothing to do
         if self._virt_media_image_inserted(resources, image_url):
             return {'ret': True, 'changed': False,
-                    'msg': "VirtualMedia '%s' already inserted" % image_url}
+                    'msg': f"VirtualMedia '{image_url}' already inserted"}
 
         # find an empty slot to insert the media
         # try first with strict media_type matching

@@ -496,9 +493,7 @@ class XCCRedfishUtils(RedfishUtils):
                 resources, media_types, media_match_strict=False)
         if not uri:
             return {'ret': False,
-                    'msg': "Unable to find an available VirtualMedia resource "
-                           "%s" % ('supporting ' + str(media_types)
-                                   if media_types else '')}
+                    'msg': f'Unable to find an available VirtualMedia resource {f"supporting {media_types}" if media_types else ""}'}
 
         # confirm InsertMedia action found
         if ('Actions' not in data or

@@ -510,8 +505,7 @@ class XCCRedfishUtils(RedfishUtils):
             if 'PATCH' not in methods:
                 # if Allow header present and PATCH missing, return error
                 return {'ret': False,
-                        'msg': "%s action not found and PATCH not allowed"
-                        % '#VirtualMedia.InsertMedia'}
+                        'msg': "#VirtualMedia.InsertMedia action not found and PATCH not allowed"}
             return self.virtual_media_insert_via_patch(options, param_map,
                                                        uri, data)

@@ -578,7 +572,7 @@ class XCCRedfishUtils(RedfishUtils):
         data = response['data']
         for key in request_body.keys():
             if key not in data:
-                return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))}
+                return {'ret': False, 'msg': f"Key {key} not found. Supported key list: {data.keys()}"}
 
         # perform patch
         response = self.patch_request(self.root_uri + resource_uri, request_body)

@@ -609,7 +603,7 @@ class XCCRedfishUtils(RedfishUtils):
             return response
         if 'Actions' not in response['data']:
             if resource_uri_has_actions:
-                return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri}
+                return {'ret': False, 'msg': f"Actions property not found in {action_base_uri}"}
             else:
                 response['data']['Actions'] = {}

@@ -645,8 +639,8 @@ class XCCRedfishUtils(RedfishUtils):
 
         if not action_found and resource_uri_has_actions:
             return {'ret': False,
-                    'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. Supported uri: %s'
-                    % (str(action_target_uri_list))}
+                    'msg': (f'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. '
+                            f'Supported uri: {action_target_uri_list}')}
 
         # check request_body with parameter name defined by @Redfish.ActionInfo
         if action_info_uri is not None:

@@ -661,8 +655,8 @@ class XCCRedfishUtils(RedfishUtils):
                     break
             if not key_found:
                 return {'ret': False,
-                        'msg': 'Invalid property %s found in request_body. Please refer to @Redfish.ActionInfo Parameters: %s'
-                        % (key, str(response['data']['Parameters']))}
+                        'msg': (f"Invalid property {key} found in request_body. "
+                                f"Please refer to @Redfish.ActionInfo Parameters: {response['data']['Parameters']}")}
 
         # perform post
         response = self.post_request(self.root_uri + resource_uri, request_body)

@@ -750,18 +744,18 @@ def main():
     request_body = module.params['request_body']
 
     # Build root URI
-    root_uri = "https://" + module.params['baseuri']
+    root_uri = f"https://{module.params['baseuri']}"
     rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True)
 
     # Check that Category is valid
     if category not in CATEGORY_COMMANDS_ALL:
-        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+        module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {CATEGORY_COMMANDS_ALL.keys()}"))
 
     # Check that all commands are valid
     for cmd in command_list:
         # Fail if even one command given is invalid
         if cmd not in CATEGORY_COMMANDS_ALL[category]:
-            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+            module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}"))
 
     # Organize by Categories / Commands
     if category == "Manager":
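Two conversions in this file are worth a closer look. The !s conversion specifier is an explicit str() call, matching the old "%s" % str(...), and a nested f-string can replace a conditional concatenation as long as the inner and outer quote characters differ (required before Python 3.12). A standalone sketch with invented media values:

    media_types = ["CD", "USB"]  # hypothetical media types
    msg = f'Unable to find an available VirtualMedia resource {f"supporting {media_types}" if media_types else ""}'
    assert msg == "Unable to find an available VirtualMedia resource supporting ['CD', 'USB']"

    ejected = ['CD1']  # hypothetical ejected-media list
    assert f"VirtualMedia {ejected!s} ejected" == "VirtualMedia ['CD1'] ejected"

Here !s is technically redundant (formatting a list already goes through str()), but it preserves the old code's intent verbatim.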
@@ -103,7 +103,7 @@ def get_pifs(session):
     devicenums = range(0, 7)
     for pif in pifs.values():
         for eth in devicenums:
-            interface_name = "eth%s" % (eth)
+            interface_name = f"eth{eth}"
             bond_name = interface_name.replace('eth', 'bond')
             if pif['device'] == interface_name:
                 xs_pifs[interface_name] = pif
@@ -685,7 +685,7 @@ class XenServerVM(XenServerObject):
             self.set_power_state("poweredon")
 
         except XenAPI.Failure as f:
-            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+            self.module.fail_json(msg=f"XAPI ERROR: {f.details}")
 
     def reconfigure(self):
         """Reconfigures an existing VM.

@@ -767,14 +767,14 @@ class XenServerVM(XenServerObject):
                         self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
                     elif disk_change == "size":
                         self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
-                                                                                                       "VM reconfigure disks[%s]: " % position)))
+                                                                                                       f"VM reconfigure disks[{position}]: ")))
 
                 position += 1
             elif change.get('disks_new'):
                 for position, disk_userdevice in change['disks_new']:
                     disk_params = self.module.params['disks'][position]
 
-                    disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+                    disk_name = disk_params['name'] if disk_params.get('name') else f"{self.vm_params['name_label']}-{position}"
                     disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
 
                     if disk_params.get('sr_uuid'):

@@ -784,7 +784,7 @@ class XenServerVM(XenServerObject):
                     else:
                         sr_ref = self.default_sr_ref
 
-                    disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+                    disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], f"VM reconfigure disks[{position}]: "))
 
                     new_disk_vdi = {
                         "name_label": disk_name,

@@ -937,10 +937,10 @@ class XenServerVM(XenServerObject):
                             network_ip = ""
 
                         if "prefix" in network_change_list:
-                            network_prefix = "/%s" % network_params['prefix']
+                            network_prefix = f"/{network_params['prefix']}"
                             vif_reconfigure_needed = True
                         elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
-                            network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+                            network_prefix = f"/{vm_vif_params['ipv4_addresses'][0].split('/')[1]}"
                         else:
                             network_prefix = ""

@@ -952,7 +952,7 @@ class XenServerVM(XenServerObject):
 
                         if vif_recreated or vif_reconfigure_needed:
                             self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
-                                                                        "%s%s" % (network_ip, network_prefix), network_gateway)
+                                                                        f"{network_ip}{network_prefix}", network_gateway)
 
                             vif_reconfigure_needed = False

@@ -971,10 +971,10 @@ class XenServerVM(XenServerObject):
                             network_ip6 = ""
 
                         if "prefix6" in network_change_list:
-                            network_prefix6 = "/%s" % network_params['prefix6']
+                            network_prefix6 = f"/{network_params['prefix6']}"
                             vif_reconfigure_needed = True
                         elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
-                            network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+                            network_prefix6 = f"/{vm_vif_params['ipv6_addresses'][0].split('/')[1]}"
                         else:
                             network_prefix6 = ""

@@ -986,7 +986,7 @@ class XenServerVM(XenServerObject):
 
                         if vif_recreated or vif_reconfigure_needed:
                             self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
-                                                                        "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+                                                                        f"{network_ip6}{network_prefix6}", network_gateway6)
 
                 elif self.vm_params['customization_agent'] == "custom":
                     vif_device = vm_vif_params['device']

@@ -1006,7 +1006,7 @@ class XenServerVM(XenServerObject):
 
                     for network_change in network_change_list_tmp + ['name', 'mac']:
                         self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
-                                                                              "vm-data/networks/%s/%s" % (vif_device, network_change))
+                                                                              f"vm-data/networks/{vif_device}/{network_change}")
 
                     if network_params.get('name'):
                         network_name = network_params['name']

@@ -1014,7 +1014,7 @@ class XenServerVM(XenServerObject):
                         network_name = vm_vif_params['network']['name_label']
 
                     self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                     "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+                                                                     f"vm-data/networks/{vif_device}/name", network_name)
 
                     if network_params.get('mac'):
                         network_mac = network_params['mac'].lower()

@@ -1022,11 +1022,11 @@ class XenServerVM(XenServerObject):
                         network_mac = vm_vif_params['MAC'].lower()
 
                     self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                     "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+                                                                     f"vm-data/networks/{vif_device}/mac", network_mac)
 
                     for network_change in network_change_list_tmp:
                         self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                         "vm-data/networks/%s/%s" % (vif_device, network_change),
+                                                                         f"vm-data/networks/{vif_device}/{network_change}",
                                                                          network_params[network_change])
 
             position += 1

@@ -1067,11 +1067,11 @@ class XenServerVM(XenServerObject):
                 if self.vm_params['customization_agent'] == "native":
                     if network_type and network_type == "static":
                         self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
-                                                                    "%s/%s" % (network_ip, network_prefix), network_gateway)
+                                                                    f"{network_ip}/{network_prefix}", network_gateway)
 
                     if network_type6 and network_type6 == "static":
                         self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
-                                                                    "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+                                                                    f"{network_ip6}/{network_prefix6}", network_gateway6)
                 elif self.vm_params['customization_agent'] == "custom":
                     # We first have to remove any existing data
                     # from xenstore_data because there could be

@@ -1079,45 +1079,45 @@ class XenServerVM(XenServerObject):
                     # that once occupied same device location as
                     # our new interface.
                     for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
-                        self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+                        self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/{network_param}")
 
-                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/name", network_name)
 
                     # We get MAC from VIF itself instead of
                     # networks.mac because it could be
                     # autogenerated.
                     vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
-                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/mac", vm_vif_mac)
 
                     if network_type:
-                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/type", network_type)
 
                         if network_type == "static":
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/ip" % vif_device, network_ip)
+                                                                             f"vm-data/networks/{vif_device}/ip", network_ip)
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+                                                                             f"vm-data/networks/{vif_device}/prefix", network_prefix)
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+                                                                             f"vm-data/networks/{vif_device}/netmask", network_netmask)
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+                                                                             f"vm-data/networks/{vif_device}/gateway", network_gateway)
 
                     if network_type6:
-                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/type6", network_type6)
 
                         if network_type6 == "static":
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+                                                                             f"vm-data/networks/{vif_device}/ip6", network_ip6)
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+                                                                             f"vm-data/networks/{vif_device}/prefix6", network_prefix6)
                             self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                             "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+                                                                             f"vm-data/networks/{vif_device}/gateway6", network_gateway6)
 
         elif change.get('custom_params'):
             for position in change['custom_params']:
                 custom_param_key = self.module.params['custom_params'][position]['key']
                 custom_param_value = self.module.params['custom_params'][position]['value']
-                self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+                self.xapi_session.xenapi_request(f"VM.set_{custom_param_key}", (self.vm_ref, custom_param_value))
 
         if self.module.params['is_template']:
             self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)

@@ -1128,7 +1128,7 @@ class XenServerVM(XenServerObject):
             self.gather_params()
 
         except XenAPI.Failure as f:
-            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+            self.module.fail_json(msg=f"XAPI ERROR: {f.details}")
 
         return config_changes

@@ -1161,7 +1161,7 @@ class XenServerVM(XenServerObject):
                 self.xapi_session.xenapi.VDI.destroy(vdi_ref)
 
         except XenAPI.Failure as f:
-            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+            self.module.fail_json(msg=f"XAPI ERROR: {f.details}")
 
     def get_changes(self):
         """Finds VM parameters that differ from specified ones.

@@ -1326,8 +1326,8 @@ class XenServerVM(XenServerObject):
             # higher than a number of existing disks attached to the VM.
             # We don't support removal or detachment of disks.
             if len(self.module.params['disks']) < len(vm_disk_params_list):
-                self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
-                                      (len(self.module.params['disks']), len(vm_disk_params_list)))
+                self.module.fail_json(msg=f"VM check disks: provided disks configuration has less disks than the "
+                                          f"target VM ({len(self.module.params['disks'])} < {len(vm_disk_params_list)})!")
 
             # Find the highest disk occupied userdevice.
             if not vm_disk_params_list:

@@ -1343,12 +1343,12 @@ class XenServerVM(XenServerObject):
 
                 disk_params = self.module.params['disks'][position]
 
-                disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+                disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], f"VM check disks[{position}]: ")
 
                 disk_name = disk_params.get('name')
 
                 if disk_name is not None and not disk_name:
-                    self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+                    self.module.fail_json(msg=f"VM check disks[{position}]: disk name cannot be an empty string!")
 
                 # If this is an existing disk.
                 if vm_disk_params and vm_disk_params['VDI']:

@@ -1367,14 +1367,14 @@ class XenServerVM(XenServerObject):
                         disk_changes.append('size')
                         need_poweredoff = True
                     elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
-                        self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
-                                              "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+                        self.module.fail_json(msg=f"VM check disks[{position}]: disk size is smaller than existing ({disk_size} bytes < "
+                                                  f"{vm_disk_params['VDI']['virtual_size']} bytes). Reducing disk size is not allowed!")
 
                     config_changes_disks.append(disk_changes)
                 # If this is a new disk.
                 else:
                     if not disk_size:
-                        self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+                        self.module.fail_json(msg=f"VM check disks[{position}]: no valid disk size specification found!")
 
                     disk_sr_uuid = disk_params.get('sr_uuid')
                     disk_sr = disk_params.get('sr')

@@ -1382,12 +1382,12 @@ class XenServerVM(XenServerObject):
                     if disk_sr_uuid is not None or disk_sr is not None:
                         # Check existence only. Ignore return value.
                         get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
-                                       msg_prefix="VM check disks[%s]: " % position)
+                                       msg_prefix=f"VM check disks[{position}]: ")
                     elif self.default_sr_ref == 'OpaqueRef:NULL':
-                        self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+                        self.module.fail_json(msg=f"VM check disks[{position}]: no default SR found! You must specify SR explicitly.")
 
                     if not vbd_userdevices_allowed:
-                        self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+                        self.module.fail_json(msg=f"VM check disks[{position}]: maximum number of devices reached!")
 
                     disk_userdevice = None

@@ -1409,7 +1409,7 @@ class XenServerVM(XenServerObject):
                         # so we have to include all devices regardless of
                        # type when calculating out-of-bound position.
                         disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
-                        self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+                        self.module.fail_json(msg=f"VM check disks[{position}]: new disk position {disk_userdevice} is out of bounds!")
 
                     # For new disks we only track their position.
                     config_new_disks.append((position, disk_userdevice))

@@ -1482,8 +1482,8 @@ class XenServerVM(XenServerObject):
             # higher than a number of existing VIFs attached to the VM.
             # We don't support removal of VIFs.
             if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
-                self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
-                                      (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+                self.module.fail_json(msg=f"VM check networks: provided networks configuration has less interfaces than the target "
+                                          f"VM ({len(self.module.params['networks'])} < {len(self.vm_params['VIFs'])})!")
 
             # Find the highest occupied device.
             if not self.vm_params['VIFs']:

@@ -1502,12 +1502,12 @@ class XenServerVM(XenServerObject):
                 network_name = network_params.get('name')
 
                 if network_name is not None and not network_name:
-                    self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+                    self.module.fail_json(msg=f"VM check networks[{position}]: network name cannot be an empty string!")
 
                 if network_name:
                     # Check existence only. Ignore return value.
                     get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
-                                   msg_prefix="VM check networks[%s]: " % position)
+                                   msg_prefix=f"VM check networks[{position}]: ")
 
                 network_mac = network_params.get('mac')

@@ -1515,7 +1515,7 @@ class XenServerVM(XenServerObject):
                     network_mac = network_mac.lower()
 
                     if not is_mac(network_mac):
-                        self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+                        self.module.fail_json(msg=f"VM check networks[{position}]: specified MAC address '{network_mac}' is not valid!")
 
                 # IPv4 reconfiguration.
                 network_type = network_params.get('type')

@@ -1539,17 +1539,17 @@ class XenServerVM(XenServerObject):
                     network_ip = network_ip_split[0]
 
                     if network_ip and not is_valid_ip_addr(network_ip):
-                        self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+                        self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 address '{network_ip}' is not valid!")
 
                     if len(network_ip_split) > 1:
                         network_prefix = network_ip_split[1]
 
                         if not is_valid_ip_prefix(network_prefix):
-                            self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+                            self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 prefix '{network_prefix}' is not valid!")
 
                 if network_netmask is not None:
                     if not is_valid_ip_netmask(network_netmask):
-                        self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+                        self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 netmask '{network_netmask}' is not valid!")
 
                     network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
                 elif network_prefix is not None:

@@ -1573,7 +1573,7 @@ class XenServerVM(XenServerObject):
                 # Gateway can be an empty string (when removing gateway
                 # configuration) but if it is not, it should be validated.
                 if network_gateway and not is_valid_ip_addr(network_gateway):
-                    self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+                    self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 gateway '{network_gateway}' is not valid!")
 
                 # IPv6 reconfiguration.
                 network_type6 = network_params.get('type6')

@@ -1596,13 +1596,13 @@ class XenServerVM(XenServerObject):
                     network_ip6 = network_ip6_split[0]
 
                     if network_ip6 and not is_valid_ip6_addr(network_ip6):
-                        self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+                        self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 address '{network_ip6}' is not valid!")
 
                     if len(network_ip6_split) > 1:
                         network_prefix6 = network_ip6_split[1]
 
                         if not is_valid_ip6_prefix(network_prefix6):
-                            self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+                            self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 prefix '{network_prefix6}' is not valid!")
 
                 # If any parameter is overridden at this point, update it.
                 if network_type6:

@@ -1619,7 +1619,7 @@ class XenServerVM(XenServerObject):
                 # Gateway can be an empty string (when removing gateway
                 # configuration) but if it is not, it should be validated.
                 if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
-                    self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+                    self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 gateway '{network_gateway6}' is not valid!")
 
                 # If this is an existing VIF.
                 if vm_vif_params and vm_vif_params['network']:

@@ -1670,40 +1670,40 @@ class XenServerVM(XenServerObject):
|
|||
elif self.vm_params['customization_agent'] == "custom":
|
||||
vm_xenstore_data = self.vm_params['xenstore_data']
|
||||
|
||||
if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
|
||||
if network_type and network_type != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/type", "none"):
|
||||
network_changes.append('type')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_type and network_type == "static":
|
||||
if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
|
||||
if network_ip and network_ip != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/ip", ""):
|
||||
network_changes.append('ip')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
|
||||
if network_prefix and network_prefix != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/prefix", ""):
|
||||
network_changes.append('prefix')
|
||||
network_changes.append('netmask')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
|
||||
vm_vif_params['device'], ""):
|
||||
_device_gw_path = f"vm-data/networks/{vm_vif_params['device']}/gateway"
|
||||
if network_gateway is not None and network_gateway != vm_xenstore_data.get(_device_gw_path, ""):
|
||||
network_changes.append('gateway')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
|
||||
if network_type6 and network_type6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/type6", "none"):
|
||||
network_changes.append('type6')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_type6 and network_type6 == "static":
|
||||
if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
|
||||
if network_ip6 and network_ip6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/ip6", ""):
|
||||
network_changes.append('ip6')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
|
||||
if network_prefix6 and network_prefix6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/prefix6", ""):
|
||||
network_changes.append('prefix6')
|
||||
need_poweredoff = True
|
||||
|
||||
if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
|
||||
vm_vif_params['device'], ""):
|
||||
_device_gw6_path = f"vm-data/networks/{vm_vif_params['device']}/gateway6"
|
||||
if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get(_device_gw6_path, ""):
|
||||
network_changes.append('gateway6')
|
||||
need_poweredoff = True
|
||||
|
||||
|
|
@ -1711,13 +1711,13 @@ class XenServerVM(XenServerObject):
|
|||
# If this is a new VIF.
|
||||
else:
|
||||
if not network_name:
|
||||
self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
|
||||
self.module.fail_json(msg=f"VM check networks[{position}]: network name is required for new network interface!")
|
||||
|
||||
if network_type and network_type == "static" and network_ip and not network_netmask:
|
||||
self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
|
||||
self.module.fail_json(msg=f"VM check networks[{position}]: IPv4 netmask or prefix is required for new network interface!")
|
||||
|
||||
if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
|
||||
self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
|
||||
self.module.fail_json(msg=f"VM check networks[{position}]: IPv6 prefix is required for new network interface!")
|
||||
|
||||
# Restart is needed if we are adding new network
|
||||
# interface with IP/gateway parameters specified
|
||||
|
|
@ -1729,7 +1729,7 @@ class XenServerVM(XenServerObject):
|
|||
break
|
||||
|
||||
if not vif_devices_allowed:
|
||||
self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
|
||||
self.module.fail_json(msg=f"VM check networks[{position}]: maximum number of network interfaces reached!")
|
||||
|
||||
# We need to place a new network interface right above the
|
||||
# highest placed existing interface to maintain relative
|
||||
|
|
@ -1738,7 +1738,7 @@ class XenServerVM(XenServerObject):
|
|||
vif_device = str(int(vif_device_highest) + 1)
|
||||
|
||||
if vif_device not in vif_devices_allowed:
|
||||
self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
|
||||
self.module.fail_json(msg=f"VM check networks[{position}]: new network interface position {vif_device} is out of bounds!")
|
||||
|
||||
vif_devices_allowed.remove(vif_device)
|
||||
vif_device_highest = vif_device
|
||||
|
|
@ -1766,7 +1766,7 @@ class XenServerVM(XenServerObject):
|
|||
custom_param_value = custom_param['value']
|
||||
|
||||
if custom_param_key not in self.vm_params:
|
||||
self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
|
||||
self.module.fail_json(msg=f"VM check custom_params[{position}]: unknown VM param '{custom_param_key}'!")
|
||||
|
||||
if custom_param_value != self.vm_params[custom_param_key]:
|
||||
# We only need to track custom param position.
|
||||
|
|
@ -1781,7 +1781,7 @@ class XenServerVM(XenServerObject):
|
|||
return config_changes
|
||||
|
||||
except XenAPI.Failure as f:
|
||||
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
|
||||
self.module.fail_json(msg=f"XAPI ERROR: {f.details}")
|
||||
|
||||
def get_normalized_disk_size(self, disk_params, msg_prefix=""):
|
||||
"""Parses disk size parameters and returns disk size in bytes.
|
||||
|
|
@ -1842,7 +1842,7 @@ class XenServerVM(XenServerObject):
|
|||
|
||||
except (TypeError, ValueError, NameError):
|
||||
# Common failure
|
||||
self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
|
||||
self.module.fail_json(msg=f"{msg_prefix}failed to parse disk size! Please review value provided using documentation.")
|
||||
|
||||
disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
|
||||
|
||||
|
|
|
|||
|
|
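The gateway hunks above show the one wrinkle in this conversion: when the interpolated expression makes a condition too long, the f-string is hoisted into a named variable. A minimal standalone sketch of that pattern, with stand-in values (xenstore_data, device, and network_gateway here are illustrative, not the module's real state):

xenstore_data = {"vm-data/networks/0/gateway": "192.168.0.1"}  # stand-in xenstore contents
device = "0"
network_gateway = "192.168.0.254"

# Hoist the f-string path into a variable, as in the diff, so the
# comparison fits on one line instead of wrapping a %-format over two.
_device_gw_path = f"vm-data/networks/{device}/gateway"
if network_gateway is not None and network_gateway != xenstore_data.get(_device_gw_path, ""):
    print("gateway change detected")  # prints, since the stand-in values differ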
@@ -192,7 +192,7 @@ class XFConfProperty(StateModuleHelper):
     def __init_module__(self):
         self.runner = xfconf_runner(self.module)
         self.vars.version = get_xfconf_version(self.runner)
-        self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property, self.vars.channel)
+        self.does_not = f'Property "{self.vars.property}" does not exist on channel "{self.vars.channel}".'
         self.vars.set('previous_value', self._get())
         self.vars.set('type', self.vars.value_type)
         self.vars.set_meta('value', initial_value=self.vars.previous_value)

@@ -201,7 +201,7 @@ class XFConfProperty(StateModuleHelper):
         if err.rstrip() == self.does_not:
             return None
         if rc or len(err):
-            self.do_raise('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+            self.do_raise(f'xfconf-query failed with error (rc={rc}): {err}')

         result = out.rstrip()
         if 'Value is an array with' in result:
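The rewritten does_not message relies on quote nesting: the f-string is delimited with single quotes so the interpolated values can sit inside literal double quotes. A small sketch with stand-in property and channel values (not the module's real state):

prop, channel = "/general/theme", "xsettings"  # stand-in values
does_not = f'Property "{prop}" does not exist on channel "{channel}".'
print(does_not)  # -> Property "/general/theme" does not exist on channel "xsettings".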
@@ -195,13 +195,12 @@ def main():
     )

     if not os.path.ismount(mountpoint):
-        module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+        module.fail_json(msg=f"Path '{mountpoint}' is not a mount point", **result)

     mp = get_fs_by_mountpoint(mountpoint)
     if mp is None:
         module.fail_json(
-            msg="Path '%s' is not a mount point or not located on an xfs file system."
-            % mountpoint,
+            msg=f"Path '{mountpoint}' is not a mount point or not located on an xfs file system.",
             **result
         )

@@ -219,14 +218,13 @@ def main():
             and "qnoenforce" not in mp["mntopts"]
         ):
             module.fail_json(
-                msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option."
-                % mountpoint,
+                msg=f"Path '{mountpoint}' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.",
                 **result
             )
         try:
             pwd.getpwnam(name)
         except KeyError as e:
-            module.fail_json(msg="User '%s' does not exist." % name, **result)
+            module.fail_json(msg=f"User '{name}' does not exist.", **result)

     elif quota_type == "group":
         type_arg = "-g"

@@ -240,14 +238,13 @@ def main():
             and "gqnoenforce" not in mp["mntopts"]
         ):
             module.fail_json(
-                msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)"
-                % (mountpoint, mp["mntopts"]),
+                msg=f"Path '{mountpoint}' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: {mp['mntopts']})",
                 **result
             )
         try:
             grp.getgrnam(name)
         except KeyError as e:
-            module.fail_json(msg="User '%s' does not exist." % name, **result)
+            module.fail_json(msg=f"User '{name}' does not exist.", **result)

     elif quota_type == "project":
         type_arg = "-p"

@@ -261,8 +258,7 @@ def main():
             and "pqnoenforce" not in mp["mntopts"]
         ):
             module.fail_json(
-                msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option."
-                % mountpoint,
+                msg=f"Path '{mountpoint}' is not mounted with the pquota/prjquota/pqnoenforce option.",
                 **result
             )

@@ -274,12 +270,12 @@ def main():

         if name != quota_default and name is not None and get_project_id(name) is None:
             module.fail_json(
-                msg="Entry '%s' has not been defined in /etc/projid." % name, **result
+                msg=f"Entry '{name}' has not been defined in /etc/projid.", **result
             )

         prj_set = True
         if name != quota_default:
-            cmd = "project %s" % name
+            cmd = f"project {name}"
             rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
             if rc != 0:
                 result["cmd"] = cmd

@@ -298,7 +294,7 @@ def main():

         if state == "present" and not prj_set:
             if not module.check_mode:
-                cmd = "project -s %s" % name
+                cmd = f"project -s {name}"
                 rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
                 if rc != 0:
                     result["cmd"] = cmd

@@ -313,7 +309,7 @@ def main():

         elif state == "absent" and prj_set and name != quota_default:
             if not module.check_mode:
-                cmd = "project -C %s" % name
+                cmd = f"project -C {name}"
                 rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
                 if rc != 0:
                     result["cmd"] = cmd

@@ -364,35 +360,35 @@ def main():

     limit = []
     if bsoft is not None and int(bsoft) != current_bsoft:
-        limit.append("bsoft=%s" % bsoft)
+        limit.append(f"bsoft={bsoft}")
         result["bsoft"] = int(bsoft)

     if bhard is not None and int(bhard) != current_bhard:
-        limit.append("bhard=%s" % bhard)
+        limit.append(f"bhard={bhard}")
         result["bhard"] = int(bhard)

     if isoft is not None and isoft != current_isoft:
-        limit.append("isoft=%s" % isoft)
+        limit.append(f"isoft={isoft}")
         result["isoft"] = isoft

     if ihard is not None and ihard != current_ihard:
-        limit.append("ihard=%s" % ihard)
+        limit.append(f"ihard={ihard}")
         result["ihard"] = ihard

     if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
-        limit.append("rtbsoft=%s" % rtbsoft)
+        limit.append(f"rtbsoft={rtbsoft}")
         result["rtbsoft"] = int(rtbsoft)

     if rtbhard is not None and int(rtbhard) != current_rtbhard:
-        limit.append("rtbhard=%s" % rtbhard)
+        limit.append(f"rtbhard={rtbhard}")
         result["rtbhard"] = int(rtbhard)

     if len(limit) > 0:
         if not module.check_mode:
             if name == quota_default:
-                cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
+                cmd = f"limit {type_arg} -d {' '.join(limit)}"
             else:
-                cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)
+                cmd = f"limit {type_arg} {' '.join(limit)} {name}"

             rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
             if rc != 0:

@@ -432,7 +428,7 @@ def quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, used_type)
         factor = 1024

     rc, stdout, stderr = exec_quota(
-        module, xfs_quota_bin, "report %s %s" % (type_arg, used_arg), mountpoint
+        module, xfs_quota_bin, f"report {type_arg} {used_arg}", mountpoint
     )

     if rc != 0:

@@ -442,7 +438,7 @@ def quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, used_type)
             stdout=stdout,
             stderr=stderr,
         )
-        module.fail_json(msg="Could not get quota report for %s." % used_name, **result)
+        module.fail_json(msg=f"Could not get quota report for {used_name}.", **result)

     for line in stdout.split("\n"):
         line = line.strip().split()
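The limit hunk above assembles one key=value token per changed threshold and joins them into a single xfs_quota subcommand string. A runnable sketch of that assembly with stand-in numbers (no xfs_quota binary is invoked; type_arg, name, and the limits are illustrative values):

type_arg = "-u"                    # user quota, as selected earlier in main()
name = "alice"                     # stand-in user
bsoft, bhard = 10485760, 20971520  # stand-in byte limits

limit = [f"bsoft={bsoft}", f"bhard={bhard}"]   # one token per changed threshold
cmd = f"limit {type_arg} {' '.join(limit)} {name}"
print(cmd)  # -> limit -u bsoft=10485760 bhard=20971520 alice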
@@ -378,16 +378,16 @@ from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode
 from ansible.module_utils.common.text.converters import to_bytes, to_native

 _IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
-_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+_NSIDENT = f"{_IDENT}|{_IDENT}:{_IDENT}"
 # Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
 # strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
 _XPSTR = "('(?:.*)'|\"(?:.*)\")"

-_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
-_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
-_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
-_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
-_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITSIMPLELAST = re.compile(f"^(.*)/({_NSIDENT})$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile(f'^(.*)/({_NSIDENT}' + ')/text\\(\\)=' + _XPSTR + '$')
+_RE_SPLITSIMPLEATTRLAST = re.compile(f"^(.*)/(@(?:{_NSIDENT}))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile(f"^(.*)/(@(?:{_NSIDENT}))={_XPSTR}$")
+_RE_SPLITSUBLAST = re.compile(f'^(.*)/({_NSIDENT}' + ')\\[(.*)\\]$')
 _RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")

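These regex constants show f-strings composing sub-patterns into larger expressions in place of string concatenation. A small runnable sketch of the same idea, with simplified stand-in identifiers (not the module's real patterns):

import re

IDENT = r"[a-zA-Z_][a-zA-Z0-9_]*"
NSIDENT = f"{IDENT}|{IDENT}:{IDENT}"           # alternation built by interpolation
SPLITLAST = re.compile(f"^(.*)/({NSIDENT})$")  # same shape as _RE_SPLITSIMPLELAST

m = SPLITLAST.match("/config/network/hostname")
print(m.groups())  # -> ('/config/network', 'hostname')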
@@ -403,14 +403,14 @@ def do_print_match(module, tree, xpath, namespaces):
     for m in match:
         match_xpaths.append(tree.getpath(m))
     match_str = json.dumps(match_xpaths)
-    msg = "selector '%s' match: %s" % (xpath, match_str)
+    msg = f"selector '{xpath}' match: {match_str}"
     finish(module, tree, xpath, namespaces, changed=False, msg=msg)


 def count_nodes(module, tree, xpath, namespaces):
     """ Return the count of nodes matching the xpath """
-    hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
-    msg = "found %d nodes" % hits
+    hits = tree.xpath(f"count(/{xpath})", namespaces=namespaces)
+    msg = f"found {hits} nodes"
     finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))


@@ -472,7 +472,7 @@ def delete_xpath_target(module, tree, xpath, namespaces):
         else:
             raise Exception("Impossible error")
     except Exception as e:
-        module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+        module.fail_json(msg=f"Couldn't delete xpath target: {xpath} ({e})")
     else:
         finish(module, tree, xpath, namespaces, changed=changed)

@@ -571,7 +571,7 @@ def split_xpath_last(xpath):
     m = _RE_SPLITSUBLAST.match(xpath)
     if m:
         content = [x.strip() for x in m.group(3).split(" and ")]
-        return (m.group(1), [('/' + m.group(2), content)])
+        return (m.group(1), [(f"/{m.group(2)}", content)])

     m = _RE_SPLITONLYEQVALUE.match(xpath)
     if m:

@@ -584,7 +584,7 @@ def nsnameToClark(name, namespaces):
     if ":" in name:
         (nsname, rawname) = name.split(":")
         # return "{{%s}}%s" % (namespaces[nsname], rawname)
-        return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+        return f"{{{namespaces[nsname]}}}{rawname}"

     # no namespace name here
     return name

@@ -593,8 +593,7 @@ def nsnameToClark(name, namespaces):
 def check_or_make_target(module, tree, xpath, namespaces):
     (inner_xpath, changes) = split_xpath_last(xpath)
     if (inner_xpath == xpath) or (changes is None):
-        module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
-                         (xpath, etree.tostring(tree, pretty_print=True)))
+        module.fail_json(msg=f"Can't process Xpath {xpath} in order to spawn nodes! tree is {etree.tostring(tree, pretty_print=True)}")
         return False

     changed = False

@@ -625,7 +624,7 @@ def check_or_make_target(module, tree, xpath, namespaces):
                 for subexpr in eoa_value:
                     # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
                     #                  (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
-                    check_or_make_target(module, nk, "./" + subexpr, namespaces)
+                    check_or_make_target(module, nk, f"./{subexpr}", namespaces)
                 changed = True

         # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))

@@ -653,7 +652,7 @@ def check_or_make_target(module, tree, xpath, namespaces):
         #                  (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))

         else:
-            module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+            module.fail_json(msg=f"unknown tree transformation={etree.tostring(tree, pretty_print=True)}")

     return changed

@@ -679,12 +678,11 @@ def set_target_inner(module, tree, xpath, namespaces, attribute, value):
         # TODO: Implement a more robust check to check for child namespaces' existence
         if tree.getroot().nsmap and ":" not in xpath:
             missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
-        module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
-                         (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+        module.fail_json(msg=f"{missing_namespace}Xpath {xpath} causes a failure: {e}\n -- tree is {etree.tostring(tree, pretty_print=True)}",
+                         exception=traceback.format_exc())

     if not is_node(tree, xpath, namespaces):
-        module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
-                         (xpath, etree.tostring(tree, pretty_print=True)))
+        module.fail_json(msg=f"Xpath {xpath} does not reference a node! tree is {etree.tostring(tree, pretty_print=True)}")

     for element in tree.xpath(xpath, namespaces=namespaces):
         if not attribute:

@@ -696,7 +694,7 @@ def set_target_inner(module, tree, xpath, namespaces, attribute, value):
             if ":" in attribute:
                 attr_ns, attr_name = attribute.split(":")
                 # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
-                attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+                attribute = f"{{{namespaces[attr_ns]}}}{attr_name}"
             if element.get(attribute) != value:
                 element.set(attribute, value)

@@ -710,7 +708,7 @@ def set_target(module, tree, xpath, namespaces, attribute, value):

 def get_element_text(module, tree, xpath, namespaces):
     if not is_node(tree, xpath, namespaces):
-        module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+        module.fail_json(msg=f"Xpath {xpath} does not reference a node!")

     elements = []
     for element in tree.xpath(xpath, namespaces=namespaces):

@@ -721,7 +719,7 @@ def get_element_text(module, tree, xpath, namespaces):

 def get_element_attr(module, tree, xpath, namespaces):
     if not is_node(tree, xpath, namespaces):
-        module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+        module.fail_json(msg=f"Xpath {xpath} does not reference a node!")

     elements = []
     for element in tree.xpath(xpath, namespaces=namespaces):

@@ -743,7 +741,7 @@ def child_to_element(module, child, in_type):
             node = etree.parse(infile, parser)
             return node.getroot()
         except etree.XMLSyntaxError as e:
-            module.fail_json(msg="Error while parsing child element: %s" % e)
+            module.fail_json(msg=f"Error while parsing child element: {e}")
     elif in_type == 'yaml':
         if isinstance(child, str):
             return etree.Element(child)

@@ -760,7 +758,7 @@ def child_to_element(module, child, in_type):

             if children is not None:
                 if not isinstance(children, list):
-                    module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+                    module.fail_json(msg=f"Invalid children type: {type(children)}, must be list.")

                 subnodes = children_to_nodes(module, children)
                 node.extend(subnodes)

@@ -772,9 +770,9 @@ def child_to_element(module, child, in_type):
                 node.text = value
             return node
         else:
-            module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+            module.fail_json(msg=f"Invalid child type: {type(child)}. Children must be either strings or hashes.")
     else:
-        module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+        module.fail_json(msg=f"Invalid child input type: {in_type}. Type must be either xml or yaml.")


 def children_to_nodes(module=None, children=None, type='yaml'):

@@ -930,23 +928,23 @@ def main():
     elif os.path.isfile(xml_file):
         infile = open(xml_file, 'rb')
     else:
-        module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+        module.fail_json(msg=f"The target XML source '{xml_file}' does not exist.")

     # Parse and evaluate xpath expression
     if xpath is not None:
         try:
             etree.XPath(xpath)
         except etree.XPathSyntaxError as e:
-            module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+            module.fail_json(msg=f"Syntax error in xpath expression: {xpath} ({e})")
         except etree.XPathEvalError as e:
-            module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+            module.fail_json(msg=f"Evaluation error in xpath expression: {xpath} ({e})")

     # Try to parse in the target XML file
     try:
         parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
         doc = etree.parse(infile, parser)
     except etree.XMLSyntaxError as e:
-        module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+        module.fail_json(msg=f"Error while parsing document: {xml_file or 'xml_string'} ({e})")
     finally:
         if infile:
             infile.close()
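The nsnameToClark and attribute hunks above end in the trickiest f-string in this file: Clark notation needs literal braces, and a literal brace inside an f-string is written doubled. A sketch with a stand-in namespace map (illustrative, not the module's real data):

namespaces = {"x": "http://example.com/ns"}  # stand-in prefix-to-URI mapping
rawname = "item"
# Three braces on each side: the doubled pair prints one literal brace,
# the remaining one wraps the interpolated expression.
print(f"{{{namespaces['x']}}}{rawname}")  # -> {http://example.com/ns}item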
@@ -156,7 +156,7 @@ class Yarn(object):
         self.name_version = None

         if kwargs['version'] and self.name is not None:
-            self.name_version = self.name + '@' + str(self.version)
+            self.name_version = f"{self.name}@{self.version!s}"
         elif self.name is not None:
             self.name_version = self.name

@@ -186,7 +186,7 @@ class Yarn(object):
                 # Module will make directory if not exists.
                 os.makedirs(self.path)
             if not os.path.isdir(self.path):
-                self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+                self.module.fail_json(msg=f"Path provided {self.path} is not a directory")
             cwd = self.path

             if not os.path.isfile(os.path.join(self.path, 'package.json')):

@@ -204,7 +204,7 @@ class Yarn(object):
                 if json.loads(line)['type'] == 'error':
                     self.module.fail_json(msg=err)
             except Exception:
-                self.module.fail_json(msg="Unexpected stderr output from Yarn: %s" % err, stderr=err)
+                self.module.fail_json(msg=f"Unexpected stderr output from Yarn: {err}", stderr=err)

     def list(self):
         cmd = ['list', '--depth=0', '--json']
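The name_version change uses the !s conversion, which applies str() inside the f-string and so keeps the behavior of the old str(self.version) call. A sketch with stand-in package data (version deliberately not a string):

name, version = "left-pad", 1.3  # stand-in values
name_version = f"{name}@{version!s}"  # !s converts via str(), like the old code
print(name_version)  # -> left-pad@1.3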
@@ -110,8 +110,8 @@ class YumVersionLock:
         if rc == 0:
             return out
         elif rc == 1 and 'o such command:' in err:
-            self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out))
-        self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
+            self.module.fail_json(msg=f"Error: Please install rpm package yum-plugin-versionlock : {to_native(err)}{to_native(out)}")
+        self.module.fail_json(msg=f"Error: {to_native(err)}{to_native(out)}")

     def ensure_state(self, packages, command):
         """ Ensure packages state """

@@ -121,7 +121,7 @@ class YumVersionLock:
             self.module.fail_json(msg=out)
         if rc == 0:
             return True
-        self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
+        self.module.fail_json(msg=f"Error: {to_native(err)}{to_native(out)}")


 def match(entry, name):
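Both error paths concatenate two converted streams with no separator, and the adjacent f-string placeholders reproduce that exactly. A self-contained sketch with stand-in command output (str() stands in for to_native here):

err, out = "No such command: versionlock\n", ""  # stand-in stderr/stdout
msg = f"Error: {str(err)}{str(out)}"  # adjacent placeholders, no separator
print(msg.rstrip())  # -> Error: No such command: versionlock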
@@ -163,7 +163,7 @@ class Zfs(object):
             elif prop == 'volblocksize':
                 cmd += ['-b', value]
             else:
-                cmd += ['-o', '%s=%s' % (prop, value)]
+                cmd += ['-o', f'{prop}={value}']
         if origin and action == 'clone':
             cmd.append(origin)
         cmd.append(self.name)

@@ -182,7 +182,7 @@ class Zfs(object):
         if self.module.check_mode:
             self.changed = True
             return
-        cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+        cmd = [self.zfs_cmd, 'set', f"{prop}={value!s}", self.name]
         self.module.run_command(cmd, check_rc=True)

     def set_properties_if_changed(self):

@@ -200,7 +200,7 @@ class Zfs(object):
         for prop in self.extra_zfs_properties:
             value = self.get_property(prop, updated_properties)
             if value is None:
-                self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop)
+                self.module.fail_json(msg=f"zfsprop was not present after being successfully set: {prop}")
             if self.get_property(prop, current_properties) != value:
                 self.changed = True
             if prop in diff['after']['extra_zfs_properties']:
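The create and set hunks build argv lists in which each property becomes a single prop=value token. A sketch of that loop with stand-in properties (nothing is executed, and the binary path is illustrative):

zfs_cmd = "/usr/sbin/zfs"  # stand-in path to the zfs binary
extra_zfs_properties = {"compression": "lz4", "atime": "off"}  # stand-in props

cmd = [zfs_cmd, "create"]
for prop, value in extra_zfs_properties.items():
    cmd += ["-o", f"{prop}={value}"]  # one '-o prop=value' pair per property
cmd.append("tank/data")
print(cmd)  # -> ['/usr/sbin/zfs', 'create', '-o', 'compression=lz4', '-o', 'atime=off', 'tank/data']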
@@ -192,7 +192,7 @@ class ZfsDelegateAdmin(object):
                 elif line.startswith('\teveryone '):
                     perms[scope]['e'] = line.split()[1].split(',')
         except ValueError:
-            self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+            self.module.fail_json(msg=f"Cannot parse user/group permission output by `zfs allow`: '{line}'")
         return perms

     def run_zfs_raw(self, subcommand=None, args=None):

@@ -201,13 +201,13 @@ class ZfsDelegateAdmin(object):
         cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
         rc, out, err = self.module.run_command(cmd)
         if rc:
-            self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+            self.module.fail_json(msg=f"Command `{' '.join(cmd)}` failed: {err}")
         return out

     def run_zfs(self, args):
         """ Run zfs allow/unallow with appropriate options as per module arguments.
         """
-        args = self.recursive_opt + ['-' + self.scope] + args
+        args = self.recursive_opt + [f"-{self.scope}"] + args
         if self.perms:
             args.append(','.join(self.perms))
         return self.run_zfs_raw(args=args)

@@ -219,7 +219,7 @@ class ZfsDelegateAdmin(object):
         stdout = ''
         for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
             for ent in self.initial_perms[scope][ent_type].keys():
-                stdout += self.run_zfs(['-%s' % ent_type, ent])
+                stdout += self.run_zfs([f'-{ent_type}', ent])
                 changed = True
         for scope in ('ld', 'l', 'd'):
             if self.initial_perms[scope]['e']:

@@ -233,7 +233,7 @@ class ZfsDelegateAdmin(object):
         stdout = ''
         for ent_type, entities in (('u', self.users), ('g', self.groups)):
             if entities:
-                stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+                stdout += self.run_zfs([f'-{ent_type}', ','.join(entities)])
         if self.everyone:
             stdout += self.run_zfs(['-e'])
         return (self.initial_perms != self.current_perms, stdout)
@@ -188,7 +188,7 @@ class ZFSFacts(object):
             cmd.append('-r')
         if self.depth != 0:
             cmd.append('-d')
-            cmd.append('%d' % self.depth)
+            cmd.append(f'{self.depth}')
         if self.type:
             cmd.append('-t')
             cmd.append(','.join(self.type))

@@ -197,7 +197,7 @@ class ZFSFacts(object):
         (rc, out, err) = self.module.run_command(cmd)

         if rc != 0:
-            self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+            self.module.fail_json(msg=f'Error while trying to get facts about ZFS dataset: {self.name}',
                                   stderr=err,
                                   rc=rc)

@@ -242,7 +242,7 @@ def main():
     result['recurse'] = zfs_facts.recurse

     if not zfs_facts.dataset_exists():
-        module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+        module.fail_json(msg=f'ZFS dataset {zfs_facts.name} does not exist!')

     result['ansible_facts'] = zfs_facts.get_facts()
@@ -166,10 +166,10 @@ class Zpool(object):
             force=cmd_runner_fmt.as_bool('-f'),
             dry_run=cmd_runner_fmt.as_bool('-n'),
             pool_properties=cmd_runner_fmt.as_func(
-                lambda props: sum([['-o', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
+                lambda props: sum([['-o', f'{prop}={value}'] for prop, value in (props or {}).items()], [])
             ),
             filesystem_properties=cmd_runner_fmt.as_func(
-                lambda props: sum([['-O', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
+                lambda props: sum([['-O', f'{prop}={value}'] for prop, value in (props or {}).items()], [])
             ),
             mountpoint=cmd_runner_fmt.as_opt_val('-m'),
             altroot=cmd_runner_fmt.as_opt_val('-R'),

@@ -257,7 +257,7 @@ class Zpool(object):
                 before[prop] = current.get(prop)
                 if not self.module.check_mode:
                     with self.zpool_runner('subcommand assignment name', check_rc=True) as ctx:
-                        rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
+                        rc, stdout, stderr = ctx.run(subcommand='set', assignment=f'{prop}={value}')
                 after[prop] = str(value)
                 self.changed = True
         return {'before': {'pool_properties': before}, 'after': {'pool_properties': after}}

@@ -286,7 +286,7 @@ class Zpool(object):
                 before[prop] = current.get(prop)
                 if not self.module.check_mode:
                     with self.zfs_runner('subcommand assignment name', check_rc=True) as ctx:
-                        rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
+                        rc, stdout, stderr = ctx.run(subcommand='set', assignment=f'{prop}={value}')
                 after[prop] = str(value)
                 self.changed = True
         return {'before': {'filesystem_properties': before}, 'after': {'filesystem_properties': after}}

@@ -410,7 +410,7 @@ class Zpool(object):
     def add_vdevs(self):
         invalid_properties = [k for k in self.pool_properties if k != 'ashift']
         if invalid_properties:
-            self.module.warn("zpool add only supports 'ashift', ignoring: {}".format(invalid_properties))
+            self.module.warn(f"zpool add only supports 'ashift', ignoring: {invalid_properties}")

         diff = self.diff_layout()
         before_vdevs = diff['before']['vdevs']

@@ -544,7 +544,7 @@ def main():
     for idx, vdev in enumerate(vdevs, start=1):
         disks = vdev.get('disks')
         if not isinstance(disks, list) or len(disks) == 0:
-            module.fail_json(msg="vdev #{idx}: at least one disk is required (got: {disks!r})".format(idx=idx, disks=disks))
+            module.fail_json(msg=f"vdev #{idx}: at least one disk is required (got: {disks!r})")

     result = dict(
         name=name,

@@ -574,7 +574,7 @@ def main():
         prepared = ''
         for diff in (add_vdev_diff, remove_vdev_diff):
             if 'prepared' in diff:
-                prepared += (diff['prepared'] if not prepared else '\n' + diff['prepared'])
+                prepared += (diff['prepared'] if not prepared else f"\n{diff['prepared']}")
         result['diff']['prepared'] = prepared
     else:
         if module.check_mode:
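The two cmd_runner formatters above flatten a properties dict into repeated flag/value pairs; sum(..., []) concatenates the per-property two-element lists into one flat argv fragment. A sketch with stand-in pool properties:

props = {"ashift": "12", "autoexpand": "on"}  # stand-in values
# Each dict entry becomes ['-o', 'prop=value']; sum flattens the list of lists.
pairs = sum([["-o", f"{prop}={value}"] for prop, value in (props or {}).items()], [])
print(pairs)  # -> ['-o', 'ashift=12', '-o', 'autoexpand=on']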
@@ -181,7 +181,7 @@ def main():
         if zpool_facts.pool_exists():
             result['ansible_facts'] = zpool_facts.get_facts()
         else:
-            module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+            module.fail_json(msg=f'ZFS pool {zpool_facts.name} does not exist!')
     else:
         result['ansible_facts'] = zpool_facts.get_facts()
@@ -273,7 +273,6 @@ import os.path
 import xml
 import re
 from xml.dom.minidom import parseString as parseXML
-from ansible.module_utils.common.text.converters import to_native

 # import module snippets
 from ansible.module_utils.basic import AnsibleModule

@@ -353,7 +352,7 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
     try:
         dom = parseXML(stdout)
     except xml.parsers.expat.ExpatError as exc:
-        m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+        m.fail_json(msg=f"Failed to parse zypper xml output: {exc}",
                     rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)

     if rc == 104:

@@ -403,7 +402,7 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
     if m.params['simple_errors']:
         stdout = get_simple_errors(dom) or stdout

-    m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+    m.fail_json(msg=f'Zypper run command failed with return code {rc}.', rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)


 def get_simple_errors(dom):

@@ -475,7 +474,7 @@ def set_diff(m, retvals, result):
     for p in result:
         group = result[p]['group']
         if group == 'to-upgrade':
-            versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+            versions = f" ({result[p]['oldversion']} => {result[p]['version']})"
             packages['upgraded'].append(p + versions)
         elif group == 'to-install':
             packages['installed'].append(p)

@@ -485,13 +484,13 @@ def set_diff(m, retvals, result):
     output = ''
     for state in packages:
         if packages[state]:
-            output += state + ': ' + ', '.join(packages[state]) + '\n'
+            output += f"{state}: {', '.join(packages[state])}\n"
     if 'diff' not in retvals:
         retvals['diff'] = {}
     if 'prepared' not in retvals['diff']:
         retvals['diff']['prepared'] = output
     else:
-        retvals['diff']['prepared'] += '\n' + output
+        retvals['diff']['prepared'] += f"\n{output}"


 def package_present(m, name, want_latest):
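set_diff's summary line shows an f-string whose expression itself contains a quoted string: on Python before 3.12 the inner quotes must differ from the outer delimiter. A sketch with stand-in package groups mirroring the packages dict built in set_diff():

packages = {"upgraded": ["vim (9.0 => 9.1)"], "installed": ["htop"], "removed": []}
output = ""
for state in packages:
    if packages[state]:
        # Inner join uses single quotes inside the double-quoted f-string.
        output += f"{state}: {', '.join(packages[state])}\n"
print(output, end="")
# upgraded: vim (9.0 => 9.1)
# installed: htop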
@@ -182,7 +182,7 @@ def _parse_repos(module):
     elif rc == 6:
         return []
     else:
-        module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+        module.fail_json(msg=f'Failed to execute "{" ".join(cmd)}"', rc=rc, stdout=stdout, stderr=stderr)


 def _repo_changes(module, realrepo, repocmp):

@@ -242,7 +242,7 @@ def repo_exists(module, repodata, overwrite_multiple):
             # Found two repos and want to overwrite_multiple
             return (True, True, repos)
         else:
-            errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+            errmsg = f'More than one repo matched "{name}": "{repos}".'
             errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
             module.fail_json(msg=errmsg)

@@ -413,7 +413,7 @@ def main():

         # No support for .repo file with zero or more than one repository
         if len(repofile.sections()) != 1:
-            err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
+            err = f"Invalid format, .repo file contains {len(repofile.sections())} repositories, expected 1"
             module.fail_json(msg=err)

         section = repofile.sections()[0]

@@ -464,7 +464,7 @@ def main():
         if rc == 0:
             module.exit_json(changed=True, repodata=repodata, state=state)
         else:
-            module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state)
+            module.fail_json(msg=f"Zypper failed with rc {rc}", rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state)


 if __name__ == '__main__':
@@ -110,7 +110,7 @@ def _parse_repos(module):
     elif rc == 6:
         return []
     else:
-        module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+        module.fail_json(msg=f'Failed to execute "{" ".join(cmd)}"', rc=rc, stdout=stdout, stderr=stderr)


 def main():