
modules [no]*: use f-strings (#10973)

* modules [no]*: use f-strings

* add changelog frag
Alexei Znamensky 2025-10-26 19:48:10 +13:00 committed by GitHub
parent 50846b7560
commit 749c06cd01
44 changed files with 399 additions and 412 deletions
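
Every hunk below follows the same mechanical pattern: %-style interpolation, str.format() calls, and plain string concatenation are rewritten as f-strings with equivalent output, and the now-unneeded to_native() wrapping of exception values (and, in several files, the corresponding import) is dropped, since f-string interpolation already stringifies the exception. A minimal sketch of the conversion, with made-up values purely for illustration:

    # Illustrative only -- the values are invented; both forms render the same string.
    entry_time = 1730000000
    cmd = "SCHEDULE_HOST_DOWNTIME"
    host = "web01"

    old_style = "[%s] %s;%s;" % (entry_time, cmd, host)    # before this commit
    new_style = f"[{entry_time}] {cmd};{host};"            # after this commit

    assert old_style == new_style == "[1730000000] SCHEDULE_HOST_DOWNTIME;web01;"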

View file

@ -0,0 +1,44 @@
minor_changes:
- nagios - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- newrelic_deployment - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nexmo - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nginx_status_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nictagadm - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nmcli - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nomad_job - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nomad_job_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nomad_token - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nosh - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- npm - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- nsupdate - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- ocapi_command - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- ocapi_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- odbc - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- office_365_connector_card - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- omapi_host - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_host - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_image - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_image_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_service - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_template - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_vm - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- one_vnet - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_firewall_policy - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_load_balancer - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_monitoring_policy - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_private_network - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_public_ip - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneandone_server - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- onepassword_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneview_ethernet_network - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- oneview_network_set_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- online_server_info - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- open_iscsi - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- openbsd_pkg - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- opendj_backendprop - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- openwrt_init - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- opkg - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- osx_defaults - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- ovh_ip_failover - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- ovh_ip_loadbalancing_backend - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).
- ovh_monthly_billing - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10973).

View file

@ -455,7 +455,7 @@ class Nagios(object):
if start is None:
start = entry_time
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
hdr = f"[{entry_time}] {cmd};{host};"
duration_s = (duration * 60)
end = start + duration_s
@ -474,7 +474,7 @@ class Nagios(object):
str(duration_s), author, comment]
dt_arg_str = ";".join(dt_args)
dt_str = hdr + dt_arg_str + "\n"
dt_str = f"{hdr}{dt_arg_str}\n"
return dt_str
@ -497,7 +497,7 @@ class Nagios(object):
"""
entry_time = self._now()
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
hdr = f"[{entry_time}] {cmd};{host};"
if not author:
author = self.author
@ -512,7 +512,7 @@ class Nagios(object):
ack_args = [str(sticky), str(notify), str(persistent), author, comment]
ack_arg_str = ";".join(ack_args)
ack_str = hdr + ack_arg_str + "\n"
ack_str = f"{hdr}{ack_arg_str}\n"
return ack_str
@ -531,7 +531,7 @@ class Nagios(object):
"""
entry_time = self._now()
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
hdr = f"[{entry_time}] {cmd};{host};"
if comment is None:
comment = self.comment
@ -553,7 +553,7 @@ class Nagios(object):
dt_del_args.append('')
dt_del_arg_str = ";".join(dt_del_args)
dt_del_str = hdr + dt_del_arg_str + "\n"
dt_del_str = f"{hdr}{dt_del_arg_str}\n"
return dt_del_str
@ -570,7 +570,7 @@ class Nagios(object):
"""
entry_time = self._now()
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
hdr = f"[{entry_time}] {cmd};{host};"
if start is None:
start = entry_time + 3
@ -581,7 +581,7 @@ class Nagios(object):
chk_args = [svc, str(start)]
chk_arg_str = ";".join(chk_args)
chk_str = hdr + chk_arg_str + "\n"
chk_str = f"{hdr}{chk_arg_str}\n"
return chk_str
@ -599,12 +599,12 @@ class Nagios(object):
"""
entry_time = self._now()
notif_str = "[%s] %s" % (entry_time, cmd)
notif_str = f"[{entry_time}] {cmd}"
if host is not None:
notif_str += ";%s" % host
notif_str += f";{host}"
if svc is not None:
notif_str += ";%s" % svc
notif_str += f";{svc}"
notif_str += "\n"
@ -1170,10 +1170,10 @@ class Nagios(object):
You just have to provide the properly formatted command
"""
pre = '[%s]' % int(time.time())
pre = f'[{int(time.time())}]'
post = '\n'
cmdstr = '%s %s%s' % (pre, cmd, post)
cmdstr = f'{pre} {cmd}{post}'
self._write_command(cmdstr)
def act(self):

View file

@ -127,7 +127,7 @@ def main():
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
if app_id is None:
module.fail_json(msg="No application with name %s is found in NewRelic" % module.params["app_name"])
module.fail_json(msg=f"No application with name {module.params['app_name']} is found in NewRelic")
for item in ["changelog", "description", "revision", "user"]:
if module.params[item]:
@ -138,7 +138,7 @@ def main():
module.exit_json(changed=True)
# Send the data to New Relic
url = "https://api.newrelic.com/v2/applications/%s/deployments.json" % quote(str(app_id), safe='')
url = f"https://api.newrelic.com/v2/applications/{quote(str(app_id), safe='')}/deployments.json"
data = {
'deployment': params
}
@ -150,23 +150,23 @@ def main():
if info['status'] in (200, 201):
module.exit_json(changed=True)
else:
module.fail_json(msg="Unable to insert deployment marker: %s" % info['msg'])
module.fail_json(msg=f"Unable to insert deployment marker: {info['msg']}")
def get_application_id(module):
url = "https://api.newrelic.com/v2/applications.json"
data = "filter[name]=%s" % module.params["app_name"]
data = f"filter[name]={module.params['app_name']}"
application_id = None
headers = {
'Api-Key': module.params["token"],
}
response, info = fetch_url(module, url, data=data, headers=headers)
if info['status'] not in (200, 201):
module.fail_json(msg="Unable to get application: %s" % info['msg'])
module.fail_json(msg=f"Unable to get application: {info['msg']}")
result = json.loads(response.read())
if result is None or len(result.get("applications", "")) == 0:
module.fail_json(msg='No application found with name "%s"' % module.params["app_name"])
module.fail_json(msg=f"No application found with name \"{module.params['app_name']}\"")
if module.params["app_name_exact_match"]:
for item in result["applications"]:
@ -174,7 +174,7 @@ def get_application_id(module):
application_id = item["id"]
break
if application_id is None:
module.fail_json(msg='No application found with exact name "%s"' % module.params["app_name"])
module.fail_json(msg=f"No application found with exact name \"{module.params['app_name']}\"")
else:
application_id = result["applications"][0]["id"]

View file

@ -89,7 +89,7 @@ def send_msg(module):
}
for number in module.params.get('dest'):
msg['to'] = number
url = "%s?%s" % (NEXMO_API, urlencode(msg))
url = f"{NEXMO_API}?{urlencode(msg)}"
headers = dict(Accept='application/json')
response, info = fetch_url(module, url, headers=headers)

View file

@ -117,7 +117,7 @@ class NginxStatusInfo(object):
}
(response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
if not response:
module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
module.fail_json(msg=f"No valid or no response from url {self.url} within {self.timeout} seconds (timeout)")
data = to_text(response.read(), errors='surrogate_or_strict')
if not data:

View file

@ -137,11 +137,11 @@ class NicTag(object):
if self.mtu:
cmd.append('-p')
cmd.append('mtu=' + str(self.mtu))
cmd.append(f"mtu={self.mtu}")
if self.mac:
cmd.append('-p')
cmd.append('mac=' + str(self.mac))
cmd.append(f"mac={self.mac}")
cmd.append(self.name)

View file

@ -1922,10 +1922,8 @@ class Nmcli(object):
})
elif self.type == 'bond-slave':
if self.slave_type and self.slave_type != 'bond':
self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
"Allowed slave-type for '%s' is 'bond'."
% (self.type, self.slave_type, self.type)
)
self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. "
f"Allowed slave-type for '{self.type}' is 'bond'."))
if not self.slave_type:
self.module.warn("Connection 'slave-type' property automatically set to 'bond' "
"because of using 'bond-slave' connection type.")
@ -1956,10 +1954,8 @@ class Nmcli(object):
})
elif self.type == 'bridge-slave':
if self.slave_type and self.slave_type != 'bridge':
self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
"Allowed slave-type for '%s' is 'bridge'."
% (self.type, self.slave_type, self.type)
)
self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. "
f"Allowed slave-type for '{self.type}' is 'bridge'."))
if not self.slave_type:
self.module.warn("Connection 'slave-type' property automatically set to 'bridge' "
"because of using 'bridge-slave' connection type.")
@ -1975,10 +1971,8 @@ class Nmcli(object):
})
elif self.type == 'team-slave':
if self.slave_type and self.slave_type != 'team':
self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
"Allowed slave-type for '%s' is 'team'."
% (self.type, self.slave_type, self.type)
)
self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. "
f"Allowed slave-type for '{self.type}' is 'team'."))
if not self.slave_type:
self.module.warn("Connection 'slave-type' property automatically set to 'team' "
"because of using 'team-slave' connection type.")
@ -2019,24 +2013,24 @@ class Nmcli(object):
if self.wifi:
for name, value in self.wifi.items():
options.update({
'802-11-wireless.%s' % name: value
f'802-11-wireless.{name}': value
})
if self.wifi_sec:
for name, value in self.wifi_sec.items():
options.update({
'802-11-wireless-security.%s' % name: value
f'802-11-wireless-security.{name}': value
})
elif self.type == 'gsm':
if self.gsm:
for name, value in self.gsm.items():
options.update({
'gsm.%s' % name: value,
f'gsm.{name}': value,
})
elif self.type == 'macvlan':
if self.macvlan:
for name, value in self.macvlan.items():
options.update({
'macvlan.%s' % name: value,
f'macvlan.{name}': value,
})
elif self.state == 'present':
raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan')
@ -2044,7 +2038,7 @@ class Nmcli(object):
if self.wireguard:
for name, value in self.wireguard.items():
options.update({
'wireguard.%s' % name: value,
f'wireguard.{name}': value,
})
elif self.type == 'vpn':
if self.vpn:
@ -2065,7 +2059,7 @@ class Nmcli(object):
if isinstance(value, bool):
value = self.bool_to_string(value)
vpn_data_values += '%s=%s' % (name, value)
vpn_data_values += f'{name}={value}'
options.update({
'vpn.data': vpn_data_values,
})
@ -2084,7 +2078,7 @@ class Nmcli(object):
if self.sriov:
for name, value in self.sriov.items():
options.update({
'sriov.%s' % name: value,
f'sriov.{name}': value,
})
# Convert settings values based on the situation.
@ -2184,7 +2178,7 @@ class Nmcli(object):
return None
if privacy not in ip6_privacy_values:
raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy))
raise AssertionError(f'{privacy} is invalid ip_privacy6 option')
return ip6_privacy_values[privacy]
@ -2218,13 +2212,13 @@ class Nmcli(object):
def enforce_ipv4_cidr_notation(ip4_addresses):
if ip4_addresses is None:
return None
return [address if '/' in address else address + '/32' for address in ip4_addresses]
return [address if '/' in address else f"{address}/32" for address in ip4_addresses]
@staticmethod
def enforce_ipv6_cidr_notation(ip6_addresses):
if ip6_addresses is None:
return None
return [address if '/' in address else address + '/128' for address in ip6_addresses]
return [address if '/' in address else f"{address}/128" for address in ip6_addresses]
def enforce_routes_format(self, routes, routes_extended):
if routes is not None:
@ -2239,13 +2233,13 @@ class Nmcli(object):
result_str = ''
result_str += route['ip']
if route.get('next_hop') is not None:
result_str += ' ' + route['next_hop']
result_str += f" {route['next_hop']}"
if route.get('metric') is not None:
result_str += ' ' + str(route['metric'])
result_str += f" {route['metric']!s}"
for attribute, value in sorted(route.items()):
if attribute not in ('ip', 'next_hop', 'metric') and value is not None:
result_str += ' {0}={1}'.format(attribute, str(value).lower())
result_str += f' {attribute}={str(value).lower()}'
return result_str
@ -2371,13 +2365,13 @@ class Nmcli(object):
for key, value in options.items():
if value is not None:
if key in self.SECRET_OPTIONS:
self.edit_commands += ['set %s %s' % (key, value)]
self.edit_commands += [f'set {key} {value}']
continue
if key == 'xmit_hash_policy':
cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value])
cmd.extend(['+bond.options', f'xmit_hash_policy={value}'])
continue
if key == 'fail_over_mac':
cmd.extend(['+bond.options', 'fail_over_mac=%s' % value])
cmd.extend(['+bond.options', f'fail_over_mac={value}'])
continue
cmd.extend([key, value])
@ -2467,11 +2461,11 @@ class Nmcli(object):
if setting == '802-11-wireless-security':
set_property = 'psk'
set_value = 'FAKEVALUE'
commands = ['set %s.%s %s' % (setting, set_property, set_value)]
commands = [f'set {setting}.{set_property} {set_value}']
else:
commands = []
commands += ['print %s' % setting, 'quit', 'yes']
commands += [f'print {setting}', 'quit', 'yes']
(rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type])
@ -2479,7 +2473,7 @@ class Nmcli(object):
raise NmcliModuleError(err)
for line in out.splitlines():
prefix = '%s.' % setting
prefix = f'{setting}.'
if line.startswith(prefix):
pair = line.split(':', 1)
property = pair[0].strip().replace(prefix, '')
@ -2505,7 +2499,7 @@ class Nmcli(object):
if unsupported_properties:
msg_options = []
for property in unsupported_properties:
msg_options.append('%s.%s' % (setting_key, property))
msg_options.append(f'{setting_key}.{property}')
msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options)
if self.ignore_unsupported_suboptions:
@ -2806,7 +2800,7 @@ def main():
(rc, out, err) = nmcli.down_connection()
(rc, out, err) = nmcli.remove_connection()
if rc != 0:
module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc)
module.fail_json(name=f'Error removing connection named {nmcli.conn_name}', msg=err, rc=rc)
elif nmcli.state == 'present':
if nmcli.connection_exists():
@ -2828,7 +2822,7 @@ def main():
if module.check_mode:
module.exit_json(changed=False, **result)
if not nmcli.connection_exists():
result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
result['Connection'] = f'Connection {nmcli.conn_name} of Type {nmcli.type} is being added'
if module.check_mode:
module.exit_json(changed=True, **result)
(rc, out, err) = nmcli.create_connection()
@ -2843,7 +2837,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.up_connection()
if rc != 0:
module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc)
module.fail_json(name=f'Error bringing up connection named {nmcli.conn_name}', msg=err, rc=rc)
elif nmcli.state == 'down':
if nmcli.connection_exists():
@ -2853,7 +2847,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.down_connection()
if rc != 0:
module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc)
module.fail_json(name=f'Error bringing down connection named {nmcli.conn_name}', msg=err, rc=rc)
except NmcliModuleError as e:
module.fail_json(name=nmcli.conn_name, msg=str(e))

View file

@ -188,7 +188,7 @@ def run():
job = dict()
job['job'] = job_json
except nomad.api.exceptions.BadRequestNomadException as err:
msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
msg = f"{err.nomad_resp.reason} {err.nomad_resp.text}"
module.fail_json(msg=to_native(msg))
try:
job_id = job_json.get('ID')

View file

@ -323,7 +323,7 @@ def run():
filter.append(job)
result = filter
if not filter:
module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
module.fail_json(msg=f"Couldn't find Job with id {module.params['name']}")
except Exception as e:
module.fail_json(msg=to_native(e))

View file

@ -282,7 +282,7 @@ def run(module):
msg = 'ACL token deleted.'
changed = True
else:
msg = "No token with name '{0}' found".format(module.params.get('name'))
msg = f"No token with name '{module.params['name']}' found"
except Exception as e:
module.fail_json(msg=to_native(e))

View file

@ -403,7 +403,7 @@ def handle_enabled(module, result, service_path):
if not module.check_mode:
(rc, out, err) = run_sys_ctl(module, [action, service_path])
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
module.fail_json(msg=f"Unable to {action} service {service_path}: {out + err}")
result['preset'] = not preset
result['enabled'] = not enabled
@ -420,7 +420,7 @@ def handle_enabled(module, result, service_path):
if not module.check_mode:
(rc, out, err) = run_sys_ctl(module, [action, service_path])
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
module.fail_json(msg=f"Unable to {action} service {service_path}: {out + err}")
result['enabled'] = not enabled
result['preset'] = not preset
@ -496,7 +496,7 @@ def handle_state(module, result, service_path):
if not module.check_mode:
(rc, out, err) = run_sys_ctl(module, [action, service_path])
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
module.fail_json(msg=f"Unable to {action} service {service_path}: {err}")
# ===========================================
# Main control flow

View file

@ -157,7 +157,6 @@ import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
@ -183,7 +182,7 @@ class Npm(object):
self.executable = [module.get_bin_path('npm', True)]
if kwargs['version'] and kwargs['state'] != 'absent':
self.name_version = self.name + '@' + str(kwargs['version'])
self.name_version = f"{self.name}@{kwargs['version']}"
else:
self.name_version = self.name
@ -212,7 +211,7 @@ class Npm(object):
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="path %s is not a directory" % self.path)
self.module.fail_json(msg=f"path {self.path} is not a directory")
cwd = self.path
params = dict(self.module.params)
@ -239,7 +238,7 @@ class Npm(object):
try:
data = json.loads(self._exec(cmd, True, False, False) or '{}')
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
self.module.fail_json(msg=f"Failed to parse NPM output with error {e}")
if 'dependencies' in data:
for dep, props in data['dependencies'].items():
@ -250,7 +249,7 @@ class Npm(object):
else:
installed.append(dep)
if 'version' in props and props['version']:
dep_version = dep + '@' + str(props['version'])
dep_version = f"{dep}@{props['version']}"
installed.append(dep_version)
if self.name_version and self.name_version not in installed:
missing.append(self.name)

View file

@ -198,7 +198,6 @@ except ImportError:
HAVE_DNSPYTHON = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
class RecordManager(object):
@ -213,7 +212,7 @@ class RecordManager(object):
except TypeError:
module.fail_json(msg='Missing key_secret')
except binascii_error as e:
module.fail_json(msg='TSIG key error: %s' % to_native(e))
module.fail_json(msg=f'TSIG key error: {e}')
else:
self.keyring = None
@ -233,7 +232,7 @@ class RecordManager(object):
self.zone += '.'
if module.params['record'][-1] != '.':
self.fqdn = module.params['record'] + '.' + self.zone
self.fqdn = f"{module.params['record']}.{self.zone}"
else:
self.fqdn = module.params['record']
@ -247,7 +246,7 @@ class RecordManager(object):
def txt_helper(self, entry):
if entry[0] == '"' and entry[-1] == '"':
return entry
return '"{text}"'.format(text=entry)
return f'"{entry}"'
def lookup_zone(self):
name = dns.name.from_text(self.module.params['record'])
@ -261,12 +260,12 @@ class RecordManager(object):
else:
lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}')
except (socket_error, dns.exception.Timeout) as e:
self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}')
if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
self.module.params['server'], self.module.params['record']))
self.module.fail_json(msg=f"Zone lookup failure: '{self.module.params['server']}' will not "
f"respond to queries regarding '{self.module.params['record']}'.")
# If the response contains an Answer SOA RR whose name matches the queried name,
# this is the name of the zone in which the record needs to be inserted.
for rr in lookup.answer:
@ -280,7 +279,7 @@ class RecordManager(object):
try:
name = name.parent()
except dns.name.NoParent:
self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
self.module.fail_json(msg=f"Zone lookup of '{self.module.params['record']}' failed for unknown reason.")
def __do_update(self, update):
response = None
@ -290,9 +289,9 @@ class RecordManager(object):
else:
response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}')
except (socket_error, dns.exception.Timeout) as e:
self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}')
return response
def create_or_update_record(self):
@ -306,12 +305,12 @@ class RecordManager(object):
if exists == 0:
self.dns_rc = self.create_record()
if self.dns_rc != 0:
result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
result['msg'] = f"Failed to create DNS record (rc: {int(self.dns_rc)})"
elif exists == 2:
self.dns_rc = self.modify_record()
if self.dns_rc != 0:
result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
result['msg'] = f"Failed to update DNS record (rc: {int(self.dns_rc)})"
if self.dns_rc != 0:
result['failed'] = True
@ -358,9 +357,9 @@ class RecordManager(object):
else:
lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}')
except (socket_error, dns.exception.Timeout) as e:
self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}')
lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0]
entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value]
@ -404,7 +403,7 @@ class RecordManager(object):
if self.dns_rc != 0:
result['failed'] = True
result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
result['msg'] = f"Failed to delete record (rc: {int(self.dns_rc)})"
else:
result['changed'] = True
@ -415,7 +414,7 @@ class RecordManager(object):
try:
update.present(self.module.params['record'], self.module.params['type'])
except dns.rdatatype.UnknownRdatatype as e:
self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
self.module.fail_json(msg=f'Record error: {e}')
response = self.__do_update(update)
self.dns_rc = dns.message.Message.rcode(response)
@ -452,9 +451,9 @@ class RecordManager(object):
else:
lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}')
except (socket_error, dns.exception.Timeout) as e:
self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}')
if lookup.rcode() != dns.rcode.NOERROR:
self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')

View file

@ -208,17 +208,17 @@ def main():
# timeout
timeout = module.params['timeout']
base_uri = "https://" + module.params["baseuri"]
base_uri = f"https://{module.params['baseuri']}"
proxy_slot_number = module.params.get("proxy_slot_number")
ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}"))
# Check that the command is valid
if command not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
module.fail_json(msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}"))
# Organize by Categories / Commands
if category == "Chassis":
@ -244,7 +244,7 @@ def main():
job_name = module.params.get("job_name")
if job_name is None:
module.fail_json("Missing job_name")
job_uri = urljoin(base_uri, "Jobs/" + job_name)
job_uri = urljoin(base_uri, f"Jobs/{job_name}")
result = ocapi_utils.delete_job(job_uri)
if result['ret'] is False:

View file

@ -177,17 +177,17 @@ def main():
# timeout
timeout = module.params['timeout']
base_uri = "https://" + module.params["baseuri"]
base_uri = f"https://{module.params['baseuri']}"
proxy_slot_number = module.params.get("proxy_slot_number")
ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}"))
# Check that the command is valid
if command not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
module.fail_json(msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}"))
# Organize by Categories / Commands
if category == "Jobs":
@ -195,7 +195,7 @@ def main():
if module.params.get("job_name") is None:
module.fail_json(msg=to_native(
"job_name required for JobStatus command."))
job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"])
job_uri = urljoin(base_uri, f"Jobs/{module.params['job_name']}")
result = ocapi_utils.get_job_status(job_uri)
if result['ret'] is False:

View file

@ -83,7 +83,6 @@ row_count:
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
HAS_PYODBC = None
try:
@ -116,7 +115,7 @@ def main():
try:
connection = pyodbc.connect(dsn)
except Exception as e:
module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
module.fail_json(msg=f'Failed to connect to DSN: {e}')
result = dict(
changed=True,
@ -137,9 +136,7 @@ def main():
try:
# Get the rows out into an 2d array
for row in cursor.fetchall():
new_row = []
for column in row:
new_row.append("{0}".format(column))
new_row = [f"{column}" for column in row]
result['results'].append(new_row)
# Return additional information from the cursor
@ -158,11 +155,11 @@ def main():
except pyodbc.ProgrammingError as pe:
pass
except Exception as e:
module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
module.fail_json(msg=f"Exception while reading rows: {e}")
cursor.close()
except Exception as e:
module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
module.fail_json(msg=f"Failed to execute query: {e}")
finally:
connection.close()

View file

@ -257,8 +257,7 @@ def do_notify_connector_card_webhook(module, webhook, payload):
module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
else:
module.fail_json(
msg="failed to send %s as a connector card to Incoming Webhook: %s"
% (payload, info['msg'])
msg=f"failed to send {payload} as a connector card to Incoming Webhook: {info['msg']}"
)

View file

@ -160,10 +160,9 @@ class OmapiHostManager:
except binascii.Error:
self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
except OmapiError as e:
self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
"are valid. Exception was: %s" % to_native(e))
self.module.fail_json(msg=f"Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' are valid. Exception was: {e}")
except socket.error as e:
self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
self.module.fail_json(msg=f"Unable to connect to OMAPI server: {e}")
def get_host(self, macaddr):
msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
@ -207,14 +206,14 @@ class OmapiHostManager:
stmt_join = ""
if self.module.params['ddns']:
stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
stmt_join += f"ddns-hostname \"{self.module.params['hostname']}\"; "
try:
if len(self.module.params['statements']) > 0:
stmt_join += "; ".join(self.module.params['statements'])
stmt_join += "; "
except TypeError as e:
self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
self.module.fail_json(msg=f"Invalid statements found: {e}")
if len(stmt_join) > 0:
msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
@ -226,7 +225,7 @@ class OmapiHostManager:
"are valid.")
self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
except OmapiError as e:
self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
self.module.fail_json(msg=f"OMAPI error: {e}")
# Forge update message
else:
response_obj = self.unpack_facts(host_response.obj)
@ -238,9 +237,8 @@ class OmapiHostManager:
# Name cannot be changed
if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
"Please delete host and add new." %
(response_obj['name'], self.module.params['hostname']))
self.module.fail_json(msg=(f"Changing hostname is not supported. Old was {response_obj['name']}, "
f"new is {self.module.params['hostname']}. Please delete host and add new."))
"""
# It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
@ -263,7 +261,7 @@ class OmapiHostManager:
"are valid.")
self.module.exit_json(changed=True)
except OmapiError as e:
self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
self.module.fail_json(msg=f"OMAPI error: {e}")
def remove_host(self):
try:
@ -272,7 +270,7 @@ class OmapiHostManager:
except OmapiErrorNotFound:
self.module.exit_json()
except OmapiError as e:
self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
self.module.fail_json(msg=f"OMAPI error: {e}")
def main():
@ -308,7 +306,7 @@ def main():
elif module.params['state'] == 'absent':
host_manager.remove_host()
except ValueError as e:
module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
module.fail_json(msg=f"OMAPI input value error: {e}")
if __name__ == '__main__':

View file

@ -160,7 +160,7 @@ class HostModule(OpenNebulaModule):
self.get_parameter('cluster_id'))
self.result['changed'] = True
except Exception as e:
self.fail(msg="Could not allocate host, ERROR: " + str(e))
self.fail(msg=f"Could not allocate host, ERROR: {e}")
return True
@ -199,7 +199,7 @@ class HostModule(OpenNebulaModule):
host = self.get_host_by_name(host_name)
self.wait_for_host_state(host, [HOST_STATES.MONITORED])
elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
self.fail(msg="invalid host state %s" % current_state_name)
self.fail(msg=f"invalid host state {current_state_name}")
elif desired_state == 'enabled':
if current_state == HOST_ABSENT:
@ -215,7 +215,7 @@ class HostModule(OpenNebulaModule):
elif current_state in [HOST_STATES.MONITORED]:
pass
else:
self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to enable")
elif desired_state == 'disabled':
if current_state == HOST_ABSENT:
@ -226,12 +226,12 @@ class HostModule(OpenNebulaModule):
one.host.status(host.ID, HOST_STATUS.DISABLED)
result['changed'] = True
except Exception as e:
self.fail(msg="Could not disable host, ERROR: " + str(e))
self.fail(msg=f"Could not disable host, ERROR: {e}")
self.wait_for_host_state(host, [HOST_STATES.DISABLED])
elif current_state in [HOST_STATES.DISABLED]:
pass
else:
self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to disable")
elif desired_state == 'offline':
if current_state == HOST_ABSENT:
@ -242,12 +242,12 @@ class HostModule(OpenNebulaModule):
one.host.status(host.ID, HOST_STATUS.OFFLINE)
result['changed'] = True
except Exception as e:
self.fail(msg="Could not set host offline, ERROR: " + str(e))
self.fail(msg=f"Could not set host offline, ERROR: {e}")
self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
elif current_state in [HOST_STATES.OFFLINE]:
pass
else:
self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to offline")
elif desired_state == 'absent':
if current_state != HOST_ABSENT:
@ -256,7 +256,7 @@ class HostModule(OpenNebulaModule):
one.host.delete(host.ID)
result['changed'] = True
except Exception as e:
self.fail(msg="Could not delete host from cluster, ERROR: " + str(e))
self.fail(msg=f"Could not delete host from cluster, ERROR: {e}")
# if we reach this point we can assume that the host was taken to the desired state
@ -279,7 +279,7 @@ class HostModule(OpenNebulaModule):
one.host.update(host.ID, desired_template_changes, 1)
result['changed'] = True
except Exception as e:
self.fail(msg="Failed to update the host template, ERROR: " + str(e))
self.fail(msg=f"Failed to update the host template, ERROR: {e}")
# the cluster
if host.CLUSTER_ID != self.get_parameter('cluster_id'):
@ -288,7 +288,7 @@ class HostModule(OpenNebulaModule):
one.cluster.addhost(self.get_parameter('cluster_id'), host.ID)
result['changed'] = True
except Exception as e:
self.fail(msg="Failed to update the host cluster, ERROR: " + str(e))
self.fail(msg=f"Failed to update the host cluster, ERROR: {e}")
# return
self.exit()

View file

@ -423,9 +423,9 @@ class ImageModule(OpenNebulaModule):
self.result = self.create_image(name, template, datastore_id, wait_timeout)
# Using 'if id:' doesn't work properly when id=0
elif id is not None:
module.fail_json(msg="There is no image with id=" + str(id))
module.fail_json(msg=f"There is no image with id={id}")
elif name is not None:
module.fail_json(msg="There is no image with name=" + name)
module.fail_json(msg=f"There is no image with name={name}")
if desired_state == 'absent':
self.result = self.delete_image(image, wait_timeout)
@ -466,7 +466,7 @@ class ImageModule(OpenNebulaModule):
def create_image(self, image_name, template, datastore_id, wait_timeout):
if not self.module.check_mode:
image_id = self.one.image.allocate("NAME = \"" + image_name + "\"\n" + template, datastore_id)
image_id = self.one.image.allocate(f'NAME = "{image_name}"\n{template}', datastore_id)
self.wait_for_ready(image_id, wait_timeout)
image = self.get_image_by_id(image_id)
result = self.get_image_info(image)
@ -483,7 +483,7 @@ class ImageModule(OpenNebulaModule):
state = image.STATE
if state in [IMAGE_STATES.index('ERROR')]:
self.module.fail_json(msg="Got an ERROR state: " + image.TEMPLATE['ERROR'])
self.module.fail_json(msg=f"Got an ERROR state: {image.TEMPLATE['ERROR']}")
if state in [IMAGE_STATES.index('READY')]:
return True
@ -521,9 +521,9 @@ class ImageModule(OpenNebulaModule):
if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
if enable:
self.module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
self.module.fail_json(msg=f"Cannot enable {IMAGE_STATES[state]} image!")
else:
self.module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
self.module.fail_json(msg=f"Cannot disable {IMAGE_STATES[state]} image!")
if ((enable and state != IMAGE_STATES.index('READY')) or
(not enable and state != IMAGE_STATES.index('DISABLED'))):
@ -545,9 +545,9 @@ class ImageModule(OpenNebulaModule):
if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
if enable:
self.module.fail_json(msg="Cannot enable persistence for " + IMAGE_STATES[state] + " image!")
self.module.fail_json(msg=f"Cannot enable persistence for {IMAGE_STATES[state]} image!")
else:
self.module.fail_json(msg="Cannot disable persistence for " + IMAGE_STATES[state] + " image!")
self.module.fail_json(msg=f"Cannot disable persistence for {IMAGE_STATES[state]} image!")
if ((enable and state != IMAGE_STATES.index('READY')) or
(not enable and state != IMAGE_STATES.index('DISABLED'))):
@ -563,7 +563,7 @@ class ImageModule(OpenNebulaModule):
def clone_image(self, image, new_name, wait_timeout):
if new_name is None:
new_name = "Copy of " + image.NAME
new_name = f"Copy of {image.NAME}"
tmp_image = self.get_image_by_name(new_name)
if tmp_image:
@ -595,7 +595,7 @@ class ImageModule(OpenNebulaModule):
tmp_image = self.get_image_by_name(new_name)
if tmp_image:
self.module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID))
self.module.fail_json(msg=f"Name '{new_name}' is already taken by IMAGE with id={tmp_image.ID!s}")
if not self.module.check_mode:
self.one.image.rename(image.ID, new_name)
@ -609,7 +609,7 @@ class ImageModule(OpenNebulaModule):
return {'changed': False}
if image.RUNNING_VMS > 0:
self.module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.")
self.module.fail_json(msg=f"Cannot delete image. There are {image.RUNNING_VMS!s} VMs using it.")
if not self.module.check_mode:
self.one.image.delete(image.ID)

View file

@ -327,7 +327,7 @@ class ImageInfoModule(OpenNebulaModule):
break
if len(ids) > 0:
self.module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
self.module.fail_json(msg=f"There is no IMAGE(s) with id(s)={', '.join(str(image_id) for image_id in ids)}")
return images
@ -354,7 +354,7 @@ class ImageInfoModule(OpenNebulaModule):
# if the specific name is indicated
if pattern is None and len(images) == 0:
self.module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
self.module.fail_json(msg=f"There is no IMAGE with name={name_pattern}")
return images

View file

@ -240,7 +240,7 @@ STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
def get_all_templates(module, auth):
try:
all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
all_templates = open_url(url=f"{auth.url}/service_template", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
except Exception as e:
module.fail_json(msg=str(e))
@ -264,14 +264,14 @@ def get_template(module, auth, pred):
if found <= 0:
return None
elif found > 1:
module.fail_json(msg="There is no template with unique name: " + template_name)
module.fail_json(msg=f"There is no template with unique name: {template_name}")
else:
return found_template
def get_all_services(module, auth):
try:
response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
response = open_url(f"{auth.url}/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
except Exception as e:
module.fail_json(msg=str(e))
@ -294,8 +294,7 @@ def get_service(module, auth, pred):
# fail if there are more services with same name
if found > 1:
module.fail_json(msg="There are multiple services with a name: '" +
service_name + "'. You have to use a unique service name or use 'service_id' instead.")
module.fail_json(msg=f"There are multiple services with a name: '{service_name}'. You have to use a unique service name or use 'service_id' instead.")
elif found <= 0:
return None
else:
@ -354,7 +353,7 @@ def create_service(module, auth, template_id, service_name, custom_attrs, unique
}
try:
response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
response = open_url(f"{auth.url}/service_template/{template_id!s}/action", method="POST",
data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
except Exception as e:
module.fail_json(msg=str(e))
@ -370,10 +369,10 @@ def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
while (time.time() - start_time) < wait_timeout:
try:
status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
status_result = open_url(f"{auth.url}/service/{service_id!s}", method="GET",
force_basic_auth=True, url_username=auth.user, url_password=auth.password)
except Exception as e:
module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
module.fail_json(msg=f"Request for service status has failed. Error message: {e!s}")
status_result = module.from_json(status_result.read())
service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
@ -387,7 +386,7 @@ def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
log_message = log_message + log_info["message"]
break
module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
module.fail_json(msg=f"Deploying is unsuccessful. Service state: {STATES[service_state]}. Error message: {log_message}")
time.sleep(1)
@ -404,7 +403,7 @@ def change_service_permissions(module, auth, service_id, permissions):
}
try:
status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True,
url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
except Exception as e:
module.fail_json(msg=str(e))
@ -419,7 +418,7 @@ def change_service_owner(module, auth, service_id, owner_id):
}
try:
status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True,
url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
except Exception as e:
module.fail_json(msg=str(e))
@ -435,7 +434,7 @@ def change_service_group(module, auth, service_id, group_id):
}
try:
status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True,
url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
except Exception as e:
module.fail_json(msg=str(e))
@ -449,13 +448,13 @@ def change_role_cardinality(module, auth, service_id, role, cardinality, force):
}
try:
status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
status_result = open_url(f"{auth.url}/service/{service_id!s}/role/{role}", method="PUT",
force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
except Exception as e:
module.fail_json(msg=str(e))
if status_result.getcode() != 204:
module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
module.fail_json(msg=f"Failed to change cardinality for role: {role}. Return code: {status_result.getcode()!s}")
def check_change_service_owner(module, service, owner_id):
@ -510,7 +509,7 @@ def check_change_role_cardinality(module, service, role_name, cardinality):
if role["name"] == role_name:
return int(role["cardinality"]) != cardinality
module.fail_json(msg="There is no role with name: " + role_name)
module.fail_json(msg=f"There is no role with name: {role_name}")
def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
@ -553,7 +552,7 @@ def service_operation(module, auth, service_id=None, owner_id=None, group_id=Non
service_id = service["ID"]
if not service:
module.fail_json(msg="There is no service with id: " + str(service_id))
module.fail_json(msg=f"There is no service with id: {service_id!s}")
if owner_id:
if check_change_service_owner(module, service, owner_id):
@ -603,9 +602,9 @@ def delete_service(module, auth, service_id):
return service_info
try:
result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
result = open_url(f"{auth.url}/service/{service_id!s}", method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
except Exception as e:
module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
module.fail_json(msg=f"Service deletion has failed. Error message: {e}")
return service_info
@ -721,9 +720,9 @@ def main():
template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
if not template_id:
if requested_template_id:
module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
module.fail_json(msg=f"There is no template with template_id: {requested_template_id!s}")
elif requested_template_name:
module.fail_json(msg="There is no template with name: " + requested_template_name)
module.fail_json(msg=f"There is no template with name: {requested_template_name}")
if unique and not service_name:
module.fail_json(msg="You cannot use unique without passing service_name!")
@ -744,7 +743,7 @@ def main():
service_id = get_service_id_by_name(module, auth, service_name)
# The task should be failed when we want to manage a non-existent service identified by its name
if not service_id and state == 'present':
module.fail_json(msg="There is no service with name: " + service_name)
module.fail_json(msg=f"There is no service with name: {service_name}")
if state == 'absent':
result = delete_service(module, auth, service_id)

View file

@ -199,7 +199,7 @@ class TemplateModule(OpenNebulaModule):
needs_creation = False
if not template and desired_state != 'absent':
if id:
module.fail_json(msg="There is no template with id=" + str(id))
module.fail_json(msg=f"There is no template with id={id}")
else:
needs_creation = True
@ -254,7 +254,7 @@ class TemplateModule(OpenNebulaModule):
def create_template(self, name, template_data, filter):
if not self.module.check_mode:
self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data)
self.one.template.allocate(f'NAME = "{name}"\n{template_data}')
result = self.get_template_info(self.get_template_by_name(name, filter))
result['changed'] = True

View file

@ -708,12 +708,12 @@ def check_updateconf(module, to_check):
'''Checks if attributes are compatible with one.vm.updateconf API call.'''
for attr, subattributes in to_check.items():
if attr not in UPDATECONF_ATTRIBUTES:
module.fail_json(msg="'{0:}' is not a valid VM attribute.".format(attr))
module.fail_json(msg=f"'{attr}' is not a valid VM attribute.")
if not UPDATECONF_ATTRIBUTES[attr]:
continue
for subattr in subattributes:
if subattr not in UPDATECONF_ATTRIBUTES[attr]:
module.fail_json(msg="'{0:}' is not a valid VM subattribute of '{1:}'".format(subattr, attr))
module.fail_json(msg=f"'{subattr}' is not a valid VM subattribute of '{attr}'")
def parse_updateconf(vm_template):
@ -749,7 +749,7 @@ def get_template(module, client, predicate):
if found == 0:
return None
elif found > 1:
module.fail_json(msg='There are more templates with name: ' + template_name)
module.fail_json(msg=f"There are more templates with name: {template_name}")
return found_template
@ -784,7 +784,7 @@ def get_datastore(module, client, predicate):
if found == 0:
return None
elif found > 1:
module.fail_json(msg='There are more datastores with name: ' + datastore_name)
module.fail_json(msg=f"There are more datastores with name: {datastore_name}")
return found_datastore
@ -818,7 +818,7 @@ def get_vms_by_ids(module, client, state, ids):
for vm_id in ids:
vm = get_vm_by_id(client, vm_id)
if vm is None and state != 'absent':
module.fail_json(msg='There is no VM with id=' + str(vm_id))
module.fail_json(msg=f"There is no VM with id={vm_id}")
vms.append(vm)
return vms
@ -834,9 +834,9 @@ def get_vm_info(client, vm):
if 'DISK' in vm.TEMPLATE:
if isinstance(vm.TEMPLATE['DISK'], list):
for disk in vm.TEMPLATE['DISK']:
disk_size.append(disk['SIZE'] + ' MB')
disk_size.append(f"{disk['SIZE']} MB")
else:
disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
disk_size.append(f"{vm.TEMPLATE['DISK']['SIZE']} MB")
if 'NIC' in vm.TEMPLATE:
if isinstance(vm.TEMPLATE['NIC'], list):
@ -884,7 +884,7 @@ def get_vm_info(client, vm):
'owner_id': vm.UID,
'networks': networks_info,
'disk_size': disk_size,
'memory': vm.TEMPLATE['MEMORY'] + ' MB',
'memory': f"{vm.TEMPLATE['MEMORY']} MB",
'vcpu': vm.TEMPLATE['VCPU'],
'cpu': vm.TEMPLATE['CPU'],
'group_name': vm.GNAME,
@ -988,7 +988,7 @@ def get_size_in_MB(module, size_str):
symbol = s.strip()
if symbol not in SYMBOLS:
module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
module.fail_json(msg=f"Cannot interpret {init!r} {symbol!r} {num}")
prefix = {'B': 1}
@ -1012,7 +1012,7 @@ def create_vm(module, client, template_id, attributes_dict, labels_list, disk_si
size_count = len(flatten(disk_size))
# check if the number of disks is correct
if disk_count != size_count:
module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count))
module.fail_json(msg=f"This template has {disk_count} disks but you defined {size_count}")
vm_extra_template = dict_merge(template or {}, attributes_dict or {})
vm_extra_template = dict_merge(vm_extra_template, {
@ -1241,7 +1241,7 @@ def wait_for_state(module, client, vm, wait_timeout, state_predicate):
return vm
elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
module.fail_json(msg=f"Action is unsuccessful. VM state: {VM_STATES[state]}")
time.sleep(1)
@ -1351,8 +1351,8 @@ def resume_vm(module, client, vm):
lcm_state = vm.LCM_STATE
if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
"for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
module.fail_json(msg="Cannot perform action 'resume' because this action is not available "
"for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
if lcm_state not in [LCM_STATES.index('RUNNING')]:
changed = True
@ -1377,8 +1377,8 @@ def release_vm(module, client, vm):
state = vm.STATE
if state != VM_STATES.index('HOLD'):
module.fail_json(msg="Cannot perform action 'release' because this action is not available " +
"because VM is not in state 'HOLD'.")
module.fail_json(msg="Cannot perform action 'release' because this action is not available "
"because VM is not in state 'HOLD'.")
else:
changed = True
@@ -1392,8 +1392,8 @@ def check_name_attribute(module, attributes):
if attributes.get("NAME"):
import re
if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
"' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
module.fail_json(msg=f"Illegal 'NAME' attribute: '{attributes.get('NAME')}"
"' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
@@ -1404,7 +1404,7 @@ TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DI
def check_attributes(module, attributes):
for key in attributes.keys():
if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
module.fail_json(msg=f"Restricted attribute `{key}` cannot be used when filtering VMs.")
# Check the format of the name attribute
check_name_attribute(module, attributes)
@@ -1452,9 +1452,9 @@ def get_connection_info(module):
username = authstring.split(":")[0]
password = authstring.split(":")[1]
except (OSError, IOError):
module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
module.fail_json(msg=f"Could not find or read ONE_AUTH file at '{authfile}'")
except Exception:
module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
module.fail_json(msg=f"Error occurs when read ONE_AUTH file at '{authfile}'")
if not url:
module.fail_json(msg="Opennebula API url (api_url) is not specified")
from collections import namedtuple
@@ -1558,7 +1558,7 @@ def main():
if not (auth.username and auth.password):
module.warn("Credentials missing")
else:
one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}")
if attributes:
attributes = {key.upper(): value for key, value in attributes.items()}
@@ -1585,9 +1585,9 @@ def main():
template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
if template_id is None:
if requested_template_id is not None:
module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
module.fail_json(msg=f"There is no template with template_id: {requested_template_id}")
elif requested_template_name:
module.fail_json(msg="There is no template with name: " + requested_template_name)
module.fail_json(msg=f"There is no template with name: {requested_template_name}")
# Fetch datastore
datastore_id = None
@@ -1595,11 +1595,11 @@ def main():
datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
if datastore_id is None:
if requested_datastore_id:
module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
module.fail_json(msg=f"There is no datastore with datastore_id: {requested_datastore_id}")
elif requested_datastore_name:
module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
module.fail_json(msg=f"There is no datastore with name: {requested_datastore_name}")
else:
attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
attributes['SCHED_DS_REQUIREMENTS'] = f"ID={datastore_id}"
if exact_count and template_id is None:
module.fail_json(msg='Option `exact_count` needs template_id or template_name')


@@ -297,7 +297,7 @@ class NetworksModule(OpenNebulaModule):
needs_creation = False
if not template and desired_state != 'absent':
if id:
module.fail_json(msg="There is no template with id=" + str(id))
module.fail_json(msg=f"There is no template with id={id}")
else:
needs_creation = True
@@ -394,7 +394,7 @@ class NetworksModule(OpenNebulaModule):
def create_template(self, name, template_data):
if not self.module.check_mode:
# -1 means that network won't be added to any cluster which happens by default
self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data, -1)
self.one.vn.allocate(f'NAME = "{name}"\n{template_data}', -1)
result = self.get_template_info(self.get_template_by_name(name))
result['changed'] = True


@@ -521,8 +521,7 @@ def main():
if not module.params.get('auth_token'):
module.fail_json(
msg='The "auth_token" parameter or ' +
'ONEANDONE_AUTH_TOKEN environment variable is required.')
msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
if not module.params.get('api_url'):
oneandone_conn = oneandone.client.OneAndOneService(
@@ -555,7 +554,7 @@ def main():
for param in ('name', 'rules'):
if not module.params.get(param):
module.fail_json(
msg="%s parameter is required for new firewall policies." % param)
msg=f"{param} parameter is required for new firewall policies.")
try:
(changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
except Exception as e:


@@ -514,7 +514,7 @@ def create_load_balancer(module, oneandone_conn):
datacenter_id = get_datacenter(oneandone_conn, datacenter)
if datacenter_id is None:
module.fail_json(
msg='datacenter %s not found.' % datacenter)
msg=f'datacenter {datacenter} not found.')
for rule in rules:
load_balancer_rule = oneandone.client.LoadBalancerRule(
@@ -660,7 +660,7 @@ def main():
'persistence_time', 'method', 'rules'):
if not module.params.get(param):
module.fail_json(
msg="%s parameter is required for new load balancers." % param)
msg=f"{param} parameter is required for new load balancers.")
try:
(changed, load_balancer) = create_load_balancer(module, oneandone_conn)
except Exception as ex:


@@ -1007,7 +1007,7 @@ def main():
for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
if not module.params.get(param):
module.fail_json(
msg="%s parameter is required for a new monitoring policy." % param)
msg=f"{param} parameter is required for a new monitoring policy.")
try:
(changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
except Exception as ex:


@@ -236,7 +236,7 @@ def create_network(module, oneandone_conn):
datacenter_id = get_datacenter(oneandone_conn, datacenter)
if datacenter_id is None:
module.fail_json(
msg='datacenter %s not found.' % datacenter)
msg=f'datacenter {datacenter} not found.')
try:
_check_mode(module, True)


@@ -162,7 +162,7 @@ def create_public_ip(module, oneandone_conn):
if datacenter_id is None:
_check_mode(module, False)
module.fail_json(
msg='datacenter %s not found.' % datacenter)
msg=f'datacenter {datacenter} not found.')
try:
_check_mode(module, True)
@@ -206,7 +206,7 @@ def update_public_ip(module, oneandone_conn):
if public_ip is None:
_check_mode(module, False)
module.fail_json(
msg='public IP %s not found.' % public_ip_id)
msg=f'public IP {public_ip_id} not found.')
try:
_check_mode(module, True)
@@ -245,7 +245,7 @@ def delete_public_ip(module, oneandone_conn):
if public_ip is None:
_check_mode(module, False)
module.fail_json(
msg='public IP %s not found.' % public_ip_id)
msg=f'public IP {public_ip_id} not found.')
try:
_check_mode(module, True)


@@ -235,9 +235,7 @@ ONEANDONE_SERVER_STATES = (
def _check_mode(module, result):
if module.check_mode:
module.exit_json(
changed=result
)
module.exit_json(changed=result)
def _create_server(module, oneandone_conn, hostname, description,
@@ -334,7 +332,7 @@ def create_server(module, oneandone_conn):
if datacenter_id is None:
_check_mode(module, False)
module.fail_json(
msg='datacenter %s not found.' % datacenter)
msg=f'datacenter {datacenter} not found.')
fixed_instance_size_id = None
if fixed_instance_size:
@@ -344,13 +342,13 @@ def create_server(module, oneandone_conn):
if fixed_instance_size_id is None:
_check_mode(module, False)
module.fail_json(
msg='fixed_instance_size %s not found.' % fixed_instance_size)
msg=f'fixed_instance_size {fixed_instance_size} not found.')
appliance_id = get_appliance(oneandone_conn, appliance)
if appliance_id is None:
_check_mode(module, False)
module.fail_json(
msg='appliance %s not found.' % appliance)
msg=f'appliance {appliance} not found.')
private_network_id = None
if private_network:
@@ -360,7 +358,7 @@ def create_server(module, oneandone_conn):
if private_network_id is None:
_check_mode(module, False)
module.fail_json(
msg='private network %s not found.' % private_network)
msg=f'private network {private_network} not found.')
monitoring_policy_id = None
if monitoring_policy:
@@ -370,7 +368,7 @@ def create_server(module, oneandone_conn):
if monitoring_policy_id is None:
_check_mode(module, False)
module.fail_json(
msg='monitoring policy %s not found.' % monitoring_policy)
msg=f'monitoring policy {monitoring_policy} not found.')
firewall_policy_id = None
if firewall_policy:
@@ -380,7 +378,7 @@ def create_server(module, oneandone_conn):
if firewall_policy_id is None:
_check_mode(module, False)
module.fail_json(
msg='firewall policy %s not found.' % firewall_policy)
msg=f'firewall policy {firewall_policy} not found.')
load_balancer_id = None
if load_balancer:
@@ -390,7 +388,7 @@ def create_server(module, oneandone_conn):
if load_balancer_id is None:
_check_mode(module, False)
module.fail_json(
msg='load balancer %s not found.' % load_balancer)
msg=f'load balancer {load_balancer} not found.')
if auto_increment:
hostnames = _auto_increment_hostname(count, hostname)
@@ -481,7 +479,7 @@ def remove_server(module, oneandone_conn):
changed = True
except Exception as ex:
module.fail_json(
msg="failed to terminate the server: %s" % str(ex))
msg=f"failed to terminate the server: {ex}")
removed_server = {
'id': server['id'],
@@ -532,8 +530,7 @@ def startstop_server(module, oneandone_conn):
method='SOFTWARE')
except Exception as ex:
module.fail_json(
msg="failed to set server %s to state %s: %s" % (
server_id, state, str(ex)))
msg=f"failed to set server {server_id} to state {state}: {ex}")
_check_mode(module, False)
@@ -553,8 +550,7 @@ def startstop_server(module, oneandone_conn):
break
if not operation_completed:
module.fail_json(
msg="Timeout waiting for server %s to get to state %s" % (
server_id, state))
msg=f"Timeout waiting for server {server_id} to get to state {state}")
changed = True
server = _insert_network_data(server)
@@ -639,8 +635,7 @@ def main():
if not module.params.get('auth_token'):
module.fail_json(
msg='The "auth_token" parameter or ' +
'ONEANDONE_AUTH_TOKEN environment variable is required.')
msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
if not module.params.get('api_url'):
oneandone_conn = oneandone.client.OneAndOneService(
@@ -675,7 +670,7 @@ def main():
'datacenter'):
if not module.params.get(param):
module.fail_json(
msg="%s parameter is required for new server." % param)
msg=f"{param} parameter is required for new server.")
try:
(changed, servers) = create_server(module, oneandone_conn)
except Exception as ex:


@@ -237,8 +237,8 @@ class OnePasswordInfo(object):
return {field_name: field_data.get('v', '')}
# We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
optional_section_title = '' if section_title is None else f" in the section '{section_title}'"
module.fail_json(msg=f"Unable to find an item in 1Password named '{item_id}' with the field '{field_name}'{optional_section_title}.")
def parse_search_terms(self, terms):
processed_terms = []
@@ -248,7 +248,7 @@ class OnePasswordInfo(object):
term = {'name': term}
if 'name' not in term:
module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
module.fail_json(msg=f"Missing required 'name' field from search term, got: '{term}'")
term['field'] = term.get('field', 'password')
term['section'] = term.get('section', None)
@@ -262,15 +262,15 @@ class OnePasswordInfo(object):
try:
args = ["get", "item", item_id]
if vault is not None:
args += ['--vault={0}'.format(vault)]
args += [f'--vault={vault}']
rc, output, dummy = self._run(args)
return output
except Exception as e:
if re.search(".*not found.*", to_native(e)):
module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
module.fail_json(msg=f"Unable to find an item in 1Password named '{item_id}'.")
else:
module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
module.fail_json(msg=f"Unexpected error attempting to find an item in 1Password named '{item_id}': {e}")
def get_field(self, item_id, field, section=None, vault=None):
output = self.get_raw(item_id, vault)
@@ -285,7 +285,7 @@ class OnePasswordInfo(object):
args = [
'signin',
'{0}.1password.com'.format(self.auto_login['subdomain']),
f"{self.auto_login['subdomain']}.1password.com",
to_bytes(self.auto_login['username']),
to_bytes(self.auto_login['secret_key']),
'--output=raw',
@@ -295,10 +295,10 @@ class OnePasswordInfo(object):
rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
self.token = out.strip()
except AnsibleModuleError as e:
module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
module.fail_json(msg=f"Failed to perform initial sign in to 1Password: {to_native(e)}")
else:
module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
"or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
module.fail_json(msg=f"Unable to perform an initial sign in to 1Password. Please run '{self.cli_path} signin' "
"or define credentials in 'auto_login'. See the module documentation for details.")
def get_token(self):
# If the config file exists, assume an initial signin has taken place and try basic sign in
@@ -339,7 +339,7 @@ class OnePasswordInfo(object):
self.get_token()
except OSError as e:
if e.errno == errno.ENOENT:
module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
module.fail_json(msg=f"1Password CLI tool '{self.cli_path}' not installed in path on control machine")
raise e
def run(self):


@@ -205,9 +205,9 @@ class EthernetNetworkModule(OneViewModuleBase):
result['changed'] = False
else:
if len(vlan_ids) == 1:
self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
self.data['vlanIdRange'] = f'{vlan_ids[0]}-{vlan_ids[0]}'
else:
self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
self.data['vlanIdRange'] = ','.join(str(s) for s in vlan_ids)
self.resource_client.create_bulk(self.data)
result['changed'] = True


@@ -151,7 +151,7 @@ class NetworkSetInfoModule(OneViewModuleBase):
name = self.module.params.get('name')
if 'withoutEthernet' in self.options:
filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
filter_by_name = f"\"'name'='{name}'\"" if name else ''
network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
elif name:
network_sets = self.oneview_client.network_sets.get_by('name', name)


@@ -144,7 +144,7 @@ class OnlineServerInfo(Online):
try:
return self.get(path=server_path).json
except OnlineException as exc:
self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
self.module.fail_json(msg=f"A problem occurred while fetching: {server_path} ({exc})")
def all_detailed_servers(self):
servers_api_path = self.get_resources()


@@ -190,7 +190,7 @@ def iscsi_get_cached_nodes(module, portal=None):
def iscsi_discover(module, portal, port):
cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)]
cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', f'{portal}:{port}']
module.run_command(cmd, check_rc=True)
@@ -213,7 +213,7 @@ def target_loggedon(module, target, portal=None, port=None):
port = ""
if rc == 0:
search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target))
search_re = f"{re.escape(portal)}:{port}.*{re.escape(target)}"
return re.search(search_re, out) is not None
elif rc == 21:
return False
@@ -246,7 +246,7 @@ def target_login(module, target, check_rc, portal=None, port=None):
cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
if portal is not None and port is not None:
cmd.append('--portal')
cmd.append('%s:%s' % (portal, port))
cmd.append(f'{portal}:{port}')
rc, out, err = module.run_command(cmd, check_rc=check_rc)
return rc
@@ -261,7 +261,7 @@ def target_device_node(target):
# if anyone know a better way to find out which devicenodes get created for
# a given target...
devices = glob.glob('/dev/disk/by-path/*%s*' % target)
devices = glob.glob(f'/dev/disk/by-path/*{target}*')
devdisks = []
for dev in devices:
# exclude partitions
@@ -278,7 +278,7 @@ def target_isauto(module, target, portal=None, port=None):
if portal is not None and port is not None:
cmd.append('--portal')
cmd.append('%s:%s' % (portal, port))
cmd.append(f'{portal}:{port}')
dummy, out, dummy = module.run_command(cmd, check_rc=True)
@@ -294,7 +294,7 @@ def target_setauto(module, target, portal=None, port=None):
if portal is not None and port is not None:
cmd.append('--portal')
cmd.append('%s:%s' % (portal, port))
cmd.append(f'{portal}:{port}')
module.run_command(cmd, check_rc=True)
@@ -304,7 +304,7 @@ def target_setmanual(module, target, portal=None, port=None):
if portal is not None and port is not None:
cmd.append('--portal')
cmd.append('%s:%s' % (portal, port))
cmd.append(f'{portal}:{port}')
module.run_command(cmd, check_rc=True)


@@ -179,7 +179,7 @@ def get_all_installed(module):
rc, stdout, stderr = execute_command(command, module)
if stderr:
module.fail_json(msg="failed in get_all_installed(): %s" % stderr)
module.fail_json(msg=f"failed in get_all_installed(): {stderr}")
return stdout
@@ -189,22 +189,22 @@ def get_package_state(names, pkg_spec, module):
info_cmd = 'pkg_info -Iq'
for name in names:
command = "%s inst:%s" % (info_cmd, name)
command = f"{info_cmd} inst:{name}"
rc, stdout, stderr = execute_command(command, module)
if stderr:
match = re.search(r"^Can't find inst:%s$" % re.escape(name), stderr)
match = re.search(rf"^Can't find inst:{re.escape(name)}$", stderr)
if match:
pkg_spec[name]['installed_state'] = False
else:
module.fail_json(msg="failed in get_package_state(): " + stderr)
module.fail_json(msg=f"failed in get_package_state(): {stderr}")
if stdout:
# If the requested package name is just a stem, like "python", we may
# find multiple packages with that name.
pkg_spec[name]['installed_names'] = stdout.splitlines()
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
module.debug(f"get_package_state(): installed_names = {pkg_spec[name]['installed_names']}")
pkg_spec[name]['installed_state'] = True
else:
pkg_spec[name]['installed_state'] = False
@@ -220,27 +220,26 @@ def package_present(names, pkg_spec, module):
# only the leftovers.
if pkg_spec['package_latest_leftovers']:
if name not in pkg_spec['package_latest_leftovers']:
module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
module.debug(f"package_present(): ignoring '{name}' which is not a package_latest() leftover")
continue
else:
module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
module.debug(f"package_present(): handling package_latest() leftovers, installing '{name}'")
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
if build is True:
port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
port_dir = f"{module.params['ports_dir']}/{get_package_source_path(name, pkg_spec, module)}"
if os.path.isdir(port_dir):
if pkg_spec[name]['flavor']:
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
install_cmd = f"cd {port_dir} && make clean=depends && FLAVOR=\"{flavors}\" make install && make clean=depends"
elif pkg_spec[name]['subpackage']:
install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
pkg_spec[name]['subpackage'])
install_cmd = f"cd {port_dir} && make clean=depends && SUBPACKAGE=\"{pkg_spec[name]['subpackage']}\" make install && make clean=depends"
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
install_cmd = f"cd {port_dir} && make install && make clean=depends"
else:
module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
module.fail_json(msg=f"the port source directory {port_dir} does not exist")
else:
install_cmd = 'pkg_add -Im'
@@ -253,7 +252,7 @@ def package_present(names, pkg_spec, module):
if build is True and not module.check_mode:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True)
else:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{install_cmd} {name}", module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
@@ -268,31 +267,31 @@ def package_present(names, pkg_spec, module):
# version string it is not used an one by pkg_add.
if pkg_spec[name]['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code for name '%s'" % name)
module.debug(f"package_present(): depending on return code for name '{name}'")
if pkg_spec[name]['rc']:
pkg_spec[name]['changed'] = False
else:
# Depend on stderr instead.
module.debug("package_present(): depending on stderr for name '%s'" % name)
module.debug(f"package_present(): depending on stderr for name '{name}'")
if pkg_spec[name]['stderr']:
# There is a corner case where having an empty directory in
# installpath prior to the right location will result in a
# "file:/local/package/directory/ is empty" message on stderr
# while still installing the package, so we need to look for
# for a message like "packagename-1.0: ok" just in case.
match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout'])
match = re.search(rf"\W{re.escape(pkg_spec[name]['stem'])}-[^:]+: ok\W", pkg_spec[name]['stdout'])
if match:
# It turns out we were able to install the package.
module.debug("package_present(): we were able to install package for name '%s'" % name)
module.debug(f"package_present(): we were able to install package for name '{name}'")
pkg_spec[name]['changed'] = True
else:
# We really did fail, fake the return code.
module.debug("package_present(): we really did fail for name '%s'" % name)
module.debug(f"package_present(): we really did fail for name '{name}'")
pkg_spec[name]['rc'] = 1
pkg_spec[name]['changed'] = False
else:
module.debug("package_present(): stderr was not set for name '%s'" % name)
module.debug(f"package_present(): stderr was not set for name '{name}'")
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
@@ -307,7 +306,7 @@ def package_present(names, pkg_spec, module):
# Function used to make sure a package is the latest available version.
def package_latest(names, pkg_spec, module):
if module.params['build'] is True:
module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
module.fail_json(msg=f"the combination of build={module.params['build']} and state=latest is not supported")
upgrade_cmd = 'pkg_add -um'
@@ -327,17 +326,17 @@ def package_latest(names, pkg_spec, module):
if pkg_spec[name]['installed_state'] is True:
# Attempt to upgrade the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{upgrade_cmd} {name}", module)
# Look for output looking something like "nmap-6.01->6.25: ok" to see if
# something changed (or would have changed). Use \W to delimit the match
# from progress meter output.
pkg_spec[name]['changed'] = False
for installed_name in pkg_spec[name]['installed_names']:
module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout'])
module.debug(f"package_latest(): checking for pre-upgrade package name: {installed_name}")
match = re.search(rf"\W{re.escape(installed_name)}->.+: ok\W", pkg_spec[name]['stdout'])
if match:
module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
module.debug(f"package_latest(): pre-upgrade package name match: {installed_name}")
pkg_spec[name]['changed'] = True
break
@@ -357,7 +356,7 @@ def package_latest(names, pkg_spec, module):
else:
# Note packages that need to be handled by package_present
module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
module.debug(f"package_latest(): package '{name}' is not installed, will be handled by package_present()")
pkg_spec['package_latest_leftovers'].append(name)
# If there were any packages that were not installed we call
@@ -383,7 +382,7 @@ def package_absent(names, pkg_spec, module):
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to remove the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{remove_cmd} {name}", module)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
@@ -433,7 +432,7 @@ def parse_package_name(names, pkg_spec, module):
pkg_spec['package_latest_leftovers'] = []
for name in names:
module.debug("parse_package_name(): parsing name: %s" % name)
module.debug(f"parse_package_name(): parsing name: {name}")
# Do some initial matches so we can base the more advanced regex on that.
version_match = re.search("-[0-9]", name)
versionless_match = re.search("--", name)
@@ -441,7 +440,7 @@ def parse_package_name(names, pkg_spec, module):
# Stop if someone is giving us a name that both has a version and is
# version-less at the same time.
if version_match and versionless_match:
module.fail_json(msg="package name both has a version and is version-less: " + name)
module.fail_json(msg=f"package name both has a version and is version-less: {name}")
# All information for a given name is kept in the pkg_spec keyed by that name.
pkg_spec[name] = {}
@@ -457,10 +456,12 @@ def parse_package_name(names, pkg_spec, module):
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'version'
module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
"flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
module.debug(
f"version_match: stem: {pkg_spec[name]['stem']}, version: {pkg_spec[name]['version']}, "
f"flavor_separator: {pkg_spec[name]['flavor_separator']}, flavor: {pkg_spec[name]['flavor']}, branch: {pkg_spec[name]['branch']}, "
f"style: {pkg_spec[name]['style']}")
else:
module.fail_json(msg="unable to parse package name at version_match: " + name)
module.fail_json(msg=f"unable to parse package name at version_match: {name}")
# If name includes no version but is version-less ("--").
elif versionless_match:
@@ -473,9 +474,10 @@ def parse_package_name(names, pkg_spec, module):
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'versionless'
module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
module.debug(f"versionless_match: stem: {pkg_spec[name]['stem']}, flavor: {pkg_spec[name]['flavor']}, "
f"branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}")
else:
module.fail_json(msg="unable to parse package name at versionless_match: " + name)
module.fail_json(msg=f"unable to parse package name at versionless_match: {name}")
# If name includes no version, and is not version-less, it is all a
# stem, possibly with a branch (%branchname) tacked on at the
@@ -490,23 +492,23 @@ def parse_package_name(names, pkg_spec, module):
pkg_spec[name]['flavor'] = None
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'stem'
module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
module.debug(f"stem_match: stem: {pkg_spec[name]['stem']}, branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}")
else:
module.fail_json(msg="unable to parse package name at else: " + name)
module.fail_json(msg=f"unable to parse package name at else: {name}")
# Verify that the managed host is new enough to support branch syntax.
if pkg_spec[name]['branch']:
branch_release = "6.0"
if LooseVersion(platform.release()) < LooseVersion(branch_release):
module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
module.fail_json(msg=f"package name using 'branch' syntax requires at least OpenBSD {branch_release}: {name}")
# Sanity check that there are no trailing dashes in flavor.
# Try to stop strange stuff early so we can be strict later.
if pkg_spec[name]['flavor']:
match = re.search("-$", pkg_spec[name]['flavor'])
if match:
module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
module.fail_json(msg=f"trailing dash in flavor: {pkg_spec[name]['flavor']}")
# Function used for figuring out the port path.
@@ -518,39 +520,39 @@ def get_package_source_path(name, pkg_spec, module):
# try for an exact match first
sqlports_db_file = '/usr/local/share/sqlports'
if not os.path.isfile(sqlports_db_file):
module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
module.fail_json(msg=f"sqlports file '{sqlports_db_file}' is missing")
conn = sqlite3.connect(sqlports_db_file)
first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
query = first_part_of_query + ' = ?'
module.debug("package_package_source_path(): exact query: %s" % query)
query = f"{first_part_of_query} = ?"
module.debug(f"package_package_source_path(): exact query: {query}")
cursor = conn.execute(query, (name,))
results = cursor.fetchall()
# next, try for a fuzzier match
if len(results) < 1:
looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
query = first_part_of_query + ' LIKE ?'
query = f"{first_part_of_query} LIKE ?"
if pkg_spec[name]['flavor']:
looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
module.debug(f"package_package_source_path(): fuzzy flavor query: {query}")
cursor = conn.execute(query, (looking_for,))
elif pkg_spec[name]['style'] == 'versionless':
query += ' AND fullpkgname NOT LIKE ?'
module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
module.debug(f"package_package_source_path(): fuzzy versionless query: {query}")
cursor = conn.execute(query, (looking_for, f"{looking_for}-%",))
else:
module.debug("package_package_source_path(): fuzzy query: %s" % query)
module.debug(f"package_package_source_path(): fuzzy query: {query}")
cursor = conn.execute(query, (looking_for,))
results = cursor.fetchall()
# error if we don't find exactly 1 match
conn.close()
if len(results) < 1:
module.fail_json(msg="could not find a port by the name '%s'" % name)
module.fail_json(msg=f"could not find a port by the name '{name}'")
if len(results) > 1:
matches = map(lambda x: x[1], results)
module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
module.fail_json(msg=f"too many matches, unsure which to build: {' OR '.join(matches)}")
# there's exactly 1 match, so figure out the subpackage, if any, then return
fullpkgpath = results[0][0]
@@ -629,13 +631,13 @@ def main():
if build is True:
if not os.path.isdir(ports_dir):
module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
module.fail_json(msg=f"the ports source directory {ports_dir} does not exist")
# build sqlports if its not installed yet
parse_package_name(['sqlports'], pkg_spec, module)
get_package_state(['sqlports'], pkg_spec, module)
if not pkg_spec['sqlports']['installed_state']:
module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
module.debug(f"main(): installing 'sqlports' because build={module.params['build']}")
package_present(['sqlports'], pkg_spec, module)
asterisk_name = False
@@ -665,7 +667,7 @@ def main():
# with build mode. Disable it for now.
for n in name:
if pkg_spec[n]['branch'] and module.params['build'] is True:
module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
module.fail_json(msg=f"the combination of 'branch' syntax and build={module.params['build']} is not supported: {n}")
# Get state for all package names.
get_package_state(name, pkg_spec, module)
@@ -701,12 +703,12 @@ def main():
combined_failed = True
if pkg_spec[n]['stderr']:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stderr']
combined_error_message += f", {pkg_spec[n]['stderr']}"
else:
combined_error_message = pkg_spec[n]['stderr']
else:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stdout']
combined_error_message += f", {pkg_spec[n]['stdout']}"
else:
combined_error_message = pkg_spec[n]['stdout']


@@ -104,7 +104,7 @@ class BackendProp(object):
def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
my_command = [
opendj_bindir + '/dsconfig',
f"{opendj_bindir}/dsconfig",
'get-backend-prop',
'-h', hostname,
'--port', str(port),
@@ -116,24 +116,24 @@ class BackendProp(object):
if rc == 0:
return stdout
else:
self._module.fail_json(msg="Error message: " + str(stderr))
self._module.fail_json(msg=f"Error message: {stderr}")
def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
my_command = [
opendj_bindir + '/dsconfig',
f"{opendj_bindir}/dsconfig",
'set-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'--set', name + ":" + value,
'--set', f"{name}:{value}",
'-n', '-X'
] + password_method
rc, stdout, stderr = self._module.run_command(my_command)
if rc == 0:
return True
else:
self._module.fail_json(msg="Error message: " + stderr)
self._module.fail_json(msg=f"Error message: {stderr}")
def validate_data(self, data=None, name=None, value=None):
for config_line in data.split('\n'):


@@ -106,14 +106,14 @@ def main():
# initialize
service = module.params['name']
init_script = '/etc/init.d/' + service
init_script = f"/etc/init.d/{service}"
result = {
'name': service,
'changed': False,
}
# check if service exists
if not os.path.exists(init_script):
module.fail_json(msg='service %s does not exist' % service)
module.fail_json(msg=f'service {service} does not exist')
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
@@ -134,7 +134,7 @@ def main():
# command if the init script doesn't contain a STOP value, so we ignore the exit
# code and explicitly check if the service is now in the desired state
if is_enabled() != module.params['enabled']:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
module.fail_json(msg=f"Unable to {action} service {service}: {err}")
result['enabled'] = not enabled
@@ -179,7 +179,7 @@ def main():
if not module.check_mode:
rc, dummy, err = module.run_command([init_script, action])
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
module.fail_json(msg=f"Unable to {action} service {service}: {err}")
module.exit_json(**result)


@@ -178,7 +178,7 @@ class Opkg(StateModuleHelper):
def _package_in_desired_state(self, name, want_installed, version=None):
dummy, out, dummy = self.runner("state package").run(state="query", package=name)
has_package = out.startswith(name + " - %s" % ("" if not version else (version + " ")))
has_package = out.startswith(f"{name} - {'' if not version else f'{version} '}")
return want_installed == has_package
def state_present(self):
@@ -189,10 +189,10 @@ class Opkg(StateModuleHelper):
ctx.run(package=package)
self.vars.set("run_info", ctx.run_info, verbosity=4)
if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version):
self.do_raise("failed to install %s" % package)
self.do_raise(f"failed to install {package}")
self.vars.install_c += 1
if self.vars.install_c > 0:
self.vars.msg = "installed %s package(s)" % self.vars.install_c
self.vars.msg = f"installed {self.vars.install_c} package(s)"
else:
self.vars.msg = "package(s) already present"
@@ -204,10 +204,10 @@ class Opkg(StateModuleHelper):
ctx.run(package=package)
self.vars.set("run_info", ctx.run_info, verbosity=4)
if not self._package_in_desired_state(package, want_installed=False):
self.do_raise("failed to remove %s" % package)
self.do_raise(f"failed to remove {package}")
self.vars.remove_c += 1
if self.vars.remove_c > 0:
self.vars.msg = "removed %s package(s)" % self.vars.remove_c
self.vars.msg = f"removed {self.vars.remove_c} package(s)"
else:
self.vars.msg = "package(s) already absent"


@@ -205,30 +205,30 @@ class OSXDefaults(object):
return True
elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
raise OSXDefaultsException(f"Invalid boolean value: {value!r}")
elif data_type == "date":
try:
return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
f"Invalid date value: {value!r}. Required format yyy-mm-dd hh:mm:ss."
)
elif data_type in ["int", "integer"]:
if not OSXDefaults.is_int(value):
raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
raise OSXDefaultsException(f"Invalid integer value: {value!r}")
return int(value)
elif data_type == "float":
try:
value = float(value)
except ValueError:
raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
raise OSXDefaultsException(f"Invalid float value: {value!r}")
return value
elif data_type == "array":
if not isinstance(value, list):
raise OSXDefaultsException("Invalid value. Expected value to be an array")
return value
raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
raise OSXDefaultsException(f'Type is not supported: {data_type}')
def _host_args(self):
""" Returns a normalized list of commandline arguments based on the "host" attribute """
@@ -272,7 +272,7 @@ class OSXDefaults(object):
# If the RC is not 0, then terrible happened! Ooooh nooo!
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % err)
raise OSXDefaultsException(f"An error occurred while reading key type from defaults: {err}")
# Ok, lets parse the type from output
data_type = out.strip().replace('Type is ', '')
@@ -285,7 +285,7 @@ class OSXDefaults(object):
# A non zero RC at this point is kinda strange...
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % err)
raise OSXDefaultsException(f"An error occurred while reading key value from defaults: {err}")
# Convert string to list when type is array
if data_type == "array":
@@ -319,17 +319,17 @@ class OSXDefaults(object):
if not isinstance(value, list):
value = [value]
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value,
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, f"-{self.type}"] + value,
expand_user_and_vars=False)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % err)
raise OSXDefaultsException(f'An error occurred while writing value to defaults: {err}')
def delete(self):
""" Deletes defaults key from domain """
rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % err)
raise OSXDefaultsException(f"An error occurred while deleting key from defaults: {err}")
# /commands ----------------------------------------------------------- }}}
@@ -357,7 +357,7 @@ class OSXDefaults(object):
if self.check_type:
value_type = type(self.value)
if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
raise OSXDefaultsException(f"Type mismatch. Type in defaults: {type(self.current_value).__name__}")
# Current value matches the given value. Nothing need to be done. Arrays need extra care
if self.type == "array" and self.current_value is not None and not self.array_add and \


@@ -108,8 +108,6 @@ EXAMPLES = r"""
consumer_key: yourconsumerkey
"""
RETURN = r"""
"""
import time
from urllib.parse import quote_plus
@@ -141,7 +139,7 @@ def getOvhClient(ansibleModule):
def waitForNoTask(client, name, timeout):
currentTimeout = timeout
while client.get('/ip/{0}/task'.format(quote_plus(name)),
while client.get(f'/ip/{quote_plus(name)}/task',
function='genericMoveFloatingIp',
status='todo'):
time.sleep(1) # Delay for 1 sec
@@ -154,7 +152,7 @@ def waitForNoTask(client, name, timeout):
def waitForTaskDone(client, name, taskId, timeout):
currentTimeout = timeout
while True:
task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
task = client.get(f'/ip/{quote_plus(name)}/task/{taskId}')
if task['status'] == 'done':
return True
time.sleep(5) # Delay for 5 sec to not harass the API
@@ -201,40 +199,34 @@ def main():
ips = client.get('/ip', ip=name, type='failover')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of ips, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
msg=f'Unable to call OVH API for getting the list of ips, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
if name not in ips and '{0}/32'.format(name) not in ips:
module.fail_json(msg='IP {0} does not exist'.format(name))
if name not in ips and f'{name}/32' not in ips:
module.fail_json(msg=f'IP {name} does not exist')
# Check that no task is pending before going on
try:
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for no pending '
'tasks before executing the module '.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for no pending tasks before executing the module ')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of pending tasks '
'of the ip, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for getting the list of pending tasks of the ip, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
try:
ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
ipproperties = client.get(f'/ip/{quote_plus(name)}')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the properties '
'of the ip, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for getting the properties of the ip, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
if ipproperties['routedTo']['serviceName'] != service:
if not module.check_mode:
if wait_task_completion == 0:
# Move the IP and get the created taskId
task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
task = client.post(f'/ip/{quote_plus(name)}/move', to=service)
taskId = task['taskId']
result['moved'] = True
else:
@@ -245,8 +237,7 @@ def main():
if wait_completion or wait_task_completion != 0:
if not waitForTaskDone(client, name, taskId, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion '
'of move ip to service'.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for completion of move ip to service')
result['waited'] = True
else:
result['waited'] = False


@@ -104,9 +104,6 @@ EXAMPLES = r"""
consumer_key: yourconsumerkey
"""
RETURN = r"""
"""
import time
try:
@@ -136,7 +133,7 @@ def getOvhClient(ansibleModule):
def waitForNoTask(client, name, timeout):
currentTimeout = timeout
while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
while len(client.get(f'/ip/loadBalancing/{name}/task')) > 0:
time.sleep(1) # Delay for 1 sec
currentTimeout -= 1
if currentTimeout < 0:
@@ -162,8 +159,7 @@ def main():
)
if not HAS_OVH:
module.fail_json(msg='ovh-api python module'
'is required to run this module ')
module.fail_json(msg='ovh-api python module is required to run this module')
# Get parameters
name = module.params.get('name')
@@ -181,34 +177,29 @@ def main():
loadBalancings = client.get('/ip/loadBalancing')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of loadBalancing, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
msg=f'Unable to call OVH API for getting the list of loadBalancing, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
if name not in loadBalancings:
module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
module.fail_json(msg=f'IP LoadBalancing {name} does not exist')
# Check that no task is pending before going on
try:
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for no pending '
'tasks before executing the module '.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for no pending tasks before executing the module ')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of pending tasks '
'of the loadBalancing, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for getting the list of pending tasks of the loadBalancing, check application key, secret, consumerkey and '
f'parameters. Error returned by OVH API was : {apiError}')
try:
backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
backends = client.get(f'/ip/loadBalancing/{name}/backend')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of backends '
'of the loadBalancing, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=('Unable to call OVH API for getting the list of backends '
'of the loadBalancing, check application key, secret, consumerkey '
f'and parameters. Error returned by OVH API was : {apiError}'))
backendExists = backend in backends
moduleChanged = False
@@ -217,48 +208,38 @@ def main():
# Remove backend
try:
client.delete(
'/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
f'/ip/loadBalancing/{name}/backend/{backend}')
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion '
'of removing backend task'.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for completion of removing backend task')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for deleting the backend, '
'check application key, secret, consumerkey and '
'parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for deleting the backend, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
moduleChanged = True
else:
if backendExists:
# Get properties
try:
backendProperties = client.get(
'/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
f'/ip/loadBalancing/{name}/backend/{backend}')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the backend properties, '
'check application key, secret, consumerkey and '
'parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for getting the backend properties, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
if backendProperties['weight'] != weight:
# Change weight
try:
client.post(
'/ip/loadBalancing/{0}/backend/{1}/setWeight'
.format(name, backend), weight=weight)
f'/ip/loadBalancing/{name}/backend/{backend}/setWeight', weight=weight)
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion '
'of setWeight to backend task'
.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for completion of setWeight to backend task')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for updating the weight of the '
'backend, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for updating the weight of the backend, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
moduleChanged = True
if backendProperties['probe'] != probe:
@@ -266,44 +247,34 @@ def main():
backendProperties['probe'] = probe
try:
client.put(
'/ip/loadBalancing/{0}/backend/{1}'
.format(name, backend), probe=probe)
f'/ip/loadBalancing/{name}/backend/{backend}', probe=probe)
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion of '
'setProbe to backend task'
.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for completion of setProbe to backend task')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for updating the probe of '
'the backend, check application key, secret, '
'consumerkey and parameters. Error returned by OVH api '
'was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for updating the probe of the backend, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
moduleChanged = True
else:
# Creates backend
try:
try:
client.post('/ip/loadBalancing/{0}/backend'.format(name),
client.post(f'/ip/loadBalancing/{name}/backend',
ipBackend=backend, probe=probe, weight=weight)
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for creating the backend, check '
'application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'
.format(apiError))
msg=f'Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion of '
'backend creation task'.format(timeout))
msg=f'Timeout of {timeout} seconds while waiting for completion of backend creation task')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for creating the backend, check '
'application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
msg=f'Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. '
f'Error returned by OVH API was : {apiError}')
moduleChanged = True
module.exit_json(changed=moduleChanged)


@@ -128,15 +128,15 @@ def main():
# Check that the instance exists
try:
project = client.get('/cloud/project/{0}'.format(project_id))
project = client.get(f'/cloud/project/{project_id}')
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='project {0} does not exist'.format(project_id))
module.fail_json(msg=f'project {project_id} does not exist')
# Check that the instance exists
try:
instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
instance = client.get(f'/cloud/project/{project_id}/instance/{instance_id}')
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
module.fail_json(msg=f'instance {instance_id} does not exist in project {project_id}')
# Is monthlyBilling already enabled or pending ?
if instance['monthlyBilling'] is not None:
@@ -147,10 +147,10 @@ def main():
module.exit_json(changed=True, msg="Dry Run!")
try:
ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
ovh_billing_status = client.post(f'/cloud/project/{project_id}/instance/{instance_id}/activeMonthlyBilling')
module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
module.fail_json(changed=False, msg=f"Failed to call OVH API: {apiError}")
# We should never reach here
module.fail_json(msg='Internal ovh_monthly_billing module error')