Mirror of https://github.com/ansible-collections/community.general.git (synced 2026-02-04 07:51:50 +00:00)
Address issues reported by ruff check (#11043)

* Resolve E713 and E714 (not in / is not tests).
* Address UP018 (unnecessary str call).
* UP045 requires Python 3.10+.
* Address UP007 (X | Y for type annotations).
* Address UP035 (import Callable from collections.abc).
* Address UP006 (t.Dict -> dict).
* Address UP009 (UTF-8 encoding comment).
* Address UP034 (extraneous parentheses).
* Address SIM910 (dict.get() with None default).
* Address F401 (unused import).
* Address UP020 (use builtin open).
* Address B009 and B010 (getattr/setattr with constant name).
* Address SIM300 (Yoda conditions).
* UP029 isn't in use anyway.
* Address FLY002 (static join).
* Address B034 (re.sub positional args).
* Address B020 (loop variable overrides input).
* Address B017 (assert raises Exception).
* Address SIM211 (if expression with false/true).
* Address SIM113 (enumerate for loop).
* Address UP036 (sys.version_info checks).
* Remove unnecessary UP039.
* Address SIM201 (not ==).
* Address SIM212 (if expr with twisted arms).
* Add changelog fragment.
* Reformat.
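Most of the rules above are mechanical rewrites. As a rough, self-contained sketch of what a few of them look like in practice (every name below is invented for illustration and does not come from the changed modules):

# Illustrative only: the before/after shape of a few of the ruff rules addressed in this commit.

class Config:
    pass


def demo():
    cfg = Config()
    data = b"example"

    # B010: setattr() with a literal attribute name becomes a plain assignment.
    # before: setattr(cfg, "user_data", data)
    cfg.user_data = data

    # SIM201: "not x == y" becomes "x != y".
    status = 200
    # before: if not status == 200:
    if status != 200:
        raise RuntimeError("unexpected status")

    # SIM113: a manually incremented counter becomes enumerate().
    records = ["a", "b", "c"]
    indexed = []
    # before: count = 0 before the loop, count += 1 inside it
    for count, record in enumerate(records):
        indexed.append((count, record))

    # E713 / E714: "not x in y" and "not x is None" become "x not in y" and "x is not None".
    # before: if not "a" in records:  /  if not cfg is None:
    if "a" not in records:
        indexed.append((-1, "a"))
    if cfg is not None:
        return indexed


if __name__ == "__main__":
    print(demo())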
parent f5943201b9
commit 3478863ef0

77 changed files with 196 additions and 222 deletions
@@ -631,8 +631,8 @@ def get_instances_info(connection, ids):
     if len(instances) > 0:
         for inst in instances:
             volumes = connection.describe_disks(instance_id=inst.id)
-            setattr(inst, "block_device_mappings", volumes)
-            setattr(inst, "user_data", inst.describe_user_data())
+            inst.block_device_mappings = volumes
+            inst.user_data = inst.describe_user_data()
             result.append(inst.read())
     return result
 
@@ -748,7 +748,7 @@ def modify_instance(module, instance):
     password = module.params["password"]
 
     # userdata can be modified only when instance is stopped
-    setattr(instance, "user_data", instance.describe_user_data())
+    instance.user_data = instance.describe_user_data()
     user_data = instance.user_data
     if state == "stopped":
         user_data = module.params["user_data"].encode()
@@ -389,8 +389,8 @@ def main():
             if not str(inst.instance_name).startswith(name_prefix):
                 continue
             volumes = ecs.describe_disks(instance_id=inst.id)
-            setattr(inst, "block_device_mappings", volumes)
-            setattr(inst, "user_data", inst.describe_user_data())
+            inst.block_device_mappings = volumes
+            inst.user_data = inst.describe_user_data()
             instances.append(inst.read())
             instance_ids.append(inst.id)
 
@@ -215,9 +215,9 @@ def update_package_db(module):
 
 def dir_size(module, path):
     total_size = 0
-    for path, dirs, files in os.walk(path):
+    for cur_path, dirs, files in os.walk(path):
         for f in files:
-            total_size += os.path.getsize(os.path.join(path, f))
+            total_size += os.path.getsize(os.path.join(cur_path, f))
     return total_size
 
 
@@ -568,7 +568,7 @@ class BtrfsSubvolumeModule:
         last = None
         ordered = sorted(subvolumes, key=lambda x: x.path)
         for next in ordered:
-            if last is None or not next.path[0 : len(last)] == last:
+            if last is None or next.path[0 : len(last)] != last:
                 filtered.append(next)
                 last = next.path
         return filtered
@@ -324,7 +324,7 @@ def add_service(module, service):
 
     # there is no way to retrieve the details of checks so if a check is present
     # in the service it must be re-registered
-    if service.has_checks() or not existing or not existing == service:
+    if service.has_checks() or not existing or existing != service:
         service.register(consul_api)
         # check that it registered correctly
         registered = get_service_by_id_or_name(consul_api, service.id)
@@ -171,15 +171,13 @@ class CronVar:
             raise CronVarError("Unable to read crontab")
 
         lines = out.splitlines()
-        count = 0
-        for l in lines:
+        for count, l in enumerate(lines):
             if count > 2 or (
                 not re.match(r"# DO NOT EDIT THIS FILE - edit the master and reinstall.", l)
                 and not re.match(r"# \(/tmp/.*installed on.*\)", l)
                 and not re.match(r"# \(.*version.*\)", l)
             ):
                 self.lines.append(l)
-                count += 1
 
     def log_message(self, message):
         self.module.debug(f'ansible: "{message}"')
@@ -139,7 +139,7 @@ NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-(?P<release
 
 
 def do_versionlock(module, command, patterns=None, raw=False):
-    patterns = [] if not patterns else patterns
+    patterns = patterns if patterns else []
     raw_parameter = ["--raw"] if raw else []
     # Call dnf versionlock using a just one full NEVR package-name-spec each
     # time because multiple package-name-spec and globs are not well supported.
@@ -608,7 +608,7 @@ def main():
     current_record = DME.getMatchingRecord(record_name, record_type, record_value)
     new_record = {"name": record_name}
     for i in ["record_value", "record_type", "record_ttl"]:
-        if not module.params[i] is None:
+        if module.params[i] is not None:
             new_record[i[len("record_") :]] = module.params[i]
     # Special handling for mx record
     if new_record["type"] == "MX":
@@ -271,11 +271,11 @@ class GitlabIssue:
         for key, value in options.items():
             if value is not None:
                 if key == "milestone_id":
-                    old_milestone = getattr(issue, "milestone")["id"] if getattr(issue, "milestone") else ""
+                    old_milestone = issue.milestone["id"] if issue.milestone else ""
                     if value != old_milestone:
                         return True
                 elif key == "assignee_ids":
-                    if value != sorted([user["id"] for user in getattr(issue, "assignees")]):
+                    if value != sorted([user["id"] for user in issue.assignees]):
                         return True
 
                 elif key == "labels":
@@ -272,11 +272,11 @@ class GitlabMergeRequest:
                 key = "force_remove_source_branch"
 
             if key == "assignee_ids":
-                if value != sorted([user["id"] for user in getattr(mr, "assignees")]):
+                if value != sorted([user["id"] for user in mr.assignees]):
                     return True
 
             elif key == "reviewer_ids":
-                if value != sorted([user["id"] for user in getattr(mr, "reviewers")]):
+                if value != sorted([user["id"] for user in mr.reviewers]):
                     return True
 
             elif key == "labels":
@@ -572,7 +572,7 @@ class GitLabProject:
 
                 if all(old_val.get(key) == value for key, value in final_val.items()):
                     continue
-                setattr(project, "container_expiration_policy_attributes", final_val)
+                project.container_expiration_policy_attributes = final_val
             else:
                 setattr(project, arg_key, arg_value)
             changed = True
@@ -499,8 +499,8 @@ class GitLabUser:
 
         for identity in identities:
             if identity not in user.identities:
-                setattr(user, "provider", identity["provider"])
-                setattr(user, "extern_uid", identity["extern_uid"])
+                user.provider = identity["provider"]
+                user.extern_uid = identity["extern_uid"]
                 if not self._module.check_mode:
                     user.save()
                 changed = True
@@ -413,7 +413,7 @@ class HAProxy:
         haproxy_version = self.discover_version()
 
         # check if haproxy version supports DRAIN state (starting with 1.5)
-        if haproxy_version and (1, 5) <= haproxy_version:
+        if haproxy_version and haproxy_version >= (1, 5):
             cmd = "set server $pxname/$svname state drain"
             self.execute_for_backends(cmd, backend, host, "DRAIN")
         if status == "MAINT":
@@ -431,7 +431,7 @@ class Homebrew:
         if len(package_names) != 1:
             self.failed = True
             self.message = (
-                f"Package names for {name} are missing or ambiguous: {', '.join((str(p) for p in package_names))}"
+                f"Package names for {name} are missing or ambiguous: {', '.join(str(p) for p in package_names)}"
             )
             raise HomebrewException(self.message)
 
@@ -87,7 +87,7 @@ running:
 """
 
 import json
-import sys
+from typing import NamedTuple, Optional
 
 from ansible.module_utils.basic import AnsibleModule
 from ansible_collections.community.general.plugins.module_utils.homebrew import (

@@ -95,25 +95,12 @@ from ansible_collections.community.general.plugins.module_utils.homebrew import
     parse_brew_path,
 )
 
-if sys.version_info < (3, 5):
-    from collections import namedtuple
+# Stores validated arguments for an instance of an action.
+# See DOCUMENTATION string for argument-specific information.
+HomebrewServiceArgs = NamedTuple("HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)])
 
-    # Stores validated arguments for an instance of an action.
-    # See DOCUMENTATION string for argument-specific information.
-    HomebrewServiceArgs = namedtuple("HomebrewServiceArgs", ["name", "state", "brew_path"])
-
-    # Stores the state of a Homebrew service.
-    HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"])
-
-else:
-    from typing import NamedTuple, Optional
-
-    # Stores validated arguments for an instance of an action.
-    # See DOCUMENTATION string for argument-specific information.
-    HomebrewServiceArgs = NamedTuple("HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)])
-
-    # Stores the state of a Homebrew service.
-    HomebrewServiceState = NamedTuple("HomebrewServiceState", [("running", bool), ("pid", Optional[int])])
+# Stores the state of a Homebrew service.
+HomebrewServiceState = NamedTuple("HomebrewServiceState", [("running", bool), ("pid", Optional[int])])
 
 
 def _brew_service_state(args, module):
@@ -219,7 +219,7 @@ def main():
     elif state in ("poweroff"):
         power_status = ilo.get_host_power_status()
 
-        if not power_status == "OFF":
+        if power_status != "OFF":
             ilo.hold_pwr_btn()
             # ilo.set_host_power(host_power=False)
             changed = True
@@ -254,7 +254,6 @@ EXAMPLES = r"""
     state: present
 """
 
-import io
 import os
 import re
 import tempfile

@@ -351,7 +350,7 @@ def do_ini(
             os.makedirs(destpath)
         ini_lines = []
     else:
-        with io.open(target_filename, "r", encoding="utf-8-sig") as ini_file:
+        with open(target_filename, "r", encoding="utf-8-sig") as ini_file:
             ini_lines = [to_text(line) for line in ini_file.readlines()]
 
     if module._diff:
@@ -252,7 +252,7 @@ def get_otptoken_dict(
     if owner is not None:
         otptoken[ansible_to_ipa["owner"]] = owner
     if enabled is not None:
-        otptoken[ansible_to_ipa["enabled"]] = False if enabled else True
+        otptoken[ansible_to_ipa["enabled"]] = not enabled
     if notbefore is not None:
         otptoken[ansible_to_ipa["notbefore"]] = f"{notbefore}Z"
     if notafter is not None:
@@ -455,7 +455,7 @@ def delete_target(module, headers):
         )
 
         status = info.get("status", 0)
-        if not status == 200:
+        if status != 200:
             module.fail_json(msg=f"Failed to delete: HTTP {status}, {response}, {headers}")
 
     except Exception as e:

@@ -605,7 +605,7 @@ def run_module():
     does_exist = target_exists(module)
 
     # Check if the credential/domain doesn't exist and the user wants to delete
-    if not does_exist and state == "absent" and not type == "token":
+    if not does_exist and state == "absent" and type != "token":
         result["changed"] = False
         result["msg"] = f"{id} does not exist."
         module.exit_json(**result)

@@ -762,7 +762,7 @@ def run_module():
 
         payload = {"credentials": credentials}
 
-        if not type == "file" and not type == "token":
+        if type != "file" and type != "token":
             body = urlencode({"json": json.dumps(payload)})
 
     else: # Delete

@@ -770,7 +770,7 @@ def run_module():
 
         module.exit_json(changed=True, msg=f"{id} deleted successfully.")
 
-    if not type == "scope" and not scope == "_": # Check if custom scope exists if adding to a custom scope
+    if type != "scope" and scope != "_": # Check if custom scope exists if adding to a custom scope
         if not target_exists(module, True):
             module.fail_json(msg=f"Domain {scope} doesn't exists")
 
@@ -781,7 +781,7 @@ def run_module():
 
     status = info.get("status", 0)
 
-    if not status == 200:
+    if status != 200:
         body = response.read() if response else b""
         module.fail_json(
             msg=f"Failed to {'add/update' if state == 'present' else 'delete'} credential",
@@ -332,7 +332,6 @@ state:
 """
 
 import hashlib
-import io
 import json
 import os
 import tempfile

@@ -770,7 +769,7 @@ class JenkinsPlugin:
 
         # Open the updates file
         try:
-            f = io.open(tmp_updates_file, encoding="utf-8")
+            f = open(tmp_updates_file, encoding="utf-8")
 
             # Read only the second line
             dummy = f.readline()
@@ -1,5 +1,4 @@
 #!/usr/bin/python
-# -*- coding: utf-8 -*-
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 # Copyright © Thorsten Glaser <tglaser@b1-systems.de>
@@ -339,7 +339,7 @@ def main():
             # defined resource does not include all those scopes.
             for scope in scopes:
                 s = kc.get_authz_authorization_scope_by_name(scope, cid, realm)
-                if r and not s["id"] in resource_scopes:
+                if r and s["id"] not in resource_scopes:
                     module.fail_json(
                         msg=f"Resource {resources[0]} does not include scope {scope} for client {client_id} in realm {realm}"
                     )
@@ -185,7 +185,7 @@ def clientscopes_to_add(existing, proposed):
     to_add = []
     existing_clientscope_ids = extract_field(existing, "id")
     for clientscope in proposed:
-        if not clientscope["id"] in existing_clientscope_ids:
+        if clientscope["id"] not in existing_clientscope_ids:
             to_add.append(clientscope)
     return to_add
 

@@ -194,7 +194,7 @@ def clientscopes_to_delete(existing, proposed):
     to_delete = []
     proposed_clientscope_ids = extract_field(proposed, "id")
     for clientscope in existing:
-        if not clientscope["id"] in proposed_clientscope_ids:
+        if clientscope["id"] not in proposed_clientscope_ids:
             to_delete.append(clientscope)
     return to_delete
 
@@ -1,5 +1,4 @@
 #!/usr/bin/python
-# -*- coding: utf-8 -*-
 
 # Copyright (c) 2025, mariusbertram <marius@brtrm.de>
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -1044,7 +1044,7 @@ def main():
         # we remove all unwanted default mappers
         # we use ids so we dont accidently remove one of the previously updated default mapper
         for default_mapper in default_mappers:
-            if not default_mapper["id"] in [x["id"] for x in updated_mappers]:
+            if default_mapper["id"] not in [x["id"] for x in updated_mappers]:
                 kc.delete_component(default_mapper["id"], realm)
 
         after_comp["mappers"] = kc.get_components(urlencode(dict(parent=cid)), realm)

@@ -1088,7 +1088,7 @@ def main():
 
         for before_mapper in before_comp.get("mappers", []):
             # remove unwanted existing mappers that will not be updated
            -            if not before_mapper["id"] in [x["id"] for x in desired_mappers if "id" in x]:
+            if before_mapper["id"] not in [x["id"] for x in desired_mappers if "id" in x]:
                 kc.delete_component(before_mapper["id"], realm)
 
         for mapper in desired_mappers:
@@ -293,7 +293,7 @@ def ss_parse(raw):
         try:
             if len(cells) == 6:
                 # no process column, e.g. due to unprivileged user
-                process = str()
+                process = ""
                 protocol, state, recv_q, send_q, local_addr_port, peer_addr_port = cells
             else:
                 protocol, state, recv_q, send_q, local_addr_port, peer_addr_port, process = cells

@@ -312,7 +312,7 @@ def ss_parse(raw):
             if pids is None:
                 # likely unprivileged user, so add empty name & pid
                 # as we do in netstat logic to be consistent with output
-                pids = [(str(), 0)]
+                pids = [("", 0)]
 
             address = conns.group(1)
             port = conns.group(2)
@@ -857,7 +857,7 @@ class LxcContainerManagement:
 
         if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
             return str(self.container.state).lower()
-        return str("absent")
+        return "absent"
 
     def _execute_command(self):
         """Execute a shell command."""
@@ -714,7 +714,7 @@ class LXDContainerManagement:
 
         if self._needs_to_change_instance_config(param):
             if param == "config":
-                body_json["config"] = body_json.get("config", None) or {}
+                body_json["config"] = body_json.get("config") or {}
                 for k, v in self.config["config"].items():
                     body_json["config"][k] = v
             else:
@@ -1,5 +1,4 @@
 #!/usr/bin/python
-# coding: utf-8
 
 # Copyright (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -237,7 +237,6 @@ import hashlib
 import os
 import posixpath
 import shutil
-import io
 import tempfile
 import traceback
 import re

@@ -503,7 +502,7 @@ class MavenDownloader:
         if self.local:
             parsed_url = urlparse(url)
             if os.path.isfile(parsed_url.path):
-                with io.open(parsed_url.path, "rb") as f:
+                with open(parsed_url.path, "rb") as f:
                     return f.read()
             if force:
                 raise ValueError(f"{failmsg} because can not find file: {url}")

@@ -630,7 +629,7 @@ class MavenDownloader:
             hash = hashlib.sha1()
         else:
             raise ValueError(f"Unknown checksum_alg {checksum_alg}")
-        with io.open(file, "rb") as f:
+        with open(file, "rb") as f:
             for chunk in iter(lambda: f.read(8192), b""):
                 hash.update(chunk)
         return hash.hexdigest()
@@ -149,7 +149,7 @@ class Monit:
         :@param validate: Force monit to re-check the status of the process
         """
         monit_command = "validate" if validate else "status"
-        check_rc = False if validate else True # 'validate' always has rc = 1
+        check_rc = not validate # 'validate' always has rc = 1
         command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name]
         rc, out, err = self.module.run_command(command, check_rc=check_rc)
         return self._parse_status(out, err)

@@ -307,7 +307,7 @@ def main():
 
     present = monit.is_process_present()
 
-    if not present and not state == "present":
+    if not present and state != "present":
         module.fail_json(msg=f"{name} process not presently configured with monit", name=name)
 
     if state == "present":
@@ -279,7 +279,7 @@ def main():
             for r in all_records
             if r.hostname == record.hostname
             and r.type == record.type
-            and not r.destination == record.destination
+            and r.destination != record.destination
         ]
 
         if obsolete_records:
@@ -2878,7 +2878,7 @@ def main():
 
     # team checks
     if nmcli.type == "team":
-        if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup":
+        if nmcli.runner_hwaddr_policy and nmcli.runner != "activebackup":
            nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup")
        if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
            nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
@@ -163,7 +163,7 @@ def run():
             if job_id is None:
                 module.fail_json(msg="Cannot retrieve job with ID None")
             plan = nomad_client.job.plan_job(job_id, job, diff=True)
-            if not plan["Diff"].get("Type") == "None":
+            if plan["Diff"].get("Type") != "None":
                 changed = True
                 if not module.check_mode:
                     result = nomad_client.jobs.register_job(job)

@@ -186,7 +186,7 @@ def run():
             try:
                 job_id = job_json.get("ID")
                 plan = nomad_client.job.plan_job(job_id, job, diff=True)
-                if not plan["Diff"].get("Type") == "None":
+                if plan["Diff"].get("Type") != "None":
                     changed = True
                     if not module.check_mode:
                         result = nomad_client.jobs.register_job(job)

@@ -215,7 +215,7 @@ def run():
                 result = nomad_client.jobs.register_job(job)
             else:
                 result = nomad_client.validate.validate_job(job)
-                if not result.status_code == 200:
+                if result.status_code != 200:
                     module.fail_json(msg=to_native(result.text))
                 result = json.loads(result.text)
                 changed = True

@@ -224,7 +224,7 @@ def run():
 
     if module.params.get("state") == "absent":
         try:
-            if not module.params.get("name") is None:
+            if module.params.get("name") is not None:
                 job_name = module.params.get("name")
             else:
                 if module.params.get("content_format") == "hcl":
@@ -280,7 +280,7 @@ class Npm:
             if dep:
                 # node.js v0.10.22 changed the `npm outdated` module separator
                 # from "@" to " ". Split on both for backwards compatibility.
-                pkg, other = re.split(r"\s|@", dep, 1)
+                pkg, other = re.split(r"\s|@", dep, maxsplit=1)
                 outdated.append(pkg)
 
         return outdated
@@ -282,7 +282,7 @@ class HostModule(OpenNebulaModule):
                 self.fail(msg=f"Failed to update the host template, ERROR: {e}")
 
         # the cluster
-        if host.CLUSTER_ID != self.get_parameter("cluster_id"):
+        if self.get_parameter("cluster_id") != host.CLUSTER_ID:
             # returns cluster id in int
             try:
                 one.cluster.addhost(self.get_parameter("cluster_id"), host.ID)
@@ -466,10 +466,10 @@ class ImageModule(OpenNebulaModule):
         return None
 
     def get_image_by_name(self, image_name):
-        return self.get_image(lambda image: (image.NAME == image_name))
+        return self.get_image(lambda image: (image_name == image.NAME))
 
     def get_image_by_id(self, image_id):
-        return self.get_image(lambda image: (image.ID == image_id))
+        return self.get_image(lambda image: (image_id == image.ID))
 
     def get_image_instance(self, requested_id, requested_name):
         # Using 'if requested_id:' doesn't work properly when requested_id=0

@@ -589,7 +589,7 @@ class ImageModule(OpenNebulaModule):
             result["changed"] = False
             return result
 
-        if image.STATE == IMAGE_STATES.index("DISABLED"):
+        if IMAGE_STATES.index("DISABLED") == image.STATE:
             self.module.fail_json(msg="Cannot clone DISABLED image")
 
         if not self.module.check_mode:
@@ -226,10 +226,10 @@ class TemplateModule(OpenNebulaModule):
         return None
 
     def get_template_by_id(self, template_id, filter):
-        return self.get_template(lambda template: (template.ID == template_id), filter)
+        return self.get_template(lambda template: (template_id == template.ID), filter)
 
     def get_template_by_name(self, name, filter):
-        return self.get_template(lambda template: (template.NAME == name), filter)
+        return self.get_template(lambda template: (name == template.NAME), filter)
 
     def get_template_instance(self, requested_id, requested_name, filter):
         if requested_id:

@@ -270,7 +270,7 @@ class TemplateModule(OpenNebulaModule):
             result["changed"] = True
         else:
             # if the previous parsed template data is not equal to the updated one, this has changed
-            result["changed"] = template.TEMPLATE != result["template"]
+            result["changed"] = result["template"] != template.TEMPLATE
 
         return result
 
@@ -764,11 +764,11 @@ def get_template(module, client, predicate):
 
 
 def get_template_by_name(module, client, template_name):
-    return get_template(module, client, lambda template: (template.NAME == template_name))
+    return get_template(module, client, lambda template: (template_name == template.NAME))
 
 
 def get_template_by_id(module, client, template_id):
-    return get_template(module, client, lambda template: (template.ID == template_id))
+    return get_template(module, client, lambda template: (template_id == template.ID))
 
 
 def get_template_id(module, client, requested_id, requested_name):

@@ -803,11 +803,11 @@ def get_datastore(module, client, predicate):
 
 
 def get_datastore_by_name(module, client, datastore_name):
-    return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+    return get_datastore(module, client, lambda datastore: (datastore_name == datastore.NAME))
 
 
 def get_datastore_by_id(module, client, datastore_id):
-    return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+    return get_datastore(module, client, lambda datastore: (datastore_id == datastore.ID))
 
 
 def get_datastore_id(module, client, requested_id, requested_name):

@@ -887,7 +887,7 @@ def get_vm_info(client, vm):
 
     # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
     vm_lcm_state = None
-    if vm.STATE == VM_STATES.index("ACTIVE"):
+    if VM_STATES.index("ACTIVE") == vm.STATE:
         vm_lcm_state = LCM_STATES[vm.LCM_STATE]
 
     vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)

@@ -1141,7 +1141,7 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list):
             if with_hash and vm.NAME[len(base_name) :].isdigit():
                 # If the name has indexed format and after base_name it has only digits it'll be matched
                 vm_list.append(vm)
-            elif not with_hash and vm.NAME == name:
+            elif not with_hash and name == vm.NAME:
                 # If the name is not indexed it has to be same
                 vm_list.append(vm)
         pool = vm_list

@@ -1600,7 +1600,7 @@ def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
     disk_id = disk_saveas.get("disk_id", 0)
 
     if not module.check_mode:
-        if vm.STATE != VM_STATES.index("POWEROFF"):
+        if VM_STATES.index("POWEROFF") != vm.STATE:
             module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
         try:
             client.vm.disksaveas(vm.ID, disk_id, image_name, "OS", -1)
@@ -320,10 +320,10 @@ class NetworksModule(OpenNebulaModule):
         return None
 
     def get_template_by_id(self, template_id):
-        return self.get_template(lambda template: (template.ID == template_id))
+        return self.get_template(lambda template: (template_id == template.ID))
 
     def get_template_by_name(self, name):
-        return self.get_template(lambda template: (template.NAME == name))
+        return self.get_template(lambda template: (name == template.NAME))
 
     def get_template_instance(self, requested_id, requested_name):
         if requested_id:

@@ -411,7 +411,7 @@ class NetworksModule(OpenNebulaModule):
             result["changed"] = True
         else:
             # if the previous parsed template data is not equal to the updated one, this has changed
-            result["changed"] = template.TEMPLATE != result["template"]
+            result["changed"] = result["template"] != template.TEMPLATE
 
         return result
 
@@ -252,8 +252,8 @@ class OnePasswordInfo:
                 module.fail_json(msg=f"Missing required 'name' field from search term, got: '{term}'")
 
             term["field"] = term.get("field", "password")
-            term["section"] = term.get("section", None)
-            term["vault"] = term.get("vault", None)
+            term["section"] = term.get("section")
+            term["vault"] = term.get("vault")
 
             processed_terms.append(term)
 
@@ -792,7 +792,7 @@ def main():
             ("state", "after", ["new_control", "new_type", "new_module_path"]),
         ],
     )
-    content = str()
+    content = ""
     fname = os.path.join(module.params["path"], module.params["name"])
 
    # Open the file and read the content or fail
@@ -274,7 +274,7 @@ def install_packages(module, packages):
 
         rc, out, err = module.run_command(format_pkgin_command(module, "install", package))
 
-        if not module.check_mode and not query_package(module, package) in [
+        if not module.check_mode and query_package(module, package) not in [
             PackageState.PRESENT,
             PackageState.OUTDATED,
         ]:
@@ -231,7 +231,7 @@ class pulp_server:
             if key not in distributor["config"].keys():
                 return False
 
-            if not distributor["config"][key] == value:
+            if distributor["config"][key] != value:
                 return False
 
         return True

@@ -245,7 +245,7 @@ class pulp_server:
             if key not in importer["config"].keys():
                 return False
 
-            if not importer["config"][key] == value:
+            if importer["config"][key] != value:
                 return False
 
         return True
@@ -901,10 +901,10 @@ class RhsmPool:
         return str(self.__getattribute__("_name"))
 
     def get_pool_id(self):
-        return getattr(self, "PoolId", getattr(self, "PoolID"))
+        return getattr(self, "PoolId", self.PoolID)
 
     def get_quantity_used(self):
-        return int(getattr(self, "QuantityUsed"))
+        return int(self.QuantityUsed)
 
     def subscribe(self):
        args = f"subscription-manager attach --pool {self.get_pool_id()}"
@@ -1078,14 +1078,13 @@ class RHEV:
 
     def setDisks(self, name, disks):
         self.__get_conn()
-        counter = 0
         bootselect = False
         for disk in disks:
            if "bootable" in disk:
                if disk["bootable"] is True:
                    bootselect = True
 
-        for disk in disks:
+        for counter, disk in enumerate(disks):
            diskname = f"{name}_Disk{counter}_{disk.get('name', '').replace('/', '_')}"
            disksize = disk.get("size", 1)
            diskdomain = disk.get("domain", None)

@@ -1110,7 +1109,6 @@ class RHEV:
            else:
                self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
            checkFail()
-            counter += 1
 
        return True
 
@@ -275,7 +275,7 @@ def sensu_check(module, path, name, state="present", backup=False):
 
    for k, v in custom_params.items():
        if k in config["checks"][name]:
-            if not config["checks"][name][k] == v:
+            if config["checks"][name][k] != v:
                changed = True
                reasons.append(f"`custom param {k}' was changed")
        else:

@@ -365,7 +365,7 @@ def main():
 
    module = AnsibleModule(argument_spec=arg_spec, required_together=required_together, supports_check_mode=True)
    if module.params["state"] != "absent" and module.params["command"] is None:
-        module.fail_json(msg="missing required arguments: %s" % ",".join(["command"]))
+        module.fail_json(msg="missing required arguments: command")
 
    path = module.params["path"]
    name = module.params["name"]
@@ -197,7 +197,7 @@ class SimpleinitMSB:
 
        (rc, out, err) = self.execute_command(f"{self.telinit_cmd} {self.enable}d")
 
-        service_enabled = False if self.enable else True
+        service_enabled = not self.enable
 
        rex = re.compile(rf"^{self.name}$")
 
@@ -736,7 +736,7 @@ def main():
        state_msg = "no change in state"
        state_changed = False
 
-    module.exit_json(changed=state_changed, msg=f"{state_msg}: {'; '.join((x[1] for x in changed.values()))}")
+    module.exit_json(changed=state_changed, msg=f"{state_msg}: {'; '.join(x[1] for x in changed.values())}")
 
 
 if __name__ == "__main__":
@@ -756,7 +756,7 @@ def main():
            cmd=" ".join(command),
        )
    # checks out to decide if changes were made during execution
-    if " 0 added, 0 changed" not in out and not state == "absent" or " 0 destroyed" not in out:
+    if " 0 added, 0 changed" not in out and state != "absent" or " 0 destroyed" not in out:
        changed = True
 
    if no_color:
@@ -840,7 +840,7 @@ class AIXTimezone(Timezone):
        # The best condition check we can do is to check the value of TZ after making the
        # change.
        TZ = self.__get_timezone()
-        if TZ != value:
+        if value != TZ:
            msg = f"TZ value does not match post-change (Actual: {TZ}, Expected: {value})."
            self.module.fail_json(msg=msg)
 
@@ -802,9 +802,8 @@ class XenServerVM(XenServerObject):
                vm_disk_params_list = [
                    disk_params for disk_params in self.vm_params["VBDs"] if disk_params["type"] == "Disk"
                ]
-                position = 0
 
-                for disk_change_list in change["disks_changed"]:
+                for position, disk_change_list in enumerate(change["disks_changed"]):
                    for disk_change in disk_change_list:
                        vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(
                            vm_disk_params_list[position]["VDI"]["uuid"]

@@ -829,7 +828,6 @@ class XenServerVM(XenServerObject):
                        ),
                    )
 
-                    position += 1
            elif change.get("disks_new"):
                for position, disk_userdevice in change["disks_new"]:
                    disk_params = self.module.params["disks"][position]
@@ -142,7 +142,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib
 
 from ansible.module_utils.urls import fetch_url
 from ansible.module_utils.common.text.converters import to_text
-from io import StringIO, open
+from io import StringIO
 
 from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
 