Add Podman play kube module (#244)
* Add Podman play kube module
* Add tests
parent 95a402b869
commit 1c0a167a1d
10 changed files with 758 additions and 0 deletions
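
For orientation, the new podman_play module is a wrapper around "podman play kube".
A minimal invocation might look like the sketch below (not part of the commit; the
kube file path is a placeholder, and the collection built by this commit is assumed
to be installed):

- name: Create and start a pod from a Kubernetes YAML file
  containers.podman.podman_play:
    kube_file: /tmp/play1.yaml  # placeholder: any Pod/Deployment YAML that 'podman play kube' accepts
    state: started              # one of 'created', 'started', 'absent'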
109  .github/workflows/podman_play.yml  vendored  Normal file
@@ -0,0 +1,109 @@
name: Podman play

on:
  push:
    paths:
      - '.github/workflows/podman_play.yml'
      - 'ci/*.yml'
      - 'ci/run_containers_tests.sh'
      - 'ci/playbooks/containers/podman_play.yml'
      - 'plugins/modules/podman_play.py'
      - 'tests/integration/targets/podman_play/**'
    branches:
      - master
  pull_request:
    paths:
      - '.github/workflows/podman_play.yml'
      - 'ci/*.yml'
      - 'ci/run_containers_tests.sh'
      - 'ci/playbooks/containers/podman_play.yml'
      - 'plugins/modules/podman_play.py'
      - 'tests/integration/targets/podman_play/**'
  schedule:
    - cron: '4 0 * * *'  # Run daily at 0:04 UTC

jobs:

  test_podman_play:
    name: Podman play ${{ matrix.ansible-version }}-${{ matrix.os || 'ubuntu-latest' }}
    runs-on: ${{ matrix.os || 'ubuntu-latest' }}
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      matrix:
        ansible-version:
          - ansible<2.10
          # - git+https://github.com/ansible/ansible.git@stable-2.11
          - git+https://github.com/ansible/ansible.git@devel
        os:
          - ubuntu-20.04
        python-version:
          - 3.7

    steps:

      - name: Check out repository
        uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Upgrade pip and display Python and PIP versions
        run: |
          sudo apt-get update
          sudo apt-get install -y python*-wheel python*-yaml
          python -m pip install --upgrade pip
          python -V
          pip --version

      - name: Set up pip cache
        uses: actions/cache@v1
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ github.ref }}-units-VMs
          restore-keys: |
            ${{ runner.os }}-pip-
            ${{ runner.os }}-

      - name: Install Ansible ${{ matrix.ansible-version }}
        run: python3 -m pip install --user --force-reinstall --upgrade '${{ matrix.ansible-version }}'

      - name: Build and install the collection tarball
        run: |
          export PATH=~/.local/bin:$PATH

          echo "Run ansible version"
          command -v ansible
          ansible --version
          rm -rf /tmp/just_new_collection
          ~/.local/bin/ansible-galaxy collection build --output-path /tmp/just_new_collection --force
          ~/.local/bin/ansible-galaxy collection install -vvv --force /tmp/just_new_collection/*.tar.gz

      - name: Run collection tests for podman play
        run: |
          export PATH=~/.local/bin:$PATH

          if [[ '${{ matrix.ansible-version }}' == 'git+https://github.com/ansible/ansible.git@devel' ]]; then
            export ANSIBLE_CONFIG=$(pwd)/ci/ansible-dev.cfg
          elif [[ '${{ matrix.ansible-version }}' == 'ansible<2.10' ]]; then
            export ANSIBLE_CONFIG=$(pwd)/ci/ansible-2.9.cfg
          fi

          echo $ANSIBLE_CONFIG
          command -v ansible-playbook
          pip --version
          python --version
          ansible-playbook --version

          ansible-playbook -vv ci/playbooks/pre.yml \
            -e host=localhost \
            -i localhost, \
            -e ansible_connection=local \
            -e setup_python=false

          TEST2RUN=podman_play ./ci/run_containers_tests.sh
        shell: bash
8  ci/playbooks/containers/podman_play.yml  Normal file
@@ -0,0 +1,8 @@
---
- hosts: all
  gather_facts: true
  tasks:
    - include_role:
        name: podman_play
  vars:
    ansible_python_interpreter: "{{ _ansible_python_interpreter }}"
299  plugins/modules/podman_play.py  Normal file
@@ -0,0 +1,299 @@
#!/usr/bin/python
# Copyright (c) 2020 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type


DOCUMENTATION = r'''
module: podman_play
author:
  - "Sagi Shnaidman (@sshnaidm)"
short_description: Play Kubernetes YAML file using podman
notes: []
description:
  - The module reads in a structured file of Kubernetes YAML.
    It will then recreate the pod and containers described in the YAML.
requirements:
  - "Podman installed on host"
options:
  executable:
    description:
      - Name of the executable to run, 'podman' by default.
    type: str
    default: podman
  kube_file:
    description:
      - Path to the file with the YAML configuration for a Pod.
    type: path
    required: True
  authfile:
    description:
      - Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json,
        which is set using podman login. If the authorization state is not found there,
        $HOME/.docker/config.json is checked, which is set using docker login.
        Note - You can also override the default path of the authentication file
        by setting the REGISTRY_AUTH_FILE environment variable. export REGISTRY_AUTH_FILE=path
    type: path
  cert_dir:
    description:
      - Use certificates at path (*.crt, *.cert, *.key) to connect to the registry.
        The default certificates directory is /etc/containers/certs.d.
        (This option is not available with the remote Podman client.)
    type: path
  configmap:
    description:
      - Use Kubernetes configmap YAML at path to provide a source for environment
        variable values within the containers of the pod.
        Note - The configmap option can be used multiple times to pass multiple
        Kubernetes configmap YAMLs.
    type: list
    elements: path
  seccomp_profile_root:
    description:
      - Directory path for seccomp profiles (default is "/var/lib/kubelet/seccomp").
        This option is not available with the remote Podman client.
    type: path
  username:
    description:
      - The username to use to authenticate with the registry, if required.
    type: str
  password:
    description:
      - The password to use to authenticate with the registry, if required.
    type: str
  log_driver:
    description:
      - Set logging driver for all created containers.
    type: str
  log_level:
    description:
      - Set logging level for podman calls. Log messages above the specified level
        ("debug"|"info"|"warn"|"error"|"fatal"|"panic") (default "error").
    type: str
    choices:
      - debug
      - info
      - warn
      - error
      - fatal
      - panic
  network:
    description:
      - List of the names of CNI networks the pod should join.
    type: list
    elements: str
  state:
    description:
      - Whether to start the pod after creating it, leave it only created,
        or remove it ('absent').
    type: str
    choices:
      - created
      - started
      - absent
    required: True
  tls_verify:
    description:
      - Require HTTPS and verify certificates when contacting registries (default is true).
        If explicitly set to true, TLS verification will be used. If set to false,
        TLS verification will not be used. If not specified, TLS verification will be
        used unless the target registry is listed as an insecure registry in registries.conf.
    type: bool
  debug:
    description:
      - Enable debug output for the module.
    type: bool
  recreate:
    description:
      - If the pod already exists, delete it and run the new one.
    type: bool
  quiet:
    description:
      - Hide image pull logs from output.
    type: bool
'''

EXAMPLES = '''
- name: Play kube file
  containers.podman.podman_play:
    kube_file: ~/kube.yaml
    state: started
'''

import re  # noqa: F402
try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

from ansible.module_utils.basic import AnsibleModule  # noqa: F402


NAME = re.compile('name "([^"]+)" is in use')


class PodmanKubeManagement:

    def __init__(self, module, executable):
        self.module = module
        self.actions = []
        self.executable = executable
        self.command = [self.executable, 'play', 'kube']
        creds = []
        # pod_name = extract_pod_name(module.params['kube_file'])
        if self.module.params['username']:
            creds += [self.module.params['username']]
        if self.module.params['password']:
            creds += [self.module.params['password']]
        creds = ":".join(creds)
        # Only pass --creds when a username and/or password was supplied
        if creds:
            self.command.extend(['--creds=%s' % creds])
        if self.module.params['network']:
            networks = ",".join(self.module.params['network'])
            self.command.extend(['--network=%s' % networks])
        if self.module.params['configmap']:
            configmaps = ",".join(self.module.params['configmap'])
            self.command.extend(['--configmap=%s' % configmaps])
        start = self.module.params['state'] == 'started'
        self.command.extend(['--start=%s' % str(start).lower()])
        for arg, param in {
            '--authfile': 'authfile',
            '--cert-dir': 'cert_dir',
            '--log-driver': 'log_driver',
            '--seccomp-profile-root': 'seccomp_profile_root',
            '--tls-verify': 'tls_verify',
            '--log-level': 'log_level',
            '--quiet': 'quiet',
        }.items():
            if self.module.params[param] is not None:
                self.command += ["%s=%s" % (arg, self.module.params[param])]
        self.command += [self.module.params['kube_file']]

    def _command_run(self, cmd):
        rc, out, err = self.module.run_command(cmd)
        self.actions.append(" ".join(cmd))
        if self.module.params['debug']:
            self.module.log('PODMAN-PLAY-KUBE command: %s' % " ".join(cmd))
            self.module.log('PODMAN-PLAY-KUBE stdout: %s' % out)
            self.module.log('PODMAN-PLAY-KUBE stderr: %s' % err)
            self.module.log('PODMAN-PLAY-KUBE rc: %s' % rc)
        return rc, out, err

    def discover_pods(self):
        pod_name = ''
        if self.module.params['kube_file']:
            if HAS_YAML:
                with open(self.module.params['kube_file']) as f:
                    pod = yaml.safe_load(f)
                    if 'metadata' in pod:
                        pod_name = pod['metadata'].get('name')
                    else:
                        self.module.fail_json(
                            msg="No metadata in Kube file!\n%s" % pod)
            else:
                # PyYAML is unavailable; fall back to scanning the raw text.
                with open(self.module.params['kube_file']) as text:
                    re_pod = NAME.search(text.read())
                    if re_pod:
                        pod_name = re_pod.group(1)
        if not pod_name:
            self.module.fail_json(msg="Deployment doesn't have a name!")
        # Find all pods
        all_pods = ''
        # In case of one pod or replicasets
        for name in ("name=%s$", "name=%s-pod-*"):
            cmd = [self.executable,
                   "pod", "ps", "-q", "--filter", name % pod_name]
            rc, out, err = self._command_run(cmd)
            all_pods += out
        ids = list(set([i for i in all_pods.splitlines() if i]))
        return ids

    def remove_associated_pods(self, pods):
        changed = False
        out_all, err_all = '', ''
        # Delete all pods
        for pod_id in pods:
            rc, out, err = self._command_run(
                [self.executable, "pod", "rm", "-f", pod_id])
            if rc != 0:
                self.module.fail_json(msg="Can NOT delete Pod %s" % pod_id)
            else:
                changed = True
                out_all += out
                err_all += err
        return changed, out_all, err_all

    def pod_recreate(self):
        pods = self.discover_pods()
        self.remove_associated_pods(pods)
        # Create a pod
        rc, out, err = self._command_run(self.command)
        if rc != 0:
            self.module.fail_json(msg="Can NOT create Pod! Error: %s" % err)
        return out, err

    def play(self):
        rc, out, err = self._command_run(self.command)
        if rc != 0 and 'pod already exists' in err:
            if self.module.params['recreate']:
                out, err = self.pod_recreate()
                changed = True
            else:
                changed = False
                err = "\n".join([
                    i for i in err.splitlines() if 'pod already exists' not in i])
        elif rc != 0:
            self.module.fail_json(msg="Output: %s\nError=%s" % (out, err))
        else:
            changed = True
        return changed, out, err


def main():
    module = AnsibleModule(
        argument_spec=dict(
            executable=dict(type='str', default='podman'),
            kube_file=dict(type='path', required=True),
            authfile=dict(type='path'),
            cert_dir=dict(type='path'),
            configmap=dict(type='list', elements='path'),
            seccomp_profile_root=dict(type='path'),
            username=dict(type='str'),
            password=dict(type='str', no_log=True),
            log_driver=dict(type='str'),
            network=dict(type='list', elements='str'),
            state=dict(
                type='str',
                choices=['started', 'created', 'absent'],
                required=True),
            tls_verify=dict(type='bool'),
            debug=dict(type='bool'),
            quiet=dict(type='bool'),
            recreate=dict(type='bool'),
            log_level=dict(
                type='str',
                choices=["debug", "info", "warn", "error", "fatal", "panic"]),
        ),
        supports_check_mode=True,
    )

    executable = module.get_bin_path(
        module.params['executable'], required=True)
    manage = PodmanKubeManagement(module, executable)
    if module.params['state'] == 'absent':
        pods = manage.discover_pods()
        changed, out, err = manage.remove_associated_pods(pods)
    else:
        changed, out, err = manage.play()
    results = {
        "changed": changed,
        "stdout": out,
        "stderr": err,
        "actions": manage.actions
    }
    module.exit_json(**results)


if __name__ == '__main__':
    main()
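
The EXAMPLES block above is intentionally minimal. The sketch below (not part of the
commit) exercises more of the documented options; the kube file, configmap, network
name and registry credentials are placeholders for illustration only:

- name: Recreate a pod from kube YAML, injecting a configmap and authenticating to a registry
  containers.podman.podman_play:
    kube_file: /tmp/play3.yaml      # placeholder kube YAML
    state: started
    recreate: true                  # delete and re-run the pod if it already exists
    configmap:
      - /tmp/envdata.yaml           # placeholder configmap providing container env vars
    network:
      - podman                      # placeholder CNI network name
    username: exampleuser           # placeholder registry credentials (passed as --creds)
    password: examplepass
    tls_verify: true
    log_level: error
  register: play_result

- name: Remove the pods created from the same kube YAML
  containers.podman.podman_play:
    kube_file: /tmp/play3.yaml
    state: absent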
9  tests/integration/targets/podman_play/tasks/files/envdata.yaml  Normal file
@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: envdata
  namespace: default
data:
  var1: somevalue1
  key2: value2
26  tests/integration/targets/podman_play/tasks/files/play-root1.yaml  Normal file
@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deploy-root
  labels:
    app: webapp
spec:
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
        - name: alpinexroot
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 80
        - name: alpineyroot
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 7777
31  tests/integration/targets/podman_play/tasks/files/play-root3.yaml  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deploy-root
  labels:
    app: webapp
spec:
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
        - name: alpinexroot
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 80
              hostPort: 80
        - name: alpineyroot
          image: alpine
          command: ['sleep', '1d']
          envFrom:
            - configMapRef:
                name: envdata
          ports:
            - containerPort: 7777
              hostPort: 7878
26  tests/integration/targets/podman_play/tasks/files/play1.yaml  Normal file
@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deploy
  labels:
    app: webapp
spec:
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
        - name: alpinex
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 8080
        - name: alpiney
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 7777
31  tests/integration/targets/podman_play/tasks/files/play3.yaml  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deploy
  labels:
    app: webapp
spec:
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
        - name: alpinex
          image: alpine
          command: ['sleep', '1d']
          envFrom:
            - configMapRef:
                name: envdata
          ports:
            - containerPort: 8080
              hostPort: 8080
        - name: alpiney
          image: alpine
          command: ['sleep', '1d']
          ports:
            - containerPort: 7777
              hostPort: 8787
122  tests/integration/targets/podman_play/tasks/main.yml  Normal file
@@ -0,0 +1,122 @@
- name: Test podman play kube
  block:

    - name: Discover podman version
      shell: podman version | grep "^Version:" | awk {'print $2'}
      register: podman_v

    - name: Discover cgroups version
      shell: podman info | grep cgroupVersion | awk {'print $2'}
      register: cgroups

    - name: Set cgroups version
      set_fact:
        cgroups_version: "{{ cgroups.stdout }}"

    - name: Copy files to known place
      copy:
        src: "{{ item }}"
        dest: "/tmp/{{ item }}"
        remote_src: false
      loop:
        - play1.yaml
        - play3.yaml
        - play-root1.yaml
        - play-root3.yaml
        - envdata.yaml

    - name: Delete all pods leftovers from tests
      containers.podman.podman_pod:
        name: "{{ item }}"
        state: absent
      loop:
        - web-deploy
        - web-deploy-pod
        - web-deploy-pod-0

    - name: Play kube file
      containers.podman.podman_play:
        kube_file: /tmp/play1.yaml
        state: started
      register: play1

    - name: Play same kube file again
      containers.podman.podman_play:
        kube_file: /tmp/play1.yaml
        state: started
      register: play2

    - name: Check info
      assert:
        that:
          - play1 is changed
          - play2 is not changed

    - name: Recreate play
      containers.podman.podman_play:
        kube_file: /tmp/play1.yaml
        state: started
        recreate: true
      register: play3

    - name: Check info
      assert:
        that:
          - play3 is changed

    - name: Check 'created' after 'started'
      containers.podman.podman_play:
        kube_file: /tmp/play1.yaml
        state: created
      register: play4

    - name: Check info
      assert:
        that:
          - play4 is not changed

    - name: Run with configmap
      containers.podman.podman_play:
        kube_file: /tmp/play3.yaml
        debug: true
        state: started
        recreate: true
        configmap:
          - /tmp/envdata.yaml
      register: play5

    - name: Check info
      assert:
        that:
          - play5 is changed

    - name: Check if pod is running well
      containers.podman.podman_pod_info:
        name: web-deploy-pod-0
      register: info1

    - name: Check pod info
      assert:
        that:
          - info1['pods'][0]['State'] == 'Running'

  always:

    - name: Delete all pods leftovers from tests
      containers.podman.podman_pod:
        name: "{{ item }}"
        state: absent
      loop:
        - web-deploy
        - web-deploy-pod
        - web-deploy-pod-0
        - web-deploy-pod-1
        - web-deploy-pod-2

- name: Test idempotency for root pods
  include_tasks: root-play.yml
  vars:
    ansible_python_interpreter: "/usr/bin/python"
  args:
    apply:
      become: true
97  tests/integration/targets/podman_play/tasks/root-play.yml  Normal file
@@ -0,0 +1,97 @@
- name: Test podman rootful pod play
  block:

    - name: Discover cgroups version
      shell: podman info | grep cgroupVersion | awk {'print $2'}
      register: cgroups

    - name: Set cgroups version
      set_fact:
        cgroups_version: "{{ cgroups.stdout }}"

    - name: Delete all pods leftovers from tests
      containers.podman.podman_pod:
        name: "{{ item }}"
        state: absent
      loop:
        - web-deploy-root
        - web-deploy-root-pod
        - web-deploy-root-pod-0

    - name: Play kube file
      containers.podman.podman_play:
        kube_file: /tmp/play-root1.yaml
        state: started
      register: play1

    - name: Play same kube file again
      containers.podman.podman_play:
        kube_file: /tmp/play-root1.yaml
        state: started
      register: play2

    - name: Check info
      assert:
        that:
          - play1 is changed
          - play2 is not changed

    - name: Recreate play
      containers.podman.podman_play:
        kube_file: /tmp/play-root1.yaml
        state: started
        recreate: true
      register: play3

    - name: Check info
      assert:
        that:
          - play3 is changed

    - name: Check 'created' after 'started'
      containers.podman.podman_play:
        kube_file: /tmp/play-root1.yaml
        state: created
      register: play4

    - name: Check info
      assert:
        that:
          - play4 is not changed

    - name: Run with configmap
      containers.podman.podman_play:
        kube_file: /tmp/play-root3.yaml
        state: started
        recreate: true
        configmap:
          - /tmp/envdata.yaml
      register: play5

    - name: Check info
      assert:
        that:
          - play5 is changed

    - name: Check if pod is running well
      containers.podman.podman_pod_info:
        name: web-deploy-root-pod-0
      register: info1

    - name: Check pod info
      assert:
        that:
          - info1['pods'][0]['State'] == 'Running'

  always:

    - name: Delete all pods leftovers from tests
      containers.podman.podman_pod:
        name: "{{ item }}"
        state: absent
      loop:
        - web-deploy
        - web-deploy-pod
        - web-deploy-pod-0
        - web-deploy-pod-1
        - web-deploy-pod-2