Add test case for k8s cascading deletes (#55987)

* Add test case for non-cascading deletes

Deleting a DaemonSet does not delete associated pods,
even though it should

* Add coverage module when using pip

Otherwise tests seemingly fail
This commit is contained in:
Will Thames 2019-05-30 09:26:43 +10:00 committed by jctanner
parent 5008e1d479
commit ac1895453f
7 changed files with 176 additions and 66 deletions

View file

@@ -1,15 +1,15 @@
recreate_crd_default_merge_expectation: recreate_crd is not failed
wait_pod_metadata:
k8s_pod_metadata:
labels:
app: "{{ wait_pod_name }}"
app: "{{ k8s_pod_name }}"
wait_pod_spec:
k8s_pod_spec:
containers:
- image: "{{ wait_pod_image }}"
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ wait_pod_name }}"
command: "{{ wait_pod_command }}"
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
@@ -19,14 +19,14 @@ wait_pod_spec:
limits:
cpu: "100m"
memory: "100Mi"
ports: "{{ wait_pod_ports }}"
ports: "{{ k8s_pod_ports }}"
wait_pod_command: []
k8s_pod_command: []
wait_pod_ports: []
k8s_pod_ports: []
wait_pod_template:
metadata: "{{ wait_pod_metadata }}"
spec: "{{ wait_pod_spec }}"
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_openshift: yes

View file

@@ -0,0 +1,101 @@
- name: ensure that there are actually some nodes
k8s_facts:
kind: Node
register: nodes
- block:
- set_fact:
delete_namespace: delete
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ delete_namespace }}"
- name: add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: delete-daemonset
namespace: "{{ delete_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
k8s_pod_name: delete-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: check if pods exist
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_create
- name: assert that there are pods
assert:
that:
- pods_create.resources
- name: remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
namespace: "{{ delete_namespace }}"
state: absent
wait: yes
- name: show status of pods
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
- name: wait for background deletion
pause:
seconds: 30
- name: check if pods still exist
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_delete
- name: assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ delete_namespace }}"
state: absent
when: (nodes.resources | length) > 0

View file

@@ -4,6 +4,7 @@
# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: waiter.yml
- block:

View file

@@ -12,10 +12,11 @@
- pip:
name:
- openshift==0.8.1
- openshift==0.8.8
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes
virtualenv_site_packages: no
- include_tasks: validate_not_installed.yml
vars:
@@ -29,11 +30,12 @@
- pip:
name:
- openshift==0.8.1
- openshift==0.8.8
- kubernetes-validate==1.12.0
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes
virtualenv_site_packages: no
- include_tasks: validate_installed.yml
vars:
@@ -50,9 +52,10 @@
name:
- openshift==0.6.0
- kubernetes==6.0.0
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes
virtualenv_site_packages: no
- include_tasks: older_openshift_fail.yml
vars:
@@ -68,10 +71,11 @@
- pip:
name:
- openshift==0.8.1
- openshift==0.8.8
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes
virtualenv_site_packages: no
- include_tasks: full_test.yml
vars:

View file

@@ -25,9 +25,6 @@
- "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
- "'. This is required for append_hash.' in k8s_append_hash.msg"
# merge_type
- include_tasks: crd.yml
# validate
- name: attempt to use validate with older openshift
k8s:

View file

@@ -1,3 +1,9 @@
- python_requirements_facts:
dependencies:
- openshift
- kubernetes
- kubernetes-validate
- k8s:
definition:
apiVersion: v1

View file

@@ -6,6 +6,7 @@
- block:
- set_fact:
wait_namespace: wait
- name: ensure namespace exists
k8s:
definition:
@@ -20,14 +21,14 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ wait_pod_name }}"
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
vars:
wait_pod_name: wait-pod
wait_pod_image: alpine:3.8
wait_pod_command:
k8s_pod_name: wait-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- sleep
- "10000"
register: wait_pod
@@ -49,13 +50,13 @@
spec:
selector:
matchLabels:
app: "{{ wait_pod_name }}"
template: "{{ wait_pod_template }}"
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
wait_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
@@ -74,15 +75,15 @@
spec:
selector:
matchLabels:
app: "{{ wait_pod_name }}"
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ wait_pod_template }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
wait_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
register: update_ds_check_mode
- name: check that check_mode returned changed
@@ -101,15 +102,15 @@
spec:
selector:
matchLabels:
app: "{{ wait_pod_name }}"
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ wait_pod_template }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
wait_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
register: ds
- name: get updated pods
@@ -125,7 +126,7 @@
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":2")
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: add a crashing pod
k8s:
@@ -133,15 +134,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ wait_pod_name }}"
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_timeout: 30
vars:
wait_pod_name: wait-crash-pod
wait_pod_image: alpine:3.8
wait_pod_command:
k8s_pod_name: wait-crash-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: crash_pod
ignore_errors: yes
@@ -157,14 +158,14 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ wait_pod_name }}"
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_timeout: 30
vars:
wait_pod_name: wait-no-image-pod
wait_pod_image: i_made_this_up:and_this_too
k8s_pod_name: wait-no-image-pod
k8s_pod_image: i_made_this_up:and_this_too
register: no_image_pod
ignore_errors: yes
@@ -185,13 +186,13 @@
replicas: 3
selector:
matchLabels:
app: "{{ wait_pod_name }}"
template: "{{ wait_pod_template }}"
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
wait_pod_name: wait-deploy
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1
wait_pod_ports:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
@@ -215,13 +216,13 @@
replicas: 3
selector:
matchLabels:
app: "{{ wait_pod_name }}"
template: "{{ wait_pod_template }}"
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
wait_pod_name: wait-deploy
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
wait_pod_ports:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
@@ -277,14 +278,14 @@
namespace: "{{ wait_namespace }}"
spec:
selector:
app: "{{ wait_pod_name }}"
app: "{{ k8s_pod_name }}"
ports:
- port: 8080
targetPort: 8080
protocol: TCP
wait: yes
vars:
wait_pod_name: wait-deploy
k8s_pod_name: wait-deploy
register: service
- name: assert that waiting for service works
@@ -304,13 +305,13 @@
replicas: 3
selector:
matchLabels:
app: "{{ wait_pod_name }}"
template: "{{ wait_pod_template }}"
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
wait_pod_name: wait-crash-deploy
wait_pod_image: alpine:3.8
wait_pod_command:
k8s_pod_name: wait-crash-deploy
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: wait_crash_deploy
ignore_errors: yes