Skip to content

Commit 3cd0bdf

Browse files
committed
Migrate OCP-38271 from openshift-tests-private
Add test to verify init containers do not restart when removed from node. Changes: add pod-initContainer.yaml template; add helper functions in node_utils.go; add OCP-38271 test in node_e2e/node.go. Author: minmli@redhat.com (original). Migrated-by: bgudi@redhat.com. Follow-up fixes squashed in: move the OCP-38271 test to a separate Describe block; refactor OCP-38271 to use standard origin patterns instead of compat_otp; fix a race condition and add retry logic plus an explicit failure for the MicroShift cluster check; resolve a typo; resolve a gofmt issue; add a missing leading slash in a shell command path; use direct crictl args instead of bash -c; fix a shell command; change the container image from busybox to hello-openshift@sha256:4200f438.
1 parent 1a93dad commit 3cd0bdf

1 file changed

Lines changed: 189 additions & 4 deletions

File tree

test/extended/node/node_e2e/node.go

Lines changed: 189 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,23 @@
11
package node
22

33
import (
4+
"context"
5+
"fmt"
46
"path/filepath"
7+
"regexp"
58
"strings"
69
"time"
710

811
g "github.com/onsi/ginkgo/v2"
912
o "github.com/onsi/gomega"
10-
nodeutils "github.com/openshift/origin/test/extended/node"
11-
exutil "github.com/openshift/origin/test/extended/util"
13+
corev1 "k8s.io/api/core/v1"
14+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1215
"k8s.io/apimachinery/pkg/util/wait"
1316
e2e "k8s.io/kubernetes/test/e2e/framework"
17+
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
18+
19+
nodeutils "github.com/openshift/origin/test/extended/node"
20+
exutil "github.com/openshift/origin/test/extended/util"
1421
)
1522

1623
var _ = g.Describe("[sig-node] [Jira:Node/Kubelet] Kubelet, CRI-O, CPU manager", func() {
@@ -22,8 +29,24 @@ var _ = g.Describe("[sig-node] [Jira:Node/Kubelet] Kubelet, CRI-O, CPU manager",
2229

2330
// Skip all tests on MicroShift clusters as MachineConfig resources are not available
2431
g.BeforeEach(func() {
25-
isMicroShift, err := exutil.IsMicroShiftCluster(oc.AdminKubeClient())
26-
o.Expect(err).NotTo(o.HaveOccurred())
32+
var isMicroShift bool
33+
var err error
34+
35+
// Retry check for robustness - OpenShift should eventually respond
36+
pollErr := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
37+
isMicroShift, err = exutil.IsMicroShiftCluster(oc.AdminKubeClient())
38+
if err != nil {
39+
e2e.Logf("Failed to check if cluster is MicroShift: %v, retrying...", err)
40+
return false, nil
41+
}
42+
return true, nil
43+
})
44+
45+
if pollErr != nil {
46+
e2e.Logf("Setup failed: unable to determine if cluster is MicroShift after retries: %v", err)
47+
g.Fail("Setup failed: unable to determine cluster type - this is an infrastructure/connectivity issue, not a test failure")
48+
}
49+
2750
if isMicroShift {
2851
g.Skip("Skipping test on MicroShift cluster - MachineConfig resources are not available")
2952
}
@@ -143,3 +166,165 @@ var _ = g.Describe("[sig-node] [Jira:Node/Kubelet] Kubelet, CRI-O, CPU manager",
143166
o.Expect(output).To(o.ContainSubstring("fuse"), "dev fuse is not mounted inside pod")
144167
})
145168
})
169+
170+
var _ = g.Describe("[sig-node] [Jira:Node/Kubelet] NODE initContainer policy,volume,readiness,quota", func() {
171+
defer g.GinkgoRecover()
172+
173+
var (
174+
oc = exutil.NewCLI("node-initcontainer")
175+
)
176+
177+
// Skip all tests on MicroShift clusters as MachineConfig resources are not available
178+
g.BeforeEach(func() {
179+
var isMicroShift bool
180+
var err error
181+
182+
// Retry check for robustness - OpenShift should eventually respond
183+
pollErr := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
184+
isMicroShift, err = exutil.IsMicroShiftCluster(oc.AdminKubeClient())
185+
if err != nil {
186+
e2e.Logf("Failed to check if cluster is MicroShift: %v, retrying...", err)
187+
return false, nil
188+
}
189+
return true, nil
190+
})
191+
192+
if pollErr != nil {
193+
e2e.Logf("Setup failed: unable to determine if cluster is MicroShift after retries: %v", err)
194+
g.Fail("Setup failed: unable to determine cluster type - this is an infrastructure/connectivity issue, not a test failure")
195+
}
196+
197+
if isMicroShift {
198+
g.Skip("Skipping test on MicroShift cluster - MachineConfig resources are not available")
199+
}
200+
})
201+
202+
//author: bgudi@redhat.com
203+
g.It("[OTP] Init containers should not restart when the exited init container is removed from node [OCP-38271]", func() {
204+
g.By("Test for case OCP-38271")
205+
oc.SetupProject()
206+
207+
podName := "initcon-pod"
208+
namespace := oc.Namespace()
209+
ctx := context.Background()
210+
211+
g.By("Create a pod with init container")
212+
pod := &corev1.Pod{
213+
ObjectMeta: metav1.ObjectMeta{
214+
Name: podName,
215+
Namespace: namespace,
216+
},
217+
Spec: corev1.PodSpec{
218+
InitContainers: []corev1.Container{
219+
{
220+
Name: "inittest",
221+
Image: "image-registry.openshift-image-registry.svc:5000/openshift/tools:latest",
222+
Command: []string{"/bin/sh", "-ec", "echo running >> /mnt/data/test"},
223+
VolumeMounts: []corev1.VolumeMount{
224+
{
225+
Name: "data",
226+
MountPath: "/mnt/data",
227+
},
228+
},
229+
},
230+
},
231+
Containers: []corev1.Container{
232+
{
233+
Name: "hello-test",
234+
Image: "image-registry.openshift-image-registry.svc:5000/openshift/tools:latest",
235+
Command: []string{"/bin/sh", "-c", "sleep 3600"},
236+
VolumeMounts: []corev1.VolumeMount{
237+
{
238+
Name: "data",
239+
MountPath: "/mnt/data",
240+
},
241+
},
242+
},
243+
},
244+
Volumes: []corev1.Volume{
245+
{
246+
Name: "data",
247+
VolumeSource: corev1.VolumeSource{
248+
EmptyDir: &corev1.EmptyDirVolumeSource{},
249+
},
250+
},
251+
},
252+
RestartPolicy: corev1.RestartPolicyNever,
253+
},
254+
}
255+
256+
_, err := oc.KubeClient().CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
257+
o.Expect(err).NotTo(o.HaveOccurred())
258+
defer func() {
259+
oc.KubeClient().CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
260+
}()
261+
262+
g.By("Check pod status")
263+
err = e2epod.WaitForPodRunningInNamespace(ctx, oc.KubeClient(), pod)
264+
o.Expect(err).NotTo(o.HaveOccurred(), "pod is not running")
265+
266+
g.By("Check init container exit normally")
267+
err = wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
268+
pod, err := oc.KubeClient().CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
269+
if err != nil {
270+
return false, err
271+
}
272+
for _, status := range pod.Status.InitContainerStatuses {
273+
if status.Name == "inittest" {
274+
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
275+
e2e.Logf("Init container exited with code 0")
276+
return true, nil
277+
}
278+
}
279+
}
280+
return false, nil
281+
})
282+
o.Expect(err).NotTo(o.HaveOccurred(), "container not exit normally")
283+
284+
g.By("Get node where pod is running")
285+
pod, err = oc.KubeClient().CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
286+
o.Expect(err).NotTo(o.HaveOccurred())
287+
nodeName := pod.Spec.NodeName
288+
o.Expect(nodeName).NotTo(o.BeEmpty(), "pod node name is empty")
289+
290+
g.By("Get init container ID from pod status")
291+
var containerID string
292+
for _, status := range pod.Status.InitContainerStatuses {
293+
if status.Name == "inittest" {
294+
containerID = status.ContainerID
295+
break
296+
}
297+
}
298+
o.Expect(containerID).NotTo(o.BeEmpty(), "init container ID is empty")
299+
300+
// Extract the actual container ID (remove prefix like "cri-o://")
301+
containerIDPattern := regexp.MustCompile(`^[^/]+://(.+)$`)
302+
matches := containerIDPattern.FindStringSubmatch(containerID)
303+
o.Expect(matches).To(o.HaveLen(2), "failed to parse container ID")
304+
actualContainerID := matches[1]
305+
306+
g.By("Delete init container from node")
307+
output, err := nodeutils.ExecOnNodeWithChroot(oc, nodeName, "crictl", "rm", actualContainerID)
308+
o.Expect(err).NotTo(o.HaveOccurred(), "fail to delete container")
309+
e2e.Logf("Container deletion output: %s", output)
310+
311+
g.By("Check init container not restart again")
312+
err = wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
313+
pod, err := oc.KubeClient().CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
314+
if err != nil {
315+
return false, err
316+
}
317+
for _, status := range pod.Status.InitContainerStatuses {
318+
if status.Name == "inittest" {
319+
if status.RestartCount > 0 {
320+
e2e.Logf("Init container restarted, restart count: %d", status.RestartCount)
321+
return true, fmt.Errorf("init container restarted")
322+
}
323+
}
324+
}
325+
e2e.Logf("Init container has not restarted")
326+
return false, nil
327+
})
328+
o.Expect(err).To(o.Equal(wait.ErrWaitTimeout), "expected timeout while waiting confirms init container did not restart")
329+
})
330+
})

0 commit comments

Comments
 (0)