
Commit ad2b5db

Author: Amarthya Valija

Add NodePool STS permissions test

1 parent d3ba311

File tree: 1 file changed (+291, −0 lines)

pkg/e2e/verify/nodepool.go

Lines changed: 291 additions & 0 deletions
@@ -0,0 +1,291 @@
package verify

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	viper "github.com/openshift/osde2e/pkg/common/concurrentviper"
	"github.com/openshift/osde2e/pkg/common/config"
	"github.com/openshift/osde2e/pkg/common/expect"
	"github.com/openshift/osde2e/pkg/common/helper"
	"github.com/openshift/osde2e/pkg/common/label"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
)

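// This ordered suite creates a throwaway NodePool on a HyperShift cluster to
// verify that the cluster's STS role grants the EC2 permissions NodePool
// scaling depends on (e.g. ec2:RunInstances, ec2:CreateTags), then exercises
// the resulting nodes and a pair of negative API cases.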
var _ = ginkgo.Describe("[Suite: e2e] NodePool STS Permissions", ginkgo.Ordered, label.HyperShift, label.E2E, func() {
	var h *helper.H
	var client *resources.Resources
	var clusterNamespace string
	var testNodePoolName string
	var initialNodeCount int

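	// NodePool is a HyperShift CRD, so the suite drives it through the
	// dynamic client using this GroupVersionResource.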
	nodePoolGVR := schema.GroupVersionResource{
		Group:    "hypershift.openshift.io",
		Version:  "v1beta1",
		Resource: "nodepools",
	}

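	// BeforeAll gates the suite on HyperShift, records the starting node
	// count, and derives the hosted-cluster namespace from an existing
	// node's NodePool label.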
	ginkgo.BeforeAll(func() {
		if !viper.GetBool(config.Hypershift) {
			ginkgo.Skip("NodePool tests are only supported on HyperShift clusters")
		}

		h = helper.New()
		client = h.AsUser("")

		var nodeList corev1.NodeList
		expect.NoError(client.List(context.Background(), &nodeList))

		if len(nodeList.Items) == 0 {
			ginkgo.Skip("No nodes found - cannot run NodePool tests")
		}

		initialNodeCount = len(nodeList.Items)

		// Default worker pools carry a nodePool label of the form
		// "<namespace>-workers-<suffix>"; the prefix is the hosted-cluster
		// namespace. Requiring exactly two parts skips labels that lack the
		// "-workers-" separator.
		for _, node := range nodeList.Items {
			if nodePoolLabel, exists := node.Labels["hypershift.openshift.io/nodePool"]; exists {
				parts := strings.Split(nodePoolLabel, "-workers-")
				if len(parts) == 2 {
					clusterNamespace = parts[0]
					break
				}
			}
		}

		if clusterNamespace == "" {
			ginkgo.Skip("Could not determine cluster namespace from node labels")
		}

		testNodePoolName = fmt.Sprintf("test-%d", time.Now().Unix()%100000)
	})

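	// AfterAll is best-effort cleanup: delete the test NodePool (a NotFound
	// error just means it was never created) and any leftover
	// "nodepool-test-" pods in the test project.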
	ginkgo.AfterAll(func(ctx context.Context) {
		if testNodePoolName != "" {
			ginkgo.By("Cleaning up test NodePool")
			err := h.Dynamic().Resource(nodePoolGVR).Namespace(clusterNamespace).
				Delete(ctx, testNodePoolName, metav1.DeleteOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				ginkgo.GinkgoLogr.Error(err, "Failed to cleanup test NodePool", "name", testNodePoolName)
			}

			ginkgo.By("Cleaning up test pods")
			podList := &corev1.PodList{}
			err = client.WithNamespace(h.CurrentProject()).List(ctx, podList)
			if err == nil {
				for _, pod := range podList.Items {
					if strings.HasPrefix(pod.Name, "nodepool-test-") {
						client.Delete(ctx, &pod)
					}
				}
			}
		}
	})

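	// Creates a throwaway NodePool modeled on an existing one; the failure
	// message ties a create error back to the EC2 permissions this suite
	// exists to verify.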
	ginkgo.It("should successfully create NodePool", func(ctx context.Context) {
		ginkgo.By("Getting existing NodePool configuration")

		existingNodePools, err := h.Dynamic().Resource(nodePoolGVR).Namespace(clusterNamespace).List(ctx, metav1.ListOptions{})
		expect.NoError(err, "Failed to list existing NodePools")
		Expect(len(existingNodePools.Items)).To(BeNumerically(">", 0), "No existing NodePools found to reference")

		ginkgo.By("Creating test NodePool to validate STS permissions")

		// Reuse the subnet of an existing NodePool so the test pool lands in
		// a subnet the cluster is already using.
		refNodePool := existingNodePools.Items[0]
		var subnet string
		if s, found, err := unstructured.NestedString(refNodePool.Object, "spec", "platform", "aws", "subnet"); found && err == nil {
			subnet = s
		}

		nodePoolSpec := map[string]interface{}{
			"apiVersion": "hypershift.openshift.io/v1beta1",
			"kind":       "NodePool",
			"metadata": map[string]interface{}{
				"name":      testNodePoolName,
				"namespace": clusterNamespace,
			},
			"spec": map[string]interface{}{
				"clusterName": clusterNamespace,
				// Unstructured content should hold int64, not int, so the
				// object survives apimachinery deep-copies.
				"replicas": int64(1),
				"management": map[string]interface{}{
					"autoRepair":  true,
					"upgradeType": "Replace",
				},
				"platform": map[string]interface{}{
					"aws": map[string]interface{}{
						"instanceType": "m5.large",
					},
				},
			},
		}

		if subnet != "" {
			spec := nodePoolSpec["spec"].(map[string]interface{})
			platform := spec["platform"].(map[string]interface{})
			aws := platform["aws"].(map[string]interface{})
			aws["subnet"] = subnet
		}

		nodePoolObj := &unstructured.Unstructured{Object: nodePoolSpec}
		_, err = h.Dynamic().Resource(nodePoolGVR).Namespace(clusterNamespace).Create(ctx, nodePoolObj, metav1.CreateOptions{})
		expect.NoError(err, "NodePool creation failed - STS permissions (ec2:RunInstances, ec2:CreateTags) missing")
	})

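	// Object creation only proves the apiserver accepted the NodePool; this
	// spec waits up to 20 minutes for real EC2-backed nodes to join, then
	// checks their labels, provider IDs, and addresses.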
	ginkgo.It("should provision nodes with correct labels", func(ctx context.Context) {
		ginkgo.By("Waiting for new nodes to be provisioned")

		var newNodes []corev1.Node
		err := wait.For(func(ctx context.Context) (bool, error) {
			var nodeList corev1.NodeList
			err := client.List(ctx, &nodeList)
			if err != nil {
				return false, err
			}

			newNodes = nil
			if len(nodeList.Items) > initialNodeCount {
				for _, node := range nodeList.Items {
					if nodePoolLabel, exists := node.Labels["hypershift.openshift.io/nodePool"]; exists {
						if strings.Contains(nodePoolLabel, testNodePoolName) && isNodeReady(node) {
							newNodes = append(newNodes, node)
						}
					}
				}
			}
			return len(newNodes) > 0, nil
		}, wait.WithTimeout(20*time.Minute), wait.WithInterval(30*time.Second))

		expect.NoError(err, "NodePool failed to provision nodes - STS permissions (ec2:RunInstances) may be missing")
		Expect(len(newNodes)).To(BeNumerically(">", 0), "No new nodes found")

		ginkgo.By("Validating node has proper AWS integration")

		for _, node := range newNodes {
			nodePoolLabel, exists := node.Labels["hypershift.openshift.io/nodePool"]
			Expect(exists).To(BeTrue(), "Node %s missing NodePool label", node.Name)
			Expect(nodePoolLabel).To(ContainSubstring(testNodePoolName),
				"Node %s has incorrect NodePool label", node.Name)

			Expect(node.Spec.ProviderID).To(HavePrefix("aws://"),
				"Node %s should have AWS provider ID - ec2:DescribeInstances permission may be missing", node.Name)

			hasInternalIP := false
			for _, addr := range node.Status.Addresses {
				if addr.Type == corev1.NodeInternalIP {
					hasInternalIP = true
					Expect(addr.Address).To(MatchRegexp(`^10\.`),
						"Node %s should have VPC internal IP", node.Name)
					break
				}
			}
			Expect(hasInternalIP).To(BeTrue(), "Node %s should have internal IP", node.Name)
		}
	})

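	// Ready nodes can still be unschedulable in practice, so this spec pins
	// a short-lived pod to the pool via the nodePool label
	// ("<namespace>-<nodepool name>") and waits for it to succeed.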
	ginkgo.It("should schedule workloads on new NodePool nodes", func(ctx context.Context) {
		ginkgo.By("Creating test workload targeted at NodePool")

		pod := &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "nodepool-test-",
				Namespace:    h.CurrentProject(),
			},
			Spec: corev1.PodSpec{
				NodeSelector: map[string]string{
					"hypershift.openshift.io/nodePool": fmt.Sprintf("%s-%s", clusterNamespace, testNodePoolName),
				},
				Containers: []corev1.Container{{
					Name:    "test",
					Image:   "registry.access.redhat.com/ubi8/ubi-minimal",
					Command: []string{"/bin/sh", "-c", "echo 'NodePool workload test successful' && sleep 5"},
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceCPU:    resource.MustParse("100m"),
							corev1.ResourceMemory: resource.MustParse("128Mi"),
						},
					},
				}},
				RestartPolicy: corev1.RestartPolicyNever,
			},
		}

		expect.NoError(client.Create(ctx, pod), "Failed to create test pod")

		ginkgo.By("Waiting for workload to complete successfully")

		err := wait.For(conditions.New(client).PodPhaseMatch(pod, corev1.PodSucceeded), wait.WithTimeout(5*time.Minute))
		expect.NoError(err, "Workload scheduling failed on NodePool")

		expect.NoError(client.Delete(ctx, pod), "Failed to delete test pod")
	})

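	// Negative case: creating a second NodePool with the name used in the
	// first spec should be rejected by the apiserver with AlreadyExists.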
	ginkgo.It("should reject duplicate NodePool names", func(ctx context.Context) {
		ginkgo.By("Testing duplicate NodePool creation")

		duplicateNodePoolSpec := map[string]interface{}{
			"apiVersion": "hypershift.openshift.io/v1beta1",
			"kind":       "NodePool",
			"metadata": map[string]interface{}{
				"name":      testNodePoolName,
				"namespace": clusterNamespace,
			},
			"spec": map[string]interface{}{
				"clusterName": clusterNamespace,
				"replicas":    int64(1),
				"platform": map[string]interface{}{
					"aws": map[string]interface{}{
						"instanceType": "m5.large",
					},
				},
			},
		}

		duplicateNodePoolObj := &unstructured.Unstructured{Object: duplicateNodePoolSpec}
		_, err := h.Dynamic().Resource(nodePoolGVR).Namespace(clusterNamespace).Create(ctx, duplicateNodePoolObj, metav1.CreateOptions{})
		Expect(err).To(HaveOccurred(), "Should fail when creating NodePool with duplicate name")
	})

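	// Negative case: a Get on a name that was never created should come
	// back as a NotFound API error.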
	ginkgo.It("should reject operations on non-existent NodePool", func(ctx context.Context) {
		ginkgo.By("Testing access to non-existent NodePool")

		// The dynamic client returns the fetched object; only the error
		// matters for a negative lookup.
		_, err := h.Dynamic().Resource(nodePoolGVR).Namespace(clusterNamespace).
			Get(ctx, "non-existent-nodepool", metav1.GetOptions{})

		Expect(err).To(HaveOccurred(), "Getting non-existent NodePool should fail")
		Expect(apierrors.IsNotFound(err)).To(BeTrue(), "Should return NotFound error")
	})
})

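// isNodeReady reports whether the node's NodeReady condition is True.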
func isNodeReady(node corev1.Node) bool {
	for _, condition := range node.Status.Conditions {
		if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
