Skip to content

Commit 314c6a0

Browse files
committed
feat: upgrade to Cilium kube-proxy replacement
1 parent d5f3ea6 commit 314c6a0

File tree

7 files changed

+390
-7
lines changed

7 files changed

+390
-7
lines changed
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
// Copyright 2025 Nutanix. All rights reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package utils
5+
6+
import (
7+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
8+
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
9+
)
10+
11+
// SkipKubeProxy returns true if the cluster should skip kube proxy installation.
12+
func SkipKubeProxy(cluster *clusterv1.Cluster) bool {
13+
if cluster.Spec.Topology != nil {
14+
_, isSkipKubeProxy := cluster.Spec.Topology.ControlPlane.Metadata.Annotations[controlplanev1.SkipKubeProxyAnnotation]
15+
return isSkipKubeProxy
16+
}
17+
return false
18+
}

pkg/handlers/generic/lifecycle/addons/helmaddon.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ func waitToBeReady(
231231
if obj.Generation != obj.Status.ObservedGeneration {
232232
return false, nil
233233
}
234-
return conditions.IsTrue(obj, caaphv1.HelmReleaseProxiesReadyCondition), nil
234+
return conditions.IsTrue(obj, clusterv1.ReadyCondition), nil
235235
},
236236
Interval: 5 * time.Second,
237237
Timeout: 30 * time.Second,
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
// Copyright 2025 Nutanix. All rights reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package addons
5+
6+
import (
7+
"context"
8+
9+
"github.com/go-logr/logr"
10+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
11+
)
12+
13+
type TestStrategy struct {
14+
err error
15+
}
16+
17+
func NewTestStrategy(err error) *TestStrategy {
18+
return &TestStrategy{err: err}
19+
}
20+
21+
func (s TestStrategy) Apply(
22+
ctx context.Context,
23+
cluster *clusterv1.Cluster,
24+
defaultsNamespace string,
25+
log logr.Logger,
26+
) error {
27+
return s.err
28+
}

pkg/handlers/generic/lifecycle/cni/cilium/handler.go

Lines changed: 127 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,15 @@ package cilium
66
import (
77
"context"
88
"fmt"
9+
"time"
910

11+
"github.com/go-logr/logr"
1012
"github.com/spf13/pflag"
13+
appsv1 "k8s.io/api/apps/v1"
1114
corev1 "k8s.io/api/core/v1"
1215
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1316
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
17+
"sigs.k8s.io/cluster-api/controllers/remote"
1418
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
1519
ctrl "sigs.k8s.io/controller-runtime"
1620
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -19,10 +23,12 @@ import (
1923
commonhandlers "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers"
2024
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/lifecycle"
2125
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables"
26+
capiutils "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/utils"
2227
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/lifecycle/addons"
2328
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/lifecycle/config"
2429
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/options"
2530
handlersutils "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/utils"
31+
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/wait"
2632
)
2733

2834
type CNIConfig struct {
@@ -222,7 +228,8 @@ func (c *CiliumCNI) apply(
222228
c.client,
223229
helmChart,
224230
).
225-
WithValueTemplater(templateValues)
231+
WithValueTemplater(templateValues).
232+
WithDefaultWaiter()
226233
case "":
227234
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
228235
resp.SetMessage("strategy not specified for Cilium CNI addon")
@@ -232,11 +239,129 @@ func (c *CiliumCNI) apply(
232239
return
233240
}
234241

235-
if err := strategy.Apply(ctx, cluster, targetNamespace, log); err != nil {
242+
if err := runApply(ctx, c.client, cluster, strategy, targetNamespace, log); err != nil {
236243
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
237244
resp.SetMessage(err.Error())
238245
return
239246
}
240247

241248
resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
242249
}
250+
251+
func runApply(
252+
ctx context.Context,
253+
client ctrlclient.Client,
254+
cluster *clusterv1.Cluster,
255+
strategy addons.Applier,
256+
targetNamespace string,
257+
log logr.Logger,
258+
) error {
259+
if err := strategy.Apply(ctx, cluster, targetNamespace, log); err != nil {
260+
return err
261+
}
262+
263+
// If skip kube-proxy is not set, return early.
264+
// Otherwise, wait for Cilium to be rolled out and then cleanup kube-proxy if installed.
265+
if !capiutils.SkipKubeProxy(cluster) {
266+
return nil
267+
}
268+
269+
log.Info(
270+
fmt.Sprintf("Waiting for Cilium to be ready for cluster %s", ctrlclient.ObjectKeyFromObject(cluster)),
271+
)
272+
if err := waitForCiliumToBeReady(ctx, client, cluster); err != nil {
273+
return fmt.Errorf("failed to wait for Cilium to be ready: %w", err)
274+
}
275+
276+
log.Info(
277+
fmt.Sprintf("Cleaning up kube-proxy for cluster %s", ctrlclient.ObjectKeyFromObject(cluster)),
278+
)
279+
if err := cleanupKubeProxy(ctx, client, cluster); err != nil {
280+
return fmt.Errorf("failed to cleanup kube-proxy: %w", err)
281+
}
282+
283+
return nil
284+
}
285+
286+
const (
	// kubeProxyName is the name shared by the kube-proxy DaemonSet and ConfigMap.
	kubeProxyName = "kube-proxy"
	// kubeProxyNamespace is the namespace the kube-proxy resources live in.
	kubeProxyNamespace = "kube-system"
)
290+
291+
func waitForCiliumToBeReady(
292+
ctx context.Context,
293+
c ctrlclient.Client,
294+
cluster *clusterv1.Cluster,
295+
) error {
296+
remoteClient, err := remote.NewClusterClient(
297+
ctx,
298+
"",
299+
c,
300+
ctrlclient.ObjectKeyFromObject(cluster),
301+
)
302+
if err != nil {
303+
return fmt.Errorf("error creating remote cluster client: %w", err)
304+
}
305+
306+
ds := &appsv1.DaemonSet{
307+
ObjectMeta: metav1.ObjectMeta{
308+
Name: defaultCiliumReleaseName,
309+
Namespace: defaultCiliumNamespace,
310+
},
311+
}
312+
if err := wait.ForObject(
313+
ctx,
314+
wait.ForObjectInput[*appsv1.DaemonSet]{
315+
Reader: remoteClient,
316+
Target: ds.DeepCopy(),
317+
Check: func(_ context.Context, obj *appsv1.DaemonSet) (bool, error) {
318+
return obj.Status.NumberAvailable == obj.Status.DesiredNumberScheduled && obj.Status.NumberUnavailable == 0, nil
319+
},
320+
Interval: 1 * time.Second,
321+
Timeout: 30 * time.Second,
322+
},
323+
); err != nil {
324+
return fmt.Errorf(
325+
"failed to wait for DaemonSet %s to be Ready: %w",
326+
ctrlclient.ObjectKeyFromObject(ds),
327+
err,
328+
)
329+
}
330+
331+
return nil
332+
}
333+
334+
// cleanupKubeProxy cleans up kube-proxy DaemonSet and ConfigMap on the remote cluster when kube-proxy is disabled.
335+
func cleanupKubeProxy(ctx context.Context, c ctrlclient.Client, cluster *clusterv1.Cluster) error {
336+
remoteClient, err := remote.NewClusterClient(
337+
ctx,
338+
"",
339+
c,
340+
ctrlclient.ObjectKeyFromObject(cluster),
341+
)
342+
if err != nil {
343+
return fmt.Errorf("error creating remote cluster client: %w", err)
344+
}
345+
346+
objs := []ctrlclient.Object{
347+
&appsv1.DaemonSet{
348+
ObjectMeta: metav1.ObjectMeta{
349+
Name: kubeProxyName,
350+
Namespace: kubeProxyNamespace,
351+
},
352+
},
353+
&corev1.ConfigMap{
354+
ObjectMeta: metav1.ObjectMeta{
355+
Name: kubeProxyName,
356+
Namespace: kubeProxyNamespace,
357+
},
358+
},
359+
}
360+
for _, obj := range objs {
361+
if err := ctrlclient.IgnoreNotFound(remoteClient.Delete(ctx, obj)); err != nil {
362+
return fmt.Errorf("failed to delete %s/%s: %w", obj.GetNamespace(), obj.GetName(), err)
363+
}
364+
}
365+
366+
return nil
367+
}

0 commit comments

Comments
 (0)