From 3d3b1e2c40c22eb84d4e853454b07945c77f1603 Mon Sep 17 00:00:00 2001
From: Petr Muller
Date: Thu, 21 Aug 2025 15:14:20 +0200
Subject: [PATCH] `adm upgrade status`: capture testcase for OCPBUGS-60768

Mock test data captured from
https://prow.ci.openshift.org/view/gs/test-platform-results/logs/periodic-ci-openshift-release-master-ci-4.20-e2e-azure-ovn-upgrade/1957867219337613312
---
 .../status/examples/ocpbugs-60768-co.yaml     | 5779 +++++++++++++++++
 .../status/examples/ocpbugs-60768-cv.yaml     |  233 +
 .../status/examples/ocpbugs-60768-mc.yaml     | 4303 ++++++++++++
 .../status/examples/ocpbugs-60768-mcp.yaml    |  434 ++
 .../status/examples/ocpbugs-60768-node.yaml   | 2880 ++++
 .../examples/ocpbugs-60768.detailed-output    |   55 +
 .../status/examples/ocpbugs-60768.output      |   35 +
 7 files changed, 13719 insertions(+)
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-co.yaml
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-cv.yaml
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mc.yaml
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mcp.yaml
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-node.yaml
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.detailed-output
 create mode 100644 pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.output

diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-co.yaml b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-co.yaml
new file mode 100644
index 0000000000..72d71d320d
--- /dev/null
+++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-co.yaml
@@ -0,0 +1,5779 @@
+---
+apiVersion: config.openshift.io/v1
+items:
+- apiVersion: config.openshift.io/v1
+  kind: ClusterOperator
+  metadata:
+    annotations:
+      exclude.release.openshift.io/internal-openshift-hosted: "true"
+      include.release.openshift.io/self-managed-high-availability: "true"
+      include.release.openshift.io/single-node-developer: "true"
+    creationTimestamp: "2025-08-19T18:46:56Z"
+    generation: 1
+    managedFields:
+    - apiVersion: config.openshift.io/v1
+      fieldsType: FieldsV1
+      fieldsV1:
+        f:metadata:
+          f:annotations:
+            .: {}
+            f:exclude.release.openshift.io/internal-openshift-hosted: {}
+            f:include.release.openshift.io/self-managed-high-availability: {}
+            f:include.release.openshift.io/single-node-developer: {}
+          f:ownerReferences:
+            .: {}
+            k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {}
+        f:spec: {}
+      manager: cluster-version-operator
+      operation: Update
+      time: "2025-08-19T18:46:56Z"
+    - apiVersion: config.openshift.io/v1
+      fieldsType: FieldsV1
+      fieldsV1:
+        f:status:
+          .: {}
+          f:extension: {}
+          f:relatedObjects: {}
+      manager: cluster-version-operator
+      operation: Update
+      subresource: status
+      time: "2025-08-19T18:46:56Z"
+    - apiVersion: config.openshift.io/v1
+      fieldsType: FieldsV1
+      fieldsV1:
+        f:status:
+          f:conditions:
+            .: {}
+            k:{"type":"Available"}:
+              .: {}
+              f:lastTransitionTime: {}
+              f:message: {}
+              f:reason: {}
+              f:status: {}
+              f:type: {}
+            k:{"type":"Degraded"}:
+              .: {}
+              f:lastTransitionTime: {}
+              f:message: {}
+              f:reason: {}
+              f:status: {}
+              f:type: {}
+            k:{"type":"EvaluationConditionsDetected"}:
+              .: {}
+              f:lastTransitionTime: {}
+              f:reason: {}
+              f:status: {}
+              f:type: {}
+            k:{"type":"Progressing"}:
+              .: {}
+              f:lastTransitionTime: {}
+              f:message: {}
+              f:reason: {}
+              f:status: {}
+              f:type: {}
+            k:{"type":"Upgradeable"}:
+              .: {}
+              f:lastTransitionTime: {}
+              f:message: {}
+              f:reason: {}
+              f:status: {}
+              f:type: {}
+          f:versions: {}
+      manager: 
authentication-operator + operation: Update + subresource: status + time: "2025-08-19T21:36:13Z" + name: authentication + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "95784" + uid: 7fa6a26f-9392-4160-a620-bdfceaa7273d + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:36:13Z" + message: |- + APIServerDeploymentDegraded: 1 of 3 requested instances are unavailable for apiserver.openshift-oauth-apiserver () + OAuthServerDeploymentDegraded: 1 of 3 requested instances are unavailable for oauth-openshift.openshift-authentication () + reason: APIServerDeployment_UnavailablePod::OAuthServerDeployment_UnavailablePod + status: "True" + type: Degraded + - lastTransitionTime: "2025-08-19T20:23:53Z" + message: 'AuthenticatorCertKeyProgressing: All is well' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:17:10Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:21Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: authentications + - group: config.openshift.io + name: cluster + resource: authentications + - group: config.openshift.io + name: cluster + resource: infrastructures + - group: config.openshift.io + name: cluster + resource: oauths + - group: route.openshift.io + name: oauth-openshift + namespace: openshift-authentication + resource: routes + - group: "" + name: oauth-openshift + namespace: openshift-authentication + resource: services + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-authentication + resource: namespaces + - group: "" + name: openshift-authentication-operator + resource: namespaces + - group: "" + name: openshift-ingress + resource: namespaces + - group: "" + name: openshift-oauth-apiserver + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: oauth-apiserver + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: oauth-openshift + version: 4.20.0-0.nightly-2025-08-19-180353_openshift +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: baremetal + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: 
"2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Disabled"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:versions: {} + manager: cluster-baremetal-operator + operation: Update + subresource: status + time: "2025-08-19T20:16:00Z" + name: baremetal + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "54404" + uid: f31f86e8-eca0-47ec-960e-e58192f0b8ca + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:40Z" + reason: WaitingForProvisioningCR + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:55:40Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:55:40Z" + message: Waiting for Provisioning CR on BareMetal Platform + reason: WaitingForProvisioningCR + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:40Z" + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:55:40Z" + status: "False" + type: Disabled + extension: null + relatedObjects: + - group: "" + name: openshift-machine-api + resource: namespaces + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: baremetalhosts + - group: metal3.io + name: "" + resource: provisioning + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: hostfirmwaresettings + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: firmwareschemas + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: preprovisioningimages + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: bmceventsubscriptions + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: hostfirmwarecomponents + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: dataimages + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: hostupdatepolicies + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: CloudControllerManager + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - 
apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"CloudConfigControllerAvailable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"CloudConfigControllerDegraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"TrustedCABundleControllerControllerAvailable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"TrustedCABundleControllerControllerDegraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: config-sync-controllers + operation: Update + subresource: status + time: "2025-08-19T20:11:12Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + manager: cluster-controller-manager-operator + operation: Update + subresource: status + time: "2025-08-19T20:11:21Z" + name: cloud-controller-manager + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "52787" + uid: 90fc36f7-c451-4c63-be28-6116f58372ce + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:50:19Z" + message: Trusted CA Bundle Controller works as expected + reason: AsExpected + status: "True" + type: TrustedCABundleControllerControllerAvailable + - lastTransitionTime: "2025-08-19T18:50:19Z" + message: Trusted CA Bundle Controller works as expected + reason: AsExpected + status: "False" + type: TrustedCABundleControllerControllerDegraded + - lastTransitionTime: "2025-08-19T18:50:19Z" + message: Cloud Config Controller works as expected + reason: AsExpected + status: "True" + type: CloudConfigControllerAvailable + - lastTransitionTime: "2025-08-19T18:50:19Z" + message: Cloud Config Controller works as expected + reason: AsExpected + status: "False" + type: CloudConfigControllerDegraded + - lastTransitionTime: "2025-08-19T18:50:21Z" + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:50:21Z" + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:50:21Z" + message: Cluster Cloud Controller Manager Operator is available at 4.20.0-0.nightly-2025-08-19-180353 + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:50:21Z" + reason: AsExpected + status: "False" + type: Degraded + extension: null + relatedObjects: + - group: "" + name: openshift-cloud-controller-manager-operator + resource: namespaces + - group: config.openshift.io + name: cloud-controller-manager + resource: clusteroperators + - group: "" + name: 
openshift-cloud-controller-manager + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: CloudCredential + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cloud-credential-operator + operation: Update + subresource: status + time: "2025-08-19T20:17:09Z" + name: cloud-credential + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57536" + uid: 18abc99a-4d68-4b99-8fa2-cdcf14652a74 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:48:25Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:48:25Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:17:09Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:48:25Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: cloudcredential.openshift.io + name: aws-ebs-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: azure-disk-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: azure-file-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: cloud-credential-operator-gcp-ro-creds + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: cloud-credential-operator-iam-ro + namespace: openshift-cloud-credential-operator + resource: 
credentialsrequests + - group: operator.openshift.io + name: cluster + resource: cloudcredentials + - group: cloudcredential.openshift.io + name: ibm-powervs-block-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: ibm-vpc-block-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: manila-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: manila-csi-drivers + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-azure-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: "" + name: openshift-cloud-credential-operator + resource: namespaces + - group: cloudcredential.openshift.io + name: openshift-cloud-network-config-controller-aws + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-cloud-network-config-controller-azure + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-cloud-network-config-controller-gcp + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-cloud-network-config-controller-openstack + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-gcp-ccm + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-gcp-pd-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-ibm-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry-azure + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry-gcs + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry-ibmcos + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry-ibmcos-powervs + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-image-registry-openstack + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-ingress + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-ingress-azure + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-ingress-gcp + namespace: 
openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-aws + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-azure + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-gcp + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-ibmcloud + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-nutanix + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-openstack + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-powervs + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-machine-api-vsphere + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-network + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-nutanix-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-openstack-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-powervs-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-vmware-vsphere-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-vsphere-cloud-controller-manager + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openshift-vsphere-problem-detector + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: openstack-cinder-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + - group: cloudcredential.openshift.io + name: ovirt-csi-driver-operator + namespace: openshift-cloud-credential-operator + resource: credentialsrequests + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + 
f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-autoscaler-operator + operation: Update + subresource: status + time: "2025-08-19T20:19:40Z" + name: cluster-autoscaler + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "61486" + uid: e452ef34-d1da-454a-9ee0-1e12d419d30f + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:40Z" + message: at version 4.20.0-0.nightly-2025-08-19-180353 + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T20:19:40Z" + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:08:11Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:55:40Z" + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: autoscaling.openshift.io + name: "" + namespace: openshift-machine-api + resource: machineautoscalers + - group: autoscaling.openshift.io + name: "" + namespace: openshift-machine-api + resource: clusterautoscalers + - group: "" + name: openshift-machine-api + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: 
{} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:versions: {} + manager: cluster-config-operator + operation: Update + subresource: status + time: "2025-08-19T19:33:43Z" + name: config-operator + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "40837" + uid: 669f5b59-cda1-412b-a9a4-dd95dfcdb1f3 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:27Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:54:27Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:54:27Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:27Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:27Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: configs + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-operator + resource: namespaces + versions: + - name: feature-gates + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: Console + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} 
+ k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: console + operation: Update + subresource: status + time: "2025-08-19T20:17:40Z" + name: console + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "59178" + uid: 8f79e4ad-cf5a-4249-9662-0811ef858f1f + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:11:07Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:17:40Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:11:07Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T19:07:09Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:47Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: console.openshift.io + name: monitoring-plugin + resource: consoleplugins + - group: "" + name: openshift-monitoring + resource: namespaces + - group: console.openshift.io + name: networking-console-plugin + resource: consoleplugins + - group: "" + name: openshift-network-console + resource: namespaces + - group: operator.openshift.io + name: cluster + resource: consoles + - group: config.openshift.io + name: cluster + resource: consoles + - group: config.openshift.io + name: cluster + resource: infrastructures + - group: config.openshift.io + name: cluster + resource: proxies + - group: config.openshift.io + name: cluster + resource: oauths + - group: oauth.openshift.io + name: console + resource: oauthclients + - group: "" + name: openshift-console-operator + resource: namespaces + - group: "" + name: openshift-console + resource: namespaces + - group: "" + name: console-public + namespace: openshift-config-managed + resource: configmaps + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - 
apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:versions: {} + manager: manager + operation: Update + subresource: status + time: "2025-08-19T20:12:10Z" + name: control-plane-machine-set + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "53203" + uid: fa1e89cc-6a37-4f5d-9cbb-1eb48632788a + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:56:22Z" + message: cluster operator is upgradable + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:56:22Z" + reason: AllReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:56:21Z" + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:56:22Z" + reason: AllReplicasUpdated + status: "False" + type: Progressing + extension: null + relatedObjects: + - group: "" + name: openshift-machine-api + resource: namespaces + - group: machine.openshift.io + name: "" + resource: controlplanemachinesets + - group: machine.openshift.io + name: "" + resource: machines + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: CSISnapshot + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + 
f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: csi-snapshot-controller-operator + operation: Update + subresource: status + time: "2025-08-19T21:06:11Z" + name: csi-snapshot-controller + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "84193" + uid: 07c5be06-2ded-4f12-bb05-f8172005af11 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:58Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T21:06:11Z" + message: 'CSISnapshotControllerProgressing: Deployment is not progressing' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:05:10Z" + message: 'CSISnapshotControllerAvailable: Deployment is available' + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:58Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:58Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: "" + name: openshift-cluster-storage-operator + resource: namespaces + - group: operator.openshift.io + name: cluster + resource: csisnapshotcontrollers + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: csi-snapshot-controller + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: dns-operator + operation: Update + subresource: status + time: "2025-08-19T21:16:13Z" + name: dns + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: 
ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "90195" + uid: 16759f2f-7870-43c0-9bfe-6536cd053671 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:20Z" + message: DNS "default" is available. + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:16:13Z" + message: Desired and current number of DNSes are equal + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:07:39Z" + reason: DNSNotDegraded + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:54:56Z" + message: 'DNS default is upgradeable: DNS Operator can be upgraded' + reason: DNSUpgradeable + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-dns-operator + resource: namespaces + - group: operator.openshift.io + name: default + resource: dnses + - group: "" + name: openshift-dns + resource: namespaces + versions: + - name: kube-rbac-proxy + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: coredns + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8698aafa8e2df3f9998aedefbecd845817883303c2aebfac07de2af63b3d6239 + - name: openshift-cli + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:versions: {} + manager: cluster-etcd-operator + operation: Update + subresource: status + time: "2025-08-19T21:06:13Z" + name: etcd + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: 
true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "84347" + uid: 21e65ac9-6eff-4deb-b10d-f63adf38ba36 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:05:38Z" + message: |- + NodeControllerDegraded: All master nodes are ready + EtcdMembersDegraded: No unhealthy members found + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T19:40:23Z" + message: |- + NodeInstallerProgressing: 3 nodes are at revision 15 + EtcdMembersProgressing: No unstarted etcd members found + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:56:49Z" + message: |- + StaticPodsAvailable: 3 nodes are active; 3 nodes are at revision 15 + EtcdMembersAvailable: 3 members are available + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:23Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:23Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: etcds + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-etcd-operator + resource: namespaces + - group: "" + name: openshift-etcd + resource: namespaces + versions: + - name: raw-internal + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: etcd + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: ImageRegistry + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-image-registry-operator + operation: Update + subresource: status + time: "2025-08-19T21:15:23Z" + name: 
image-registry + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "89452" + uid: f96711b2-a4fc-49a0-8669-302ba2dd0d5b + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:07:17Z" + message: |- + Available: The registry is ready + NodeCADaemonAvailable: The daemon set node-ca has available replicas + ImagePrunerAvailable: Pruner CronJob has been created + reason: Ready + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:15:23Z" + message: |- + Progressing: The registry is ready + NodeCADaemonProgressing: The daemon set node-ca is deployed + reason: Ready + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:07:17Z" + reason: AsExpected + status: "False" + type: Degraded + extension: null + relatedObjects: + - group: imageregistry.operator.openshift.io + name: cluster + resource: configs + - group: imageregistry.operator.openshift.io + name: cluster + resource: imagepruners + - group: rbac.authorization.k8s.io + name: system:registry + resource: clusterroles + - group: rbac.authorization.k8s.io + name: registry-registry-role + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: openshift-image-registry-pruner + resource: clusterrolebindings + - group: "" + name: openshift-image-registry + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: Ingress + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: ingress-operator + operation: Update + subresource: status + time: "2025-08-19T21:10:52Z" + name: ingress + ownerReferences: + - apiVersion: 
config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "87925" + uid: 79d1ba7a-d1c1-4276-9737-1fe65ea4b083 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:07:39Z" + message: The "default" ingress controller reports Available=True. + reason: IngressAvailable + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:10:52Z" + message: desired and current number of IngressControllers are equal + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:07:39Z" + message: The "default" ingress controller reports Degraded=False. + reason: IngressNotDegraded + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:55:42Z" + reason: IngressControllersUpgradeable + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:55:42Z" + reason: AsExpected + status: "False" + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: "" + name: openshift-ingress-operator + resource: namespaces + - group: operator.openshift.io + name: "" + namespace: openshift-ingress-operator + resource: ingresscontrollers + - group: ingress.operator.openshift.io + name: "" + namespace: openshift-ingress-operator + resource: dnsrecords + - group: "" + name: openshift-ingress + resource: namespaces + - group: ingress.operator.openshift.io + name: "" + namespace: openshift-ingress + resource: dnsrecords + - group: "" + name: openshift-ingress-canary + resource: namespaces + - group: gateway.networking.k8s.io + name: "" + resource: gatewayclasses + - group: gateway.networking.k8s.io + name: "" + resource: gateways + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: ingress-controller + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8412f40fa6a47fa89c5fa293f7ee6d2aa276dd57408dc8b7bd22698fb6b46b51 + - name: canary-server + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:728a324a24d27e68e511b6ff47f454a48479fd425d0e981f475fb788ea1e62c6 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: Insights + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 
f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"ClusterTransferAvailable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Disabled"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"GatheringDisabled"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"RemoteConfigurationAvailable"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"RemoteConfigurationValid"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"SCAAvailable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:extension: + f:lastReportTime: {} + f:relatedObjects: {} + f:versions: {} + manager: insights-operator + operation: Update + subresource: status + time: "2025-08-19T20:18:43Z" + name: insights + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "60829" + uid: ffeb9099-2e69-4eea-b253-64e7d18da684 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:01Z" + message: Insights works as expected + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T19:05:05Z" + message: no available cluster transfer + reason: NoClusterTransfer + status: "False" + type: ClusterTransferAvailable + - lastTransitionTime: "2025-08-19T18:55:01Z" + message: Insights works as expected + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:55:01Z" + reason: AsExpected + status: "False" + type: Disabled + - lastTransitionTime: "2025-08-19T18:55:01Z" + message: Gathering is enabled + reason: AsExpected + status: "False" + type: GatheringDisabled + - lastTransitionTime: "2025-08-19T18:55:01Z" + message: Monitoring the cluster + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:55:01Z" + reason: Succeeded + status: "True" + type: RemoteConfigurationAvailable + - lastTransitionTime: "2025-08-19T18:55:01Z" + reason: Succeeded + status: "True" + type: RemoteConfigurationValid + - lastTransitionTime: "2025-08-19T19:05:05Z" + message: 'Failed to pull SCA certs from https://api.openshift.com/api/accounts_mgmt/v1/entitlement_certificates: + OCM API https://api.openshift.com/api/accounts_mgmt/v1/entitlement_certificates + returned HTTP 403: {"code":"ACCT-MGMT-11","href":"/api/accounts_mgmt/v1/errors/11","id":"11","kind":"Error","operation_id":"cae81182-0d6d-48c8-8bda-5654cda9cdad","reason":"Account + with ID 2DUeKzzTD9ngfsQ6YgkzdJn1jA4 denied access to perform create on EntitlementCertificate + with HTTP call POST /api/accounts_mgmt/v1/entitlement_certificates"}' + reason: Forbidden + status: "False" + type: SCAAvailable + extension: + lastReportTime: "2025-08-19T20:18:37Z" + relatedObjects: + - group: "" + name: openshift-insights + resource: namespaces + - group: apps + name: insights-operator + namespace: openshift-insights + resource: deployments + - 
group: "" + name: pull-secret + namespace: openshift-config + resource: secrets + - group: "" + name: support + namespace: openshift-config + resource: secrets + - group: "" + name: gather + namespace: openshift-insights + resource: serviceaccounts + - group: "" + name: operator + namespace: openshift-insights + resource: serviceaccounts + - group: "" + name: metrics + namespace: openshift-insights + resource: services + - group: "" + name: service-ca-bundle + namespace: openshift-insights + resource: configmaps + - group: operator.openshift.io + name: cluster + resource: insightsoperators + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-kube-apiserver-operator + operation: Update + subresource: status + time: "2025-08-19T21:05:43Z" + name: kube-apiserver + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "82733" + uid: e6198456-3808-4597-82da-6bd88e6e2eae + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:05:43Z" + message: 'NodeControllerDegraded: All master nodes are ready' + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:03:16Z" + message: 'NodeInstallerProgressing: 3 nodes are at revision 8' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:00:39Z" + message: 'StaticPodsAvailable: 3 nodes are active; 3 nodes are at revision 8' + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: 'KubeletMinorVersionUpgradeable: Kubelet and API 
server minor versions + are synced.' + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:24Z" + message: 'PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate + violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator + openshift-cloud-credential-operator openshift-cloud-network-config-controller + openshift-cluster-olm-operator openshift-cluster-samples-operator openshift-cluster-storage-operator + openshift-config-managed openshift-config-operator openshift-console openshift-console-operator + openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator + openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator + openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator + openshift-kube-storage-version-migrator-operator openshift-network-console + openshift-network-diagnostics openshift-node openshift-route-controller-manager + openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]' + reason: PodSecurityInconclusive_PSViolationDecisionInconclusive + status: "True" + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: kubeapiservers + - group: apiextensions.k8s.io + name: "" + resource: customresourcedefinitions + - group: security.openshift.io + name: "" + resource: securitycontextconstraints + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-kube-apiserver-operator + resource: namespaces + - group: "" + name: openshift-kube-apiserver + resource: namespaces + - group: admissionregistration.k8s.io + name: "" + resource: mutatingwebhookconfigurations + - group: admissionregistration.k8s.io + name: "" + resource: validatingwebhookconfigurations + - group: controlplane.operator.openshift.io + name: "" + namespace: openshift-kube-apiserver + resource: podnetworkconnectivitychecks + - group: apiserver.openshift.io + name: "" + resource: apirequestcounts + - group: config.openshift.io + name: cluster + resource: nodes + versions: + - name: raw-internal + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: kube-apiserver + version: 1.33.3 + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: 
config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-kube-controller-manager-operator + operation: Update + subresource: status + time: "2025-08-19T21:05:42Z" + name: kube-controller-manager + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "82729" + uid: 8ab0d047-6c9a-40f4-a6b0-3f37a4896508 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:05:42Z" + message: 'NodeControllerDegraded: All master nodes are ready' + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:08:13Z" + message: 'NodeInstallerProgressing: 3 nodes are at revision 11' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:00:12Z" + message: 'StaticPodsAvailable: 3 nodes are active; 3 nodes are at revision 11' + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: kubecontrollermanagers + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-kube-controller-manager + resource: namespaces + - group: "" + name: openshift-kube-controller-manager-operator + resource: namespaces + - group: "" + name: kube-system + resource: namespaces + - group: certificates.k8s.io + name: "" + resource: certificatesigningrequests + - group: "" + name: "" + resource: nodes + - group: config.openshift.io + name: cluster + resource: nodes + versions: + - name: raw-internal + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: kube-controller-manager + version: 1.33.3 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: 
{} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-kube-scheduler-operator + operation: Update + subresource: status + time: "2025-08-19T21:05:42Z" + name: kube-scheduler + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "82724" + uid: d41ba83e-1f73-41d3-9802-c88c9fc1afdc + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:05:42Z" + message: 'NodeControllerDegraded: All master nodes are ready' + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:09:23Z" + message: 'NodeInstallerProgressing: 3 nodes are at revision 6' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:57:46Z" + message: 'StaticPodsAvailable: 3 nodes are active; 3 nodes are at revision 6' + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: kubeschedulers + - group: config.openshift.io + name: "" + resource: schedulers + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-kube-scheduler + resource: namespaces + - group: "" + name: openshift-kube-scheduler-operator + resource: namespaces + - group: controlplane.operator.openshift.io + name: "" + namespace: openshift-kube-apiserver + resource: podnetworkconnectivitychecks + versions: + - name: raw-internal + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: kube-scheduler + version: 1.33.3 + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:include.release.openshift.io/hypershift: {} + 
f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:versions: {} + manager: cluster-kube-storage-version-migrator-operator + operation: Update + subresource: status + time: "2025-08-19T20:58:06Z" + name: kube-storage-version-migrator + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "78281" + uid: 79805123-6117-4489-9c70-07976daa7680 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:58:06Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T20:58:06Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: kubestorageversionmigrators + - group: migration.k8s.io + name: "" + resource: storageversionmigrations + - group: "" + name: openshift-kube-storage-version-migrator + resource: namespaces + - group: "" + name: openshift-kube-storage-version-migrator-operator + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + 
f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:openshift.io/required-scc: {} + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: machine-api-operator + operation: Update + subresource: status + time: "2025-08-19T20:15:45Z" + name: machine-api + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "54285" + uid: 576994b6-338b-40a2-a432-c9b600fafb31 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T20:15:45Z" + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:08:11Z" + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T19:08:11Z" + message: 'Cluster Machine API Operator is available at operator: 4.20.0-0.nightly-2025-08-19-180353' + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:44Z" + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-machine-api + resource: namespaces + - group: machine.openshift.io + name: "" + namespace: openshift-machine-api + resource: machines + - group: machine.openshift.io + name: "" + namespace: openshift-machine-api + resource: machinesets + - group: machine.openshift.io + name: "" + namespace: openshift-machine-api + resource: machinehealthchecks + - group: rbac.authorization.k8s.io + name: "" + namespace: openshift-machine-api + resource: roles + - group: rbac.authorization.k8s.io + name: machine-api-operator + resource: clusterroles + - group: rbac.authorization.k8s.io + name: machine-api-controllers + resource: clusterroles + - group: metal3.io + name: "" + namespace: openshift-machine-api + resource: baremetalhosts + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + 
f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + f:versions: {} + manager: machine-approver + operation: Update + subresource: status + time: "2025-08-19T20:16:30Z" + name: machine-approver + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "55049" + uid: 23ae79db-51cd-40f1-8fe6-b952076b0e6d + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:56Z" + message: Cluster Machine Approver is available at 4.20.0-0.nightly-2025-08-19-180353 + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:56Z" + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:54:56Z" + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:54:56Z" + reason: AsExpected + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-cluster-machine-approver + resource: namespaces + - group: certificates.k8s.io + name: "" + resource: certificatesigningrequests + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + 
f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:extension: + f:lastSyncError: {} + f:master: {} + f:worker: {} + f:relatedObjects: {} + f:versions: {} + manager: machine-config-operator + operation: Update + subresource: status + time: "2025-08-19T22:07:25Z" + name: machine-config + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "105788" + uid: 7430b706-96ba-4579-8f98-b5215f15578d + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T20:55:19Z" + message: Working towards 4.20.0-0.nightly-2025-08-19-180353 + status: "True" + type: Progressing + - lastTransitionTime: "2025-08-19T21:27:31Z" + message: 'Unable to apply 4.20.0-0.nightly-2025-08-19-180353: error during syncRequiredMachineConfigPools: + [context deadline exceeded, error MachineConfigPool master is not ready, retrying. + Status: (pool degraded: true total: 3, ready 1, updated: 1, unavailable: 1, + reason: Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + upgrade failure. failed to run command nice (6 tries): [timed out waiting + for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json + quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\"2025-08-19T21:22:32Z\" level=warning msg=\"Failed, retrying + in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:38553->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T21:22:43Z\" level=warning msg=\"Failed, retrying + in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40258->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T21:22:54Z\" level=warning msg=\"Failed, retrying + in 1s ... (3/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:58117->168.63.129.16:53: + i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: + lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:35905->168.63.129.16:53: + i/o timeout\n: exit status 125]", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + is reporting: "failed to run command nice (6 tries): [timed out waiting for + the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json + quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\"2025-08-19T21:22:32Z\" level=warning msg=\"Failed, retrying + in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:38553->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T21:22:43Z\" level=warning msg=\"Failed, retrying + in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40258->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T21:22:54Z\" level=warning msg=\"Failed, retrying + in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:58117->168.63.129.16:53: + i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: + lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:35905->168.63.129.16:53: + i/o timeout\n: exit status 125]")]' + reason: RequiredPoolsFailed + status: "True" + type: Degraded + - lastTransitionTime: "2025-08-19T18:57:25Z" + message: Cluster has deployed [{operator 4.20.0-0.nightly-2025-08-12-153542} + {operator-image quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84}] + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:27:31Z" + message: One or more machine config pools are degraded, please see `oc get mcp` + for further details and resolve before upgrading + reason: DegradedPool + status: "False" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:57:25Z" + reason: AsExpected + status: "False" + type: EvaluationConditionsDetected + extension: + lastSyncError: 'error MachineConfigPool master is not ready, retrying. 
Status: + (pool degraded: true total: 3, ready 1, updated: 1, unavailable: 1, reason: + Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + upgrade failure. failed to run command nice (6 tries): [timed out waiting + for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json + quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\"2025-08-19T22:06:34Z\" level=warning msg=\"Failed, retrying + in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:59257->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T22:06:45Z\" level=warning msg=\"Failed, retrying + in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:43943->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T22:06:56Z\" level=warning msg=\"Failed, retrying + in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:41890->168.63.129.16:53: + i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: + lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:47578->168.63.129.16:53: + i/o timeout\n: exit status 125]", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + is reporting: "failed to run command nice (6 tries): [timed out waiting for + the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json + quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\"2025-08-19T22:06:34Z\" level=warning msg=\"Failed, retrying + in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:59257->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T22:06:45Z\" level=warning msg=\"Failed, retrying + in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:43943->168.63.129.16:53: + i/o timeout\"\ntime=\"2025-08-19T22:06:56Z\" level=warning msg=\"Failed, retrying + in 1s ... (3/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:41890->168.63.129.16:53: + i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: + lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:47578->168.63.129.16:53: + i/o timeout\n: exit status 125]")' + master: 'pool is degraded because nodes fail with "1 nodes are reporting degraded + status on sync": "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: \"Node + ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 upgrade failure. failed to run command + nice (6 tries): [timed out waiting for the condition, running nice -- ionice + -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\\\"2025-08-19T22:06:34Z\\\" level=warning msg=\\\"Failed, retrying + in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:59257->168.63.129.16:53: + i/o timeout\\\"\\ntime=\\\"2025-08-19T22:06:45Z\\\" level=warning msg=\\\"Failed, + retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:43943->168.63.129.16:53: + i/o timeout\\\"\\ntime=\\\"2025-08-19T22:06:56Z\\\" level=warning msg=\\\"Failed, + retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:41890->168.63.129.16:53: + i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:47578->168.63.129.16:53: + i/o timeout\\n: exit status 125]\", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + is reporting: \"failed to run command nice (6 tries): [timed out waiting for + the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json + quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 + failed: time=\\\"2025-08-19T22:06:34Z\\\" level=warning msg=\\\"Failed, retrying + in 1s ... (1/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:59257->168.63.129.16:53: + i/o timeout\\\"\\ntime=\\\"2025-08-19T22:06:45Z\\\" level=warning msg=\\\"Failed, + retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:43943->168.63.129.16:53: + i/o timeout\\\"\\ntime=\\\"2025-08-19T22:06:56Z\\\" level=warning msg=\\\"Failed, + retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": + dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:41890->168.63.129.16:53: + i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: + pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial + tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:47578->168.63.129.16:53: + i/o timeout\\n: exit status 125]\""' + worker: all 3 nodes are at latest configuration rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1 + relatedObjects: + - group: "" + name: openshift-machine-config-operator + resource: namespaces + - group: machineconfiguration.openshift.io + name: "" + resource: machineconfigpools + - group: machineconfiguration.openshift.io + name: "" + resource: controllerconfigs + - group: machineconfiguration.openshift.io + name: "" + resource: kubeletconfigs + - group: machineconfiguration.openshift.io + name: "" + resource: containerruntimeconfigs + - group: machineconfiguration.openshift.io + name: "" + resource: machineconfigs + - group: operator.openshift.io + name: "" + resource: machineconfigurations + - group: "" + name: "" + resource: nodes + - group: "" + name: openshift-kni-infra + resource: namespaces + - group: "" + name: openshift-openstack-infra + resource: namespaces + - group: "" + name: openshift-ovirt-infra + resource: namespaces + - group: "" + name: openshift-vsphere-infra + resource: namespaces + - group: "" + name: openshift-nutanix-infra + resource: namespaces + - group: "" + name: openshift-cloud-platform-infra + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-12-153542 + - name: operator-image + version: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: marketplace + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + 
f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: marketplace-operator + operation: Update + subresource: status + time: "2025-08-19T20:17:08Z" + name: marketplace + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57417" + uid: 3415c8ff-fecc-4d10-ad6f-c91a4ab34307 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:41Z" + message: 'Successfully progressed to release version: 4.20.0-0.nightly-2025-08-19-180353' + reason: OperatorAvailable + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:55:41Z" + message: 'Available release version: 4.20.0-0.nightly-2025-08-19-180353' + reason: OperatorAvailable + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:41Z" + message: Marketplace is upgradeable + reason: OperatorAvailable + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:55:41Z" + message: 'Available release version: 4.20.0-0.nightly-2025-08-19-180353' + reason: OperatorAvailable + status: "False" + type: Degraded + extension: null + relatedObjects: + - group: "" + name: openshift-marketplace + resource: namespaces + - group: operators.coreos.com + name: "" + namespace: openshift-marketplace + resource: catalogsources + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + 
manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:versions: {} + manager: operator + operation: Update + subresource: status + time: "2025-08-19T20:23:07Z" + name: monitoring + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "63294" + uid: 6d269106-b157-4c9e-bef1-eb4447d83cd9 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:09:12Z" + message: Successfully rolled out the stack. + reason: RollOutDone + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T19:09:12Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:23:07Z" + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:09:12Z" + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-monitoring + resource: namespaces + - group: "" + name: openshift-user-workload-monitoring + resource: namespaces + - group: monitoring.coreos.com + name: "" + resource: servicemonitors + - group: monitoring.coreos.com + name: "" + resource: podmonitors + - group: monitoring.coreos.com + name: "" + resource: prometheusrules + - group: monitoring.coreos.com + name: "" + resource: alertmanagers + - group: monitoring.coreos.com + name: "" + resource: prometheuses + - group: monitoring.coreos.com + name: "" + resource: thanosrulers + - group: monitoring.coreos.com + name: "" + resource: alertmanagerconfigs + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + network.operator.openshift.io/last-seen-state: '{"DaemonsetStates":[],"DeploymentStates":[],"StatefulsetStates":[]}' + network.operator.openshift.io/relatedClusterObjects: "" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: 
Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:network.operator.openshift.io/last-seen-state: {} + f:network.operator.openshift.io/relatedClusterObjects: {} + manager: network-operator + operation: Update + time: "2025-08-19T21:15:37Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"ManagementStateDegraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: network-operator + operation: Update + subresource: status + time: "2025-08-19T21:15:54Z" + name: network + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "90027" + uid: e4227192-fbac-4e99-a45d-7a9237e6acae + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:53:22Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:52:46Z" + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:52:46Z" + status: "False" + type: ManagementStateDegraded + - lastTransitionTime: "2025-08-19T21:15:54Z" + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:52:56Z" + status: "True" + type: Available + extension: null + relatedObjects: + - group: "" + name: applied-cluster + namespace: openshift-network-operator + resource: configmaps + - group: apiextensions.k8s.io + name: cloudprivateipconfigs.cloud.network.openshift.io + resource: customresourcedefinitions + - group: "" + name: cloud-network-config-controller + namespace: openshift-cloud-network-config-controller + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: cloud-network-config-controller + resource: clusterroles + - group: rbac.authorization.k8s.io + name: cloud-network-config-controller + namespace: openshift-cloud-network-config-controller + resource: roles + - group: rbac.authorization.k8s.io + name: cloud-network-config-controller + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: cloud-network-config-controller-rb + namespace: openshift-cloud-network-config-controller + resource: rolebindings + - group: "" + name: kube-cloud-config + namespace: openshift-cloud-network-config-controller + resource: configmaps + - group: "" + name: trusted-ca + namespace: openshift-cloud-network-config-controller + resource: configmaps + - group: apps + name: cloud-network-config-controller + namespace: openshift-cloud-network-config-controller + resource: deployments + - group: apiextensions.k8s.io + name: network-attachment-definitions.k8s.cni.cncf.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: ippools.whereabouts.cni.cncf.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: overlappingrangeipreservations.whereabouts.cni.cncf.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: nodeslicepools.whereabouts.cni.cncf.io + resource: customresourcedefinitions 
+ - group: "" + name: openshift-multus + resource: namespaces + - group: rbac.authorization.k8s.io + name: multus + resource: clusterroles + - group: rbac.authorization.k8s.io + name: multus-ancillary-tools + resource: clusterroles + - group: "" + name: multus + namespace: openshift-multus + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: multus-transient + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: multus-group + resource: clusterrolebindings + - group: "" + name: multus-ancillary-tools + namespace: openshift-multus + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: multus-ancillary-tools + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: multus-cluster-readers + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: multus-whereabouts + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: multus-whereabouts + namespace: openshift-multus + resource: rolebindings + - group: rbac.authorization.k8s.io + name: whereabouts-cni + resource: clusterroles + - group: rbac.authorization.k8s.io + name: whereabouts-cni + namespace: openshift-multus + resource: roles + - group: rbac.authorization.k8s.io + name: net-attach-def-project + resource: clusterroles + - group: "" + name: default-cni-sysctl-allowlist + namespace: openshift-multus + resource: configmaps + - group: "" + name: cni-copy-resources + namespace: openshift-multus + resource: configmaps + - group: "" + name: whereabouts-flatfile-config + namespace: openshift-multus + resource: configmaps + - group: "" + name: multus-daemon-config + namespace: openshift-multus + resource: configmaps + - group: apps + name: multus + namespace: openshift-multus + resource: daemonsets + - group: apps + name: multus-additional-cni-plugins + namespace: openshift-multus + resource: daemonsets + - group: "" + name: metrics-daemon-sa + namespace: openshift-multus + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: metrics-daemon-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: metrics-daemon-sa-rolebinding + resource: clusterrolebindings + - group: apps + name: network-metrics-daemon + namespace: openshift-multus + resource: daemonsets + - group: monitoring.coreos.com + name: monitor-network + namespace: openshift-multus + resource: servicemonitors + - group: "" + name: network-metrics-service + namespace: openshift-multus + resource: services + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-multus + resource: roles + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-multus + resource: rolebindings + - group: "" + name: multus-admission-controller + namespace: openshift-multus + resource: services + - group: "" + name: multus-ac + namespace: openshift-multus + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: multus-admission-controller-webhook + resource: clusterroles + - group: rbac.authorization.k8s.io + name: multus-admission-controller-webhook + resource: clusterrolebindings + - group: admissionregistration.k8s.io + name: multus.openshift.io + resource: validatingwebhookconfigurations + - group: apps + name: multus-admission-controller + namespace: openshift-multus + resource: deployments + - group: monitoring.coreos.com + name: monitor-multus-admission-controller + namespace: openshift-multus + resource: servicemonitors + - group: rbac.authorization.k8s.io + name: prometheus-k8s + 
namespace: openshift-multus + resource: roles + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-multus + resource: rolebindings + - group: monitoring.coreos.com + name: prometheus-k8s-rules + namespace: openshift-multus + resource: prometheusrules + - group: "" + name: openshift-ovn-kubernetes + resource: namespaces + - group: apiextensions.k8s.io + name: egressfirewalls.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: egressips.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: egressqoses.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: adminpolicybasedexternalroutes.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: egressservices.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: adminnetworkpolicies.policy.networking.k8s.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: baselineadminnetworkpolicies.policy.networking.k8s.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: ipamclaims.k8s.cni.cncf.io + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: userdefinednetworks.k8s.ovn.org + resource: customresourcedefinitions + - group: apiextensions.k8s.io + name: clusteruserdefinednetworks.k8s.ovn.org + resource: customresourcedefinitions + - group: "" + name: ovn-kubernetes-node + namespace: openshift-ovn-kubernetes + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-node-limited + namespace: openshift-ovn-kubernetes + resource: roles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-nodes-identity-limited + namespace: openshift-ovn-kubernetes + resource: rolebindings + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-node-limited + resource: clusterroles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-node-identity-limited + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-kube-rbac-proxy + resource: clusterroles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-node-kube-rbac-proxy + resource: clusterrolebindings + - group: "" + name: ovnkube-config + namespace: openshift-ovn-kubernetes + resource: configmaps + - group: "" + name: ovn-kubernetes-control-plane + namespace: openshift-ovn-kubernetes + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-control-plane-limited + resource: clusterroles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-control-plane-limited + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-control-plane-limited + namespace: openshift-ovn-kubernetes + resource: roles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-control-plane-limited + namespace: openshift-ovn-kubernetes + resource: rolebindings + - group: network.operator.openshift.io + name: ovn + namespace: openshift-ovn-kubernetes + resource: operatorpkis + - group: network.operator.openshift.io + name: signer + namespace: openshift-ovn-kubernetes + resource: operatorpkis + - group: flowcontrol.apiserver.k8s.io + name: openshift-ovn-kubernetes + resource: flowschemas + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-cluster-reader + resource: clusterroles + - group: 
"" + name: ovnkube-script-lib + namespace: openshift-ovn-kubernetes + resource: configmaps + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-udn-editor + resource: clusterroles + - group: rbac.authorization.k8s.io + name: openshift-ovn-kubernetes-udn-viewer + resource: clusterroles + - group: monitoring.coreos.com + name: master-rules + namespace: openshift-ovn-kubernetes + resource: prometheusrules + - group: monitoring.coreos.com + name: networking-rules + namespace: openshift-ovn-kubernetes + resource: prometheusrules + - group: "" + name: openshift-network-features + namespace: openshift-config-managed + resource: configmaps + - group: monitoring.coreos.com + name: monitor-ovn-control-plane-metrics + namespace: openshift-ovn-kubernetes + resource: servicemonitors + - group: "" + name: ovn-kubernetes-control-plane + namespace: openshift-ovn-kubernetes + resource: services + - group: monitoring.coreos.com + name: monitor-ovn-node + namespace: openshift-ovn-kubernetes + resource: servicemonitors + - group: "" + name: ovn-kubernetes-node + namespace: openshift-ovn-kubernetes + resource: services + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-ovn-kubernetes + resource: roles + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-ovn-kubernetes + resource: rolebindings + - group: "" + name: openshift-host-network + resource: namespaces + - group: "" + name: host-network-namespace-quotas + namespace: openshift-host-network + resource: resourcequotas + - group: apps + name: ovnkube-control-plane + namespace: openshift-ovn-kubernetes + resource: deployments + - group: apps + name: ovnkube-node + namespace: openshift-ovn-kubernetes + resource: daemonsets + - group: admissionregistration.k8s.io + name: user-defined-networks-namespace-label + resource: validatingadmissionpolicies + - group: admissionregistration.k8s.io + name: user-defined-networks-namespace-label-binding + resource: validatingadmissionpolicybindings + - group: "" + name: openshift-network-diagnostics + resource: namespaces + - group: "" + name: network-diagnostics + namespace: openshift-network-diagnostics + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: network-diagnostics + namespace: openshift-network-diagnostics + resource: roles + - group: rbac.authorization.k8s.io + name: network-diagnostics + namespace: openshift-network-diagnostics + resource: rolebindings + - group: rbac.authorization.k8s.io + name: network-diagnostics + resource: clusterroles + - group: rbac.authorization.k8s.io + name: network-diagnostics + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: network-diagnostics + namespace: kube-system + resource: rolebindings + - group: apps + name: network-check-source + namespace: openshift-network-diagnostics + resource: deployments + - group: "" + name: network-check-source + namespace: openshift-network-diagnostics + resource: services + - group: monitoring.coreos.com + name: network-check-source + namespace: openshift-network-diagnostics + resource: servicemonitors + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-network-diagnostics + resource: roles + - group: rbac.authorization.k8s.io + name: prometheus-k8s + namespace: openshift-network-diagnostics + resource: rolebindings + - group: apps + name: network-check-target + namespace: openshift-network-diagnostics + resource: daemonsets + - group: "" + name: network-check-target + namespace: 
openshift-network-diagnostics + resource: services + - group: rbac.authorization.k8s.io + name: openshift-network-public-role + namespace: openshift-config-managed + resource: roles + - group: rbac.authorization.k8s.io + name: openshift-network-public-role-binding + namespace: openshift-config-managed + resource: rolebindings + - group: "" + name: openshift-network-node-identity + resource: namespaces + - group: "" + name: network-node-identity + namespace: openshift-network-node-identity + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: network-node-identity + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: network-node-identity + resource: clusterroles + - group: rbac.authorization.k8s.io + name: network-node-identity-leases + namespace: openshift-network-node-identity + resource: rolebindings + - group: rbac.authorization.k8s.io + name: network-node-identity-leases + namespace: openshift-network-node-identity + resource: roles + - group: rbac.authorization.k8s.io + name: system:openshift:scc:hostnetwork-v2 + namespace: openshift-network-node-identity + resource: rolebindings + - group: "" + name: ovnkube-identity-cm + namespace: openshift-network-node-identity + resource: configmaps + - group: network.operator.openshift.io + name: network-node-identity + namespace: openshift-network-node-identity + resource: operatorpkis + - group: admissionregistration.k8s.io + name: network-node-identity.openshift.io + resource: validatingwebhookconfigurations + - group: apps + name: network-node-identity + namespace: openshift-network-node-identity + resource: daemonsets + - group: monitoring.coreos.com + name: openshift-network-operator-ipsec-rules + namespace: openshift-network-operator + resource: prometheusrules + - group: rbac.authorization.k8s.io + name: openshift-iptables-alerter + resource: clusterroles + - group: "" + name: iptables-alerter + namespace: openshift-network-operator + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: openshift-iptables-alerter + resource: clusterrolebindings + - group: "" + name: iptables-alerter-script + namespace: openshift-network-operator + resource: configmaps + - group: apps + name: iptables-alerter + namespace: openshift-network-operator + resource: daemonsets + - group: "" + name: openshift-network-console + resource: namespaces + - group: "" + name: networking-console-plugin + namespace: openshift-network-console + resource: configmaps + - group: apps + name: networking-console-plugin + namespace: openshift-network-console + resource: deployments + - group: "" + name: networking-console-plugin + namespace: openshift-network-console + resource: services + - group: console.openshift.io + name: networking-console-plugin + resource: consoleplugins + - group: "" + name: openshift-network-operator + resource: namespaces + - group: operator.openshift.io + name: cluster + resource: networks + - group: "" + name: openshift-cloud-network-config-controller + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: NodeTuning + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: 
+ - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-node-tuning-operator + operation: Update + subresource: status + time: "2025-08-19T21:15:22Z" + name: node-tuning + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "89419" + uid: 1f02c635-e2ec-468e-9972-372e5e70ad43 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T20:17:44Z" + message: Cluster has deployed 6/6 "4.20.0-0.nightly-2025-08-19-180353" operands + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:15:22Z" + message: Cluster version is "4.20.0-0.nightly-2025-08-19-180353" + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:54:56Z" + message: DaemonSet "tuned" available + reason: AsExpected + status: "False" + type: Degraded + extension: null + relatedObjects: + - group: "" + name: openshift-cluster-node-tuning-operator + resource: namespaces + - group: tuned.openshift.io + name: "" + namespace: openshift-cluster-node-tuning-operator + resource: profiles + - group: tuned.openshift.io + name: "" + namespace: openshift-cluster-node-tuning-operator + resource: tuneds + - group: apps + name: tuned + namespace: openshift-cluster-node-tuning-operator + resource: daemonsets + - group: performance.openshift.io + name: "" + resource: performanceprofiles + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: ocp-tuned + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: OperatorLifecycleManagerV1 + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: 
"2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-olm-operator + operation: Update + subresource: status + time: "2025-08-19T21:06:31Z" + name: olm + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "84979" + uid: e618c2f2-b4de-4c7e-a2af-64e4ae1c5c69 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:05:09Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T21:06:31Z" + message: |- + CatalogdDeploymentCatalogdControllerManagerProgressing: Deployment is not progressing + OperatorcontrollerDeploymentOperatorControllerControllerManagerProgressing: Deployment is not progressing + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T21:06:31Z" + message: |- + CatalogdDeploymentCatalogdControllerManagerAvailable: Deployment is available + OperatorcontrollerDeploymentOperatorControllerControllerManagerAvailable: Deployment is available + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:31Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:31Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: "" + name: openshift-catalogd + resource: namespaces + - group: apiextensions.k8s.io + name: clustercatalogs.olm.operatorframework.io + resource: customresourcedefinitions + - group: "" + name: catalogd-controller-manager + namespace: openshift-catalogd + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: catalogd-leader-election-role + namespace: openshift-catalogd + resource: roles + - group: rbac.authorization.k8s.io + name: catalogd-manager-role + namespace: openshift-catalogd + resource: roles + - group: rbac.authorization.k8s.io + name: catalogd-metrics-monitor-role + namespace: openshift-catalogd + resource: roles + - group: rbac.authorization.k8s.io + name: catalogd-manager-role + namespace: openshift-config + resource: roles + - group: rbac.authorization.k8s.io + name: catalogd-manager-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: catalogd-metrics-reader + resource: clusterroles + - group: rbac.authorization.k8s.io + name: catalogd-proxy-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: catalogd-leader-election-rolebinding + namespace: 
openshift-catalogd + resource: rolebindings + - group: rbac.authorization.k8s.io + name: catalogd-manager-rolebinding + namespace: openshift-catalogd + resource: rolebindings + - group: rbac.authorization.k8s.io + name: catalogd-metrics-monitor-rolebinding + namespace: openshift-catalogd + resource: rolebindings + - group: rbac.authorization.k8s.io + name: catalogd-manager-rolebinding + namespace: openshift-config + resource: rolebindings + - group: rbac.authorization.k8s.io + name: catalogd-manager-rolebinding + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: catalogd-proxy-rolebinding + resource: clusterrolebindings + - group: "" + name: catalogd-trusted-ca-bundle + namespace: openshift-catalogd + resource: configmaps + - group: "" + name: catalogd-service + namespace: openshift-catalogd + resource: services + - group: apps + name: catalogd-controller-manager + namespace: openshift-catalogd + resource: deployments + - group: monitoring.coreos.com + name: catalogd-metrics-monitor + namespace: openshift-catalogd + resource: servicemonitors + - group: networking.k8s.io + name: catalogd-controller-manager + namespace: openshift-catalogd + resource: networkpolicies + - group: networking.k8s.io + name: catalogd-default-deny-all-traffic + namespace: openshift-catalogd + resource: networkpolicies + - group: olm.operatorframework.io + name: openshift-certified-operators + resource: clustercatalogs + - group: olm.operatorframework.io + name: openshift-community-operators + resource: clustercatalogs + - group: olm.operatorframework.io + name: openshift-redhat-marketplace + resource: clustercatalogs + - group: olm.operatorframework.io + name: openshift-redhat-operators + resource: clustercatalogs + - group: admissionregistration.k8s.io + name: catalogd-mutating-webhook-configuration + resource: mutatingwebhookconfigurations + - group: "" + name: openshift-operator-controller + resource: namespaces + - group: apiextensions.k8s.io + name: clusterextensions.olm.operatorframework.io + resource: customresourcedefinitions + - group: "" + name: operator-controller-controller-manager + namespace: openshift-operator-controller + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: operator-controller-manager-role + namespace: openshift-config + resource: roles + - group: rbac.authorization.k8s.io + name: operator-controller-leader-election-role + namespace: openshift-operator-controller + resource: roles + - group: rbac.authorization.k8s.io + name: operator-controller-manager-role + namespace: openshift-operator-controller + resource: roles + - group: rbac.authorization.k8s.io + name: operator-controller-metrics-monitor-role + namespace: openshift-operator-controller + resource: roles + - group: rbac.authorization.k8s.io + name: operator-controller-clusterextension-editor-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: operator-controller-clusterextension-viewer-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: operator-controller-manager-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: operator-controller-metrics-reader + resource: clusterroles + - group: rbac.authorization.k8s.io + name: operator-controller-proxy-role + resource: clusterroles + - group: rbac.authorization.k8s.io + name: operator-controller-manager-rolebinding + namespace: openshift-config + resource: rolebindings + - group: rbac.authorization.k8s.io + name: operator-controller-leader-election-rolebinding + namespace: 
openshift-operator-controller + resource: rolebindings + - group: rbac.authorization.k8s.io + name: operator-controller-manager-rolebinding + namespace: openshift-operator-controller + resource: rolebindings + - group: rbac.authorization.k8s.io + name: operator-controller-metrics-monitor-rolebinding + namespace: openshift-operator-controller + resource: rolebindings + - group: rbac.authorization.k8s.io + name: operator-controller-manager-rolebinding + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: operator-controller-proxy-rolebinding + resource: clusterrolebindings + - group: "" + name: operator-controller-trusted-ca-bundle + namespace: openshift-operator-controller + resource: configmaps + - group: "" + name: operator-controller-service + namespace: openshift-operator-controller + resource: services + - group: apps + name: operator-controller-controller-manager + namespace: openshift-operator-controller + resource: deployments + - group: monitoring.coreos.com + name: operator-controller-metrics-monitor + namespace: openshift-operator-controller + resource: servicemonitors + - group: networking.k8s.io + name: operator-controller-controller-manager + namespace: openshift-operator-controller + resource: networkpolicies + - group: networking.k8s.io + name: operator-controller-default-deny-all-traffic + namespace: openshift-operator-controller + resource: networkpolicies + - group: operator.openshift.io + name: cluster + resource: olms + - group: "" + name: openshift-cluster-olm-operator + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-openshift-apiserver-operator + operation: Update + subresource: status 
+ time: "2025-08-19T21:36:26Z" + name: openshift-apiserver + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "95827" + uid: e91c4655-c2b6-48a7-a02b-a9084eda70d1 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T21:36:26Z" + message: 'APIServerDeploymentDegraded: 1 of 3 requested instances are unavailable + for apiserver.openshift-apiserver ()' + reason: APIServerDeployment_UnavailablePod + status: "True" + type: Degraded + - lastTransitionTime: "2025-08-19T20:27:03Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:05:10Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: openshiftapiservers + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-apiserver-operator + resource: namespaces + - group: "" + name: openshift-apiserver + resource: namespaces + - group: "" + name: openshift-etcd-operator + resource: namespaces + - group: "" + name: host-etcd-2 + namespace: openshift-etcd + resource: endpoints + - group: controlplane.operator.openshift.io + name: "" + namespace: openshift-apiserver + resource: podnetworkconnectivitychecks + - group: apiregistration.k8s.io + name: v1.apps.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.authorization.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.build.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.image.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.project.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.quota.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.route.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.security.openshift.io + resource: apiservices + - group: apiregistration.k8s.io + name: v1.template.openshift.io + resource: apiservices + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: openshift-apiserver + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:exclude.release.openshift.io/internal-openshift-hosted: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: 
"2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-openshift-controller-manager-operator + operation: Update + subresource: status + time: "2025-08-19T20:17:28Z" + name: openshift-controller-manager + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "58488" + uid: f7811550-2a6b-4468-bb4f-e1ce0c7955b9 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:22Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:17:28Z" + message: All is well + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:05:19Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:27Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: openshiftcontrollermanagers + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-controller-manager-operator + resource: namespaces + - group: "" + name: openshift-controller-manager + resource: namespaces + - group: "" + name: openshift-route-controller-manager + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: openshift-samples + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + 
f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + f:versions: {} + manager: cluster-samples-operator + operation: Update + subresource: status + time: "2025-08-19T20:17:00Z" + name: openshift-samples + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57112" + uid: 2300a764-8606-4eda-bb2d-7eec29a33855 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T19:06:13Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T19:06:23Z" + message: Samples installation successful at 4.20.0-0.nightly-2025-08-19-180353 + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T19:06:14Z" + message: Samples installation successful at 4.20.0-0.nightly-2025-08-19-180353 + status: "False" + type: Progressing + extension: null + relatedObjects: + - group: samples.operator.openshift.io + name: cluster + resource: configs + - group: "" + name: openshift-cluster-samples-operator + resource: namespaces + - group: template.openshift.io + name: "" + namespace: openshift + resource: templates + - group: image.openshift.io + name: "" + namespace: openshift + resource: imagestreams + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: OperatorLifecycleManager + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:versions: {} + manager: olm + operation: Update + subresource: status + time: 
"2025-08-19T20:17:08Z" + name: operator-lifecycle-manager + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57382" + uid: 341ffbcd-d458-40d1-bcf6-acfa113942fe + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T20:17:08Z" + message: Deployed 4.20.0-202508150251.p2.g7fbd76c.assembly.stream.el9-7fbd76c + status: "False" + type: Progressing + extension: null + relatedObjects: + - group: operators.coreos.com + name: packageserver + namespace: openshift-operator-lifecycle-manager + resource: clusterserviceversions + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: operator-lifecycle-manager + version: 4.20.0-202508150251.p2.g7fbd76c.assembly.stream.el9-7fbd76c +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: OperatorLifecycleManager + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:57Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + f:relatedObjects: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + f:versions: {} + manager: catalog + operation: Update + subresource: status + time: "2025-08-19T20:17:38Z" + name: operator-lifecycle-manager-catalog + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "59019" + uid: 60e6c3ee-4773-48b7-98cd-c5f84df8e861 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:17:38Z" + message: Deployed 4.20.0-202508150251.p2.g7fbd76c.assembly.stream.el9-7fbd76c + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "True" + type: 
Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-operator-lifecycle-manager + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: operator-lifecycle-manager + version: 4.20.0-202508150251.p2.g7fbd76c.assembly.stream.el9-7fbd76c +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: OperatorLifecycleManager + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: "2025-08-19T18:46:57Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: olm + operation: Update + subresource: status + time: "2025-08-19T20:17:11Z" + name: operator-lifecycle-manager-packageserver + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57759" + uid: 18457ec9-d5a4-4fd1-9a54-99eb72a3bee8 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:55:42Z" + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T19:05:11Z" + message: ClusterServiceVersion openshift-operator-lifecycle-manager/packageserver + observed in phase Succeeded + reason: ClusterServiceVersionSucceeded + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T20:17:11Z" + message: Deployed version 0.0.1-snapshot + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:55:42Z" + message: Safe to upgrade + status: "True" + type: Upgradeable + extension: null + relatedObjects: + - group: "" + name: openshift-operator-lifecycle-manager + resource: namespaces + - group: operators.coreos.com + name: packageserver + namespace: openshift-operator-lifecycle-manager + resource: clusterserviceversions + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 + - name: packageserver + version: 0.0.1-snapshot +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" 
+ include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:57Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: service-ca-operator + operation: Update + subresource: status + time: "2025-08-19T20:17:07Z" + name: service-ca + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "57338" + uid: c087d837-e7ac-4359-b4e8-04be97b9241f + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:23Z" + message: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T20:17:07Z" + message: 'Progressing: All service-ca-operator deployments updated' + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T18:54:25Z" + message: All is well + reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:54:25Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:22Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: operator.openshift.io + name: cluster + resource: servicecas + - group: "" + name: openshift-config + resource: namespaces + - group: "" + name: openshift-config-managed + resource: namespaces + - group: "" + name: openshift-service-ca-operator + resource: namespaces + - group: "" + name: openshift-service-ca + resource: namespaces + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +- apiVersion: config.openshift.io/v1 + kind: ClusterOperator + metadata: + annotations: + capability.openshift.io/name: Storage + include.release.openshift.io/hypershift: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + 
include.release.openshift.io/single-node-developer: "true" + creationTimestamp: "2025-08-19T18:46:56Z" + generation: 1 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:capability.openshift.io/name: {} + f:include.release.openshift.io/hypershift: {} + f:include.release.openshift.io/ibm-cloud-managed: {} + f:include.release.openshift.io/self-managed-high-availability: {} + f:include.release.openshift.io/single-node-developer: {} + f:ownerReferences: + .: {} + k:{"uid":"e4a44894-5254-44a3-8c43-f56281b509eb"}: {} + f:spec: {} + manager: cluster-version-operator + operation: Update + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:extension: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T18:46:56Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Degraded"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"EvaluationConditionsDetected"}: + .: {} + f:lastTransitionTime: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:relatedObjects: {} + f:versions: {} + manager: cluster-storage-operator + operation: Update + subresource: status + time: "2025-08-19T21:15:26Z" + name: storage + ownerReferences: + - apiVersion: config.openshift.io/v1 + controller: true + kind: ClusterVersion + name: version + uid: e4a44894-5254-44a3-8c43-f56281b509eb + resourceVersion: "89586" + uid: 398c177c-8121-4ab0-9fbd-f41723754ec9 + spec: {} + status: + conditions: + - lastTransitionTime: "2025-08-19T18:54:56Z" + message: |- + AzureDiskCSIDriverOperatorCRDegraded: All is well + AzureFileCSIDriverOperatorCRDegraded: All is well + reason: AsExpected + status: "False" + type: Degraded + - lastTransitionTime: "2025-08-19T21:15:26Z" + message: |- + AzureDiskCSIDriverOperatorCRProgressing: AzureDiskDriverNodeServiceControllerProgressing: DaemonSet is not progressing + AzureDiskCSIDriverOperatorCRProgressing: AzureDiskDriverControllerServiceControllerProgressing: Deployment is not progressing + AzureFileCSIDriverOperatorCRProgressing: AzureFileDriverNodeServiceControllerProgressing: DaemonSet is not progressing + AzureFileCSIDriverOperatorCRProgressing: AzureFileDriverControllerServiceControllerProgressing: Deployment is not progressing + reason: AsExpected + status: "False" + type: Progressing + - lastTransitionTime: "2025-08-19T19:05:12Z" + message: |- + DefaultStorageClassControllerAvailable: StorageClass provided by supplied CSI Driver instead of the cluster-storage-operator + AzureDiskCSIDriverOperatorCRAvailable: AzureDiskDriverNodeServiceControllerAvailable: DaemonSet is available + AzureDiskCSIDriverOperatorCRAvailable: AzureDiskDriverControllerServiceControllerAvailable: Deployment is available + AzureFileCSIDriverOperatorCRAvailable: AzureFileDriverNodeServiceControllerAvailable: DaemonSet is available + AzureFileCSIDriverOperatorCRAvailable: AzureFileDriverControllerServiceControllerAvailable: Deployment is available + 
reason: AsExpected + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T18:55:02Z" + message: All is well + reason: AsExpected + status: "True" + type: Upgradeable + - lastTransitionTime: "2025-08-19T18:54:56Z" + reason: NoData + status: Unknown + type: EvaluationConditionsDetected + extension: null + relatedObjects: + - group: "" + name: azure-disk-csi-driver-operator + namespace: openshift-cluster-csi-drivers + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: azure-disk-csi-driver-operator-role + namespace: openshift-cluster-csi-drivers + resource: roles + - group: rbac.authorization.k8s.io + name: azure-disk-csi-driver-operator-clusterrole + resource: clusterroles + - group: rbac.authorization.k8s.io + name: azure-disk-csi-driver-operator-clusterrolebinding + resource: clusterrolebindings + - group: rbac.authorization.k8s.io + name: azure-disk-csi-driver-operator-rolebinding + namespace: openshift-cluster-csi-drivers + resource: rolebindings + - group: operator.openshift.io + name: disk.csi.azure.com + resource: clustercsidrivers + - group: "" + name: azure-file-csi-driver-operator + namespace: openshift-cluster-csi-drivers + resource: serviceaccounts + - group: rbac.authorization.k8s.io + name: azure-file-csi-driver-operator-role + namespace: openshift-cluster-csi-drivers + resource: roles + - group: rbac.authorization.k8s.io + name: azure-file-csi-driver-operator-rolebinding + namespace: openshift-cluster-csi-drivers + resource: rolebindings + - group: rbac.authorization.k8s.io + name: azure-file-csi-driver-operator-clusterrole + resource: clusterroles + - group: rbac.authorization.k8s.io + name: azure-file-csi-driver-operator-clusterrolebinding + resource: clusterrolebindings + - group: operator.openshift.io + name: file.csi.azure.com + resource: clustercsidrivers + - group: "" + name: openshift-cluster-storage-operator + resource: namespaces + - group: "" + name: openshift-cluster-csi-drivers + resource: namespaces + - group: operator.openshift.io + name: cluster + resource: storages + - group: rbac.authorization.k8s.io + name: cluster-storage-operator-role + resource: clusterrolebindings + versions: + - name: operator + version: 4.20.0-0.nightly-2025-08-19-180353 +kind: ClusterOperatorList +metadata: + continue: "" + resourceVersion: "107652" diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-cv.yaml b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-cv.yaml new file mode 100644 index 0000000000..54c89ef202 --- /dev/null +++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-cv.yaml @@ -0,0 +1,233 @@ +apiVersion: config.openshift.io/v1 +kind: ClusterVersion +metadata: + creationTimestamp: "2025-08-19T18:46:31Z" + generation: 3 + managedFields: + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:spec: + .: {} + f:clusterID: {} + manager: cluster-bootstrap + operation: Update + time: "2025-08-19T18:46:31Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:spec: + f:desiredUpdate: + .: {} + f:architecture: {} + f:force: {} + f:image: {} + f:version: {} + manager: openshift-tests + operation: Update + time: "2025-08-19T19:32:58Z" + - apiVersion: config.openshift.io/v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + .: {} + f:availableUpdates: {} + f:capabilities: + .: {} + f:enabledCapabilities: {} + f:knownCapabilities: {} + f:conditions: + .: {} + k:{"type":"Available"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:status: {} + f:type: {} + 
k:{"type":"Failing"}: + .: {} + f:lastTransitionTime: {} + f:status: {} + f:type: {} + k:{"type":"ImplicitlyEnabledCapabilities"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Progressing"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"ReleaseAccepted"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"RetrievedUpdates"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"Upgradeable"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"UpgradeableClusterOperators"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + k:{"type":"UpgradeableUpgradeInProgress"}: + .: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:desired: + .: {} + f:image: {} + f:version: {} + f:history: {} + f:observedGeneration: {} + f:versionHash: {} + manager: cluster-version-operator + operation: Update + subresource: status + time: "2025-08-19T21:36:53Z" + name: version + resourceVersion: "95936" + uid: e4a44894-5254-44a3-8c43-f56281b509eb +spec: + clusterID: faf5ca13-bbd4-42f4-956c-5e62afe7be1f + desiredUpdate: + architecture: "" + force: true + image: registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6 + version: "" +status: + availableUpdates: null + capabilities: + enabledCapabilities: + - Build + - CSISnapshot + - CloudControllerManager + - CloudCredential + - Console + - DeploymentConfig + - ImageRegistry + - Ingress + - Insights + - MachineAPI + - NodeTuning + - OperatorLifecycleManager + - OperatorLifecycleManagerV1 + - Storage + - baremetal + - marketplace + - openshift-samples + knownCapabilities: + - Build + - CSISnapshot + - CloudControllerManager + - CloudCredential + - Console + - DeploymentConfig + - ImageRegistry + - Ingress + - Insights + - MachineAPI + - NodeTuning + - OperatorLifecycleManager + - OperatorLifecycleManagerV1 + - Storage + - baremetal + - marketplace + - openshift-samples + conditions: + - lastTransitionTime: "2025-08-19T18:46:56Z" + message: The update channel has not been configured. 
+ reason: NoChannel + status: "False" + type: RetrievedUpdates + - lastTransitionTime: "2025-08-19T18:46:56Z" + message: Capabilities match configured spec + reason: AsExpected + status: "False" + type: ImplicitlyEnabledCapabilities + - lastTransitionTime: "2025-08-19T18:46:56Z" + message: Payload loaded version="4.20.0-0.nightly-2025-08-19-180353" image="registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6" + architecture="amd64" + reason: PayloadLoaded + status: "True" + type: ReleaseAccepted + - lastTransitionTime: "2025-08-19T19:17:40Z" + message: Done applying 4.20.0-0.nightly-2025-08-12-153542 + status: "True" + type: Available + - lastTransitionTime: "2025-08-19T21:06:23Z" + status: "False" + type: Failing + - lastTransitionTime: "2025-08-19T19:33:11Z" + message: 'Working towards 4.20.0-0.nightly-2025-08-19-180353: 747 of 957 done + (78% complete), waiting up to 40 minutes on authentication, openshift-apiserver' + reason: ClusterOperatorsDegraded + status: "True" + type: Progressing + - lastTransitionTime: "2025-08-19T19:33:15Z" + message: |- + Cluster should not be upgraded between minor versions for multiple reasons: DegradedPool,UpdateInProgress + * Cluster operator machine-config should not be upgraded between minor versions: One or more machine config pools are degraded, please see `oc get mcp` for further details and resolve before upgrading + * An update is already in progress and the details are in the Progressing condition + reason: MultipleReasons + status: "False" + type: Upgradeable + - lastTransitionTime: "2025-08-19T21:16:26Z" + message: 'Cluster operator machine-config should not be upgraded between minor + versions: One or more machine config pools are degraded, please see `oc get + mcp` for further details and resolve before upgrading' + reason: DegradedPool + status: "False" + type: UpgradeableClusterOperators + - lastTransitionTime: "2025-08-19T21:16:26Z" + message: An update is already in progress and the details are in the Progressing + condition + reason: UpdateInProgress + status: "True" + type: UpgradeableUpgradeInProgress + desired: + image: registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6 + version: 4.20.0-0.nightly-2025-08-19-180353 + history: + - acceptedRisks: |- + Target release version="" image="registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6" cannot be verified, but continuing anyway because the update was forced: unable to verify sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6 against keyrings: verifier-public-key-redhat + [2025-08-19T19:32:58Z: prefix sha256-6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6 in config map signatures-managed: no more signatures to check, 2025-08-19T19:32:59Z: unable to retrieve signature from https://storage.googleapis.com/openshift-release/official/signatures/openshift/release/sha256=6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6/signature-1: no more signatures to check, 2025-08-19T19:32:59Z: unable to retrieve signature from https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release/sha256=6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6/signature-1: no more signatures to check, 2025-08-19T19:32:59Z: parallel signature store wrapping containers/image signature store under 
https://storage.googleapis.com/openshift-release/official/signatures/openshift/release, containers/image signature store under https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release: no more signatures to check, 2025-08-19T19:32:59Z: serial signature store wrapping ClusterVersion signatureStores unset, falling back to default stores, parallel signature store wrapping containers/image signature store under https://storage.googleapis.com/openshift-release/official/signatures/openshift/release, containers/image signature store under https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release: no more signatures to check, 2025-08-19T19:32:59Z: serial signature store wrapping config maps in openshift-config-managed with label "release.openshift.io/verification-signatures", serial signature store wrapping ClusterVersion signatureStores unset, falling back to default stores, parallel signature store wrapping containers/image signature store under https://storage.googleapis.com/openshift-release/official/signatures/openshift/release, containers/image signature store under https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release: no more signatures to check] + Precondition "ClusterVersionRecommendedUpdate" failed because of "NoChannel": Configured channel is unset, so the recommended status of updating from 4.20.0-0.nightly-2025-08-12-153542 to 4.20.0-0.nightly-2025-08-19-180353 is unknown. + completionTime: null + image: registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:6ef995f80511e6edb232430e2a28edcbc5dd32dd124c0c81d79cd78da6e2ddf6 + startedTime: "2025-08-19T19:33:11Z" + state: Partial + verified: false + version: 4.20.0-0.nightly-2025-08-19-180353 + - completionTime: "2025-08-19T19:17:40Z" + image: registry.build11.ci.openshift.org/ci-op-pw3ghqzh/release@sha256:c63aa3fb2e82efdbd450c69eae7458d9e412eeaa7ff211c68bdf729d19bed89c + startedTime: "2025-08-19T18:46:56Z" + state: Completed + verified: false + version: 4.20.0-0.nightly-2025-08-12-153542 + observedGeneration: 3 + versionHash: wC9npWuRNGA= diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mc.yaml b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mc.yaml new file mode 100644 index 0000000000..94518dcaad --- /dev/null +++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mc.yaml @@ -0,0 +1,4303 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "00-master", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": "39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76344", + "uid": "a506e4d1-1cfa-4f52-aca9-445c77d09886" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20during%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deploment%20type.%0A%23%0A%23%20There%20is%20an
%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20for%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generator%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%2
2%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20device%20state%20file%20to%20prevent%20NM%20to%20unilaterally%20connect%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": "data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": 
"/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20the%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NM%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connect%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20folowed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependant%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20interface%20that%20matches%20ip%20from%20node%20ip%20hint%20file%0A%23%20in%20case%20file%20not%20exists%20return%20nothing%20and%0A%23%20fallback%20to%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20was%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%22%2Fconfigure-ovs-*%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20successful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20then%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20than%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20apiserver-watcher%0A%20%20namespace%3A%20openshift-kube-apiserver%0Aspec%3A%0A%20%20containers%3A%0A%20%20-%20name%3A%20apiserver-watcher%0A%20%20%20%20image%3A%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb%22%0A%20%20%20%20command%3A%0A%20%20%20%20%20%20-%20flock%0A%20%20%20%20%20%20-%20--verbose%0A%20%20%20%20%20%20-%20--exclusive%0A%20%20%20%20%20%20-%20--timeout%3D300%0A%20%20%20%20%20%20-%20%2Frootfs%2Frun%2Fcloud-routes%2Fapiserver-watcher.lock%0A%20%20%20%20%20%20-%20apiserver-watcher%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%22run%22%0A%20%20%20%20-%20%22--health-check-url%3Dhttps%3A%2F%2Fapi-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com%3A6443%2Freadyz%22%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20mountPath%3A%20%2Frootfs%0A%20%20%20%20%20%20name%3A%20rootfs%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20hostNetwork%3A%20true%0A%20%20hostPID%3A%20true%0A%20%20priorityClassName%3A%20system-node-critical%0A%20%20tolerations%3A%0A%20%20-%20operator%3A%20%22Exists%22%0A%20%20restartPolicy%3A%20Always%0A%20%20volumes%3A%0A%20%20-%20name%3A%20rootfs%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%2F%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/apiserver-watcher.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": 
"data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo
%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20
%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%20%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%24
1%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exist%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Prevent%20hairpin%20traffic%20when%20the%20apiserver%20is%20up%0A%0A%23%20As%20per%20the%20Azure%20documentation%20(https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fload-balancer%2Fconcepts%23limitations)%2C%0A%23%20if%20a%20backend%20is%20load-balanced%20to%20itself%2C%20then%20the%20traffic%20will%20be%20dropped.%0A%23%0A%23%20This%20is%20because%20the%20L3LB%20does%20DNAT%2C%20so%20while%20the%20outgoing%20packet%20has%20a%20destination%0A%23%20IP%20of%20the%20VIP%2C%20the%20incoming%20load-balanced%20packet%20has%20a%20destination%20IP%20of%20the%0A%23%20host.%20That%20means%20that%20it%20%22sees%22%20a%20syn%20with%20the%20source%20and%20destination%0A%23%20IPs%20of%20itself%2C%20and%20duly%20replies%20with%20a%20syn-ack%20back%20to%20itself.%20However%2C%20the%20client%0A%23%20socket%20expects%20a%20syn-ack%20with%20a%20source%20IP%20of%20the%20VIP%2C%20so%20it%20drops%20the%20packet.%0A%23%0A%23%20The%20solution%20is%20to%20redirect%20traffic%20destined%20to%20the%20lb%20vip%20back%20to%20ourselves.%0A%23%0A%23%20We%20check%20%2Frun%2Fcloud-routes%2F%20for%20files%20%24VIP.up%20and%20%24VIP.down.%20If%20the%20.up%20file%0A%23%20exists%2C%20then%20we%20redirect%20traffic%20destined%20for%20that%20vip%20to%20ourselves%20via%20nftables.%0A%23%20A%20systemd%20unit%20watches%20the%20directory%20for%20changes.%0A%23%0A%23%20TODO%3A%20Address%20the%20potential%20issue%20where%20apiserver-watcher%20could%20create%20multiple%20files%0A%23%20and%20openshift-azure-routes%20doesn't%20detect%20all%20of%20them%20because%20file%20change%20events%20are%20not%20queued%0A%23%20when%20the%20service%20is%20already%20running.%0A%23%20https%3A%2F%2Fgithub.com%2Fopenshift%2Fmachine-config-operator%2Fpull%2F3643%23issuecomment-1497234369%0A%0Aset%20-euo%20pipefail%0A%0A%23%20the%20list%20of%20load%20balancer%20IPs%20that%20are%20assigned%20to%20this%20node%0Adeclare%20-A%20v4vips%0Adeclare%20-A%20v6vips%0A%0ATABLE_NAME%3D%22azure-vips%22%0AVIPS_CHAIN%3D%22redirect-vips%22%0ARUN_DIR%3D%22%2Frun%2Fcloud-routes%22%0A%0Ainitialize()%20%7B%0A%20%20%20%20nft%20-f%20-%20%3C%3CEOF%0A%20%20%20%20%20%20%20%20add%20table%20inet%20%24%7BTABLE_NAME%7D%20%7B%20comment%20%22azure%20LB%20vip%20overriding%22%3B%20%7D%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20%7B%20type%20nat%20hook%20prerouting%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%20%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20goto%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%20%7B%20type%20nat%20hook%20output%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%2
0%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20output%20goto%20%24%7BVIPS_CHAIN%7D%0AEOF%0A%7D%0A%0Aremove_stale_routes()%20%7B%0A%20%20%20%20%23%23%20find%20extra%20ovn%20routes%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20routeVIPsV4%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip4%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v4route%20vips%3A%20%24%7BrouteVIPsV4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV4%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v4vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20routeVIPsV6%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip6%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v6route%20vips%3A%20%24%7BrouteVIPsV6%7D%22%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV6%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v6vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%7D%0A%0Async_rules()%20%7B%0A%20%20%20%20%23%20Construct%20the%20VIP%20lists.%20(The%20nftables%20syntax%20allows%20a%20trailing%20comma.)%0A%20%20%20%20v4vipset%3D%22%22%0A%20%20%20%20v6vipset%3D%22%22%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%
20%20%20%20%20%20%20%20%20%20%20v4vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv4vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20v6vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv6vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20echo%20%22synchronizing%20IPv4%20VIPs%20to%20(%24%7Bv4vipset%7D)%2C%20IPv6%20VIPS%20to%20(%24%7Bv6vipset%7D)%22%0A%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20echo%20%22flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%22%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv4vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip%20daddr%20%7B%20%24%7Bv4vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv6vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip6%20daddr%20%7B%20%24%7Bv6vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%7D%20%7C%20nft%20-f%20-%0A%7D%0A%0Aadd_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20add.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20ovnK8sMp0v4%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%243%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v4%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_c
luster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20ovnK8sMp0v6%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%244%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v6%7D%22%0A%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v6%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0Aclear_rules()%20%7B%0A%20%20%20%20echo%20%22clearing%20rules%20from%20%24%7BTABLE_NAME%7D%22%0A%20%20%20%20nft%20delete%20table%20inet%20%22%24%7BTABLE_NAME%7D%22%20%7C%7C%20true%0A%7D%0A%0Aclear_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20remove.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20echo%20%22clearing%20all%20routes%20from%20ovn-cluster-router%22%0A%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%0A%7D%0A%0A%23%20out%20parameters%3A%20v4vips%20v6vips%0Alist_lb_ips()%20%7B%0A%20%20%20%20for%20k%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v4vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20done%0A%20%20%20%20for%20k%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v6vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20d
one%0A%0A%0A%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20for%20file%20in%20%22%24%7BRUN_DIR%7D%22%2F*.up%20%3B%20do%0A%20%20%20%20%20%20%20%20vip%3D%24(basename%20%22%24%7Bfile%7D%22%20.up)%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-e%20%22%24%7BRUN_DIR%7D%2F%24%7Bvip%7D.down%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%24%7Bvip%7D%20has%20upfile%20and%20downfile%2C%20marking%20as%20down%22%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%24%7Bvip%7D%20%3D~%20%3A%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v6%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v6vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v4%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v4vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0A%0Acase%20%22%241%22%20in%0A%20%20%20%20start)%0A%20%20%20%20%20%20%20%20initialize%0A%20%20%20%20%20%20%20%20list_lb_ips%0A%20%20%20%20%20%20%20%20sync_rules%0A%20%20%20%20%20%20%20%20remove_stale_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20add_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20echo%20%22done%20applying%20vip%20rules%22%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20cleanup)%0A%20%20%20%20%20%20%20%20clear_rules%0A%20%20%20%20%20%20%20%20clear_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20*)%0A%20%20%20%20%20%20%20%20echo%20%24%22Usage%3A%20%240%20%7Bstart%7Ccleanup%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/opt/libexec/openshift-azure-routes.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0Aset%20-eou%20pipefail%0A%0A%23%20context%0Aintapi%3D%24(oc%20get%20infrastructures.config.openshift.io%20cluster%20-o%20%22jsonpath%3D%7B.status.apiServerInternalURI%7D%22)%0Acontext%3D%22%24(oc%20config%20current-context)%22%0A%23%20cluster%0Acluster%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.contexts%5B%3F(%40.name%3D%3D%5C%22%24context%5C%22)%5D.context.cluster%7D%22)%22%0Aserver%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.clusters%5B%3F(%40.name%3D%3D%5C%22%24cluster%5C%22)%5D.cluster.server%7D%22)%22%0A%23%20token%0Aca_crt_data%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.ca%5C.crt%7D%22%20%7C%20base64%20--decode)%22%0Anamespace%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20%20-o%20%22jsonpath%3D%7B.data.namespace%7D%22%20%7C%20base64%20--decode)%22%0Atoken%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.token%7D%22%20%7C%20base64%20--decode)%22%0A%0Aexport%20KUBECONFIG%3D%22%24(mktemp)%22%0Akubectl%20config%20set-credentials%20%22kubelet%22%20--token%3D%22%24token%22%20%3E%2Fdev%2Fnull%0Aca_crt%3D%22%24(mktemp)%22%3B%20echo%20%22%24ca_crt_data%22%20%3E%20%24ca_crt%0Akubectl%20config%20set-cluster%20%24cluster%20--server%3D%22%24intapi%22%20--certificate-authority%3D%22%24ca_crt%22%20--embed-certs%20%3E%2Fdev%2Fnull%0Akubectl%20config%20set-context%20kubelet%20--cluster%3D%22%24cluster%22%20--user%3D%22kubelet%22%20%3E%2Fdev%2Fnull%0Akubectl%20config%20use-context%20kubelet%20%3E%2Fdev%2Fnull%0Acat%20%22%24KUBECONFIG%22%0A" + }, + "mode": 493, + 
"overwrite": true, + "path": "/usr/local/bin/recover-kubeconfig.sh" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed
%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated NM run by dracut. 
We want NM to\n# consider all profiles' autoconnect priorities when it starts instead of\n# remembering which profile a device was activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", +        "enabled": true, +        "name": "NetworkManager-clean-initrd-state.service" +      }, +      { +        "dropins": [ +          { +            "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", +            "name": "01-kubens.conf" +          }, +          { +            "contents": "", +            "name": "10-mco-default-env.conf" +          }, +          { +            "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", +            "name": "10-mco-profile-unix-socket.conf" +          }, +          { +            "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", +            "name": "05-mco-ordering.conf" +          }, +          { +            "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", +            "name": "10-mco-default-madv.conf" +          } +        ], +        "name": "crio.service" +      }, +      { +        "dropins": [ +          { +            "contents": "[Unit]\nConditionPathExists=/enoent\n", +            "name": "mco-disabled.conf" +          } +        ], +        "name": "docker.socket" +      }, +      { +        "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", +        "enabled": true, +        "name": "firstboot-osupdate.target" +      }, +      { +        "dropins": [ +          { +            "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", +            "name": "01-after-configure-ovs.conf" +          } +        ], +        "name": "ipsec.service" +      }, +      { +        "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash /usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} ${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", +        "enabled": true, +        "name": "kubelet-auto-node-size.service" +      }, +      { +        "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", +        "name": "kubelet-dependencies.target" +      }, +      { +        "dropins": [ +          { +            "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", +            "name": "01-kubens.conf" +          }, +          { +            "contents": "", +            "name": "10-mco-default-env.conf" +          }, +          { +            "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", +            "name": "10-mco-default-madv.conf" +          } +        ], +        "name": "kubelet.service" +      }, +      { +        "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific 
mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c \"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 'mount --make-rshared' so it cascades internally\nExecStart=unshare --mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! 
/usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", +        "enabled": true, +        "name": "machine-config-daemon-pull.service" +      }, +      { +        "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", +        "enabled": true, +        "name": "nmstate-configuration.service" +      }, +      { +        "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", +        "enabled": true, +        "name": "node-valid-hostname.service" +      }, +      { +        "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n  until \\\n  /usr/bin/podman run --rm \\\n  --authfile /var/lib/kubelet/config.json \\\n  --env 'ENABLE_NODEIP_DEBUG=true' \\\n  --net=host \\\n  --security-opt label=disable \\\n  --volume /etc/systemd/system:/etc/systemd/system \\\n  --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n  quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bfb60187a2054ec1946357aeeb4866fa52c1508b05ba1b2b294c08e8e4e27e8 \\\n  node-ip \\\n  set \\\n  --retry-on-failure \\\n  --network-type OVNKubernetes \\\n  ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n  do \\\n  sleep 5; \\\n  done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", +        "enabled": false, +        "name": "nodeip-configuration.service" +      }, +      { +        "contents": "[Unit]\nDescription=Watch for downfile changes\nBefore=kubelet-dependencies.target\n\n[Path]\nPathChanged=/run/cloud-routes/\nMakeDirectory=true\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", +        "enabled": true, +        "name": "openshift-azure-routes.path" +      }, +      { +        "contents": "[Unit]\nDescription=Work around Azure load balancer hairpin\n# We don't need to do this on the firstboot\nAfter=firstboot-osupdate.target\n\n[Service]\nType=simple\nExecStart=/bin/bash /opt/libexec/openshift-azure-routes.sh start\nUser=root\nSyslogIdentifier=openshift-azure-routes\n", +        "enabled": false, +        "name": "openshift-azure-routes.service" +      }, +      { +        "enabled": true, +        "name": "openvswitch.service" +      }, +      { +        "contents": "[Unit]\n# Kdump will generate its initramfs based on the running state when kdump.service runs\n# If OVS has already run, the kdump fails to gather a working network config,\n# which prevents network log exports, such as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", +        "enabled": true, +        "name": "ovs-configuration.service" +      }, +      { +        "dropins": [ +          { +            "contents": "[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", +            "name": "10-ovs-vswitchd-restart.conf" +          } +        ], +        "name": "ovs-vswitchd.service" +      }, +      { +        "dropins": [ +          { +            "contents": "[Service]\nRestart=always\n", +            "name": "10-ovsdb-restart.conf" +          } +        ], +        "enabled": true, +        "name": "ovsdb-server.service" +      }, +      { +        "dropins": [ +          { +            "contents": "", +            "name": 
"10-mco-default-env.conf" + }, + { + "contents": "# See https://github.com/openshift/machine-config-operator/issues/1897\n[Service]\nNice=10\nIOSchedulingClass=best-effort\nIOSchedulingPriority=6\n", + "name": "mco-controlplane-nice.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "00-worker", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": "39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76345", + "uid": "8ef74734-f27a-4df6-9213-9e339fc1522b" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20during%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deploment%20type.%0A%23%0A%23%20There%20is%20an
%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20for%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generator%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%2
2%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20device%20state%20file%20to%20prevent%20NM%20to%20unilaterally%20connect%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": "data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": 
"/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20the%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NM%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connect%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20folowed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependant%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20interface%20that%20matches%20ip%20from%20node%20ip%20hint%20file%0A%23%20in%20case%20file%20not%20exists%20return%20nothing%20and%0A%23%20fallback%20to%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20was%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20succesful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20the%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20then%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": "data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%
20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((tota
l_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%2
0%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%241%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exists%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated NM run by dracut. We want NM to\n# consider all profiles autoconnect priority when it starts instead of\n# remembering which profile was a device activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "NetworkManager-clean-initrd-state.service" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", + "name": "10-mco-profile-unix-socket.conf" + }, + { + "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", + "name": "05-mco-ordering.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "crio.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "docker.socket" + }, + { + "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", + "enabled": true, + "name": "firstboot-osupdate.target" + }, + { + "dropins": [ + { + "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", + "name": 
"01-after-configure-ovs.conf" + } + ], + "name": "ipsec.service" + }, + { + "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash /usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} ${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "kubelet-auto-node-size.service" + }, + { + "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", + "name": "kubelet-dependencies.target" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "kubelet.service" + }, + { + "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c \"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 'mount --make-rshared' so it cascades internally\nExecStart=unshare --mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 
'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! /usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", + "enabled": true, + "name": "machine-config-daemon-pull.service" + }, + { + "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "nmstate-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "node-valid-hostname.service" + }, + { + "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/bin/podman run --rm \\\n --authfile /var/lib/kubelet/config.json \\\n --env 'ENABLE_NODEIP_DEBUG=true' \\\n --net=host \\\n --security-opt label=disable \\\n --volume /etc/systemd/system:/etc/systemd/system \\\n --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bfb60187a2054ec1946357aeeb4866fa52c1508b05ba1b2b294c08e8e4e27e8 \\\n node-ip \\\n set \\\n --retry-on-failure \\\n --network-type OVNKubernetes \\\n ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n do \\\n sleep 5; \\\n done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": false, + "name": "nodeip-configuration.service" + }, + { + "enabled": true, + "name": "openvswitch.service" + }, + { + "contents": "[Unit]\n# Kdump will generate it's initramfs based on the running state when kdump.service run\n# If OVS has already run, the kdump fails to gather a working network config,\n# which prevent network log exports, sush as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "ovs-configuration.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", + "name": "10-ovs-vswitchd-restart.conf" + } + ], + "name": "ovs-vswitchd.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\n", + "name": "10-ovsdb-restart.conf" + } + ], + "enabled": true, + "name": "ovsdb-server.service" + }, + { + "dropins": [ + { + "contents": "", + "name": "10-mco-default-env.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and 
usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "01-master-container-runtime", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": "39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76347", + "uid": "93dd7394-44cb-4d1c-982e-71faf2647a97" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": 
"data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%
20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "01-master-kubelet", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": 
"39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76348", + "uid": "25914833-869f-4811-bb3d-0ceeea753c10" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:,kind%3A%20KubeletConfiguration%0AapiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0AcgroupDriver%3A%20systemd%0AcgroupRoot%3A%20%2F%0AclusterDNS%3A%0A%20%20-%20172.30.0.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%2050Mi%0AenableSystemLogQuery%3A%20true%0AmaxPods%3A%20250%0AkubeAPIQPS%3A%2050%0AkubeAPIBurst%3A%20100%0ApodPidsLimit%3A%204096%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AserializeImagePulls%3A%20false%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AsystemCgroups%3A%20%2Fsystem.slice%0AnodeStatusUpdateFrequency%3A%2010s%0AnodeStatusReportFrequency%3A%205m%0AserverTLSBootstrap%3A%20true%0AtlsMinVersion%3A%20VersionTLS12%0AtlsCipherSuites%3A%0A%20%20%20%20-%20TLS_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_CHACHA20_POLY1305_SHA256%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper 
/usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --cloud-provider=external \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet.service" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "01-worker-container-runtime", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": "39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76349", + "uid": "7c925454-ec5b-4a63-bf96-327ea8f5cc9b" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": 
"data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%
20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:56Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "01-worker-kubelet", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ControllerConfig", + "name": "machine-config-controller", + "uid": 
"39f6a6c9-fced-4161-8602-9f979cf21a91" + } + ], + "resourceVersion": "76351", + "uid": "1601a526-16cf-4f9f-94a8-425cf016401f" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:,kind%3A%20KubeletConfiguration%0AapiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0AcgroupDriver%3A%20systemd%0AcgroupRoot%3A%20%2F%0AclusterDNS%3A%0A%20%20-%20172.30.0.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%2050Mi%0AenableSystemLogQuery%3A%20true%0AmaxPods%3A%20250%0AkubeAPIQPS%3A%2050%0AkubeAPIBurst%3A%20100%0ApodPidsLimit%3A%204096%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AserializeImagePulls%3A%20false%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AsystemCgroups%3A%20%2Fsystem.slice%0AnodeStatusUpdateFrequency%3A%2010s%0AnodeStatusReportFrequency%3A%205m%0AserverTLSBootstrap%3A%20true%0AtlsMinVersion%3A%20VersionTLS12%0AtlsCipherSuites%3A%0A%20%20%20%20-%20TLS_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_CHACHA20_POLY1305_SHA256%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A%20%20%20%20-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%0A%20%20%20%20-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper 
/usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --cloud-provider=external \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet.service" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T20:57:25Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "97-master-generated-kubelet", + "resourceVersion": "76343", + "uid": "86e5bdc0-1c21-4b4b-84ab-c04cda3a0856" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "compression": "", + "source": 
"data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZ
WdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + } + ] + } + }, + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + "cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T20:57:26Z", + "generation": 1, + 
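Reviewer note, not part of the captured fixture: the 97-master-generated-kubelet MachineConfig above (and the 97-worker-generated-kubelet one that follows) embeds the rendered KubeletConfiguration as a base64 `data:` URL, which is opaque in review. Below is a minimal Go sketch for decoding such a payload when inspecting or refreshing these fixtures; the helper name and the short sample payload are assumptions for illustration, not fixture contents. Note also that Ignition file modes are plain decimal in the JSON, so "mode": 420 is octal 0644 and 493 is 0755.

// Illustrative helper (assumed, not part of this patch): decode the base64
// Ignition "data:" URLs used by the generated-kubelet MachineConfigs so the
// embedded KubeletConfiguration can be read as plain YAML.
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeBase64DataURL extracts and decodes the payload of a
// "data:text/plain;charset=utf-8;base64,..." source string.
func decodeBase64DataURL(src string) (string, error) {
	i := strings.Index(src, "base64,")
	if i < 0 {
		return "", fmt.Errorf("not a base64 data URL")
	}
	raw, err := base64.StdEncoding.DecodeString(src[i+len("base64,"):])
	if err != nil {
		return "", err
	}
	return string(raw), nil
}

func main() {
	// Stand-in payload ("kind: KubeletConfiguration\n"); the real fixtures
	// carry the full rendered config, including the featureGates map.
	src := "data:text/plain;charset=utf-8;base64,a2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24K"
	cfg, err := decodeBase64DataURL(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(cfg)
	// Ignition file modes are decimal in JSON: 420 == 0644, 493 == 0755.
	fmt.Printf("mode 420 = %o, mode 493 = %o\n", 420, 493)
}

Decoding the master and worker payloads side by side also makes it easy to diff the two generated kubelet configs when the fixtures are re-captured.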
"labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "97-worker-generated-kubelet", + "resourceVersion": "76363", + "uid": "b5b5502e-153a-4e4e-87b0-d54299794622" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "compression": "", + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE
1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + } + ] + } + }, + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + 
"cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:56Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "98-master-generated-kubelet", + "resourceVersion": "76341", + "uid": "4537ccce-8f4b-4228-826c-5e92ccaa3326" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "compression": "", + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZm
lnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKL
SBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:57Z", + "generation": 2, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "98-worker-generated-kubelet", + "resourceVersion": "76350", + "uid": "90df4164-57b3-4caa-b6f3-39351ac1a29f" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + }, + "storage": { + "files": [ + { + "contents": { + "compression": "", + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb
2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NI
QTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "creationTimestamp": "2025-08-19T18:47:29Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "99-installer-ignition-master", + "resourceVersion": "2340", + "uid": "57b0b996-c902-417f-8220-783e8bdf7510" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "security": { + "tls": { + "certificateAuthorities": [ + { + "source": "data:text/plain;charset=utf-8;base64,LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFRENDQWZpZ0F3SUJBZ0lJU2F1czJXZitBNVl3RFFZSktvWklodmNOQVFFTEJRQXdKakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1SQXdEZ1lEVlFRREV3ZHliMjkwTFdOaE1CNFhEVEkxTURneE9URTRNVEUxTkZvWApEVE0xTURneE56RTRNVEUxTkZvd0pqRVNNQkFHQTFVRUN4TUpiM0JsYm5Ob2FXWjBNUkF3RGdZRFZRUURFd2R5CmIyOTBMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE5Qy9PSVRxLzZod1kKUnBNbWtjTDlRSjM2Y1BjbzhXcnBzSW5OYXZiWXNBZjRHZE1VNm9RUEs0Q1FHOTFpa3NFRjY1ZkZFZ25yWWtabApSN0JMNkdPcEh5U3o5aUd6SU1JRWVGTEEzS2p1QnR1VzdqUXlmbjNVUUpUQ0FJTjl4dC9RTG4xM3N0U1U1blVwCjMxbnEvYStYZUxTTWV5YmZxc3dDeEwwTVpZZmRyUUdaMmUxNWJucmpITHQ0M1BJallHSTIwamV4dUN1bHhFeW4KSjB4VktDT1BRVnQyejc1V2ZuS3NDbGpDckpUbXNKQUEzQkkrcWdsZUxZYWZqY3hhejdwTzBCTnJRUTVQV2FYMApQM1pCVU9jZXVGOEJRTkFGVTRJRTFFS0pzTlZRTW85VldOWG1NYUZKZEdrSXJZVjRwY1FSd0RRakRrKzZsUTV6CjFRSERZYmh0WlFJREFRQUJvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBZEJnTlZIUTRFRmdRVVdOUU11TUxGS1BwSEwrS1hMWnNEa2p0WXBDTXdEUVlKS29aSWh2Y05BUUVMQlFBRApnZ0VCQUprbWZRWW0rWERnZ0NCTS9IdFpOQnpNQ0U4c2U1ZENlV0xkS1c0T25LMVR0dUlsNytQSnpOUkEvL3AxCkIrblF0MjloSFpLK2tGYjlEL3JWSTFiUmtLa1l0QlFIeVVMRFc2RGxMVGp4b0cwdkJ0ZXRwRDBmTEIwbkJnenYKbGhyVkU1V3JpcEZGQnhncWMwK01vS3FMcWVHQmlWNHZxbDR5c1daUGI5VVg4aFBzeHdYNjJUS1g0VG9tMlJ1dQordVN6MkRQS2ZMLzVHRVhXUVR5dGFrTytRYUpKdEJtM280Q1EvRmVLa0ZybloraEcwTk9NNkxlQTVQVUVJQ3FLCmhjMmRSRytWc1A2WERoOGZlU2ZyWStkMVcrSlJTd2k0b2lrT2xNY2dpdjB5d1RVYnR1SSt1WE1mMnJQR1RLMzYKd1oxTGtXNElyb2VGZEJvM1MxUXBWb2VGRmlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + } + ] + } + }, + "version": "3.2.0" + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-id/coreos-boot-disk", + "partitions": [ + { + "label": "var", + "number": 5, + "sizeMiB": 0, + "startMiB": 50000 + } + ] + } + ], + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\n\n[Mount]\nWhere=/var\nWhat=/dev/disk/by-partlabel/var\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target\n", + "enabled": true, + "name": "var.mount" + } + ] + } + }, + "fips": false, + "kernelArguments": 
null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:55Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "99-master-generated-registries", + "ownerReferences": [ + { + "apiVersion": "config.openshift.io/v1", + "kind": "Image", + "name": "cluster", + "uid": "58a74fa6-f772-4026-9d1e-52f49603f992" + } + ], + "resourceVersion": "76346", + "uid": "9bcc045a-a61c-4e37-a508-7053c7ed81f1" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "creationTimestamp": "2025-08-19T18:47:30Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "master" + }, + "name": "99-master-ssh", + "resourceVersion": "2390", + "uid": "4dee86a5-c20d-458e-89d9-0dcbdb6513bd" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.2.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae" + }, + "creationTimestamp": "2025-08-19T18:55:56Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "99-worker-generated-registries", + "ownerReferences": [ + { + "apiVersion": "config.openshift.io/v1", + "kind": "Image", + "name": "cluster", + "uid": "58a74fa6-f772-4026-9d1e-52f49603f992" + } + ], + "resourceVersion": "76342", + "uid": "d9ab6541-c585-401f-bd95-b8a1120e31a8" + }, + "spec": { + "baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.5.0" + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "creationTimestamp": "2025-08-19T18:47:31Z", + "generation": 1, + "labels": { + "machineconfiguration.openshift.io/role": "worker" + }, + "name": "99-worker-ssh", + "resourceVersion": "2412", + "uid": "c28bcf7d-e42c-41ab-bcf7-67f623fdebc9" + }, + "spec": { + 
"baseOSExtensionsContainerImage": "", + "config": { + "ignition": { + "version": "3.2.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + } + }, + "fips": false, + "kernelArguments": null, + "kernelType": "", + "osImageURL": "" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae", + "machineconfiguration.openshift.io/release-image-version": "4.20.0-0.nightly-2025-08-19-180353" + }, + "creationTimestamp": "2025-08-19T20:57:30Z", + "generation": 1, + "name": "rendered-master-505a1e08a37430cbe1ee421928f810ec", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "MachineConfigPool", + "name": "master", + "uid": "8bcdea4d-5638-4bd5-9ff2-b0d669977136" + } + ], + "resourceVersion": "76442", + "uid": "f73af002-dd10-425e-8c95-a5001dd25c28" + }, + "spec": { + "baseOSExtensionsContainerImage": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0", + "config": { + "ignition": { + "security": { + "tls": { + "certificateAuthorities": [ + { + "source": 
"data:text/plain;charset=utf-8;base64,LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFRENDQWZpZ0F3SUJBZ0lJU2F1czJXZitBNVl3RFFZSktvWklodmNOQVFFTEJRQXdKakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1SQXdEZ1lEVlFRREV3ZHliMjkwTFdOaE1CNFhEVEkxTURneE9URTRNVEUxTkZvWApEVE0xTURneE56RTRNVEUxTkZvd0pqRVNNQkFHQTFVRUN4TUpiM0JsYm5Ob2FXWjBNUkF3RGdZRFZRUURFd2R5CmIyOTBMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE5Qy9PSVRxLzZod1kKUnBNbWtjTDlRSjM2Y1BjbzhXcnBzSW5OYXZiWXNBZjRHZE1VNm9RUEs0Q1FHOTFpa3NFRjY1ZkZFZ25yWWtabApSN0JMNkdPcEh5U3o5aUd6SU1JRWVGTEEzS2p1QnR1VzdqUXlmbjNVUUpUQ0FJTjl4dC9RTG4xM3N0U1U1blVwCjMxbnEvYStYZUxTTWV5YmZxc3dDeEwwTVpZZmRyUUdaMmUxNWJucmpITHQ0M1BJallHSTIwamV4dUN1bHhFeW4KSjB4VktDT1BRVnQyejc1V2ZuS3NDbGpDckpUbXNKQUEzQkkrcWdsZUxZYWZqY3hhejdwTzBCTnJRUTVQV2FYMApQM1pCVU9jZXVGOEJRTkFGVTRJRTFFS0pzTlZRTW85VldOWG1NYUZKZEdrSXJZVjRwY1FSd0RRakRrKzZsUTV6CjFRSERZYmh0WlFJREFRQUJvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBZEJnTlZIUTRFRmdRVVdOUU11TUxGS1BwSEwrS1hMWnNEa2p0WXBDTXdEUVlKS29aSWh2Y05BUUVMQlFBRApnZ0VCQUprbWZRWW0rWERnZ0NCTS9IdFpOQnpNQ0U4c2U1ZENlV0xkS1c0T25LMVR0dUlsNytQSnpOUkEvL3AxCkIrblF0MjloSFpLK2tGYjlEL3JWSTFiUmtLa1l0QlFIeVVMRFc2RGxMVGp4b0cwdkJ0ZXRwRDBmTEIwbkJnenYKbGhyVkU1V3JpcEZGQnhncWMwK01vS3FMcWVHQmlWNHZxbDR5c1daUGI5VVg4aFBzeHdYNjJUS1g0VG9tMlJ1dQordVN6MkRQS2ZMLzVHRVhXUVR5dGFrTytRYUpKdEJtM280Q1EvRmVLa0ZybloraEcwTk9NNkxlQTVQVUVJQ3FLCmhjMmRSRytWc1A2WERoOGZlU2ZyWStkMVcrSlJTd2k0b2lrT2xNY2dpdjB5d1RVYnR1SSt1WE1mMnJQR1RLMzYKd1oxTGtXNElyb2VGZEJvM1MxUXBWb2VGRmlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + } + ] + } + }, + "version": "3.5.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-id/coreos-boot-disk", + "partitions": [ + { + "label": "var", + "number": 5, + "sizeMiB": 0, + "startMiB": 50000 + } + ] + } + ], + "files": [ + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20during%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deploment%20type.%0A%23%0A%23%20There%20is%20an
%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20for%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generator%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%2
2%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20device%20state%20file%20to%20prevent%20NM%20to%20unilaterally%20connect%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": "data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": 
"/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connected%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependent%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20the%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20a%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20the%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20the%20interface%20that%20matches%20the%20ip%20from%20the%20node%20ip%20hint%20file%0A%23%20in%20case%20the%20file%20does%20not%20exist%2C%20return%20nothing%20and%0A%23%20fall%20back%20to%20the%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20to%20be%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-%22*%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20successful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20then%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20than%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source":
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20apiserver-watcher%0A%20%20namespace%3A%20openshift-kube-apiserver%0Aspec%3A%0A%20%20containers%3A%0A%20%20-%20name%3A%20apiserver-watcher%0A%20%20%20%20image%3A%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb%22%0A%20%20%20%20command%3A%0A%20%20%20%20%20%20-%20flock%0A%20%20%20%20%20%20-%20--verbose%0A%20%20%20%20%20%20-%20--exclusive%0A%20%20%20%20%20%20-%20--timeout%3D300%0A%20%20%20%20%20%20-%20%2Frootfs%2Frun%2Fcloud-routes%2Fapiserver-watcher.lock%0A%20%20%20%20%20%20-%20apiserver-watcher%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%22run%22%0A%20%20%20%20-%20%22--health-check-url%3Dhttps%3A%2F%2Fapi-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com%3A6443%2Freadyz%22%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20mountPath%3A%20%2Frootfs%0A%20%20%20%20%20%20name%3A%20rootfs%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20hostNetwork%3A%20true%0A%20%20hostPID%3A%20true%0A%20%20priorityClassName%3A%20system-node-critical%0A%20%20tolerations%3A%0A%20%20-%20operator%3A%20%22Exists%22%0A%20%20restartPolicy%3A%20Always%0A%20%20volumes%3A%0A%20%20-%20name%3A%20rootfs%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%2F%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/apiserver-watcher.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": 
"data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo
%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20
%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%20%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%24
1%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exist%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Prevent%20hairpin%20traffic%20when%20the%20apiserver%20is%20up%0A%0A%23%20As%20per%20the%20Azure%20documentation%20(https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fload-balancer%2Fconcepts%23limitations)%2C%0A%23%20if%20a%20backend%20is%20load-balanced%20to%20itself%2C%20then%20the%20traffic%20will%20be%20dropped.%0A%23%0A%23%20This%20is%20because%20the%20L3LB%20does%20DNAT%2C%20so%20while%20the%20outgoing%20packet%20has%20a%20destination%0A%23%20IP%20of%20the%20VIP%2C%20the%20incoming%20load-balanced%20packet%20has%20a%20destination%20IP%20of%20the%0A%23%20host.%20That%20means%20that%20it%20%22sees%22%20a%20syn%20with%20the%20source%20and%20destination%0A%23%20IPs%20of%20itself%2C%20and%20duly%20replies%20with%20a%20syn-ack%20back%20to%20itself.%20However%2C%20the%20client%0A%23%20socket%20expects%20a%20syn-ack%20with%20a%20source%20IP%20of%20the%20VIP%2C%20so%20it%20drops%20the%20packet.%0A%23%0A%23%20The%20solution%20is%20to%20redirect%20traffic%20destined%20to%20the%20lb%20vip%20back%20to%20ourselves.%0A%23%0A%23%20We%20check%20%2Frun%2Fcloud-routes%2F%20for%20files%20%24VIP.up%20and%20%24VIP.down.%20If%20the%20.up%20file%0A%23%20exists%2C%20then%20we%20redirect%20traffic%20destined%20for%20that%20vip%20to%20ourselves%20via%20nftables.%0A%23%20A%20systemd%20unit%20watches%20the%20directory%20for%20changes.%0A%23%0A%23%20TODO%3A%20Address%20the%20potential%20issue%20where%20apiserver-watcher%20could%20create%20multiple%20files%0A%23%20and%20openshift-azure-routes%20doesn't%20detect%20all%20of%20them%20because%20file%20change%20events%20are%20not%20queued%0A%23%20when%20the%20service%20is%20already%20running.%0A%23%20https%3A%2F%2Fgithub.com%2Fopenshift%2Fmachine-config-operator%2Fpull%2F3643%23issuecomment-1497234369%0A%0Aset%20-euo%20pipefail%0A%0A%23%20the%20list%20of%20load%20balancer%20IPs%20that%20are%20assigned%20to%20this%20node%0Adeclare%20-A%20v4vips%0Adeclare%20-A%20v6vips%0A%0ATABLE_NAME%3D%22azure-vips%22%0AVIPS_CHAIN%3D%22redirect-vips%22%0ARUN_DIR%3D%22%2Frun%2Fcloud-routes%22%0A%0Ainitialize()%20%7B%0A%20%20%20%20nft%20-f%20-%20%3C%3CEOF%0A%20%20%20%20%20%20%20%20add%20table%20inet%20%24%7BTABLE_NAME%7D%20%7B%20comment%20%22azure%20LB%20vip%20overriding%22%3B%20%7D%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20%7B%20type%20nat%20hook%20prerouting%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%20%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20goto%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%20%7B%20type%20nat%20hook%20output%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%2
0%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20output%20goto%20%24%7BVIPS_CHAIN%7D%0AEOF%0A%7D%0A%0Aremove_stale_routes()%20%7B%0A%20%20%20%20%23%23%20find%20extra%20ovn%20routes%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20routeVIPsV4%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip4%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v4route%20vips%3A%20%24%7BrouteVIPsV4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV4%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v4vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20routeVIPsV6%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip6%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v6route%20vips%3A%20%24%7BrouteVIPsV6%7D%22%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV6%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v6vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%7D%0A%0Async_rules()%20%7B%0A%20%20%20%20%23%20Construct%20the%20VIP%20lists.%20(The%20nftables%20syntax%20allows%20a%20trailing%20comma.)%0A%20%20%20%20v4vipset%3D%22%22%0A%20%20%20%20v6vipset%3D%22%22%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%
20%20%20%20%20%20%20%20%20%20%20v4vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv4vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20v6vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv6vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20echo%20%22synchronizing%20IPv4%20VIPs%20to%20(%24%7Bv4vipset%7D)%2C%20IPv6%20VIPS%20to%20(%24%7Bv6vipset%7D)%22%0A%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20echo%20%22flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%22%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv4vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip%20daddr%20%7B%20%24%7Bv4vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv6vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip6%20daddr%20%7B%20%24%7Bv6vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%7D%20%7C%20nft%20-f%20-%0A%7D%0A%0Aadd_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20add.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20ovnK8sMp0v4%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%243%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v4%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_c
luster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20ovnK8sMp0v6%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%244%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v6%7D%22%0A%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v6%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0Aclear_rules()%20%7B%0A%20%20%20%20echo%20%22clearing%20rules%20from%20%24%7BTABLE_NAME%7D%22%0A%20%20%20%20nft%20delete%20table%20inet%20%22%24%7BTABLE_NAME%7D%22%20%7C%7C%20true%0A%7D%0A%0Aclear_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20remove.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20echo%20%22clearing%20all%20routes%20from%20ovn-cluster-router%22%0A%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%0A%7D%0A%0A%23%20out%20parameters%3A%20v4vips%20v6vips%0Alist_lb_ips()%20%7B%0A%20%20%20%20for%20k%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v4vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20done%0A%20%20%20%20for%20k%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v6vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20d
one%0A%0A%0A%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20for%20file%20in%20%22%24%7BRUN_DIR%7D%22%2F*.up%20%3B%20do%0A%20%20%20%20%20%20%20%20vip%3D%24(basename%20%22%24%7Bfile%7D%22%20.up)%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-e%20%22%24%7BRUN_DIR%7D%2F%24%7Bvip%7D.down%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%24%7Bvip%7D%20has%20upfile%20and%20downfile%2C%20marking%20as%20down%22%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%24%7Bvip%7D%20%3D~%20%3A%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v6%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v6vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v4%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v4vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0A%0Acase%20%22%241%22%20in%0A%20%20%20%20start)%0A%20%20%20%20%20%20%20%20initialize%0A%20%20%20%20%20%20%20%20list_lb_ips%0A%20%20%20%20%20%20%20%20sync_rules%0A%20%20%20%20%20%20%20%20remove_stale_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20add_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20echo%20%22done%20applying%20vip%20rules%22%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20cleanup)%0A%20%20%20%20%20%20%20%20clear_rules%0A%20%20%20%20%20%20%20%20clear_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20*)%0A%20%20%20%20%20%20%20%20echo%20%24%22Usage%3A%20%240%20%7Bstart%7Ccleanup%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/opt/libexec/openshift-azure-routes.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0Aset%20-eou%20pipefail%0A%0A%23%20context%0Aintapi%3D%24(oc%20get%20infrastructures.config.openshift.io%20cluster%20-o%20%22jsonpath%3D%7B.status.apiServerInternalURI%7D%22)%0Acontext%3D%22%24(oc%20config%20current-context)%22%0A%23%20cluster%0Acluster%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.contexts%5B%3F(%40.name%3D%3D%5C%22%24context%5C%22)%5D.context.cluster%7D%22)%22%0Aserver%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.clusters%5B%3F(%40.name%3D%3D%5C%22%24cluster%5C%22)%5D.cluster.server%7D%22)%22%0A%23%20token%0Aca_crt_data%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.ca%5C.crt%7D%22%20%7C%20base64%20--decode)%22%0Anamespace%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20%20-o%20%22jsonpath%3D%7B.data.namespace%7D%22%20%7C%20base64%20--decode)%22%0Atoken%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.token%7D%22%20%7C%20base64%20--decode)%22%0A%0Aexport%20KUBECONFIG%3D%22%24(mktemp)%22%0Akubectl%20config%20set-credentials%20%22kubelet%22%20--token%3D%22%24token%22%20%3E%2Fdev%2Fnull%0Aca_crt%3D%22%24(mktemp)%22%3B%20echo%20%22%24ca_crt_data%22%20%3E%20%24ca_crt%0Akubectl%20config%20set-cluster%20%24cluster%20--server%3D%22%24intapi%22%20--certificate-authority%3D%22%24ca_crt%22%20--embed-certs%20%3E%2Fdev%2Fnull%0Akubectl%20config%20set-context%20kubelet%20--cluster%3D%22%24cluster%22%20--user%3D%22kubelet%22%20%3E%2Fdev%2Fnull%0Akubectl%20config%20use-context%20kubelet%20%3E%2Fdev%2Fnull%0Acat%20%22%24KUBECONFIG%22%0A" + }, + "mode": 493, + 
"overwrite": true, + "path": "/usr/local/bin/recover-kubeconfig.sh" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed
%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + }, + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": 
"data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%
20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + }, + { + "contents": { + "source": 
"data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNY
WNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dD
TV9TSEEzODQKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ], + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated NM run by dracut. We want NM to\n# consider all profiles autoconnect priority when it starts instead of\n# remembering which profile was a device activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "NetworkManager-clean-initrd-state.service" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", + "name": "10-mco-profile-unix-socket.conf" + }, + { + "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", + "name": "05-mco-ordering.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "crio.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "docker.socket" + }, + { + "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", + "enabled": true, + "name": "firstboot-osupdate.target" + }, + { + "dropins": [ + { + "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", + "name": "01-after-configure-ovs.conf" + } + ], + "name": "ipsec.service" + }, + { + "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash 
/usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} ${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "kubelet-auto-node-size.service" + }, + { + "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", + "name": "kubelet-dependencies.target" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper /usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --cloud-provider=external \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "enabled": true, + "name": "kubelet.service" + }, + { + "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c 
\"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 'mount --make-rshared' so it cascades internally\nExecStart=unshare --mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! 
/usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", + "enabled": true, + "name": "machine-config-daemon-pull.service" + }, + { + "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "nmstate-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "node-valid-hostname.service" + }, + { + "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/bin/podman run --rm \\\n --authfile /var/lib/kubelet/config.json \\\n --env 'ENABLE_NODEIP_DEBUG=true' \\\n --net=host \\\n --security-opt label=disable \\\n --volume /etc/systemd/system:/etc/systemd/system \\\n --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bfb60187a2054ec1946357aeeb4866fa52c1508b05ba1b2b294c08e8e4e27e8 \\\n node-ip \\\n set \\\n --retry-on-failure \\\n --network-type OVNKubernetes \\\n ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n do \\\n sleep 5; \\\n done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": false, + "name": "nodeip-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Watch for downfile changes\nBefore=kubelet-dependencies.target\n\n[Path]\nPathChanged=/run/cloud-routes/\nMakeDirectory=true\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "openshift-azure-routes.path" + }, + { + "contents": "[Unit]\nDescription=Work around Azure load balancer hairpin\n# We don't need to do this on the firstboot\nAfter=firstboot-osupdate.target\n\n[Service]\nType=simple\nExecStart=/bin/bash /opt/libexec/openshift-azure-routes.sh start\nUser=root\nSyslogIdentifier=openshift-azure-routes\n", + "enabled": false, + "name": "openshift-azure-routes.service" + }, + { + "enabled": true, + "name": "openvswitch.service" + }, + { + "contents": "[Unit]\n# Kdump will generate it's initramfs based on the running state when kdump.service run\n# If OVS has already run, the kdump fails to gather a working network config,\n# which prevent network log exports, sush as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "ovs-configuration.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", + "name": "10-ovs-vswitchd-restart.conf" + } + ], + "name": "ovs-vswitchd.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\n", + "name": "10-ovsdb-restart.conf" + } + ], + "enabled": true, + "name": "ovsdb-server.service" + }, + { + "dropins": [ + { + "contents": "", + "name": 
"10-mco-default-env.conf" + }, + { + "contents": "# See https://github.com/openshift/machine-config-operator/issues/1897\n[Service]\nNice=10\nIOSchedulingClass=best-effort\nIOSchedulingPriority=6\n", + "name": "mco-controlplane-nice.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + }, + { + "contents": "\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\n\n[Mount]\nWhere=/var\nWhat=/dev/disk/by-partlabel/var\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target\n", + "enabled": true, + "name": "var.mount" + } + ] + } + }, + "extensions": [], + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + "cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "default", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "1b20021e15e2252573d2369f63de846c588222ae", + "machineconfiguration.openshift.io/release-image-version": "4.20.0-0.nightly-2025-08-12-153542" + }, + "creationTimestamp": "2025-08-19T18:55:59Z", + "generation": 1, + "name": "rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "MachineConfigPool", + "name": "master", + "uid": "8bcdea4d-5638-4bd5-9ff2-b0d669977136" + } + ], + "resourceVersion": "12814", + "uid": "fb9255c0-a23b-4228-8aa7-248df17610eb" + }, + "spec": { + "baseOSExtensionsContainerImage": 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33", + "config": { + "ignition": { + "security": { + "tls": { + "certificateAuthorities": [ + { + "source": "data:text/plain;charset=utf-8;base64,LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFRENDQWZpZ0F3SUJBZ0lJU2F1czJXZitBNVl3RFFZSktvWklodmNOQVFFTEJRQXdKakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1SQXdEZ1lEVlFRREV3ZHliMjkwTFdOaE1CNFhEVEkxTURneE9URTRNVEUxTkZvWApEVE0xTURneE56RTRNVEUxTkZvd0pqRVNNQkFHQTFVRUN4TUpiM0JsYm5Ob2FXWjBNUkF3RGdZRFZRUURFd2R5CmIyOTBMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE5Qy9PSVRxLzZod1kKUnBNbWtjTDlRSjM2Y1BjbzhXcnBzSW5OYXZiWXNBZjRHZE1VNm9RUEs0Q1FHOTFpa3NFRjY1ZkZFZ25yWWtabApSN0JMNkdPcEh5U3o5aUd6SU1JRWVGTEEzS2p1QnR1VzdqUXlmbjNVUUpUQ0FJTjl4dC9RTG4xM3N0U1U1blVwCjMxbnEvYStYZUxTTWV5YmZxc3dDeEwwTVpZZmRyUUdaMmUxNWJucmpITHQ0M1BJallHSTIwamV4dUN1bHhFeW4KSjB4VktDT1BRVnQyejc1V2ZuS3NDbGpDckpUbXNKQUEzQkkrcWdsZUxZYWZqY3hhejdwTzBCTnJRUTVQV2FYMApQM1pCVU9jZXVGOEJRTkFGVTRJRTFFS0pzTlZRTW85VldOWG1NYUZKZEdrSXJZVjRwY1FSd0RRakRrKzZsUTV6CjFRSERZYmh0WlFJREFRQUJvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBZEJnTlZIUTRFRmdRVVdOUU11TUxGS1BwSEwrS1hMWnNEa2p0WXBDTXdEUVlKS29aSWh2Y05BUUVMQlFBRApnZ0VCQUprbWZRWW0rWERnZ0NCTS9IdFpOQnpNQ0U4c2U1ZENlV0xkS1c0T25LMVR0dUlsNytQSnpOUkEvL3AxCkIrblF0MjloSFpLK2tGYjlEL3JWSTFiUmtLa1l0QlFIeVVMRFc2RGxMVGp4b0cwdkJ0ZXRwRDBmTEIwbkJnenYKbGhyVkU1V3JpcEZGQnhncWMwK01vS3FMcWVHQmlWNHZxbDR5c1daUGI5VVg4aFBzeHdYNjJUS1g0VG9tMlJ1dQordVN6MkRQS2ZMLzVHRVhXUVR5dGFrTytRYUpKdEJtM280Q1EvRmVLa0ZybloraEcwTk9NNkxlQTVQVUVJQ3FLCmhjMmRSRytWc1A2WERoOGZlU2ZyWStkMVcrSlJTd2k0b2lrT2xNY2dpdjB5d1RVYnR1SSt1WE1mMnJQR1RLMzYKd1oxTGtXNElyb2VGZEJvM1MxUXBWb2VGRmlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + } + ] + } + }, + "version": "3.5.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-id/coreos-boot-disk", + "partitions": [ + { + "label": "var", + "number": 5, + "sizeMiB": 0, + "startMiB": 50000 + } + ] + } + ], + "files": [ + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20during%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deploment%20type.%0A%23%0A%23%20There%20is%20an
%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20for%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generator%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%2
2%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20device%20state%20file%20to%20prevent%20NM%20to%20unilaterally%20connect%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": "data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": 
"/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20the%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NM%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connect%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20folowed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependant%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20interface%20that%20matches%20ip%20from%20node%20ip%20hint%20file%0A%23%20in%20case%20file%20not%20exists%20return%20nothing%20and%0A%23%20fallback%20to%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20was%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20succesful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20the%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20then%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20apiserver-watcher%0A%20%20namespace%3A%20openshift-kube-apiserver%0Aspec%3A%0A%20%20containers%3A%0A%20%20-%20name%3A%20apiserver-watcher%0A%20%20%20%20image%3A%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84%22%0A%20%20%20%20command%3A%0A%20%20%20%20%20%20-%20flock%0A%20%20%20%20%20%20-%20--verbose%0A%20%20%20%20%20%20-%20--exclusive%0A%20%20%20%20%20%20-%20--timeout%3D300%0A%20%20%20%20%20%20-%20%2Frootfs%2Frun%2Fcloud-routes%2Fapiserver-watcher.lock%0A%20%20%20%20%20%20-%20apiserver-watcher%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%22run%22%0A%20%20%20%20-%20%22--health-check-url%3Dhttps%3A%2F%2Fapi-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com%3A6443%2Freadyz%22%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20mountPath%3A%20%2Frootfs%0A%20%20%20%20%20%20name%3A%20rootfs%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20hostNetwork%3A%20true%0A%20%20hostPID%3A%20true%0A%20%20priorityClassName%3A%20system-node-critical%0A%20%20tolerations%3A%0A%20%20-%20operator%3A%20%22Exists%22%0A%20%20restartPolicy%3A%20Always%0A%20%20volumes%3A%0A%20%20-%20name%3A%20rootfs%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%2F%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/apiserver-watcher.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": 
"data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo
%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20
%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%20%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%24
1%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exists%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Prevent%20hairpin%20traffic%20when%20the%20apiserver%20is%20up%0A%0A%23%20As%20per%20the%20Azure%20documentation%20(https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fload-balancer%2Fconcepts%23limitations)%2C%0A%23%20if%20a%20backend%20is%20load-balanced%20to%20itself%2C%20then%20the%20traffic%20will%20be%20dropped.%0A%23%0A%23%20This%20is%20because%20the%20L3LB%20does%20DNAT%2C%20so%20while%20the%20outgoing%20packet%20has%20a%20destination%0A%23%20IP%20of%20the%20VIP%2C%20the%20incoming%20load-balanced%20packet%20has%20a%20destination%20IP%20of%20the%0A%23%20host.%20That%20means%20that%20it%20%22sees%22%20a%20syn%20with%20the%20source%20and%20destination%0A%23%20IPs%20of%20itself%2C%20and%20duly%20replies%20wit%20a%20syn-ack%20back%20to%20itself.%20However%2C%20the%20client%0A%23%20socket%20expects%20a%20syn-ack%20with%20a%20source%20IP%20of%20the%20VIP%2C%20so%20it%20drops%20the%20packet.%0A%23%0A%23%20The%20solution%20is%20to%20redirect%20traffic%20destined%20to%20the%20lb%20vip%20back%20to%20ourselves.%0A%23%0A%23%20We%20check%20%2Frun%2Fcloud-routes%2F%20for%20files%20%24VIP.up%20and%20%24VIP.down.%20If%20the%20.up%20file%0A%23%20exists%2C%20then%20we%20redirect%20traffic%20destined%20for%20that%20vip%20to%20ourselves%20via%20nftables.%0A%23%20A%20systemd%20unit%20watches%20the%20directory%20for%20changes.%0A%23%0A%23%20TODO%3A%20Address%20the%20potential%20issue%20where%20apiserver-watcher%20could%20create%20multiple%20files%0A%23%20and%20openshift-azure-routes%20doesn't%20detect%20all%20of%20them%20because%20file%20change%20events%20are%20not%20queued%0A%23%20when%20the%20service%20is%20already%20running.%0A%23%20https%3A%2F%2Fgithub.com%2Fopenshift%2Fmachine-config-operator%2Fpull%2F3643%23issuecomment-1497234369%0A%0Aset%20-euo%20pipefail%0A%0A%23%20the%20list%20of%20load%20balancer%20IPs%20that%20are%20assigned%20to%20this%20node%0Adeclare%20-A%20v4vips%0Adeclare%20-A%20v6vips%0A%0ATABLE_NAME%3D%22azure-vips%22%0AVIPS_CHAIN%3D%22redirect-vips%22%0ARUN_DIR%3D%22%2Frun%2Fcloud-routes%22%0A%0Ainitialize()%20%7B%0A%20%20%20%20nft%20-f%20-%20%3C%3CEOF%0A%20%20%20%20%20%20%20%20add%20table%20inet%20%24%7BTABLE_NAME%7D%20%7B%20comment%20%22azure%20LB%20vip%20overriding%22%3B%20%7D%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20%7B%20type%20nat%20hook%20prerouting%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%20%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20prerouting%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20prerouting%20goto%20%24%7BVIPS_CHAIN%7D%0A%0A%20%20%20%20%20%20%20%20add%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%20%7B%20type%20nat%20hook%20output%20priority%20dstnat%3B%20%7D%0A%20%20%20%20%20%20%2
0%20flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20output%0A%20%20%20%20%20%20%20%20add%20rule%20inet%20%24%7BTABLE_NAME%7D%20output%20goto%20%24%7BVIPS_CHAIN%7D%0AEOF%0A%7D%0A%0Aremove_stale_routes()%20%7B%0A%20%20%20%20%23%23%20find%20extra%20ovn%20routes%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20routeVIPsV4%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip4%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v4route%20vips%3A%20%24%7BrouteVIPsV4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV4%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v4vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20routeVIPsV6%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22ip6%22%20%7C%20awk%20'%248%7Bprint%20%248%7D')%0A%20%20%20%20echo%20%22Found%20v6route%20vips%3A%20%24%7BrouteVIPsV6%7D%22%0A%20%20%20%20for%20route_vip%20in%20%24%7BrouteVIPsV6%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20!%20-v%20v6vips%5B%24%7Broute_vip%7D%5D%20%5D%5D%20%7C%7C%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Broute_vip%7D%5D%7D%22%20%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20removing%20stale%20vip%20%22%24%7Broute_vip%7D%22%20for%20local%20clients%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Broute_vip%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%7D%0A%0Async_rules()%20%7B%0A%20%20%20%20%23%20Construct%20the%20VIP%20lists.%20(The%20nftables%20syntax%20allows%20a%20trailing%20comma.)%0A%20%20%20%20v4vipset%3D%22%22%0A%20%20%20%20v6vipset%3D%22%22%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%
20%20%20%20%20%20%20%20%20%20%20v4vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv4vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20v6vipset%3D%22%24%7Bvip%7D%2C%20%24%7Bv6vipset%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20echo%20%22synchronizing%20IPv4%20VIPs%20to%20(%24%7Bv4vipset%7D)%2C%20IPv6%20VIPS%20to%20(%24%7Bv6vipset%7D)%22%0A%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20echo%20%22flush%20chain%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%22%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv4vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip%20daddr%20%7B%20%24%7Bv4vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Bv6vipset%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22add%20rule%20inet%20%24%7BTABLE_NAME%7D%20%24%7BVIPS_CHAIN%7D%20ip6%20daddr%20%7B%20%24%7Bv6vipset%7D%20%7D%20redirect%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%7D%20%7C%20nft%20-f%20-%0A%7D%0A%0Aadd_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20add.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20local%20ovnK8sMp0v4%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%243%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20local%20host%3D%24(hostname)%0A%20%20%20%20echo%20%24%7Bhost%7D%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv4vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v4%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_c
luster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip4.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v4%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20if%20%5B%20!%20-f%20%2Fproc%2Fnet%2Fif_inet6%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20ovnK8sMp0v6%3D%24(ip%20-brief%20address%20show%20ovn-k8s-mp0%20%7C%20awk%20'%7Bprint%20%244%7D'%20%7C%20awk%20-F%2F%20'%7Bprint%20%241%7D')%0A%20%20%20%20echo%20%22Found%20ovn-k8s-mp0%20interface%20IP%20%24%7BovnK8sMp0v6%7D%22%0A%0A%20%20%20%20for%20vip%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Bv6vips%5B%24%7Bvip%7D%5D%7D%22%20!%3D%20down%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ensuring%20route%20for%20%24%7Bvip%7D%20for%20internal%20clients%22%0A%20%20%20%20%20%20%20%20%20%20%20%20local%20routes%3D%24(crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-list%20ovn_cluster_router%20%7C%20grep%20%221010%22%20%7C%20grep%20%22%24%7Bvip%7D%22%20%7C%20grep%20%22%24%7BovnK8sMp0v6%7D%22)%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22OVNK%20Routes%20on%20ovn-cluster-router%20at%201010%20priority%3A%20%24routes%22%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Broutes%7D%22%20%3D%3D%20*%22%24%7Bvip%7D%22*%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20exists%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22Route%20does%20not%20exist%3B%20creating%20it...%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20inport%20%3D%3D%20rtos-%24%7Bhost%7D%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%20reroute%20%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-add%20ovn_cluster_router%201010%20%22inport%20%3D%3D%20%5C%22rtos-%24%7Bhost%7D%5C%22%20%26%26%20ip6.dst%20%3D%3D%20%24%7Bvip%7D%22%20reroute%20%22%24%7BovnK8sMp0v6%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0Aclear_rules()%20%7B%0A%20%20%20%20echo%20%22clearing%20rules%20from%20%24%7BTABLE_NAME%7D%22%0A%20%20%20%20nft%20delete%20table%20inet%20%22%24%7BTABLE_NAME%7D%22%20%7C%7C%20true%0A%7D%0A%0Aclear_routes()%20%7B%0A%20%20%20%20local%20ovnkContainerID%3D%24(crictl%20ps%20--name%20ovnkube-controller%20%7C%20awk%20'%7B%20print%20%241%20%7D'%20%7C%20tail%20-n%2B2)%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BovnkContainerID%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22OVN-Kubernetes%20is%20not%20running%3B%20no%20routes%20to%20remove.%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22Found%20ovnkube-controller%20pod...%20%24%7BovnkContainerID%7D%22%0A%20%20%20%20echo%20%22clearing%20all%20routes%20from%20ovn-cluster-router%22%0A%20%20%20%20crictl%20exec%20-i%20%24%7BovnkContainerID%7D%20ovn-nbctl%20lr-policy-del%20ovn_cluster_router%201010%0A%7D%0A%0A%23%20out%20parameters%3A%20v4vips%20v6vips%0Alist_lb_ips()%20%7B%0A%20%20%20%20for%20k%20in%20%22%24%7B!v4vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v4vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20done%0A%20%20%20%20for%20k%20in%20%22%24%7B!v6vips%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20unset%20v6vips%5B%22%24%7Bk%7D%22%5D%0A%20%20%20%20d
one%0A%0A%0A%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20for%20file%20in%20%22%24%7BRUN_DIR%7D%22%2F*.up%20%3B%20do%0A%20%20%20%20%20%20%20%20vip%3D%24(basename%20%22%24%7Bfile%7D%22%20.up)%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20-e%20%22%24%7BRUN_DIR%7D%2F%24%7Bvip%7D.down%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%24%7Bvip%7D%20has%20upfile%20and%20downfile%2C%20marking%20as%20down%22%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20%5B%5B%20%24%7Bvip%7D%20%3D~%20%3A%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v6%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v6vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22processing%20v4%20vip%20%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20v4vips%5B%24%7Bvip%7D%5D%3D%22%24%7Bvip%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%7D%0A%0A%0Acase%20%22%241%22%20in%0A%20%20%20%20start)%0A%20%20%20%20%20%20%20%20initialize%0A%20%20%20%20%20%20%20%20list_lb_ips%0A%20%20%20%20%20%20%20%20sync_rules%0A%20%20%20%20%20%20%20%20remove_stale_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20add_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20echo%20%22done%20applying%20vip%20rules%22%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20cleanup)%0A%20%20%20%20%20%20%20%20clear_rules%0A%20%20%20%20%20%20%20%20clear_routes%20%23%20needed%20for%20OVN-Kubernetes%20plugin's%20routingViaHost%3Dfalse%20mode%0A%20%20%20%20%20%20%20%20%3B%3B%0A%20%20%20%20*)%0A%20%20%20%20%20%20%20%20echo%20%24%22Usage%3A%20%240%20%7Bstart%7Ccleanup%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/opt/libexec/openshift-azure-routes.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0Aset%20-eou%20pipefail%0A%0A%23%20context%0Aintapi%3D%24(oc%20get%20infrastructures.config.openshift.io%20cluster%20-o%20%22jsonpath%3D%7B.status.apiServerInternalURI%7D%22)%0Acontext%3D%22%24(oc%20config%20current-context)%22%0A%23%20cluster%0Acluster%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.contexts%5B%3F(%40.name%3D%3D%5C%22%24context%5C%22)%5D.context.cluster%7D%22)%22%0Aserver%3D%22%24(oc%20config%20view%20-o%20%22jsonpath%3D%7B.clusters%5B%3F(%40.name%3D%3D%5C%22%24cluster%5C%22)%5D.cluster.server%7D%22)%22%0A%23%20token%0Aca_crt_data%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.ca%5C.crt%7D%22%20%7C%20base64%20--decode)%22%0Anamespace%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20%20-o%20%22jsonpath%3D%7B.data.namespace%7D%22%20%7C%20base64%20--decode)%22%0Atoken%3D%22%24(oc%20get%20secret%20-n%20openshift-machine-config-operator%20node-bootstrapper-token%20-o%20%22jsonpath%3D%7B.data.token%7D%22%20%7C%20base64%20--decode)%22%0A%0Aexport%20KUBECONFIG%3D%22%24(mktemp)%22%0Akubectl%20config%20set-credentials%20%22kubelet%22%20--token%3D%22%24token%22%20%3E%2Fdev%2Fnull%0Aca_crt%3D%22%24(mktemp)%22%3B%20echo%20%22%24ca_crt_data%22%20%3E%20%24ca_crt%0Akubectl%20config%20set-cluster%20%24cluster%20--server%3D%22%24intapi%22%20--certificate-authority%3D%22%24ca_crt%22%20--embed-certs%20%3E%2Fdev%2Fnull%0Akubectl%20config%20set-context%20kubelet%20--cluster%3D%22%24cluster%22%20--user%3D%22kubelet%22%20%3E%2Fdev%2Fnull%0Akubectl%20config%20use-context%20kubelet%20%3E%2Fdev%2Fnull%0Acat%20%22%24KUBECONFIG%22%0A" + }, + "mode": 493, + 
"overwrite": true, + "path": "/usr/local/bin/recover-kubeconfig.sh" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed
%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + }, + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": 
"data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A06bad2ea56c7fcc489b9e47c86ec3dd0024994026d409073d5f77b64f1793a15%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%
20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + }, + { + "contents": { + "source": 
"data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENodW5rU2l6ZU1pQjogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhb
HNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IGZhbHNlCiAgU2lnc3RvcmVJbWFnZVZlcmlmaWNhdGlvblBLSTogZmFsc2UKICBTdG9yYWdlUGVyZm9ybWFudFNlY3VyaXR5UG9saWN5OiBmYWxzZQogIFRyYW5zbGF0ZVN0cmVhbUNsb3NlV2Vic29ja2V0UmVxdWVzdHM6IGZhbHNlCiAgVXBncmFkZVN0YXR1czogZmFsc2UKICBVc2VyTmFtZXNwYWNlc1BvZFNlY3VyaXR5U3RhbmRhcmRzOiB0cnVlCiAgVXNlck5hbWVzcGFjZXNTdXBwb3J0OiB0cnVlCiAgVlNwaGVyZUNvbmZpZ3VyYWJsZU1heEFsbG93ZWRCbG9ja1ZvbHVtZXNQZXJOb2RlOiBmYWxzZQogIFZTcGhlcmVIb3N0Vk1Hcm91cFpvbmFsOiBmYWxzZQogIFZTcGhlcmVNaXhlZE5vZGVFbnY6IGZhbHNlCiAgVlNwaGVyZU11bHRpRGlzazogdHJ1ZQogIFZTcGhlcmVNdWx0aU5ldHdvcmtzOiB0cnVlCiAgVm9sdW1lQXR0cmlidXRlc0NsYXNzOiBmYWxzZQogIFZvbHVtZUdyb3VwU25hcHNob3Q6IGZhbHNlCmZpbGVDaGVja0ZyZXF1ZW5jeTogMHMKaHR0cENoZWNrRnJlcXVlbmN5OiAwcwppbWFnZU1heGltdW1HQ0FnZTogMHMKaW1hZ2VNaW5pbXVtR0NBZ2U6IDBzCmtpbmQ6IEt1YmVsZXRDb25maWd1cmF0aW9uCmt1YmVBUElCdXJzdDogMTAwCmt1YmVBUElRUFM6IDUwCmxvZ2dpbmc6CiAgZmx1c2hGcmVxdWVuY3k6IDAKICBvcHRpb25zOgogICAganNvbjoKICAgICAgaW5mb0J1ZmZlclNpemU6ICIwIgogICAgdGV4dDoKICAgICAgaW5mb0J1ZmZlclNpemU6ICIwIgogIHZlcmJvc2l0eTogMAptYXhQb2RzOiAyNTAKbWVtb3J5U3dhcDoge30Kbm9kZVN0YXR1c1JlcG9ydEZyZXF1ZW5jeTogNW0wcwpub2RlU3RhdHVzVXBkYXRlRnJlcXVlbmN5OiAxMHMKcG9kUGlkc0xpbWl0OiA0MDk2CnByb3RlY3RLZXJuZWxEZWZhdWx0czogdHJ1ZQpyb3RhdGVDZXJ0aWZpY2F0ZXM6IHRydWUKcnVudGltZVJlcXVlc3RUaW1lb3V0OiAwcwpzZXJpYWxpemVJbWFnZVB1bGxzOiBmYWxzZQpzZXJ2ZXJUTFNCb290c3RyYXA6IHRydWUKc2h1dGRvd25HcmFjZVBlcmlvZDogMHMKc2h1dGRvd25HcmFjZVBlcmlvZENyaXRpY2FsUG9kczogMHMKc3RhdGljUG9kUGF0aDogL2V0Yy9rdWJlcm5ldGVzL21hbmlmZXN0cwpzdHJlYW1pbmdDb25uZWN0aW9uSWRsZVRpbWVvdXQ6IDBzCnN5bmNGcmVxdWVuY3k6IDBzCnN5c3RlbUNncm91cHM6IC9zeXN0ZW0uc2xpY2UKdGxzQ2lwaGVyU3VpdGVzOgotIFRMU19BRVNfMTI4X0dDTV9TSEEyNTYKLSBUTFNfQUVTXzI1Nl9HQ01fU0hBMzg0Ci0gVExTX0NIQUNIQTIwX1BPTFkxMzA1X1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMTI4X0dDTV9TSEEyNTYKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfRUNE
SEVfUlNBX1dJVEhfQUVTXzI1Nl9HQ01fU0hBMzg0Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0NIQUNIQTIwX1BPTFkxMzA1X1NIQTI1Ngp0bHNNaW5WZXJzaW9uOiBWZXJzaW9uVExTMTIKdm9sdW1lU3RhdHNBZ2dQZXJpb2Q6IDBzCg==" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ], + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated NM run by dracut. We want NM to\n# consider all profiles autoconnect priority when it starts instead of\n# remembering which profile was a device activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "NetworkManager-clean-initrd-state.service" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", + "name": "10-mco-profile-unix-socket.conf" + }, + { + "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", + "name": "05-mco-ordering.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "crio.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "docker.socket" + }, + { + "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", + "enabled": true, + "name": "firstboot-osupdate.target" + }, + { + "dropins": [ + { + "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", + "name": "01-after-configure-ovs.conf" + } + ], + "name": "ipsec.service" + }, + { + "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash 
/usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} ${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "kubelet-auto-node-size.service" + }, + { + "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", + "name": "kubelet-dependencies.target" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper /usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --cloud-provider=external \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bad2ea56c7fcc489b9e47c86ec3dd0024994026d409073d5f77b64f1793a15 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "enabled": true, + "name": "kubelet.service" + }, + { + "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c 
\"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 'mount --make-rshared' so it cascades internally\nExecStart=unshare --mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! 
/usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", + "enabled": true, + "name": "machine-config-daemon-pull.service" + }, + { + "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "nmstate-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "node-valid-hostname.service" + }, + { + "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/bin/podman run --rm \\\n --authfile /var/lib/kubelet/config.json \\\n --env 'ENABLE_NODEIP_DEBUG=true' \\\n --net=host \\\n --security-opt label=disable \\\n --volume /etc/systemd/system:/etc/systemd/system \\\n --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cb620f8264c66301b054ed08c776fffefe36fe0054e4a9fbda2fa9b748d8e2e3 \\\n node-ip \\\n set \\\n --retry-on-failure \\\n --network-type OVNKubernetes \\\n ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n do \\\n sleep 5; \\\n done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": false, + "name": "nodeip-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Watch for downfile changes\nBefore=kubelet-dependencies.target\n\n[Path]\nPathChanged=/run/cloud-routes/\nMakeDirectory=true\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "openshift-azure-routes.path" + }, + { + "contents": "[Unit]\nDescription=Work around Azure load balancer hairpin\n# We don't need to do this on the firstboot\nAfter=firstboot-osupdate.target\n\n[Service]\nType=simple\nExecStart=/bin/bash /opt/libexec/openshift-azure-routes.sh start\nUser=root\nSyslogIdentifier=openshift-azure-routes\n", + "enabled": false, + "name": "openshift-azure-routes.service" + }, + { + "enabled": true, + "name": "openvswitch.service" + }, + { + "contents": "[Unit]\n# Kdump will generate its initramfs based on the running state when kdump.service runs\n# If OVS has already run, kdump fails to gather a working network config,\n# which prevents network log exports, such as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "ovs-configuration.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", + "name": "10-ovs-vswitchd-restart.conf" + } + ], + "name": "ovs-vswitchd.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\n", + "name": "10-ovsdb-restart.conf" + } + ], + "enabled": true, + "name": "ovsdb-server.service" + }, + { + "dropins": [ + { + "contents": "", + "name": 
"10-mco-default-env.conf" + }, + { + "contents": "# See https://github.com/openshift/machine-config-operator/issues/1897\n[Service]\nNice=10\nIOSchedulingClass=best-effort\nIOSchedulingPriority=6\n", + "name": "mco-controlplane-nice.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + }, + { + "contents": "\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var.service\n\n[Mount]\nWhere=/var\nWhat=/dev/disk/by-partlabel/var\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target\n", + "enabled": true, + "name": "var.mount" + } + ] + } + }, + "extensions": [], + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + "cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "default", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ec4b5c12b787640403135c65517882f80c80fd0c741dfeb70e9c716c5ca2edeb" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "03a3677b298e05ba870cdf0f26f4db16f80e64ae", + "machineconfiguration.openshift.io/release-image-version": "4.20.0-0.nightly-2025-08-19-180353" + }, + "creationTimestamp": "2025-08-19T20:57:30Z", + "generation": 1, + "name": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "MachineConfigPool", + "name": "worker", + "uid": "19c9702b-1ecf-4c47-ab1e-0bf44aeb0869" + } + ], + "resourceVersion": "76441", + "uid": "48b80ce0-3009-4553-ad7e-34383e24e58b" + }, + "spec": { + "baseOSExtensionsContainerImage": 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0", + "config": { + "ignition": { + "version": "3.5.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20during%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20
profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deploment%20type.%0A%23%0A%23%20There%20is%20an%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20for%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generato
r%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20device%20state%20file%20to%20prevent%20NM%20to%20unilaterally%20connect%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": 
"data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20the%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NM%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connect%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20folowed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependant%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20interface%20that%20matches%20ip%20from%20node%20ip%20hint%20file%0A%23%20in%20case%20file%20not%20exists%20return%20nothing%20and%0A%23%20fallback%20to%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20was%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20succesful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20the%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20then%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": "data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%
20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((tota
l_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%2
0%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%241%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exists%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + }, + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": "data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%
3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": 
"data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + }, + { + "contents": { + "source": "data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3Afd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhbHNlCiAgSXJyZWNvbmNpbGFibGVNY
WNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IHRydWUKICBTaWdzdG9yZUltYWdlVmVyaWZpY2F0aW9uUEtJOiBmYWxzZQogIFN0b3JhZ2VQZXJmb3JtYW50U2VjdXJpdHlQb2xpY3k6IGZhbHNlCiAgVHJhbnNsYXRlU3RyZWFtQ2xvc2VXZWJzb2NrZXRSZXF1ZXN0czogZmFsc2UKICBVcGdyYWRlU3RhdHVzOiBmYWxzZQogIFVzZXJOYW1lc3BhY2VzUG9kU2VjdXJpdHlTdGFuZGFyZHM6IHRydWUKICBVc2VyTmFtZXNwYWNlc1N1cHBvcnQ6IHRydWUKICBWU3BoZXJlQ29uZmlndXJhYmxlTWF4QWxsb3dlZEJsb2NrVm9sdW1lc1Blck5vZGU6IGZhbHNlCiAgVlNwaGVyZUhvc3RWTUdyb3VwWm9uYWw6IGZhbHNlCiAgVlNwaGVyZU1peGVkTm9kZUVudjogZmFsc2UKICBWU3BoZXJlTXVsdGlEaXNrOiB0cnVlCiAgVlNwaGVyZU11bHRpTmV0d29ya3M6IHRydWUKICBWb2x1bWVBdHRyaWJ1dGVzQ2xhc3M6IGZhbHNlCiAgVm9sdW1lR3JvdXBTbmFwc2hvdDogZmFsc2UKZmlsZUNoZWNrRnJlcXVlbmN5OiAwcwpodHRwQ2hlY2tGcmVxdWVuY3k6IDBzCmltYWdlTWF4aW11bUdDQWdlOiAwcwppbWFnZU1pbmltdW1HQ0FnZTogMHMKa2luZDogS3ViZWxldENvbmZpZ3VyYXRpb24Ka3ViZUFQSUJ1cnN0OiAxMDAKa3ViZUFQSVFQUzogNTAKbG9nZ2luZzoKICBmbHVzaEZyZXF1ZW5jeTogMAogIG9wdGlvbnM6CiAgICBqc29uOgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgICB0ZXh0OgogICAgICBpbmZvQnVmZmVyU2l6ZTogIjAiCiAgdmVyYm9zaXR5OiAwCm1heFBvZHM6IDI1MAptZW1vcnlTd2FwOiB7fQpub2RlU3RhdHVzUmVwb3J0RnJlcXVlbmN5OiA1bTBzCm5vZGVTdGF0dXNVcGRhdGVGcmVxdWVuY3k6IDEwcwpwb2RQaWRzTGltaXQ6IDQwOTYKcHJvdGVjdEtlcm5lbERlZmF1bHRzOiB0cnVlCnJvdGF0ZUNlcnRpZmljYXRlczogdHJ1ZQpydW50aW1lUmVxdWVzdFRpbWVvdXQ6IDBzCnNlcmlhbGl6ZUltYWdlUHVsbHM6IGZhbHNlCnNlcnZlclRMU0Jvb3RzdHJhcDogdHJ1ZQpzaHV0ZG93bkdyYWNlUGVyaW9kOiAwcwpzaHV0ZG93bkdyYWNlUGVyaW9kQ3JpdGljYWxQb2RzOiAwcwpzdGF0aWNQb2RQYXRoOiAvZXRjL2t1YmVybmV0ZXMvbWFuaWZlc3RzCnN0cmVhbWluZ0Nvbm5lY3Rpb25JZGxlVGltZW91dDogMHMKc3luY0ZyZXF1ZW5jeTogMHMKc3lzdGVtQ2dyb3VwczogL3N5c3RlbS5zbGljZQp0bHNDaXBoZXJTdWl0ZXM6Ci0gVExTX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQUVTXzEyOF9HQ01fU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18yNTZfR0NNX1NIQTM4NAotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMjU2X0dD
TV9TSEEzODQKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9DSEFDSEEyMF9QT0xZMTMwNV9TSEEyNTYKLSBUTFNfRUNESEVfUlNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2CnRsc01pblZlcnNpb246IFZlcnNpb25UTFMxMgp2b2x1bWVTdGF0c0FnZ1BlcmlvZDogMHMK" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated by the NM run by dracut. We want NM to\n# consider all profiles' autoconnect priorities when it starts instead of\n# remembering which profile a device was activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "NetworkManager-clean-initrd-state.service" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", + "name": "10-mco-profile-unix-socket.conf" + }, + { + "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", + "name": "05-mco-ordering.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "crio.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "docker.socket" + }, + { + "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", + "enabled": true, + "name": "firstboot-osupdate.target" + }, + { + "dropins": [ + { + "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", + "name": "01-after-configure-ovs.conf" + } + ], + "name": "ipsec.service" + }, + { + "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash /usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} ${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + 
"enabled": true, + "name": "kubelet-auto-node-size.service" + }, + { + "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", + "name": "kubelet-dependencies.target" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper /usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --cloud-provider=external \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:041a889dcf9abd95832934bf0870af8d2f4289733294b143d555ce49c77bfec1 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "enabled": true, + "name": "kubelet.service" + }, + { + "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c \"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 'mount --make-rshared' so it cascades internally\nExecStart=unshare 
--mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! /usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", + "enabled": true, + "name": "machine-config-daemon-pull.service" + }, + { + "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "nmstate-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "node-valid-hostname.service" + }, + { + "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/bin/podman run --rm \\\n --authfile /var/lib/kubelet/config.json \\\n --env 'ENABLE_NODEIP_DEBUG=true' \\\n --net=host \\\n --security-opt label=disable \\\n --volume /etc/systemd/system:/etc/systemd/system \\\n --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bfb60187a2054ec1946357aeeb4866fa52c1508b05ba1b2b294c08e8e4e27e8 \\\n node-ip \\\n set \\\n --retry-on-failure \\\n --network-type OVNKubernetes \\\n ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n do \\\n sleep 5; \\\n done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": false, + "name": "nodeip-configuration.service" + }, + { + "enabled": true, + "name": "openvswitch.service" + }, + { + "contents": "[Unit]\n# Kdump will generate its initramfs based on the running state when kdump.service runs.\n# If OVS has already run, kdump fails to gather a working network config,\n# which prevents network log exports, such as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "ovs-configuration.service" + }, + { + "dropins": [ + { + "contents": 
"[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", + "name": "10-ovs-vswitchd-restart.conf" + } + ], + "name": "ovs-vswitchd.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\n", + "name": "10-ovsdb-restart.conf" + } + ], + "enabled": true, + "name": "ovsdb-server.service" + }, + { + "dropins": [ + { + "contents": "", + "name": "10-mco-default-env.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + } + ] + } + }, + "extensions": [], + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + "cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "default", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "metadata": { + "annotations": { + "machineconfiguration.openshift.io/generated-by-controller-version": "1b20021e15e2252573d2369f63de846c588222ae", + "machineconfiguration.openshift.io/release-image-version": "4.20.0-0.nightly-2025-08-12-153542" + }, + "creationTimestamp": "2025-08-19T18:55:59Z", + "generation": 1, + "name": "rendered-worker-95bb510afd31180b50f1d28076a174d6", + "ownerReferences": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "MachineConfigPool", + "name": "worker", + "uid": "19c9702b-1ecf-4c47-ab1e-0bf44aeb0869" + } + ], + "resourceVersion": "12812", + "uid": 
"d8a5da67-70fe-41ea-b346-0cba8c14eab3" + }, + "spec": { + "baseOSExtensionsContainerImage": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33", + "config": { + "ignition": { + "version": "3.5.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" + ] + } + ] + }, + "storage": { + "files": [ + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-ex%20-o%20pipefail%0A%0ANM_DEVICES_DIR%3D%2Frun%2FNetworkManager%2Fdevices%0ANM_RUN_CONN_DIR%3D%2Frun%2FNetworkManager%2Fsystem-connections%0ANM_ETC_CONN_DIR%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A%0Alogger%20-t%20nm-clean-initrd-state%20%22Cleaning%20network%20activation%20state%20generated%20by%20dracut...%22%0Alogger%20-t%20nm-clean-initrd-state%20%22To%20disable%2C%20remove%20%2Fvar%2Flib%2Fmco%2Fnm-clean-initrd-state%22%0A%0Aif%20%5B%20!%20-e%20%22%24NM_DEVICES_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_RUN_CONN_DIR%22%20%5D%20%7C%7C%20%5B%20!%20-e%20%22%24NM_ETC_CONN_DIR%22%20%5D%3B%20then%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22There%20is%20no%20network%20activation%20state%20to%20clean%22%0A%20%20exit%0Afi%0A%0A%23%20Some%20deployments%20require%20an%20active%20network%20early%20in%20the%20boot%20process.%20To%0A%23%20achieve%20this%2C%20dracut%20runs%20specific%20NetworkManager%20modules.%20This%20results%20in%0A%23%20NetworkManager%20keyfiles%20being%20generated%20(either%20default%20or%20from%20ip%20kernel%0A%23%20arguments)%20and%20activated.%20This%20activation%20generates%20state%20that%20makes%20those%0A%23%20profiles%20to%20be%20re-activated%20by%20the%20NetworkManager%20service%20later%20in%20the%0A%23%20boot%20process.%20And%20this%20has%20the%20effect%20that%20other%20profiles%20deployed%20by%20the%0A%23%20user%20for%20the%20same%20set%20of%20devices%20are%20ignored.%0A%0A%23%20Most%20of%20the%20time%20this%20is%20the%20desired%20behavior.%20The%20exception%20to%20this%20rule%0A%23%20is%20when%20the%20user%20wants%20to%20run%20the%20boot%20process%20with%20a%20different%20network%0A%23%20setup%20than%20the%20real%20root%20which%20is%20usually%20related%20to%20the%20fact%20that%0A%23%20generating%20images%20with%20customized%20kernel%20arguments%20is%20a%20complication%20in%0A%23%20the%20deployment%20pipeline.%0A%0A%23%20This%20need%20has%20been%20exacerbated%20by%20past%20NetworkManager%20bugs%20that%20activated%0A%23%20the%20network%20on%20boot%20when%20it%20was%20not%20really%20needed.%20Most%20notably%20when%20ip%0A%23%20kernel%20argument%20is%20present%2C%20something%20that%20the%20baremetal%20installer%20adds%20by%0A%23%20default.%0A%0A%23%20The%20intention%20here%20is%20to%20remove%20the%20state%20that%20was%20generated%20with%20the%0A%23%20activation%20of%20those%20profiles%20du
ring%20dracut%20execution.%20Then%20when%0A%23%20NetworkManager%20service%20runs%2C%20the%20profiles%20generated%20by%20dracut%2C%20along%20with%0A%23%20other%20profiles%20configured%20by%20the%20user%2C%20are%20evaluated%20towards%20finding%20the%0A%23%20most%20appropriate%20profile%20to%20connect%20a%20device%20with.%20As%20a%20precaution%2C%20clean%0A%23%20state%20only%20for%20devices%20that%3A%0A%23%20-%20have%20been%20activated%20with%20a%20default%20profile%20(assume%20that%20a%20non-default%0A%23%20%20%20configuration%20expresses%20intention%20by%20the%20user%20to%20run%20with%20it%20permanently)%0A%23%20-%20have%20a%20specific%20configured%20profile%20set%20to%20auto-connect%20(if%20there%20is%20no%0A%23%20%20%20alternate%20configured%20profile%20for%20a%20device%20it%20makes%20no%20sense%20to%0A%23%20%20%20de-activate%20anything)%0A%23%0A%23%20Although%20this%20can%20theoretically%20happen%20on%20any%20deployment%20type%2C%20the%20need%20has%0A%23%20mostly%20come%20from%20IPI%20bare%20metal%20deployments.%20For%20the%20time%20being%2C%20this%0A%23%20should%20be%20opt-in%20in%20any%20other%20deployment%20type.%0A%23%0A%23%20There%20is%20an%20RFE%20filed%20against%20NM%20that%20once%20implemented%20would%20make%20this%0A%23%20script%20unnecessary%3A%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2089707%0A%0Afor%20device%20in%20%22%24%7BNM_DEVICES_DIR%7D%22%2F*%3B%20do%0A%20%20if%20%5B%20!%20-e%20%22%24device%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20the%20device%20file%20name%20is%20the%20ifindex%0A%20%20ifindex%3D%24(basename%20%22%24device%22)%0A%20%20%0A%20%20%23%20get%20the%20interface%20name%20by%20ifindex%0A%20%20ifname%3D%24(ip%20-j%20link%20show%20%7C%20jq%20-r%20%22.%5B%5D%20%7C%20select(.ifindex%20%3D%3D%20%24%7Bifindex%7D)%20%7C%20.ifname%20%2F%2F%20empty%22)%0A%0A%20%20%23%20no%20interface%20name%20found%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24ifname%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20get%20the%20uuid%20of%20the%20profile%20the%20device%20has%20been%20activated%20with%0A%20%20active_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bdevice%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Econnection-uuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24device%22)%0A%0A%20%20%23%20the%20device%20was%20not%20activated%20with%20any%20profile%2C%20ignore%0A%20%20if%20%5B%20-z%20%22%24active_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20generated%20profile%20by%20uuid%0A%20%20for%20profile%20in%20%22%24%7BNM_RUN_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20generated_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24active_profile_uuid%22%20%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20generated%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24active_profile_uuid%22%20!%3D%20%22%24generated_profile_uuid%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20check%20that%20it%20is%20not%20specific%20to%20the%20device%2C%20otherwise%20ignore%0A%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24profile_ifname%22%20%5D%3B%20then%0A%20%20%2
0%20continue%0A%20%20fi%0A%0A%20%20%23%20profile%20not%20generated%20by%20nm-initrd-generator%2C%20ignore%0A%20%20%23%20only%20check%20it%20if%20the%20key%20is%20set%20(from%20NM%201.32.4)%0A%20%20origin%3D%24(sed%20-nr%20'%2F%5E%5C%5Buser%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eorg.freedesktop.NetworkManager.origin%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20if%20%5B%20-n%20%22%24origin%22%20%5D%20%26%26%20%5B%20%22%24origin%22%20!%3D%20%22nm-initrd-generator%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20%23%20find%20the%20configured%20profile%20by%20name%20with%20auto-connect%20set%0A%20%20for%20profile%20in%20%22%24%7BNM_ETC_CONN_DIR%7D%22%2F*%3B%20do%0A%20%20%20%20profile_ifname%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Einterface-name%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20autoconnect%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Eautoconnect%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%20%20if%20%5B%20%22%24profile_ifname%22%20%3D%20%22%24ifname%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20configured%20profile%20not%20found%2C%20ignore%0A%20%20if%20%5B%20%22%24profile_ifname%22%20!%3D%20%22%24ifname%22%20%5D%20%7C%7C%20%5B%20%22%24autoconnect%22%20%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20continue%0A%20%20fi%0A%0A%20%20configured_profile_uuid%3D%24(sed%20-nr%20'%2F%5E%5C%5Bconnection%5C%5D%2F%2C%2F%5E%5C%5B%2F%7B%2F%5Euuid%5Cs*%3D%2F%7Bs%2F%5B%5E%3D%5D%2B%5Cs*%3D%5Cs*%2F%2F%3BP%7D%7D'%20%22%24profile%22)%0A%20%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20activated%20with%20default%20generated%20profile%20%24generated_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%20has%20different%20configured%20specific%20profile%20%24configured_profile_uuid%22%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22%24ifname%3A%20removing%20state...%22%0A%20%20%0A%20%20%23%20NM%20can%20still%20generate%20internal%20profiles%20from%20the%20IP%20address%0A%20%20%23%20configuration%20of%20devices%2C%20so%20flush%20addresses%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Flushing%20IP%20addresses%20from%20%24ifname%22%0A%20%20ip%20addr%20flush%20%22%24ifname%22%0A%20%20ip%20-6%20addr%20flush%20%22%24ifname%22%0A%0A%20%20%23%20remove%20the%20device%20state%20file%20to%20prevent%20NM%20from%20unilaterally%20connecting%20with%20the%0A%20%20%23%20latest%20activated%20profile%20without%20evaluating%20other%20profiles%20%0A%20%20logger%20-t%20nm-clean-initrd-state%20%22Removing%20%24device%22%0A%20%20rm%20-f%20--%20%22%24device%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nm-clean-initrd-state.sh" + }, + { + "contents": { + "source": "data:,%5Bconnection%5D%0Aipv6.dhcp-duid%3Dll%0Aipv6.dhcp-iaid%3Dmac%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/01-ipv6.conf" + }, + { + "contents": { + "source": "data:,%5Bmain%5D%0Aplugins%3Dkeyfile%2Cifcfg-rh%0A%5Bkeyfile%5D%0Apath%3D%2Fetc%2FNetworkManager%2Fsystem-connections%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/20-keyfiles.conf" + }, + { + "contents": { + "source": "data:," + }, + "mode": 384, + "overwrite": true, + "path": 
"/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt" + }, + { + "contents": { + "source": "data:,KUBERNETES_SERVICE_HOST%3D'api-int.ci-op-pw3ghqzh-bb5c4.ci2.azure.devcluster.openshift.com'%0AKUBERNETES_SERVICE_PORT%3D'6443'%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/apiserver-url.env" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20managed%20by%20machine-config-operator.%0A%23%20Suppress%20audit%20rules%20which%20always%20trigger%20for%20container%0A%23%20workloads%2C%20as%20they%20spam%20the%20audit%20log.%20%20Workloads%20are%20expected%0A%23%20to%20be%20dynamic%2C%20and%20the%20networking%20stack%20uses%20iptables.%0A-a%20exclude%2Calways%20-F%20msgtype%3DNETFILTER_CFG%0A%23%20The%20default%20bridged%20networking%20enables%20promiscuous%20on%20the%20veth%0A%23%20device.%20%20Ideally%2C%20we'd%20teach%20audit%20to%20ignore%20only%20veth%20devices%2C%0A%23%20since%20one%20might%20legitimately%20care%20about%20promiscuous%20on%20real%20physical%0A%23%20devices.%20%20But%20we%20can't%20currently%20differentiate.%0A-a%20exclude%2Calways%20-F%20msgtype%3DANOM_PROMISCUOUS%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/audit/rules.d/mco-audit-quiet-containers.rules" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20ESP%20offload%2C%20either%20in%20actual%20hardware%20or%20as%20part%20as%20GRO%20(generic%0A%23%20recieve%20offload)%20does%20not%20work%20for%20interfaces%20attached%20to%20an%20OVS%20bridge%0A%23%20so%20turn%20it%20off%20for%20the%20time%20being.%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FRHEL-58811%0A%0A%23%20Depends%20on%20ipsec%20service%20drop-in%20to%20start%20it%20after%20configure-ovs%20to%20make%0A%23%20sure%20offloads%20are%20disabled%20before%20ipsec%20starts.%0A%0Aif%20%5B%5B%20%22%242%22%20!%3D%20%22up%22%20%5D%5D%3B%20then%0A%20%20exit%0Afi%0A%0Adevice%3D%24DEVICE_IFACE%0Akind_slave%3D%24(ip%20-j%20-d%20link%20show%20%22%24device%22%20%7C%20jq%20-r%20'.%5B0%5D%20%7C%20.linkinfo.info_slave_kind%20%2F%2F%20empty')%0A%0Aif%20%5B%20%22%24kind_slave%22%20%3D%20%22openvswitch%22%20%5D%3B%20then%0A%20%20for%20feature%20in%20tx-esp-segmentation%20esp-hw-offload%20esp-tx-csum-hw-offload%3B%20do%0A%20%20%20%20if%20ethtool%20-k%20%22%24device%22%20%7C%20grep%20-qE%20%22%5E%24%7Bfeature%7D%3A%20off%22%3B%20then%0A%20%20%20%20%20%20%23%20already%20disabled%2C%20nothing%20to%20do%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%20%20%20%20%0A%20%20%20%20logger%20-t%2099-esp-offload%20-s%20%22Setting%20%24feature%20off%20for%20%24device%3A%20unsupported%20when%20attached%20to%20Open%20vSwitch%20bridge%22%0A%20%20%20%20ethtool%20-K%20%22%24device%22%20%22%24feature%22%20off%0A%20%20done%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-esp-offload" + }, + { + "contents": { + "source": "data:,r%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F80-openshift-network.conf%0Ar%20%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F10-ovn-kubernetes.conf%0Ad%20%2Frun%2Fmultus%2Fcni%2Fnet.d%2F%200755%20root%20root%20-%20-%0AD%20%2Fvar%2Flib%2Fcni%2Fnetworks%2Fopenshift-sdn%2F%200755%20root%20root%20-%20-%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/tmpfiles.d/cleanup-cni.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Skipping%20configure-ovs%20due%20to%20manual%20network%20configuration%22%0A%20%20exit%200%0Afi%0A%0A%23%20This%20file%20is%20not%20needed%20anymore%20in%204.7%2B%2C%20but%20when%20rolling%20back%20to%204.6%0A%23%20the%20ovs%20pod%20needs%20it%20to%20know%20ovs%20is%20running%20on%20the%20host.%0Atouch%20%2Fvar%2Frun%2Fovs-config-executed%0A%0A%23%20always%20use%20--escape%20no%20to%20prevent%20'%3A'%20mangling.%20%20nmcli%20will%20escape%20all%20colons%20as%20%5C%3A%2C%20this%20breaks%20input%0ANMCLI_GET_VALUE%3D%22nmcli%20--escape%20no%20--get-values%22%0A%23%20These%20are%20well%20knwon%20NM%20default%20paths%0ANM_CONN_ETC_PATH%3D%22%2Fetc%2FNetworkManager%2Fsystem-connections%22%0ANM_CONN_RUN_PATH%3D%22%2Frun%2FNetworkManager%2Fsystem-connections%22%0A%0A%23%20This%20is%20the%20path%20where%20NM%20is%20known%20to%20be%20configured%20to%20store%20user%20keyfiles%20%0ANM_CONN_CONF_PATH%3D%22%24NM_CONN_ETC_PATH%22%0A%0A%23%20This%20is%20where%20we%20want%20our%20keyfiles%20to%20finally%20reside.%20configure-ovs%0A%23%20operates%20with%20temporary%20keyfiles%20in%20NM_CONN_RUN_PATH%20and%20then%20as%20a%20last%0A%23%20step%20moves%20those%20keyfiles%20to%20NM_CONN_SET_PATH%20if%20it%20is%20a%20different%20path%0A%23%20(not%20by%20default).%20This%20mitigates%20hard%20interruptions%20(SIGKILL%2C%20hard%20reboot)%0A%23%20of%20configure-ovs%20leaving%20the%20machine%20with%20a%20half-baked%20set%20of%20keyfiles%0A%23%20that%20might%20prevent%20machine%20networking%20from%20working%20correctly.%0ANM_CONN_SET_PATH%3D%22%24%7BNM_CONN_SET_PATH%3A-%24NM_CONN_RUN_PATH%7D%22%0A%0AMANAGED_NM_CONN_SUFFIX%3D%22-slave-ovs-clone%22%0A%23%20Workaround%20to%20ensure%20OVS%20is%20installed%20due%20to%20bug%20in%20systemd%20Requires%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1888017%0Acopy_nm_conn_files()%20%7B%0A%20%20local%20dst_path%3D%22%241%22%0A%20%20for%20src%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20src_path%3D%24(dirname%20%22%24src%22)%0A%20%20%20%20file%3D%24(basename%20%22%24src%22)%0A%20%20%20%20if%20%5B%20-f%20%22%24src_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%24dst_path%2F%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20elif%20!%20cmp%20--silent%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Copying%20updated%20configuration%20%24file%22%0A%20%20%20%20%20%20%20%20cp%20-f%20%22%24src_path%2F%24file%22%20%22%24dst_path%2F%24file%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it's%20equal%20at%20destination%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20echo%20%22Skipping%20%24file%20since%20it%20does%20not%20exist%20at%20source%22%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0Aupdate_nm_conn_files_base()%20%7B%0A%20%20base_path%3D%24%7B1%7D%0A%20%20bridge_name%3D%24%7B2%7D%0A%20%20port_name%3D%24%7B3%7D%0A%20%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%20%20%23%20In%20RHEL7
%20files%20in%20%2F%7Betc%2Crun%7D%2FNetworkManager%2Fsystem-connections%20end%20without%20the%20suffix%20'.nmconnection'%2C%20whereas%20in%20RHCOS%20they%20end%20with%20the%20suffix.%0A%20%20MANAGED_NM_CONN_FILES%3D(%24(echo%20%22%24%7Bbase_path%7D%22%2F%7B%22%24bridge_name%22%2C%22%24ovs_interface%22%2C%22%24ovs_port%22%2C%22%24bridge_interface_name%22%2C%22%24default_port_name%22%7D%7B%2C.nmconnection%7D))%0A%20%20shopt%20-s%20nullglob%0A%20%20MANAGED_NM_CONN_FILES%2B%3D(%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D.nmconnection%20%24%7Bbase_path%7D%2F*%24%7BMANAGED_NM_CONN_SUFFIX%7D)%0A%20%20shopt%20-u%20nullglob%0A%7D%0A%0Aupdate_nm_conn_run_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_RUN_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_set_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_SET_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0Aupdate_nm_conn_etc_files()%20%7B%0A%20%20update_nm_conn_files_base%20%22%24%7BNM_CONN_ETC_PATH%7D%22%20%22%24%7B1%7D%22%20%22%24%7B2%7D%22%0A%7D%0A%0A%23%20Move%20and%20reload%20keyfiles%20at%20their%20final%20destination%0Aset_nm_conn_files()%20%7B%0A%20%20if%20%5B%20%22%24NM_CONN_RUN_PATH%22%20!%3D%20%22%24NM_CONN_SET_PATH%22%20%5D%3B%20then%0A%20%20%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%20%20%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20%20%20copy_nm_conn_files%20%22%24NM_CONN_SET_PATH%22%0A%20%20%20%20rm_nm_conn_files%0A%0A%20%20%20%20%23%20reload%20keyfiles%0A%20%20%20%20nmcli%20connection%20reload%0A%20%20fi%0A%7D%0A%0A%23%20Used%20to%20remove%20files%20managed%20by%20configure-ovs%20and%20temporary%20leftover%20files%20from%20network%20manager%0Arm_nm_conn_files()%20%7B%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20%5B%20-f%20%22%24file%22%20%5D%3B%20then%0A%20%20%20%20%20%20rm%20-f%20%22%24file%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20file%20%24file%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20fi%0A%20%20done%0A%20%20for%20file%20in%20%22%24%7BMANAGED_NM_CONN_FILES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20for%20temp%20in%20%24(compgen%20-G%20%22%24%7Bfile%7D.*%22)%3B%20do%0A%20%20%20%20%20%20rm%20-f%20%22%24temp%22%0A%20%20%20%20%20%20echo%20%22Removed%20nmconnection%20temporary%20file%20%24temp%22%0A%20%20%20%20%20%20nm_config_changed%3D1%0A%20%20%20%20done%0A%20%20done%0A%7D%0A%0A%23%20Used%20to%20clone%20a%20slave%20connection%20by%20uuid%2C%20returns%20new%20name%0Aclone_slave_connection()%20%7B%0A%20%20local%20uuid%3D%22%241%22%0A%20%20local%20old_name%0A%20%20old_name%3D%22%24(%24NMCLI_GET_VALUE%20connection.id%20connection%20show%20uuid%20%22%24uuid%22)%22%0A%20%20local%20new_name%3D%22%24%7Bold_name%7D%24%7BMANAGED_NM_CONN_SUFFIX%7D%22%0A%20%20if%20nmcli%20connection%20show%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20existing%20ovs%20slave%20%24%7Bnew_name%7D%20connection%20profile%20file%20found%2C%20overwriting...%22%20%3E%262%0A%20%20%20%20nmcli%20connection%20delete%20id%20%22%24%7Bnew_name%7D%22%20%26%3E%20%2Fdev%2Fnull%0A%20%20fi%0A%20%20clone_nm_conn%20%24uuid%20%22%24%7Bnew_name%7D%22%0A%20%20echo%20%22%24%7Bnew_name%7D%22%0A%7D%0A%0A%23%20Used%20to%20replace%20an%20old%20master%20connection%20uuid%20with%20a%20new%20one%20on%20all%20connections%0Areplace_connection_master()%20%7B%0A%20%20local
%20old%3D%22%241%22%0A%20%20local%20new%3D%22%242%22%0A%20%20for%20conn_uuid%20in%20%24(%24NMCLI_GET_VALUE%20UUID%20connection%20show)%20%3B%20do%0A%20%20%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20uuid%20%22%24conn_uuid%22)%22%20!%3D%20%22%24old%22%20%5D%3B%20then%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20local%20autoconnect%3D%24(%24NMCLI_GET_VALUE%20connection.autoconnect%20connection%20show%20%22%24conn_uuid%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20!%3D%20%22activated%22%20%5D%20%26%26%20%5B%20%22%24autoconnect%22%20!%3D%20%22yes%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Assume%20that%20slave%20profiles%20intended%20to%20be%20used%20are%20those%20that%20are%3A%0A%20%20%20%20%20%20%23%20-%20active%0A%20%20%20%20%20%20%23%20-%20or%20inactive%20(which%20might%20be%20due%20to%20link%20being%20down)%20but%20to%20be%20autoconnected.%0A%20%20%20%20%20%20%23%20Otherwise%2C%20ignore%20them.%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20make%20changes%20for%20slave%20profiles%20in%20a%20new%20clone%0A%20%20%20%20local%20new_name%0A%20%20%20%20new_name%3D%24(clone_slave_connection%20%24conn_uuid)%0A%0A%20%20%20%20mod_nm_conn%20%22%24new_name%22%20connection.master%20%22%24new%22%20connection.autoconnect-priority%20100%20connection.autoconnect%20no%20%0A%20%20%20%20echo%20%22Replaced%20master%20%24old%20with%20%24new%20for%20slave%20profile%20%24new_name%22%0A%20%20done%0A%7D%0A%0A%23%20when%20creating%20the%20bridge%2C%20we%20use%20a%20value%20lower%20than%20NM's%20ethernet%20device%20default%20route%20metric%0A%23%20(we%20pick%2048%20and%2049%20to%20be%20lower%20than%20anything%20that%20NM%20chooses%20by%20default)%0ABRIDGE_METRIC%3D%2248%22%0ABRIDGE1_METRIC%3D%2249%22%0A%23%20Given%20an%20interface%2C%20generates%20NM%20configuration%20to%20add%20to%20an%20OVS%20bridge%0Aconvert_to_bridge()%20%7B%0A%20%20local%20iface%3D%24%7B1%7D%0A%20%20local%20bridge_name%3D%24%7B2%7D%0A%20%20local%20port_name%3D%24%7B3%7D%0A%20%20local%20bridge_metric%3D%24%7B4%7D%0A%20%20local%20ovs_port%3D%22ovs-port-%24%7Bbridge_name%7D%22%0A%20%20local%20ovs_interface%3D%22ovs-if-%24%7Bbridge_name%7D%22%0A%20%20local%20default_port_name%3D%22ovs-port-%24%7Bport_name%7D%22%20%23%20ovs-port-phys0%0A%20%20local%20bridge_interface_name%3D%22ovs-if-%24%7Bport_name%7D%22%20%23%20ovs-if-phys0%0A%0A%20%20if%20%5B%20%22%24iface%22%20%3D%20%22%24bridge_name%22%20%5D%3B%20then%0A%20%20%20%20%23%20handle%20vlans%20and%20bonds%20etc%20if%20they%20have%20already%20been%0A%20%20%20%20%23%20configured%20via%20nm%20key%20files%20and%20br-ex%20is%20already%20up%0A%20%20%20%20ifaces%3D%24(ovs-vsctl%20list-ifaces%20%24%7Biface%7D)%0A%20%20%20%20for%20intf%20in%20%24ifaces%3B%20do%20configure_driver_options%20%24intf%3B%20done%0A%20%20%20%20echo%20%22Networking%20already%20configured%20and%20up%20for%20%24%7Bbridge-name%7D!%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20flag%20to%20reload%20NM%20to%20account%20for%20all%20the%20configuration%20changes%0A%20%20%23%20going%20forward%0A%20%20nm_config_changed%3D1%0A%0A%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20Unable%20to%20find%20default%20gateway%20interface%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%20%20%23%20find%20the%20MAC%20from%20OVS%20config%20or%20the%20default%20interface%20to%20use%20for%20OVS%20internal%20port%0A%20%20%2
3%20this%20prevents%20us%20from%20getting%20a%20different%20DHCP%20lease%20and%20dropping%20connection%0A%20%20if%20!%20iface_mac%3D%24(%3C%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface%7D%2Faddress%22)%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MAC%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20echo%20%22MAC%20address%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mac%7D%22%0A%0A%20%20%23%20find%20MTU%20from%20original%20iface%0A%20%20iface_mtu%3D%24(ip%20link%20show%20%22%24iface%22%20%7C%20awk%20'%7Bprint%20%245%3B%20exit%7D')%0A%20%20if%20%5B%5B%20-z%20%22%24iface_mtu%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Unable%20to%20determine%20default%20interface%20MTU%2C%20defaulting%20to%201500%22%0A%20%20%20%20iface_mtu%3D1500%0A%20%20else%0A%20%20%20%20echo%20%22MTU%20found%20for%20iface%3A%20%24%7Biface%7D%3A%20%24%7Biface_mtu%7D%22%0A%20%20fi%0A%0A%20%20%23%20store%20old%20conn%20for%20later%0A%20%20old_conn%3D%24(nmcli%20--fields%20UUID%2CDEVICE%20conn%20show%20--active%20%7C%20awk%20%22%2F%5Cs%24%7Biface%7D%5Cs*%5C%24%2F%20%7Bprint%20%5C%241%7D%22)%0A%0A%20%20if%20%5B%5B%20-z%20%22%24old_conn%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22ERROR%3A%20cannot%20find%20connection%20for%20interface%3A%20%24%7Biface%7D%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20create%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24bridge_name%22%20type%20ovs-bridge%20conn.interface%20%22%24bridge_name%22%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20%23%20find%20default%20port%20to%20add%20to%20bridge%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24default_port_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%24%7Biface%7D%0A%20%20%20%20add_nm_conn%20%22%24default_port_name%22%20type%20ovs-port%20conn.interface%20%24%7Biface%7D%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%20%5C%0A%20%20%20%20connection.autoconnect-slaves%201%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_port%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-port%20%22%24bridge_name%22%20%22%24bridge_name%22%0A%20%20%20%20add_nm_conn%20%22%24ovs_port%22%20type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20slave-type%20ovs-bridge%20master%20%22%24bridge_name%22%0A%20%20fi%0A%0A%20%20extra_phys_args%3D()%0A%20%20%23%20check%20if%20this%20interface%20is%20a%20vlan%2C%20bond%2C%20team%2C%20or%20ethernet%20type%0A%20%20if%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22vlan%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dvlan%0A%20%20%20%20vlan_id%3D%24(%24NMCLI_GET_VALUE%20vlan.id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_id%20for%20vlan%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20vlan.parent%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-z%20%22%24vlan_parent%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20unable%20to%20determine%20vlan_parent%20for%20vlan%20connecti
on%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%0A%20%20%20%20if%20nmcli%20connection%20show%20uuid%20%22%24vlan_parent%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20%20%20%23%20if%20the%20VLAN%20connection%20is%20configured%20with%20a%20connection%20UUID%20as%20parent%2C%20we%20need%20to%20find%20the%20underlying%20device%0A%20%20%20%20%20%20%23%20and%20create%20the%20bridge%20against%20it%2C%20as%20the%20parent%20connection%20can%20be%20replaced%20by%20another%20bridge.%0A%20%20%20%20%20%20vlan_parent%3D%24(%24NMCLI_GET_VALUE%20GENERAL.DEVICES%20conn%20show%20uuid%20%24%7Bvlan_parent%7D)%0A%20%20%20%20fi%0A%0A%20%20%20%20extra_phys_args%3D(%20dev%20%22%24%7Bvlan_parent%7D%22%20id%20%22%24%7Bvlan_id%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbond%0A%20%20%20%20%23%20check%20bond%20options%0A%20%20%20%20bond_opts%3D%24(%24NMCLI_GET_VALUE%20bond.options%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24bond_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20bond.options%20%22%24%7Bbond_opts%7D%22%20)%0A%20%20%20%20%20%20MODE_REGEX%3D%22(%5E%7C%2C)mode%3Dactive-backup(%2C%7C%24)%22%0A%20%20%20%20%20%20MAC_REGEX%3D%22(%5E%7C%2C)fail_over_mac%3D(1%7Cactive%7C2%7Cfollow)(%2C%7C%24)%22%0A%20%20%20%20%20%20if%20%5B%5B%20%24bond_opts%20%3D~%20%24MODE_REGEX%20%5D%5D%20%26%26%20%5B%5B%20%24bond_opts%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22team%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dteam%0A%20%20%20%20%23%20check%20team%20config%20options%0A%20%20%20%20team_config_opts%3D%24(%24NMCLI_GET_VALUE%20team.config%20-e%20no%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20if%20%5B%20-n%20%22%24team_config_opts%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20team.config%20is%20json%2C%20remove%20spaces%20to%20avoid%20problems%20later%20on%0A%20%20%20%20%20%20extra_phys_args%2B%3D(%20team.config%20%22%24%7Bteam_config_opts%2F%2F%5B%5B%3Aspace%3A%5D%5D%2F%7D%22%20)%0A%20%20%20%20%20%20team_mode%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.name%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20team_mac_policy%3D%24(echo%20%22%24%7Bteam_config_opts%7D%22%20%7C%20jq%20-r%20%22.runner.hwaddr_policy%20%2F%2F%20empty%22)%0A%20%20%20%20%20%20MAC_REGEX%3D%22(by_active%7Conly_active)%22%0A%20%20%20%20%20%20if%20%5B%20%22%24team_mode%22%20%3D%20%22activebackup%22%20%5D%20%26%26%20%5B%5B%20%22%24team_mac_policy%22%20%3D~%20%24MAC_REGEX%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20clone_mac%3D0%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22tun%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dtun%0A%20%20%20%20tun_mode%3D%24(%24NMCLI_GET_VALUE%20tun.mode%20-e%20no%20connection%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20extra_phys_args%2B%3D(%20tun.mode%20%22%24%7Btun_mode%7D%22%20)%0A%20%20elif%20%5B%20%22%24(%24NMCLI_GET_VALUE%20connection.type%20conn%20show%20%24%7Bold_conn%7D)%22%20%3D%3D%20%22bridge%22%20%5D%3B%20then%0A%20%20%20%20iface_type%3Dbridge%0A%20%20else%0A%20%20%20%20iface_type%3D802-3-ethernet%0A%20%20fi%0A%0A%20%20if%20%5B%20!%20%22%24%7Bclone_mac%3A-%7D%22%20%3D%20%220%22%20%5D
%3B%20then%0A%20%20%20%20%23%20In%20active-backup%20link%20aggregation%2C%20with%20fail_over_mac%20mode%20enabled%2C%0A%20%20%20%20%23%20cloning%20the%20mac%20address%20is%20not%20supported.%20It%20is%20possible%20then%20that%0A%20%20%20%20%23%20br-ex%20has%20a%20different%20mac%20address%20than%20the%20bond%20which%20might%20be%0A%20%20%20%20%23%20troublesome%20on%20some%20platforms%20where%20the%20nic%20won't%20accept%20packets%20with%0A%20%20%20%20%23%20a%20different%20destination%20mac.%20But%20nobody%20has%20complained%20so%20far%20so%20go%20on%0A%20%20%20%20%23%20with%20what%20we%20got.%20%0A%20%20%20%20%0A%20%20%20%20%23%20Do%20set%20it%20though%20for%20other%20link%20aggregation%20configurations%20where%20the%0A%20%20%20%20%23%20mac%20address%20would%20otherwise%20depend%20on%20enslave%20order%20for%20which%20we%20have%0A%20%20%20%20%23%20no%20control%20going%20forward.%0A%20%20%20%20extra_phys_args%2B%3D(%20802-3-ethernet.cloned-mac-address%20%22%24%7Biface_mac%7D%22%20)%0A%20%20fi%0A%0A%20%20%23%20use%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%20instead%20of%20%24%7Bextra_phys_args%5B%40%5D%7D%20to%20be%20compatible%20with%20bash%204.2%20in%20RHEL7.9%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24bridge_interface_name%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%24%7Biface%7D%0A%20%20%20%20ovs_default_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24default_port_name%22)%0A%20%20%20%20add_nm_conn%20%22%24bridge_interface_name%22%20type%20%24%7Biface_type%7D%20conn.interface%20%24%7Biface%7D%20master%20%22%24ovs_default_port_conn%22%20%5C%0A%20%20%20%20%20%20slave-type%20ovs-port%20connection.autoconnect-priority%20100%20connection.autoconnect-slaves%201%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20%5C%0A%20%20%20%20%20%20%24%7Bextra_phys_args%5B%40%5D%2B%22%24%7Bextra_phys_args%5B%40%5D%7D%22%7D%0A%20%20fi%0A%0A%20%20%23%20Get%20the%20new%20connection%20uuids%0A%20%20new_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24bridge_interface_name%22)%0A%20%20ovs_port_conn%3D%24(%24NMCLI_GET_VALUE%20connection.uuid%20conn%20show%20%22%24ovs_port%22)%0A%0A%20%20%23%20Update%20connections%20with%20master%20property%20set%20to%20use%20the%20new%20connection%0A%20%20replace_connection_master%20%24old_conn%20%24new_conn%0A%20%20replace_connection_master%20%24iface%20%24new_conn%0A%0A%20%20ipv4_method%3D%24(%24NMCLI_GET_VALUE%20ipv4.method%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_method%3D%24(%24NMCLI_GET_VALUE%20ipv6.method%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20ipv4_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv4.addresses%20conn%20show%20%22%24old_conn%22)%0A%20%20ipv6_addresses%3D%24(%24NMCLI_GET_VALUE%20ipv6.addresses%20conn%20show%20%22%24old_conn%22)%0A%0A%20%20%23%20Warn%20about%20an%20invalid%20MTU%20that%20will%20most%20likely%20fail%20in%20one%20way%20or%0A%20%20%23%20another%0A%20%20if%20%5B%20%24%7Biface_mtu%7D%20-lt%201280%20%5D%20%26%26%20%5B%20%22%24%7Bipv6_method%7D%22%20!%3D%20%22disabled%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20MTU%20%24%7Biface_mtu%7D%20is%20lower%20than%20the%20minimum%20required%20of%201280%20for%20IPv6%22%0A%20%20fi%0A%0A%20%20if%20!%20nmcli%20connection%20show%20%22%24ovs_interface%22%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20destroy%20interface%20%22%24bridge_name%22%0A%20%20%20%20%23%20Clone%20the%20connection%20in%20case%20
the%20method%20is%20manual%20or%20in%20case%20the%20an%20address%20is%20set%20(DHCP%20%2B%20static%20IP)%0A%20%20%20%20if%20%5B%20%22%24%7Bipv4_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv4_addresses%7D%22%20!%3D%20%22%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_method%7D%22%20%3D%20%22manual%22%20%5D%20%7C%7C%20%5B%20%22%24%7Bipv6_addresses%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Static%20IP%20addressing%20detected%20on%20default%20gateway%20connection%3A%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%23%20clone%20the%20old%20connection%20to%20get%20the%20address%20settings%0A%20%20%20%20%20%20%23%20prefer%20cloning%20vs%20copying%20the%20connection%20file%20to%20avoid%20problems%20with%20selinux%0A%20%20%20%20%20%20clone_nm_conn%20%22%24%7Bold_conn%7D%22%20%22%24%7Bovs_interface%7D%22%0A%20%20%20%20%20%20shopt%20-s%20nullglob%0A%20%20%20%20%20%20new_conn_files%3D(%24%7BNM_CONN_RUN_PATH%7D%2F%22%24%7Bovs_interface%7D%22*)%0A%20%20%20%20%20%20shopt%20-u%20nullglob%0A%20%20%20%20%20%20if%20%5B%20%24%7B%23new_conn_files%5B%40%5D%7D%20-ne%201%20%5D%20%7C%7C%20%5B%20!%20-f%20%22%24%7Bnew_conn_files%5B0%5D%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20could%20not%20find%20%24%7Bovs_interface%7D%20conn%20file%20after%20cloning%20from%20%24%7Bold_conn%7D%22%0A%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20new_conn_file%3D%22%24%7Bnew_conn_files%5B0%5D%7D%22%0A%0A%20%20%20%20%20%20%23%20modify%20the%20connection%20type%20directly%20because%20it%20can't%20be%20modified%0A%20%20%20%20%20%20%23%20through%20nmcli%0A%20%20%20%20%20%20sed%20-i%20'%2F%5E%5C%5Bconnection%5C%5D%24%2F%2C%2F%5E%5C%5B%2F%20s%2F%5Etype%3D.*%24%2Ftype%3Dovs-interface%2F'%20%24%7Bnew_conn_file%7D%0A%0A%20%20%20%20%20%20%23%20modify%20some%20more%20settings%20through%20nmcli%0A%20%20%20%20%20%20mod_nm_conn%20%22%24%7Bovs_interface%7D%22%20conn.interface%20%22%24bridge_name%22%20%5C%0A%20%20%20%20%20%20%20%20connection.multi-connect%20%22%22%20connection.autoconnect%20no%20%5C%0A%20%20%20%20%20%20%20%20connection.master%20%22%24ovs_port_conn%22%20connection.slave-type%20ovs-port%20%5C%0A%20%20%20%20%20%20%20%20ovs-interface.type%20internal%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%0A%0A%20%20%20%20%20%20echo%20%22Loaded%20new%20%24ovs_interface%20connection%20file%3A%20%24%7Bnew_conn_file%7D%22%0A%20%20%20%20else%0A%20%20%20%20%20%20extra_if_brex_args%3D%22%22%0A%20%20%20%20%20%20%23%20check%20if%20interface%20had%20ipv4%2Fipv6%20addresses%20assigned%0A%20%20%20%20%20%20num_ipv4_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ipv4_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20IPV6%20should%20have%20at%20least%20a%20link%20local%20address.%20Check%20for%20more%20than%201%20to%20see%20if%20there%20is%20an%0A%20%20%20%20%20%20%23%20assigned%20address.%0A%20%20%20%20%20%20num_ip6_addrs%3D%24(ip%20-j%20a%20show%20dev%20%24%7Biface%7D%20%7C%20jq%20%22.%5B0%5D.addr_info%20%7C%20map(.%20%7C%20select(.family%20%3D%3D%20%5C%22inet6%5C%22%20and%20.sc
ope%20!%3D%20%5C%22link%5C%22))%20%7C%20length%22)%0A%20%20%20%20%20%20if%20%5B%20%22%24num_ip6_addrs%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.may-fail%20no%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20dhcp%20client%20ids%0A%20%20%20%20%20%20dhcp_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv4.dhcp-client-id%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dhcp-client-id%20%24%7Bdhcp_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20dhcp6_client_id%3D%24(%24NMCLI_GET_VALUE%20ipv6.dhcp-duid%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24dhcp6_client_id%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dhcp-duid%20%24%7Bdhcp6_client_id%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20ipv6_addr_gen_mode%3D%24(%24NMCLI_GET_VALUE%20ipv6.addr-gen-mode%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_addr_gen_mode%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.addr-gen-mode%20%24%7Bipv6_addr_gen_mode%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20static%20DNS%20address%0A%20%20%20%20%20%20ipv4_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.dns%20%24%7Bipv4_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.dns%20%24%7Bipv6_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20%23%20check%20for%20auto-dns%0A%20%20%20%20%20%20ipv4_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv4.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv4_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv4.ignore-auto-dns%20%24%7Bipv4_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20ipv6_auto_dns%3D%24(%24NMCLI_GET_VALUE%20ipv6.ignore-auto-dns%20conn%20show%20%24%7Bold_conn%7D)%0A%20%20%20%20%20%20if%20%5B%20-n%20%22%24ipv6_auto_dns%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20extra_if_brex_args%2B%3D%22ipv6.ignore-auto-dns%20%24%7Bipv6_auto_dns%7D%20%22%0A%20%20%20%20%20%20fi%0A%0A%20%20%20%20%20%20add_nm_conn%20%22%24ovs_interface%22%20type%20ovs-interface%20slave-type%20ovs-port%20conn.interface%20%22%24bridge_name%22%20master%20%22%24ovs_port_conn%22%20%5C%0A%20%20%20%20%20%20%20%20802-3-ethernet.mtu%20%24%7Biface_mtu%7D%20802-3-ethernet.cloned-mac-address%20%24%7Biface_mac%7D%20%5C%0A%20%20%20%20%20%20%20%20ipv4.method%20%22%24%7Bipv4_method%7D%22%20ipv4.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20ipv6.method%20%22%24%7Bipv6_method%7D%22%20ipv6.route-metric%20%22%24%7Bbridge_metric%7D%22%20%5C%0A%20%20%20%20%20%20%20%20%24%7Bextra_if_brex_args%7D%0A%20%20%20%20fi%0A%20%20fi%0A%0A%20%20configure_driver_options%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20remove%20a%20bridge%0Aremove_ovn_bridges()%20%7B%0A%20%20bridge_name%3D%24%7B1%7D%0A%20%20port_name%3D%24%7B2%7D%0A%0A%20%20%23%20Remove%20the%20keyfiles%20from%20known%20configuration%20paths%0A%20%20update_nm_conn_run_files%20%24%7B
bridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20update_nm_conn_set_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%20%20%23%20Shouldn't%20be%20necessary%2C%20workaround%20for%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-41489%0A%20%20update_nm_conn_etc_files%20%24%7Bbridge_name%7D%20%24%7Bport_name%7D%0A%20%20rm_nm_conn_files%0A%0A%20%20%23%20NetworkManager%20will%20not%20remove%20%24%7Bbridge_name%7D%20if%20it%20has%20the%20patch%20port%20created%20by%20ovn-kubernetes%0A%20%20%23%20so%20remove%20explicitly%0A%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20%24%7Bbridge_name%7D%0A%7D%0A%0A%23%20Removes%20any%20previous%20ovs%20configuration%0Aremove_all_ovn_bridges()%20%7B%0A%20%20echo%20%22Reverting%20any%20previous%20OVS%20configuration%22%0A%20%20%0A%20%20remove_ovn_bridges%20br-ex%20phys0%0A%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%0A%20%20echo%20%22OVS%20configuration%20successfully%20reverted%22%0A%7D%0A%0A%23%20Reloads%20NM%20NetworkManager%20profiles%20if%20any%20configuration%20change%20was%20done.%0A%23%20Accepts%20a%20list%20of%20devices%20that%20should%20be%20re-connect%20after%20reload.%0Areload_profiles_nm()%20%7B%0A%20%20if%20%5B%20%24%7Bnm_config_changed%3A-0%7D%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%23%20no%20config%20was%20changed%2C%20no%20need%20to%20reload%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20reload%20profiles%0A%20%20nmcli%20connection%20reload%0A%0A%20%20%23%20precautionary%20sleep%20of%2010s%20(default%20timeout%20of%20NM%20to%20bring%20down%20devices)%0A%20%20sleep%2010%0A%0A%20%20%23%20After%20reload%2C%20devices%20that%20were%20already%20connected%20should%20connect%20again%0A%20%20%23%20if%20any%20profile%20is%20available.%20If%20no%20profile%20is%20available%2C%20a%20device%20can%0A%20%20%23%20remain%20disconnected%20and%20we%20have%20to%20explicitly%20connect%20it%20so%20that%20a%0A%20%20%23%20profile%20is%20generated.%20This%20can%20happen%20for%20physical%20devices%20but%20should%0A%20%20%23%20not%20happen%20for%20software%20devices%20as%20those%20always%20require%20a%20profile.%0A%20%20for%20dev%20in%20%24%40%3B%20do%0A%20%20%20%20%23%20Only%20attempt%20to%20connect%20a%20disconnected%20device%0A%20%20%20%20local%20connected_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20device%20show%20%22%24dev%22%20%7C%7C%20echo%20%22%22)%0A%20%20%20%20if%20%5B%5B%20%22%24connected_state%22%20%3D~%20%22disconnected%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%23%20keep%20track%20if%20a%20profile%20by%20the%20same%20name%20as%20the%20device%20existed%20%0A%20%20%20%20%20%20%23%20before%20we%20attempt%20activation%0A%20%20%20%20%20%20local%20named_profile_existed%3D%24(%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%5D%20%7C%7C%20%5B%20-f%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22%20%5D%20%26%26%20echo%20%22yes%22)%0A%20%20%20%20%20%20%0A%20%20%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20connect%20device%20%24dev%22%0A%20%20%20%20%20%20%20%20%20%20nmcli%20device%20connect%20%22%24dev%22%20%26%26%20break%0A%20%20%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%0A%20%20%20%20%20%20%23%20if%20a%20profile%20did%20not%20exist%20before%20but%20does%20now%2C%20it%20was%20generated%0A%20%20%20%20%20%20%23%20but%20we%20want%20it%20to%20be%20ephemeral%2C%20so%20move%20it%20back%20to%20%2Frun%0A%20%20%20%20%20%20if%20%5B%20!%20%22%24named_profile_existed%22%20%3D%20%22yes%22%20%5D%3B%
20then%0A%20%20%20%20%20%20%20%20MANAGED_NM_CONN_FILES%3D(%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D%22%20%22%24%7BNM_CONN_CONF_PATH%7D%2F%24%7Bdev%7D.nmconnection%22)%0A%20%20%20%20%20%20%20%20copy_nm_conn_files%20%22%24%7BNM_CONN_RUN_PATH%7D%22%0A%20%20%20%20%20%20%20%20rm_nm_conn_files%0A%20%20%20%20%20%20%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20might%20have%20been%20moved%0A%20%20%20%20%20%20%20%20nmcli%20connection%20reload%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20echo%20%22Waiting%20for%20interface%20%24dev%20to%20activate...%22%0A%20%20%20%20%23%20don't%20use%20--escape%20no%2C%20we%20use%20%3A%20delimiter%20here%0A%20%20%20%20if%20!%20timeout%2060%20bash%20-c%20%22while%20!%20nmcli%20-g%20DEVICE%2CSTATE%20c%20%7C%20grep%20%22'%22'%22%24dev%22%3Aactivated'%22'%22%3B%20do%20sleep%205%3B%20done%22%3B%20then%0A%20%20%20%20%20%20echo%20%22WARNING%3A%20%24dev%20did%20not%20activate%22%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20nm_config_changed%3D0%0A%7D%0A%0A%23%20Removes%20all%20configuration%20and%20reloads%20NM%20if%20necessary%0Arollback_nm()%20%7B%0A%20%20phys0%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20phys1%3D%24(get_bridge_physical_interface%20ovs-if-phys1)%0A%20%20%0A%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20remove_all_ovn_bridges%0A%20%20%0A%20%20%23%20reload%20profiles%20so%20that%20NM%20notices%20that%20some%20were%20removed%0A%20%20reload_profiles_nm%20%22%24phys0%22%20%22%24phys1%22%0A%7D%0A%0A%23%20Add%20a%20temporary%20deactivated%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20folowed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20add%60%0Aadd_nm_conn()%20%7B%0A%20%20%23%20Use%20%60save%20no%60%20to%20add%20a%20temporary%20profile%0A%20%20nmcli%20c%20add%20save%20no%20con-name%20%22%24%40%22%20connection.autoconnect%20no%0A%7D%0A%0A%23%20Modify%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20name%20followed%20by%20arguments%20passed%20to%0A%23%20%60nmcli%20connection%20modify%60%0Amod_nm_conn()%20%7B%0A%20%20%23%20the%20easiest%20thing%20to%20do%20here%20would%20be%20to%20use%20%60nmcli%20c%20mod%20--temporary%60%0A%20%20%23%20but%20there%20is%20a%20bug%20in%20selinux%20profiles%20that%20denies%20NM%20from%20performing%0A%20%20%23%20the%20operation%0A%20%20local%20dst_path%3D%24%7BNM_CONN_RUN_PATH%7D%2F%241.nmconnection%0A%20%20local%20src_path%0A%20%20src_path%3D%24(mktemp)%0A%20%20shift%0A%20%20cat%20%22%24dst_path%22%20%3E%20%22%24src_path%22%0A%20%20rm%20-f%20%22%24dst_path%22%0A%20%20nmcli%20--offline%20c%20mod%20%22%24%40%22%20%3C%20%22%24src_path%22%20%3E%20%22%24dst_path%22%0A%20%20rm%20-f%20%22%24src_path%22%0A%20%20chmod%20600%20%22%24dst_path%22%0A%20%20nmcli%20c%20load%20%22%24dst_path%22%0A%7D%0A%0A%23%20Clone%20to%20a%20temporary%20connection%20profile%20%0A%23%20First%20argument%20is%20the%20connection%20to%20clone%2C%20second%20argument%20is%20the%20clone%20name%0Aclone_nm_conn()%20%7B%0A%20%20%23%20clone%20as%20temporary%20so%20that%20it%20is%20generated%20in%20NM_CONN_RUN_PATH%0A%20%20nmcli%20connection%20clone%20--temporary%20%22%241%22%20%22%242%22%20%26%3E%20%2Fdev%2Fnull%0A%7D%0A%0A%23%20Activates%20an%20ordered%20set%20of%20NM%20connection%20profiles%0Aactivate_nm_connections()%20%7B%0A%20%20local%20connections%3D(%22%24%40%22)%0A%0A%20%20%23%20We%20want%20autoconnect%20set%20for%20our%20cloned%20slave%20profiles%
20so%20that%20they%20are%0A%20%20%23%20used%20over%20the%20original%20profiles%20if%20implicitly%20re-activated%20with%20other%0A%20%20%23%20dependant%20profiles.%20Otherwise%20if%20a%20slave%20activates%20with%20an%20old%20profile%2C%0A%20%20%23%20the%20old%20master%20profile%20might%20activate%20as%20well%2C%20interfering%20and%20causing%0A%20%20%23%20further%20activations%20to%20fail.%0A%20%20%23%20Slave%20interfaces%20should%20already%20be%20active%20so%20setting%20autoconnect%20here%0A%20%20%23%20won't%20implicitly%20activate%20them%20but%20there%20is%20an%20edge%20case%20where%20a%20slave%0A%20%20%23%20might%20be%20inactive%20(link%20down%20for%20example)%20and%20in%20that%20case%20setting%0A%20%20%23%20autoconnect%20will%20cause%20an%20implicit%20activation.%20This%20is%20not%20necessarily%20a%0A%20%20%23%20problem%20and%20hopefully%20we%20can%20make%20sure%20everything%20is%20activated%20as%20we%0A%20%20%23%20want%20next.%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20fi%0A%20%20done%0A%0A%20%20%23%20Activate%20all%20connections%20and%20fail%20if%20activation%20fails%0A%20%20%23%20For%20slave%20connections%20-%20for%20as%20long%20as%20at%20least%20one%20slave%20that%20belongs%20to%20a%20bond%2Fteam%0A%20%20%23%20comes%20up%2C%20we%20should%20not%20fail%0A%20%20declare%20-A%20master_interfaces%0A%20%20for%20conn%20in%20%22%24%7Bconnections%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%23%20Get%20the%20slave%20type%0A%20%20%20%20local%20slave_type%3D%24(%24NMCLI_GET_VALUE%20connection.slave-type%20connection%20show%20%22%24conn%22)%0A%20%20%20%20local%20is_slave%3Dfalse%0A%20%20%20%20if%20%5B%20%22%24slave_type%22%20%3D%20%22team%22%20%5D%20%7C%7C%20%5B%20%22%24slave_type%22%20%3D%20%22bond%22%20%5D%3B%20then%0A%20%20%20%20%20%20is_slave%3Dtrue%0A%20%20%20%20fi%20%0A%0A%20%20%20%20%23%20For%20slave%20interfaces%2C%20initialize%20the%20master%20interface%20to%20false%20if%20the%20key%20is%20not%20yet%20in%20the%20array%0A%20%20%20%20local%20master_interface%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20master_interface%3D%24(%24NMCLI_GET_VALUE%20connection.master%20connection%20show%20%22%24conn%22)%0A%20%20%20%20%20%20if%20!%20%5B%5B%20-v%20%22master_interfaces%5B%24master_interface%5D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dfalse%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20slaves%20should%20implicitly%20activate%2C%20give%20them%20a%20chance%20to%20do%20so%0A%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20if%20!%20timeout%205%20bash%20-c%20%22while%20!%20%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22%20%7C%20grep%20activated%3B%20do%20sleep%201%3B%20done%22%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22WARNING%3A%20slave%20%24conn%20did%20not%20implicitly%20activate%20in%205s%2C%20activating%20explicitly.%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Do%20not%20activate%20interfaces%20that%20are%20already%20active%0A%20%20%20%20%23%20But%20set%20the%20entry%20in%20master_interfaces%20to%20true%20if%20this%20is%20a%20slave%0A%20%20%20%20%23%20Also%20set%20autoconnect%20to%20yes%0A%20
%20%20%20local%20active_state%3D%24(%24NMCLI_GET_VALUE%20GENERAL.STATE%20conn%20show%20%22%24conn%22)%0A%20%20%20%20if%20%5B%20%22%24active_state%22%20%3D%3D%20%22activated%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Connection%20%24conn%20already%20activated%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%24master_interface%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20%20%20%20%20continue%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Activate%20all%20interfaces%20that%20are%20not%20yet%20active%0A%20%20%20%20for%20i%20in%20%7B1..10%7D%3B%20do%0A%20%20%20%20%20%20echo%20%22Attempt%20%24i%20to%20bring%20up%20connection%20%24conn%22%0A%20%20%20%20%20%20nmcli%20conn%20up%20%22%24conn%22%20%26%26%20s%3D0%20%26%26%20break%20%7C%7C%20s%3D%24%3F%0A%20%20%20%20%20%20sleep%205%0A%20%20%20%20done%0A%20%20%20%20if%20%5B%20%24s%20-eq%200%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Brought%20up%20connection%20%24conn%20successfully%22%0A%20%20%20%20%20%20if%20%24is_slave%3B%20then%0A%20%20%20%20%20%20%20%20master_interfaces%5B%22%24master_interface%22%5D%3Dtrue%0A%20%20%20%20%20%20fi%0A%20%20%20%20elif%20!%20%24is_slave%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20connection%20%24conn%20after%20%24i%20attempts%22%0A%20%20%20%20%20%20return%20%24s%0A%20%20%20%20fi%0A%20%20%20%20mod_nm_conn%20%22%24conn%22%20connection.autoconnect%20yes%0A%20%20done%0A%20%20%23%20Check%20that%20all%20master%20interfaces%20report%20at%20least%20a%20single%20active%20slave%0A%20%20%23%20Note%3A%20associative%20arrays%20require%20an%20exclamation%20mark%20when%20looping%0A%20%20for%20i%20in%20%22%24%7B!master_interfaces%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20if%20!%20%24%7Bmaster_interfaces%5B%22%24i%22%5D%7D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22ERROR%3A%20Cannot%20bring%20up%20any%20slave%20interface%20for%20master%20interface%3A%20%24i%22%0A%20%20%20%20%20%20%20%20return%201%0A%20%20%20%20fi%0A%20%20done%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24iface%0A%23%20Writes%20content%20of%20%24iface%20into%20%24iface_default_hint_file%0Awrite_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20iface%3D%22%242%22%0A%0A%20%20echo%20%22%24%7Biface%7D%22%20%3E%7C%20%22%24%7Biface_default_hint_file%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%0A%23%20Returns%20the%20stored%20interface%20default%20hint%20if%20the%20hint%20is%20non-empty%2C%0A%23%20not%20br-ex%2C%20not%20br-ex1%20and%20if%20the%20interface%20can%20be%20found%20in%20%2Fsys%2Fclass%2Fnet%0Aget_iface_default_hint()%20%7B%0A%20%20local%20iface_default_hint_file%3D%241%0A%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%3B%20then%0A%20%20%20%20local%20iface_default_hint%3D%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22br-ex1%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20-d%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Biface_default_hint%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%22%0A%7D%0A%0Aget_ip_from_ip_hint_fil
e()%20%7B%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20if%20%5B%5B%20!%20-f%20%22%24%7Bip_hint_file%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%20%20ip_hint%3D%24(cat%20%22%24%7Bip_hint_file%7D%22)%0A%20%20echo%20%22%24%7Bip_hint%7D%22%0A%7D%0A%0A%23%20This%20function%20waits%20for%20ip%20address%20of%20br-ex%20to%20be%20bindable%20only%20in%20case%20of%20ipv6%0A%23%20This%20is%20workaround%20for%20OCPBUGS-673%20as%20it%20will%20not%20allow%20starting%20crio%0A%23%20before%20address%20is%20bindable%0Atry_to_bind_ipv6_address()%20%7B%0A%20%20%23%20Retry%20for%201%20minute%0A%20%20retries%3D60%0A%20%20until%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20do%0A%20%20%20%20ip%3D%24(ip%20-6%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(.ifname%3D%3D%5C%22br-ex%5C%22)%20%7C%20.addr_info%5B%5D%20%7C%20select(.scope%3D%3D%5C%22global%5C%22)%20%7C%20.local)%22)%0A%20%20%20%20if%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22No%20ipv6%20ip%20to%20bind%20was%20found%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20sleep%201%0A%20%20%20%20((%20retries--%20))%0A%20%20done%0A%20%20if%20%5B%5B%20%24%7Bretries%7D%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Failed%20to%20bind%20ip%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%7D%0A%0A%23%20Get%20interface%20that%20matches%20ip%20from%20node%20ip%20hint%20file%0A%23%20in%20case%20file%20not%20exists%20return%20nothing%20and%0A%23%20fallback%20to%20default%20interface%20search%20flow%0Aget_nodeip_hint_interface()%20%7B%0A%20%20local%20ip_hint%3D%22%22%0A%20%20local%20ip_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge%3D%22%242%22%0A%20%20local%20iface%3D%22%22%0A%0A%20%20ip_hint%3D%24(get_ip_from_ip_hint_file%20%22%24%7Bip_hint_file%7D%22)%0A%20%20if%20%5B%5B%20-z%20%22%24%7Bip_hint%7D%22%20%20%5D%5D%3B%20then%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20iface%3D%24(ip%20-j%20addr%20%7C%20jq%20-r%20%22first(.%5B%5D%20%7C%20select(any(.addr_info%5B%5D%3B%20.local%3D%3D%5C%22%24%7Bip_hint%7D%5C%22)%20and%20.ifname!%3D%5C%22br-ex1%5C%22%20and%20.ifname!%3D%5C%22%24%7Bextra_bridge%7D%5C%22))%20%7C%20.ifname%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20fi%0A%7D%0A%0A%23%20Accepts%20parameters%20%24bridge_interface%20(e.g.%20ovs-port-phys0)%0A%23%20Returns%20the%20physical%20interface%20name%20if%20%24bridge_interface%20exists%2C%20%22%22%20otherwise%0Aget_bridge_physical_interface()%20%7B%0A%20%20local%20bridge_interface%3D%22%241%22%0A%20%20local%20physical_interface%3D%22%22%0A%20%20physical_interface%3D%24(%24NMCLI_GET_VALUE%20connection.interface-name%20conn%20show%20%22%24%7Bbridge_interface%7D%22%202%3E%2Fdev%2Fnull%20%7C%7C%20echo%20%22%22)%0A%20%20echo%20%22%24%7Bphysical_interface%7D%22%0A%7D%0A%0A%23%20Accepts%20parameters%20%24iface_default_hint_file%2C%20%24extra_bridge_file%2C%20%24ip_hint_file%2C%20%24default_bridge_file%0A%23%20Determines%20the%20interface%20to%20be%20used%20for%20br-ex.%20Order%20of%20priority%20is%3A%0A%23%2
01.%20Use%20the%20user%20specified%20interface%20if%20provided%20in%20the%20default%20bridge%20file%0A%23%202.%20Use%20the%20node%20IP%20hint%20interface%0A%23%203.%20Use%20the%20previously%20selected%20interface%0A%23%204.%20Use%20the%20interface%20detected%20as%20default%20gateway%0A%23%0A%23%20Read%20%24default_bridge_file%20and%20return%20the%20contained%20interface.%20Otherwise%2C%0A%23%20read%20%24ip_hint_file%20and%20return%20the%20interface%20that%20matches%20this%20ip.%20Otherwise%2C%0A%23%20if%20the%20default%20interface%20is%20br-ex%2C%20use%20that%20and%20return.%0A%23%20If%20the%20default%20interface%20is%20not%20br-ex%3A%0A%23%20Check%20if%20there%20is%20a%20valid%20hint%20inside%20%24iface_default_hint_file.%20If%20so%2C%20use%20that%20hint.%0A%23%20If%20there%20is%20no%20valid%20hint%2C%20use%20the%20default%20interface%20that%20we%20found%20during%20the%20step%0A%23%20earlier.%0A%23%20Never%20use%20the%20interface%20that%20is%20provided%20inside%20%24extra_bridge_file%20for%20br-ex1.%0A%23%20Never%20use%20br-ex1.%0A%23%20Write%20the%20default%20interface%20to%20%24iface_default_hint_file%0Aget_default_bridge_interface()%20%7B%0A%20%20local%20iface%3D%22%22%0A%20%20local%20counter%3D0%0A%20%20local%20iface_default_hint_file%3D%22%241%22%0A%20%20local%20extra_bridge_file%3D%22%242%22%0A%20%20local%20ip_hint_file%3D%22%243%22%0A%20%20local%20default_bridge_file%3D%22%244%22%0A%20%20local%20extra_bridge%3D%22%22%0A%0A%20%20if%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%3B%20then%0A%20%20%20%20extra_bridge%3D%24(cat%20%24%7Bextra_bridge_file%7D)%0A%20%20fi%0A%0A%20%20%23%20try%20to%20use%20user%20specified%20file%20first%0A%20%20if%20%5B%20-f%20%22%24default_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20iface%3D%24(cat%20%22%24%7Bdefault_bridge_file%7D%22)%0A%20%20%20%20if%20%5B%20-z%20%22%24iface%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22ERROR%3A%20User%20specified%20bridge%20file%20detected%20without%20any%20data%22%0A%20%20%20%20%20%20exit%201%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20if%20node%20ip%20was%20set%2C%20we%20should%20search%20for%20interface%20that%20matches%20it%0A%20%20iface%3D%24(get_nodeip_hint_interface%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bextra_bridge%7D%22)%0A%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22%24%7Biface%7D%22%0A%20%20%20%20return%0A%20%20fi%0A%0A%20%20%23%20find%20default%20interface%0A%20%20%23%20the%20default%20interface%20might%20be%20br-ex%2C%20so%20check%20this%20before%20looking%20at%20the%20hint%0A%20%20while%20%5B%20%24%7Bcounter%7D%20-lt%2012%20%5D%3B%20do%0A%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20%23%20check%20ipv
6%0A%20%20%20%20%23%20never%20use%20the%20interface%20that's%20specified%20in%20extra_bridge_file%0A%20%20%20%20%23%20never%20use%20br-ex1%0A%20%20%20%20if%20%5B%20%22%24%7Bextra_bridge%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20grep%20-v%20%22%24%7Bextra_bridge%7D%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20else%0A%20%20%20%20%20%20iface%3D%24(ip%20-6%20route%20show%20default%20%7C%20grep%20-v%20%22br-ex1%22%20%7C%20awk%20'%7B%20if%20(%244%20%3D%3D%20%22dev%22)%20%7B%20print%20%245%3B%20exit%20%7D%20%7D')%0A%20%20%20%20fi%0A%20%20%20%20if%20%5B%5B%20-n%20%22%24%7Biface%7D%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20break%0A%20%20%20%20fi%0A%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20sleep%205%0A%20%20done%0A%0A%20%20%23%20if%20the%20default%20interface%20does%20not%20point%20out%20of%20br-ex%20or%20br-ex1%0A%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20%23%20determine%20if%20an%20interface%20default%20hint%20exists%20from%20a%20previous%20run%0A%20%20%20%20%23%20and%20if%20the%20interface%20has%20a%20valid%20default%20route%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%22%20%5D%20%26%26%0A%20%20%20%20%20%20%20%20%5B%20%22%24%7Biface_default_hint%7D%22%20!%3D%20%22%24%7Biface%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20start%20wherever%20count%20left%20off%20in%20the%20previous%20loop%0A%20%20%20%20%20%20%23%20allow%20this%20for%20one%20more%20iteration%20than%20the%20previous%20loop%0A%20%20%20%20%20%20while%20%5B%20%24%7Bcounter%7D%20-le%2012%20%5D%3B%20do%0A%20%20%20%20%20%20%20%20%23%20check%20ipv4%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20%23%20check%20ipv6%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24(ip%20-6%20route%20show%20default%20dev%20%22%24%7Biface_default_hint%7D%22)%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20iface%3D%22%24%7Biface_default_hint%7D%22%0A%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20counter%3D%24((counter%2B1))%0A%20%20%20%20%20%20%20%20sleep%205%0A%20%20%20%20%20%20done%0A%20%20%20%20fi%0A%20%20%20%20%23%20store%20what%20was%20determined%20was%20the%20(new)%20default%20interface%20inside%0A%20%20%20%20%23%20the%20default%20hint%20file%20for%20future%20reference%0A%20%20%20%20if%20%5B%20%22%24%7Biface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Biface%7D%22%0A%20%20%20%20fi%0A%20%20fi%0A%20%20echo%20%22%24%7Biface%7D%22%0A%7D%0A%0A%23%20Used%20to%20print%20network%20state%0Aprint_state()%20%7B%0A%20%20echo%20%22Current%20device%2C%20connection%2C%20interface%20and%20routing%20state%3A%22%0A%20%20nmcli%20-g%20all%20device%20%7C%20grep%20-v%20unmanaged%0A%20%20nmcli%20-g%20all%20connection%0A%20%20ip%20-d%20address%20show%0A%20%20ip%20route%20show%0A%20%20ip%20-6%20route%20show%0A%7D%0A%0A%23%20Setup%20an%20exit%20trap%20t
o%20rollback%20on%20error%0Ahandle_exit()%20%7B%0A%20%20e%3D%24%3F%0A%20%20tdir%3D%24(mktemp%20-u%20-d%20-t%20%22configure-ovs-%24(date%20%2B%25Y-%25m-%25d-%25H-%25M-%25S)-XXXXXXXXXX%22)%0A%20%20%0A%20%20if%20%5B%20%24e%20-eq%200%20%5D%3B%20then%0A%20%20%20%20print_state%0A%20%20%20%20%23%20remove%20previous%20troubleshooting%20information%0A%20%20%20%20rm%20-rf%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%0A%20%20echo%20%22ERROR%3A%20configure-ovs%20exited%20with%20error%3A%20%24e%22%0A%20%20print_state%0A%0A%20%20%23%20remove%20previous%20troubleshooting%20information%20except%20the%20oldest%20one%0A%20%20mapfile%20-t%20tdirs%20%3C%20%3C(compgen%20-G%20%22%24(dirname%20%22%24tdir%22)%2Fconfigure-ovs-*%22)%0A%20%20unset%20-v%20%22tdirs%5B0%5D%22%0A%20%20for%20dir%20in%20%22%24%7Btdirs%5B%40%5D%7D%22%3B%20do%20rm%20-rf%20%22%24dir%22%3B%20done%0A%0A%20%20%23%20copy%20configuration%20to%20tmp%20for%20troubleshooting%0A%20%20mkdir%20-p%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex%20phys0%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20update_nm_conn_run_files%20br-ex1%20phys1%0A%20%20copy_nm_conn_files%20%22%24tdir%22%0A%20%20echo%20%22Copied%20OVS%20configuration%20to%20%24tdir%20for%20troubleshooting%22%0A%0A%20%20%23%20attempt%20to%20restore%20the%20previous%20network%20state%0A%20%20echo%20%22Attempting%20to%20restore%20previous%20configuration...%22%0A%20%20rollback_nm%0A%20%20print_state%0A%0A%20%20exit%20%24e%0A%7D%0A%0A%23%20Setup%20a%20signal%20trap%20to%20rollback%0Ahandle_termination()%20%7B%0A%20%20echo%20%22WARNING%3A%20configure-ovs%20has%20been%20requested%20to%20terminate%2C%20quitting...%22%0A%20%20%0A%20%20%23%20by%20exiting%20with%20an%20error%20we%20will%20cleanup%20after%20ourselves%20in%20a%0A%20%20%23%20subsequent%20call%20to%20handle_exit%0A%20%20exit%201%0A%7D%0A%0A%23%20main%20function%0Aconfigure_ovs()%20%7B%0A%20%20set%20-eu%0A%0A%20%20%23%20setup%20traps%20to%20handle%20signals%20and%20other%20abnormal%20exits%0A%20%20trap%20'handle_termination'%20TERM%20INT%0A%20%20trap%20'handle_exit'%20EXIT%0A%0A%20%20%23%20this%20flag%20tracks%20if%20any%20config%20change%20was%20made%0A%20%20nm_config_changed%3D0%0A%0A%20%20%23%20Check%20that%20we%20are%20provided%20a%20valid%20NM%20connection%20path%0A%20%20if%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_CONF_PATH%22%20%5D%20%26%26%20%5B%20%22%24NM_CONN_SET_PATH%22%20!%3D%20%22%24NM_CONN_RUN_PATH%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Error%3A%20Incorrect%20NM%20connection%20path%3A%20%24NM_CONN_SET_PATH%20is%20not%20%24NM_CONN_CONF_PATH%20nor%20%24NM_CONN_RUN_PATH%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0A%20%20if%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0A%20%20fi%0A%0A%20%20if%20!%20rpm%20-qa%20%7C%20grep%20-q%20openvswitch%3B%20then%0A%20%20%20%20echo%20%22WARNING%3A%20Openvswitch%20package%20is%20not%20installed!%22%0A%20%20%20%20exit%201%0A%20%20fi%0A%0A%20%20%23%20print%20initial%20state%0A%20%20print_state%0A%0A%20%20if%20%5B%20%22%241%22%20%3D%3D%20%22OVNKubernetes%22%20%5D%3B%20then%0A%20%20%20%20%23%20Configures%20NICs%20onto%20OVS%20bridge%20%22br-ex%22%0A%20%20%20%20%23%20Configuration%20is%20either%20auto-detected%20or%20provided%20through%20a%20config%20file%20written%20already%20in%20Network%20Manage
r%0A%20%20%20%20%23%20key%20files%20under%20%2Fetc%2FNetworkManager%2Fsystem-connections%2F%0A%20%20%20%20%23%20Managing%20key%20files%20is%20outside%20of%20the%20scope%20of%20this%20script%0A%0A%20%20%20%20%23%20if%20the%20interface%20is%20of%20type%20vmxnet3%20add%20multicast%20capability%20for%20that%20driver%0A%20%20%20%20%23%20History%3A%20BZ%3A1854355%0A%20%20%20%20function%20configure_driver_options%20%7B%0A%20%20%20%20%20%20intf%3D%241%0A%20%20%20%20%20%20if%20%5B%20!%20-f%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Device%20file%20doesn't%20exist%2C%20skipping%20setting%20multicast%20mode%22%0A%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20driver%3D%24(cat%20%22%2Fsys%2Fclass%2Fnet%2F%24%7Bintf%7D%2Fdevice%2Fuevent%22%20%7C%20grep%20DRIVER%20%7C%20awk%20-F%20%22%3D%22%20'%7Bprint%20%242%7D')%0A%20%20%20%20%20%20%20%20echo%20%22Driver%20name%20is%22%20%24driver%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24driver%22%20%3D%20%22vmxnet3%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20ip%20link%20set%20dev%20%22%24%7Bintf%7D%22%20allmulticast%20on%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20fi%0A%20%20%20%20%7D%0A%0A%20%20%20%20ovnk_config_dir%3D'%2Fetc%2Fovnk'%0A%20%20%20%20ovnk_var_dir%3D'%2Fvar%2Flib%2Fovnk'%0A%20%20%20%20extra_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fextra_bridge%22%0A%20%20%20%20iface_default_hint_file%3D%22%24%7Bovnk_var_dir%7D%2Fiface_default_hint%22%0A%20%20%20%20ip_hint_file%3D%22%2Frun%2Fnodeip-configuration%2Fprimary-ip%22%0A%20%20%20%20%23%20explicitly%20specify%20which%20interface%20should%20be%20used%20with%20the%20default%20bridge%0A%20%20%20%20default_bridge_file%3D%22%24%7Bovnk_config_dir%7D%2Fdefault_bridge%22%0A%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_config_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_config_dir%7D%22%0A%20%20%20%20%23%20make%20sure%20to%20create%20ovnk_var_dir%20if%20it%20does%20not%20exist%2C%20yet%0A%20%20%20%20mkdir%20-p%20%22%24%7Bovnk_var_dir%7D%22%0A%0A%20%20%20%20%23%20For%20upgrade%20scenarios%2C%20make%20sure%20that%20we%20stabilize%20what%20we%20already%20configured%0A%20%20%20%20%23%20before.%20If%20we%20do%20not%20have%20a%20valid%20interface%20hint%2C%20find%20the%20physical%20interface%0A%20%20%20%20%23%20that's%20attached%20to%20ovs-if-phys0.%0A%20%20%20%20%23%20If%20we%20find%20such%20an%20interface%2C%20write%20it%20to%20the%20hint%20file.%0A%20%20%20%20iface_default_hint%3D%24(get_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22)%0A%20%20%20%20if%20%5B%20%22%24%7Biface_default_hint%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20current_interface%3D%24(get_bridge_physical_interface%20ovs-if-phys0)%0A%20%20%20%20%20%20if%20%5B%20%22%24%7Bcurrent_interface%7D%22%20!%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20write_iface_default_hint%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bcurrent_interface%7D%22%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20delete%20iface_default_hint_file%20if%20it%20has%20the%20same%20content%20as%20extra_bridge_file%0A%20%20%20%20%23%20in%20that%20case%2C%20we%20must%20also%20force%20a%20reconfiguration%20of%20our%20network%20interfaces%0A%20%20%20%20%23%20to%20make%20sure%20that%20we%20reconcile%20this%20conflict%0A%20%20%20%20if%20%5B%20-f%20%22%24%7Biface_default_hint_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20-f%20%22%24%7Bextra_bridge_file%7D%22%20%5D%20%26%26%0A%20%20%20%20%20%20%5B%20%22
%24(cat%20%22%24%7Biface_default_hint_file%7D%22)%22%20%3D%3D%20%22%24(cat%20%22%24%7Bextra_bridge_file%7D%22)%22%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22%24%7Biface_default_hint_file%7D%20and%20%24%7Bextra_bridge_file%7D%20share%20the%20same%20content%22%0A%20%20%20%20%20%20echo%20%22Deleting%20file%20%24%7Biface_default_hint_file%7D%20to%20choose%20a%20different%20interface%22%0A%20%20%20%20%20%20rm%20-f%20%22%24%7Biface_default_hint_file%7D%22%0A%20%20%20%20%20%20rm%20-f%20%2Frun%2Fconfigure-ovs-boot-done%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20on%20every%20boot%20we%20rollback%20and%20generate%20the%20configuration%20again%2C%20to%20take%0A%20%20%20%20%23%20in%20any%20changes%20that%20have%20possibly%20been%20applied%20in%20the%20standard%0A%20%20%20%20%23%20configuration%20sources%0A%20%20%20%20if%20%5B%20!%20-f%20%2Frun%2Fconfigure-ovs-boot-done%20%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Running%20on%20boot%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20print_state%0A%20%20%20%20fi%0A%20%20%20%20touch%20%2Frun%2Fconfigure-ovs-boot-done%0A%0A%20%20%20%20iface%3D%24(get_default_bridge_interface%20%22%24%7Biface_default_hint_file%7D%22%20%22%24%7Bextra_bridge_file%7D%22%20%22%24%7Bip_hint_file%7D%22%20%22%24%7Bdefault_bridge_file%7D%22)%0A%0A%20%20%20%20if%20%5B%20%22%24iface%22%20!%3D%20%22br-ex%22%20%5D%3B%20then%0A%20%20%20%20%20%20%23%20Specified%20interface%20is%20not%20br-ex.%0A%20%20%20%20%20%20%23%20Some%20deployments%20use%20a%20temporary%20solution%20where%20br-ex%20is%20moved%20out%20from%20the%20default%20gateway%20interface%0A%20%20%20%20%20%20%23%20and%20bound%20to%20a%20different%20nic%20(https%3A%2F%2Fgithub.com%2Ftrozet%2Fopenshift-ovn-migration).%0A%20%20%20%20%20%20%23%20This%20is%20now%20supported%20through%20an%20extra%20bridge%20if%20requested.%20If%20that%20is%20the%20case%2C%20we%20rollback.%0A%20%20%20%20%20%20%23%20We%20also%20rollback%20if%20it%20looks%20like%20we%20need%20to%20configure%20things%2C%20just%20in%20case%20there%20are%20any%20leftovers%0A%20%20%20%20%20%20%23%20from%20previous%20attempts.%0A%20%20%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%7C%7C%20%5B%20-z%20%22%24(nmcli%20connection%20show%20--active%20br-ex%202%3E%20%2Fdev%2Fnull)%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%22Bridge%20br-ex%20is%20not%20active%2C%20restoring%20previous%20configuration%20before%20proceeding...%22%0A%20%20%20%20%20%20%20%20rollback_nm%0A%20%20%20%20%20%20%20%20print_state%0A%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20convert_to_bridge%20%22%24iface%22%20%22br-ex%22%20%22phys0%22%20%22%24%7BBRIDGE_METRIC%7D%22%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20configure%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(!%20nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20!%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%20%20%20%20interface%3D%24(head%20-n%201%20%24extra_bridge_file)%0A%20%20%20%20%20%20convert_to_bridge%20%22%24interface%22%20%22br-ex1%22%20%22phys1%22%20%22%24%7BBRIDGE1_METRIC%7D%22%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Check%20if%20we%20need%20to%20remove%20the%20second%20bridge%0A%20%20%20%20if%20%5B%20!%20-f%20%22%24extra_bridge_file%22%20%5D%20%26%26%20(nmcli%20connection%20show%20br-ex1%20%26%3E%20%2Fdev%2Fnull%20%7C%7C%20nmcli%20connection%20show%20ovs-if-phys1%20%26%3E%20%2Fdev%2Fnull)%3B%20then%0A%20%20%
20%20%20%20remove_ovn_bridges%20br-ex1%20phys1%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20openshift-sdn%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0A%20%20%20%20%23%20Make%20sure%20everything%20is%20activated.%20Do%20it%20in%20a%20specific%20order%3A%0A%20%20%20%20%23%20-%20activate%20br-ex%20first%2C%20due%20to%20autoconnect-slaves%20this%20will%20also%0A%20%20%20%20%23%20%20%20activate%20ovs-port-br-ex%2C%20ovs-port-phys0%20and%20ovs-if-phys0.%20It%20is%0A%20%20%20%20%23%20%20%20important%20that%20ovs-if-phys0%20activates%20with%20br-ex%20to%20avoid%20the%0A%20%20%20%20%23%20%20%20ovs-if-phys0%20profile%20being%20overridden%20with%20a%20profile%20generated%20from%0A%20%20%20%20%23%20%20%20kargs.%20The%20activation%20of%20ovs-if-phys0%2C%20if%20a%20bond%2C%20might%20cause%20the%0A%20%20%20%20%23%20%20%20slaves%20to%20re-activate%2C%20but%20it%20should%20be%20with%20our%20profiles%20since%20they%0A%20%20%20%20%23%20%20%20have%20higher%20priority%0A%20%20%20%20%23%20-%20make%20sure%20that%20ovs-if-phys0%20and%20its%20slaves%2C%20if%20any%2C%20are%20activated.%0A%20%20%20%20%23%20-%20finally%20activate%20ovs-if-br-ex%20which%20holds%20the%20IP%20configuration.%0A%20%20%20%20connections%3D(br-ex%20ovs-if-phys0)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(br-ex1%20ovs-if-phys1)%0A%20%20%20%20fi%0A%20%20%20%20while%20IFS%3D%20read%20-r%20connection%3B%20do%0A%20%20%20%20%20%20if%20%5B%5B%20%24connection%20%3D%3D%20*%22%24MANAGED_NM_CONN_SUFFIX%22%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20connections%2B%3D(%22%24connection%22)%0A%20%20%20%20%20%20fi%0A%20%20%20%20done%20%3C%20%3C(nmcli%20-g%20NAME%20c)%0A%20%20%20%20connections%2B%3D(ovs-if-br-ex)%0A%20%20%20%20if%20%5B%20-f%20%22%24extra_bridge_file%22%20%5D%3B%20then%0A%20%20%20%20%20%20connections%2B%3D(ovs-if-br-ex1)%0A%20%20%20%20fi%0A%20%20%20%20activate_nm_connections%20%22%24%7Bconnections%5B%40%5D%7D%22%0A%20%20%20%20try_to_bind_ipv6_address%0A%20%20%20%20set_nm_conn_files%0A%20%20elif%20%5B%20%22%241%22%20%3D%3D%20%22OpenShiftSDN%22%20%5D%3B%20then%0A%20%20%20%20%23%20Revert%20changes%20made%20by%20%2Fusr%2Flocal%2Fbin%2Fconfigure-ovs.sh%20during%20SDN%20migration.%0A%20%20%20%20rollback_nm%0A%20%20%20%20%0A%20%20%20%20%23%20Remove%20bridges%20created%20by%20ovn-kubernetes%0A%20%20%20%20ovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-int%20--%20--if-exists%20del-br%20br-local%0A%20%20fi%0A%7D%0A%0A%23%20Retry%20configure_ovs%20until%20it%20succeeds.%0A%23%20By%20default%2C%20retry%20every%2015%20minutes%20to%20give%20enough%20time%20to%20gather%0A%23%20troubleshooting%20information%20in%20between.%20Note%20that%20configure_ovs%20has%20other%0A%23%20internal%20retry%20mechanisms.%20This%20retry%20is%20intended%20to%20give%20some%0A%23%20self-healing%20capabilities%20to%20temporary%20but%20not%20necessarily%20short-lived%0A%23%20infrastructure%20problems.%0ARETRY%3D%22%24%7BRETRY-15m%7D%22%0Awhile%20true%3B%20do%0A%0A%20%20%23%20Disable%20retries%20if%20termination%20signal%20is%20received.%20Note%20that%20systemd%0A%20%20%23%20sends%20the%20signals%20to%20all%20processes%20in%20the%20group%20by%20default%20so%20we%20expect%0A%20%20%23%20configure_ovs%20to%20get%20its%20own%20signals.%0A%20%20trap%20'echo%20%22WARNING%3A%20termination%20requested%2C%20disabling%20retries%22%3B%20RETRY%3D%22%22'%20INT%20TERM%0A%20%20%0A%20%20%23%20Run%20configure_ovs%20in%20a%20sub-shell.%20%0A%20%20(%20configure_ovs%20%
22%24%40%22%20)%0A%20%20e%3D%24%3F%0A%0A%20%20%23%20Handle%20signals%20while%20we%20sleep%0A%20%20trap%20'handle_termination'%20INT%20TERM%0A%20%20%0A%20%20%23%20Exit%20if%20succesful%20and%20not%20configured%20to%20retry%0A%20%20%5B%20%22%24e%22%20-eq%200%20%5D%20%7C%7C%20%5B%20-z%20%22%24RETRY%22%20%5D%20%26%26%20exit%20%22%24e%22%0A%20%20%0A%20%20echo%20%22configure-ovs%20failed%2C%20will%20retry%20after%20%24RETRY%22%0A%20%20%23%20flag%20that%20a%20retry%20has%20happened%0A%20%20touch%20%2Ftmp%2Fconfigure-ovs-retry%0A%20%20sleep%20%22%24RETRY%22%0A%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/configure-ovs.sh" + }, + { + "contents": { + "source": "data:,%23%20This%20file%20is%20generated%20by%20the%20Machine%20Config%20Operator's%20containerruntimeconfig%20controller.%0A%23%0A%23%20storage.conf%20is%20the%20configuration%20file%20for%20all%20tools%0A%23%20that%20share%20the%20containers%2Fstorage%20libraries%0A%23%20See%20man%205%20containers-storage.conf%20for%20more%20information%0A%23%20The%20%22container%20storage%22%20table%20contains%20all%20of%20the%20server%20options.%0A%5Bstorage%5D%0A%0A%23%20Default%20storage%20driver%2C%20must%20be%20set%20for%20proper%20operation.%0Adriver%20%3D%20%22overlay%22%0A%0A%23%20Temporary%20storage%20location%0Arunroot%20%3D%20%22%2Frun%2Fcontainers%2Fstorage%22%0A%0A%23%20Primary%20Read%2FWrite%20location%20of%20container%20storage%0Agraphroot%20%3D%20%22%2Fvar%2Flib%2Fcontainers%2Fstorage%22%0A%0A%5Bstorage.options%5D%0A%23%20Storage%20options%20to%20be%20passed%20to%20underlying%20storage%20drivers%0A%0A%23%20AdditionalImageStores%20is%20used%20to%20pass%20paths%20to%20additional%20Read%2FOnly%20image%20stores%0A%23%20Must%20be%20comma%20separated%20list.%0Aadditionalimagestores%20%3D%20%5B%0A%5D%0A%0A%23%20Remap-UIDs%2FGIDs%20is%20the%20mapping%20from%20UIDs%2FGIDs%20as%20they%20should%20appear%20inside%20of%0A%23%20a%20container%2C%20to%20UIDs%2FGIDs%20as%20they%20should%20appear%20outside%20of%20the%20container%2C%20and%0A%23%20the%20length%20of%20the%20range%20of%20UIDs%2FGIDs.%20%20Additional%20mapped%20sets%20can%20be%20listed%0A%23%20and%20will%20be%20heeded%20by%20libraries%2C%20but%20there%20are%20limits%20to%20the%20number%20of%0A%23%20mappings%20which%20the%20kernel%20will%20allow%20when%20you%20later%20attempt%20to%20run%20a%0A%23%20container.%0A%23%0A%23%20remap-uids%20%3D%200%3A1668442479%3A65536%0A%23%20remap-gids%20%3D%200%3A1668442479%3A65536%0A%0A%23%20Remap-User%2FGroup%20is%20a%20name%20which%20can%20be%20used%20to%20look%20up%20one%20or%20more%20UID%2FGID%0A%23%20ranges%20in%20the%20%2Fetc%2Fsubuid%20or%20%2Fetc%2Fsubgid%20file.%20%20Mappings%20are%20set%20up%20starting%0A%23%20with%20an%20in-container%20ID%20of%200%20and%20the%20a%20host-level%20ID%20taken%20from%20the%20lowest%0A%23%20range%20that%20matches%20the%20specified%20name%2C%20and%20using%20the%20length%20of%20that%20range.%0A%23%20Additional%20ranges%20are%20then%20assigned%2C%20using%20the%20ranges%20which%20specify%20the%0A%23%20lowest%20host-level%20IDs%20first%2C%20to%20the%20lowest%20not-yet-mapped%20container-level%20ID%2C%0A%23%20until%20all%20of%20the%20entries%20have%20been%20used%20for%20maps.%20This%20setting%20overrides%20the%0A%23%20Remap-UIDs%2FGIDs%20setting.%0A%23%0A%23%20remap-user%20%3D%20%22storage%22%0A%23%20remap-group%20%3D%20%22storage%22%0A%0A%5Bstorage.options.pull_options%5D%0A%23%20Options%20controlling%20how%20storage%20is%20populated%20when%20pulling%20images.%0A%0A%23%20Enable%20the%20%22zstd%3Achun
ked%22%20feature%2C%20which%20allows%20partial%20pulls%2C%20reusing%0A%23%20content%20that%20already%20exists%20on%20the%20system.%20This%20is%20disabled%20by%20default%2C%0A%23%20and%20must%20be%20explicitly%20enabled%20to%20be%20used.%20For%20more%20on%20zstd%3Achunked%2C%20see%0A%23%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fstorage%2Fblob%2Fmain%2Fdocs%2Fcontainers-storage-zstd-chunked.md%0Aenable_partial_images%20%3D%20%22false%22%0A%0A%23%20Tells%20containers%2Fstorage%20to%20use%20hard%20links%20rather%20then%20create%20new%20files%20in%0A%23%20the%20image%2C%20if%20an%20identical%20file%20already%20existed%20in%20storage.%0Ause_hard_links%20%3D%20%22false%22%0A%0A%23%20Path%20to%20an%20ostree%20repository%20that%20might%20have%0A%23%20previously%20pulled%20content%20which%20can%20be%20used%20when%20attempting%20to%20avoid%0A%23%20pulling%20content%20from%20the%20container%20registry.%0Aostree_repos%20%3D%20%22%22%0A%0A%5Bstorage.options.overlay%5D%0A%23%20Storage%20Options%20for%20overlay%0A%0A%23%20Do%20not%20create%20a%20PRIVATE%20bind%20mount%20on%20the%20home%20directory.%0Askip_mount_home%20%3D%20%22true%22%0A%0A%23%20Size%20is%20used%20to%20set%20a%20maximum%20size%20of%20the%20container%20image.%20%20Only%20supported%20by%0A%23%20certain%20container%20storage%20drivers.%0Asize%20%3D%20%22%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/storage.conf" + }, + { + "contents": { + "source": "data:,Initial%20Creation%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/etc/docker/certs.d/.create" + }, + { + "contents": { + "source": "data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1%0Akind%3A%20CredentialProviderConfig%0Aproviders%3A%0A%20%20-%20name%3A%20acr-credential-provider%0A%20%20%20%20apiVersion%3A%20credentialprovider.kubelet.k8s.io%2Fv1%0A%20%20%20%20defaultCacheDuration%3A%20%2210m%22%0A%20%20%20%20matchImages%3A%0A%20%20%20%20%20%20-%20%22*.azurecr.io%22%0A%20%20%20%20%20%20-%20%22*.azurecr.cn%22%0A%20%20%20%20%20%20-%20%22*.azurecr.de%22%0A%20%20%20%20%20%20-%20%22*.azurecr.us%22%0A%20%20%20%20args%3A%0A%20%20%20%20%20%20-%20%2Fetc%2Fkubernetes%2Fcloud.conf%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/credential-providers/acr-credential-provider.yaml" + }, + { + "contents": { + "source": "data:,%23%20Proxy%20environment%20variables%20will%20be%20populated%20in%20this%20file.%20Properly%0A%23%20url%20encoded%20passwords%20with%20special%20characters%20will%20use%20'%25%3CHEX%3E%3CHEX%3E'.%0A%23%20Systemd%20requires%20that%20any%20%25%20used%20in%20a%20password%20be%20represented%20as%0A%23%20%25%25%20in%20a%20unit%20file%20since%20%25%20is%20a%20prefix%20for%20macros%3B%20this%20restriction%20does%20not%0A%23%20apply%20for%20environment%20files.%20Templates%20that%20need%20the%20proxy%20set%20should%20use%0A%23%20'EnvironmentFile%3D%2Fetc%2Fmco%2Fproxy.env'.%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/mco/proxy.env" + }, + { + "contents": { + "source": "data:,%5BManager%5D%0ADefaultEnvironment%3DGODEBUG%3Dx509ignoreCN%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/10-default-env-godebug.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fissues.redhat.com%2Fbrowse%2FOCPBUGS-38779%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22idpf%22%20%5D%5D%3B%20then%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksumming%20off%0Afi" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-gcp-disable-idpf-tx-checksum-off" + }, + { + "contents": { + "source": "data:,%23%20Force-load%20legacy%20iptables%20so%20it%20is%20usable%20from%20pod%20network%20namespaces%0Aip_tables%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/modules-load.d/iptables.conf" + }, + { + "contents": { + "source": "data:,NODE_SIZING_ENABLED%3Dfalse%0ASYSTEM_RESERVED_MEMORY%3D1Gi%0ASYSTEM_RESERVED_CPU%3D500m%0ASYSTEM_RESERVED_ES%3D1Gi" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/node-sizing-enabled.env" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0ANODE_SIZES_ENV%3D%24%7BNODE_SIZES_ENV%3A-%2Fetc%2Fnode-sizing.env%7D%0AVERSION_1%3D1%0AVERSION_2%3D2%0ANODE_AUTO_SIZING_VERSION%3D%24%7BNODE_AUTO_SIZING_VERSION%3A-%24VERSION_2%7D%0ANODE_AUTO_SIZING_VERSION_FILE%3D%24%7BNODE_AUTO_SIZING_VERSION_FILE%3A-%2Fetc%2Fnode-sizing-version.json%7D%0Afunction%20dynamic_memory_sizing%20%7B%0A%20%20%20%20total_memory%3D%24(free%20-g%7Cawk%20'%2F%5EMem%3A%2F%7Bprint%20%242%7D')%0A%20%20%20%20%23%20total_memory%3D8%20test%20the%20recommended%20values%20by%20modifying%20this%20value%0A%20%20%20%20recommended_systemreserved_memory%3D0%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2025%25%20of%20the%20first%204GB%20of%20memory%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24total_memory%200.25%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D1%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%204))%3B%20then%20%23%2020%25%20of%20the%20next%204GB%20of%20memory%20(up%20to%208GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.20%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-4))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%208))%3B%20then%20%23%2010%25%20of%20the%20next%208GB%20of%20memory%20(up%20to%2016GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.10%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%200.80%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-8))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3C%3D%
20112))%3B%20then%20%23%206%25%20of%20the%20next%20112GB%20of%20memory%20(up%20to%20128GB)%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D0%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%206.72%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20total_memory%3D%24((total_memory-112))%0A%20%20%20%20fi%0A%20%20%20%20if%20((%24total_memory%20%3E%3D%200))%3B%20then%20%23%202%25%20of%20any%20memory%20above%20128GB%0A%20%20%20%20%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%24(echo%20%24total_memory%200.02%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20fi%0A%20%20%20%20recommended_systemreserved_memory%3D%24(echo%20%24recommended_systemreserved_memory%20%7C%20awk%20'%7Bprintf(%22%25d%5Cn%22%2C%241%20%2B%200.5)%7D')%20%23%20Round%20off%20so%20we%20avoid%20float%20conversions%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7Brecommended_systemreserved_memory%7DGi%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_cpu_sizing%20%7B%0A%20%20%20%20total_cpu%3D%24(getconf%20_NPROCESSORS_ONLN)%0A%20%20%20%20if%20%5B%20%22%241%22%20-eq%20%22%24VERSION_1%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%206%25%20of%20the%20first%20core%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24total_cpu%200.06%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D0.06%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%201))%3B%20then%20%23%201%25%20of%20the%20next%20core%20(up%20to%202%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((total_cpu-1))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3C%3D%202))%3B%20then%20%23%200.5%25%20of%20the%20next%202%20cores%20(up%20to%204%20cores)%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.005%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D0%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%200.01%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20%20%20%20%20total_cpu%3D%24((tota
l_cpu-2))%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20%20%20if%20((%24total_cpu%20%3E%3D%200))%3B%20then%20%23%200.25%25%20of%20any%20cores%20above%204%20cores%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(echo%20%24recommended_systemreserved_cpu%20%24(echo%20%24total_cpu%200.0025%20%7C%20awk%20'%7Bprint%20%241%20*%20%242%7D')%20%7C%20awk%20'%7Bprint%20%241%20%2B%20%242%7D')%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20else%0A%20%20%20%20%20%20%20%20%23%20Base%20allocation%20for%201%20CPU%20in%20fractions%20of%20a%20core%20(60%20millicores%20%3D%200.06%20CPU%20core)%0A%20%20%20%20%20%20%20%20base_allocation_fraction%3D0.06%0A%20%20%20%20%20%20%20%20%23%20Increment%20per%20additional%20CPU%20in%20fractions%20of%20a%20core%20(12%20millicores%20%3D%200.012%20CPU%20core)%0A%20%20%20%20%20%20%20%20increment_per_cpu_fraction%3D0.012%0A%20%20%20%20%20%20%20%20if%20((total_cpu%20%3E%201))%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Calculate%20the%20total%20system-reserved%20CPU%20in%20fractions%2C%20starting%20with%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20and%20adding%20the%20incremental%20fraction%20for%20each%20additional%20CPU%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20base%3D%22%24base_allocation_fraction%22%20-v%20increment%3D%22%24increment_per_cpu_fraction%22%20-v%20cpus%3D%22%24total_cpu%22%20'BEGIN%20%7Bprintf%20%22%25.2f%5Cn%22%2C%20base%20%2B%20increment%20*%20(cpus%20-%201)%7D')%0A%20%20%20%20%20%20%20%20else%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20For%20a%20single%20CPU%2C%20use%20the%20base%20allocation%0A%20%20%20%20%20%20%20%20%20%20%20%20recommended_systemreserved_cpu%3D%24base_allocation_fraction%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Enforce%20minimum%20threshold%20of%200.5%20CPU%0A%20%20%20%20recommended_systemreserved_cpu%3D%24(awk%20-v%20val%3D%22%24recommended_systemreserved_cpu%22%20'BEGIN%20%7Bif%20(val%20%3C%200.5)%20print%200.5%3B%20else%20print%20val%7D')%0A%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7Brecommended_systemreserved_cpu%7D%22%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_ephemeral_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20dynamic_pid_sizing%20%7B%0A%20%20%20%20echo%20%22Not%20implemented%20yet%22%0A%7D%0Afunction%20set_memory%20%7B%0A%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_MEMORY%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_MEMORY%3D%24%7BSYSTEM_RESERVED_MEMORY%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_cpu%20%7B%0A%20%20%20%20SYSTEM_RESERVED_CPU%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_CPU%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_CPU%3D%22500m%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_CPU%3D%24%7BSYSTEM_RESERVED_CPU%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20set_es%20%7B%0A%20%20%20%20SYSTEM_RESERVED_ES%3D%241%0A%20%20%20%20if%20%5B%20-z%20%22%24%7BSYSTEM_RESERVED_ES%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20SYSTEM_RESERVED_ES%3D%221Gi%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22SYSTEM_RESERVED_ES%3D%24%7BSYSTEM_RESERVED_ES%7D%22%20%3E%3E%20%24%7BNODE_SIZES_ENV%7D%0A%7D%0Afunction%20dynamic_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20dynamic_memory_sizing%0A%20%2
0%20%20dynamic_cpu_sizing%20%241%0A%20%20%20%20set_es%20%242%0A%20%20%20%20%23dynamic_ephemeral_sizing%0A%20%20%20%20%23dynamic_pid_sizing%0A%7D%0Afunction%20static_node_sizing%20%7B%0A%20%20%20%20rm%20-f%20%24%7BNODE_SIZES_ENV%7D%0A%20%20%20%20set_memory%20%241%0A%20%20%20%20set_cpu%20%242%0A%20%20%20%20set_es%20%243%0A%7D%0Afunction%20create_version_file%20%7B%0A%20%20%20%20echo%20%22%7B%5C%22version%5C%22%3A%20%241%7D%22%20%3E%20%242%0A%7D%0Aif%20!%20%5B%20-f%20%24NODE_AUTO_SIZING_VERSION_FILE%20%5D%3B%20then%0A%20%20%20%20create_version_file%20%24NODE_AUTO_SIZING_VERSION%20%24NODE_AUTO_SIZING_VERSION_FILE%0Afi%0Anew_version%3D%24(jq%20.version%20%24NODE_AUTO_SIZING_VERSION_FILE)%0Aif%20%5B%20%241%20%3D%3D%20%22true%22%20%5D%3B%20then%0A%20%20%20%20dynamic_node_sizing%20%24new_version%20%244%0Aelif%20%5B%20%241%20%3D%3D%20%22false%22%20%5D%3B%20then%0A%20%20%20%20static_node_sizing%20%242%20%243%20%244%0Aelse%0A%20%20%20%20echo%20%22Unrecognized%20command%20line%20option.%20Valid%20options%20are%20%5C%22true%5C%22%20or%20%5C%22false%5C%22%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/sbin/dynamic-system-reserved-calc.sh" + }, + { + "contents": { + "source": "data:,%23%20Turning%20on%20Accounting%20helps%20track%20down%20performance%20issues.%0A%5BManager%5D%0ADefaultCPUAccounting%3Dyes%0ADefaultMemoryAccounting%3Dyes%0ADefaultBlockIOAccounting%3Dyes%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system.conf.d/kubelet-cgroups.conf" + }, + { + "contents": { + "source": "data:,%5BService%5D%0AEnvironment%3D%22KUBELET_LOG_LEVEL%3D2%22%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/systemd/system/kubelet.service.d/20-logging.conf" + }, + { + "contents": { + "source": "data:,%23%20ignore%20known%20SDN-managed%20devices%0A%5Bdevice%5D%0Amatch-device%3Dinterface-name%3Abr-int%3Binterface-name%3Abr-local%3Binterface-name%3Abr-nexthop%3Binterface-name%3Aovn-k8s-*%3Binterface-name%3Ak8s-*%3Binterface-name%3Atun0%3Binterface-name%3Abr0%3Binterface-name%3Apatch-br-*%3Binterface-name%3Abr-ext%3Binterface-name%3Aext-vxlan%3Binterface-name%3Aext%3Binterface-name%3Aint%3Binterface-name%3Avxlan_sys_*%3Binterface-name%3Agenev_sys_*%3Bdriver%3Aveth%0Amanaged%3D0%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/NetworkManager/conf.d/sdn.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0A%23%20Clean%20up%20old%20config%20on%20behalf%20of%20mtu-migration%0Aif%20!%20systemctl%20-q%20is-enabled%20mtu-migration%3B%20then%0A%20%20echo%20%22Cleaning%20up%20left%20over%20mtu%20migration%20configuration%22%0A%20%20rm%20-rf%20%2Fetc%2Fcno%2Fmtu-migration%0Afi%0A%0Aif%20%5B%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20echo%20%22Configuration%20already%20applied%2C%20exiting%22%0A%20%20exit%200%0Afi%0A%0Asrc_path%3D%22%2Fetc%2Fnmstate%2Fopenshift%22%0Adst_path%3D%22%2Fetc%2Fnmstate%22%0Ahostname%3D%24(hostname%20-s)%0Ahost_file%3D%22%24%7Bhostname%7D.yml%22%0Acluster_file%3D%22cluster.yml%22%0Aconfig_file%3D%22%22%0Aif%20%5B%20-s%20%22%24src_path%2F%24host_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24host_file%0Aelif%20%5B%20-s%20%22%24src_path%2F%24cluster_file%22%20%5D%3B%20then%0A%20%20config_file%3D%24cluster_file%0Aelse%0A%20%20echo%20%22No%20configuration%20found%20at%20%24src_path%2F%24host_file%20or%20%24src_path%2F%24cluster_file%22%0A%20%20exit%200%0Afi%0A%0Aif%20%5B%20-e%20%22%24dst_path%2F%24config_file%22%20%5D%3B%20then%0A%20%20echo%20%22ERROR%3A%20File%20%24dst_path%2F%24config_file%20exists.%20Refusing%20to%20overwrite.%22%0A%20%20exit%201%0Afi%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20configure-ovs%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br-ex%0A%0A%23%20Handle%20the%20case%20where%20we're%20migrating%20from%20OpenShift%20SDN%0Aovs-vsctl%20--timeout%3D30%20--if-exists%20del-br%20br0%0A%0Acp%20%22%24src_path%2F%24config_file%22%20%2Fetc%2Fnmstate%0Atouch%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/nmstate-configuration.sh" + }, + { + "contents": { + "source": "data:,%5Bservice%5D%0Akeep_state_file_after_apply%20%3D%20true%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/nmstate/nmstate.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0A%23%20Set%20interface%20ofport_request%20to%20guarantee%20stable%20ofport%20numbers.%20This%20is%20important%20for%20flow%20matches.%0A%23%20Otherwise%2C%20another%20ofport%20number%20is%20assigned%20to%20the%20interface%20on%20every%20restart%20of%20NetworkManager.%0A%23%20This%20script%20will%20build%20an%20associative%20array%20INTERFACE_NAME-%3Eofport_request%20and%20will%20save%20it%20to%20file%20CONFIGURATION_FILE.%0A%23%20When%20an%20interface%20is%20brought%20up%2C%20this%20will%20reuse%20the%20value%20from%20the%20associative%20array%20if%20such%20a%20value%20exists.%0A%23%20Otherwise%2C%20this%20will%20try%20to%20use%20the%20current%20ofport%20value.%20If%20the%20ofport%20value%20is%20already%20reserved%2C%20then%0A%23%20this%20uses%20the%20lowest%20available%20numerical%20value%2C%20instead.%0Aset%20-eux%20-o%20pipefail%0Aif%20%5B%5B%20%22OVNKubernetes%22%20!%3D%20%22OVNKubernetes%22%20%5D%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0AINTERFACE_NAME%3D%241%0AOPERATION%3D%242%0A%0A%23%20Only%20execute%20this%20on%20pre-up%0Aif%20%5B%20%22%24%7BOPERATION%7D%22%20!%3D%20%22pre-up%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0AINTERFACE_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%241%3D%3D%22'%24%7BINTERFACE_NAME%7D'%22%20%26%26%20%242!~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20slave-type.%20If%20this%20is%20not%20an%20ovs-port%2C%20then%20exit%0AINTERFACE_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BINTERFACE_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-port%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20This%20is%20not%20necessarily%20a%20UUID%20(can%20be%20a%20name%20in%20case%20of%20bonds)%20but%20this%20should%20be%20unique%0APORT%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BINTERFACE_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20interface's%20NM%20uuid%0APORT_CONNECTION_UUID%3D%24(nmcli%20-t%20-f%20device%2Ctype%2Cuuid%20conn%20%7C%20awk%20-F%20'%3A'%20'%7Bif(%20(%241%3D%3D%22'%24%7BPORT%7D'%22%20%7C%7C%20%243%3D%3D%22'%24%7BPORT%7D'%22)%20%26%26%20%242~%2F%5Eovs*%2F)%20print%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20slave-type.%20If%20this%20is%20not%20an%20ovs-bridge%2C%20then%20exit%0APORT_OVS_SLAVE_TYPE%3D%24(nmcli%20-t%20-f%20connection.slave-type%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BPORT_OVS_SLAVE_TYPE%7D%22%20!%3D%20%22ovs-bridge%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20port's%20master.%20If%20it%20doesn't%20have%20any%2C%20assume%20it's%20not%20our%20bridge%0ABRIDGE_ID%3D%24(nmcli%20-t%20-f%20connection.master%20conn%20show%20%22%24%7BPORT_CONNECTION_UUID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0Aif%20%5B%20%22%24%7BBRIDGE_ID%7D%22%20%3D%3D%20%22%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Get%20the%20bridge%20name%0ABRIDGE_NAME%3
D%24(nmcli%20-t%20-f%20connection.interface-name%20conn%20show%20%22%24%7BBRIDGE_ID%7D%22%20%7C%20awk%20-F%20'%3A'%20'%7Bprint%20%24NF%7D')%0A%23%20Limit%20this%20to%20br-ex%20and%20br-ex1%20only.%20If%20one%20wanted%20to%20enable%20this%20for%20all%20OVS%20bridges%2C%0A%23%20the%20condition%20would%20be%3A%20if%20%5B%20%22%24BRIDGE_NAME%22%20%3D%3D%20%22%22%20%5D%3B%20then%0Aif%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex%22%20%5D%20%26%26%20%5B%20%22%24%7BBRIDGE_NAME%7D%22%20!%3D%20%22br-ex1%22%20%5D%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0A%23%20Make%20sure%20that%20the%20interface%20is%20plugged%20into%20OVS%0A%23%20This%20should%20always%20be%20the%20case%20given%20that%20we%20are%20in%20pre-up%2C%20but%20exit%20gracefully%20in%20the%20odd%20case%20that%20it's%20not%0Aif%20!%20ovs-vsctl%20list%20interface%20%22%24%7BINTERFACE_NAME%7D%22%20%3E%2Fdev%2Fnull%202%3E%261%3B%20then%0A%20%20%20%20exit%200%0Afi%0A%0ACONFIGURATION_FILE%3D%22%2Frun%2Fofport_requests.%24%7BBRIDGE_NAME%7D%22%0A%0A%23%20Declare%20a%20new%20associative%20array.%20If%20CONFIGURATION_FILE%20exists%2C%20source%20entries%20from%20there%0Adeclare%20-A%20INTERFACES%0Aif%20%5B%20-f%20%22%24%7BCONFIGURATION_FILE%7D%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22Sourcing%20configuration%20file%20'%24%7BCONFIGURATION_FILE%7D'%20with%20contents%3A%22%0A%20%20%20%20cat%20%22%24%7BCONFIGURATION_FILE%7D%22%0A%20%20%20%20source%20%22%24%7BCONFIGURATION_FILE%7D%22%0Afi%0A%0A%23%20get_interface_ofport_request%20will%20return%0A%23%20*%20either%3A%20the%20current%20ofport%20assignment%20for%20the%20port%20if%20no%20interface%20has%20claimed%20this%20ofport%20number%2C%20yet%0A%23%20*%20or%3A%20%20%20%20%20the%20lowest%20available%20free%20ofport%20number%0Afunction%20get_interface_ofport_request()%20%7B%0A%20%20%20%20%23%20Build%20an%20array%20that%20only%20contains%20the%20currently%20reserved%20ofport_requests%0A%20%20%20%20declare%20-A%20ofport_requests%0A%20%20%20%20for%20interface_name%20in%20%22%24%7B!INTERFACES%5B%40%5D%7D%22%3B%20do%0A%20%20%20%20%20%20%20%20ofport_requests%5B%24%7BINTERFACES%5B%24interface_name%5D%7D%5D%3D%24%7BINTERFACES%5B%24interface_name%5D%7D%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Get%20the%20current%20ofport%20number%20assignment%0A%20%20%20%20local%20current_ofport%3D%24(ovs-vsctl%20get%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport)%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20still%20free%2C%20use%20it%0A%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24current_ofport%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20echo%20%24current_ofport%0A%20%20%20%20%20%20%20%20return%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20If%20the%20current%20ofport%20number%20is%20not%20free%2C%20return%20the%20lowest%20free%20entry%0A%20%20%20%20i%3D0%0A%20%20%20%20for%20i%20in%20%7B1..65000%7D%3B%20do%0A%20%20%20%20%20%20%20%20if%20!%20%5B%20%22%24%7Bofport_requests%5B%24i%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%24i%0A%20%20%20%20%20%20%20%20%20%20%20%20return%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20if%20we%20still%20cannot%20find%20an%20ID%2C%20exit%20with%20an%20error%0A%20%20%20%20echo%20%22Impossible%20to%20find%20an%20ofport%20ID%20for%20interface%20%24%7BINTERFACE_NAME%7D%22%20%3E%262%0A%20%20%20%20exit%201%0A%7D%0A%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20exists%2C%20use%20that%20value%0A%23%20If%20INTERFACES%5BINTERFACE_NAME%5D%20does%20not%20exists%2C%20use%20the%20value%20from%20get_interface_o
fport_request%0Aif%20!%20%5B%20%22%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%2Ba%7D%22%20%5D%3B%20then%0A%20%20%20%20INTERFACES%5B%24INTERFACE_NAME%5D%3D%24(get_interface_ofport_request)%0Afi%0A%23%20Set%20ofport_request%20according%20to%20INTERFACES%5BINTERFACE_NAME%5D%0Aovs-vsctl%20set%20Interface%20%22%24%7BINTERFACE_NAME%7D%22%20ofport_request%3D%24%7BINTERFACES%5B%24INTERFACE_NAME%5D%7D%0A%0A%23%20Save%20current%20state%20of%20INTERFACES%20to%20CONFIGURATION_FILE%0Adeclare%20-p%20INTERFACES%20%3E%7C%20%22%24%7BCONFIGURATION_FILE%7D%22%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/pre-up.d/10-ofport-request.sh" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%0A%23%20Extract%20Podman%20version%20and%20determine%20the%20signature%20policy%0A%2Fusr%2Fbin%2Fpodman%20-v%20%7C%20%2Fbin%2Fawk%20'%7B%0A%20%20%20%20split(%243%2C%20version%2C%20%22-%22)%3B%0A%20%20%20%20clean_version%20%3D%20version%5B1%5D%3B%0A%0A%20%20%20%20split(clean_version%2C%20current%2C%20%2F%5C.%2F)%3B%0A%20%20%20%20split(%224.4.1%22%2C%20target%2C%20%2F%5C.%2F)%3B%0A%0A%20%20%20%20for%20(i%20%3D%201%3B%20i%20%3C%3D%203%3B%20i%2B%2B)%20%7B%0A%20%20%20%20%20%20%20%20if%20((current%5Bi%5D%20%2B%200)%20%3C%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20print%20%22--signature-policy%20%2Fetc%2Fmachine-config-daemon%2Fpolicy-for-old-podman.json%22%3B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%20else%20if%20((current%5Bi%5D%20%2B%200)%20%3E%20(target%5Bi%5D%20%2B%200))%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20exit%3B%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%7D'%20%3E%20%2Ftmp%2Fpodman_policy_args%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/etc/machine-config-daemon/generate_podman_policy_args.sh" + }, + { + "contents": { + "source": 
"data:,%7B%22auths%22%3A%7B%22cloud.openshift.com%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22quay.io%22%3A%7B%22auth%22%3A%22b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNWZlN2RlZTExNTdlNDA2Nzk2NTY3Y2VmMThmNjFjOTM6V0NXOEJLNkxXT1NLMElUSldIMlRXQUhaQ1k4SkJNMUtONVlCUVpNT1Q3UjdQMU9HRk05VTFJQ1VET0FMQkZIWQ%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.ci.openshift.org%22%3A%7B%22auth%22%3A%22XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX%22%7D%2C%22registry.connect.redhat.com%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%2C%22registry.redhat.io%22%3A%7B%22auth%22%3A%22fHVoYy1wb29sLWQ3OWIxMzA2LWI4NzItNDBkNy1iZDdmLTU1ZWI2M2EwMjMwOTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTJNelkzWkdaaE5EbGhaV1UwT1RkbU9XVXlNalUyWWpsaE9ESXlNMk0zTWlKOS5oQ0Z3YnhiOExhMDNkSnF4V09NMjFhOFk2MmpYLTRVZDgtR0dWMG1RZlRLc2NBbEpfLThIekZ6
WnZSMWN0Uzd1RDlLeEFRbUt2WE11SUx4MDVybmZrQ1c4S3FSbHhBdm5YUk51QTVCZ1FWVjlPRWd5OXpzZTVpMURnXzUwb2N4bHBUcTJYY20yVW00U09VX1hBNThwMXJaa3QtZ19wMXd1X3ItSzFjNzR1dlBlUmJPQXdubzBwZWVyX0tTQUloUklPcjRXbGYtdzhBZTM5UXA5UkQ5ZERta1F2WW1Cek94ZFJlaHhDMm0zdEZUaUc5YTRKT2NsQkVqSnpINlFoLXpZbmhxazZ2LS1yWHdpSXdlSG9PYU90WXhyTW9GN3U4UGtsVXVuVWI2S3RSbHBCVlE1MlB1MGRBVG9qUEpWMHdDdE93NGh0OU1fZi1JNDhHMm15QlAzM3BmME4xTFVmTmhKS0FkZEg0cE9WaVJmOFM1aExPYjVxemFqakhNOGRPYlBnRkN2c0lzR09kdGVaenIwZW5hWFBqOE9sYjlLQlJJTlllUHBMNmZSZGNaeXBkLVNJbzQ2TjJPbHRMLXNuVG5LX1NDMDZfMS1XNnFFdEpmZUptX2N0c2RxWnlnMWw2bVdiQWwtVExyMFl0ZTVldkNpQUpCNTYzZGxKR1ZWY2xCUGgyUkdDUW9TdjVDS3hnM3ZXX21oak42dnVtVjRCRmxLbUdUVTR2TDZ0aGFiS21XQ1puQnlwOS0tcURaMVVMSEhvb0psbG1Ebnc5S1dBRkdldV9oc3hKcDdZUXRVR193SkhZWGthdFRWTFhfMlB3OG5IbXdVSXVBaUdacks5T2xSNzBPdGpCNUdDTzIyd3oyQ0tmeUNELWNIRGFKcm1rX3NwQW9DMnoxQW94MA%3D%3D%22%2C%22email%22%3A%22aos-team-dp-testplatform%40redhat.com%22%7D%7D%7D%0A" + }, + "mode": 384, + "overwrite": true, + "path": "/var/lib/kubelet/config.json" + }, + { + "contents": { + "source": "data:,%23%20Needed%20by%20the%20OpenShift%20SDN.%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1758552%0Anet.ipv4.conf.all.arp_announce%20%3D%202%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/arp.conf" + }, + { + "contents": { + "source": "data:,%23%20See%3A%20rhbz%231384746%2C%20OCPBUGS-24012%0Anet.ipv4.neigh.default.gc_thresh1%3D8192%0Anet.ipv4.neigh.default.gc_thresh2%3D32768%0Anet.ipv4.neigh.default.gc_thresh3%3D65536%0Anet.ipv6.neigh.default.gc_thresh1%3D8192%0Anet.ipv6.neigh.default.gc_thresh2%3D32768%0Anet.ipv6.neigh.default.gc_thresh3%3D65536%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/gc-thresh.conf" + }, + { + "contents": { + "source": "data:,%0Afs.inotify.max_user_watches%20%3D%2065536%0Afs.inotify.max_user_instances%20%3D%208192%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/inotify.conf" + }, + { + "contents": { + "source": "data:,vm.unprivileged_userfaultfd%20%3D%201" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/enable-userfaultfd.conf" + }, + { + "contents": { + "source": "data:,%23%20Needed%20for%20OpenShift%20Logging%20(ElasticSearch).%20See%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1793714%0Avm.max_map_count%20%3D%20262144%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/sysctl.d/vm-max-map.conf" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-euo%20pipefail%0A%0A%23%20First%2C%20we%20need%20to%20wait%20until%20DHCP%20finishes%20and%20the%20node%20has%20a%20non-%60localhost%60%0A%23%20hostname%20before%20%60kubelet.service%60%20starts.%0A%23%20That's%20the%20%60--wait%60%20argument%20as%20used%20by%20%60node-valid-hostname.service%60.%0A%23%0A%23%20Second%2C%20on%20GCP%20specifically%20we%20truncate%20the%20hostname%20if%20it's%20%3E63%20characters.%0A%23%20That's%20%60gcp-hostname.service%60.%0A%0A%23%20Block%20indefinitely%20until%20the%20host%20gets%20a%20non-localhost%20name.%0A%23%20Note%20node-valid-hostname.service%20uses%20systemd%20to%20abort%20if%20this%20takes%20too%20long.%0Await_localhost()%20%7B%0A%20%20%20%20echo%20%22waiting%20for%20non-localhost%20hostname%20to%20be%20assigned%22%0A%20%20%20%20while%20%5B%5B%20%22%24(%3C%20%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%20%3D~%20(localhost%7Clocalhost.localdomain)%20%5D%5D%3B%0A%20%20%20%20do%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20done%0A%0A%20%20%20%20%23%20Some%20cloud%20platforms%20may%20assign%20a%20hostname%20with%20a%20trailing%20dot.%0A%20%20%20%20%23%20However%2C%20tools%20like%20%60hostnamectl%60%20(used%20by%20systemd)%20do%20not%20allow%20trailing%20dots%2C%0A%20%20%20%20%23%20so%20we%20strip%20the%20trailing%20dot%20before%20applying%20the%20hostname.%0A%20%20%20%20HOSTNAME%3D%22%24(%3C%2Fproc%2Fsys%2Fkernel%2Fhostname)%22%0A%20%20%20%20CLEAN_HOSTNAME%3D%22%24%7BHOSTNAME%25.%7D%22%20%0A%20%20%20%20echo%20%22node%20identified%20as%20%24CLEAN_HOSTNAME%22%0A%20%20%20%20echo%20%22saving%20hostname%20to%20prevent%20NetworkManager%20from%20ever%20unsetting%20it%22%0A%20%20%20%20hostnamectl%20set-hostname%20--static%20--transient%20%22%24CLEAN_HOSTNAME%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_gcp_hostname()%20%7B%0A%20%20%20%20%2Fusr%2Fbin%2Fafterburn%20--provider%20gcp%20--hostname%3D%2Frun%2Fafterburn.hostname%0A%0A%20%20%20%20local%20host_name%3D%24(cat%20%2Frun%2Fafterburn.hostname)%0A%20%20%20%20local%20type_arg%3D%22transient%22%0A%0A%20%20%20%20%23%20%2Fetc%2Fhostname%20is%20used%20for%20static%20hostnames%20and%20is%20authoritative.%0A%20%20%20%20%23%20This%20will%20check%20to%20make%20sure%20that%20the%20static%20hostname%20is%20the%0A%20%20%20%20%23%20less%20than%20or%20equal%20to%2063%20characters%20in%20length.%0A%20%20%20%20if%20%5B%20-f%20%2Fetc%2Fhostname%20%5D%20%26%26%20%5B%20%22%24(cat%20%2Fetc%2Fhostname%20%7C%20wc%20-m)%22%20-gt%200%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20etc_name%3D%22%24(%3C%20%2Fetc%2Fhostname)%22%0A%20%20%20%20%20%20%20%20type_arg%3D%22static%22%0A%20%20%20%20%20%20%20%20if%20%5B%20%22%24%7Betc_name%7D%22%20!%3D%20%22%24%7Bhost_name%7D%22%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22%2Fetc%2Fhostname%20is%20set%20to%20%24%7Betc_name%7D%20but%20does%20not%20match%20%24%7Bhost_name%7D%22%0A%20%20%20%20%20%20%20%20%20%20%20%20echo%20%22using%20%2Fetc%2Fhostname%20as%20the%20authoritative%20name%22%0A%20%20%20%20%20%20%20%20%20%20%20%20host_name%3D%22%24%7Betc_name%7D%22%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20fi%0A%0A%20%20%20%20%23%20Only%20mutate%20the%20hostname%20if%20the%20length%20is%20longer%20than%2063%20characters.%20The%0A%20%20%20%20%23%20hostname%20will%20be%20the%20lesser%20of%2063%20characters%20after%20the%20first%20dot%20in%20the%0A%20%20%20%20%23%20FQDN.%20%20This%20algorithm%20is%20only%20known%20to%20work%20in%20GCP%2C%20and%20hence%20is%20only%0A%20%20%20%20%23%20executed%20in%20GCP.%0A%20%20%20%20if%20%5B%20%22%24%7B%23host_name%7D%22%20-gt%2
063%20%5D%3B%20then%0A%20%20%20%20%20%20%20%20alt_name%3D%24(printf%20%22%24%7Bhost_name%7D%22%20%7C%20cut%20-f1%20-d'.'%20%7C%20cut%20-c%20-63)%0A%20%20%20%20%20%20%20%20echo%20%22%24%7Bhost_name%7D%20is%20longer%20than%2063%20characters%2C%20using%20truncated%20hostname%22%0A%20%20%20%20%20%20%20%20host_name%3D%22%24%7Balt_name%7D%22%0A%20%20%20%20fi%0A%20%20%20%20echo%20%22setting%20%24%7Btype_arg%7D%20hostname%20to%20%24%7Bhost_name%7D%22%0A%20%20%20%20%2Fbin%2Fhostnamectl%20%22--%24%7Btype_arg%7D%22%20set-hostname%20%22%24%7Bhost_name%7D%22%0A%20%20%20%20exit%200%0A%7D%0A%0Aset_openstack_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_OPENSTACK_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aset_powervs_hostname()%20%7B%0A%20%20%20%20%23%20Read%20metadata%20written%20by%20afterburn%20service%0A%20%20%20%20.%20%2Frun%2Fmetadata%2Fafterburn%0A%0A%20%20%20%20%23%20node-valid-hostname%20sets%20persistent%20hostname%20from%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%20%20%20%20echo%20%22%24AFTERBURN_POWERVS_LOCAL_HOSTNAME%22%20%3E%20%2Fproc%2Fsys%2Fkernel%2Fhostname%0A%7D%0A%0Aarg%3D%24%7B1%7D%3B%20shift%3B%0Acase%20%22%24%7Barg%7D%22%20in%0A%20%20%20%20--wait)%20wait_localhost%3B%3B%0A%20%20%20%20--gcp)%20set_gcp_hostname%3B%3B%0A%20%20%20%20--openstack)%20set_openstack_hostname%3B%3B%0A%20%20%20%20--powervs)%20set_powervs_hostname%3B%3B%0A%20%20%20%20*)%20echo%20%22Unhandled%20arg%20%24arg%22%3B%20exit%201%0Aesac%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/mco-hostname" + }, + { + "contents": { + "source": "data:," + }, + "mode": 493, + "overwrite": true, + "path": "/etc/kubernetes/kubelet-plugins/volume/exec/.dummy" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fbash%0A%23%20Workaround%3A%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1941714%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1935539%0A%23%20https%3A%2F%2Fbugzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D1987108%0A%0Adriver%3D%24(nmcli%20-t%20-m%20tabular%20-f%20general.driver%20dev%20show%20%22%24%7BDEVICE_IFACE%7D%22)%0A%0Aif%20%5B%5B%20%22%242%22%20%3D%3D%20%22up%22%20%26%26%20%22%24%7Bdriver%7D%22%20%3D%3D%20%22vmxnet3%22%20%5D%5D%3B%20then%0A%20%20logger%20-s%20%2299-vsphere-disable-tx-udp-tnl%20triggered%20by%20%24%7B2%7D%20on%20device%20%24%7BDEVICE_IFACE%7D.%22%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-udp_tnl-csum-segmentation%20off%0A%20%20ethtool%20-K%20%24%7BDEVICE_IFACE%7D%20tx-checksum-ip-generic%20off%0Afi%0A" + }, + "mode": 484, + "overwrite": true, + "path": "/etc/NetworkManager/dispatcher.d/99-vsphere-disable-tx-udp-tnl" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-x%0A%0Aif%20%5B%20!%20-e%20%22%2Fetc%2Fipsec.d%2Fopenshift.conf%22%20%5D%3B%20then%0A%20%20exit%200%0Afi%0A%0A%23%20Modify%20existing%20IPsec%20out%20connection%20entries%20with%20%22auto%3Dstart%22%0A%23%20option%20and%20restart%20ipsec%20systemd%20service.%20This%20helps%20to%0A%23%20establish%20IKE%20SAs%20for%20the%20existing%20IPsec%20connections%20with%0A%23%20peer%20nodes.%20This%20option%20will%20be%20deleted%20from%20connections%0A%23%20once%20ovs-monitor-ipsec%20process%20spinned%20up%20on%20the%20node%20by%0A%23%20ovn-ipsec-host%20pod%2C%20but%20still%20it%20won't%20reestablish%20IKE%20SAs%0A%23%20again%20with%20peer%20nodes%2C%20so%20it%20shouldn't%20be%20a%20problem.%0A%23%20We%20are%20updating%20only%20out%20connections%20with%20%22auto%3Dstart%22%20to%0A%23%20avoid%20cross%20stream%20issue%20with%20Libreswan%205.2.%0A%23%20The%20in%20connections%20use%20default%20auto%3Droute%20parameter.%0Aif%20!%20grep%20-q%20%22auto%3Dstart%22%20%2Fetc%2Fipsec.d%2Fopenshift.conf%3B%20then%0A%20%20sed%20-i%20'%2F%5E.*conn%20ovn.*-out-1%24%2Fa%5C%20%20%20%20auto%3Dstart'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%0Afi%0Achroot%20%2Fproc%2F1%2Froot%20ipsec%20restart%0A%0A%23%20Wait%20for%20upto%2060s%20to%20get%20IPsec%20SAs%20to%20establish%20with%20peer%20nodes.%0Atimeout%3D60%0Aelapsed%3D0%0Adesiredconn%3D%22%22%0Aestablishedsa%3D%22%22%0Awhile%20%5B%5B%20%24elapsed%20-lt%20%24timeout%20%5D%5D%3B%20do%0A%20%20desiredconn%3D%24(grep%20-E%20'%5E%5Cs*conn%5Cs%2B'%20%2Fetc%2Fipsec.d%2Fopenshift.conf%20%7C%20grep%20-v%20'%25default'%20%7C%20awk%20'%7Bprint%20%242%7D'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20establishedsa%3D%24(ipsec%20showstates%20%7C%20grep%20ESTABLISHED_CHILD_SA%20%7C%20grep%20-o%20'%22%5B%5E%22%5D*%22'%20%7C%20sed%20's%2F%22%2F%2Fg'%20%7C%20tr%20'%20'%20'%5Cn'%20%7C%20sort%20%7C%20uniq%20%7C%20tr%20'%5Cn'%20'%20')%0A%20%20if%20%5B%20%22%24desiredconn%22%20%3D%3D%20%22%24establishedsa%22%20%5D%3B%20then%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20established%20for%20desired%20connections%20after%20%24%7Belapsed%7Ds%22%0A%20%20%20%20break%0A%20%20else%0A%20%20%20%20echo%20%22IPsec%20SAs%20are%20not%20established%20yet%2C%20total%20waited%20time%20%24%7Belapsed%7Ds%22%0A%20%20%20%20sleep%202s%0A%20%20fi%0A%20%20elapsed%3D%24((elapsed%20%2B%202))%0Adone%0A%0Aif%20%5B%5B%20%24elapsed%20-ge%20%24timeout%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Timed%20out%20waiting%2C%20some%20connections%20are%20not%20established%2C%20desired%20conns%20%24desiredconn%2C%20established%20conns%20%24establishedsa%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/ipsec-connect-wait.sh" + }, + { + "contents": { + "source": 
"data:,%23!%2Fbin%2Fbash%0Aset%20-eux%0A%0Aif%20%5B%20!%20-e%20%2Fetc%2Fnmstate%2Fopenshift%2Fapplied%20%5D%3B%20then%0A%20%20%23%20No%20need%20to%20do%20this%20if%20no%20NMState%20configuration%20was%20applied%0A%20%20exit%200%0Afi%0A%0A%23%20This%20logic%20is%20borrowed%20from%20configure-ovs.sh%0A%23%20TODO%3A%20Find%20a%20platform-agnostic%20way%20to%20do%20this.%20It%20won't%20work%20on%20platforms%20where%0A%23%20nodeip-configuration%20is%20not%20used.%0Aip%3D%24(cat%20%2Frun%2Fnodeip-configuration%2Fprimary-ip)%0Aif%20%5B%5B%20%22%24%7Bip%7D%22%20%3D%3D%20%22%22%20%5D%5D%3B%20then%0A%20%20echo%20%22No%20ip%20to%20bind%20was%20found%22%0A%20%20exit%201%0Afi%0Awhile%20%3A%0Ado%0A%20%20random_port%3D%24(shuf%20-i%2050000-60000%20-n%201)%0A%20%20echo%20%22Trying%20to%20bind%20%24%7Bip%7D%20on%20port%20%24%7Brandom_port%7D%22%0A%20%20exit_code%3D%24(timeout%202s%20nc%20-l%20%22%24%7Bip%7D%22%20%24%7Brandom_port%7D%3B%20echo%20%24%3F)%0A%20%20if%20%5B%5B%20exit_code%20-eq%20124%20%5D%5D%3B%20then%0A%20%20%20%20echo%20%22Address%20bound%20successfully%22%0A%20%20%20%20exit%200%0A%20%20fi%0A%20%20sleep%2010%0Adone%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/wait-for-primary-ip.sh" + }, + { + "contents": { + "source": "data:,unqualified-search-registries%20%3D%20%5B'registry.access.redhat.com'%2C%20'docker.io'%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/registries.conf" + }, + { + "contents": { + "source": "data:,%5Bcrio%5D%0Ainternal_wipe%20%3D%20true%0Ainternal_repair%20%3D%20true%0A%0A%5Bcrio.api%5D%0Astream_address%20%3D%20%22127.0.0.1%22%0Astream_port%20%3D%20%220%22%0A%0A%5Bcrio.runtime%5D%0Aselinux%20%3D%20true%0Aconmon%20%3D%20%22%22%0Aconmon_cgroup%20%3D%20%22pod%22%0Adefault_env%20%3D%20%5B%0A%20%20%20%20%22NSS_SDB_USE_CACHE%3Dno%22%2C%0A%5D%0Alog_level%20%3D%20%22info%22%0Acgroup_manager%20%3D%20%22systemd%22%0Adefault_sysctls%20%3D%20%5B%0A%20%20%20%20%22net.ipv4.ping_group_range%3D0%202147483647%22%2C%0A%5D%0Adefault_runtime%20%3D%20%22crun%22%0Ahooks_dir%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Frun%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%20%20%20%20%22%2Fusr%2Fshare%2Fcontainers%2Foci%2Fhooks.d%22%2C%0A%5D%0Amanage_ns_lifecycle%20%3D%20true%0Aabsent_mount_sources_to_reject%20%3D%20%5B%0A%20%20%20%20%22%2Fetc%2Fhostname%22%2C%0A%5D%0Adrop_infra_ctr%20%3D%20true%0A%0A%5Bcrio.runtime.runtimes.runc%5D%0Aruntime_root%20%3D%20%22%2Frun%2Frunc%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%0A%5Bcrio.runtime.runtimes.crun%5D%0Aruntime_root%20%3D%20%22%2Frun%2Fcrun%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%20%20%22io.containers.trace-syscall%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.Devices%22%2C%0A%20%20%20%20%22io.kubernetes.cri-o.LinkLogs%22%2C%0A%5D%0A%23%20Based%20on%20https%3A%2F%2Fgithub.com%2Fcontainers%2Fcrun%2Fblob%2F27d7dd3a0%2FREADME.md%3Fplain%3D1%23L48%0Acontainer_min_memory%20%3D%20%22512KiB%22%0Adefault_annotations%20%3D%20%7B%22run.oci.systemd.subgroup%22%20%3D%20%22%22%7D%0A%0A%5Bcrio.runtime.workloads.openshift-builder%5D%0Aactivation_annotation%20%3D%20%22io.openshift.builder%22%0Aallowed_annotations%20%3D%20%5B%0A%20%20%22io.kubernetes.cri-o.userns-mode%22%2C%0A%20%20%22io.kubernetes.cri-o.Devices%22%0A%5D%0A%5Bcrio.runtime.workloads.openshift-builder.resources%5D%0A%0A%5Bcrio.image%5D%0Aglobal_auth_file%20%
3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_image%20%3D%20%22quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A06bad2ea56c7fcc489b9e47c86ec3dd0024994026d409073d5f77b64f1793a15%22%0Apause_image_auth_file%20%3D%20%22%2Fvar%2Flib%2Fkubelet%2Fconfig.json%22%0Apause_command%20%3D%20%22%2Fusr%2Fbin%2Fpod%22%0Aoci_artifact_mount_support%20%3D%20false%0A%0A%5Bcrio.network%5D%0Anetwork_dir%20%3D%20%22%2Fetc%2Fkubernetes%2Fcni%2Fnet.d%2F%22%0Aplugin_dirs%20%3D%20%5B%0A%20%20%20%20%22%2Fvar%2Flib%2Fcni%2Fbin%22%2C%0A%5D%0A%0A%5Bcrio.metrics%5D%0Aenable_metrics%20%3D%20true%0Ametrics_host%20%3D%20%22127.0.0.1%22%0Ametrics_port%20%3D%209537%0Ametrics_collectors%20%3D%20%5B%0A%20%20%22operations%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_total%22%0A%20%20%22operations_latency_microseconds_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds_total%22%0A%20%20%22operations_latency_microseconds%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_latency_seconds%22%0A%20%20%22operations_errors%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22operations_errors_total%22%0A%20%20%22image_pulls_layer_size%22%2C%0A%20%20%22containers_oom_total%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22containers_oom_count_total%22%0A%20%20%22containers_oom%22%2C%0A%20%20%23%20Drop%20metrics%20with%20excessive%20label%20cardinality.%0A%20%20%23%20%22image_pulls_by_digest%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_bytes_total%22%0A%20%20%23%20%22image_pulls_by_name_skipped%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_skipped_bytes_total%22%0A%20%20%23%20%22image_pulls_failures%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_failure_total%22%0A%20%20%23%20%22image_pulls_successes%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_pulls_success_total%22%0A%20%20%23%20%22image_layer_reuse%22%2C%20%23%20DEPRECATED%3A%20in%20favour%20of%20%22image_layer_reuse_total%22%0A%20%20%22operations_total%22%2C%0A%20%20%22operations_latency_seconds_total%22%2C%0A%20%20%22operations_latency_seconds%22%2C%0A%20%20%22operations_errors_total%22%2C%0A%20%20%22image_pulls_bytes_total%22%2C%0A%20%20%22image_pulls_skipped_bytes_total%22%2C%0A%20%20%22image_pulls_success_total%22%2C%0A%20%20%22image_pulls_failure_total%22%2C%0A%20%20%22image_layer_reuse_total%22%2C%0A%20%20%22containers_oom_count_total%22%2C%0A%20%20%22processes_defunct%22%0A%5D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/crio/crio.conf.d/00-default" + }, + { + "contents": { + "source": "data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D%0A" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/machine-config-daemon/policy-for-old-podman.json" + }, + { + "contents": { + "source": 
"data:,%7B%0A%20%20%20%20%22default%22%3A%20%5B%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22type%22%3A%20%22insecureAcceptAnything%22%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%5D%2C%0A%20%20%20%20%22transports%22%3A%0A%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%22docker-daemon%22%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7B%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%22%22%3A%20%5B%7B%22type%22%3A%22insecureAcceptAnything%22%7D%5D%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20%7D%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/containers/policy.json" + }, + { + "contents": { + "source": "data:,%7B%0A%09%22cloud%22%3A%20%22AzurePublicCloud%22%2C%0A%09%22tenantId%22%3A%20%226047c7e9-b2ad-488d-a54e-dc3f6be6a7ee%22%2C%0A%09%22aadClientId%22%3A%20%22%22%2C%0A%09%22aadClientSecret%22%3A%20%22%22%2C%0A%09%22aadClientCertPath%22%3A%20%22%22%2C%0A%09%22aadClientCertPassword%22%3A%20%22%22%2C%0A%09%22useManagedIdentityExtension%22%3A%20true%2C%0A%09%22userAssignedIdentityID%22%3A%20%22%22%2C%0A%09%22subscriptionId%22%3A%20%2272e3a972-58b0-4afc-bd4f-da89b39ccebd%22%2C%0A%09%22resourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22location%22%3A%20%22centralus%22%2C%0A%09%22vnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-vnet%22%2C%0A%09%22vnetResourceGroup%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-rg%22%2C%0A%09%22subnetName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-worker-subnet%22%2C%0A%09%22securityGroupName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-nsg%22%2C%0A%09%22routeTableName%22%3A%20%22ci-op-pw3ghqzh-bb5c4-pdhfc-node-routetable%22%2C%0A%09%22vmType%22%3A%20%22standard%22%2C%0A%09%22loadBalancerSku%22%3A%20%22standard%22%2C%0A%09%22cloudProviderBackoff%22%3A%20true%2C%0A%09%22useInstanceMetadata%22%3A%20true%2C%0A%09%22excludeMasterFromStandardLB%22%3A%20false%2C%0A%09%22cloudProviderBackoffDuration%22%3A%206%2C%0A%09%22putVMSSVMBatchSize%22%3A%200%2C%0A%09%22enableMigrateToIPBasedBackendPoolAPI%22%3A%20false%0A%7D" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/cloud.conf" + }, + { + "contents": { + "source": "data:,authorization%3A%0A%20%20static%3A%0A%20%20%20%20-%20resourceRequest%3A%20false%0A%20%20%20%20%20%20path%3A%20%2Fmetrics%0A%20%20%20%20%20%20verb%3A%20get%0A%20%20%20%20%20%20user%3A%0A%20%20%20%20%20%20%20%20name%3A%20system%3Aserviceaccount%3Aopenshift-monitoring%3Aprometheus-k8s" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/crio-metrics-proxy.cfg" + }, + { + "contents": { + "source": 
"data:,apiVersion%3A%20v1%0Akind%3A%20Pod%0Ametadata%3A%0A%20%20name%3A%20kube-rbac-proxy-crio%0A%20%20namespace%3A%20openshift-machine-config-operator%0A%20%20annotations%3A%0A%20%20%20%20target.workload.openshift.io%2Fmanagement%3A%20'%7B%22effect%22%3A%20%22PreferredDuringScheduling%22%7D'%0A%20%20%20%20openshift.io%2Frequired-scc%3A%20privileged%0Aspec%3A%0A%20%20volumes%3A%0A%20%20-%20name%3A%20etc-kube%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20hostPath%3A%0A%20%20%20%20%20%20path%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20hostNetwork%3A%20true%0A%20%20priorityClassName%3A%20system-cluster-critical%0A%20%20initContainers%3A%0A%20%20-%20name%3A%20setup%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0%0A%20%20%20%20imagePullPolicy%3A%20IfNotPresent%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20command%3A%20%5B'%2Fbin%2Fbash'%2C%20'-ec'%5D%0A%20%20%20%20args%3A%0A%20%20%20%20-%20%7C%0A%20%20%20%20%20%20echo%20-n%20%22Waiting%20for%20kubelet%20key%20and%20certificate%20to%20be%20available%22%0A%20%20%20%20%20%20while%20%5B%20-n%20%22%24(test%20-e%20%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem)%22%20%5D%20%3B%20do%0A%20%20%20%20%20%20%20%20echo%20-n%20%22.%22%0A%20%20%20%20%20%20%20%20sleep%201%0A%20%20%20%20%20%20%20%20((%20tries%20%2B%3D%201%20))%0A%20%20%20%20%20%20%20%20if%20%5B%5B%20%22%24%7Btries%7D%22%20-gt%2010%20%5D%5D%3B%20then%0A%20%20%20%20%20%20%20%20%20%20echo%20%22Timed%20out%20waiting%20for%20kubelet%20key%20and%20cert.%22%0A%20%20%20%20%20%20%20%20%20%20exit%201%0A%20%20%20%20%20%20%20%20fi%0A%20%20%20%20%20%20done%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20%20%20%20%20cpu%3A%205m%0A%20%20containers%3A%0A%20%20-%20name%3A%20kube-rbac-proxy-crio%0A%20%20%20%20image%3A%20quay.io%2Fopenshift-release-dev%2Focp-v4.0-art-dev%40sha256%3A30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0%0A%20%20%20%20securityContext%3A%0A%20%20%20%20%20%20privileged%3A%20true%0A%20%20%20%20ports%3A%0A%20%20%20%20-%20containerPort%3A%209637%0A%20%20%20%20args%3A%0A%20%20%20%20-%20--secure-listen-address%3D%3A9637%0A%20%20%20%20-%20--config-file%3D%2Fetc%2Fkubernetes%2Fcrio-metrics-proxy.cfg%0A%20%20%20%20-%20--client-ca-file%3D%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20%20%20-%20--logtostderr%3Dtrue%0A%20%20%20%20-%20--kubeconfig%3D%2Fvar%2Flib%2Fkubelet%2Fkubeconfig%0A%20%20%20%20-%20--tls-cipher-suites%3DTLS_AES_128_GCM_SHA256%2CTLS_AES_256_GCM_SHA384%2CTLS_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%2CTLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%2CTLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256%2CTLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256%20%0A%20%20%20%20-%20--tls-min-version%3DVersionTLS12%0A%20%20%20%20-%20--upstream%3Dhttp%3A%2F%2F127.0.0.1%3A9537%0A%20%20%20%20-%20--tls-cert-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-server-current.pem%0A%20%20%20%20-%20--tls-private-key-file%3D%2Fvar%2Flib%2Fkubelet%2Fpki%2Fkubelet-ser
ver-current.pem%0A%20%20%20%20resources%3A%0A%20%20%20%20%20%20requests%3A%0A%20%20%20%20%20%20%20%20cpu%3A%2020m%0A%20%20%20%20%20%20%20%20memory%3A%2050Mi%0A%20%20%20%20terminationMessagePolicy%3A%20FallbackToLogsOnError%0A%20%20%20%20volumeMounts%3A%0A%20%20%20%20-%20name%3A%20etc-kube%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fetc%2Fkubernetes%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true%0A%20%20%20%20-%20name%3A%20var-lib-kubelet%0A%20%20%20%20%20%20mountPath%3A%20%22%2Fvar%2Flib%2Fkubelet%22%0A%20%20%20%20%20%20mountPropagation%3A%20HostToContainer%0A%20%20%20%20%20%20readOnly%3A%20true" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/manifests/criometricsproxy.yaml" + }, + { + "contents": { + "source": "data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjoga3ViZWxldC5jb25maWcuazhzLmlvL3YxYmV0YTEKYXV0aGVudGljYXRpb246CiAgYW5vbnltb3VzOgogICAgZW5hYmxlZDogZmFsc2UKICB3ZWJob29rOgogICAgY2FjaGVUVEw6IDBzCiAgeDUwOToKICAgIGNsaWVudENBRmlsZTogL2V0Yy9rdWJlcm5ldGVzL2t1YmVsZXQtY2EuY3J0CmF1dGhvcml6YXRpb246CiAgd2ViaG9vazoKICAgIGNhY2hlQXV0aG9yaXplZFRUTDogMHMKICAgIGNhY2hlVW5hdXRob3JpemVkVFRMOiAwcwpjZ3JvdXBEcml2ZXI6IHN5c3RlbWQKY2dyb3VwUm9vdDogLwpjbHVzdGVyRE5TOgotIDE3Mi4zMC4wLjEwCmNsdXN0ZXJEb21haW46IGNsdXN0ZXIubG9jYWwKY29udGFpbmVyTG9nTWF4U2l6ZTogNTBNaQpjb250YWluZXJSdW50aW1lRW5kcG9pbnQ6ICIiCmNwdU1hbmFnZXJSZWNvbmNpbGVQZXJpb2Q6IDBzCmNyYXNoTG9vcEJhY2tPZmY6IHt9CmVuYWJsZVN5c3RlbUxvZ1F1ZXJ5OiB0cnVlCmV2aWN0aW9uUHJlc3N1cmVUcmFuc2l0aW9uUGVyaW9kOiAwcwpmZWF0dXJlR2F0ZXM6CiAgQVdTQ2x1c3Rlckhvc3RlZEROUzogZmFsc2UKICBBV1NDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBV1NEZWRpY2F0ZWRIb3N0czogZmFsc2UKICBBV1NTZXJ2aWNlTEJOZXR3b3JrU2VjdXJpdHlHcm91cDogZmFsc2UKICBBZGRpdGlvbmFsUm91dGluZ0NhcGFiaWxpdGllczogdHJ1ZQogIEFkbWluTmV0d29ya1BvbGljeTogdHJ1ZQogIEFsaWJhYmFQbGF0Zm9ybTogdHJ1ZQogIEF1dG9tYXRlZEV0Y2RCYWNrdXA6IGZhbHNlCiAgQXp1cmVDbHVzdGVySG9zdGVkRE5TSW5zdGFsbDogZmFsc2UKICBBenVyZURlZGljYXRlZEhvc3RzOiBmYWxzZQogIEF6dXJlTXVsdGlEaXNrOiBmYWxzZQogIEF6dXJlV29ya2xvYWRJZGVudGl0eTogdHJ1ZQogIEJvb3RJbWFnZVNrZXdFbmZvcmNlbWVudDogZmFsc2UKICBCb290Y05vZGVNYW5hZ2VtZW50OiBmYWxzZQogIEJ1aWxkQ1NJVm9sdW1lczogdHJ1ZQogIENQTVNNYWNoaW5lTmFtZVByZWZpeDogdHJ1ZQogIENodW5rU2l6ZU1pQjogdHJ1ZQogIENsdXN0ZXJBUElJbnN0YWxsOiBmYWxzZQogIENsdXN0ZXJBUElJbnN0YWxsSUJNQ2xvdWQ6IGZhbHNlCiAgQ2x1c3Rlck1vbml0b3JpbmdDb25maWc6IGZhbHNlCiAgQ2x1c3RlclZlcnNpb25PcGVyYXRvckNvbmZpZ3VyYXRpb246IGZhbHNlCiAgQ29uc29sZVBsdWdpbkNvbnRlbnRTZWN1cml0eVBvbGljeTogdHJ1ZQogIEROU05hbWVSZXNvbHZlcjogZmFsc2UKICBEdWFsUmVwbGljYTogZmFsc2UKICBEeWFubWljU2VydmljZUVuZHBvaW50SUJNQ2xvdWQ6IGZhbHNlCiAgRHluYW1pY1Jlc291cmNlQWxsb2NhdGlvbjogZmFsc2UKICBFdGNkQmFja2VuZFF1b3RhOiBmYWxzZQogIEV2ZW50ZWRQTEVHOiBmYWxzZQogIEV4YW1wbGU6IGZhbHNlCiAgRXhhbXBsZTI6IGZhbHNlCiAgRXh0ZXJuYWxPSURDOiBmYWxzZQogIEV4dGVybmFsT0lEQ1dpdGhVSURBbmRFeHRyYUNsYWltTWFwcGluZ3M6IGZhbHNlCiAgRXh0ZXJuYWxTbmFwc2hvdE1ldGFkYXRhOiBmYWxzZQogIEdDUENsdXN0ZXJIb3N0ZWRETlM6IGZhbHNlCiAgR0NQQ2x1c3Rlckhvc3RlZEROU0luc3RhbGw6IGZhbHNlCiAgR0NQQ3VzdG9tQVBJRW5kcG9pbnRzOiBmYWxzZQogIEdDUEN1c3RvbUFQSUVuZHBvaW50c0luc3RhbGw6IGZhbHNlCiAgR2F0ZXdheUFQSTogdHJ1ZQogIEdhdGV3YXlBUElDb250cm9sbGVyOiB0cnVlCiAgSGlnaGx5QXZhaWxhYmxlQXJiaXRlcjogdHJ1ZQogIEltYWdlTW9kZVN0YXR1c1JlcG9ydGluZzogZmFsc2UKICBJbWFnZVN0cmVhbUltcG9ydE1vZGU6IGZhbHNlCiAgSW1hZ2VWb2x1bWU6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJEeW5hbWljQ29uZmlndXJhdGlvbk1hbmFnZXI6IGZhbHNlCiAgSW5ncmVzc0NvbnRyb2xsZXJMQlN1Ym5ldHNBV1M6IHRydWUKICBJbnNpZ2h0c0NvbmZpZzogZmFsc2UKICBJbnNpZ2h0c0NvbmZpZ0FQSTogZmFsc2UKICBJbnNpZ2h0c09uRGVtYW5kRGF0YUdhdGhlcjogZmFsc2UKICBJbnNpZ2h0c1J1bnRpbWVFeHRyYWN0b3I6IGZhb
HNlCiAgSXJyZWNvbmNpbGFibGVNYWNoaW5lQ29uZmlnOiBmYWxzZQogIEtNU0VuY3J5cHRpb25Qcm92aWRlcjogZmFsc2UKICBLTVN2MTogdHJ1ZQogIE1hY2hpbmVBUElNaWdyYXRpb246IGZhbHNlCiAgTWFjaGluZUFQSU9wZXJhdG9yRGlzYWJsZU1hY2hpbmVIZWFsdGhDaGVja0NvbnRyb2xsZXI6IGZhbHNlCiAgTWFjaGluZUNvbmZpZ05vZGVzOiB0cnVlCiAgTWFuYWdlZEJvb3RJbWFnZXM6IHRydWUKICBNYW5hZ2VkQm9vdEltYWdlc0FXUzogdHJ1ZQogIE1hbmFnZWRCb290SW1hZ2VzQXp1cmU6IGZhbHNlCiAgTWFuYWdlZEJvb3RJbWFnZXN2U3BoZXJlOiBmYWxzZQogIE1heFVuYXZhaWxhYmxlU3RhdGVmdWxTZXQ6IGZhbHNlCiAgTWV0cmljc0NvbGxlY3Rpb25Qcm9maWxlczogdHJ1ZQogIE1pbmltdW1LdWJlbGV0VmVyc2lvbjogZmFsc2UKICBNaXhlZENQVXNBbGxvY2F0aW9uOiBmYWxzZQogIE11bHRpQXJjaEluc3RhbGxBenVyZTogZmFsc2UKICBNdWx0aURpc2tTZXR1cDogZmFsc2UKICBNdXRhdGluZ0FkbWlzc2lvblBvbGljeTogZmFsc2UKICBOZXR3b3JrRGlhZ25vc3RpY3NDb25maWc6IHRydWUKICBOZXR3b3JrTGl2ZU1pZ3JhdGlvbjogdHJ1ZQogIE5ldHdvcmtTZWdtZW50YXRpb246IHRydWUKICBOZXdPTE06IHRydWUKICBOZXdPTE1DYXRhbG9nZEFQSVYxTWV0YXM6IGZhbHNlCiAgTmV3T0xNT3duU2luZ2xlTmFtZXNwYWNlOiBmYWxzZQogIE5ld09MTVByZWZsaWdodFBlcm1pc3Npb25DaGVja3M6IGZhbHNlCiAgTmV3T0xNV2ViaG9va1Byb3ZpZGVyT3BlbnNoaWZ0U2VydmljZUNBOiBmYWxzZQogIE5vUmVnaXN0cnlDbHVzdGVyT3BlcmF0aW9uczogZmFsc2UKICBOb2RlU3dhcDogZmFsc2UKICBOdXRhbml4TXVsdGlTdWJuZXRzOiBmYWxzZQogIE9WTk9ic2VydmFiaWxpdHk6IGZhbHNlCiAgT3BlblNoaWZ0UG9kU2VjdXJpdHlBZG1pc3Npb246IHRydWUKICBQaW5uZWRJbWFnZXM6IHRydWUKICBQcmVjb25maWd1cmVkVUROQWRkcmVzc2VzOiBmYWxzZQogIFByb2NNb3VudFR5cGU6IHRydWUKICBSb3V0ZUFkdmVydGlzZW1lbnRzOiB0cnVlCiAgUm91dGVFeHRlcm5hbENlcnRpZmljYXRlOiB0cnVlCiAgU0VMaW51eE1vdW50OiBmYWxzZQogIFNlcnZpY2VBY2NvdW50VG9rZW5Ob2RlQmluZGluZzogdHJ1ZQogIFNldEVJUEZvck5MQkluZ3Jlc3NDb250cm9sbGVyOiB0cnVlCiAgU2hvcnRDZXJ0Um90YXRpb246IGZhbHNlCiAgU2lnbmF0dXJlU3RvcmVzOiBmYWxzZQogIFNpZ3N0b3JlSW1hZ2VWZXJpZmljYXRpb246IGZhbHNlCiAgU2lnc3RvcmVJbWFnZVZlcmlmaWNhdGlvblBLSTogZmFsc2UKICBTdG9yYWdlUGVyZm9ybWFudFNlY3VyaXR5UG9saWN5OiBmYWxzZQogIFRyYW5zbGF0ZVN0cmVhbUNsb3NlV2Vic29ja2V0UmVxdWVzdHM6IGZhbHNlCiAgVXBncmFkZVN0YXR1czogZmFsc2UKICBVc2VyTmFtZXNwYWNlc1BvZFNlY3VyaXR5U3RhbmRhcmRzOiB0cnVlCiAgVXNlck5hbWVzcGFjZXNTdXBwb3J0OiB0cnVlCiAgVlNwaGVyZUNvbmZpZ3VyYWJsZU1heEFsbG93ZWRCbG9ja1ZvbHVtZXNQZXJOb2RlOiBmYWxzZQogIFZTcGhlcmVIb3N0Vk1Hcm91cFpvbmFsOiBmYWxzZQogIFZTcGhlcmVNaXhlZE5vZGVFbnY6IGZhbHNlCiAgVlNwaGVyZU11bHRpRGlzazogdHJ1ZQogIFZTcGhlcmVNdWx0aU5ldHdvcmtzOiB0cnVlCiAgVm9sdW1lQXR0cmlidXRlc0NsYXNzOiBmYWxzZQogIFZvbHVtZUdyb3VwU25hcHNob3Q6IGZhbHNlCmZpbGVDaGVja0ZyZXF1ZW5jeTogMHMKaHR0cENoZWNrRnJlcXVlbmN5OiAwcwppbWFnZU1heGltdW1HQ0FnZTogMHMKaW1hZ2VNaW5pbXVtR0NBZ2U6IDBzCmtpbmQ6IEt1YmVsZXRDb25maWd1cmF0aW9uCmt1YmVBUElCdXJzdDogMTAwCmt1YmVBUElRUFM6IDUwCmxvZ2dpbmc6CiAgZmx1c2hGcmVxdWVuY3k6IDAKICBvcHRpb25zOgogICAganNvbjoKICAgICAgaW5mb0J1ZmZlclNpemU6ICIwIgogICAgdGV4dDoKICAgICAgaW5mb0J1ZmZlclNpemU6ICIwIgogIHZlcmJvc2l0eTogMAptYXhQb2RzOiAyNTAKbWVtb3J5U3dhcDoge30Kbm9kZVN0YXR1c1JlcG9ydEZyZXF1ZW5jeTogNW0wcwpub2RlU3RhdHVzVXBkYXRlRnJlcXVlbmN5OiAxMHMKcG9kUGlkc0xpbWl0OiA0MDk2CnByb3RlY3RLZXJuZWxEZWZhdWx0czogdHJ1ZQpyb3RhdGVDZXJ0aWZpY2F0ZXM6IHRydWUKcnVudGltZVJlcXVlc3RUaW1lb3V0OiAwcwpzZXJpYWxpemVJbWFnZVB1bGxzOiBmYWxzZQpzZXJ2ZXJUTFNCb290c3RyYXA6IHRydWUKc2h1dGRvd25HcmFjZVBlcmlvZDogMHMKc2h1dGRvd25HcmFjZVBlcmlvZENyaXRpY2FsUG9kczogMHMKc3RhdGljUG9kUGF0aDogL2V0Yy9rdWJlcm5ldGVzL21hbmlmZXN0cwpzdHJlYW1pbmdDb25uZWN0aW9uSWRsZVRpbWVvdXQ6IDBzCnN5bmNGcmVxdWVuY3k6IDBzCnN5c3RlbUNncm91cHM6IC9zeXN0ZW0uc2xpY2UKdGxzQ2lwaGVyU3VpdGVzOgotIFRMU19BRVNfMTI4X0dDTV9TSEEyNTYKLSBUTFNfQUVTXzI1Nl9HQ01fU0hBMzg0Ci0gVExTX0NIQUNIQTIwX1BPTFkxMzA1X1NIQTI1NgotIFRMU19FQ0RIRV9FQ0RTQV9XSVRIX0FFU18xMjhfR0NNX1NIQTI1NgotIFRMU19FQ0RIRV9SU0FfV0lUSF9BRVNfMTI4X0dDTV9TSEEyNTYKLSBUTFNfRUNESEVfRUNEU0FfV0lUSF9BRVNfMjU2X0dDTV9TSEEzODQKLSBUTFNfRUNE
SEVfUlNBX1dJVEhfQUVTXzI1Nl9HQ01fU0hBMzg0Ci0gVExTX0VDREhFX0VDRFNBX1dJVEhfQ0hBQ0hBMjBfUE9MWTEzMDVfU0hBMjU2Ci0gVExTX0VDREhFX1JTQV9XSVRIX0NIQUNIQTIwX1BPTFkxMzA1X1NIQTI1Ngp0bHNNaW5WZXJzaW9uOiBWZXJzaW9uVExTMTIKdm9sdW1lU3RhdHNBZ2dQZXJpb2Q6IDBzCg==" + }, + "mode": 420, + "overwrite": true, + "path": "/etc/kubernetes/kubelet.conf" + }, + { + "contents": { + "source": "data:,%23!%2Fbin%2Fsh%0Aif%20%5B%20-x%20%2Fusr%2Fbin%2Fkubensenter%20%5D%3B%20then%0A%20%20exec%20%2Fusr%2Fbin%2Fkubensenter%20%22%24%40%22%0Aelse%0A%20%20exec%20%22%24%40%22%0Afi%0A" + }, + "mode": 493, + "overwrite": true, + "path": "/usr/local/bin/kubenswrapper" + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nDescription=Cleans NetworkManager state generated by dracut\n# Removal of this file signals firstboot completion\nConditionPathExists=!/etc/ignition-machine-config-encapsulated.json\n# This is opt-in for some deployment types, and opt-out for others.\nConditionPathExists=/var/lib/mco/nm-clean-initrd-state\nWants=network-pre.target\nBefore=network-pre.target\n\n[Service]\nType=oneshot\n# Remove any existing state possibly generated NM run by dracut. We want NM to\n# consider all profiles autoconnect priority when it starts instead of\n# remembering which profile was a device activated with when NM is run by\n# dracut.\nExecStart=/usr/local/bin/nm-clean-initrd-state.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "NetworkManager-clean-initrd-state.service" + }, + { + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"ENABLE_PROFILE_UNIX_SOCKET=true\"\n", + "name": "10-mco-profile-unix-socket.conf" + }, + { + "contents": "[Unit]\nAfter=kubelet-dependencies.target\nRequires=kubelet-dependencies.target\n", + "name": "05-mco-ordering.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "name": "crio.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "docker.socket" + }, + { + "contents": "[Unit]\nDescription=The firstboot OS update has completed\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target\n\n[Install]\nWantedBy=default.target\n", + "enabled": true, + "name": "firstboot-osupdate.target" + }, + { + "dropins": [ + { + "contents": "[Unit]\nAfter=ovs-configuration.service\nBefore=crio.service\n", + "name": "01-after-configure-ovs.conf" + } + ], + "name": "ipsec.service" + }, + { + "contents": "[Unit]\nDescription=Dynamically sets the system reserved for the kubelet\nWants=network-online.target\nAfter=network-online.target firstboot-osupdate.target\nBefore=kubelet-dependencies.target\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nRemainAfterExit=yes\nEnvironmentFile=/etc/node-sizing-enabled.env\nExecStart=/bin/bash /usr/local/sbin/dynamic-system-reserved-calc.sh ${NODE_SIZING_ENABLED} ${SYSTEM_RESERVED_MEMORY} ${SYSTEM_RESERVED_CPU} 
${SYSTEM_RESERVED_ES}\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "kubelet-auto-node-size.service" + }, + { + "contents": "[Unit]\nDescription=Dependencies necessary to run kubelet\nDocumentation=https://github.com/openshift/machine-config-operator/\nRequires=basic.target network-online.target\nWants=NetworkManager-wait-online.service crio-wipe.service\nWants=rpc-statd.service chrony-wait.service\n", + "name": "kubelet-dependencies.target" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=crio.service kubelet-dependencies.target\nAfter=kubelet-dependencies.target\nAfter=ostree-finalize-staged.service\n\n[Service]\nType=notify\nExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests\nExecStartPre=-/usr/sbin/restorecon -ri /var/lib/kubelet/pod-resources /usr/local/bin/kubenswrapper /usr/bin/kubensenter\nEnvironment=\"KUBELET_NODE_IP=0.0.0.0\"\nEnvironmentFile=/etc/os-release\nEnvironmentFile=-/etc/kubernetes/kubelet-workaround\nEnvironmentFile=-/etc/kubernetes/kubelet-env\nEnvironmentFile=/etc/node-sizing.env\n\nExecStart=/usr/local/bin/kubenswrapper \\\n /usr/bin/kubelet \\\n --config=/etc/kubernetes/kubelet.conf \\\n --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --container-runtime-endpoint=/var/run/crio/crio.sock \\\n --runtime-cgroups=/system.slice/crio.service \\\n --node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID},${CUSTOM_KUBELET_LABELS} \\\n --node-ip=${KUBELET_NODE_IP} \\\n --minimum-container-ttl-duration=6m0s \\\n --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \\\n --cloud-provider=external \\\n --image-credential-provider-bin-dir=/usr/libexec/kubelet-image-credential-provider-plugins --image-credential-provider-config=/etc/kubernetes/credential-providers/acr-credential-provider.yaml \\\n --hostname-override=${KUBELET_NODE_NAME} \\\n --provider-id=${KUBELET_PROVIDERID} \\\n --pod-infra-container-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bad2ea56c7fcc489b9e47c86ec3dd0024994026d409073d5f77b64f1793a15 \\\n --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \\\n --v=${KUBELET_LOG_LEVEL}\n\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=multi-user.target\n", + "dropins": [ + { + "contents": "# vim:set ft=systemd :\n#\n# This drop-in will enable any service built with this\n# github.com/containers/kubemntns library to properly join the mount namespace\n# managed by kubens.service\n#\n\n[Unit]\nAfter=kubens.service\n\n[Service]\nEnvironmentFile=-/run/kubens/env\n", + "name": "01-kubens.conf" + }, + { + "contents": "", + "name": "10-mco-default-env.conf" + }, + { + "contents": "[Service]\nEnvironment=\"GODEBUG=x509ignoreCN=0,madvdontneed=1\"\n", + "name": "10-mco-default-madv.conf" + } + ], + "enabled": true, + "name": "kubelet.service" + }, + { + "contents": "[Unit]\nDescription=Manages a mount namespace for kubernetes-specific mounts\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nRuntimeDirectory=kubens\nEnvironment=RUNTIME_DIRECTORY=%t/kubens\nEnvironment=BIND_POINT=%t/kubens/mnt\nEnvironment=ENVFILE=%t/kubens/env\n\n# Set up the runtime directory as an unbindable mountpoint\nExecStartPre=bash -c \"findmnt ${RUNTIME_DIRECTORY} || mount --make-unbindable --bind ${RUNTIME_DIRECTORY} ${RUNTIME_DIRECTORY}\"\n# Ensure the bind point exists\nExecStartPre=touch ${BIND_POINT}\n# Use 'unshare' to create the new mountpoint, then 
'mount --make-rshared' so it cascades internally\nExecStart=unshare --mount=${BIND_POINT} --propagation slave mount --make-rshared /\n# Finally, set an env pointer for ease-of-use\nExecStartPost=bash -c 'echo \"KUBENSMNT=${BIND_POINT}\" \u003e \"${ENVFILE}\"'\n\n# On stop, a recursive unmount cleans up the namespace and bind-mounted unbindable parent directory\nExecStop=umount -R ${RUNTIME_DIRECTORY}\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": false, + "name": "kubens.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Firstboot\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# Removal of this file signals firstboot completion\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\nAfter=machine-config-daemon-pull.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\n# Disable existing repos (if any) so that OS extensions would use embedded RPMs only\nExecStartPre=-/usr/bin/sh -c \"sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*.repo\"\n# Run this via podman because we want to use the nmstatectl binary in our container\nExecStart=/usr/bin/podman run --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84' firstboot-complete-machineconfig --persist-nics\nExecStart=/usr/bin/podman run --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84' firstboot-complete-machineconfig\n[Install]\nRequiredBy=firstboot-osupdate.target\n", + "enabled": true, + "name": "machine-config-daemon-firstboot.service" + }, + { + "contents": "[Unit]\nDescription=Machine Config Daemon Pull\n# Make sure it runs only on OSTree booted system\nConditionPathExists=/run/ostree-booted\n# This \"stamp file\" is unlinked when we complete\n# machine-config-daemon-firstboot.service\nConditionPathExists=/etc/ignition-machine-config-encapsulated.json\n# Run after crio-wipe so the pulled MCD image is protected against a corrupted storage from a forced shutdown\nWants=crio-wipe.service NetworkManager-wait-online.service\nAfter=crio-wipe.service NetworkManager-wait-online.service network.service\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStartPre=/etc/machine-config-daemon/generate_podman_policy_args.sh\nExecStart=/bin/sh -c \"while ! /usr/bin/podman pull $(cat /tmp/podman_policy_args) --authfile=/var/lib/kubelet/config.json 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84'; do sleep 1; done\"\n\n[Install]\nRequiredBy=machine-config-daemon-firstboot.service\n", + "enabled": true, + "name": "machine-config-daemon-pull.service" + }, + { + "contents": "[Unit]\nDescription=Applies per-node NMState network configuration\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service\nBefore=nmstate.service kubelet-dependencies.target ovs-configuration.service node-valid-hostname.service\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. 
It should be\n# available in systemd v244 and higher.\nExecStart=/usr/local/bin/nmstate-configuration.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "nmstate-configuration.service" + }, + { + "contents": "[Unit]\nDescription=Wait for a non-localhost hostname\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nUser=root\nExecStart=/usr/local/bin/mco-hostname --wait\n\n# Wait up to 5min for the node to get a non-localhost name\nTimeoutSec=300\n\n[Install]\n# TODO: Change this to RequiredBy after we fix https://github.com/openshift/machine-config-operator/pull/3865#issuecomment-1746963115\nWantedBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "node-valid-hostname.service" + }, + { + "contents": "[Unit]\nDescription=Writes IP address configuration so that kubelet and crio services select a valid node IP\nWants=NetworkManager-wait-online.service\nAfter=NetworkManager-wait-online.service firstboot-osupdate.target\nBefore=kubelet-dependencies.target ovs-configuration.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/bin/podman run --rm \\\n --authfile /var/lib/kubelet/config.json \\\n --env 'ENABLE_NODEIP_DEBUG=true' \\\n --net=host \\\n --security-opt label=disable \\\n --volume /etc/systemd/system:/etc/systemd/system \\\n --volume /run/nodeip-configuration:/run/nodeip-configuration \\\n quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cb620f8264c66301b054ed08c776fffefe36fe0054e4a9fbda2fa9b748d8e2e3 \\\n node-ip \\\n set \\\n --retry-on-failure \\\n --network-type OVNKubernetes \\\n ${NODEIP_HINT:-${KUBELET_NODEIP_HINT:-}}; \\\n do \\\n sleep 5; \\\n done\"\nExecStart=/bin/systemctl daemon-reload\nExecStartPre=/bin/mkdir -p /run/nodeip-configuration\nStandardOutput=journal+console\nStandardError=journal+console\n\nEnvironmentFile=-/etc/default/nodeip-configuration\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": false, + "name": "nodeip-configuration.service" + }, + { + "enabled": true, + "name": "openvswitch.service" + }, + { + "contents": "[Unit]\n# Kdump will generate it's initramfs based on the running state when kdump.service run\n# If OVS has already run, the kdump fails to gather a working network config,\n# which prevent network log exports, sush as SSH.\n# See https://issues.redhat.com/browse/OCPBUGS-28239\nAfter=kdump.service\nDescription=Configures OVS with proper host networking configuration\n# This service is used to move a physical NIC into OVS and reconfigure OVS to use the host IP\nRequires=openvswitch.service\nWants=NetworkManager-wait-online.service\nAfter=firstboot-osupdate.target\nAfter=NetworkManager-wait-online.service openvswitch.service network.service nodeip-configuration.service nmstate.service\nBefore=kubelet-dependencies.target node-valid-hostname.service dnsmasq.service\n\n[Service]\n# Need oneshot to delay kubelet\nType=oneshot\nExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nRequiredBy=kubelet-dependencies.target\n", + "enabled": true, + "name": "ovs-configuration.service" + }, + { + "dropins": [ + { + "contents": 
"[Service]\nRestart=always\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch'\nExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch'\nExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:info\nExecReload=-/usr/bin/ovs-appctl vlog/set syslog:info\n", + "name": "10-ovs-vswitchd-restart.conf" + } + ], + "name": "ovs-vswitchd.service" + }, + { + "dropins": [ + { + "contents": "[Service]\nRestart=always\n", + "name": "10-ovsdb-restart.conf" + } + ], + "enabled": true, + "name": "ovsdb-server.service" + }, + { + "dropins": [ + { + "contents": "", + "name": "10-mco-default-env.conf" + } + ], + "name": "rpm-ostreed.service" + }, + { + "contents": "[Unit]\nDescription=Ensure IKE SA established for existing IPsec connections.\nAfter=ipsec.service\nBefore=kubelet-dependencies.target node-valid-hostname.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/ipsec-connect-wait.sh\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=ipsec.service\n", + "enabled": true, + "name": "wait-for-ipsec-connect.service" + }, + { + "contents": "[Unit]\nDescription=Ensure primary IP is assigned and usable\nRequires=nmstate.service\nAfter=nmstate.service\nBefore=kubelet-dependencies.target\n\n[Service]\nType=oneshot\n# Would prefer to do Restart=on-failure instead of this bash retry loop, but\n# the version of systemd we have right now doesn't support it. It should be\n# available in systemd v244 and higher.\nExecStart=/bin/bash -c \" \\\n until \\\n /usr/local/bin/wait-for-primary-ip.sh; \\\n do \\\n sleep 10; \\\n done\"\nStandardOutput=journal+console\nStandardError=journal+console\n\n[Install]\nWantedBy=network-online.target\n", + "enabled": true, + "name": "wait-for-primary-ip.service" + }, + { + "dropins": [ + { + "contents": "[Unit]\nConditionPathExists=/enoent\n", + "name": "mco-disabled.conf" + } + ], + "name": "zincati.service" + }, + { + "contents": "[Unit]\nDescription=Kubernetes Kubelet After Reboot Cleanup\nBefore=kubelet.service\n\n[Service]\nType=oneshot\nExecStart=/bin/rm -f /var/lib/kubelet/cpu_manager_state\nExecStart=/bin/rm -f /var/lib/kubelet/memory_manager_state\nExecStart=-/bin/rm -f /var/lib/kubelet/dra_manager_state\n\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "kubelet-cleanup.service" + } + ] + } + }, + "extensions": [], + "fips": false, + "kernelArguments": [ + "systemd.unified_cgroup_hierarchy=1", + "cgroup_no_v1=\"all\"", + "psi=0" + ], + "kernelType": "default", + "osImageURL": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ec4b5c12b787640403135c65517882f80c80fd0c741dfeb70e9c716c5ca2edeb" + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "" + } +} diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mcp.yaml b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mcp.yaml new file mode 100644 index 0000000000..ffb89def6b --- /dev/null +++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-mcp.yaml @@ -0,0 +1,434 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfigPool", + "metadata": { + "creationTimestamp": "2025-08-19T18:55:10Z", + "generation": 3, + "labels": { + "machineconfiguration.openshift.io/mco-built-in": "", + "operator.machineconfiguration.openshift.io/required-for-upgrade": "", + "pools.operator.machineconfiguration.openshift.io/master": "" + }, + 
"name": "master", + "resourceVersion": "108518", + "uid": "8bcdea4d-5638-4bd5-9ff2-b0d669977136" + }, + "spec": { + "configuration": { + "name": "rendered-master-505a1e08a37430cbe1ee421928f810ec", + "source": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "00-master" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-master-container-runtime" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-master-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "97-master-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "98-master-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-installer-ignition-master" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-master-generated-registries" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-master-ssh" + } + ] + }, + "machineConfigSelector": { + "matchLabels": { + "machineconfiguration.openshift.io/role": "master" + } + }, + "nodeSelector": { + "matchLabels": { + "node-role.kubernetes.io/master": "" + } + }, + "paused": false + }, + "status": { + "certExpirys": [ + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2035-08-17T18:12:00Z", + "subject": "CN=admin-kubeconfig-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2025-08-20T18:12:04Z", + "subject": "CN=kube-csr-signer_@1755629664" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2025-08-20T18:12:04Z", + "subject": "CN=kubelet-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2026-08-19T18:12:04Z", + "subject": "CN=kube-apiserver-to-kubelet-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2026-08-19T18:12:04Z", + "subject": "CN=kube-control-plane-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2035-08-17T18:12:01Z", + "subject": "CN=kubelet-bootstrap-kubeconfig-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2028-08-18T18:54:23Z", + "subject": "CN=openshift-kube-apiserver-operator_node-system-admin-signer@1755629662" + } + ], + "conditions": [ + { + "lastTransitionTime": "2025-08-19T18:55:59Z", + "message": "", + "reason": "", + "status": "False", + "type": "PinnedImageSetsDegraded" + }, + { + "lastTransitionTime": "2025-08-19T18:56:00Z", + "message": "", + "reason": "", + "status": "False", + "type": "RenderDegraded" + }, + { + "lastTransitionTime": "2025-08-19T20:57:36Z", + "message": "", + "reason": "", + "status": "False", + "type": "Updated" + }, + { + "lastTransitionTime": "2025-08-19T20:57:36Z", + "message": "All nodes are updating to MachineConfig rendered-master-505a1e08a37430cbe1ee421928f810ec", + "reason": "", + "status": "True", + "type": "Updating" + }, + { + "lastTransitionTime": "2025-08-19T21:16:02Z", + "message": "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: \"Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 upgrade failure. 
failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\\\"2025-08-19T22:13:54Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:05Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:16Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244-\u003e168.63.129.16:53: i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400-\u003e168.63.129.16:53: i/o timeout\\n: exit status 125]\", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: \"failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\\\"2025-08-19T22:13:54Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:05Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:16Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (3/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244-\u003e168.63.129.16:53: i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400-\u003e168.63.129.16:53: i/o timeout\\n: exit status 125]\"", + "reason": "1 nodes are reporting degraded status on sync", + "status": "True", + "type": "NodeDegraded" + }, + { + "lastTransitionTime": "2025-08-19T21:16:02Z", + "message": "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: \"Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 upgrade failure. failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\\\"2025-08-19T22:13:54Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:05Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:16Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244-\u003e168.63.129.16:53: i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400-\u003e168.63.129.16:53: i/o timeout\\n: exit status 125]\", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: \"failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\\\"2025-08-19T22:13:54Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (1/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:05Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467-\u003e168.63.129.16:53: i/o timeout\\\"\\ntime=\\\"2025-08-19T22:14:16Z\\\" level=warning msg=\\\"Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\\\\\"https://quay.io/v2/\\\\\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244-\u003e168.63.129.16:53: i/o timeout\\\"\\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400-\u003e168.63.129.16:53: i/o timeout\\n: exit status 125]\"", + "reason": "", + "status": "True", + "type": "Degraded" + } + ], + "configuration": { + "name": "rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "source": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "00-master" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-master-container-runtime" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-master-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "97-master-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "98-master-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-installer-ignition-master" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-master-generated-registries" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-master-ssh" + } + ] + }, + "degradedMachineCount": 1, + "machineCount": 3, + "observedGeneration": 3, + "poolSynchronizersStatus": [ + { + "availableMachineCount": 2, + "machineCount": 3, + "poolSynchronizerType": "PinnedImageSets", + "readyMachineCount": 1, + "unavailableMachineCount": 1, + "updatedMachineCount": 3 + } + ], + "readyMachineCount": 1, + "unavailableMachineCount": 1, + "updatedMachineCount": 1 + } + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfigPool", + "metadata": { + "creationTimestamp": "2025-08-19T18:55:10Z", + "generation": 3, + "labels": { + "machineconfiguration.openshift.io/mco-built-in": "", + "pools.operator.machineconfiguration.openshift.io/worker": "" + }, + "name": "worker", + "resourceVersion": "89942", + "uid": 
"19c9702b-1ecf-4c47-ab1e-0bf44aeb0869" + }, + "spec": { + "configuration": { + "name": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "source": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "00-worker" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-worker-container-runtime" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-worker-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "97-worker-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "98-worker-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-worker-generated-registries" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-worker-ssh" + } + ] + }, + "machineConfigSelector": { + "matchLabels": { + "machineconfiguration.openshift.io/role": "worker" + } + }, + "nodeSelector": { + "matchLabels": { + "node-role.kubernetes.io/worker": "" + } + }, + "paused": false + }, + "status": { + "certExpirys": [ + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2035-08-17T18:12:00Z", + "subject": "CN=admin-kubeconfig-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2025-08-20T18:12:04Z", + "subject": "CN=kube-csr-signer_@1755629664" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2025-08-20T18:12:04Z", + "subject": "CN=kubelet-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2026-08-19T18:12:04Z", + "subject": "CN=kube-apiserver-to-kubelet-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2026-08-19T18:12:04Z", + "subject": "CN=kube-control-plane-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2035-08-17T18:12:01Z", + "subject": "CN=kubelet-bootstrap-kubeconfig-signer,OU=openshift" + }, + { + "bundle": "KubeAPIServerServingCAData", + "expiry": "2028-08-18T18:54:23Z", + "subject": "CN=openshift-kube-apiserver-operator_node-system-admin-signer@1755629662" + } + ], + "conditions": [ + { + "lastTransitionTime": "2025-08-19T18:55:59Z", + "message": "", + "reason": "", + "status": "False", + "type": "PinnedImageSetsDegraded" + }, + { + "lastTransitionTime": "2025-08-19T18:56:00Z", + "message": "", + "reason": "", + "status": "False", + "type": "RenderDegraded" + }, + { + "lastTransitionTime": "2025-08-19T18:56:04Z", + "message": "", + "reason": "", + "status": "False", + "type": "NodeDegraded" + }, + { + "lastTransitionTime": "2025-08-19T18:56:04Z", + "message": "", + "reason": "", + "status": "False", + "type": "Degraded" + }, + { + "lastTransitionTime": "2025-08-19T21:15:50Z", + "message": "All nodes are updated with MachineConfig rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "reason": "", + "status": "True", + "type": "Updated" + }, + { + "lastTransitionTime": "2025-08-19T21:15:50Z", + "message": "", + "reason": "", + "status": "False", + "type": "Updating" + } + ], + "configuration": { + "name": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "source": [ + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "00-worker" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": 
"MachineConfig", + "name": "01-worker-container-runtime" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "01-worker-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "97-worker-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "98-worker-generated-kubelet" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-worker-generated-registries" + }, + { + "apiVersion": "machineconfiguration.openshift.io/v1", + "kind": "MachineConfig", + "name": "99-worker-ssh" + } + ] + }, + "degradedMachineCount": 0, + "machineCount": 3, + "observedGeneration": 3, + "poolSynchronizersStatus": [ + { + "availableMachineCount": 3, + "machineCount": 3, + "poolSynchronizerType": "PinnedImageSets", + "readyMachineCount": 3, + "unavailableMachineCount": 0, + "updatedMachineCount": 3 + } + ], + "readyMachineCount": 3, + "unavailableMachineCount": 0, + "updatedMachineCount": 3 + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "" + } +} diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-node.yaml b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-node.yaml new file mode 100644 index 0000000000..747065fa24 --- /dev/null +++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768-node.yaml @@ -0,0 +1,2880 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-0-nic\",\"ifaddr\":{\"ipv4\":\"10.0.0.0/17\"},\"capacity\":{\"ip\":255}}]", + "cluster.x-k8s.io/cluster-name": "ci-op-pw3ghqzh-bb5c4-pdhfc", + "cluster.x-k8s.io/cluster-namespace": "openshift-cluster-api-guests", + "cluster.x-k8s.io/labels-from-machine": "", + "cluster.x-k8s.io/machine": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-0\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-0\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.0.4/17\"]", + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-master-0\",\"mac-address\":\"7c:ed:8d:09:16:12\",\"ip-addresses\":[\"10.0.0.4/17\"],\"ip-address\":\"10.0.0.4/17\",\"next-hops\":[\"10.0.0.1\"],\"next-hop\":\"10.0.0.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "70a96d5b-33b5-490d-8953-7db503f743b1", + "k8s.ovn.org/node-encap-ips": "[\"10.0.0.4\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.2/16\"}}", + "k8s.ovn.org/node-id": "2", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.0.4/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.130.0.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.2/16\"}", + "k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": 
"rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "machineconfiguration.openshift.io/desiredConfig": "rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "machineconfiguration.openshift.io/desiredDrain": "uncordon-rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "machineconfiguration.openshift.io/lastAppliedDrain": "uncordon-rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/reason": "", + "machineconfiguration.openshift.io/state": "Done", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T18:49:58Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D8s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-2", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/instance-type": "Standard_D8s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": "centralus-2", + "topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-2" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "resourceVersion": "108884", + "uid": "9dbcdc00-3fe0-49b7-98d7-a743a108de91" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + }, + { + "effect": "PreferNoSchedule", + "key": "UpdateInProgress" + } + ] + }, + "status": { + "addresses": [ + { + "address": "10.0.0.4", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-0", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "7500m", + "ephemeral-storage": "940839717763", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "31710696Ki", + "pods": "250" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "1022041516Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "32861672Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:16:02Z", + "lastTransitionTime": "2025-08-19T18:49:58Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:02Z", + "lastTransitionTime": "2025-08-19T18:49:58Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:02Z", + "lastTransitionTime": "2025-08-19T18:49:58Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:02Z", + "lastTransitionTime": "2025-08-19T18:54:03Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 
10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:264b95d9da49e98b0e95d475a54502a5f6a038aed1223785efe4d548c0c627b9" + ], + "sizeBytes": 2986782136 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1578c5f720239b6f363d8dc6145056c0f0ab0b20442f59ac74f3b4b91b8382dd" + ], + "sizeBytes": 2623946604 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f44ccf8dfafac1d76d63f088d5489aa2c80c500090b54d163a1313ea29e59c70" + ], + "sizeBytes": 1709750180 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c9a16b4080eedd3104bf9629bdc7bd4cf35e09be8d663119fe7ef3ba59305fe" + ], + "sizeBytes": 1005597554 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c4b2c1018d6146ef3f44c71e93d1e2f07a19f66744cad11a61e9db81630eb149" + ], + "sizeBytes": 1005593465 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69923e2fd3aaee33dec35fb7bfc89157d1fac108fcbbe59940b937e00bd96aaa" + ], + "sizeBytes": 994228126 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9b587825d65e11a5c7a569ae88487f008187764cfaf336956b66de66ca0f71e" + ], + "sizeBytes": 994224039 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d94306beb1451e42a52df1eaad44328e1d435cd6cf6458e2cb2af36e4beed78" + ], + "sizeBytes": 881490655 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:74f21c787c6e21f6266885e542e4e0a29b5d907918f1f1f87a3370579726c6da" + ], + "sizeBytes": 881464535 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b1deb65135994266b490bddea69f1401910155554b9e5188815f4c46f1aadc85" + ], + "sizeBytes": 879740749 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c58fc56284d30b193ead1b0d675fd6f1028a0b174136c40f25e602c095cb5d5d" + ], + "sizeBytes": 861083916 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f4d7e8675f66e1a1ed9b125c1bc3457db8662895a462747f8e23f1a04978d9c0" + ], + "sizeBytes": 861024000 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0523529ce5209e6ce49bc5af8d7fede115dd9e8bdf5d93d30de92c83172c46a6" + ], + "sizeBytes": 651870673 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5173bfa1042b75eedb315a7d690ecdf769ad52ce5e1e7fac9bbb2a477270bc40" + ], + "sizeBytes": 651867098 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 643877659 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5d712ff60cc9c24b9a3843ede6685928ae854b270278ecb5369dbd4ff07f495" + ], + "sizeBytes": 622559445 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c87ca3968251e1912e493f811b030dfb94864ce5c72185f4442ce5f53638cae" + ], + "sizeBytes": 622447307 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8464d819fc02d1a5ca0069fe368054dd1490e71d4ea909bbbab64853e9f42f61" + ], + "sizeBytes": 606541524 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bf3a1bab30395d536e1ca717a4f0c21b8fcbbaa1bc738e0d7df09da31c6f904" + ], + "sizeBytes": 595957777 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3de1c8747aae934b3e965856516dae2d9b67827a7b2b77051e2761d46d6ab72f" + ], + "sizeBytes": 574053299 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e667bdb87e7ad857724b8c970a07fb2365cb5138749c85c736dabc055765aab4" + ], + "sizeBytes": 554355698 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afeff223e2c8673e6b3d7bf215b6f93aecfa296864e4f57ceacdec0953c65440" + ], + "sizeBytes": 554351596 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bd979c4f44d10d488c5e9c4192ffc216f56c3d2578b629753d1cec293e820ea" + ], + "sizeBytes": 547466620 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:22b6cb6e21de70f6bd7426775f35523fe027ad68e567d38fbd93871fecf681c1" + ], + "sizeBytes": 547464067 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ef1c22cc81f7d9651705bc4d12c2ac9147a874c717f6137e9e6be5f84cdfb4c8" + ], + "sizeBytes": 541973392 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:28b14c96475f455b972efeb5df2519a4c0a502f0a56f92756f4407b08e27d612" + ], + "sizeBytes": 541969301 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:67afe375350a2f7945c0e99aa0de6f4b33396800dd11c4ed98e35cb896d91418" + ], + "sizeBytes": 541489999 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1cce893b180c7da15d39feda52db5fe7e45943bb3507e402b94754f497bd71ad" + ], + "sizeBytes": 536453507 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "3cdd93a5-5863-4e77-8982-4ca8fe9dbda7", + "containerRuntimeVersion": "cri-o://1.33.3-3.rhaos4.20.git22d4f06.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "6b109a02c0134995a5d5f790f35c0edd", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250811-0 (Plow)", + "systemUUID": "0ee6a716-70b0-46b7-ad1f-629cf23c160e" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "runc" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-1-nic\",\"ifaddr\":{\"ipv4\":\"10.0.0.0/17\"},\"capacity\":{\"ip\":255}}]", + "cluster.x-k8s.io/cluster-name": "ci-op-pw3ghqzh-bb5c4-pdhfc", + "cluster.x-k8s.io/cluster-namespace": "openshift-cluster-api-guests", + "cluster.x-k8s.io/labels-from-machine": "", + "cluster.x-k8s.io/machine": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-1\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-1\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.0.6/17\"]", + "k8s.ovn.org/l3-gateway-config": 
"{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-master-1\",\"mac-address\":\"7c:1e:52:88:d4:1d\",\"ip-addresses\":[\"10.0.0.6/17\"],\"ip-address\":\"10.0.0.6/17\",\"next-hops\":[\"10.0.0.1\"],\"next-hop\":\"10.0.0.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "4f20ce9a-5ad9-461c-a9ea-fb31c158be39", + "k8s.ovn.org/node-encap-ips": "[\"10.0.0.6\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.3/16\"}}", + "k8s.ovn.org/node-id": "3", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.0.6/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.129.0.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.3/16\"}", + "k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": "rendered-master-51b9bd96466b1461e8a9b84416e4405b", + "machineconfiguration.openshift.io/desiredConfig": "rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/desiredDrain": "drain-rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/lastAppliedDrain": "drain-rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/reason": "failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\"2025-08-19T22:13:54Z\" level=warning msg=\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349-\u003e168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T22:14:05Z\" level=warning msg=\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467-\u003e168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T22:14:16Z\" level=warning msg=\"Failed, retrying in 1s ... (3/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244-\u003e168.63.129.16:53: i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400-\u003e168.63.129.16:53: i/o timeout\n: exit status 125]", + "machineconfiguration.openshift.io/state": "Degraded", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T18:50:22Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D8s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-3", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/instance-type": "Standard_D8s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": "centralus-3", + "topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-3" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "resourceVersion": "109024", + "uid": "aa1bdaed-ab6f-4982-b56e-a64861224723" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + }, + { + "effect": "NoSchedule", + "key": "node.kubernetes.io/unschedulable", + "timeAdded": "2025-08-19T21:06:02Z" + } + ], + "unschedulable": true + }, + "status": { + "addresses": [ + { + "address": "10.0.0.6", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-1", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "7500m", + "ephemeral-storage": "940839717763", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "31710740Ki", + "pods": "250" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "1022041516Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "32861716Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:16:39Z", + "lastTransitionTime": "2025-08-19T19:05:12Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:39Z", + "lastTransitionTime": "2025-08-19T19:05:12Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:39Z", + "lastTransitionTime": "2025-08-19T19:05:12Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:39Z", + "lastTransitionTime": "2025-08-19T19:05:12Z", + "message": "kubelet is 
posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:264b95d9da49e98b0e95d475a54502a5f6a038aed1223785efe4d548c0c627b9" + ], + "sizeBytes": 2986782136 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:272861fb416f41c0fb4113b61a3f82d6585563e60bba992a818681dbc25336bf" + ], + "sizeBytes": 2986615212 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f44ccf8dfafac1d76d63f088d5489aa2c80c500090b54d163a1313ea29e59c70" + ], + "sizeBytes": 1709750180 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-operator-index@sha256:4fe73e654adf2754ab50aa04018350c5b58702234d9a57e54e81a8534e801743", + "registry.redhat.io/redhat/redhat-operator-index@sha256:e71eb3e1c4204bfe30b96c35209ac1951e16340c72d240d162ddc4ed696a75ac", + "registry.redhat.io/redhat/redhat-operator-index:v4.20" + ], + "sizeBytes": 1693120212 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-operator-index@sha256:eb6d82457662695ddc16dac1b44a67d49f83c26200f92cdbe888c48bc63e8d33" + ], + "sizeBytes": 1691666624 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "registry.redhat.io/redhat/community-operator-index@sha256:44dd276765cbac4b9d437d57621b9fa908804afe4c09a1f67b3ad6eb735638f9", + "registry.redhat.io/redhat/community-operator-index@sha256:5753fbfe051f907bb9f4762a87fdb4f5b9b76f349fda44e923d1b36bd8602501", + "registry.redhat.io/redhat/community-operator-index:v4.20" + ], + "sizeBytes": 1207034567 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-marketplace-index@sha256:0fb6e453e1514188640de2ba3ba7a81234b55ca31c3b9e27005950453951e220", + "registry.redhat.io/redhat/redhat-marketplace-index@sha256:3b21b0bae2dcd659840e001171ccf79518662d5851e8ee38cd9926d699b1c2a9", + "registry.redhat.io/redhat/redhat-marketplace-index:v4.20" + ], + "sizeBytes": 1086465212 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-marketplace-index@sha256:aece9d7e2e5fe607900a02af44c2547ba3a819588e0caf7f6c923a039d717cdd" + ], + "sizeBytes": 1086222517 + }, + { + "names": [ + "registry.redhat.io/redhat/certified-operator-index@sha256:0ce7f29a7522a0ec9c0c5d1d823afcee4e94b2f7c6887b3524ab7a144fa97641", + "registry.redhat.io/redhat/certified-operator-index@sha256:97b02c5a4c0d0e910eaf0f135be483df3fb6d4958f02061f2b4780667af3b95f", + "registry.redhat.io/redhat/certified-operator-index:v4.20" + ], + "sizeBytes": 1057304769 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d18da9bad39752c9663c80098f2e8ee258cf434ab83e0da96132597867ca9a50" + ], + "sizeBytes": 985190381 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:37f60cf434e82f101eb45c0d28c61b92444e830103113645726ebe907c193e4a" + ], + "sizeBytes": 976668147 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d94306beb1451e42a52df1eaad44328e1d435cd6cf6458e2cb2af36e4beed78" + ], + "sizeBytes": 881490655 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:74f21c787c6e21f6266885e542e4e0a29b5d907918f1f1f87a3370579726c6da" + ], + "sizeBytes": 881464535 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c58fc56284d30b193ead1b0d675fd6f1028a0b174136c40f25e602c095cb5d5d" + ], + "sizeBytes": 861083916 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f4d7e8675f66e1a1ed9b125c1bc3457db8662895a462747f8e23f1a04978d9c0" + ], + "sizeBytes": 861024000 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0523529ce5209e6ce49bc5af8d7fede115dd9e8bdf5d93d30de92c83172c46a6" + ], + "sizeBytes": 651870673 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5173bfa1042b75eedb315a7d690ecdf769ad52ce5e1e7fac9bbb2a477270bc40" + ], + "sizeBytes": 651867098 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 643877659 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0" + ], + "sizeBytes": 628981149 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5d712ff60cc9c24b9a3843ede6685928ae854b270278ecb5369dbd4ff07f495" + ], + "sizeBytes": 622559445 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c87ca3968251e1912e493f811b030dfb94864ce5c72185f4442ce5f53638cae" + ], + "sizeBytes": 622447307 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8464d819fc02d1a5ca0069fe368054dd1490e71d4ea909bbbab64853e9f42f61" + ], + "sizeBytes": 606541524 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bf3a1bab30395d536e1ca717a4f0c21b8fcbbaa1bc738e0d7df09da31c6f904" + ], + "sizeBytes": 595957777 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ef1c22cc81f7d9651705bc4d12c2ac9147a874c717f6137e9e6be5f84cdfb4c8" + ], + "sizeBytes": 541973392 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:28b14c96475f455b972efeb5df2519a4c0a502f0a56f92756f4407b08e27d612" + ], + "sizeBytes": 541969301 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:67afe375350a2f7945c0e99aa0de6f4b33396800dd11c4ed98e35cb896d91418" + ], + "sizeBytes": 541489999 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1cce893b180c7da15d39feda52db5fe7e45943bb3507e402b94754f497bd71ad" + ], + "sizeBytes": 536453507 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2bdcdc249e541e4053958afd72f3aa08dd85e7b8a20a34dc262ce524ad24418" + ], + "sizeBytes": 536449421 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "54fa190a-2d1d-40ae-b004-a63e8a4490d7", + "containerRuntimeVersion": "cri-o://1.33.3-3.rhaos4.20.git22d4f06.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "f99664007ef64e1b8dc3b33ca2c984e2", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250811-0 (Plow)", + "systemUUID": "9c9a51b4-6db7-4d61-81db-399aae1c70a0" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": 
true, + "userNamespaces": true + }, + "name": "runc" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-2-nic\",\"ifaddr\":{\"ipv4\":\"10.0.0.0/17\"},\"capacity\":{\"ip\":255}}]", + "cluster.x-k8s.io/cluster-name": "ci-op-pw3ghqzh-bb5c4-pdhfc", + "cluster.x-k8s.io/cluster-namespace": "openshift-cluster-api-guests", + "cluster.x-k8s.io/labels-from-machine": "", + "cluster.x-k8s.io/machine": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-2\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-master-2\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.0.5/17\"]", + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-master-2\",\"mac-address\":\"60:45:bd:2e:65:5a\",\"ip-addresses\":[\"10.0.0.5/17\"],\"ip-address\":\"10.0.0.5/17\",\"next-hops\":[\"10.0.0.1\"],\"next-hop\":\"10.0.0.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "f7295ac4-9211-4fca-b102-f55c939d8fa9", + "k8s.ovn.org/node-encap-ips": "[\"10.0.0.5\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.4/16\"}}", + "k8s.ovn.org/node-id": "4", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.0.5/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.128.0.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.4/16\"}", + "k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": "rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/desiredConfig": "rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/desiredDrain": "uncordon-rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/lastAppliedDrain": "uncordon-rendered-master-505a1e08a37430cbe1ee421928f810ec", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/post-config-action": "", + "machineconfiguration.openshift.io/reason": "", + "machineconfiguration.openshift.io/state": "Done", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T18:50:20Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D8s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-1", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/instance-type": "Standard_D8s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": 
"centralus-1", + "topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-1" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "resourceVersion": "109098", + "uid": "f5192552-1d63-4b20-861e-9d68bf2387ad" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + } + ] + }, + "status": { + "addresses": [ + { + "address": "10.0.0.5", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-master-2", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "7500m", + "ephemeral-storage": "940839717763", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "31710692Ki", + "pods": "250" + }, + "capacity": { + "cpu": "8", + "ephemeral-storage": "1022041516Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "32861668Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:16:54Z", + "lastTransitionTime": "2025-08-19T21:05:28Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:54Z", + "lastTransitionTime": "2025-08-19T21:05:28Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:54Z", + "lastTransitionTime": "2025-08-19T21:05:28Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:54Z", + "lastTransitionTime": "2025-08-19T21:05:42Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:264b95d9da49e98b0e95d475a54502a5f6a038aed1223785efe4d548c0c627b9" + ], + "sizeBytes": 2986782136 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:272861fb416f41c0fb4113b61a3f82d6585563e60bba992a818681dbc25336bf" + ], + "sizeBytes": 2986615212 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f44ccf8dfafac1d76d63f088d5489aa2c80c500090b54d163a1313ea29e59c70" + ], + "sizeBytes": 1709750180 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-operator-index@sha256:4fe73e654adf2754ab50aa04018350c5b58702234d9a57e54e81a8534e801743", + "registry.redhat.io/redhat/redhat-operator-index@sha256:e71eb3e1c4204bfe30b96c35209ac1951e16340c72d240d162ddc4ed696a75ac", + "registry.redhat.io/redhat/redhat-operator-index:v4.20" + ], + "sizeBytes": 1693120212 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "registry.redhat.io/redhat/community-operator-index@sha256:44dd276765cbac4b9d437d57621b9fa908804afe4c09a1f67b3ad6eb735638f9", + "registry.redhat.io/redhat/community-operator-index@sha256:5753fbfe051f907bb9f4762a87fdb4f5b9b76f349fda44e923d1b36bd8602501", + "registry.redhat.io/redhat/community-operator-index:v4.20" + ], + "sizeBytes": 1207034567 + }, + { + "names": [ + "registry.redhat.io/redhat/redhat-marketplace-index@sha256:0fb6e453e1514188640de2ba3ba7a81234b55ca31c3b9e27005950453951e220", + "registry.redhat.io/redhat/redhat-marketplace-index@sha256:3b21b0bae2dcd659840e001171ccf79518662d5851e8ee38cd9926d699b1c2a9", + "registry.redhat.io/redhat/redhat-marketplace-index:v4.20" + ], + "sizeBytes": 1086465212 + }, + { + "names": [ + "registry.redhat.io/redhat/certified-operator-index@sha256:0ce7f29a7522a0ec9c0c5d1d823afcee4e94b2f7c6887b3524ab7a144fa97641", + "registry.redhat.io/redhat/certified-operator-index@sha256:97b02c5a4c0d0e910eaf0f135be483df3fb6d4958f02061f2b4780667af3b95f", + "registry.redhat.io/redhat/certified-operator-index:v4.20" + ], + "sizeBytes": 1057304769 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69923e2fd3aaee33dec35fb7bfc89157d1fac108fcbbe59940b937e00bd96aaa" + ], + "sizeBytes": 994228126 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d18da9bad39752c9663c80098f2e8ee258cf434ab83e0da96132597867ca9a50" + ], + "sizeBytes": 985190381 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:37f60cf434e82f101eb45c0d28c61b92444e830103113645726ebe907c193e4a" + ], + "sizeBytes": 976668147 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d94306beb1451e42a52df1eaad44328e1d435cd6cf6458e2cb2af36e4beed78" + ], + "sizeBytes": 881490655 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:74f21c787c6e21f6266885e542e4e0a29b5d907918f1f1f87a3370579726c6da" + ], + "sizeBytes": 881464535 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b1deb65135994266b490bddea69f1401910155554b9e5188815f4c46f1aadc85" + ], + "sizeBytes": 879740749 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c58fc56284d30b193ead1b0d675fd6f1028a0b174136c40f25e602c095cb5d5d" + ], + "sizeBytes": 861083916 + }, + { + 
"names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f4d7e8675f66e1a1ed9b125c1bc3457db8662895a462747f8e23f1a04978d9c0" + ], + "sizeBytes": 861024000 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0523529ce5209e6ce49bc5af8d7fede115dd9e8bdf5d93d30de92c83172c46a6" + ], + "sizeBytes": 651870673 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5173bfa1042b75eedb315a7d690ecdf769ad52ce5e1e7fac9bbb2a477270bc40" + ], + "sizeBytes": 651867098 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 643877659 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0" + ], + "sizeBytes": 628981149 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85e2cde9ed16506e5d4d9d8de7a5151d54a3af9dfbcf7df1d67e2d08d1fad3a8" + ], + "sizeBytes": 624130975 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5d712ff60cc9c24b9a3843ede6685928ae854b270278ecb5369dbd4ff07f495" + ], + "sizeBytes": 622559445 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c87ca3968251e1912e493f811b030dfb94864ce5c72185f4442ce5f53638cae" + ], + "sizeBytes": 622447307 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8464d819fc02d1a5ca0069fe368054dd1490e71d4ea909bbbab64853e9f42f61" + ], + "sizeBytes": 606541524 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8bf3a1bab30395d536e1ca717a4f0c21b8fcbbaa1bc738e0d7df09da31c6f904" + ], + "sizeBytes": 595957777 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0ee1ace4f3575674782918fc8f7be381fe0c9db6ef8566d4add09efd1be13d2" + ], + "sizeBytes": 574057402 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afeff223e2c8673e6b3d7bf215b6f93aecfa296864e4f57ceacdec0953c65440" + ], + "sizeBytes": 554351596 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ef1c22cc81f7d9651705bc4d12c2ac9147a874c717f6137e9e6be5f84cdfb4c8" + ], + "sizeBytes": 541973392 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:28b14c96475f455b972efeb5df2519a4c0a502f0a56f92756f4407b08e27d612" + ], + "sizeBytes": 541969301 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "4a9ad8d5-45b4-4d19-aadd-3f85acbcc532", + "containerRuntimeVersion": "cri-o://1.33.3-5.rhaos4.20.giteaa7ec8.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "5a426222a37443f982cf7a07d821be09", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250815-0 (Plow)", + "systemUUID": "265cec44-fc03-46fc-b628-b255008df58d" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "runc" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb-nic\",\"ifaddr\":{\"ipv4\":\"10.0.128.0/17\"},\"capacity\":{\"ip\":255}}]", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.128.5/17\"]", + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb\",\"mac-address\":\"00:22:48:48:ff:43\",\"ip-addresses\":[\"10.0.128.5/17\"],\"ip-address\":\"10.0.128.5/17\",\"next-hops\":[\"10.0.128.1\"],\"next-hop\":\"10.0.128.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "23e0424b-91c1-45b3-82cf-bc40c9f6203d", + "k8s.ovn.org/node-encap-ips": "[\"10.0.128.5\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.5/16\"}}", + "k8s.ovn.org/node-id": "5", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.128.5/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.131.0.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.5/16\"}", + 
"k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastAppliedDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/post-config-action": "", + "machineconfiguration.openshift.io/reason": "", + "machineconfiguration.openshift.io/state": "Done", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T19:05:11Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D4s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-1", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/worker": "", + "node.kubernetes.io/instance-type": "Standard_D4s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": "centralus-1", + "topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-1" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "resourceVersion": "108674", + "uid": "2edc82f2-0dbd-432b-81a1-f65fe3eac362" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb" + }, + "status": { + "addresses": [ + { + "address": "10.0.128.5", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "3500m", + "ephemeral-storage": "122076772149", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "15221260Ki", + "pods": "250" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "133626860Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16372236Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:15:12Z", + "lastTransitionTime": "2025-08-19T21:03:48Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:15:12Z", + "lastTransitionTime": "2025-08-19T21:03:48Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:15:12Z", + "lastTransitionTime": "2025-08-19T21:03:48Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + 
"status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:15:12Z", + "lastTransitionTime": "2025-08-19T21:04:03Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2739ce537eb29d18f87ff657a3c6468918177c8ea4fc7167338756fdebf731b7" + ], + "sizeBytes": 1273083298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fd2b2d012de7acc128df3c74ab1f242f5f352fd991c83a664fb90bfbbdc1b93" + ], + "sizeBytes": 1273043885 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d94306beb1451e42a52df1eaad44328e1d435cd6cf6458e2cb2af36e4beed78" + ], + "sizeBytes": 881490655 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b1deb65135994266b490bddea69f1401910155554b9e5188815f4c46f1aadc85" + ], + "sizeBytes": 879740749 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 
643877659 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0" + ], + "sizeBytes": 628981149 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:99def7dd7ccfae606d06c73db1d693d9444752e0b8440ac962971e90b4fc20f1" + ], + "sizeBytes": 616644251 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5cfc255b4820593e5523da9b32398dc9fc1e960f8b013b9c4c362a8362132204" + ], + "sizeBytes": 616640164 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:67afe375350a2f7945c0e99aa0de6f4b33396800dd11c4ed98e35cb896d91418" + ], + "sizeBytes": 541489999 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1cce893b180c7da15d39feda52db5fe7e45943bb3507e402b94754f497bd71ad" + ], + "sizeBytes": 536453507 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2bdcdc249e541e4053958afd72f3aa08dd85e7b8a20a34dc262ce524ad24418" + ], + "sizeBytes": 536449421 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0cacb79e191c31ce99dc1d29b7fd27b0bfdf346f58ead65b65f042e9ff34b51" + ], + "sizeBytes": 514655222 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bfb44839f085256f11eec65b1805579a08afafa7af76ef1df9e79d06fd4825df" + ], + "sizeBytes": 514651119 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7848070645f103e7b291ae589d0d4bc9f44644f9fafb636ce6bd49113df595cf" + ], + "sizeBytes": 513923987 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:728a324a24d27e68e511b6ff47f454a48479fd425d0e981f475fb788ea1e62c6" + ], + "sizeBytes": 513919899 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5620a464a2e36078b86cf18591138c70da234ff10fb20fe35cee0b8df122dee2" + ], + "sizeBytes": 510130372 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:20da344fe6302981459c8b0368439215a552e82b77793bdc1e8799ea49db8a2a" + ], + "sizeBytes": 510126269 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:948b37b94af00516efb52ba676de32d0e010d0c6a2545d5dd2f3a19af850b9cb" + ], + "sizeBytes": 496592243 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8412f40fa6a47fa89c5fa293f7ee6d2aa276dd57408dc8b7bd22698fb6b46b51" + ], + "sizeBytes": 496588139 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5260ee2d074b46aac05bca22f15b749693c103fe1918b40a6dbf0ab79d401ea9" + ], + "sizeBytes": 488936437 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3c5db7352957de4d30e0808136d22d935e2d41b13a256ec4e7f5824cb5e848ee" + ], + "sizeBytes": 488932332 + }, + { + "names": [ + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:5739105ef80abefdbd9db5cdfdf80bc2be3b6ad7eef9be2bd01c64045d9a95b0", + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:a93cbb575ff59bc8015602165645204b9ca56e327308b11a3d75b651701db875", + "registry.redhat.io/openshift4/ose-oauth-proxy:latest" + ], + "sizeBytes": 480798661 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b7fe30a90c52e34417941391db11d976859257362d6feae7797d0807e0b650d8" + ], + "sizeBytes": 480333342 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8698aafa8e2df3f9998aedefbecd845817883303c2aebfac07de2af63b3d6239" + ], + "sizeBytes": 480329257 + }, + { + "names": [ + "quay.io/openshift-logging/promtail@sha256:37c4c781fb99ad787118c4ebebc1d81ac6415ba62c4d5965e3852e5721509909", + "quay.io/openshift-logging/promtail@sha256:f0e53a97d686fdab3ae1a4b43176dd0105ce53887b0faba528f3497f7765d26f", + "quay.io/openshift-logging/promtail:v2.9.8" + ], + "sizeBytes": 478481622 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765299ed4baf9342ea68280f95948c032326d29fc5658e302cae54f892589998" + ], + "sizeBytes": 477859920 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:350e1d59af219717d23fae55e257c2ca2c39072e3c2569a3092a88dc29cbd048" + ], + "sizeBytes": 467919993 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0" + ], + "sizeBytes": 467848523 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "3c453d7c-9c2e-4dcf-b888-a944ec3424b6", + "containerRuntimeVersion": "cri-o://1.33.3-5.rhaos4.20.giteaa7ec8.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "b63153a8cf6d4edeb0360f3591bf57e1", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250815-0 (Plow)", + "systemUUID": "d9d7efe8-e852-4f83-872c-14df04178e80" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "runc" + } + ], + "volumesAttached": [ + { + "devicePath": "", + "name": "kubernetes.io/csi/disk.csi.azure.com^/subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/disks/pvc-2af26bb5-3201-49df-b4a7-37baca148744" + } + ], + "volumesInUse": [ + 
"kubernetes.io/csi/disk.csi.azure.com^/subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/disks/pvc-2af26bb5-3201-49df-b4a7-37baca148744" + ] + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p-nic\",\"ifaddr\":{\"ipv4\":\"10.0.128.0/17\"},\"capacity\":{\"ip\":255}}]", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.128.4/17\"]", + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p\",\"mac-address\":\"7c:ed:8d:4c:cd:47\",\"ip-addresses\":[\"10.0.128.4/17\"],\"ip-address\":\"10.0.128.4/17\",\"next-hops\":[\"10.0.128.1\"],\"next-hop\":\"10.0.128.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "30347d47-50e8-42db-a0db-3d604b0ceeed", + "k8s.ovn.org/node-encap-ips": "[\"10.0.128.4\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.6/16\"}}", + "k8s.ovn.org/node-id": "6", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.128.4/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.128.2.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.6/16\"}", + "k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastAppliedDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/post-config-action": "", + "machineconfiguration.openshift.io/reason": "", + "machineconfiguration.openshift.io/state": "Done", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T19:05:11Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D4s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-2", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/worker": "", + "node.kubernetes.io/instance-type": "Standard_D4s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": "centralus-2", + 
"topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-2" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "resourceVersion": "108915", + "uid": "28350b7d-f54a-46a3-9ae5-a75f640695df" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p" + }, + "status": { + "addresses": [ + { + "address": "10.0.128.4", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "3500m", + "ephemeral-storage": "122076772149", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "15221264Ki", + "pods": "250" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "133626860Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16372240Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:16:11Z", + "lastTransitionTime": "2025-08-19T21:09:39Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:11Z", + "lastTransitionTime": "2025-08-19T21:09:39Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:11Z", + "lastTransitionTime": "2025-08-19T21:09:39Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:16:11Z", + "lastTransitionTime": "2025-08-19T21:09:54Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2739ce537eb29d18f87ff657a3c6468918177c8ea4fc7167338756fdebf731b7" + ], + "sizeBytes": 1273083298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fd2b2d012de7acc128df3c74ab1f242f5f352fd991c83a664fb90bfbbdc1b93" + ], + "sizeBytes": 1273043885 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:74f21c787c6e21f6266885e542e4e0a29b5d907918f1f1f87a3370579726c6da" + ], + "sizeBytes": 881464535 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b1deb65135994266b490bddea69f1401910155554b9e5188815f4c46f1aadc85" + ], + "sizeBytes": 879740749 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 643877659 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0" + ], + "sizeBytes": 628981149 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:99def7dd7ccfae606d06c73db1d693d9444752e0b8440ac962971e90b4fc20f1" + ], + "sizeBytes": 616644251 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5cfc255b4820593e5523da9b32398dc9fc1e960f8b013b9c4c362a8362132204" + ], + "sizeBytes": 616640164 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:67afe375350a2f7945c0e99aa0de6f4b33396800dd11c4ed98e35cb896d91418" + ], + "sizeBytes": 541489999 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1cce893b180c7da15d39feda52db5fe7e45943bb3507e402b94754f497bd71ad" + ], + "sizeBytes": 536453507 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2bdcdc249e541e4053958afd72f3aa08dd85e7b8a20a34dc262ce524ad24418" + ], + "sizeBytes": 536449421 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0cacb79e191c31ce99dc1d29b7fd27b0bfdf346f58ead65b65f042e9ff34b51" + ], + "sizeBytes": 514655222 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bfb44839f085256f11eec65b1805579a08afafa7af76ef1df9e79d06fd4825df" + ], + "sizeBytes": 514651119 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7848070645f103e7b291ae589d0d4bc9f44644f9fafb636ce6bd49113df595cf" + ], + "sizeBytes": 513923987 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:728a324a24d27e68e511b6ff47f454a48479fd425d0e981f475fb788ea1e62c6" + ], + "sizeBytes": 513919899 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5620a464a2e36078b86cf18591138c70da234ff10fb20fe35cee0b8df122dee2" + ], + "sizeBytes": 510130372 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:20da344fe6302981459c8b0368439215a552e82b77793bdc1e8799ea49db8a2a" + ], + "sizeBytes": 510126269 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:948b37b94af00516efb52ba676de32d0e010d0c6a2545d5dd2f3a19af850b9cb" + ], + "sizeBytes": 496592243 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8412f40fa6a47fa89c5fa293f7ee6d2aa276dd57408dc8b7bd22698fb6b46b51" + ], + "sizeBytes": 496588139 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5260ee2d074b46aac05bca22f15b749693c103fe1918b40a6dbf0ab79d401ea9" + ], + "sizeBytes": 488936437 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3c5db7352957de4d30e0808136d22d935e2d41b13a256ec4e7f5824cb5e848ee" + ], + "sizeBytes": 488932332 + }, + { + "names": [ + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:5739105ef80abefdbd9db5cdfdf80bc2be3b6ad7eef9be2bd01c64045d9a95b0", + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:a93cbb575ff59bc8015602165645204b9ca56e327308b11a3d75b651701db875", + "registry.redhat.io/openshift4/ose-oauth-proxy:latest" + ], + "sizeBytes": 480798661 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b7fe30a90c52e34417941391db11d976859257362d6feae7797d0807e0b650d8" + ], + "sizeBytes": 480333342 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8698aafa8e2df3f9998aedefbecd845817883303c2aebfac07de2af63b3d6239" + ], + "sizeBytes": 480329257 + }, + { + "names": [ + "quay.io/openshift-logging/promtail@sha256:37c4c781fb99ad787118c4ebebc1d81ac6415ba62c4d5965e3852e5721509909", + "quay.io/openshift-logging/promtail@sha256:f0e53a97d686fdab3ae1a4b43176dd0105ce53887b0faba528f3497f7765d26f", + 
"quay.io/openshift-logging/promtail:v2.9.8" + ], + "sizeBytes": 478481622 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d387c35d5c3a811f21397e687cd0e8dde2a69d67dbb5dc96a17328c554fb05d7" + ], + "sizeBytes": 477864021 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765299ed4baf9342ea68280f95948c032326d29fc5658e302cae54f892589998" + ], + "sizeBytes": 477859920 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4036581f35e9d0b27d1a071b1f04d3ae37fc39f59b5feedef7752ccdb3e42ffd" + ], + "sizeBytes": 467924094 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "84d357d5-3a85-4581-a9bc-caac0e0d1c74", + "containerRuntimeVersion": "cri-o://1.33.3-5.rhaos4.20.giteaa7ec8.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "379e960d54c74a26a9e5f6c2a0878e92", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250815-0 (Plow)", + "systemUUID": "6d559cee-f8f4-4468-b065-f9c5df32200f" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "runc" + } + ], + "volumesAttached": [ + { + "devicePath": "", + "name": "kubernetes.io/csi/disk.csi.azure.com^/subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/disks/pvc-abf85965-c32a-480e-9a57-cfbcc47f6339" + } + ], + "volumesInUse": [ + "kubernetes.io/csi/disk.csi.azure.com^/subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/disks/pvc-abf85965-c32a-480e-9a57-cfbcc47f6339" + ] + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "cloud.network.openshift.io/egress-ipconfig": "[{\"interface\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl-nic\",\"ifaddr\":{\"ipv4\":\"10.0.128.0/17\"},\"capacity\":{\"ip\":255}}]", + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl\",\"file.csi.azure.com\":\"ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl\"}", + "k8s.ovn.org/host-cidrs": "[\"10.0.128.6/17\"]", + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"br-ex\",\"interface-id\":\"br-ex_ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl\",\"mac-address\":\"7c:ed:8d:4d:74:a3\",\"ip-addresses\":[\"10.0.128.6/17\"],\"ip-address\":\"10.0.128.6/17\",\"next-hops\":[\"10.0.128.1\"],\"next-hop\":\"10.0.128.1\",\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "4d52310a-f2a8-40d6-aa95-cf3c7099e9ad", + "k8s.ovn.org/node-encap-ips": "[\"10.0.128.6\"]", + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": "{\"default\":{\"ipv4\":\"100.64.0.7/16\"}}", + "k8s.ovn.org/node-id": "7", + "k8s.ovn.org/node-masquerade-subnet": "{\"ipv4\":\"169.254.0.0/17\",\"ipv6\":\"fd69::/112\"}", + "k8s.ovn.org/node-primary-ifaddr": "{\"ipv4\":\"10.0.128.6/17\"}", + "k8s.ovn.org/node-subnets": "{\"default\":[\"10.129.2.0/23\"]}", + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.7/16\"}", + "k8s.ovn.org/remote-zone-migrated": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + 
"k8s.ovn.org/zone-name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + "machine.openshift.io/machine": "openshift-machine-api/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + "machineconfiguration.openshift.io/controlPlaneTopology": "HighlyAvailable", + "machineconfiguration.openshift.io/currentConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredConfig": "rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/desiredDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastAppliedDrain": "uncordon-rendered-worker-2b807ab49e9d8a7040a5bcc4085e31c1", + "machineconfiguration.openshift.io/lastObservedServerCAAnnotation": "false", + "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion": "101203", + "machineconfiguration.openshift.io/post-config-action": "", + "machineconfiguration.openshift.io/reason": "", + "machineconfiguration.openshift.io/state": "Done", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2025-08-19T19:05:15Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/instance-type": "Standard_D4s_v3", + "beta.kubernetes.io/os": "linux", + "failure-domain.beta.kubernetes.io/region": "centralus", + "failure-domain.beta.kubernetes.io/zone": "centralus-3", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/worker": "", + "node.kubernetes.io/instance-type": "Standard_D4s_v3", + "node.openshift.io/os_id": "rhel", + "topology.disk.csi.azure.com/zone": "centralus-3", + "topology.kubernetes.io/region": "centralus", + "topology.kubernetes.io/zone": "centralus-3" + }, + "name": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + "resourceVersion": "109139", + "uid": "1d485a28-7afc-4263-bda8-6dac7f058f03" + }, + "spec": { + "providerID": "azure:///subscriptions/72e3a972-58b0-4afc-bd4f-da89b39ccebd/resourceGroups/ci-op-pw3ghqzh-bb5c4-pdhfc-rg/providers/Microsoft.Compute/virtualMachines/ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl" + }, + "status": { + "addresses": [ + { + "address": "10.0.128.6", + "type": "InternalIP" + }, + { + "address": "ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "3500m", + "ephemeral-storage": "122076772149", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "15221312Ki", + "pods": "250" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "133626860Ki", + "hugepages-1Gi": "0", + "hugepages-2Mi": "0", + "memory": "16372288Ki", + "pods": "250" + }, + "conditions": [ + { + "lastHeartbeatTime": "2025-08-19T22:17:06Z", + "lastTransitionTime": "2025-08-19T21:15:19Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:17:06Z", + "lastTransitionTime": "2025-08-19T21:15:19Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2025-08-19T22:17:06Z", + "lastTransitionTime": "2025-08-19T21:15:19Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": 
"2025-08-19T22:17:06Z", + "lastTransitionTime": "2025-08-19T21:15:37Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "features": { + "supplementalGroupsPolicy": true + }, + "images": [ + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d9095f0b1eb1e56bb3c9cdf399f53ebe872f959e31ce5c96d627b20df32a307" + ], + "sizeBytes": 1593262602 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e95cd4fffde9168ea297fe85a6f64eb4999b4ca3bb2b2f87e5b065a63103a2b4" + ], + "sizeBytes": 1593262087 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73d151dca489c6ef4cbab9bdb959ec79a1988709e3dd84a3f2220a2ecd19860c" + ], + "sizeBytes": 1275183258 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:227fa53363859ae17840a982ef7e437aa69f54e9136a733060a89cb5d8c0bfb9" + ], + "sizeBytes": 1275179169 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2739ce537eb29d18f87ff657a3c6468918177c8ea4fc7167338756fdebf731b7" + ], + "sizeBytes": 1273083298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fd2b2d012de7acc128df3c74ab1f242f5f352fd991c83a664fb90bfbbdc1b93" + ], + "sizeBytes": 1273043885 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0ad67a8fbcddda0d4fef4a88824d1141509f0e0cb7eaa53af1fe90b4d887f35" + ], + "sizeBytes": 1256608647 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:789b229aaae1498cb467998a763ba0b7fe60cc994bedc2ef8749f10ce1082d84" + ], + "sizeBytes": 1055523933 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:445b8bb6878f6904d8a9ac40bda8779a4a4b3148b2efdef1a7cce5984a2931cb" + ], + "sizeBytes": 1055475298 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54ad5c267492adef5b00abfd2ff7df9a9d7aaf016693ffd146ce5d94e92c321d" + ], + "sizeBytes": 974082778 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:59e9437d40eaa8ccee2e6d551105dcf545bb4607d2bf6387c45aadde82277218" + ], + "sizeBytes": 974078676 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d94306beb1451e42a52df1eaad44328e1d435cd6cf6458e2cb2af36e4beed78" + ], + "sizeBytes": 881490655 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b1deb65135994266b490bddea69f1401910155554b9e5188815f4c46f1aadc85" + ], + "sizeBytes": 879740749 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:96113b4aaafb3e2099bf3b9f712072234ea67f5c074c0155546fb51a778bf1d0" + ], + "sizeBytes": 726597841 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:586eaf866881bb875a56d415dac76a642ddde27f01a07b9015d393efcaae0991" + ], + "sizeBytes": 726593742 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98d86b726bd27541dda18823237803a63ad1d47efd8146ea1a49e4d8f2b4bc88" + ], + "sizeBytes": 703092589 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ade543d232b5a017e881ac8405a81c4288033e686caedb1704359968522e44e" + ], + "sizeBytes": 702622782 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9285872530c1b711e66a60a75626930321f3a3b96c83d565b8d3f66a5235b230" + ], + "sizeBytes": 643877659 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:289a088c56fa1eeb73344c4690bbb4901a1e5a8e2ab3e9874a69359932795bba" + ], + "sizeBytes": 643877135 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a00a11fbbe3f6705ffbd13148a929674f0763e4e6e73fd543887d9147841fe6" + ], + "sizeBytes": 632082319 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:da9bfe286c50dedcd44a9ea34691e216a1265ad84cc4ab7b7b6267a1070309b8" + ], + "sizeBytes": 632078229 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0" + ], + "sizeBytes": 628981149 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b2f07545a00fcce5f743d57926fef00202072f7d8363961624f48c0043c2d33" + ], + "sizeBytes": 628976543 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a2fbe32318847312ef48e9f922bc7c8085e5710c48352e31efac47b5a522780" + ], + "sizeBytes": 589720179 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f9fd6ac23c7b2aa3431c11845d8e838f7f28573d8888acf960916204ff67da6b" + ], + "sizeBytes": 589716606 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a76a972374c89ab1d87e81fd6ab72d672f62d7b7c2f09cdc69a837e97ae43a46" + ], + "sizeBytes": 571490300 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4bb282dad29b1d36fcba1247eeb8460823fccdf2425a61d22b2680e2ee734bee" + ], + "sizeBytes": 571486199 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:afe2947612e849727161ee4e6e5492fb18cc8e1ccc45a20380e853ea242a1065" + ], + "sizeBytes": 541494090 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:67afe375350a2f7945c0e99aa0de6f4b33396800dd11c4ed98e35cb896d91418" + ], + "sizeBytes": 541489999 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1cce893b180c7da15d39feda52db5fe7e45943bb3507e402b94754f497bd71ad" + ], + "sizeBytes": 536453507 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2bdcdc249e541e4053958afd72f3aa08dd85e7b8a20a34dc262ce524ad24418" + ], + "sizeBytes": 536449421 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0cacb79e191c31ce99dc1d29b7fd27b0bfdf346f58ead65b65f042e9ff34b51" + ], + "sizeBytes": 514655222 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bfb44839f085256f11eec65b1805579a08afafa7af76ef1df9e79d06fd4825df" + ], + "sizeBytes": 514651119 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7848070645f103e7b291ae589d0d4bc9f44644f9fafb636ce6bd49113df595cf" + ], + "sizeBytes": 513923987 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:728a324a24d27e68e511b6ff47f454a48479fd425d0e981f475fb788ea1e62c6" + ], + "sizeBytes": 513919899 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5620a464a2e36078b86cf18591138c70da234ff10fb20fe35cee0b8df122dee2" + ], + "sizeBytes": 510130372 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:20da344fe6302981459c8b0368439215a552e82b77793bdc1e8799ea49db8a2a" + ], + "sizeBytes": 510126269 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8412f40fa6a47fa89c5fa293f7ee6d2aa276dd57408dc8b7bd22698fb6b46b51" + ], + "sizeBytes": 496588139 + }, + { + "names": [ + 
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5260ee2d074b46aac05bca22f15b749693c103fe1918b40a6dbf0ab79d401ea9" + ], + "sizeBytes": 488936437 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3c5db7352957de4d30e0808136d22d935e2d41b13a256ec4e7f5824cb5e848ee" + ], + "sizeBytes": 488932332 + }, + { + "names": [ + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:5739105ef80abefdbd9db5cdfdf80bc2be3b6ad7eef9be2bd01c64045d9a95b0", + "registry.redhat.io/openshift4/ose-oauth-proxy@sha256:a93cbb575ff59bc8015602165645204b9ca56e327308b11a3d75b651701db875", + "registry.redhat.io/openshift4/ose-oauth-proxy:latest" + ], + "sizeBytes": 480798661 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b7fe30a90c52e34417941391db11d976859257362d6feae7797d0807e0b650d8" + ], + "sizeBytes": 480333342 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8698aafa8e2df3f9998aedefbecd845817883303c2aebfac07de2af63b3d6239" + ], + "sizeBytes": 480329257 + }, + { + "names": [ + "quay.io/openshift-logging/promtail@sha256:37c4c781fb99ad787118c4ebebc1d81ac6415ba62c4d5965e3852e5721509909", + "quay.io/openshift-logging/promtail@sha256:f0e53a97d686fdab3ae1a4b43176dd0105ce53887b0faba528f3497f7765d26f", + "quay.io/openshift-logging/promtail:v2.9.8" + ], + "sizeBytes": 478481622 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d387c35d5c3a811f21397e687cd0e8dde2a69d67dbb5dc96a17328c554fb05d7" + ], + "sizeBytes": 477864021 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765299ed4baf9342ea68280f95948c032326d29fc5658e302cae54f892589998" + ], + "sizeBytes": 477859920 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4036581f35e9d0b27d1a071b1f04d3ae37fc39f59b5feedef7752ccdb3e42ffd" + ], + "sizeBytes": 467924094 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:350e1d59af219717d23fae55e257c2ca2c39072e3c2569a3092a88dc29cbd048" + ], + "sizeBytes": 467919993 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30453ce537781b695dda30554a9b0288d18e29690b1e1b14f405a1876728e8a0" + ], + "sizeBytes": 467848523 + }, + { + "names": [ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd03b21139b2921710987d9a617bf49e5d05825d6e9df66768259e7076c6d53a" + ], + "sizeBytes": 467844422 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "2458541e-c462-4b6c-97d0-e23ee48b7f8b", + "containerRuntimeVersion": "cri-o://1.33.3-5.rhaos4.20.giteaa7ec8.el9", + "kernelVersion": "5.14.0-570.35.1.el9_6.x86_64", + "kubeProxyVersion": "", + "kubeletVersion": "v1.33.3", + "machineID": "8a45e8ce37294a9ababdb66611679bd6", + "operatingSystem": "linux", + "osImage": "Red Hat Enterprise Linux CoreOS 9.6.20250815-0 (Plow)", + "systemUUID": "44081a3f-ad77-46e2-afc7-ba8def651bc5" + }, + "runtimeHandlers": [ + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "crun" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "" + }, + { + "features": { + "recursiveReadOnlyMounts": true, + "userNamespaces": true + }, + "name": "runc" + } + ] + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "" + } +} diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.detailed-output b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.detailed-output new file mode 100644 index 0000000000..c17117741e --- /dev/null +++ 
b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.detailed-output @@ -0,0 +1,55 @@ += Control Plane = +Assessment: Progressing - Slow +Target Version: 4.20.0-0.nightly-2025-08-19-180353 (from 4.20.0-0.nightly-2025-08-12-153542) +Updating: machine-config +Completion: 97% (33 operators updated, 1 updating, 0 waiting) +Duration: 2h3m (Est. Time Remaining: <10m) +Operator Health: 31 Healthy, 3 Available but degraded + +Updating Cluster Operators +NAME SINCE REASON MESSAGE +machine-config 41m7s - Working towards 4.20.0-0.nightly-2025-08-19-180353 + +Control Plane Nodes +NAME ASSESSMENT PHASE VERSION EST MESSAGE +ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 Degraded Updating 4.20.0-0.nightly-2025-08-12-153542 ? failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time="2025-08-19T22:13:54Z" level=warning msg="Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349->168.63.129.16:53: i/o timeout" +time="2025-08-19T22:14:05Z" level=warning msg="Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467->168.63.129.16:53: i/o timeout" +time="2025-08-19T22:14:16Z" level=warning msg="Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244->168.63.129.16:53: i/o timeout" +Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get "https://quay.io/v2/": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400->168.63.129.16:53: i/o timeout +: exit status 125] +ci-op-pw3ghqzh-bb5c4-pdhfc-master-0 Outdated Pending 4.20.0-0.nightly-2025-08-12-153542 ? 
+ci-op-pw3ghqzh-bb5c4-pdhfc-master-2 Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - + += Worker Upgrade = + +WORKER POOL ASSESSMENT COMPLETION STATUS +worker Completed 100% (3/3) 3 Available, 0 Progressing, 0 Draining + +Worker Pool Nodes: worker +NAME ASSESSMENT PHASE VERSION EST MESSAGE +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - + += Update Health = +Message: Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is degraded + Since: - + Level: Error + Impact: Update Stalled + Reference: https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#understanding-the-machine-config-operator + Resources: + nodes: ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 + Description: failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time="2025-08-19T22:13:54Z" level=warning msg="Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349->168.63.129.16:53: i/o timeout" + , time="2025-08-19T22:14:05Z" level=warning msg="Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467->168.63.129.16:53: i/o timeout" + , time="2025-08-19T22:14:16Z" level=warning msg="Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244->168.63.129.16:53: i/o timeout" + , Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get "https://quay.io/v2/": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400->168.63.129.16:53: i/o timeout + , : exit status 125] + +Message: Cluster Operator machine-config is degraded (RequiredPoolsFailed) + Since: 8m55s + Level: Warning + Impact: API Availability + Reference: https://github.com/openshift/runbooks/blob/master/alerts/cluster-monitoring-operator/ClusterOperatorDegraded.md + Resources: + clusteroperators.config.openshift.io: machine-config + Description: Unable to apply 4.20.0-0.nightly-2025-08-19-180353: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 3, ready 1, updated: 1, unavailable: 1, reason: Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: "Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 upgrade failure. failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\"2025-08-19T21:22:32Z\" level=warning msg=\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:38553->168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T21:22:43Z\" level=warning msg=\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40258->168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T21:22:54Z\" level=warning msg=\"Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:58117->168.63.129.16:53: i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:35905->168.63.129.16:53: i/o timeout\n: exit status 125]", Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is reporting: "failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time=\"2025-08-19T21:22:32Z\" level=warning msg=\"Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:38553->168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T21:22:43Z\" level=warning msg=\"Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40258->168.63.129.16:53: i/o timeout\"\ntime=\"2025-08-19T21:22:54Z\" level=warning msg=\"Failed, retrying in 1s ... (3/3). 
Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \\\"https://quay.io/v2/\\\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:58117->168.63.129.16:53: i/o timeout\"\nError: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:35905->168.63.129.16:53: i/o timeout\n: exit status 125]")] diff --git a/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.output b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.output new file mode 100644 index 0000000000..172257777b --- /dev/null +++ b/pkg/cli/admin/upgrade/status/examples/ocpbugs-60768.output @@ -0,0 +1,35 @@ += Control Plane = +Assessment: Progressing - Slow +Target Version: 4.20.0-0.nightly-2025-08-19-180353 (from 4.20.0-0.nightly-2025-08-12-153542) +Updating: machine-config +Completion: 97% (33 operators updated, 1 updating, 0 waiting) +Duration: 2h3m (Est. Time Remaining: <10m) +Operator Health: 31 Healthy, 3 Available but degraded + +Control Plane Nodes +NAME ASSESSMENT PHASE VERSION EST MESSAGE +ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 Degraded Updating 4.20.0-0.nightly-2025-08-12-153542 ? failed to run command nice (6 tries): [timed out waiting for the condition, running nice -- ionice -c 3 podman pull -q --authfile /var/lib/kubelet/config.json quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0 failed: time="2025-08-19T22:13:54Z" level=warning msg="Failed, retrying in 1s ... (1/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:40349->168.63.129.16:53: i/o timeout" +time="2025-08-19T22:14:05Z" level=warning msg="Failed, retrying in 1s ... (2/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:36467->168.63.129.16:53: i/o timeout" +time="2025-08-19T22:14:16Z" level=warning msg="Failed, retrying in 1s ... (3/3). Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get \"https://quay.io/v2/\": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:44244->168.63.129.16:53: i/o timeout" +Error: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:56bab0cd6a692bb7021640889ca69a11d6fc6df1af210c4e24d4feef81c6d7b0: pinging container registry quay.io: Get "https://quay.io/v2/": dial tcp: lookup quay.io on 168.63.129.16:53: read udp 10.0.0.6:48400->168.63.129.16:53: i/o timeout +: exit status 125] +ci-op-pw3ghqzh-bb5c4-pdhfc-master-0 Outdated Pending 4.20.0-0.nightly-2025-08-12-153542 ? 
+ci-op-pw3ghqzh-bb5c4-pdhfc-master-2 Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - + += Worker Upgrade = + +WORKER POOL ASSESSMENT COMPLETION STATUS +worker Completed 100% (3/3) 3 Available, 0 Progressing, 0 Draining + +Worker Pool Nodes: worker +NAME ASSESSMENT PHASE VERSION EST MESSAGE +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus1-dlxfb Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus2-84x5p Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - +ci-op-pw3ghqzh-bb5c4-pdhfc-worker-centralus3-rc5fl Completed Updated 4.20.0-0.nightly-2025-08-19-180353 - + += Update Health = +SINCE LEVEL IMPACT MESSAGE +- Error Update Stalled Node ci-op-pw3ghqzh-bb5c4-pdhfc-master-1 is degraded +8m55s Warning API Availability Cluster Operator machine-config is degraded (RequiredPoolsFailed) + +Run with --details=health for additional description and links to related online documentation