diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index b798084cd1..a5deb48953 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -174,7 +174,7 @@ jobs:
       - name: Scan SBOM
         id: scan
         if: ${{ !inputs.dry_run }}
-        uses: anchore/scan-action@1638637db639e0ade3258b51db49a9a137574c3e # v6.5.1
+        uses: anchore/scan-action@f6601287cdb1efc985d6b765bbf99cb4c0ac29d8 # v7.0.0
         with:
           sbom: "sbom-${{ inputs.image }}.json"
           only-fixed: true
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ab8fbe1e87..604f69682f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -186,7 +186,7 @@ jobs:
           echo "GOCACHE=${{ github.workspace }}/.gocache" >> $GITHUB_ENV

       - name: Create/Update Draft
-        uses: lucacome/draft-release@00f74370c044c322da6cb52acc707d62c7762c71 # v1.2.4
+        uses: lucacome/draft-release@fd099feb33710d1fa27b915a08a7acd6a1fb7fd2 # v2.0.0
         with:
           minor-label: "enhancement"
           major-label: "change"
@@ -199,16 +199,16 @@ jobs:

       - name: Download Syft
         if: ${{ inputs.is_production_release }}
-        uses: anchore/sbom-action/download-syft@da167eac915b4e86f08b264dbdbc867b61be6f0c # v0.20.5
+        uses: anchore/sbom-action/download-syft@f8bdd1d8ac5e901a77a92f111440fdb1b593736b # v0.20.6

       - name: Install Cosign
         if: ${{ inputs.is_production_release }}
-        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
+        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0

       - name: Build binary
         uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
         with:
-          version: v2.12.0 # renovate: datasource=github-tags depName=goreleaser/goreleaser
+          version: v2.12.1 # renovate: datasource=github-tags depName=goreleaser/goreleaser
           args: ${{ (inputs.is_production_release && (inputs.dry_run == false || inputs.dry_run == null)) && 'release' || 'build --snapshot' }} --clean
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml
index 6286011c1b..0e564f6c05 100644
--- a/.github/workflows/conformance.yml
+++ b/.github/workflows/conformance.yml
@@ -99,7 +99,7 @@ jobs:
       - name: Build binary
         uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
         with:
-          version: v2.12.0 # renovate: datasource=github-tags depName=goreleaser/goreleaser
+          version: v2.12.1 # renovate: datasource=github-tags depName=goreleaser/goreleaser
           args: build --single-target --snapshot --clean
         env:
           TELEMETRY_ENDPOINT: "" # disables sending telemetry
diff --git a/.github/workflows/functional.yml b/.github/workflows/functional.yml
index b76d83231c..073d6ffc11 100644
--- a/.github/workflows/functional.yml
+++ b/.github/workflows/functional.yml
@@ -83,7 +83,7 @@ jobs:
       - name: Build binary
         uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
         with:
-          version: v2.12.0 # renovate: datasource=github-tags depName=goreleaser/goreleaser
+          version: v2.12.1 # renovate: datasource=github-tags depName=goreleaser/goreleaser
           args: build --single-target --snapshot --clean
         env:
           TELEMETRY_ENDPOINT: otel-collector-opentelemetry-collector.collector.svc.cluster.local:4317
diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml
index d96b8b5746..a175447a1c 100644
--- a/.github/workflows/release-pr.yml
+++ b/.github/workflows/release-pr.yml
@@ -36,7 +36,7 @@ jobs:

       - name: Get Release Notes
         id: notes
-        uses: lucacome/draft-release@00f74370c044c322da6cb52acc707d62c7762c71 # v1.2.4
+        uses: lucacome/draft-release@fd099feb33710d1fa27b915a08a7acd6a1fb7fd2 # v2.0.0
         with:
           config-path: .github/release-notes.yml
           dry-run: true
diff --git a/README.md b/README.md
index ba14f780f8..03ceb4a09c 100644
--- a/README.md
+++ b/README.md
@@ -68,7 +68,7 @@ The following table lists the software versions NGINX Gateway Fabric supports.

 | NGINX Gateway Fabric | Gateway API | Kubernetes | NGINX OSS | NGINX Plus | NGINX Agent |
 |----------------------|-------------|------------|-----------|------------|-------------|
-| Edge                 | 1.3.0       | 1.25+      | 1.29.1    | R35        | v3.3.0      |
+| Edge                 | 1.3.0       | 1.25+      | 1.29.1    | R35        | v3.3.1      |
 | 2.1.1                | 1.3.0       | 1.25+      | 1.29.1    | R35        | v3.2.1      |
 | 2.1.0                | 1.3.0       | 1.25+      | 1.29.1    | R35        | v3.2.1      |
 | 2.0.2                | 1.3.0       | 1.25+      | 1.28.0    | R34        | v3.0.1      |
diff --git a/build/Dockerfile.nginx b/build/Dockerfile.nginx
index 83c7e066ae..378e8c909b 100644
--- a/build/Dockerfile.nginx
+++ b/build/Dockerfile.nginx
@@ -7,7 +7,7 @@ ADD --link --chown=101:1001 https://cs.nginx.com/static/keys/nginx_signing.rsa.p
 FROM nginx:1.29.1-alpine-otel

 # renovate: datasource=github-tags depName=nginx/agent
-ARG NGINX_AGENT_VERSION=v3.3.0
+ARG NGINX_AGENT_VERSION=v3.3.1
 ARG NJS_DIR
 ARG NGINX_CONF_DIR
 ARG BUILD_AGENT
diff --git a/build/Dockerfile.nginxplus b/build/Dockerfile.nginxplus
index a33adefb0f..b92dc19516 100644
--- a/build/Dockerfile.nginxplus
+++ b/build/Dockerfile.nginxplus
@@ -8,7 +8,7 @@ FROM alpine:3.22
 ARG NGINX_PLUS_VERSION=R35

 # renovate: datasource=github-tags depName=nginx/agent
-ARG NGINX_AGENT_VERSION=v3.3.0
+ARG NGINX_AGENT_VERSION=v3.3.1
 ARG NJS_DIR
 ARG NGINX_CONF_DIR
 ARG BUILD_AGENT
diff --git a/charts/nginx-gateway-fabric/README.md b/charts/nginx-gateway-fabric/README.md
index 4f2541107c..eb7f3ce114 100644
--- a/charts/nginx-gateway-fabric/README.md
+++ b/charts/nginx-gateway-fabric/README.md
@@ -207,7 +207,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri
 | `certGenerator.ttlSecondsAfterFinished` | How long to wait after the cert generator job has finished before it is removed by the job controller. | int | `30` |
 | `clusterDomain` | The DNS cluster domain of your Kubernetes cluster. | string | `"cluster.local"` |
 | `gateways` | A list of Gateway objects. View https://gateway-api.sigs.k8s.io/reference/spec/#gateway for full Gateway reference. | list | `[]` |
-| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"autoscaling":{"enable":false},"config":{},"container":{"hostPorts":[],"lifecycle":{},"readinessProbe":{},"resources":{},"volumeMounts":[]},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","nginxOneConsole":{"dataplaneKeySecretName":"","endpointHost":"agent.connect.nginx.com","endpointPort":443,"skipVerify":false},"patches":[],"plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"patches":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","resolver":"","secretName":"nplus-license","skipVerify":false}}` |
+| `nginx` | The nginx section contains the configuration for all NGINX data plane deployments installed by the NGINX Gateway Fabric control plane. | object | `{"autoscaling":{"enable":false},"config":{},"container":{"hostPorts":[],"lifecycle":{},"readinessProbe":{},"resources":{},"volumeMounts":[]},"debug":false,"image":{"pullPolicy":"Always","repository":"ghcr.io/nginx/nginx-gateway-fabric/nginx","tag":"edge"},"imagePullSecret":"","imagePullSecrets":[],"kind":"deployment","nginxOneConsole":{"dataplaneKeySecretName":"","endpointHost":"agent.connect.nginx.com","endpointPort":443,"skipVerify":false},"patches":[],"plus":false,"pod":{},"replicas":1,"service":{"externalTrafficPolicy":"Local","loadBalancerClass":"","loadBalancerIP":"","loadBalancerSourceRanges":[],"nodePorts":[],"patches":[],"type":"LoadBalancer"},"usage":{"caSecretName":"","clientSSLSecretName":"","endpoint":"","enforceInitialReport":true,"resolver":"","secretName":"nplus-license","skipVerify":false}}` |
 | `nginx.autoscaling` | Autoscaling configuration for the NGINX data plane. | object | `{"enable":false}` |
 | `nginx.autoscaling.enable` | Enable or disable Horizontal Pod Autoscaler for the NGINX data plane. | bool | `false` |
 | `nginx.config` | The configuration for the data plane that is contained in the NginxProxy resource. This is applied globally to all Gateways managed by this instance of NGINX Gateway Fabric. | object | `{}` |
@@ -241,6 +241,7 @@ The following table lists the configurable parameters of the NGINX Gateway Fabri
 | `nginx.usage.caSecretName` | The name of the Secret containing the NGINX Instance Manager CA certificate. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` |
 | `nginx.usage.clientSSLSecretName` | The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `""` |
 | `nginx.usage.endpoint` | The endpoint of the NGINX Plus usage reporting server. Default: product.connect.nginx.com | string | `""` |
+| `nginx.usage.enforceInitialReport` | Enable enforcement of the initial NGINX Plus licensing report. If set to false, the initial report is not enforced. | bool | `true` |
 | `nginx.usage.resolver` | The nameserver used to resolve the NGINX Plus usage reporting endpoint. Used with NGINX Instance Manager. | string | `""` |
 | `nginx.usage.secretName` | The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway). | string | `"nplus-license"` |
 | `nginx.usage.skipVerify` | Disable client verification of the NGINX Plus usage reporting server certificate. | bool | `false` |
diff --git a/charts/nginx-gateway-fabric/templates/deployment.yaml b/charts/nginx-gateway-fabric/templates/deployment.yaml
index 9be1b13f16..5bc292bdb4 100644
--- a/charts/nginx-gateway-fabric/templates/deployment.yaml
+++ b/charts/nginx-gateway-fabric/templates/deployment.yaml
@@ -72,6 +72,9 @@ spec:
         {{- if .Values.nginx.usage.clientSSLSecretName }}
         - --usage-report-client-ssl-secret={{ .Values.nginx.usage.clientSSLSecretName }}
         {{- end }}
+        {{- if hasKey .Values.nginx.usage "enforceInitialReport" }}
+        - --usage-report-enforce-initial-report={{ .Values.nginx.usage.enforceInitialReport }}
+        {{- end }}
         {{- end }}
         {{- if .Values.nginxGateway.metrics.enable }}
         - --metrics-port={{ .Values.nginxGateway.metrics.port }}
diff --git a/charts/nginx-gateway-fabric/values.schema.json b/charts/nginx-gateway-fabric/values.schema.json
index ca5d339d44..9f44991db3 100644
--- a/charts/nginx-gateway-fabric/values.schema.json
+++ b/charts/nginx-gateway-fabric/values.schema.json
@@ -692,6 +692,13 @@
           "title": "endpoint",
           "type": "string"
         },
+        "enforceInitialReport": {
+          "default": true,
+          "description": "Enable enforcement of the initial NGINX Plus licensing report. If set to false, the initial report is not enforced.",
+          "required": [],
+          "title": "enforceInitialReport",
+          "type": "boolean"
+        },
         "resolver": {
           "default": "",
           "description": "The nameserver used to resolve the NGINX Plus usage reporting endpoint. Used with NGINX Instance Manager.",
diff --git a/charts/nginx-gateway-fabric/values.yaml b/charts/nginx-gateway-fabric/values.yaml
index 044e0f2d37..52f1e03e55 100644
--- a/charts/nginx-gateway-fabric/values.yaml
+++ b/charts/nginx-gateway-fabric/values.yaml
@@ -337,6 +337,9 @@ nginx:
     # Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).
     clientSSLSecretName: ""

+    # -- Enable enforcement of the initial NGINX Plus licensing report. If set to false, the initial report is not enforced.
+    enforceInitialReport: true
+
    # @schema
    # type: object
    # properties:
diff --git a/cmd/gateway/commands.go b/cmd/gateway/commands.go
index 9f83cbcfb3..f334d499a7 100644
--- a/cmd/gateway/commands.go
+++ b/cmd/gateway/commands.go
@@ -42,6 +42,17 @@ const (
 	nginxOneTelemetryEndpointHost = "agent.connect.nginx.com"
 )

+// usageReportParams holds the parameters for building the usage report configuration for NGINX Plus.
+type usageReportParams struct {
+	SecretName           stringValidatingValue
+	ClientSSLSecretName  stringValidatingValue
+	CASecretName         stringValidatingValue
+	Endpoint             stringValidatingValue
+	Resolver             stringValidatingValue
+	SkipVerify           bool
+	EnforceInitialReport bool
+}
+
 func createRootCommand() *cobra.Command {
 	rootCmd := &cobra.Command{
 		Use:   "gateway",
@@ -58,31 +69,32 @@ func createRootCommand() *cobra.Command {
 func createControllerCommand() *cobra.Command {
 	// flag names
 	const (
-		configFlag                        = "config"
-		serviceFlag                       = "service"
-		agentTLSSecretFlag                = "agent-tls-secret"
-		nginxOneDataplaneKeySecretFlag    = "nginx-one-dataplane-key-secret" //nolint:gosec // not credentials
-		nginxOneTelemetryEndpointHostFlag = "nginx-one-telemetry-endpoint-host"
-		nginxOneTelemetryEndpointPortFlag = "nginx-one-telemetry-endpoint-port"
-		nginxOneTLSSkipVerifyFlag         = "nginx-one-tls-skip-verify"
-		metricsDisableFlag                = "metrics-disable"
-		metricsSecureFlag                 = "metrics-secure-serving"
-		metricsPortFlag                   = "metrics-port"
-		healthDisableFlag                 = "health-disable"
-		healthPortFlag                    = "health-port"
-		leaderElectionDisableFlag         = "leader-election-disable"
-		leaderElectionLockNameFlag        = "leader-election-lock-name"
-		productTelemetryDisableFlag       = "product-telemetry-disable"
-		gwAPIExperimentalFlag             = "gateway-api-experimental-features"
-		nginxDockerSecretFlag             = "nginx-docker-secret" //nolint:gosec // not credentials
-		usageReportSecretFlag             = "usage-report-secret"
-		usageReportEndpointFlag           = "usage-report-endpoint"
-		usageReportResolverFlag           = "usage-report-resolver"
-		usageReportSkipVerifyFlag         = "usage-report-skip-verify"
-		usageReportClientSSLSecretFlag    = "usage-report-client-ssl-secret" //nolint:gosec // not credentials
-		usageReportCASecretFlag           = "usage-report-ca-secret" //nolint:gosec // not credentials
-		snippetsFiltersFlag               = "snippets-filters"
-		nginxSCCFlag                      = "nginx-scc"
+		configFlag                          = "config"
+		serviceFlag                         = "service"
+		agentTLSSecretFlag                  = "agent-tls-secret"
+		nginxOneDataplaneKeySecretFlag      = "nginx-one-dataplane-key-secret" //nolint:gosec // not credentials
+		nginxOneTelemetryEndpointHostFlag   = "nginx-one-telemetry-endpoint-host"
+		nginxOneTelemetryEndpointPortFlag   = "nginx-one-telemetry-endpoint-port"
+		nginxOneTLSSkipVerifyFlag           = "nginx-one-tls-skip-verify"
+		metricsDisableFlag                  = "metrics-disable"
+		metricsSecureFlag                   = "metrics-secure-serving"
+		metricsPortFlag                     = "metrics-port"
+		healthDisableFlag                   = "health-disable"
+		healthPortFlag                      = "health-port"
+		leaderElectionDisableFlag           = "leader-election-disable"
+		leaderElectionLockNameFlag          = "leader-election-lock-name"
+		productTelemetryDisableFlag         = "product-telemetry-disable"
+		gwAPIExperimentalFlag               = "gateway-api-experimental-features"
+		nginxDockerSecretFlag               = "nginx-docker-secret" //nolint:gosec // not credentials
+		usageReportSecretFlag               = "usage-report-secret"
+		usageReportEndpointFlag             = "usage-report-endpoint"
+		usageReportResolverFlag             = "usage-report-resolver"
+		usageReportSkipVerifyFlag           = "usage-report-skip-verify"
+		usageReportClientSSLSecretFlag      = "usage-report-client-ssl-secret" //nolint:gosec // not credentials
+		usageReportCASecretFlag             = "usage-report-ca-secret" //nolint:gosec // not credentials
+		usageReportEnforceInitialReportFlag = "usage-report-enforce-initial-report"
+		snippetsFiltersFlag                 = "snippets-filters"
+		nginxSCCFlag                        = "nginx-scc"
 	)

 	// flag values
@@ -148,24 +160,26 @@ func createControllerCommand() *cobra.Command {
 		nginxDockerSecrets = stringSliceValidatingValue{
 			validator: validateResourceName,
 		}
-		usageReportSkipVerify bool
-		usageReportSecretName = stringValidatingValue{
+	)
+
+	usageReportParams := usageReportParams{
+		SecretName: stringValidatingValue{
 			validator: validateResourceName,
 			value:     "nplus-license",
-		}
-		usageReportEndpoint = stringValidatingValue{
+		},
+		Endpoint: stringValidatingValue{
 			validator: validateEndpointOptionalPort,
-		}
-		usageReportResolver = stringValidatingValue{
+		},
+		Resolver: stringValidatingValue{
 			validator: validateEndpointOptionalPort,
-		}
-		usageReportClientSSLSecretName = stringValidatingValue{
+		},
+		ClientSSLSecretName: stringValidatingValue{
 			validator: validateResourceName,
-		}
-		usageReportCASecretName = stringValidatingValue{
+		},
+		CASecretName: stringValidatingValue{
 			validator: validateResourceName,
-		}
-	)
+		},
+	}

 	cmd := &cobra.Command{
 		Use: "controller",
@@ -212,18 +226,10 @@ func createControllerCommand() *cobra.Command {
 		}

 		var usageReportConfig config.UsageReportConfig
-		if plus && usageReportSecretName.value == "" {
-			return errors.New("usage-report-secret is required when using NGINX Plus")
-		}
-
 		if plus {
-			usageReportConfig = config.UsageReportConfig{
-				SecretName:          usageReportSecretName.value,
-				ClientSSLSecretName: usageReportClientSSLSecretName.value,
-				CASecretName:        usageReportCASecretName.value,
-				Endpoint:            usageReportEndpoint.value,
-				Resolver:            usageReportResolver.value,
-				SkipVerify:          usageReportSkipVerify,
+			usageReportConfig, err = buildUsageReportConfig(usageReportParams)
+			if err != nil {
+				return err
 			}
 		}
@@ -432,33 +438,33 @@ func createControllerCommand() *cobra.Command {
 	)

 	cmd.Flags().Var(
-		&usageReportSecretName,
+		&usageReportParams.SecretName,
 		usageReportSecretFlag,
 		"The name of the Secret containing the JWT for NGINX Plus usage reporting. Must exist in the same namespace "+
 			"that the NGINX Gateway Fabric control plane is running in (default namespace: nginx-gateway).",
 	)

 	cmd.Flags().Var(
-		&usageReportEndpoint,
+		&usageReportParams.Endpoint,
 		usageReportEndpointFlag,
 		"The endpoint of the NGINX Plus usage reporting server.",
 	)

 	cmd.Flags().Var(
-		&usageReportResolver,
+		&usageReportParams.Resolver,
 		usageReportResolverFlag,
 		"The nameserver used to resolve the NGINX Plus usage reporting endpoint. Used with NGINX Instance Manager.",
 	)

 	cmd.Flags().BoolVar(
-		&usageReportSkipVerify,
+		&usageReportParams.SkipVerify,
 		usageReportSkipVerifyFlag,
 		false,
 		"Disable client verification of the NGINX Plus usage reporting server certificate.",
 	)

 	cmd.Flags().Var(
-		&usageReportClientSSLSecretName,
+		&usageReportParams.ClientSSLSecretName,
 		usageReportClientSSLSecretFlag,
 		"The name of the Secret containing the client certificate and key for authenticating with NGINX Instance Manager. "+
 			"Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in "+
@@ -466,13 +472,20 @@ func createControllerCommand() *cobra.Command {
 	)

 	cmd.Flags().Var(
-		&usageReportCASecretName,
+		&usageReportParams.CASecretName,
 		usageReportCASecretFlag,
 		"The name of the Secret containing the NGINX Instance Manager CA certificate. "+
 			"Must exist in the same namespace that the NGINX Gateway Fabric control plane is running in "+
 			"(default namespace: nginx-gateway).",
 	)

+	cmd.Flags().BoolVar(
+		&usageReportParams.EnforceInitialReport,
+		usageReportEnforceInitialReportFlag,
+		true,
+		"Enable enforcement of the initial NGINX Plus licensing report. If set to false, the initial report is not enforced.",
+	)
+
 	cmd.Flags().BoolVar(
 		&snippetsFilters,
 		snippetsFiltersFlag,
@@ -491,6 +504,22 @@ func createControllerCommand() *cobra.Command {
 	return cmd
 }

+func buildUsageReportConfig(params usageReportParams) (config.UsageReportConfig, error) {
+	if params.SecretName.value == "" {
+		return config.UsageReportConfig{}, errors.New("usage-report-secret is required when using NGINX Plus")
+	}
+
+	return config.UsageReportConfig{
+		SecretName:           params.SecretName.value,
+		ClientSSLSecretName:  params.ClientSSLSecretName.value,
+		CASecretName:         params.CASecretName.value,
+		Endpoint:             params.Endpoint.value,
+		Resolver:             params.Resolver.value,
+		SkipVerify:           params.SkipVerify,
+		EnforceInitialReport: params.EnforceInitialReport,
+	}, nil
+}
+
 func createGenerateCertsCommand() *cobra.Command {
 	// flag names
 	const (
diff --git a/cmd/gateway/commands_test.go b/cmd/gateway/commands_test.go
index 0ecd4111d9..4cb8f0d532 100644
--- a/cmd/gateway/commands_test.go
+++ b/cmd/gateway/commands_test.go
@@ -154,6 +154,7 @@ func TestControllerCmdFlagValidation(t *testing.T) {
 				"--usage-report-resolver=resolver.com",
 				"--usage-report-ca-secret=ca-secret",
 				"--usage-report-client-ssl-secret=client-secret",
+				"--usage-report-enforce-initial-report",
 				"--snippets-filters",
 				"--nginx-scc=nginx-sscc-name",
 				"--nginx-one-dataplane-key-secret=dataplane-key-secret",
@@ -854,3 +855,72 @@ func TestCreateGatewayPodConfig(t *testing.T) {
 	g.Expect(err).To(MatchError(errors.New("environment variable POD_UID not set")))
 	g.Expect(cfg).To(Equal(config.GatewayPodConfig{}))
 }
+
+func TestUsageReportConfig(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name        string
+		params      usageReportParams
+		expected    config.UsageReportConfig
+		expectError bool
+	}{
+		{
+			name: "NGINX Plus enabled with all valid parameters",
+			params: usageReportParams{
+				SecretName:           stringValidatingValue{value: "test-secret"},
+				ClientSSLSecretName:  stringValidatingValue{value: "client-ssl-secret"},
+				CASecretName:         stringValidatingValue{value: "ca-secret"},
+				Endpoint:             stringValidatingValue{value: "example.com"},
+				Resolver:             stringValidatingValue{value: "resolver.com"},
+				SkipVerify:           true,
+				EnforceInitialReport: false,
+			},
+			expectError: false,
+			expected: config.UsageReportConfig{
+				SecretName:           "test-secret",
+				ClientSSLSecretName:  "client-ssl-secret",
+				CASecretName:         "ca-secret",
+				Endpoint:             "example.com",
+				Resolver:             "resolver.com",
+				SkipVerify:           true,
+				EnforceInitialReport: false,
+			},
+		},
+		{
+			name: "NGINX Plus enabled with missing secret",
+			params: usageReportParams{
+				SecretName:           stringValidatingValue{value: ""},
+				ClientSSLSecretName:  stringValidatingValue{value: "client-ssl-secret"},
+				CASecretName:         stringValidatingValue{value: "ca-secret"},
+				Endpoint:             stringValidatingValue{value: "example.com"},
+				Resolver:             stringValidatingValue{value: "resolver.com"},
+				SkipVerify:           true,
+				EnforceInitialReport: false,
+			},
+			expectError: true,
+			expected:    config.UsageReportConfig{},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			result, err := buildUsageReportConfig(tc.params)
+
+			if tc.expectError {
+				if err == nil {
+					t.Errorf("expected an error but got none")
+				}
+			} else {
+				if err != nil {
+					t.Errorf("did not expect an error but got: %v", err)
+				}
+
+				if result != tc.expected {
+					t.Errorf("expected result %+v, but got %+v", tc.expected, result)
+				}
+			}
+		})
+	}
+}
diff --git a/deploy/experimental-nginx-plus/deploy.yaml b/deploy/experimental-nginx-plus/deploy.yaml
index 5b478afd22..c11cbc662d 100644
--- a/deploy/experimental-nginx-plus/deploy.yaml
+++ b/deploy/experimental-nginx-plus/deploy.yaml
@@ -271,6 +271,7 @@ spec:
         - --nginx-docker-secret=nginx-plus-registry-secret
         - --nginx-plus
        - --usage-report-secret=nplus-license
+        - --usage-report-enforce-initial-report=true
        - --metrics-port=9113
        - --health-port=8081
        - --leader-election-lock-name=nginx-gateway-leader-election
diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml
index c82a24e0e9..70f8f34c49 100644
--- a/deploy/nginx-plus/deploy.yaml
+++ b/deploy/nginx-plus/deploy.yaml
@@ -267,6 +267,7 @@ spec:
        - --nginx-docker-secret=nginx-plus-registry-secret
        - --nginx-plus
        - --usage-report-secret=nplus-license
+        - --usage-report-enforce-initial-report=true
        - --metrics-port=9113
        - --health-port=8081
        - --leader-election-lock-name=nginx-gateway-leader-election
diff --git a/deploy/snippets-filters-nginx-plus/deploy.yaml b/deploy/snippets-filters-nginx-plus/deploy.yaml
index 79b8a2bf0f..ed0dccb81f 100644
--- a/deploy/snippets-filters-nginx-plus/deploy.yaml
+++ b/deploy/snippets-filters-nginx-plus/deploy.yaml
@@ -269,6 +269,7 @@ spec:
        - --nginx-docker-secret=nginx-plus-registry-secret
        - --nginx-plus
        - --usage-report-secret=nplus-license
+        - --usage-report-enforce-initial-report=true
        - --metrics-port=9113
        - --health-port=8081
        - --leader-election-lock-name=nginx-gateway-leader-election
diff --git a/docs/proposals/gateway-inference-extension.md b/docs/proposals/gateway-inference-extension.md
index 8107dd0465..fb6abe3ae2 100644
--- a/docs/proposals/gateway-inference-extension.md
+++ b/docs/proposals/gateway-inference-extension.md
@@ -57,81 +57,6 @@ The Go application could be built into the existing `nginx-gateway` binary, and

 See the [Alternatives section](#alternatives) for a future improvement to this workflow.

-### Model Name extraction
-
-When a client sends a request to an AI workload, the desired model name (e.g. gpt-4o, llama, etc.) is included in the request body.
-
-By default, the EPP gets the model name from the request body, and then picks the proper endpoint for that model name. However, the model name could also be provided via header (`X-Gateway-Model-Name`). For example, a user could specify a desire for a traffic split or model name redirect, and therefore NGINX would need to change the model name by setting the header.
-
-Example that redirects requests to model name `food-review` to `food-review-v1`:
-
-```yaml
-kind: HTTPRoute
-apiVersion: gateway.networking.k8s.io/v1
-metadata:
-  name: my-route
-spec:
-  parentRefs:
-  - name: my-inference-gateway
-  rules:
-  - matches:
-    - headers:
-      - type: Exact
-        name: X-Gateway-Model-Name
-        value: food-review
-    backendRefs:
-    - name: vllm-llama3-8b-instruct
-      kind: InferencePool
-      group: inference.networking.x-k8s.io
-    filters:
-    - type: RequestHeaderModifier
-      requestHeaderModifier:
-        set:
-        - name: X-Gateway-Model-Name
-          value: food-review-v1
-```
-
-Example with traffic splitting:
-
-```yaml
-kind: HTTPRoute
-apiVersion: gateway.networking.k8s.io/v1
-metadata:
-  name: my-route
-spec:
-  parentRefs:
-  - name: my-inference-gateway
-  rules:
-  - matches:
-    - headers:
-      - type: Exact
-        name: X-Gateway-Model-Name
-        value: food-review
-    backendRefs:
-    - name: vllm-llama3-8b-instruct
-      kind: InferencePool
-      group: inference.networking.x-k8s.io
-      weight: 90
-      filters:
-      - type: RequestHeaderModifier
-        requestHeaderModifier:
-          set:
-          - name: X-Gateway-Model-Name
-            value: food-review-v1
-    - name: vllm-llama3-8b-instruct
-      kind: InferencePool
-      group: inference.networking.x-k8s.io
-      weight: 10
-      filters:
-      - type: RequestHeaderModifier
-        requestHeaderModifier:
-          set:
-          - name: X-Gateway-Model-Name
-            value: food-review-v2
-```
-
-In both cases, NGINX would need to extract the model name from the request body. This will probably require an NJS module. If that model name matches the condition set in the Route, then NGINX sets the header appropriately when sending the request to the EPP. For the redirect example, NGINX would set the header to `food-review-v1`. For the traffic splitting example, NGINX would set the header to either `food-review-v1` or `food-review-v2` depending on the weighted traffic decision.
-
 ### Managing InferencePools

 By default, the EPP should know which endpoints are a part of an InferencePool, and then pick the correct endpoint to send to. This means that NGINX does not need to have an upstream for the AI workload servers, since it just gets the endpoint it needs to send to from the EPP.
@@ -140,7 +65,9 @@ However, there could still be a valid use case for NGF to track and configure NG

 Because of this, NGF should watch the endpoints associated with an InferencePool, and create an upstream. One way to accomplish this is for NGF to create a Headless "shadow" Service that encompasses those endpoints. By defining this Service, NGF can use all of its existing Service/EndpointSlice logic to build the upstreams as if it was a normal Service.

-**The main point of concern with this is how can we fallback to use the upstream servers if the EPP is unavailable to give us an endpoint?** This may have to be discovered during implementation.
+#### Status
+
+Status conditions also need to be set on the InferencePool resources, per the API spec requirements and recommendations.
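Since the inference extension's status types follow the standard metav1.Condition pattern, the condition handling could reuse the apimachinery helpers NGF already depends on. A minimal sketch under that assumption; the helper name, condition type, and message below are hypothetical illustrations, not part of this proposal:

```go
package graph

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setInferencePoolAccepted is a hypothetical helper: it upserts an Accepted
// condition on an InferencePool's status conditions, deduplicating by type.
func setInferencePoolAccepted(conds *[]metav1.Condition, generation int64) {
	meta.SetStatusCondition(conds, metav1.Condition{
		Type:               "Accepted",
		Status:             metav1.ConditionTrue,
		Reason:             "Accepted",
		Message:            "InferencePool is accepted by the Gateway",
		ObservedGeneration: generation,
	})
}
```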
 
 ### Flow Diagram
@@ -233,4 +160,3 @@ If this Inference Extension feature gains traction and usage, it could be worth
 - [API specification](https://gateway-api-inference-extension.sigs.k8s.io/reference/spec/)
 - [Endpoint Picker](https://github.com/kubernetes-sigs/gateway-api-inference-extension/tree/main/pkg/epp)
 - [Endpoint Picker and InferencePool Helm Chart](https://github.com/kubernetes-sigs/gateway-api-inference-extension/tree/main/config/charts/inferencepool)
-- [Traffic splitting/redirect doc](https://docs.google.com/document/d/1s4U4T_cjQkk4UeIDyAJl2Ox6FZoBigXBXn9Ai0qV7As/edit?tab=t.0#heading=h.9re863ochpnv)
diff --git a/go.mod b/go.mod
index a36ac987dd..0b35914cbb 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/go-logr/logr v1.4.3
 	github.com/google/go-cmp v0.7.0
 	github.com/google/uuid v1.6.0
-	github.com/nginx/agent/v3 v3.3.0
+	github.com/nginx/agent/v3 v3.3.1
 	github.com/nginx/telemetry-exporter v0.1.4
 	github.com/onsi/ginkgo/v2 v2.25.3
 	github.com/onsi/gomega v1.38.2
diff --git a/go.sum b/go.sum
index 112a8f7a20..b8716c1de8 100644
--- a/go.sum
+++ b/go.sum
@@ -35,8 +35,8 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
-github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
+github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
 github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
 github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
@@ -137,8 +137,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/nginx/agent/v3 v3.3.0 h1:dybKbWK9NhaPi0Au+A8TuashL8RbapjYX2NcBKPWsDo=
-github.com/nginx/agent/v3 v3.3.0/go.mod h1:xPZ9XbZD4MLc/e5rbULdfH0kPyZp8D0GK5fJhMb16Eo=
+github.com/nginx/agent/v3 v3.3.1 h1:ji5c0bzrP35+qSb5UbkoMwkYTQ6IL3jJsXED6mMczOQ=
+github.com/nginx/agent/v3 v3.3.1/go.mod h1:wTMWYVUnc+YdXSUCwR2uzg5iWOsKYC9ZiPhqgj61IcU=
 github.com/nginx/telemetry-exporter v0.1.4 h1:3ikgKlyz/O57oaBLkxCInMjr74AhGTKr9rHdRAkkl/w=
 github.com/nginx/telemetry-exporter v0.1.4/go.mod h1:bl6qmsxgk4a9D0X8R5E3sUNXN2iECPEK1JNbRLhN5C4=
 github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o=
@@ -179,8 +179,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
-github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
-github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
+github.com/shirou/gopsutil/v4 v4.25.7 h1:bNb2JuqKuAu3tRlPv5piSmBZyMfecwQ+t/ILq+1JqVM=
+github.com/shirou/gopsutil/v4 v4.25.7/go.mod h1:XV/egmwJtd3ZQjBpJVY5kndsiOO4IRqy9TQnmm6VP7U=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
@@ -206,10 +206,10 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/testcontainers/testcontainers-go v0.36.0 h1:YpffyLuHtdp5EUsI5mT4sRw8GZhO/5ozyDT1xWGXt00=
 github.com/testcontainers/testcontainers-go v0.36.0/go.mod h1:yk73GVJ0KUZIHUtFna6MO7QS144qYpoY8lEEtU9Hed0=
-github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
-github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
+github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
+github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
+github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
diff --git a/internal/controller/config/config.go b/internal/controller/config/config.go
index ff6744b101..e23f73ca59 100644
--- a/internal/controller/config/config.go
+++ b/internal/controller/config/config.go
@@ -125,6 +125,8 @@ type UsageReportConfig struct {
 	Resolver string
 	// SkipVerify controls whether the nginx verifies the server certificate.
 	SkipVerify bool
+	// EnforceInitialReport controls whether the initial NGINX Plus licensing report is enforced.
+	EnforceInitialReport bool
 }

 // Flags contains the NGF command-line flag names and values.
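With EnforceInitialReport now part of config.UsageReportConfig, its end-to-end effect is easiest to see by rendering the mgmt template this diff extends further down (main_config_template.go). The snippet below is an illustrative sketch, not code from this PR; the template text is a trimmed stand-in for the real mainConfigTemplate:

```go
package main

import (
	"os"
	"text/template"
)

// mgmtTemplateSketch is a cut-down version of the mgmt block template,
// keeping only the directive this PR adds.
const mgmtTemplateSketch = `mgmt {
    license_token {{ .LicenseTokenFile }};
    {{- if not .EnforceInitialReport }}
    enforce_initial_report off;
    {{- end }}
}
`

func main() {
	cfg := struct {
		LicenseTokenFile     string
		EnforceInitialReport bool
	}{
		LicenseTokenFile:     "/etc/nginx/license.jwt",
		EnforceInitialReport: false, // i.e. --usage-report-enforce-initial-report=false
	}

	tmpl := template.Must(template.New("mgmt").Parse(mgmtTemplateSketch))
	// Prints a mgmt block containing "enforce_initial_report off;".
	_ = tmpl.Execute(os.Stdout, cfg)
}
```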
diff --git a/internal/controller/handler.go b/internal/controller/handler.go
index 8613de046b..903ffd2112 100644
--- a/internal/controller/handler.go
+++ b/internal/controller/handler.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"net"
 	"strings"
 	"sync"
 	"time"
@@ -508,6 +509,14 @@ func getGatewayAddresses(
 		addresses = append(addresses, gwSvc.Spec.ClusterIP)
 	}

+	for _, address := range gateway.Source.Spec.Addresses {
+		if address.Type != nil &&
+			*address.Type == gatewayv1.IPAddressType &&
+			net.ParseIP(address.Value) != nil {
+			addresses = append(addresses, address.Value)
+		}
+	}
+
 	gwAddresses := make([]gatewayv1.GatewayStatusAddress, 0, len(addresses)+len(hostnames))
 	for _, addr := range addresses {
 		statusAddr := gatewayv1.GatewayStatusAddress{
diff --git a/internal/controller/handler_test.go b/internal/controller/handler_test.go
index a9fa942c27..ec9fe05848 100644
--- a/internal/controller/handler_test.go
+++ b/internal/controller/handler_test.go
@@ -543,6 +543,18 @@ var _ = Describe("getGatewayAddresses", func() {
 			Name:      "gateway",
 			Namespace: "test",
 		},
+		Spec: gatewayv1.GatewaySpec{
+			Addresses: []gatewayv1.GatewaySpecAddress{
+				{
+					Type:  helpers.GetPointer(gatewayv1.IPAddressType),
+					Value: "192.0.2.1",
+				},
+				{
+					Type:  helpers.GetPointer(gatewayv1.IPAddressType),
+					Value: "192.0.2.3",
+				},
+			},
+		},
 	},
 }

@@ -580,9 +592,11 @@ var _ = Describe("getGatewayAddresses", func() {

 		addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx")
 		Expect(err).ToNot(HaveOccurred())
-		Expect(addrs).To(HaveLen(2))
+		Expect(addrs).To(HaveLen(4))
 		Expect(addrs[0].Value).To(Equal("34.35.36.37"))
-		Expect(addrs[1].Value).To(Equal("myhost"))
+		Expect(addrs[1].Value).To(Equal("192.0.2.1"))
+		Expect(addrs[2].Value).To(Equal("192.0.2.3"))
+		Expect(addrs[3].Value).To(Equal("myhost"))

 		Expect(fakeClient.Delete(context.Background(), &svc)).To(Succeed())

 		// Create ClusterIP Service
@@ -601,8 +615,10 @@ var _ = Describe("getGatewayAddresses", func() {

 		addrs, err = getGatewayAddresses(context.Background(), fakeClient, &svc, gateway, "nginx")
 		Expect(err).ToNot(HaveOccurred())
-		Expect(addrs).To(HaveLen(1))
+		Expect(addrs).To(HaveLen(3))
 		Expect(addrs[0].Value).To(Equal("12.13.14.15"))
+		Expect(addrs[1].Value).To(Equal("192.0.2.1"))
+		Expect(addrs[2].Value).To(Equal("192.0.2.3"))
 	})
 })
diff --git a/internal/controller/nginx/agent/agent_test.go b/internal/controller/nginx/agent/agent_test.go
index 5351fbb944..b0646ee939 100644
--- a/internal/controller/nginx/agent/agent_test.go
+++ b/internal/controller/nginx/agent/agent_test.go
@@ -66,7 +66,8 @@ func TestUpdateConfig(t *testing.T) {
 			updater.UpdateConfig(deployment, []File{file})

 			g.Expect(fakeBroadcaster.SendCallCount()).To(Equal(1))
-			g.Expect(deployment.GetFile(file.Meta.Name, file.Meta.Hash)).To(Equal(file.Contents))
+			fileContents, _ := deployment.GetFile(file.Meta.Name, file.Meta.Hash)
+			g.Expect(fileContents).To(Equal(file.Contents))

 			if test.expErr {
 				g.Expect(deployment.GetLatestConfigError()).To(Equal(testErr))
diff --git a/internal/controller/nginx/agent/command.go b/internal/controller/nginx/agent/command.go
index 046fb7f313..cc89a47fd5 100644
--- a/internal/controller/nginx/agent/command.go
+++ b/internal/controller/nginx/agent/command.go
@@ -155,6 +155,8 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error
 	channels := broadcaster.Subscribe()
 	defer broadcaster.CancelSubscription(channels.ID)

+	var pendingBroadcastRequest *broadcast.NginxAgentMessage
+
 	for {
 		// When a message is received over the ListenCh, it is assumed and required that the
 		// deployment object is already LOCKED. This lock is acquired by the event handler before calling
@@ -191,6 +193,10 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error

 				return grpcStatus.Error(codes.Internal, err.Error())
 			}
+
+			// Track this broadcast request to distinguish it from initial config operations.
+			// Only broadcast operations should signal ResponseCh for coordination.
+			pendingBroadcastRequest = &msg
 		case err = <-msgr.Errors():
 			cs.logger.Error(err, "connection error", "pod", conn.PodName)
 			deployment.SetPodErrorStatus(conn.PodName, err)
@@ -198,6 +204,9 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error
 			case channels.ResponseCh <- struct{}{}:
 			default:
 			}
+			if pendingBroadcastRequest != nil {
+				cs.logger.V(1).Info("Connection error during pending request, operation failed")
+			}

 			if errors.Is(err, io.EOF) {
 				return grpcStatus.Error(codes.Aborted, err.Error())
@@ -215,7 +224,15 @@ func (cs *commandService) Subscribe(in pb.CommandService_SubscribeServer) error
 			} else {
 				deployment.SetPodErrorStatus(conn.PodName, nil)
 			}
-			channels.ResponseCh <- struct{}{}
+
+			// Signal broadcast completion only for tracked broadcast operations.
+			// Initial config responses are ignored to prevent spurious success messages.
+			if pendingBroadcastRequest != nil {
+				pendingBroadcastRequest = nil
+				channels.ResponseCh <- struct{}{}
+			} else {
+				cs.logger.V(1).Info("Received response for non-broadcast request (likely initial config)", "pod", conn.PodName)
+			}
 		}
 	}
 }
@@ -265,6 +282,9 @@ func (cs *commandService) setInitialConfig(
 	defer deployment.FileLock.Unlock()

 	fileOverviews, configVersion := deployment.GetFileOverviews()
+
+	cs.logger.Info("Sending initial configuration to agent", "pod", conn.PodName, "configVersion", configVersion)
+
 	if err := msgr.Send(ctx, buildRequest(fileOverviews, conn.InstanceID, configVersion)); err != nil {
 		cs.logAndSendErrorStatus(deployment, conn, err)

@@ -348,9 +368,11 @@ func (cs *commandService) waitForInitialConfigApply(
 			res := msg.GetCommandResponse()
 			if res.GetStatus() != pb.CommandResponse_COMMAND_STATUS_OK {
 				applyErr := fmt.Errorf("msg: %s; error: %s", res.GetMessage(), res.GetError())
+				cs.logger.V(1).Info("Received initial config response with error", "error", applyErr)
 				return applyErr, nil
 			}

+			cs.logger.V(1).Info("Received successful initial config response")
 			return applyErr, connectionErr
 		}
 	}
diff --git a/internal/controller/nginx/agent/command_test.go b/internal/controller/nginx/agent/command_test.go
index 7b8b763011..634cc0eddb 100644
--- a/internal/controller/nginx/agent/command_test.go
+++ b/internal/controller/nginx/agent/command_test.go
@@ -340,25 +340,16 @@ func TestSubscribe(t *testing.T) {

 	mockServer := newMockSubscribeServer(ctx)

-	// put the requests on the listenCh for the Subscription loop to pick up
+	// Define the broadcast messages to be sent later
 	loopFile := &pb.File{
 		FileMeta: &pb.FileMeta{
 			Name: "some-other.conf",
 			Hash: "56789",
 		},
 	}
-	listenCh <- broadcast.NginxAgentMessage{
-		Type:          broadcast.ConfigApplyRequest,
-		FileOverviews: []*pb.File{loopFile},
-	}
-
 	loopAction := &pb.NGINXPlusAction{
 		Action: &pb.NGINXPlusAction_UpdateStreamServers{},
 	}
-	listenCh <- broadcast.NginxAgentMessage{
-		Type:            broadcast.APIRequest,
-		NGINXPlusAction: loopAction,
-	}

 	// start the Subscriber
 	errCh := make(chan error)
@@ -366,6 +357,9 @@ func TestSubscribe(t *testing.T) {
 		errCh <- cs.Subscribe(mockServer)
 	}()

+	// PHASE 1: Initial config is sent by setInitialConfig() BEFORE the event loop starts
+	// These should NOT signal ResponseCh as they're not broadcast operations
+
 	// ensure that the initial config file was sent when the Subscription connected
 	expFile := &pb.File{
 		FileMeta: &pb.FileMeta{
 		},
 	}
 	ensureFileWasSent(g, mockServer, expFile)
+	// Respond to initial config - this should NOT signal ResponseCh
 	mockServer.recvChan <- &pb.DataPlaneResponse{
 		CommandResponse: &pb.CommandResponse{
 			Status: pb.CommandResponse_COMMAND_STATUS_OK,
@@ -382,22 +377,40 @@ func TestSubscribe(t *testing.T) {

 	// ensure that the initial API request was sent when the Subscription connected
 	ensureAPIRequestWasSent(g, mockServer, initialAction)
+	// Respond to initial API request - this should NOT signal ResponseCh
 	mockServer.recvChan <- &pb.DataPlaneResponse{
 		CommandResponse: &pb.CommandResponse{
 			Status: pb.CommandResponse_COMMAND_STATUS_OK,
 		},
 	}

+	// Wait for status queue to be updated after initial config completes
 	g.Eventually(func() string {
 		obj := cs.statusQueue.Dequeue(ctx)
 		return obj.Deployment.Name
 	}).Should(Equal("nginx-deployment"))

-	// ensure the second file was sent in the loop
+	// PHASE 2: Now send broadcast operations to the event loop
+	// Put the broadcast requests on the listenCh for the Subscription loop to pick up
+	listenCh <- broadcast.NginxAgentMessage{
+		Type:          broadcast.ConfigApplyRequest,
+		FileOverviews: []*pb.File{loopFile},
+	}
+
+	// PHASE 2: Broadcast operations from the event loop
+	// These SHOULD signal ResponseCh as they are broadcast operations
+
+	// ensure the broadcast file was sent in the loop
 	ensureFileWasSent(g, mockServer, loopFile)
 	verifyResponse(g, mockServer, responseCh)

-	// ensure the second action was sent in the loop
+	// Send second broadcast operation
+	listenCh <- broadcast.NginxAgentMessage{
+		Type:            broadcast.APIRequest,
+		NGINXPlusAction: loopAction,
+	}
+
+	// ensure the broadcast action was sent in the loop
 	ensureAPIRequestWasSent(g, mockServer, loopAction)
 	verifyResponse(g, mockServer, responseCh)
diff --git a/internal/controller/nginx/agent/deployment.go b/internal/controller/nginx/agent/deployment.go
index 48cb69d5af..7e9aa3aba5 100644
--- a/internal/controller/nginx/agent/deployment.go
+++ b/internal/controller/nginx/agent/deployment.go
@@ -161,14 +161,18 @@ func (d *Deployment) GetNGINXPlusActions() []*pb.NGINXPlusAction {

 // GetFile gets the requested file for the deployment and returns its contents.
 // The deployment FileLock MUST already be locked before calling this function.
-func (d *Deployment) GetFile(name, hash string) []byte {
+func (d *Deployment) GetFile(name, hash string) ([]byte, string) {
+	var fileFoundHash string
 	for _, file := range d.files {
-		if name == file.Meta.GetName() && hash == file.Meta.GetHash() {
-			return file.Contents
+		if name == file.Meta.GetName() {
+			fileFoundHash = file.Meta.GetHash()
+			if hash == file.Meta.GetHash() {
+				return file.Contents, file.Meta.GetHash()
+			}
 		}
 	}

-	return nil
+	return nil, fileFoundHash
 }

 // SetFiles updates the nginx files and fileOverviews for the deployment and returns the message to send.
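GetFile's new second return value is what lets the file service below tell "file missing" apart from "file present with a stale hash". A self-contained sketch of that lookup contract, with fileEntry as a hypothetical stand-in for the deployment's tracked files:

```go
package main

import "fmt"

// fileEntry is a simplified stand-in for the files a Deployment tracks.
type fileEntry struct {
	name     string
	hash     string
	contents []byte
}

// getFile mirrors the Deployment.GetFile contract from the diff: on a name
// match with the wrong hash, it returns nil contents plus the hash it found.
func getFile(files []fileEntry, name, hash string) ([]byte, string) {
	var foundHash string
	for _, f := range files {
		if f.name == name {
			foundHash = f.hash
			if f.hash == hash {
				return f.contents, f.hash
			}
		}
	}
	return nil, foundHash
}

func main() {
	files := []fileEntry{{name: "nginx.conf", hash: "abc", contents: []byte("conf")}}
	if contents, found := getFile(files, "nginx.conf", "stale"); contents == nil && found != "" {
		// This is the hash-mismatch case the file service logs at V(1).
		fmt.Printf("file exists but hash differs: wanted=stale found=%s\n", found)
	}
}
```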
diff --git a/internal/controller/nginx/agent/deployment_test.go b/internal/controller/nginx/agent/deployment_test.go index b3f2744ee6..0560c31600 100644 --- a/internal/controller/nginx/agent/deployment_test.go +++ b/internal/controller/nginx/agent/deployment_test.go @@ -52,11 +52,13 @@ func TestSetAndGetFiles(t *testing.T) { g.Expect(msg.FileOverviews).To(HaveLen(9)) // 1 file + 8 ignored files g.Expect(fileOverviews).To(Equal(msg.FileOverviews)) - file := deployment.GetFile("test.conf", "12345") + file, _ := deployment.GetFile("test.conf", "12345") g.Expect(file).To(Equal([]byte("test content"))) - g.Expect(deployment.GetFile("invalid", "12345")).To(BeNil()) - g.Expect(deployment.GetFile("test.conf", "invalid")).To(BeNil()) + invalidFile, _ := deployment.GetFile("invalid", "12345") + g.Expect(invalidFile).To(BeNil()) + wrongHashFile, _ := deployment.GetFile("test.conf", "invalid") + g.Expect(wrongHashFile).To(BeNil()) // Set the same files again msg = deployment.SetFiles(files) diff --git a/internal/controller/nginx/agent/file.go b/internal/controller/nginx/agent/file.go index ec2a5b1013..fdcdc2ae8f 100644 --- a/internal/controller/nginx/agent/file.go +++ b/internal/controller/nginx/agent/file.go @@ -143,12 +143,22 @@ func (fs *fileService) getFileContents(req *pb.GetFileRequest, connKey string) ( } filename := req.GetFileMeta().GetName() - contents := deployment.GetFile(filename, req.GetFileMeta().GetHash()) + contents, fileFoundHash := deployment.GetFile(filename, req.GetFileMeta().GetHash()) if len(contents) == 0 { + fs.logger.V(1).Info("Error getting file for agent", "file", filename) + if fileFoundHash != "" { + fs.logger.V(1).Info( + "File found had wrong hash", + "hashWanted", + req.GetFileMeta().GetHash(), + "hashFound", + fileFoundHash, + ) + } return nil, status.Errorf(codes.NotFound, "file not found") } - fs.logger.V(1).Info("Getting file for agent", "file", filename) + fs.logger.V(1).Info("Getting file for agent", "file", filename, "fileHash", fileFoundHash) return contents, nil } diff --git a/internal/controller/nginx/config/main_config.go b/internal/controller/nginx/config/main_config.go index 1fb7991225..bd58eabee7 100644 --- a/internal/controller/nginx/config/main_config.go +++ b/internal/controller/nginx/config/main_config.go @@ -55,13 +55,14 @@ func executeEventsConfig(conf dataplane.Configuration) []executeResult { } type mgmtConf struct { - Endpoint string - Resolver string - LicenseTokenFile string - CACertFile string - ClientSSLCertFile string - ClientSSLKeyFile string - SkipVerify bool + Endpoint string + Resolver string + LicenseTokenFile string + CACertFile string + ClientSSLCertFile string + ClientSSLKeyFile string + SkipVerify bool + EnforceInitialReport bool } // generateMgmtFiles generates the NGINX Plus configuration file for the mgmt block. 
As part of this, @@ -88,10 +89,11 @@ func (g GeneratorImpl) generateMgmtFiles(conf dataplane.Configuration) []agent.F files := []agent.File{tokenFile} cfg := mgmtConf{ - Endpoint: g.usageReportConfig.Endpoint, - Resolver: g.usageReportConfig.Resolver, - LicenseTokenFile: tokenFile.Meta.Name, - SkipVerify: g.usageReportConfig.SkipVerify, + Endpoint: g.usageReportConfig.Endpoint, + Resolver: g.usageReportConfig.Resolver, + LicenseTokenFile: tokenFile.Meta.Name, + SkipVerify: g.usageReportConfig.SkipVerify, + EnforceInitialReport: g.usageReportConfig.EnforceInitialReport, } if content, ok := conf.AuxiliarySecrets[graph.PlusReportCACertificate]; ok { diff --git a/internal/controller/nginx/config/main_config_template.go b/internal/controller/nginx/config/main_config_template.go index ae668431a8..21f6a28abd 100644 --- a/internal/controller/nginx/config/main_config_template.go +++ b/internal/controller/nginx/config/main_config_template.go @@ -37,5 +37,8 @@ mgmt { ssl_certificate {{ .ClientSSLCertFile }}; ssl_certificate_key {{ .ClientSSLKeyFile }}; {{- end }} + {{- if not .EnforceInitialReport }} + enforce_initial_report off; + {{- end }} } ` diff --git a/internal/controller/provisioner/objects.go b/internal/controller/provisioner/objects.go index 14b3cbcc72..475a3e7319 100644 --- a/internal/controller/provisioner/objects.go +++ b/internal/controller/provisioner/objects.go @@ -167,7 +167,7 @@ func (p *NginxProvisioner) buildNginxResourceObjects( Annotations: maps.Clone(objectMeta.Annotations), } - service, err := buildNginxService(serviceObjectMeta, nProxyCfg, ports, selectorLabels) + service, err := buildNginxService(serviceObjectMeta, nProxyCfg, ports, selectorLabels, gateway.Spec.Addresses) if err != nil { errs = append(errs, err) } @@ -517,6 +517,7 @@ func buildNginxService( nProxyCfg *graph.EffectiveNginxProxy, ports map[int32]struct{}, selectorLabels map[string]string, + addresses []gatewayv1.GatewaySpecAddress, ) (*corev1.Service, error) { var serviceCfg ngfAPIv1alpha2.ServiceSpec if nProxyCfg != nil && nProxyCfg.Kubernetes != nil && nProxyCfg.Kubernetes.Service != nil { @@ -572,6 +573,8 @@ func buildNginxService( }, } + setSvcExternalIPs(svc, addresses) + setIPFamily(nProxyCfg, svc) setSvcLoadBalancerSettings(serviceCfg, &svc.Spec) @@ -586,6 +589,14 @@ func buildNginxService( return svc, nil } +func setSvcExternalIPs(svc *corev1.Service, addresses []gatewayv1.GatewaySpecAddress) { + for _, address := range addresses { + if address.Type != nil && *address.Type == gatewayv1.IPAddressType { + svc.Spec.ExternalIPs = append(svc.Spec.ExternalIPs, address.Value) + } + } +} + func setIPFamily(nProxyCfg *graph.EffectiveNginxProxy, svc *corev1.Service) { if nProxyCfg != nil && nProxyCfg.IPFamily != nil && *nProxyCfg.IPFamily != ngfAPIv1alpha2.Dual { svc.Spec.IPFamilyPolicy = helpers.GetPointer(corev1.IPFamilyPolicySingleStack) @@ -1113,6 +1124,9 @@ func (p *NginxProvisioner) buildNginxPodTemplateSpec( func (p *NginxProvisioner) buildImage(nProxyCfg *graph.EffectiveNginxProxy) (string, corev1.PullPolicy) { image := defaultNginxImagePath + if p.cfg.Plus { + image = defaultNginxPlusImagePath + } tag := p.cfg.GatewayPodConfig.Version pullPolicy := defaultImagePullPolicy diff --git a/internal/controller/provisioner/objects_test.go b/internal/controller/provisioner/objects_test.go index 4af6f2deca..2327db259d 100644 --- a/internal/controller/provisioner/objects_test.go +++ b/internal/controller/provisioner/objects_test.go @@ -81,6 +81,12 @@ func TestBuildNginxResourceObjects(t *testing.T) { Port: 9999, 
}, }, + Addresses: []gatewayv1.GatewaySpecAddress{ + { + Type: helpers.GetPointer(gatewayv1.IPAddressType), + Value: "192.0.0.2", + }, + }, }, } @@ -185,6 +191,7 @@ func TestBuildNginxResourceObjects(t *testing.T) { TargetPort: intstr.FromInt(9999), }, })) + g.Expect(svc.Spec.ExternalIPs).To(Equal([]string{"192.0.0.2"})) depObj := objects[5] dep, ok := depObj.(*appsv1.Deployment) @@ -498,6 +505,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { cfg: Config{ GatewayPodConfig: &config.GatewayPodConfig{ Namespace: ngfNamespace, + Version: "1.0.0", }, Plus: true, PlusUsageConfig: &config.UsageReportConfig{ @@ -586,6 +594,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_trusted_certificate")) g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate")) g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("ssl_certificate_key")) + g.Expect(cm.Data["mgmt.conf"]).To(ContainSubstring("enforce_initial_report off")) cmObj = objects[5] cm, ok = cmObj.(*corev1.ConfigMap) @@ -611,6 +620,7 @@ func TestBuildNginxResourceObjects_Plus(t *testing.T) { Name: "nginx-plus-usage-certs", MountPath: "/etc/nginx/certs-bootstrap/", })) + g.Expect(container.Image).To(Equal(fmt.Sprintf("%s:1.0.0", defaultNginxPlusImagePath))) } func TestBuildNginxResourceObjects_DockerSecrets(t *testing.T) { diff --git a/internal/controller/state/conditions/conditions.go b/internal/controller/state/conditions/conditions.go index c9909ed271..9fc569aeda 100644 --- a/internal/controller/state/conditions/conditions.go +++ b/internal/controller/state/conditions/conditions.go @@ -840,22 +840,36 @@ func NewGatewayInvalid(msg string) []Condition { } } -// NewGatewayUnsupportedValue returns Conditions that indicate that a field of the Gateway has an unsupported value. -// Unsupported means that the value is not supported by the implementation or invalid. -func NewGatewayUnsupportedValue(msg string) []Condition { - return []Condition{ - { - Type: string(v1.GatewayConditionAccepted), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonUnsupportedValue), - Message: msg, - }, - { - Type: string(v1.GatewayConditionProgrammed), - Status: metav1.ConditionFalse, - Reason: string(GatewayReasonUnsupportedValue), - Message: msg, - }, +// NewGatewayUnsupportedAddress returns a Condition that indicates the Gateway is not accepted because it +// contains an address type that is not supported. +func NewGatewayUnsupportedAddress(msg string) Condition { + return Condition{ + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionFalse, + Reason: string(v1.GatewayReasonUnsupportedAddress), + Message: msg, + } +} + +// NewGatewayUnusableAddress returns a Condition that indicates the Gateway is not programmed because it +// contains an address type that can't be used. +func NewGatewayUnusableAddress(msg string) Condition { + return Condition{ + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionFalse, + Reason: string(v1.GatewayReasonAddressNotUsable), + Message: msg, + } +} + +// NewGatewayAddressNotAssigned returns a Condition that indicates the Gateway is not programmed because it +// has not assigned an address for the Gateway. 
+func NewGatewayAddressNotAssigned(msg string) Condition { + return Condition{ + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionFalse, + Reason: string(v1.GatewayReasonAddressNotAssigned), + Message: msg, } } diff --git a/internal/controller/state/graph/gateway.go b/internal/controller/state/graph/gateway.go index f9e955371c..103634871e 100644 --- a/internal/controller/state/graph/gateway.go +++ b/internal/controller/state/graph/gateway.go @@ -179,11 +179,14 @@ func validateGateway(gw *v1.Gateway, gc *GatewayClass, npCfg *NginxProxy) ([]con conds = append(conds, conditions.NewGatewayInvalid("GatewayClass is invalid")...) } - if len(gw.Spec.Addresses) > 0 { - path := field.NewPath("spec", "addresses") - valErr := field.Forbidden(path, "addresses are not supported") - - conds = append(conds, conditions.NewGatewayUnsupportedValue(valErr.Error())...) + // Set the unaccepted conditions here, because those make the gateway invalid. We set the unprogrammed conditions + // elsewhere, because those do not make the gateway invalid. + for _, address := range gw.Spec.Addresses { + if address.Type == nil { + conds = append(conds, conditions.NewGatewayUnsupportedAddress("AddressType must be specified")) + } else if *address.Type != v1.IPAddressType { + conds = append(conds, conditions.NewGatewayUnsupportedAddress("Only AddressType IPAddress is supported")) + } } // we evaluate validity before validating parametersRef because an invalid parametersRef/NginxProxy does not diff --git a/internal/controller/state/graph/gateway_test.go b/internal/controller/state/graph/gateway_test.go index 5edfce48c6..f31fa1dd3c 100644 --- a/internal/controller/state/graph/gateway_test.go +++ b/internal/controller/state/graph/gateway_test.go @@ -1120,30 +1120,6 @@ func TestBuildGateway(t *testing.T) { }, name: "port/protocol collisions", }, - { - gateway: createGateway( - gatewayCfg{ - name: "gateway1", - listeners: []v1.Listener{foo80Listener1, foo443HTTPSListener1}, - addresses: []v1.GatewaySpecAddress{{}}, - }, - ), - gatewayClass: validGC, - expected: map[types.NamespacedName]*Gateway{ - {Namespace: "test", Name: "gateway1"}: { - Source: getLastCreatedGateway(), - DeploymentName: types.NamespacedName{ - Namespace: "test", - Name: controller.CreateNginxResourceName("gateway1", gcName), - }, - Valid: false, - Conditions: conditions.NewGatewayUnsupportedValue("spec." 
+ - "addresses: Forbidden: addresses are not supported", - ), - }, - }, - name: "gateway addresses are not supported", - }, { gateway: nil, expected: nil, @@ -1484,6 +1460,59 @@ func TestBuildGateway(t *testing.T) { }, name: "invalid gatewayclass and invalid NginxProxy", }, + { + name: "invalid gateway; gateway addresses type unspecified", + gateway: createGateway(gatewayCfg{ + name: "gateway-addr-unspecified", + listeners: []v1.Listener{foo80Listener1}, + addresses: []v1.GatewaySpecAddress{ + { + Value: "198.0.0.1", + }, + }, + }), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway-addr-unspecified"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-addr-unspecified", gcName), + }, + Valid: false, + Conditions: []conditions.Condition{ + conditions.NewGatewayUnsupportedAddress("AddressType must be specified"), + }, + }, + }, + }, + { + name: "invalid gateway; gateway addresses type unsupported", + gateway: createGateway(gatewayCfg{ + name: "gateway-addr-unsupported", + listeners: []v1.Listener{foo80Listener1}, + addresses: []v1.GatewaySpecAddress{ + { + Type: helpers.GetPointer(v1.HostnameAddressType), + Value: "example.com", + }, + }, + }), + gatewayClass: validGC, + expected: map[types.NamespacedName]*Gateway{ + {Namespace: "test", Name: "gateway-addr-unsupported"}: { + Source: getLastCreatedGateway(), + DeploymentName: types.NamespacedName{ + Namespace: "test", + Name: controller.CreateNginxResourceName("gateway-addr-unsupported", gcName), + }, + Valid: false, + Conditions: []conditions.Condition{ + conditions.NewGatewayUnsupportedAddress("Only AddressType IPAddress is supported"), + }, + }, + }, + }, } secretResolver := newSecretResolver( diff --git a/internal/controller/status/prepare_requests.go b/internal/controller/status/prepare_requests.go index 3210e432ec..c2618fd12d 100644 --- a/internal/controller/status/prepare_requests.go +++ b/internal/controller/status/prepare_requests.go @@ -2,6 +2,8 @@ package status import ( "fmt" + "net" + "reflect" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -17,6 +19,10 @@ import ( "github.com/nginx/nginx-gateway-fabric/v2/internal/framework/kinds" ) +// unusableGatewayIPAddress 198.51.100.0 is an IP address publicly reserved for documentation (RFC 5737 TEST-NET-2). +// It gives the conformance tests an example of a valid but unusable IP address. +const unusableGatewayIPAddress = "198.51.100.0" + // PrepareRouteRequests prepares status UpdateRequests for the given Routes. func PrepareRouteRequests( l4routes map[graph.L4RouteKey]*graph.L4Route, @@ -329,6 +335,20 @@ func prepareGatewayRequest( ) } + // Set the unprogrammed conditions here, because those do not make the gateway invalid. + // We set the unaccepted conditions elsewhere, because those do make the gateway invalid.
+ for _, address := range gateway.Source.Spec.Addresses { + if address.Value == "" { + gwConds = append(gwConds, conditions.NewGatewayAddressNotAssigned("Dynamically assigned addresses for the "+ + "Gateway addresses field are not supported, value must be specified")) + } else { + ip := net.ParseIP(address.Value) + if ip == nil || reflect.DeepEqual(ip, net.ParseIP(unusableGatewayIPAddress)) { + gwConds = append(gwConds, conditions.NewGatewayUnusableAddress("Invalid IP address")) + } + } + } + apiGwConds := conditions.ConvertConditions( conditions.DeduplicateConditions(gwConds), gateway.Source.Generation, diff --git a/internal/controller/status/prepare_requests_test.go b/internal/controller/status/prepare_requests_test.go index 7e08a7cb59..965d3dc128 100644 --- a/internal/controller/status/prepare_requests_test.go +++ b/internal/controller/status/prepare_requests_test.go @@ -850,6 +850,11 @@ func TestBuildGatewayStatuses(t *testing.T) { }, } } + createGatewayWithAddresses := func(addresses []v1.GatewaySpecAddress) *v1.Gateway { + g := createGateway() + g.Spec.Addresses = addresses + return g + } transitionTime := helpers.PrepareTimeForFakeClient(metav1.Now()) @@ -1341,6 +1346,105 @@ func TestBuildGatewayStatuses(t *testing.T) { }, }, }, + { + name: "valid gateway; valid listeners; gateway addresses value unspecified", + gateway: &graph.Gateway{ + Source: createGatewayWithAddresses([]v1.GatewaySpecAddress{ + { + Type: helpers.GetPointer(v1.IPAddressType), + Value: "", + }, + }), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAccepted), + Message: "Gateway is accepted", + }, + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionFalse, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAddressNotAssigned), + Message: "Dynamically assigned addresses for the Gateway addresses " + + "field are not supported, value must be specified", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, + }, + { + name: "valid gateway; valid listeners; gateway addresses value unusable", + gateway: &graph.Gateway{ + Source: createGatewayWithAddresses([]v1.GatewaySpecAddress{ + { + Type: helpers.GetPointer(v1.IPAddressType), + Value: "198.51.100.0", + }, + }), + Listeners: []*graph.Listener{ + { + Name: "listener-valid-1", + Valid: true, + Routes: map[graph.RouteKey]*graph.L7Route{routeKey: {}}, + }, + }, + Valid: true, + }, + expected: map[types.NamespacedName]v1.GatewayStatus{ + {Namespace: "test", Name: "gateway"}: { + Addresses: addr, + Conditions: []metav1.Condition{ + { + Type: string(v1.GatewayConditionAccepted), + Status: metav1.ConditionTrue, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAccepted), + Message: "Gateway is accepted", + }, + { + Type: string(v1.GatewayConditionProgrammed), + Status: metav1.ConditionFalse, + ObservedGeneration: 2, + LastTransitionTime: transitionTime, + Reason: string(v1.GatewayReasonAddressNotUsable), + Message: "Invalid IP 
address", + }, + }, + Listeners: []v1.ListenerStatus{ + { + Name: "listener-valid-1", + AttachedRoutes: 1, + Conditions: validListenerConditions, + }, + }, + }, + }, + }, } for _, test := range tests { diff --git a/tests/Makefile b/tests/Makefile index 668fd879e3..dcea49c341 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -12,7 +12,7 @@ GW_SERVICE_TYPE = NodePort## Service type to use for the gateway NGF_VERSION ?= edge## NGF version to be tested PULL_POLICY = Never## Pull policy for the images NGINX_CONF_DIR = internal/controller/nginx/conf -SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,GatewayAddressEmpty,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation,GatewayInfrastructurePropagation,HTTPRouteRequestMirror,HTTPRouteRequestMultipleMirrors,HTTPRouteRequestPercentageMirror,HTTPRouteBackendProtocolWebSocket,HTTPRouteParentRefPort,HTTPRouteDestinationPortMatching +SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,GatewayAddressEmpty,HTTPRouteResponseHeaderModification,HTTPRoutePathRedirect,GatewayHTTPListenerIsolation,GatewayInfrastructurePropagation,HTTPRouteRequestMirror,HTTPRouteRequestMultipleMirrors,HTTPRouteRequestPercentageMirror,HTTPRouteBackendProtocolWebSocket,HTTPRouteParentRefPort,HTTPRouteDestinationPortMatching,GatewayStaticAddresses STANDARD_CONFORMANCE_PROFILES = GATEWAY-HTTP,GATEWAY-GRPC EXPERIMENTAL_CONFORMANCE_PROFILES = GATEWAY-TLS CONFORMANCE_PROFILES = $(STANDARD_CONFORMANCE_PROFILES) # by default we use the standard conformance profiles. If experimental is enabled we override this and add the experimental profiles. diff --git a/tests/conformance/conformance_test.go b/tests/conformance/conformance_test.go index 3437749c67..d792046e96 100644 --- a/tests/conformance/conformance_test.go +++ b/tests/conformance/conformance_test.go @@ -22,6 +22,8 @@ import ( "testing" . "github.com/onsi/gomega" + v1 "sigs.k8s.io/gateway-api/apis/v1" + "sigs.k8s.io/gateway-api/apis/v1beta1" "sigs.k8s.io/gateway-api/conformance" conf_v1 "sigs.k8s.io/gateway-api/conformance/apis/v1" "sigs.k8s.io/gateway-api/conformance/tests" @@ -30,6 +32,10 @@ import ( "sigs.k8s.io/yaml" ) +// unusableGatewayIPAddress 198.51.100.0 is an IP address publicly reserved for documentation (RFC 5737 TEST-NET-2). +// It gives the conformance tests an example of a valid but unusable IP address. 
+const unusableGatewayIPAddress = "198.51.100.0" + func TestConformance(t *testing.T) { g := NewWithT(t) @@ -42,6 +48,11 @@ func TestConformance(t *testing.T) { ) opts := conformance.DefaultOptions(t) + + ipaddressType := v1.IPAddressType + opts.UnusableNetworkAddresses = []v1beta1.GatewaySpecAddress{{Type: &ipaddressType, Value: unusableGatewayIPAddress}} + opts.UsableNetworkAddresses = []v1beta1.GatewaySpecAddress{{Type: &ipaddressType, Value: "192.0.2.1"}} + opts.Implementation = conf_v1.Implementation{ Organization: "nginx", Project: "nginx-gateway-fabric", diff --git a/tests/framework/collector.go b/tests/framework/collector.go index 20fe2614dd..bc71a7734e 100644 --- a/tests/framework/collector.go +++ b/tests/framework/collector.go @@ -13,7 +13,7 @@ const ( collectorChartReleaseName = "otel-collector" //nolint:lll // renovate: datasource=helm depName=opentelemetry-collector registryUrl=https://open-telemetry.github.io/opentelemetry-helm-charts - collectorChartVersion = "0.133.0" + collectorChartVersion = "0.134.0" ) // InstallCollector installs the otel-collector. diff --git a/tests/framework/crossplane.go b/tests/framework/crossplane.go index 02a16b6cb5..90b50faef4 100644 --- a/tests/framework/crossplane.go +++ b/tests/framework/crossplane.go @@ -8,6 +8,7 @@ import ( "strings" "time" + . "github.com/onsi/ginkgo/v2" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -42,10 +43,13 @@ const crossplaneImageName = "nginx-crossplane:latest" // ValidateNginxFieldExists accepts the nginx config and the configuration for the expected field, // and returns whether or not that field exists where it should. -func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) error { +func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField, opts ...Option) error { b, err := json.Marshal(conf) if err != nil { - return fmt.Errorf("error marshaling nginx config: %w", err) + marshalErr := fmt.Errorf("error marshaling nginx config: %w", err) + GinkgoWriter.Printf("%v\n", marshalErr) + + return marshalErr } for _, config := range conf.Config { @@ -55,7 +59,7 @@ func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) err for _, directive := range config.Parsed { if expFieldCfg.Server == "" && expFieldCfg.Upstream == "" { - if expFieldCfg.fieldFound(directive) { + if expFieldCfg.fieldFound(directive, opts...) { return nil } continue @@ -65,13 +69,15 @@ func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) err return nil } - if expFieldCfg.Upstream != "" && fieldExistsInUpstream(expFieldCfg, *directive) { + if expFieldCfg.Upstream != "" && fieldExistsInUpstream(expFieldCfg, *directive, opts...) { return nil } } } + directiveErr := fmt.Errorf("directive %s not found in: nginx config %s", expFieldCfg.Directive, string(b)) + GinkgoWriter.Printf("ERROR: %v\n", directiveErr) - return fmt.Errorf("directive %s not found in: nginx config %s", expFieldCfg.Directive, string(b)) + return directiveErr } func fieldExistsInServer( @@ -94,7 +100,16 @@ func fieldExistsInServer( func fieldExistsInUpstream( expFieldCfg ExpectedNginxField, directive Directive, + opts ...Option, ) bool { + options := LogOptions(opts...) 
+ if options.logEnabled { + GinkgoWriter.Printf( + "Checking upstream for directive %q with value %q\n", + expFieldCfg.Directive, + expFieldCfg.Value, + ) + } if directive.Directive == "upstream" && directive.Args[0] == expFieldCfg.Upstream { for _, directive := range directive.Block { if expFieldCfg.fieldFound(directive) { @@ -115,7 +130,8 @@ func getServerName(serverBlock Directives) string { return "" } -func (e ExpectedNginxField) fieldFound(directive *Directive) bool { +func (e ExpectedNginxField) fieldFound(directive *Directive, opts ...Option) bool { + options := LogOptions(opts...) arg := strings.Join(directive.Args, " ") valueMatch := arg == e.Value @@ -123,7 +139,20 @@ func (e ExpectedNginxField) fieldFound(directive *Directive) bool { valueMatch = strings.Contains(arg, e.Value) } - return directive.Directive == e.Directive && valueMatch + if directive.Directive == e.Directive && valueMatch { + if options.logEnabled { + GinkgoWriter.Printf( + "Found field %q with value %q in field %q with value %q\n", + e.Directive, + e.Value, + directive.Directive, + arg, + ) + } + return true + } + + return false } func fieldExistsInLocation(locationDirective *Directive, expFieldCfg ExpectedNginxField) bool { @@ -201,7 +230,10 @@ func injectCrossplaneContainer( podClient := k8sClient.CoreV1().Pods(namespace) if _, err := podClient.UpdateEphemeralContainers(ctx, ngfPodName, pod, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("error adding ephemeral container: %w", err) + containerErr := fmt.Errorf("error adding ephemeral container: %w", err) + GinkgoWriter.Printf("%v\n", containerErr) + + return containerErr } return nil @@ -231,7 +263,10 @@ func createCrossplaneExecutor( exec, err := remotecommand.NewSPDYExecutor(k8sConfig, http.MethodPost, req.URL()) if err != nil { - return nil, fmt.Errorf("error creating executor: %w", err) + executorErr := fmt.Errorf("error creating executor: %w", err) + GinkgoWriter.Printf("%v\n", executorErr) + + return nil, executorErr } return exec, nil diff --git a/tests/framework/generate_manifests.go b/tests/framework/generate_manifests.go index c7a00e234a..8e5d436fd6 100644 --- a/tests/framework/generate_manifests.go +++ b/tests/framework/generate_manifests.go @@ -7,7 +7,6 @@ import ( "io" "text/template" - . "github.com/onsi/ginkgo/v2" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" @@ -222,7 +221,6 @@ func GenerateScaleListenerObjects(numListeners int, tls bool) (ScaleObjects, err } func generateSecrets(secrets []string) ([]client.Object, error) { - GinkgoWriter.Printf("Generating secrets\n") objects := make([]client.Object, 0, len(secrets)) for _, secret := range secrets { @@ -239,7 +237,6 @@ func generateSecrets(secrets []string) ([]client.Object, error) { objects = append(objects, objs...) } - GinkgoWriter.Printf("Generated %d secrets\n", len(objects)) return objects, nil } diff --git a/tests/framework/info.go b/tests/framework/info.go index c485edc9aa..8f36929bc5 100644 --- a/tests/framework/info.go +++ b/tests/framework/info.go @@ -84,11 +84,11 @@ func GetBuildInfo() (commitHash string, commitTime string, dirtyBuild string) { } // AddNginxLogsAndEventsToReport adds nginx logs and events from the namespace to the report if the spec failed. 
-func AddNginxLogsAndEventsToReport(rm ResourceManager, namespace string) { +func AddNginxLogsAndEventsToReport(rm ResourceManager, namespace string, opts ...Option) { if CurrentSpecReport().Failed() { var returnLogs string - nginxPodNames, _ := GetReadyNginxPodNames(rm.K8sClient, namespace, rm.TimeoutConfig.GetStatusTimeout) + nginxPodNames, _ := rm.GetReadyNginxPodNames(namespace, rm.TimeoutConfig.GetStatusTimeout, opts...) for _, nginxPodName := range nginxPodNames { returnLogs += fmt.Sprintf("Logs for Nginx Pod %s:\n", nginxPodName) diff --git a/tests/framework/load.go b/tests/framework/load.go index c687f275b1..d6caf312b5 100644 --- a/tests/framework/load.go +++ b/tests/framework/load.go @@ -7,6 +7,7 @@ import ( "net/http" "time" + . "github.com/onsi/ginkgo/v2" vegeta "github.com/tsenart/vegeta/v12/lib" ) @@ -49,6 +50,7 @@ type Metrics struct { // RunLoadTest uses Vegeta to send traffic to the provided Targets at the given rate for the given duration and writes // the results to the provided file. func RunLoadTest(cfg LoadTestConfig) (vegeta.Results, Metrics) { + GinkgoWriter.Printf("Running load test: %s\n", cfg.Description) vegTargets := convertTargetToVegetaTarget(cfg.Targets) targeter := vegeta.NewStaticTargeter(vegTargets...) @@ -61,7 +63,12 @@ func RunLoadTest(cfg LoadTestConfig) (vegeta.Results, Metrics) { Timeout: vegeta.DefaultTimeout, Transport: &http.Transport{ DialContext: func(ctx context.Context, network, _ string) (net.Conn, error) { - return dialer.DialContext(ctx, network, cfg.Proxy) + conn, err := dialer.DialContext(ctx, network, cfg.Proxy) + if err != nil { + GinkgoWriter.Printf("ERROR occurred during dialing %q in %q network, error: %s\n", cfg.Proxy, network, err) + } + + return conn, err }, TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, //nolint:gosec // self-signed cert for testing diff --git a/tests/framework/logging.go b/tests/framework/logging.go index aaf9f54a48..ddc02672a3 100644 --- a/tests/framework/logging.go +++ b/tests/framework/logging.go @@ -11,3 +11,12 @@ func WithLoggingDisabled() Option { opts.logEnabled = false } } + +func LogOptions(opts ...Option) *Options { + options := &Options{logEnabled: true} + for _, opt := range opts { + opt(options) + } + + return options +} diff --git a/tests/framework/ngf.go b/tests/framework/ngf.go index 660cb79e8e..0bb98d1956 100644 --- a/tests/framework/ngf.go +++ b/tests/framework/ngf.go @@ -14,7 +14,6 @@ import ( apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -101,7 +100,7 @@ func InstallNGF(cfg InstallationConfig, extraArgs ...string) ([]byte, error) { } // CreateLicenseSecret creates the NGINX Plus JWT secret. 
-func CreateLicenseSecret(k8sClient client.Client, namespace, filename string) error { +func CreateLicenseSecret(rm ResourceManager, namespace, filename string) error { GinkgoWriter.Printf("Creating NGINX Plus license secret in namespace %q from file %q\n", namespace, filename) conf, err := os.ReadFile(filename) @@ -121,11 +120,8 @@ func CreateLicenseSecret(k8sClient client.Client, namespace, filename string) er }, } - if err := k8sClient.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) { - createNSErr := fmt.Errorf("error creating namespace: %w", err) - GinkgoWriter.Printf("%v\n", createNSErr) - - return createNSErr + if err := rm.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("error creating namespace: %w", err) } secret := &core.Secret{ @@ -138,7 +134,7 @@ func CreateLicenseSecret(k8sClient client.Client, namespace, filename string) er }, } - if err := k8sClient.Create(ctx, secret); err != nil && !apierrors.IsAlreadyExists(err) { + if err := rm.Create(ctx, secret); err != nil && !apierrors.IsAlreadyExists(err) { createSecretErr := fmt.Errorf("error creating secret: %w", err) GinkgoWriter.Printf("%v\n", createSecretErr) @@ -185,7 +181,7 @@ func UpgradeNGF(cfg InstallationConfig, extraArgs ...string) ([]byte, error) { } // UninstallNGF uninstalls NGF. -func UninstallNGF(cfg InstallationConfig, k8sClient client.Client) ([]byte, error) { +func UninstallNGF(cfg InstallationConfig, rm ResourceManager) ([]byte, error) { args := []string{ "uninstall", cfg.ReleaseName, "--namespace", cfg.Namespace, } @@ -199,20 +195,20 @@ func UninstallNGF(cfg InstallationConfig, k8sClient client.Client) ([]byte, erro ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - err = k8sClient.Delete(ctx, &core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: cfg.Namespace}}) + err = rm.Delete(ctx, &core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: cfg.Namespace}}, nil) if err != nil && !apierrors.IsNotFound(err) { return nil, err } var crList apiext.CustomResourceDefinitionList - if err := k8sClient.List(ctx, &crList); err != nil { + if err := rm.List(ctx, &crList); err != nil { return nil, err } for _, cr := range crList.Items { if strings.Contains(cr.Spec.Group, "gateway.nginx.org") { cr := cr - if err := k8sClient.Delete(ctx, &cr); err != nil && !apierrors.IsNotFound(err) { + if err := rm.Delete(ctx, &cr, nil); err != nil && !apierrors.IsNotFound(err) { return nil, err } } diff --git a/tests/framework/prometheus.go b/tests/framework/prometheus.go index 6ce8a300bb..37d72e1c49 100644 --- a/tests/framework/prometheus.go +++ b/tests/framework/prometheus.go @@ -10,6 +10,7 @@ import ( "os/exec" "time" + . 
"github.com/onsi/ginkgo/v2" "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" @@ -49,7 +50,10 @@ func InstallPrometheus( "https://prometheus-community.github.io/helm-charts", ).CombinedOutput() if err != nil { - return PrometheusInstance{}, fmt.Errorf("failed to add Prometheus helm repo: %w; output: %s", err, string(output)) + prometheusErr := fmt.Errorf("failed to add Prometheus helm repo: %w; output: %s", err, string(output)) + GinkgoWriter.Printf("ERROR: %v\n", prometheusErr) + + return PrometheusInstance{}, prometheusErr } output, err = exec.CommandContext( @@ -59,7 +63,10 @@ func InstallPrometheus( "update", ).CombinedOutput() if err != nil { - return PrometheusInstance{}, fmt.Errorf("failed to update helm repos: %w; output: %s", err, string(output)) + helmReposErr := fmt.Errorf("failed to update helm repos: %w; output: %s", err, string(output)) + GinkgoWriter.Printf("ERROR: %v\n", helmReposErr) + + return PrometheusInstance{}, helmReposErr } scrapeInterval := fmt.Sprintf("%ds", int(cfg.ScrapeInterval.Seconds())) @@ -77,24 +84,36 @@ func InstallPrometheus( "--wait", ).CombinedOutput() if err != nil { - return PrometheusInstance{}, fmt.Errorf("failed to install Prometheus: %w; output: %s", err, string(output)) + prometheusInstallationErr := fmt.Errorf("failed to install Prometheus: %w; output: %s", err, string(output)) + GinkgoWriter.Printf("ERROR: %v\n", prometheusInstallationErr) + + return PrometheusInstance{}, prometheusInstallationErr } pods, err := rm.GetPods(prometheusNamespace, client.MatchingLabels{ "app.kubernetes.io/name": "prometheus", }) if err != nil { - return PrometheusInstance{}, fmt.Errorf("failed to get Prometheus pods: %w", err) + podsErr := fmt.Errorf("failed to get Prometheus pods: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", podsErr) + + return PrometheusInstance{}, podsErr } if len(pods) != 1 { - return PrometheusInstance{}, fmt.Errorf("expected one Prometheus pod, found %d", len(pods)) + manyPodsErr := fmt.Errorf("expected one Prometheus pod, found %d", len(pods)) + GinkgoWriter.Printf("ERROR: %v\n", manyPodsErr) + + return PrometheusInstance{}, manyPodsErr } pod := pods[0] if pod.Status.PodIP == "" { - return PrometheusInstance{}, errors.New("the Prometheus pod has no IP") + podIPErr := errors.New("the Prometheus pod has no IP") + GinkgoWriter.Printf("ERROR: %v\n", podIPErr) + + return PrometheusInstance{}, podIPErr } var queryTimeout time.Duration @@ -114,6 +133,7 @@ func InstallPrometheus( // UninstallPrometheus uninstalls Prometheus from the cluster. 
func UninstallPrometheus(rm ResourceManager) error { + GinkgoWriter.Printf("Uninstalling Prometheus from namespace %q\n", prometheusNamespace) output, err := exec.CommandContext( context.Background(), "helm", @@ -122,11 +142,17 @@ func UninstallPrometheus(rm ResourceManager) error { "-n", prometheusNamespace, ).CombinedOutput() if err != nil { - return fmt.Errorf("failed to uninstall Prometheus: %w; output: %s", err, string(output)) + uninstallErr := fmt.Errorf("failed to uninstall Prometheus: %w; output: %s", err, string(output)) + GinkgoWriter.Printf("ERROR: %v\n", uninstallErr) + + return uninstallErr } if err := rm.DeleteNamespace(prometheusNamespace); err != nil { - return fmt.Errorf("failed to delete Prometheus namespace: %w", err) + deleteNSErr := fmt.Errorf("failed to delete Prometheus namespace: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", deleteNSErr) + + return deleteNSErr } return nil @@ -150,8 +176,12 @@ type PrometheusInstance struct { // PortForward starts port forwarding to the Prometheus instance. func (ins *PrometheusInstance) PortForward(config *rest.Config, stopCh <-chan struct{}) error { + GinkgoWriter.Printf("Starting port forwarding to Prometheus pod %q in namespace %q\n", ins.podName, ins.podNamespace) if ins.portForward { - panic("port forwarding already started") + infoMsg := "port forwarding already started" + GinkgoWriter.Printf("INFO: %s\n", infoMsg) + + panic(infoMsg) } ins.portForward = true @@ -175,6 +205,8 @@ func (ins *PrometheusInstance) getAPIClient() (v1.API, error) { c, err := api.NewClient(cfg) if err != nil { + GinkgoWriter.Printf("ERROR occurred during creating Prometheus API client: %v\n", err) + return nil, err } @@ -185,7 +217,10 @@ func (ins *PrometheusInstance) ensureAPIClient() error { if ins.apiClient == nil { ac, err := ins.getAPIClient() if err != nil { - return fmt.Errorf("failed to get Prometheus API client: %w", err) + apiClientErr := fmt.Errorf("failed to get Prometheus API client: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", apiClientErr) + + return apiClientErr } ins.apiClient = ac } @@ -209,10 +244,14 @@ func (ins *PrometheusInstance) QueryWithCtx(ctx context.Context, query string) ( result, warnings, err := ins.apiClient.Query(ctx, query, time.Time{}) if err != nil { - return nil, fmt.Errorf("failed to query Prometheus: %w", err) + queryErr := fmt.Errorf("failed to query Prometheus: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", queryErr) + + return nil, queryErr } if len(warnings) > 0 { + GinkgoWriter.Printf("WARNING: Prometheus query returned warnings: %v\n", warnings) slog.InfoContext(context.Background(), "Prometheus query returned warnings", "query", query, @@ -235,16 +274,23 @@ func (ins *PrometheusInstance) QueryRange(query string, promRange v1.Range) (mod func (ins *PrometheusInstance) QueryRangeWithCtx(ctx context.Context, query string, promRange v1.Range, ) (model.Value, error) { + GinkgoWriter.Printf("Querying Prometheus with range query: %q\n", query) if err := ins.ensureAPIClient(); err != nil { + GinkgoWriter.Printf("ERROR during ensureAPIClient for prometheus: %v\n", err) + return nil, err } result, warnings, err := ins.apiClient.QueryRange(ctx, query, promRange) if err != nil { - return nil, fmt.Errorf("failed to query Prometheus: %w", err) + queryErr := fmt.Errorf("failed to query Prometheus: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", queryErr) + + return nil, queryErr } if len(warnings) > 0 { + GinkgoWriter.Printf("WARNING: Prometheus range query returned warnings: %v\n", warnings) 
slog.InfoContext(context.Background(), "Prometheus range query returned warnings", "query", query, @@ -260,11 +306,17 @@ func (ins *PrometheusInstance) QueryRangeWithCtx(ctx context.Context, func GetFirstValueOfPrometheusVector(val model.Value) (float64, error) { res, ok := val.(model.Vector) if !ok { - return 0, fmt.Errorf("expected a vector, got %T", val) + valueErr := fmt.Errorf("expected a vector, got %T", val) + GinkgoWriter.Printf("ERROR: %v\n", valueErr) + + return 0, valueErr } if len(res) == 0 { - return 0, errors.New("empty vector") + vectorErr := errors.New("empty vector") + GinkgoWriter.Printf("ERROR: %v\n", vectorErr) + + return 0, vectorErr } return float64(res[0].Value), nil @@ -272,8 +324,11 @@ func GetFirstValueOfPrometheusVector(val model.Value) (float64, error) { // WritePrometheusMatrixToCSVFile writes a Prometheus matrix to a CSV file. func WritePrometheusMatrixToCSVFile(fileName string, value model.Value) error { + GinkgoWriter.Printf("Writing Prometheus matrix to CSV file %q\n", fileName) file, err := os.Create(fileName) if err != nil { + GinkgoWriter.Printf("ERROR occurred during creating file %q: %v\n", fileName, err) + return err } defer file.Close() @@ -282,13 +337,18 @@ func WritePrometheusMatrixToCSVFile(fileName string, value model.Value) error { matrix, ok := value.(model.Matrix) if !ok { - return fmt.Errorf("expected a matrix, got %T", value) + matrixErr := fmt.Errorf("expected a matrix, got %T", value) + GinkgoWriter.Printf("ERROR: %v\n", matrixErr) + + return matrixErr } for _, sample := range matrix { for _, pair := range sample.Values { record := []string{fmt.Sprint(pair.Timestamp.Unix()), pair.Value.String()} if err := csvWriter.Write(record); err != nil { + GinkgoWriter.Printf("ERROR: %v\n", err) + return err } } @@ -409,18 +469,30 @@ func CreateMetricExistChecker( query string, getTime func() time.Time, modifyTime func(), + opts ...Option, ) func() error { return func() error { queryWithTimestamp := fmt.Sprintf("%s @ %d", query, getTime().Unix()) + options := LogOptions(opts...) 
result, err := promInstance.Query(queryWithTimestamp) if err != nil { - return fmt.Errorf("failed to query Prometheus: %w", err) + queryErr := fmt.Errorf("failed to query Prometheus: %w", err) + if options.logEnabled { + GinkgoWriter.Printf("ERROR during creating metric existence checker: %v\n", queryErr) + } + + return queryErr } if result.String() == "" { modifyTime() - return errors.New("empty result") + emptyResultErr := errors.New("empty result") + if options.logEnabled { + GinkgoWriter.Printf("ERROR during creating metric existence checker: %v\n", emptyResultErr) + } + + return emptyResultErr } return nil @@ -436,6 +508,7 @@ func CreateEndTimeFinder( endTime *time.Time, queryRangeStep time.Duration, ) func() error { + GinkgoWriter.Printf("Creating end time finder with start time %v and initial end time %v\n", startTime, endTime) return func() error { result, err := promInstance.QueryRange(query, v1.Range{ Start: startTime, @@ -443,12 +516,18 @@ func CreateEndTimeFinder( Step: queryRangeStep, }) if err != nil { - return fmt.Errorf("failed to query Prometheus: %w", err) + queryErr := fmt.Errorf("failed to query Prometheus: %w", err) + GinkgoWriter.Printf("ERROR during creating end time finder: %v\n", queryErr) + + return queryErr } if result.String() == "" { *endTime = time.Now() - return errors.New("empty result") + emptyResultsErr := errors.New("empty result") + GinkgoWriter.Printf("ERROR during creating end time finder: %v\n", emptyResultsErr) + + return emptyResultsErr } return nil @@ -457,14 +536,29 @@ func CreateEndTimeFinder( // CreateResponseChecker returns a function that checks if there is a successful response from a url. func CreateResponseChecker(url, address string, requestTimeout time.Duration, opts ...Option) func() error { + options := LogOptions(opts...) + if options.logEnabled { + GinkgoWriter.Printf("Starting checking response for url %q and address %q\n", url, address) + } + return func() error { status, _, err := Get(url, address, requestTimeout, nil, nil, opts...) 
if err != nil { - return fmt.Errorf("bad response: %w", err) + badReqErr := fmt.Errorf("bad response: %w", err) + if options.logEnabled { + GinkgoWriter.Printf("ERROR during creating response checker: %v\n", badReqErr) + } + + return badReqErr } if status != 200 { - return fmt.Errorf("unexpected status code: %d", status) + statusErr := fmt.Errorf("unexpected status code: %d", status) + if options.logEnabled { + GinkgoWriter.Printf("ERROR during creating response checker: %v\n", statusErr) + } + + return statusErr } return nil @@ -474,11 +568,15 @@ func CreateResponseChecker(url, address string, requestTimeout time.Duration, op func getFirstValueOfVector(query string, promInstance PrometheusInstance) (float64, error) { result, err := promInstance.Query(query) if err != nil { + GinkgoWriter.Printf("ERROR querying Prometheus during getting first value of vector: %v\n", err) + return 0, err } val, err := GetFirstValueOfPrometheusVector(result) if err != nil { + GinkgoWriter.Printf("ERROR getting first value of Prometheus vector: %v\n", err) + return 0, err } @@ -488,12 +586,17 @@ func getFirstValueOfVector(query string, promInstance PrometheusInstance) (float func getBuckets(query string, promInstance PrometheusInstance) ([]Bucket, error) { result, err := promInstance.Query(query) if err != nil { + GinkgoWriter.Printf("ERROR querying Prometheus during getting buckets: %v\n", err) + return nil, err } res, ok := result.(model.Vector) if !ok { - return nil, errors.New("could not convert result to vector") + convertationErr := errors.New("could not convert result to vector") + GinkgoWriter.Printf("ERROR during getting buckets: %v\n", convertationErr) + + return nil, convertationErr } buckets := make([]Bucket, 0, len(res)) diff --git a/tests/framework/request.go b/tests/framework/request.go index a04d8ccc66..bd8663b8a3 100644 --- a/tests/framework/request.go +++ b/tests/framework/request.go @@ -24,10 +24,7 @@ func Get( headers, queryParams map[string]string, opts ...Option, ) (int, string, error) { - options := &Options{logEnabled: true} - for _, opt := range opts { - opt(options) - } + options := LogOptions(opts...) resp, err := makeRequest(http.MethodGet, url, address, nil, timeout, headers, queryParams, opts...) if err != nil { @@ -99,11 +96,7 @@ func makeRequest( ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - options := &Options{logEnabled: true} - - for _, opt := range opts { - opt(options) - } + options := LogOptions(opts...) if options.logEnabled { requestDetails := fmt.Sprintf( "Method: %s, URL: %s, Address: %s, Headers: %v, QueryParams: %v\n", diff --git a/tests/framework/resourcemanager.go b/tests/framework/resourcemanager.go index f51f17920a..f8bc54acaa 100644 --- a/tests/framework/resourcemanager.go +++ b/tests/framework/resourcemanager.go @@ -76,8 +76,11 @@ type ClusterInfo struct { } // Apply creates or updates Kubernetes resources defined as Go objects. -func (rm *ResourceManager) Apply(resources []client.Object) error { - GinkgoWriter.Printf("Applying resources defined as Go objects\n") +func (rm *ResourceManager) Apply(resources []client.Object, opts ...Option) error { + options := LogOptions(opts...) 
+ if options.logEnabled { + GinkgoWriter.Printf("Applying resources defined as Go objects\n") + } ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.CreateTimeout) defer cancel() @@ -101,25 +104,13 @@ func (rm *ResourceManager) Apply(resources []client.Object) error { } } - if err := rm.K8sClient.Get(ctx, client.ObjectKeyFromObject(resource), obj); err != nil { + if err := rm.Get(ctx, client.ObjectKeyFromObject(resource), obj, opts...); err != nil { if !apierrors.IsNotFound(err) { - notFoundErr := fmt.Errorf("error getting resource: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during getting Kubernetes resources: %s\n", - notFoundErr, - ) - - return notFoundErr + return err } - if err := rm.K8sClient.Create(ctx, resource); err != nil { - creatingResourceErr := fmt.Errorf("error creating resource: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during applying creates Kubernetes resources: %s\n", - creatingResourceErr, - ) - - return creatingResourceErr + if err := rm.Create(ctx, resource); err != nil { + return fmt.Errorf("error creating resource: %w", err) } continue @@ -128,83 +119,69 @@ func (rm *ResourceManager) Apply(resources []client.Object) error { // Some tests modify resources that are also modified by NGF (to update their status), so conflicts are possible // For example, a Gateway resource. err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if err := rm.K8sClient.Get(ctx, client.ObjectKeyFromObject(resource), obj); err != nil { - GinkgoWriter.Printf( - "ERROR occurred during getting Kubernetes resources on retries: %s\n", - err, - ) - + if err := rm.Get(ctx, client.ObjectKeyFromObject(resource), obj); err != nil { return err } resource.SetResourceVersion(obj.GetResourceVersion()) - updateErr := rm.K8sClient.Update(ctx, resource) - if updateErr != nil { - GinkgoWriter.Printf( - "ERROR occurred during updating Kubernetes resources on retries: %s\n", - updateErr, - ) - } - return updateErr + return rm.Update(ctx, resource, nil) }) if err != nil { retryErr := fmt.Errorf("error updating resource: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during retries: %s\n", - retryErr, - ) + GinkgoWriter.Printf("%s\n", retryErr) return retryErr } } - GinkgoWriter.Printf("Resources defined as Go objects applied successfully\n") - + if options.logEnabled { + GinkgoWriter.Printf("Resources defined as Go objects applied successfully\n") + } return nil } // ApplyFromFiles creates or updates Kubernetes resources defined within the provided YAML files. -func (rm *ResourceManager) ApplyFromFiles(files []string, namespace string) error { +func (rm *ResourceManager) ApplyFromFiles(files []string, namespace string, opts ...Option) error { + options := LogOptions(opts...) 
for _, file := range files { - GinkgoWriter.Printf("Applying resources from file: %q to namespace %q\n", file, namespace) + if options.logEnabled { + GinkgoWriter.Printf("\nApplying resources from file: %q to namespace %q\n", file, namespace) + } data, err := rm.GetFileContents(file) if err != nil { - GinkgoWriter.Printf("ERROR occurred during getting file contents for file %q, error: %s\n", file, err) - return err } if err = rm.ApplyFromBuffer(data, namespace); err != nil { - GinkgoWriter.Printf("ERROR occurred during applying resources from file %q, error: %s\n", file, err) - return err } } - GinkgoWriter.Printf("Resources from files applied successfully to namespace %q,\n", namespace) + if options.logEnabled { + GinkgoWriter.Printf("Resources from files applied successfully to namespace %q,\n", namespace) + } return nil } -func (rm *ResourceManager) ApplyFromBuffer(buffer *bytes.Buffer, namespace string) error { +func (rm *ResourceManager) ApplyFromBuffer(buffer *bytes.Buffer, namespace string, opts ...Option) error { ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.CreateTimeout) defer cancel() + options := LogOptions(opts...) + if options.logEnabled { + GinkgoWriter.Printf("Applying resources from buffer to namespace %q\n", namespace) + } + handlerFunc := func(obj unstructured.Unstructured) error { obj.SetNamespace(namespace) nsName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()} fetchedObj := obj.DeepCopy() - if err := rm.K8sClient.Get(ctx, nsName, fetchedObj); err != nil { + if err := rm.Get(ctx, nsName, fetchedObj, opts...); err != nil { if !apierrors.IsNotFound(err) { - getResourceErr := fmt.Errorf("error getting resource: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting resource from buffer, error: %s\n", getResourceErr) - - return getResourceErr + return err } - if err := rm.K8sClient.Create(ctx, &obj); err != nil { - createResourceErr := fmt.Errorf("error creating resource: %w", err) - GinkgoWriter.Printf("ERROR occurred during creating resource from buffer, error: %s\n", createResourceErr) - - return createResourceErr + if err := rm.Create(ctx, &obj); err != nil { + return fmt.Errorf("error creating resource: %w", err) } return nil @@ -213,28 +190,16 @@ func (rm *ResourceManager) ApplyFromBuffer(buffer *bytes.Buffer, namespace strin // Some tests modify resources that are also modified by NGF (to update their status), so conflicts are possible // For example, a Gateway resource. err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if err := rm.K8sClient.Get(ctx, nsName, fetchedObj); err != nil { - GinkgoWriter.Printf( - "ERROR occurred during getting resource from buffer on retries, error: %s\n", - err, - ) - + if err := rm.Get(ctx, nsName, fetchedObj); err != nil { return err } obj.SetResourceVersion(fetchedObj.GetResourceVersion()) - updateErr := rm.K8sClient.Update(ctx, &obj) - if updateErr != nil { - GinkgoWriter.Printf("ERROR occurred during updating resource from buffer, error: %s\n", updateErr) - } - return updateErr + return rm.Update(ctx, &obj, nil) }) if err != nil { retryErr := fmt.Errorf("error updating resource: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during retries, while update from buffer error: %s\n", - retryErr, - ) + GinkgoWriter.Printf("%s\n", retryErr) return retryErr } @@ -246,17 +211,14 @@ func (rm *ResourceManager) ApplyFromBuffer(buffer *bytes.Buffer, namespace strin } // Delete deletes Kubernetes resources defined as Go objects. 
-func (rm *ResourceManager) Delete(resources []client.Object, opts ...client.DeleteOption) error { +func (rm *ResourceManager) DeleteResources(resources []client.Object, opts ...client.DeleteOption) error { GinkgoWriter.Printf("Deleting resources\n") ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.DeleteTimeout) defer cancel() for _, resource := range resources { - if err := rm.K8sClient.Delete(ctx, resource, opts...); err != nil && !apierrors.IsNotFound(err) { - delErr := fmt.Errorf("error deleting resource: %w", err) - GinkgoWriter.Printf("ERROR occurred during deleting resource, error: %s\n", delErr) - - return delErr + if err := rm.Delete(ctx, resource, opts); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting resource: %w", err) } } GinkgoWriter.Printf("Resources deleted successfully\n") @@ -264,29 +226,22 @@ func (rm *ResourceManager) Delete(resources []client.Object, opts ...client.Dele return nil } -func (rm *ResourceManager) DeleteNamespace(name string) error { +func (rm *ResourceManager) DeleteNamespace(name string, opts ...Option) error { ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.DeleteNamespaceTimeout) GinkgoWriter.Printf("Deleting namespace %q\n", name) defer cancel() ns := &core.Namespace{} - if err := rm.K8sClient.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil { + if err := rm.Get(ctx, types.NamespacedName{Name: name}, ns, opts...); err != nil { if apierrors.IsNotFound(err) { - GinkgoWriter.Printf("Namespace %q not found, nothing to delete\n", name) - return nil } - getNsErr := fmt.Errorf("error getting namespace: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting namespace, error: %s\n", getNsErr) - return getNsErr + return fmt.Errorf("error getting namespace: %w", err) } - if err := rm.K8sClient.Delete(ctx, ns); err != nil { - delErr := fmt.Errorf("error deleting namespace: %w", err) - GinkgoWriter.Printf("ERROR occurred during deleting namespace, error: %s\n", delErr) - - return delErr + if err := rm.Delete(ctx, ns, nil, opts...); err != nil { + return fmt.Errorf("error deleting namespace: %w", err) } GinkgoWriter.Printf("Waiting for namespace %q to be deleted\n", name) @@ -296,24 +251,20 @@ func (rm *ResourceManager) DeleteNamespace(name string) error { 500*time.Millisecond, true, /* poll immediately */ func(ctx context.Context) (bool, error) { - if err := rm.K8sClient.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil { + if err := rm.Get(ctx, types.NamespacedName{Name: name}, ns, opts...); err != nil { if apierrors.IsNotFound(err) { - GinkgoWriter.Printf("Namespace %q not found (deleted)\n", name) - return true, nil } - getNsErr := fmt.Errorf("error getting namespace: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting namespace, error: %s\n", getNsErr) - return false, getNsErr + return false, fmt.Errorf("error getting namespace: %w", err) } return false, nil }) } -func (rm *ResourceManager) DeleteNamespaces(names []string) error { - GinkgoWriter.Printf("Deleting namespaces: %v\n", names) +func (rm *ResourceManager) DeleteNamespaces(names []string, opts ...Option) error { + GinkgoWriter.Printf("Deleting %d namespaces\n", len(names)) ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.DeleteNamespaceTimeout*2) defer cancel() @@ -321,15 +272,12 @@ func (rm *ResourceManager) DeleteNamespaces(names []string) error { for _, name := range names { ns := &core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} - if err := 
rm.K8sClient.Delete(ctx, ns); err != nil { + if err := rm.Delete(ctx, ns, nil, opts...); err != nil { if apierrors.IsNotFound(err) { - GinkgoWriter.Printf("Namespace %q not found, nothing to delete\n", name) continue } - delNsErr := fmt.Errorf("error deleting namespace: %w", err) - GinkgoWriter.Printf("ERROR occurred during deleting namespace %q, error: %s\n", name, delNsErr) - combinedErrors = errors.Join(combinedErrors, delNsErr) + combinedErrors = errors.Join(combinedErrors, fmt.Errorf("error deleting namespace: %w", err)) } } @@ -339,7 +287,7 @@ func (rm *ResourceManager) DeleteNamespaces(names []string) error { true, /* poll immediately */ func(ctx context.Context) (bool, error) { nsList := &core.NamespaceList{} - if err := rm.K8sClient.List(ctx, nsList); err != nil { + if err := rm.List(ctx, nsList); err != nil { return false, nil //nolint:nilerr // retry on error } @@ -363,9 +311,7 @@ func (rm *ResourceManager) DeleteFromFiles(files []string, namespace string) err ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.DeleteTimeout) defer cancel() - if err := rm.K8sClient.Delete(ctx, &obj); err != nil && !apierrors.IsNotFound(err) { - GinkgoWriter.Printf("ERROR occurred during deleting resource from file, error: %s\n", err) - + if err := rm.Delete(ctx, &obj, nil); err != nil && !apierrors.IsNotFound(err) { return err } @@ -375,8 +321,6 @@ func (rm *ResourceManager) DeleteFromFiles(files []string, namespace string) err for _, file := range files { data, err := rm.GetFileContents(file) if err != nil { - GinkgoWriter.Printf("ERROR occurred during getting file contents for file %q, error: %s\n", file, err) - return err } @@ -419,34 +363,49 @@ func (rm *ResourceManager) readAndHandleObject( // path or an https:// URL to YAML manifests and provides the contents. 
func (rm *ResourceManager) GetFileContents(file string) (*bytes.Buffer, error) { if strings.HasPrefix(file, "http://") { - return nil, fmt.Errorf("data can't be retrieved from %s: http is not supported, use https", file) + err := fmt.Errorf("data can't be retrieved from %s: http is not supported, use https", file) + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + + return nil, err } else if strings.HasPrefix(file, "https://") { ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.ManifestFetchTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodGet, file, nil) if err != nil { + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + return nil, err } resp, err := http.DefaultClient.Do(req) if err != nil { + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%d response when getting %s file contents", resp.StatusCode, file) + err = fmt.Errorf("%d response when getting %s file contents", resp.StatusCode, file) + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + + return nil, err } manifests := new(bytes.Buffer) count, err := manifests.ReadFrom(resp.Body) if err != nil { + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + return nil, err } if resp.ContentLength != -1 && count != resp.ContentLength { - return nil, fmt.Errorf("received %d bytes from %s, expected %d", count, file, resp.ContentLength) + err = fmt.Errorf("received %d bytes from %s, expected %d", count, file, resp.ContentLength) + GinkgoWriter.Printf("ERROR occurred during getting contents for file %q, error: %s\n", file, err) + + return nil, err } return manifests, nil } @@ -457,6 +416,8 @@ func (rm *ResourceManager) GetFileContents(file string) (*bytes.Buffer, error) { b, err := rm.FS.ReadFile(file) if err != nil { + GinkgoWriter.Printf("ERROR occurred during getting file contents for file %q, error: %s\n", file, err) + return nil, err } @@ -465,18 +426,18 @@ func (rm *ResourceManager) GetFileContents(file string) (*bytes.Buffer, error) { // WaitForAppsToBeReady waits for all apps in the specified namespace to be ready, // or until the ctx timeout is reached. -func (rm *ResourceManager) WaitForAppsToBeReady(namespace string) error { +func (rm *ResourceManager) WaitForAppsToBeReady(namespace string, opts ...Option) error { GinkgoWriter.Printf("Waiting for apps to be ready in namespace %q\n", namespace) ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.CreateTimeout) defer cancel() - return rm.WaitForAppsToBeReadyWithCtx(ctx, namespace) + return rm.WaitForAppsToBeReadyWithCtx(ctx, namespace, opts...) } // WaitForAppsToBeReadyWithCtx waits for all apps in the specified namespace to be ready or // until the provided context is canceled. 
-func (rm *ResourceManager) WaitForAppsToBeReadyWithCtx(ctx context.Context, namespace string) error { - if err := rm.WaitForPodsToBeReady(ctx, namespace); err != nil { +func (rm *ResourceManager) WaitForAppsToBeReadyWithCtx(ctx context.Context, namespace string, opts ...Option) error { + if err := rm.WaitForPodsToBeReady(ctx, namespace, opts...); err != nil { GinkgoWriter.Printf("ERROR occurred during waiting for pods to be ready, error: %s\n", err) return err @@ -504,14 +465,23 @@ func (rm *ResourceManager) WaitForAppsToBeReadyWithCtx(ctx context.Context, name // WaitForPodsToBeReady waits for all Pods in the specified namespace to be ready or // until the provided context is canceled. -func (rm *ResourceManager) WaitForPodsToBeReady(ctx context.Context, namespace string) error { - return wait.PollUntilContextCancel( +func (rm *ResourceManager) WaitForPodsToBeReady( + ctx context.Context, + namespace string, + opts ...Option, +) error { + options := LogOptions(opts...) + waitingErr := wait.PollUntilContextCancel( ctx, 500*time.Millisecond, true, /* poll immediately */ func(ctx context.Context) (bool, error) { var podList core.PodList - if err := rm.K8sClient.List(ctx, &podList, client.InNamespace(namespace)); err != nil { + if err := rm.List( + ctx, + &podList, + client.InNamespace(namespace), + ); err != nil { return false, err } @@ -523,10 +493,22 @@ func (rm *ResourceManager) WaitForPodsToBeReady(ctx context.Context, namespace s } } } + if options.logEnabled { + GinkgoWriter.Printf("Pods ready: %d out of %d in namespace %q\n", podsReady, len(podList.Items), namespace) + } return podsReady == len(podList.Items), nil }, ) + if waitingErr != nil { + GinkgoWriter.Printf( + "ERROR occurred during waiting for Pods to be ready in namespace %q, error: %s\n", + namespace, + waitingErr, + ) + } + + return waitingErr } func (rm *ResourceManager) waitForGatewaysToBeReady(ctx context.Context, namespace string) error { @@ -536,7 +518,11 @@ func (rm *ResourceManager) waitForGatewaysToBeReady(ctx context.Context, namespa true, /* poll immediately */ func(ctx context.Context) (bool, error) { var gatewayList v1.GatewayList - if err := rm.K8sClient.List(ctx, &gatewayList, client.InNamespace(namespace)); err != nil { + if err := rm.List( + ctx, + &gatewayList, + client.InNamespace(namespace), + ); err != nil { return false, err } @@ -554,13 +540,18 @@ func (rm *ResourceManager) waitForGatewaysToBeReady(ctx context.Context, namespa } func (rm *ResourceManager) waitForHTTPRoutesToBeReady(ctx context.Context, namespace string) error { + GinkgoWriter.Printf("Waiting for HTTPRoutes to be ready in namespace %q\n", namespace) return wait.PollUntilContextCancel( ctx, 500*time.Millisecond, true, /* poll immediately */ func(ctx context.Context) (bool, error) { var routeList v1.HTTPRouteList - if err := rm.K8sClient.List(ctx, &routeList, client.InNamespace(namespace)); err != nil { + if err := rm.List( + ctx, + &routeList, + client.InNamespace(namespace), + ); err != nil { return false, err } @@ -576,10 +567,13 @@ func (rm *ResourceManager) waitForHTTPRoutesToBeReady(ctx context.Context, names } func (rm *ResourceManager) waitForGRPCRoutesToBeReady(ctx context.Context, namespace string) error { + GinkgoWriter.Printf("Waiting for GRPCRoutes to be ready in namespace %q\n", namespace) // First, check if grpcroute even exists for v1. If not, ignore. 
var routeList v1.GRPCRouteList - err := rm.K8sClient.List(ctx, &routeList, client.InNamespace(namespace)) + err := rm.List(ctx, &routeList, client.InNamespace(namespace)) if err != nil && strings.Contains(err.Error(), "no matches for kind") { + GinkgoWriter.Printf("No GRPCRoute resources found in namespace %q, skipping wait\n", namespace) + return nil } @@ -589,7 +583,11 @@ func (rm *ResourceManager) waitForGRPCRoutesToBeReady(ctx context.Context, names true, /* poll immediately */ func(ctx context.Context) (bool, error) { var routeList v1.GRPCRouteList - if err := rm.K8sClient.List(ctx, &routeList, client.InNamespace(namespace)); err != nil { + if err := rm.List( + ctx, + &routeList, + client.InNamespace(namespace), + ); err != nil { return false, err } @@ -612,10 +610,10 @@ func (rm *ResourceManager) GetLBIPAddress(namespace string) (string, error) { var serviceList core.ServiceList var address string - if err := rm.K8sClient.List(ctx, &serviceList, client.InNamespace(namespace)); err != nil { - GinkgoWriter.Printf("ERROR occurred during getting list of services in namespace %q, error: %s\n", - namespace, err) - + if err := rm.List( + ctx, &serviceList, + client.InNamespace(namespace), + ); err != nil { return "", err } var nsName types.NamespacedName @@ -624,14 +622,7 @@ func (rm *ResourceManager) GetLBIPAddress(namespace string) (string, error) { if svc.Spec.Type == core.ServiceTypeLoadBalancer { nsName = types.NamespacedName{Namespace: svc.GetNamespace(), Name: svc.GetName()} if err := rm.waitForLBStatusToBeReady(ctx, nsName); err != nil { - lbStatusErr := fmt.Errorf("error getting status from LoadBalancer service: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during waiting for LoadBalancer service in namespace %q to be ready, error: %s\n", - nsName, - err, - ) - - return "", lbStatusErr + return "", fmt.Errorf("error getting status from LoadBalancer service: %w", err) } } } @@ -639,14 +630,8 @@ func (rm *ResourceManager) GetLBIPAddress(namespace string) (string, error) { if nsName.Name != "" { var lbService core.Service - if err := rm.K8sClient.Get(ctx, nsName, &lbService); err != nil { - getLBStatusErr := fmt.Errorf("error getting LoadBalancer service: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting LoadBalancer service in namespace %q, error: %s\n", - nsName, - err, - ) - - return "", getLBStatusErr + if err := rm.Get(ctx, nsName, &lbService); err != nil { + return "", fmt.Errorf("error getting LoadBalancer service: %w", err) } if lbService.Status.LoadBalancer.Ingress[0].IP != "" { address = lbService.Status.LoadBalancer.Ingress[0].IP @@ -665,7 +650,7 @@ func (rm *ResourceManager) waitForLBStatusToBeReady(ctx context.Context, svcNsNa true, /* poll immediately */ func(ctx context.Context) (bool, error) { var svc core.Service - if err := rm.K8sClient.Get(ctx, svcNsName, &svc); err != nil { + if err := rm.Get(ctx, svcNsName, &svc); err != nil { return false, err } if len(svc.Status.LoadBalancer.Ingress) > 0 { @@ -679,19 +664,14 @@ func (rm *ResourceManager) waitForLBStatusToBeReady(ctx context.Context, svcNsNa // GetClusterInfo retrieves node info and Kubernetes version from the cluster. 
func (rm *ResourceManager) GetClusterInfo() (ClusterInfo, error) { - GinkgoWriter.Printf("Getting cluster info\n") + GinkgoWriter.Printf("Getting cluster info|nodes\n") ctx, cancel := context.WithTimeout(context.Background(), rm.TimeoutConfig.GetTimeout) defer cancel() var nodes core.NodeList ci := &ClusterInfo{} - if err := rm.K8sClient.List(ctx, &nodes); err != nil { - getNodesErr := fmt.Errorf("error getting nodes: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting nodes in cluster, error: %s\n", - getNodesErr, - ) - - return *ci, getNodesErr + if err := rm.List(ctx, &nodes); err != nil { + return *ci, fmt.Errorf("error getting nodes: %w", err) } ci.NodeCount = len(nodes.Items) @@ -712,13 +692,8 @@ func (rm *ResourceManager) GetClusterInfo() (ClusterInfo, error) { var ns core.Namespace key := types.NamespacedName{Name: "kube-system"} - if err := rm.K8sClient.Get(ctx, key, &ns); err != nil { - getK8sNamespaceErr := fmt.Errorf("error getting kube-system namespace: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting kube-system namespace, error: %s\n", - getK8sNamespaceErr, - ) - - return *ci, getK8sNamespaceErr + if err := rm.Get(ctx, key, &ns); err != nil { + return *ci, fmt.Errorf("error getting kube-system namespace: %w", err) } ci.ID = string(ns.UID) @@ -733,19 +708,13 @@ func (rm *ResourceManager) GetPodNames(namespace string, labels client.MatchingL defer cancel() var podList core.PodList - if err := rm.K8sClient.List( + if err := rm.List( ctx, &podList, client.InNamespace(namespace), labels, ); err != nil { - getPodsErr := fmt.Errorf("error getting list of Pods: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting list of Pods in namespace %q, error: %s\n", - namespace, - getPodsErr, - ) - - return nil, getPodsErr + return nil, fmt.Errorf("error getting list of Pods: %w", err) } names := make([]string, 0, len(podList.Items)) @@ -765,19 +734,13 @@ func (rm *ResourceManager) GetPods(namespace string, labels client.MatchingLabel defer cancel() var podList core.PodList - if err := rm.K8sClient.List( + if err := rm.List( ctx, &podList, client.InNamespace(namespace), labels, ); err != nil { - getPodsErr := fmt.Errorf("error getting list of Pods: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting list of Pods in namespace %q, error: %s\n", - namespace, - getPodsErr, - ) - - return nil, getPodsErr + return nil, fmt.Errorf("error getting list of Pods: %w", err) } GinkgoWriter.Printf("Found %d pods in namespace %q\n", len(podList.Items), namespace) @@ -791,15 +754,8 @@ func (rm *ResourceManager) GetPod(namespace, name string) (*core.Pod, error) { defer cancel() var pod core.Pod - if err := rm.K8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, &pod); err != nil { - getPodErr := fmt.Errorf("error getting Pod: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting Pod %q in namespace %q, error: %s\n", - name, - namespace, - getPodErr, - ) - - return nil, getPodErr + if err := rm.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, &pod); err != nil { + return nil, fmt.Errorf("error getting Pod: %w", err) } GinkgoWriter.Printf("Found pod %q in namespace %q\n", name, namespace) @@ -849,7 +805,7 @@ func (rm *ResourceManager) GetNGFDeployment(namespace, releaseName string) (*app var deployments apps.DeploymentList - if err := rm.K8sClient.List( + if err := rm.List( ctx, &deployments, client.InNamespace(namespace), @@ -857,14 +813,7 @@ func (rm *ResourceManager) GetNGFDeployment(namespace, releaseName string) 
(*app "app.kubernetes.io/instance": releaseName, }, ); err != nil { - getDeploymentsErr := fmt.Errorf("error getting list of Deployments: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting list of Deployments in namespace %q with release %q, error: %s\n", - namespace, - releaseName, - getDeploymentsErr, - ) - - return nil, getDeploymentsErr + return nil, fmt.Errorf("error getting list of Deployments: %w", err) } if len(deployments.Items) != 1 { @@ -899,13 +848,7 @@ func (rm *ResourceManager) getGatewayClassNginxProxy( var proxy ngfAPIv1alpha2.NginxProxy proxyName := releaseName + "-proxy-config" - if err := rm.K8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: proxyName}, &proxy); err != nil { - GinkgoWriter.Printf("ERROR occurred during getting NginxProxy %q in namespace %q, error: %s\n", - proxyName, - namespace, - err, - ) - + if err := rm.Get(ctx, types.NamespacedName{Namespace: namespace, Name: proxyName}, &proxy); err != nil { return nil, err } GinkgoWriter.Printf("Successfully found NginxProxy %q in namespace %q\n", proxyName, namespace) @@ -939,15 +882,8 @@ func (rm *ResourceManager) ScaleNginxDeployment(namespace, releaseName string, r proxy.Spec.Kubernetes.Deployment.Replicas = &replicas - if err = rm.K8sClient.Update(ctx, proxy); err != nil { - updateNginxProxyErr := fmt.Errorf("error updating NginxProxy: %w", err) - GinkgoWriter.Printf("ERROR occurred during updating NginxProxy in namespace %q with release name %q, error: %s\n", - namespace, - releaseName, - updateNginxProxyErr, - ) - - return updateNginxProxyErr + if err = rm.Update(ctx, proxy, nil); err != nil { + return fmt.Errorf("error updating NginxProxy: %w", err) } GinkgoWriter.Printf("Successfully scaled Nginx Deployment in namespace %q with release name %q to %d replicas\n", @@ -966,18 +902,12 @@ func (rm *ResourceManager) GetEvents(namespace string) (*core.EventList, error) defer cancel() var eventList core.EventList - if err := rm.K8sClient.List( + if err := rm.List( ctx, &eventList, client.InNamespace(namespace), ); err != nil { - getEventsListErr := fmt.Errorf("error getting list of Events: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting Events in namespace %q, error: %s\n", - namespace, - getEventsListErr, - ) - - return &core.EventList{}, getEventsListErr + return &core.EventList{}, fmt.Errorf("error getting list of Events: %w", err) } GinkgoWriter.Printf("Successfully found %d Events in namespace %q\n", len(eventList.Items), namespace) @@ -991,27 +921,13 @@ func (rm *ResourceManager) ScaleDeployment(namespace, name string, replicas int3 defer cancel() var deployment apps.Deployment - if err := rm.K8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, &deployment); err != nil { - getDeploymentErr := fmt.Errorf("error getting Deployment: %w", err) - GinkgoWriter.Printf("ERROR occurred during getting Deployment in namespace %q with name %q, error: %s\n", - namespace, - name, - getDeploymentErr, - ) - - return getDeploymentErr + if err := rm.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, &deployment); err != nil { + return fmt.Errorf("error getting Deployment: %w", err) } deployment.Spec.Replicas = &replicas - if err := rm.K8sClient.Update(ctx, &deployment); err != nil { - updateDeploymentErr := fmt.Errorf("error updating Deployment: %w", err) - GinkgoWriter.Printf("ERROR occurred during updating Deployment in namespace %q with name %q, error: %s\n", - namespace, - name, - updateDeploymentErr, - ) - - return updateDeploymentErr + if 
err := rm.Update(ctx, &deployment, nil); err != nil { + return fmt.Errorf("error updating Deployment: %w", err) } GinkgoWriter.Printf("Successfully scaled Deployment %q in namespace %q to %d replicas\n", name, namespace, replicas) @@ -1019,13 +935,16 @@ func (rm *ResourceManager) ScaleDeployment(namespace, name string, replicas int3 } // GetReadyNGFPodNames returns the name(s) of the NGF Pod(s). -func GetReadyNGFPodNames( - k8sClient client.Client, +func (rm *ResourceManager) GetReadyNGFPodNames( namespace, releaseName string, timeout time.Duration, + opts ...Option, ) ([]string, error) { - GinkgoWriter.Printf("Getting ready NGF Pod names in namespace %q with release name %q\n", namespace, releaseName) + options := LogOptions(opts...) + if options.logEnabled { + GinkgoWriter.Printf("Getting ready NGF Pod names in namespace %q with release name %q\n", namespace, releaseName) + } ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -1037,7 +956,7 @@ func GetReadyNGFPodNames( true, // poll immediately func(ctx context.Context) (bool, error) { var podList core.PodList - if err := k8sClient.List( + if err := rm.List( ctx, &podList, client.InNamespace(namespace), @@ -1048,38 +967,45 @@ func GetReadyNGFPodNames( return false, fmt.Errorf("error getting list of NGF Pods: %w", err) } - ngfPodNames = getReadyPodNames(podList) + ngfPodNames = getReadyPodNames(podList, opts...) return len(ngfPodNames) > 0, nil }, ) if err != nil { waitingPodsErr := fmt.Errorf("timed out waiting for NGF Pods to be ready: %w", err) + if options.logEnabled { + GinkgoWriter.Printf( + "ERROR occurred during waiting for NGF Pods to be ready in namespace %q with release name %q, error: %s\n", + namespace, + releaseName, + waitingPodsErr, + ) + } + + return nil, waitingPodsErr + } + if options.logEnabled { GinkgoWriter.Printf( - "ERROR occurred during waiting for NGF Pods to be ready in namespace %q with release name %q, error: %s\n", + "Successfully found ready NGF Pod names in namespace %q with release name %q: %v\n", namespace, releaseName, - waitingPodsErr, + ngfPodNames, ) - - return nil, waitingPodsErr } - GinkgoWriter.Printf( - "Successfully found ready NGF Pod names in namespace %q with release name %q: %v\n", - namespace, - releaseName, - ngfPodNames, - ) return ngfPodNames, nil } // GetReadyNginxPodNames returns the name(s) of the NGINX Pod(s). -func GetReadyNginxPodNames( - k8sClient client.Client, +func (rm *ResourceManager) GetReadyNginxPodNames( namespace string, timeout time.Duration, + opts ...Option, ) ([]string, error) { - GinkgoWriter.Printf("Getting ready NGINX Pod names in namespace %q\n", namespace) + options := LogOptions(opts...) + if options.logEnabled { + GinkgoWriter.Printf("Getting ready NGINX Pod names in namespace %q\n", namespace) + } ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -1091,7 +1017,7 @@ func GetReadyNginxPodNames( true, // poll immediately func(ctx context.Context) (bool, error) { var podList core.PodList - if err := k8sClient.List( + if err := rm.List( ctx, &podList, client.InNamespace(namespace), @@ -1100,29 +1026,33 @@ func GetReadyNginxPodNames( return false, fmt.Errorf("error getting list of NGINX Pods: %w", err) } - nginxPodNames = getReadyPodNames(podList) + nginxPodNames = getReadyPodNames(podList, opts...) 
return len(nginxPodNames) > 0, nil }, ) if err != nil { waitingPodsErr := fmt.Errorf("timed out waiting for NGINX Pods to be ready: %w", err) - GinkgoWriter.Printf("ERROR occurred during waiting for NGINX Pods to be ready in namespace %q, error: %s\n", - namespace, - waitingPodsErr, - ) + if options.logEnabled { + GinkgoWriter.Printf("ERROR occurred during waiting for NGINX Pods to be ready in namespace %q, error: %s\n", + namespace, + waitingPodsErr, + ) + } return nil, waitingPodsErr } - GinkgoWriter.Printf( - "Successfully found ready NGINX Pod names in namespace %q: %v\n", - namespace, - nginxPodNames, - ) + if options.logEnabled { + GinkgoWriter.Printf( + "Successfully found ready NGINX Pod name(s) in namespace %q: %v\n", + namespace, + nginxPodNames, + ) + } return nginxPodNames, nil } -func getReadyPodNames(podList core.PodList) []string { +func getReadyPodNames(podList core.PodList, opts ...Option) []string { var names []string for _, pod := range podList.Items { for _, cond := range pod.Status.Conditions { @@ -1131,7 +1061,10 @@ func getReadyPodNames(podList core.PodList) []string { } } } - GinkgoWriter.Printf("Found %d ready pod names: %v\n", len(names), names) + options := LogOptions(opts...) + if options.logEnabled { + GinkgoWriter.Printf("Found %d ready pod name(s): %v\n", len(names), names) + } return names } @@ -1153,15 +1086,26 @@ func countNumberOfReadyParents(parents []v1.RouteParentStatus) int { // WaitForPodsToBeReadyWithCount waits for all Pods in the specified namespace to be ready or // until the provided context is canceled. -func (rm *ResourceManager) WaitForPodsToBeReadyWithCount(ctx context.Context, namespace string, count int) error { +func (rm *ResourceManager) WaitForPodsToBeReadyWithCount( + ctx context.Context, + namespace string, + count int, + opts ...Option, +) error { + options := LogOptions(opts...) GinkgoWriter.Printf("Waiting for %d pods to be ready in namespace %q\n", count, namespace) + return wait.PollUntilContextCancel( ctx, 500*time.Millisecond, true, /* poll immediately */ func(ctx context.Context) (bool, error) { var podList core.PodList - if err := rm.K8sClient.List(ctx, &podList, client.InNamespace(namespace)); err != nil { + if err := rm.List( + ctx, + &podList, + client.InNamespace(namespace), + ); err != nil { return false, err } @@ -1173,7 +1117,9 @@ func (rm *ResourceManager) WaitForPodsToBeReadyWithCount(ctx context.Context, na } } } - GinkgoWriter.Printf("Found %d/%d ready pods in namespace %q\n", podsReady, count, namespace) + if options.logEnabled { + GinkgoWriter.Printf("Found %d/%d ready pods in namespace %q\n", podsReady, count, namespace) + } return podsReady == count, nil }, @@ -1199,7 +1145,7 @@ func (rm *ResourceManager) WaitForGatewayObservedGeneration( func(ctx context.Context) (bool, error) { var gw v1.Gateway key := types.NamespacedName{Namespace: namespace, Name: name} - if err := rm.K8sClient.Get(ctx, key, &gw); err != nil { + if err := rm.Get(ctx, key, &gw); err != nil { return false, err } @@ -1216,8 +1162,15 @@ func (rm *ResourceManager) WaitForGatewayObservedGeneration( // GetNginxConfig uses crossplane to get the nginx configuration and convert it to JSON. // If the crossplane image is loaded locally on the node, crossplaneImageRepo can be empty. 
-func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace, crossplaneImageRepo string) (*Payload, error) { +func (rm *ResourceManager) GetNginxConfig( + nginxPodName, + namespace, + crossplaneImageRepo string, + opts ...Option, +) (*Payload, error) { GinkgoWriter.Printf("Getting NGINX config from pod %q in namespace %q\n", nginxPodName, namespace) + options := LogOptions(opts...) + if err := injectCrossplaneContainer( rm.ClientGoClient, rm.TimeoutConfig.UpdateTimeout, @@ -1263,10 +1216,12 @@ func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace, crossplaneIma }, ); err != nil { containerErr := fmt.Errorf("could not connect to ephemeral container: %w", err) - GinkgoWriter.Printf("ERROR occurred during waiting for NGINX Pods to be ready in namespace %q, error: %s\n", - namespace, - containerErr, - ) + if options.logEnabled { + GinkgoWriter.Printf("ERROR occurred during connecting to ephemeral container in namespace %q, error: %s\n", + namespace, + containerErr, + ) + } return nil, containerErr } @@ -1286,3 +1241,94 @@ func (rm *ResourceManager) GetNginxConfig(nginxPodName, namespace, crossplaneIma return conf, nil } + +// Get retrieves a resource by key, logging errors if enabled. +func (rm *ResourceManager) Get( + ctx context.Context, + key client.ObjectKey, + obj client.Object, + opts ...Option, +) error { + options := LogOptions(opts...) + if err := rm.K8sClient.Get(ctx, key, obj); err != nil { + if options.logEnabled { + GinkgoWriter.Printf("Could not get k8s resource %q, error: %v\n", obj.GetName(), err) + } + + return err + } + + return nil +} + +// Create adds a new resource, returning an error on failure. +func (rm *ResourceManager) Create( + ctx context.Context, + obj client.Object, +) error { + if err := rm.K8sClient.Create(ctx, obj); err != nil { + createErr := fmt.Errorf("error creating k8s resource %q: %w", obj.GetName(), err) + GinkgoWriter.Printf("%v\n", createErr) + + return createErr + } + return nil +} + +// Delete removes a resource, returning an error on failure. +func (rm *ResourceManager) Delete( + ctx context.Context, + obj client.Object, + deleteOpts []client.DeleteOption, + opts ...Option, +) error { + options := LogOptions(opts...) + if err := rm.K8sClient.Delete(ctx, obj, deleteOpts...); err != nil { + if options.logEnabled { + GinkgoWriter.Printf("Could not delete k8s resource %q, error: %v\n", obj.GetName(), err) + } + + return err + } + return nil +} + +// Update modifies a resource. +func (rm *ResourceManager) Update( + ctx context.Context, + obj client.Object, + updateOpts []client.UpdateOption, + opts ...Option, +) error { + options := LogOptions(opts...) + if err := rm.K8sClient.Update(ctx, obj, updateOpts...); err != nil { + updateResourceErr := fmt.Errorf("error updating k8s resource: %w", err) + if options.logEnabled { + GinkgoWriter.Printf( + "ERROR occurred during updating k8s resource in namespace %q with name %q, error: %s\n", + obj.GetNamespace(), + obj.GetName(), + updateResourceErr, + ) + } + + return updateResourceErr + } + + return nil +} + +// List retrieves a list of resources, returning an error on failure. 
+func (rm *ResourceManager) List( + ctx context.Context, + list client.ObjectList, + listOpts ...client.ListOption, +) error { + if err := rm.K8sClient.List(ctx, list, listOpts...); err != nil { + listErr := fmt.Errorf("error listing k8s resources: %w", err) + GinkgoWriter.Printf("%v\n", listErr) + + return listErr + } + return nil +} diff --git a/tests/framework/results.go b/tests/framework/results.go index 120dbcc3a5..87bba0a0b4 100644 --- a/tests/framework/results.go +++ b/tests/framework/results.go @@ -9,6 +9,7 @@ import ( "os/exec" "path/filepath" + . "github.com/onsi/ginkgo/v2" vegeta "github.com/tsenart/vegeta/v12/lib" ) @@ -23,7 +24,10 @@ func CreateResultsDir(testName, version string) (string, error) { if _, err := os.Stat(dirName); err == nil { if err := os.RemoveAll(dirName); err != nil { - return "", fmt.Errorf("failed to remove existing directory %s: %w", dirName, err) + rmDirErr := fmt.Errorf("failed to remove existing directory %s: %w", dirName, err) + GinkgoWriter.Printf("ERROR occurred during removing existing results directory %q, error: %s\n", dirName, rmDirErr) + + return "", rmDirErr } } @@ -34,6 +38,8 @@ func CreateResultsFile(filename string) (*os.File, error) { outFile, err := os.OpenFile(filename, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0o644) if err != nil { + GinkgoWriter.Printf("ERROR occurred during creating results file %q, error: %s\n", filename, err) + return nil, err } @@ -65,13 +71,19 @@ func WriteSystemInfoToFile(file *os.File, ci ClusterInfo, plus bool) error { plus, commit, date, dirty, clusterType, ci.NodeCount, ci.K8sVersion, ci.CPUCountPerNode, ci.MemoryPerNode, ci.MaxPodsPerNode, ) if _, err := fmt.Fprint(file, text); err != nil { + GinkgoWriter.Printf("ERROR occurred during writing system info to results file, error: %s\n", err) + return err } if ci.IsGKE { if _, err := fmt.Fprintf(file, "- Zone: %s\n- Instance Type: %s\n", ci.GkeZone, ci.GkeInstanceType); err != nil { + GinkgoWriter.Printf("ERROR occurred during writing GKE info to results file, error: %s\n", err) + return err } } + GinkgoWriter.Printf("Wrote system info to results file\n") + return nil } @@ -89,6 +101,13 @@ func generatePNG(resultsDir, inputFilename, outputFilename, configFilename strin output, err := cmd.CombinedOutput() if err != nil { + GinkgoWriter.Printf( + "ERROR occurred during generating PNG %q using gnuplot, error: %s, output: %s\n", + outputFilename, + err, + string(output), + ) + return fmt.Errorf("failed to generate PNG: %w; output: %s", err, string(output)) } @@ -118,13 +137,20 @@ func GenerateMemoryPNG(resultsDir, inputFilename, outputFilename string) error { // WriteMetricsResults writes the metrics results to the results file in text format. func WriteMetricsResults(resultsFile *os.File, metrics *Metrics) error { reporter := vegeta.NewTextReporter(&metrics.Metrics) + reporterErr := reporter.Report(resultsFile) + if reporterErr != nil { + GinkgoWriter.Printf("ERROR occurred during writing metrics results to results file, error: %s\n", reporterErr) + + return reporterErr + } + GinkgoWriter.Printf("Wrote metrics results to results file %q\n", resultsFile.Name()) - return reporter.Report(resultsFile) + return nil } // WriteContent writes basic content to the results file. 
func WriteContent(resultsFile *os.File, content string) error { if _, err := fmt.Fprintln(resultsFile, content); err != nil { + GinkgoWriter.Printf("ERROR occurred during writing content to results file, error: %s\n", err) + return err } diff --git a/tests/results/dp-perf/edge/edge-oss.md b/tests/results/dp-perf/edge/edge-oss.md index ee23694a87..56ed6964b9 100644 --- a/tests/results/dp-perf/edge/edge-oss.md +++ b/tests/results/dp-perf/edge/edge-oss.md @@ -6,16 +6,16 @@ NGINX Plus: false NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 @@ -23,10 +23,10 @@ GKE Cluster: ## Test1: Running latte path based routing ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 569.726µs -Latencies [min, mean, 50, 90, 95, 99, max] 492.479µs, 670.385µs, 659.036µs, 746.275µs, 777.873µs, 857.407µs, 10.667ms -Bytes In [total, mean] 4800000, 160.00 +Requests [total, rate, throughput] 30000, 1000.02, 999.99 +Duration [total, attack, wait] 30s, 29.999s, 823.134µs +Latencies [min, mean, 50, 90, 95, 99, max] 701.612µs, 944.922µs, 913.567µs, 1.064ms, 1.127ms, 1.318ms, 19.252ms +Bytes In [total, mean] 4770000, 159.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -36,23 +36,23 @@ Error Set: ## Test2: Running coffee header based routing ```text -Requests [total, rate, throughput] 29999, 1000.00, 999.98 -Duration [total, attack, wait] 30s, 29.999s, 665.107µs -Latencies [min, mean, 50, 90, 95, 99, max] 518.165µs, 707.025µs, 693.839µs, 792.941µs, 827.269µs, 914.615µs, 9.399ms -Bytes In [total, mean] 4829839, 161.00 +Requests [total, rate, throughput] 30000, 1000.02, 999.98 +Duration [total, attack, wait] 30s, 29.999s, 1.086ms +Latencies [min, mean, 50, 90, 95, 99, max] 709.701µs, 978.507µs, 947.668µs, 1.092ms, 1.16ms, 1.369ms, 18.067ms +Bytes In [total, mean] 4800000, 160.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% -Status Codes [code:count] 200:29999 +Status Codes [code:count] 200:30000 Error Set: ``` ## Test3: Running coffee query based routing ```text -Requests [total, rate, throughput] 30000, 1000.03, 1000.01 -Duration [total, attack, wait] 30s, 29.999s, 715.919µs -Latencies [min, mean, 50, 90, 95, 99, max] 535.068µs, 708.655µs, 696.175µs, 794.741µs, 829.728µs, 926.641µs, 9.422ms -Bytes In [total, mean] 5070000, 169.00 +Requests [total, rate, throughput] 30000, 1000.01, 999.98 +Duration [total, attack, wait] 30.001s, 30s, 907.607µs +Latencies [min, mean, 50, 90, 95, 99, max] 754.461µs, 1.003ms, 978.075µs, 1.155ms, 1.226ms, 1.387ms, 10.062ms +Bytes In [total, mean] 5040000, 168.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -62,10 +62,10 @@ Error Set: ## Test4: Running tea GET method based routing ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 689.244µs -Latencies [min, mean, 50, 90, 95, 99, max] 517.044µs, 689.83µs, 678.3µs, 768.738µs, 802.493µs, 884.763µs, 13.123ms -Bytes In [total, mean] 4740000, 158.00 +Requests [total, rate, throughput] 30000, 1000.04, 1000.01 +Duration 
[total, attack, wait] 30s, 29.999s, 832.592µs +Latencies [min, mean, 50, 90, 95, 99, max] 709.117µs, 967.336µs, 940.759µs, 1.083ms, 1.15ms, 1.346ms, 16.037ms +Bytes In [total, mean] 4710000, 157.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -75,10 +75,10 @@ Error Set: ## Test5: Running tea POST method based routing ```text -Requests [total, rate, throughput] 30000, 1000.03, 1000.01 -Duration [total, attack, wait] 30s, 29.999s, 618.418µs -Latencies [min, mean, 50, 90, 95, 99, max] 506.217µs, 700.343µs, 688.984µs, 785.078µs, 815.876µs, 898.036µs, 9.243ms -Bytes In [total, mean] 4740000, 158.00 +Requests [total, rate, throughput] 30000, 1000.04, 1000.00 +Duration [total, attack, wait] 30s, 29.999s, 1.005ms +Latencies [min, mean, 50, 90, 95, 99, max] 733.008µs, 1.005ms, 975.492µs, 1.137ms, 1.201ms, 1.382ms, 21.294ms +Bytes In [total, mean] 4710000, 157.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 diff --git a/tests/results/dp-perf/edge/edge-plus.md b/tests/results/dp-perf/edge/edge-plus.md index 33e8224482..a4ebc3699e 100644 --- a/tests/results/dp-perf/edge/edge-plus.md +++ b/tests/results/dp-perf/edge/edge-plus.md @@ -6,16 +6,16 @@ NGINX Plus: true NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 @@ -23,10 +23,10 @@ GKE Cluster: ## Test1: Running latte path based routing ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 662.781µs -Latencies [min, mean, 50, 90, 95, 99, max] 484.55µs, 685.236µs, 667.104µs, 770.782µs, 811.42µs, 914.291µs, 14.722ms -Bytes In [total, mean] 4830000, 161.00 +Requests [total, rate, throughput] 30000, 1000.01, 999.98 +Duration [total, attack, wait] 30.001s, 30s, 900.89µs +Latencies [min, mean, 50, 90, 95, 99, max] 714.789µs, 966.238µs, 944.115µs, 1.062ms, 1.112ms, 1.285ms, 37.418ms +Bytes In [total, mean] 4740000, 158.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -36,10 +36,10 @@ Error Set: ## Test2: Running coffee header based routing ```text -Requests [total, rate, throughput] 30000, 1000.03, 1000.01 -Duration [total, attack, wait] 30s, 29.999s, 762.608µs -Latencies [min, mean, 50, 90, 95, 99, max] 518.259µs, 717.123µs, 697.898µs, 803.297µs, 846.307µs, 967.712µs, 18.472ms -Bytes In [total, mean] 4860000, 162.00 +Requests [total, rate, throughput] 30000, 1000.01, 999.98 +Duration [total, attack, wait] 30.001s, 30s, 860.973µs +Latencies [min, mean, 50, 90, 95, 99, max] 753.171µs, 970.828µs, 948.946µs, 1.067ms, 1.118ms, 1.295ms, 20.518ms +Bytes In [total, mean] 4770000, 159.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -49,10 +49,10 @@ Error Set: ## Test3: Running coffee query based routing ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 686.373µs -Latencies [min, mean, 50, 90, 95, 99, max] 492.406µs, 724.55µs, 708.483µs, 817.376µs, 858.736µs, 976.771µs, 11.812ms -Bytes In [total, mean] 5100000, 170.00 +Requests [total, rate, throughput] 
30000, 1000.04, 1000.01 +Duration [total, attack, wait] 30s, 29.999s, 967.396µs +Latencies [min, mean, 50, 90, 95, 99, max] 770.147µs, 988.786µs, 968.93µs, 1.085ms, 1.137ms, 1.289ms, 22.817ms +Bytes In [total, mean] 5010000, 167.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -62,10 +62,10 @@ Error Set: ## Test4: Running tea GET method based routing ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 695.346µs -Latencies [min, mean, 50, 90, 95, 99, max] 529.34µs, 718.101µs, 700.968µs, 809.033µs, 850.203µs, 954.285µs, 12.013ms -Bytes In [total, mean] 4770000, 159.00 +Requests [total, rate, throughput] 30000, 1000.04, 1000.00 +Duration [total, attack, wait] 30s, 29.999s, 1.021ms +Latencies [min, mean, 50, 90, 95, 99, max] 725.58µs, 975.886µs, 954.237µs, 1.07ms, 1.121ms, 1.291ms, 21.906ms +Bytes In [total, mean] 4680000, 156.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -75,10 +75,10 @@ Error Set: ## Test5: Running tea POST method based routing ```text -Requests [total, rate, throughput] 30000, 1000.01, 999.99 -Duration [total, attack, wait] 30s, 30s, 683.707µs -Latencies [min, mean, 50, 90, 95, 99, max] 510.765µs, 709.274µs, 692.005µs, 795.696µs, 836.686µs, 946.19µs, 15.089ms -Bytes In [total, mean] 4770000, 159.00 +Requests [total, rate, throughput] 30000, 1000.04, 1000.01 +Duration [total, attack, wait] 30s, 29.999s, 881.157µs +Latencies [min, mean, 50, 90, 95, 99, max] 740.614µs, 958.919µs, 938.262µs, 1.054ms, 1.105ms, 1.28ms, 19.591ms +Bytes In [total, mean] 4680000, 156.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 diff --git a/tests/results/ngf-upgrade/edge/edge-oss.md b/tests/results/ngf-upgrade/edge/edge-oss.md index b91e00b68a..92c35e1b7a 100644 --- a/tests/results/ngf-upgrade/edge/edge-oss.md +++ b/tests/results/ngf-upgrade/edge/edge-oss.md @@ -6,46 +6,46 @@ NGINX Plus: false NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 -## Test: Send http /coffee traffic +## Test: Send https /tea traffic ```text -Requests [total, rate, throughput] 6000, 100.02, 100.01 -Duration [total, attack, wait] 59.992s, 59.991s, 864.529µs -Latencies [min, mean, 50, 90, 95, 99, max] 470.608µs, 866.796µs, 864.021µs, 991.562µs, 1.037ms, 1.156ms, 10.317ms -Bytes In [total, mean] 967993, 161.33 +Requests [total, rate, throughput] 6000, 100.02, 100.02 +Duration [total, attack, wait] 59.991s, 59.99s, 1.188ms +Latencies [min, mean, 50, 90, 95, 99, max] 897.354µs, 1.237ms, 1.21ms, 1.357ms, 1.41ms, 1.536ms, 14.359ms +Bytes In [total, mean] 924000, 154.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:6000 Error Set: ``` -![http-oss.png](http-oss.png) +![https-oss.png](https-oss.png) -## Test: Send https /tea traffic +## Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 6000, 100.02, 100.01 -Duration [total, attack, wait] 59.992s, 59.991s, 850.998µs -Latencies [min, mean, 50, 90, 95, 99, max] 455.17µs, 901.793µs, 886.37µs, 1.017ms, 1.06ms, 
1.177ms, 10.281ms -Bytes In [total, mean] 931993, 155.33 +Duration [total, attack, wait] 59.991s, 59.99s, 1.398ms +Latencies [min, mean, 50, 90, 95, 99, max] 897.295µs, 1.211ms, 1.204ms, 1.356ms, 1.401ms, 1.541ms, 9.367ms +Bytes In [total, mean] 960000, 160.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:6000 Error Set: ``` -![https-oss.png](https-oss.png) +![http-oss.png](http-oss.png) diff --git a/tests/results/ngf-upgrade/edge/edge-plus.md b/tests/results/ngf-upgrade/edge/edge-plus.md index 2bf3b4d5f8..6941ae801e 100644 --- a/tests/results/ngf-upgrade/edge/edge-plus.md +++ b/tests/results/ngf-upgrade/edge/edge-plus.md @@ -6,46 +6,46 @@ NGINX Plus: true NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 -## Test: Send http /coffee traffic +## Test: Send https /tea traffic ```text -Requests [total, rate, throughput] 6000, 100.02, 100.02 -Duration [total, attack, wait] 59.99s, 59.989s, 855.743µs -Latencies [min, mean, 50, 90, 95, 99, max] 635.422µs, 851.551µs, 832.349µs, 964.217µs, 1.017ms, 1.176ms, 9.726ms -Bytes In [total, mean] 972000, 162.00 +Requests [total, rate, throughput] 6000, 100.02, 100.01 +Duration [total, attack, wait] 59.991s, 59.99s, 1.072ms +Latencies [min, mean, 50, 90, 95, 99, max] 893.778µs, 1.174ms, 1.148ms, 1.28ms, 1.33ms, 1.457ms, 12.871ms +Bytes In [total, mean] 912000, 152.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:6000 Error Set: ``` -![http-plus.png](http-plus.png) +![https-plus.png](https-plus.png) -## Test: Send https /tea traffic +## Test: Send http /coffee traffic ```text -Requests [total, rate, throughput] 6000, 100.02, 100.02 -Duration [total, attack, wait] 59.991s, 59.99s, 673.229µs -Latencies [min, mean, 50, 90, 95, 99, max] 439.111µs, 903.895µs, 887.003µs, 1.057ms, 1.116ms, 1.284ms, 9.699ms -Bytes In [total, mean] 936000, 156.00 +Requests [total, rate, throughput] 6000, 100.02, 100.01 +Duration [total, attack, wait] 59.991s, 59.99s, 812.181µs +Latencies [min, mean, 50, 90, 95, 99, max] 595.829µs, 1.048ms, 1.069ms, 1.268ms, 1.322ms, 1.429ms, 15.22ms +Bytes In [total, mean] 949964, 158.33 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:6000 Error Set: ``` -![https-plus.png](https-plus.png) +![http-plus.png](http-plus.png) diff --git a/tests/results/ngf-upgrade/edge/http-oss.png b/tests/results/ngf-upgrade/edge/http-oss.png index e17fa179ba..041b7ae8a5 100644 Binary files a/tests/results/ngf-upgrade/edge/http-oss.png and b/tests/results/ngf-upgrade/edge/http-oss.png differ diff --git a/tests/results/ngf-upgrade/edge/http-plus.png b/tests/results/ngf-upgrade/edge/http-plus.png index 8d812c3435..4aaac25491 100644 Binary files a/tests/results/ngf-upgrade/edge/http-plus.png and b/tests/results/ngf-upgrade/edge/http-plus.png differ diff --git a/tests/results/ngf-upgrade/edge/https-oss.png b/tests/results/ngf-upgrade/edge/https-oss.png index e17fa179ba..041b7ae8a5 100644 Binary files a/tests/results/ngf-upgrade/edge/https-oss.png and b/tests/results/ngf-upgrade/edge/https-oss.png differ diff --git 
a/tests/results/ngf-upgrade/edge/https-plus.png b/tests/results/ngf-upgrade/edge/https-plus.png index 8d812c3435..4aaac25491 100644 Binary files a/tests/results/ngf-upgrade/edge/https-plus.png and b/tests/results/ngf-upgrade/edge/https-plus.png differ diff --git a/tests/results/reconfig/edge/edge-oss.md b/tests/results/reconfig/edge/edge-oss.md index a9a5e8f46b..655b6ef857 100644 --- a/tests/results/reconfig/edge/edge-oss.md +++ b/tests/results/reconfig/edge/edge-oss.md @@ -6,201 +6,100 @@ NGINX Plus: false NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 ## Test 1: Resources exist before startup - NumResources 30 -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: 3s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 2 -- NGINX Reload Average Time: 101ms -- Reload distribution: - - 500.0ms: 2 - - 1000.0ms: 2 - - 5000.0ms: 2 - - 10000.0ms: 2 - - 30000.0ms: 2 - - +Infms: 2 +Time To Ready Description: From when NGF starts to when the NGINX configuration is fully configured +- TimeToReadyTotal: 25s ### Event Batch Processing -- Event Batch Total: 5 -- Event Batch Processing Average Time: 53ms +- Event Batch Total: 10 +- Event Batch Processing Average Time: 3ms - Event Batch Processing distribution: - - 500.0ms: 5 - - 1000.0ms: 5 - - 5000.0ms: 5 - - 10000.0ms: 5 - - 30000.0ms: 5 - - +Infms: 5 + - 500.0ms: 10 + - 1000.0ms: 10 + - 5000.0ms: 10 + - 10000.0ms: 10 + - 30000.0ms: 10 + - +Infms: 10 ### NGINX Error Logs - ## Test 1: Resources exist before startup - NumResources 150 -### Reloads and Time to Ready - -- TimeToReadyTotal: 3s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 2 -- NGINX Reload Average Time: 88ms -- Reload distribution: - - 500.0ms: 2 - - 1000.0ms: 2 - - 5000.0ms: 2 - - 10000.0ms: 2 - - 30000.0ms: 2 - - +Infms: 2 - -### Event Batch Processing - -- Event Batch Total: 6 -- Event Batch Processing Average Time: 45ms -- Event Batch Processing distribution: - - 500.0ms: 6 - - 1000.0ms: 6 - - 5000.0ms: 6 - - 10000.0ms: 6 - - 30000.0ms: 6 - - +Infms: 6 - -### NGINX Error Logs - - -## Test 2: Start NGF, deploy Gateway, create many resources attached to GW - NumResources 30 - -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: 8s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 63 -- NGINX Reload Average Time: 125ms -- Reload distribution: - - 500.0ms: 63 - - 1000.0ms: 63 - - 5000.0ms: 63 - - 10000.0ms: 63 - - 30000.0ms: 63 - - +Infms: 63 +Time To Ready Description: From when NGF starts to when the NGINX configuration is fully configured +- TimeToReadyTotal: 27s ### Event Batch Processing -- Event Batch Total: 337 -- Event Batch Processing Average Time: 23ms +- Event Batch Total: 11 +- Event Batch Processing Average Time: 10ms - Event Batch Processing distribution: - - 500.0ms: 337 - - 1000.0ms: 337 - - 5000.0ms: 337 - - 10000.0ms: 337 - - 30000.0ms: 337 - - +Infms: 337 + - 500.0ms: 11 + - 1000.0ms: 11 + - 5000.0ms: 11 + - 10000.0ms: 11 + - 30000.0ms: 11 + - +Infms: 11 ### NGINX Error Logs +## Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, create many resources attached to GW - NumResources 30 -## Test 2: Start NGF, 
deploy Gateway, create many resources attached to GW - NumResources 150 - -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: 44s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 343 -- NGINX Reload Average Time: 125ms -- Reload distribution: - - 500.0ms: 343 - - 1000.0ms: 343 - - 5000.0ms: 343 - - 10000.0ms: 343 - - 30000.0ms: 343 - - +Infms: 343 +Time To Ready Description: From when NGINX receives the first configuration created by NGF to when the NGINX configuration is fully configured +- TimeToReadyTotal: 21s ### Event Batch Processing -- Event Batch Total: 1689 -- Event Batch Processing Average Time: 25ms -- Event Batch Processing distribution: - - 500.0ms: 1689 - - 1000.0ms: 1689 - - 5000.0ms: 1689 - - 10000.0ms: 1689 - - 30000.0ms: 1689 - - +Infms: 1689 - -### NGINX Error Logs - - -## Test 3: Start NGF, create many resources attached to a Gateway, deploy the Gateway - NumResources 30 - -### Reloads and Time to Ready - -- TimeToReadyTotal: < 1s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 64 -- NGINX Reload Average Time: 125ms -- Reload distribution: - - 500.0ms: 64 - - 1000.0ms: 64 - - 5000.0ms: 64 - - 10000.0ms: 64 - - 30000.0ms: 64 - - +Infms: 64 - -### Event Batch Processing - -- Event Batch Total: 321 -- Event Batch Processing Average Time: 25ms +- Event Batch Total: 247 +- Event Batch Processing Average Time: 26ms - Event Batch Processing distribution: - - 500.0ms: 321 - - 1000.0ms: 321 - - 5000.0ms: 321 - - 10000.0ms: 321 - - 30000.0ms: 321 - - +Infms: 321 + - 500.0ms: 239 + - 1000.0ms: 247 + - 5000.0ms: 247 + - 10000.0ms: 247 + - 30000.0ms: 247 + - +Infms: 247 ### NGINX Error Logs +## Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, create many resources attached to GW - NumResources 150 -## Test 3: Start NGF, create many resources attached to a Gateway, deploy the Gateway - NumResources 150 +### Time to Ready -### Reloads and Time to Ready - -- TimeToReadyTotal: < 1s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 342 -- NGINX Reload Average Time: 125ms -- Reload distribution: - - 500.0ms: 342 - - 1000.0ms: 342 - - 5000.0ms: 342 - - 10000.0ms: 342 - - 30000.0ms: 342 - - +Infms: 342 +Time To Ready Description: From when NGINX receives the first configuration created by NGF to when the NGINX configuration is fully configured +- TimeToReadyTotal: 112s ### Event Batch Processing -- Event Batch Total: 1639 -- Event Batch Processing Average Time: 26ms +- Event Batch Total: 1265 +- Event Batch Processing Average Time: 23ms - Event Batch Processing distribution: - - 500.0ms: 1639 - - 1000.0ms: 1639 - - 5000.0ms: 1639 - - 10000.0ms: 1639 - - 30000.0ms: 1639 - - +Infms: 1639 + - 500.0ms: 1229 + - 1000.0ms: 1265 + - 5000.0ms: 1265 + - 10000.0ms: 1265 + - 30000.0ms: 1265 + - +Infms: 1265 ### NGINX Error Logs diff --git a/tests/results/reconfig/edge/edge-plus.md b/tests/results/reconfig/edge/edge-plus.md index b339fb3c0d..fccf4182c6 100644 --- a/tests/results/reconfig/edge/edge-plus.md +++ b/tests/results/reconfig/edge/edge-plus.md @@ -6,202 +6,100 @@ NGINX Plus: true NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 ## Test 1: 
Resources exist before startup - NumResources 30 -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: 4s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 2 -- NGINX Reload Average Time: 100ms -- Reload distribution: - - 500.0ms: 2 - - 1000.0ms: 2 - - 5000.0ms: 2 - - 10000.0ms: 2 - - 30000.0ms: 2 - - +Infms: 2 +Time To Ready Description: From when NGF starts to when the NGINX configuration is fully configured +- TimeToReadyTotal: 12s ### Event Batch Processing -- Event Batch Total: 6 -- Event Batch Processing Average Time: 52ms -- Event Batch Processing distribution: - - 500.0ms: 6 - - 1000.0ms: 6 - - 5000.0ms: 6 - - 10000.0ms: 6 - - 30000.0ms: 6 - - +Infms: 6 - -### NGINX Error Logs - - -## Test 1: Resources exist before startup - NumResources 150 - -### Reloads and Time to Ready - -- TimeToReadyTotal: 4s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 2 -- NGINX Reload Average Time: 100ms -- Reload distribution: - - 500.0ms: 2 - - 1000.0ms: 2 - - 5000.0ms: 2 - - 10000.0ms: 2 - - 30000.0ms: 2 - - +Infms: 2 - -### Event Batch Processing - -- Event Batch Total: 6 -- Event Batch Processing Average Time: 53ms -- Event Batch Processing distribution: - - 500.0ms: 6 - - 1000.0ms: 6 - - 5000.0ms: 6 - - 10000.0ms: 6 - - 30000.0ms: 6 - - +Infms: 6 - -### NGINX Error Logs - - -## Test 2: Start NGF, deploy Gateway, create many resources attached to GW - NumResources 30 - -### Reloads and Time to Ready - -- TimeToReadyTotal: 8s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 47 -- NGINX Reload Average Time: 148ms -- Reload distribution: - - 500.0ms: 47 - - 1000.0ms: 47 - - 5000.0ms: 47 - - 10000.0ms: 47 - - 30000.0ms: 47 - - +Infms: 47 - -### Event Batch Processing - -- Event Batch Total: 322 +- Event Batch Total: 10 - Event Batch Processing Average Time: 25ms - Event Batch Processing distribution: - - 500.0ms: 322 - - 1000.0ms: 322 - - 5000.0ms: 322 - - 10000.0ms: 322 - - 30000.0ms: 322 - - +Infms: 322 + - 500.0ms: 10 + - 1000.0ms: 10 + - 5000.0ms: 10 + - 10000.0ms: 10 + - 30000.0ms: 10 + - +Infms: 10 ### NGINX Error Logs +## Test 1: Resources exist before startup - NumResources 150 -## Test 2: Start NGF, deploy Gateway, create many resources attached to GW - NumResources 150 - -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: 20s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 117 -- NGINX Reload Average Time: 150ms -- Reload distribution: - - 500.0ms: 117 - - 1000.0ms: 117 - - 5000.0ms: 117 - - 10000.0ms: 117 - - 30000.0ms: 117 - - +Infms: 117 +Time To Ready Description: From when NGF starts to when the NGINX configuration is fully configured +- TimeToReadyTotal: 19s ### Event Batch Processing -- Event Batch Total: 1460 -- Event Batch Processing Average Time: 14ms +- Event Batch Total: 9 +- Event Batch Processing Average Time: 21ms - Event Batch Processing distribution: - - 500.0ms: 1460 - - 1000.0ms: 1460 - - 5000.0ms: 1460 - - 10000.0ms: 1460 - - 30000.0ms: 1460 - - +Infms: 1460 + - 500.0ms: 9 + - 1000.0ms: 9 + - 5000.0ms: 9 + - 10000.0ms: 9 + - 30000.0ms: 9 + - +Infms: 9 ### NGINX Error Logs -2025/03/15 17:00:26 [emerg] 48#48: invalid instance state file "/var/lib/nginx/state/nginx-mgmt-state" - -## Test 3: Start NGF, create many resources attached to a Gateway, deploy the Gateway - NumResources 30 +## Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, create many resources attached to GW - NumResources 30 -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: < 1s -- TimeToReadyAvgSingle: < 1s -- NGINX 
Reloads: 46 -- NGINX Reload Average Time: 133ms -- Reload distribution: - - 500.0ms: 46 - - 1000.0ms: 46 - - 5000.0ms: 46 - - 10000.0ms: 46 - - 30000.0ms: 46 - - +Infms: 46 +Time To Ready Description: From when NGINX receives the first configuration created by NGF to when the NGINX configuration is fully configured +- TimeToReadyTotal: 24s ### Event Batch Processing -- Event Batch Total: 291 -- Event Batch Processing Average Time: 28ms +- Event Batch Total: 255 +- Event Batch Processing Average Time: 36ms - Event Batch Processing distribution: - - 500.0ms: 291 - - 1000.0ms: 291 - - 5000.0ms: 291 - - 10000.0ms: 291 - - 30000.0ms: 291 - - +Infms: 291 + - 500.0ms: 244 + - 1000.0ms: 251 + - 5000.0ms: 255 + - 10000.0ms: 255 + - 30000.0ms: 255 + - +Infms: 255 ### NGINX Error Logs +## Test 2: Start NGF, deploy Gateway, wait until NGINX agent instance connects to NGF, create many resources attached to GW - NumResources 150 -## Test 3: Start NGF, create many resources attached to a Gateway, deploy the Gateway - NumResources 150 - -### Reloads and Time to Ready +### Time to Ready -- TimeToReadyTotal: < 1s -- TimeToReadyAvgSingle: < 1s -- NGINX Reloads: 258 -- NGINX Reload Average Time: 132ms -- Reload distribution: - - 500.0ms: 258 - - 1000.0ms: 258 - - 5000.0ms: 258 - - 10000.0ms: 258 - - 30000.0ms: 258 - - +Infms: 258 +Time To Ready Description: From when NGINX receives the first configuration created by NGF to when the NGINX configuration is fully configured +- TimeToReadyTotal: 128s ### Event Batch Processing -- Event Batch Total: 1501 +- Event Batch Total: 1298 - Event Batch Processing Average Time: 29ms - Event Batch Processing distribution: - - 500.0ms: 1501 - - 1000.0ms: 1501 - - 5000.0ms: 1501 - - 10000.0ms: 1501 - - 30000.0ms: 1501 - - +Infms: 1501 + - 500.0ms: 1287 + - 1000.0ms: 1288 + - 5000.0ms: 1297 + - 10000.0ms: 1298 + - 30000.0ms: 1298 + - +Infms: 1298 ### NGINX Error Logs diff --git a/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-oss.png b/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-oss.png index 08cc64bf7d..565ecda6ca 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-oss.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-oss.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-plus.png b/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-plus.png index d8a73bd8e6..fc82b6e5e1 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-plus.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/cpu-plus.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPRoutes/memory-oss.png b/tests/results/scale/edge/TestScale_HTTPRoutes/memory-oss.png index e940514c20..47901238b8 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/memory-oss.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/memory-oss.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPRoutes/memory-plus.png b/tests/results/scale/edge/TestScale_HTTPRoutes/memory-plus.png index 1d2c2028a9..c6d9b4f9d0 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/memory-plus.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/memory-plus.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-oss.png b/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-oss.png index cbb86f3c40..993acc1524 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-oss.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-oss.png differ diff --git 
a/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-plus.png b/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-plus.png index fa76f68e10..738664a289 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-plus.png and b/tests/results/scale/edge/TestScale_HTTPRoutes/ttr-plus.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-oss.png b/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-oss.png index d299f397c9..ed0d0981b7 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-oss.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-oss.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-plus.png b/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-plus.png index e75dd0cd66..1697b196e6 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-plus.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/cpu-plus.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/memory-oss.png b/tests/results/scale/edge/TestScale_HTTPSListeners/memory-oss.png index 24f13fa2a4..831fec9330 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/memory-oss.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/memory-oss.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/memory-plus.png b/tests/results/scale/edge/TestScale_HTTPSListeners/memory-plus.png index 5b5edd2b05..e0918e7ddd 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/memory-plus.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/memory-plus.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-oss.log b/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-oss.log index 793908c50a..2087752e06 100644 --- a/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-oss.log +++ b/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-oss.log @@ -1 +1,3 @@ -{"level":"debug","ts":"2024-10-15T19:05:58Z","logger":"controller-runtime.healthz","msg":"healthz check failed","checker":"readyz","error":"nginx has not yet become ready to accept traffic"} +{"level":"debug","ts":"2025-09-15T23:52:32Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gatewayclasses.gateway.networking.k8s.io \"nginx\": the object has been modified; please apply your changes to the latest version and try again","namespace":"","name":"nginx","kind":"GatewayClass"} +{"level":"debug","ts":"2025-09-15T23:53:16Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} +{"level":"debug","ts":"2025-09-15T23:53:38Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-plus.log b/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-plus.log index b75fafd9d2..2e5064fead 100644 --- a/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-plus.log +++ b/tests/results/scale/edge/TestScale_HTTPSListeners/ngf-plus.log @@ -1,3 +1,2 @@ 
-{"level":"debug","ts":"2024-10-15T18:17:33Z","logger":"controller-runtime.healthz","msg":"healthz check failed","checker":"readyz","error":"nginx has not yet become ready to accept traffic"} -{"level":"debug","ts":"2024-10-15T18:17:33Z","logger":"controller-runtime.healthz","msg":"healthz check failed","checker":"readyz","error":"nginx has not yet become ready to accept traffic"} -{"level":"debug","ts":"2024-10-15T18:19:29Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} +{"level":"debug","ts":"2025-09-16T02:16:54Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gatewayclasses.gateway.networking.k8s.io \"nginx\": the object has been modified; please apply your changes to the latest version and try again","namespace":"","name":"nginx","kind":"GatewayClass"} +{"level":"debug","ts":"2025-09-16T02:17:38Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-oss.png b/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-oss.png index e7e2c229f7..8ead62bddc 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-oss.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-oss.png differ diff --git a/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-plus.png b/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-plus.png index ea7ab1486e..47ab70e577 100644 Binary files a/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-plus.png and b/tests/results/scale/edge/TestScale_HTTPSListeners/ttr-plus.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/cpu-oss.png b/tests/results/scale/edge/TestScale_Listeners/cpu-oss.png index ea3db28b65..acf9b2cb66 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/cpu-oss.png and b/tests/results/scale/edge/TestScale_Listeners/cpu-oss.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/cpu-plus.png b/tests/results/scale/edge/TestScale_Listeners/cpu-plus.png index beb015f4c2..b37fd10ab5 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/cpu-plus.png and b/tests/results/scale/edge/TestScale_Listeners/cpu-plus.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/memory-oss.png b/tests/results/scale/edge/TestScale_Listeners/memory-oss.png index 8d0b0e101c..fbb57bfafa 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/memory-oss.png and b/tests/results/scale/edge/TestScale_Listeners/memory-oss.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/memory-plus.png b/tests/results/scale/edge/TestScale_Listeners/memory-plus.png index 856cd9fb5c..3a3b103183 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/memory-plus.png and b/tests/results/scale/edge/TestScale_Listeners/memory-plus.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/ngf-oss.log b/tests/results/scale/edge/TestScale_Listeners/ngf-oss.log index 892742246a..0d09ecae0e 100644 --- 
a/tests/results/scale/edge/TestScale_Listeners/ngf-oss.log +++ b/tests/results/scale/edge/TestScale_Listeners/ngf-oss.log @@ -1 +1,2 @@ -{"level":"debug","ts":"2025-03-15T18:56:50Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} +{"level":"debug","ts":"2025-09-15T23:49:54Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} +{"level":"debug","ts":"2025-09-15T23:50:16Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/TestScale_Listeners/ngf-plus.log b/tests/results/scale/edge/TestScale_Listeners/ngf-plus.log index bf1f2e1f65..377e482dbc 100644 --- a/tests/results/scale/edge/TestScale_Listeners/ngf-plus.log +++ b/tests/results/scale/edge/TestScale_Listeners/ngf-plus.log @@ -1,2 +1 @@ -{"level":"debug","ts":"2024-11-15T18:20:41Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} -{"level":"debug","ts":"2024-11-15T18:20:42Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} +{"level":"debug","ts":"2025-09-16T02:14:40Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/TestScale_Listeners/ttr-oss.png b/tests/results/scale/edge/TestScale_Listeners/ttr-oss.png index 390469af6f..26f1c44a14 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/ttr-oss.png and b/tests/results/scale/edge/TestScale_Listeners/ttr-oss.png differ diff --git a/tests/results/scale/edge/TestScale_Listeners/ttr-plus.png b/tests/results/scale/edge/TestScale_Listeners/ttr-plus.png index f41386ec7a..f5f583f551 100644 Binary files a/tests/results/scale/edge/TestScale_Listeners/ttr-plus.png and b/tests/results/scale/edge/TestScale_Listeners/ttr-plus.png differ diff --git a/tests/results/scale/edge/TestScale_UpstreamServers/cpu-oss.png b/tests/results/scale/edge/TestScale_UpstreamServers/cpu-oss.png index 0899136b25..4aae8e128f 100644 Binary files a/tests/results/scale/edge/TestScale_UpstreamServers/cpu-oss.png and b/tests/results/scale/edge/TestScale_UpstreamServers/cpu-oss.png differ diff --git 
a/tests/results/scale/edge/TestScale_UpstreamServers/cpu-plus.png b/tests/results/scale/edge/TestScale_UpstreamServers/cpu-plus.png index b9672a72b3..e2ac05e068 100644 Binary files a/tests/results/scale/edge/TestScale_UpstreamServers/cpu-plus.png and b/tests/results/scale/edge/TestScale_UpstreamServers/cpu-plus.png differ diff --git a/tests/results/scale/edge/TestScale_UpstreamServers/memory-oss.png b/tests/results/scale/edge/TestScale_UpstreamServers/memory-oss.png index bb06b99b8d..1594fd5432 100644 Binary files a/tests/results/scale/edge/TestScale_UpstreamServers/memory-oss.png and b/tests/results/scale/edge/TestScale_UpstreamServers/memory-oss.png differ diff --git a/tests/results/scale/edge/TestScale_UpstreamServers/memory-plus.png b/tests/results/scale/edge/TestScale_UpstreamServers/memory-plus.png index 26cdc8ce2d..5d092fe303 100644 Binary files a/tests/results/scale/edge/TestScale_UpstreamServers/memory-plus.png and b/tests/results/scale/edge/TestScale_UpstreamServers/memory-plus.png differ diff --git a/tests/results/scale/edge/TestScale_UpstreamServers/ngf-oss.log b/tests/results/scale/edge/TestScale_UpstreamServers/ngf-oss.log new file mode 100644 index 0000000000..0be69b5a5c --- /dev/null +++ b/tests/results/scale/edge/TestScale_UpstreamServers/ngf-oss.log @@ -0,0 +1 @@ +{"level":"debug","ts":"2025-09-16T00:08:26Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/TestScale_UpstreamServers/ngf-plus.log b/tests/results/scale/edge/TestScale_UpstreamServers/ngf-plus.log index 35862e1388..8b17dcdcc6 100644 --- a/tests/results/scale/edge/TestScale_UpstreamServers/ngf-plus.log +++ b/tests/results/scale/edge/TestScale_UpstreamServers/ngf-plus.log @@ -1 +1,2 @@ -{"level":"info","ts":"2024-10-01T16:34:56Z","msg":"pkg/mod/k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243: watch of *v1.EndpointSlice ended with: an error on the server (\"unable to decode an event from the watch stream: got short buffer with n=0, base=4092, cap=81920\") has prevented the request from succeeding"} +{"level":"debug","ts":"2025-09-16T02:34:18Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gatewayclasses.gateway.networking.k8s.io \"nginx\": the object has been modified; please apply your changes to the latest version and try again","namespace":"","name":"nginx","kind":"GatewayClass"} +{"level":"debug","ts":"2025-09-16T02:34:33Z","logger":"statusUpdater","msg":"Encountered error updating status","error":"Operation cannot be fulfilled on gateways.gateway.networking.k8s.io \"gateway\": the object has been modified; please apply your changes to the latest version and try again","namespace":"scale","name":"gateway","kind":"Gateway"} diff --git a/tests/results/scale/edge/edge-oss.md b/tests/results/scale/edge/edge-oss.md index e09c77e77a..116d10a0ff 100644 --- a/tests/results/scale/edge/edge-oss.md +++ b/tests/results/scale/edge/edge-oss.md @@ -6,50 +6,37 @@ NGINX Plus: false NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: 
v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 ## Test TestScale_Listeners -### Reloads - -- Total: 127 -- Total Errors: 0 -- Average Time: 127ms -- Reload distribution: - - 500.0ms: 127 - - 1000.0ms: 127 - - 5000.0ms: 127 - - 10000.0ms: 127 - - 30000.0ms: 127 - - +Infms: 127 - ### Event Batch Processing -- Total: 385 -- Average Time: 136ms +- Total: 207 +- Average Time: 23ms - Event Batch Processing distribution: - - 500.0ms: 347 - - 1000.0ms: 382 - - 5000.0ms: 385 - - 10000.0ms: 385 - - 30000.0ms: 385 - - +Infms: 385 + - 500.0ms: 202 + - 1000.0ms: 207 + - 5000.0ms: 207 + - 10000.0ms: 207 + - 30000.0ms: 207 + - +Infms: 207 ### Errors -- NGF errors: 1 +- NGF errors: 2 - NGF container restarts: 0 - NGINX errors: 0 - NGINX container restarts: 0 @@ -61,34 +48,21 @@ The logs are attached only if there are errors. ## Test TestScale_HTTPSListeners -### Reloads - -- Total: 128 -- Total Errors: 0 -- Average Time: 146ms -- Reload distribution: - - 500.0ms: 128 - - 1000.0ms: 128 - - 5000.0ms: 128 - - 10000.0ms: 128 - - 30000.0ms: 128 - - +Infms: 128 - ### Event Batch Processing -- Total: 450 -- Average Time: 166ms +- Total: 269 +- Average Time: 16ms - Event Batch Processing distribution: - - 500.0ms: 392 - - 1000.0ms: 432 - - 5000.0ms: 450 - - 10000.0ms: 450 - - 30000.0ms: 450 - - +Infms: 450 + - 500.0ms: 263 + - 1000.0ms: 269 + - 5000.0ms: 269 + - 10000.0ms: 269 + - 30000.0ms: 269 + - +Infms: 269 ### Errors -- NGF errors: 0 +- NGF errors: 3 - NGF container restarts: 0 - NGINX errors: 0 - NGINX container restarts: 0 @@ -100,30 +74,17 @@ The logs are attached only if there are errors. ## Test TestScale_HTTPRoutes -### Reloads - -- Total: 1001 -- Total Errors: 0 -- Average Time: 174ms -- Reload distribution: - - 500.0ms: 1001 - - 1000.0ms: 1001 - - 5000.0ms: 1001 - - 10000.0ms: 1001 - - 30000.0ms: 1001 - - +Infms: 1001 - ### Event Batch Processing -- Total: 1008 -- Average Time: 229ms +- Total: 1009 +- Average Time: 600ms - Event Batch Processing distribution: - - 500.0ms: 1002 - - 1000.0ms: 1008 - - 5000.0ms: 1008 - - 10000.0ms: 1008 - - 30000.0ms: 1008 - - +Infms: 1008 + - 500.0ms: 295 + - 1000.0ms: 1009 + - 5000.0ms: 1009 + - 10000.0ms: 1009 + - 30000.0ms: 1009 + - +Infms: 1009 ### Errors @@ -139,34 +100,21 @@ The logs are attached only if there are errors. ## Test TestScale_UpstreamServers -### Reloads - -- Total: 97 -- Total Errors: 0 -- Average Time: 126ms -- Reload distribution: - - 500.0ms: 97 - - 1000.0ms: 97 - - 5000.0ms: 97 - - 10000.0ms: 97 - - 30000.0ms: 97 - - +Infms: 97 - ### Event Batch Processing -- Total: 99 -- Average Time: 125ms +- Total: 46 +- Average Time: 405ms - Event Batch Processing distribution: - - 500.0ms: 99 - - 1000.0ms: 99 - - 5000.0ms: 99 - - 10000.0ms: 99 - - 30000.0ms: 99 - - +Infms: 99 + - 500.0ms: 29 + - 1000.0ms: 46 + - 5000.0ms: 46 + - 10000.0ms: 46 + - 30000.0ms: 46 + - +Infms: 46 ### Errors -- NGF errors: 0 +- NGF errors: 1 - NGF container restarts: 0 - NGINX errors: 0 - NGINX container restarts: 0 @@ -179,19 +127,19 @@ The logs are attached only if there are errors. 
## Test TestScale_HTTPMatches ```text -Requests [total, rate, throughput] 30000, 1000.03, 1000.01 -Duration [total, attack, wait] 30s, 29.999s, 663.238µs -Latencies [min, mean, 50, 90, 95, 99, max] 499.976µs, 677.946µs, 660.823µs, 759.984µs, 799.116µs, 904.939µs, 12.162ms -Bytes In [total, mean] 4830000, 161.00 +Requests [total, rate, throughput] 29999, 1000.01, 999.97 +Duration [total, attack, wait] 30s, 29.999s, 1.057ms +Latencies [min, mean, 50, 90, 95, 99, max] 751.608µs, 1.002ms, 965.548µs, 1.092ms, 1.151ms, 1.335ms, 22.262ms +Bytes In [total, mean] 4829839, 161.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% -Status Codes [code:count] 200:30000 +Status Codes [code:count] 200:29999 Error Set: ``` ```text Requests [total, rate, throughput] 30000, 1000.03, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 750.337µs -Latencies [min, mean, 50, 90, 95, 99, max] 590.522µs, 762.674µs, 740.085µs, 869.449µs, 930.564µs, 1.057ms, 8.287ms +Duration [total, attack, wait] 30s, 29.999s, 1.059ms +Latencies [min, mean, 50, 90, 95, 99, max] 823.833µs, 1.06ms, 1.039ms, 1.168ms, 1.227ms, 1.393ms, 16.671ms Bytes In [total, mean] 4830000, 161.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% diff --git a/tests/results/scale/edge/edge-plus.md b/tests/results/scale/edge/edge-plus.md index 42bff5367d..2947764dea 100644 --- a/tests/results/scale/edge/edge-plus.md +++ b/tests/results/scale/edge/edge-plus.md @@ -6,52 +6,39 @@ NGINX Plus: true NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 ## Test TestScale_Listeners -### Reloads - -- Total: 128 -- Total Errors: 0 -- Average Time: 151ms -- Reload distribution: - - 500.0ms: 128 - - 1000.0ms: 128 - - 5000.0ms: 128 - - 10000.0ms: 128 - - 30000.0ms: 128 - - +Infms: 128 - ### Event Batch Processing -- Total: 387 -- Average Time: 134ms +- Total: 203 +- Average Time: 39ms - Event Batch Processing distribution: - - 500.0ms: 351 - - 1000.0ms: 386 - - 5000.0ms: 387 - - 10000.0ms: 387 - - 30000.0ms: 387 - - +Infms: 387 + - 500.0ms: 199 + - 1000.0ms: 201 + - 5000.0ms: 203 + - 10000.0ms: 203 + - 30000.0ms: 203 + - +Infms: 203 ### Errors -- NGF errors: 0 +- NGF errors: 1 - NGF container restarts: 0 -- NGINX errors: 7 +- NGINX errors: 0 - NGINX container restarts: 0 ### Graphs and Logs @@ -61,36 +48,23 @@ The logs are attached only if there are errors. 
## Test TestScale_HTTPSListeners -### Reloads - -- Total: 128 -- Total Errors: 0 -- Average Time: 160ms -- Reload distribution: - - 500.0ms: 128 - - 1000.0ms: 128 - - 5000.0ms: 128 - - 10000.0ms: 128 - - 30000.0ms: 128 - - +Infms: 128 - ### Event Batch Processing -- Total: 451 -- Average Time: 127ms +- Total: 266 +- Average Time: 18ms - Event Batch Processing distribution: - - 500.0ms: 408 - - 1000.0ms: 450 - - 5000.0ms: 451 - - 10000.0ms: 451 - - 30000.0ms: 451 - - +Infms: 451 + - 500.0ms: 261 + - 1000.0ms: 264 + - 5000.0ms: 266 + - 10000.0ms: 266 + - 30000.0ms: 266 + - +Infms: 266 ### Errors -- NGF errors: 0 +- NGF errors: 2 - NGF container restarts: 0 -- NGINX errors: 15 +- NGINX errors: 0 - NGINX container restarts: 0 ### Graphs and Logs @@ -100,30 +74,17 @@ The logs are attached only if there are errors. ## Test TestScale_HTTPRoutes -### Reloads - -- Total: 1001 -- Total Errors: 0 -- Average Time: 189ms -- Reload distribution: - - 500.0ms: 1001 - - 1000.0ms: 1001 - - 5000.0ms: 1001 - - 10000.0ms: 1001 - - 30000.0ms: 1001 - - +Infms: 1001 - ### Event Batch Processing -- Total: 1008 -- Average Time: 261ms +- Total: 1010 +- Average Time: 696ms - Event Batch Processing distribution: - - 500.0ms: 1006 - - 1000.0ms: 1008 - - 5000.0ms: 1008 - - 10000.0ms: 1008 - - 30000.0ms: 1008 - - +Infms: 1008 + - 500.0ms: 163 + - 1000.0ms: 992 + - 5000.0ms: 1010 + - 10000.0ms: 1010 + - 30000.0ms: 1010 + - +Infms: 1010 ### Errors @@ -139,34 +100,21 @@ The logs are attached only if there are errors. ## Test TestScale_UpstreamServers -### Reloads - -- Total: 3 -- Total Errors: 0 -- Average Time: 143ms -- Reload distribution: - - 500.0ms: 3 - - 1000.0ms: 3 - - 5000.0ms: 3 - - 10000.0ms: 3 - - 30000.0ms: 3 - - +Infms: 3 - ### Event Batch Processing -- Total: 37 -- Average Time: 498ms +- Total: 54 +- Average Time: 403ms - Event Batch Processing distribution: - - 500.0ms: 19 - - 1000.0ms: 35 - - 5000.0ms: 37 - - 10000.0ms: 37 - - 30000.0ms: 37 - - +Infms: 37 + - 500.0ms: 37 + - 1000.0ms: 53 + - 5000.0ms: 54 + - 10000.0ms: 54 + - 30000.0ms: 54 + - +Infms: 54 ### Errors -- NGF errors: 0 +- NGF errors: 2 - NGF container restarts: 0 - NGINX errors: 0 - NGINX container restarts: 0 @@ -179,20 +127,20 @@ The logs are attached only if there are errors. 
## Test TestScale_HTTPMatches ```text -Requests [total, rate, throughput] 30000, 1000.02, 1000.00 -Duration [total, attack, wait] 30s, 29.999s, 666.245µs -Latencies [min, mean, 50, 90, 95, 99, max] 514.253µs, 675.464µs, 655.764µs, 737.887µs, 766.943µs, 852.013µs, 12.375ms -Bytes In [total, mean] 4860000, 162.00 +Requests [total, rate, throughput] 30000, 1000.03, 1000.00 +Duration [total, attack, wait] 30s, 29.999s, 1.078ms +Latencies [min, mean, 50, 90, 95, 99, max] 740.672µs, 968.805µs, 941.919µs, 1.063ms, 1.113ms, 1.293ms, 13.259ms +Bytes In [total, mean] 4800000, 160.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 Error Set: ``` ```text -Requests [total, rate, throughput] 30000, 1000.03, 1000.01 -Duration [total, attack, wait] 30s, 29.999s, 772.346µs -Latencies [min, mean, 50, 90, 95, 99, max] 596.801µs, 753.715µs, 734.197µs, 841.051µs, 886.584µs, 980.974µs, 13.362ms -Bytes In [total, mean] 4860000, 162.00 +Requests [total, rate, throughput] 30000, 1000.03, 1000.00 +Duration [total, attack, wait] 30s, 29.999s, 1.012ms +Latencies [min, mean, 50, 90, 95, 99, max] 841.073µs, 1.062ms, 1.042ms, 1.16ms, 1.218ms, 1.386ms, 15.126ms +Bytes In [total, mean] 4800000, 160.00 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-oss.png index c6629a529a..d911a9efbe 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-plus.png index c262e56fa0..1b334d7303 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-oss.png index c6629a529a..d911a9efbe 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-plus.png index c262e56fa0..1b334d7303 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-affinity-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-oss.png index 33783f67af..03f824fabc 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-plus.png index b436e0cb38..5a64287854 100644 Binary files 
a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-oss.png index 33783f67af..03f824fabc 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-plus.png index b436e0cb38..1e7d4e326f 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-down-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-oss.png index 5197b18ead..0487a8323a 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-plus.png index f83464b470..ffb1661a77 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-oss.png index 5197b18ead..0487a8323a 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-plus.png index f83464b470..ffb1661a77 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-affinity-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-oss.png index 6ed5b65e55..7243b26c9e 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-plus.png index b842efc1d4..0f2aca7ecf 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-oss.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-oss.png index 6ed5b65e55..7243b26c9e 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-oss.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-oss.png differ diff --git 
a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-plus.png b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-plus.png index b842efc1d4..0f2aca7ecf 100644 Binary files a/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-plus.png and b/tests/results/zero-downtime-scale/edge/abrupt-scale-up-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/edge-oss.md b/tests/results/zero-downtime-scale/edge/edge-oss.md index ac4d398884..e33ac62313 100644 --- a/tests/results/zero-downtime-scale/edge/edge-oss.md +++ b/tests/results/zero-downtime-scale/edge/edge-oss.md @@ -6,21 +6,21 @@ NGINX Plus: false NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 -## One NGF Pod runs per node Test Results +## One NGINX Pod runs per node Test Results ### Scale Up Gradually @@ -28,9 +28,9 @@ GKE Cluster: ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 806.452µs -Latencies [min, mean, 50, 90, 95, 99, max] 433.68µs, 873.614µs, 867.783µs, 993.691µs, 1.043ms, 1.351ms, 12.859ms -Bytes In [total, mean] 4646890, 154.90 +Duration [total, attack, wait] 5m0s, 5m0s, 998.895µs +Latencies [min, mean, 50, 90, 95, 99, max] 671.983µs, 1.293ms, 1.278ms, 1.478ms, 1.554ms, 1.86ms, 16.369ms +Bytes In [total, mean] 4656088, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -43,9 +43,9 @@ Error Set: ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 1.573ms -Latencies [min, mean, 50, 90, 95, 99, max] 412.342µs, 847.173µs, 847.018µs, 972.99µs, 1.017ms, 1.314ms, 12.59ms -Bytes In [total, mean] 4823979, 160.80 +Duration [total, attack, wait] 5m0s, 5m0s, 1.197ms +Latencies [min, mean, 50, 90, 95, 99, max] 659.146µs, 1.213ms, 1.207ms, 1.387ms, 1.452ms, 1.75ms, 17.398ms +Bytes In [total, mean] 4835973, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -60,9 +60,9 @@ Error Set: ```text Requests [total, rate, throughput] 48000, 100.00, 100.00 -Duration [total, attack, wait] 8m0s, 8m0s, 901.369µs -Latencies [min, mean, 50, 90, 95, 99, max] 400.666µs, 872.136µs, 870.908µs, 1.012ms, 1.064ms, 1.269ms, 12.942ms -Bytes In [total, mean] 7718342, 160.80 +Duration [total, attack, wait] 8m0s, 8m0s, 1.077ms +Latencies [min, mean, 50, 90, 95, 99, max] 649.696µs, 1.228ms, 1.217ms, 1.411ms, 1.48ms, 1.72ms, 38.048ms +Bytes In [total, mean] 7737483, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:48000 @@ -75,9 +75,9 @@ Error Set: ```text Requests [total, rate, throughput] 48000, 100.00, 100.00 -Duration [total, attack, wait] 8m0s, 8m0s, 941.614µs -Latencies [min, mean, 50, 90, 95, 99, max] 417.468µs, 901.578µs, 895.785µs, 1.037ms, 1.091ms, 1.325ms, 13.219ms -Bytes In [total, mean] 7435362, 154.90 +Duration [total, attack, wait] 8m0s, 8m0s, 1.437ms +Latencies [min, mean, 50, 90, 95, 99, max] 705.961µs, 1.261ms, 1.247ms, 1.436ms, 1.507ms, 1.78ms, 37.314ms +Bytes In [total, mean] 7449488, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 
100.00% Status Codes [code:count] 200:48000 @@ -88,35 +88,35 @@ Error Set: ### Scale Up Abruptly -#### Test: Send https /tea traffic +#### Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 1.117ms -Latencies [min, mean, 50, 90, 95, 99, max] 455.668µs, 904.422µs, 904.957µs, 1.033ms, 1.082ms, 1.248ms, 5.631ms -Bytes In [total, mean] 1858693, 154.89 +Duration [total, attack, wait] 2m0s, 2m0s, 1.229ms +Latencies [min, mean, 50, 90, 95, 99, max] 675.074µs, 1.205ms, 1.2ms, 1.378ms, 1.435ms, 1.589ms, 59.446ms +Bytes In [total, mean] 1934397, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 Error Set: ``` -![abrupt-scale-up-affinity-https-oss.png](abrupt-scale-up-affinity-https-oss.png) +![abrupt-scale-up-affinity-http-oss.png](abrupt-scale-up-affinity-http-oss.png) -#### Test: Send http /coffee traffic +#### Test: Send https /tea traffic ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 1.119ms -Latencies [min, mean, 50, 90, 95, 99, max] 412.944µs, 891.737µs, 891.196µs, 1.028ms, 1.081ms, 1.282ms, 9.702ms -Bytes In [total, mean] 1929586, 160.80 +Duration [total, attack, wait] 2m0s, 2m0s, 1.118ms +Latencies [min, mean, 50, 90, 95, 99, max] 703.38µs, 1.241ms, 1.229ms, 1.404ms, 1.467ms, 1.662ms, 57.97ms +Bytes In [total, mean] 1862451, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 Error Set: ``` -![abrupt-scale-up-affinity-http-oss.png](abrupt-scale-up-affinity-http-oss.png) +![abrupt-scale-up-affinity-https-oss.png](abrupt-scale-up-affinity-https-oss.png) ### Scale Down Abruptly @@ -124,9 +124,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 713.199µs -Latencies [min, mean, 50, 90, 95, 99, max] 494.231µs, 953.041µs, 942.581µs, 1.108ms, 1.176ms, 1.337ms, 10.627ms -Bytes In [total, mean] 1858836, 154.90 +Duration [total, attack, wait] 2m0s, 2m0s, 869.184µs +Latencies [min, mean, 50, 90, 95, 99, max] 714.269µs, 1.258ms, 1.253ms, 1.422ms, 1.48ms, 1.651ms, 14.173ms +Bytes In [total, mean] 1862397, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -139,9 +139,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 968.062µs -Latencies [min, mean, 50, 90, 95, 99, max] 478.648µs, 936.553µs, 928.014µs, 1.105ms, 1.171ms, 1.332ms, 10.648ms -Bytes In [total, mean] 1929503, 160.79 +Duration [total, attack, wait] 2m0s, 2m0s, 1.116ms +Latencies [min, mean, 50, 90, 95, 99, max] 689.491µs, 1.189ms, 1.195ms, 1.361ms, 1.413ms, 1.544ms, 5.134ms +Bytes In [total, mean] 1934379, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -150,39 +150,39 @@ Error Set: ![abrupt-scale-down-affinity-http-oss.png](abrupt-scale-down-affinity-http-oss.png) -## Multiple NGF Pods run per node Test Results +## Multiple NGINX Pods run per node Test Results ### Scale Up Gradually -#### Test: Send http /coffee traffic +#### Test: Send https /tea traffic ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 873.094µs -Latencies [min, mean, 50, 90, 95, 99, max] 443.965µs, 917.539µs, 914.468µs, 1.045ms, 1.096ms, 1.453ms, 25.238ms -Bytes In [total, mean] 4829927, 161.00 +Duration [total, attack, wait] 
5m0s, 5m0s, 1.206ms +Latencies [min, mean, 50, 90, 95, 99, max] 678.928µs, 1.281ms, 1.246ms, 1.448ms, 1.531ms, 1.943ms, 29.127ms +Bytes In [total, mean] 4656049, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 Error Set: ``` -![gradual-scale-up-http-oss.png](gradual-scale-up-http-oss.png) +![gradual-scale-up-https-oss.png](gradual-scale-up-https-oss.png) -#### Test: Send https /tea traffic +#### Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 860.394µs -Latencies [min, mean, 50, 90, 95, 99, max] 468.09µs, 940.797µs, 930.888µs, 1.071ms, 1.133ms, 1.485ms, 25.057ms -Bytes In [total, mean] 4652973, 155.10 +Duration [total, attack, wait] 5m0s, 5m0s, 1.056ms +Latencies [min, mean, 50, 90, 95, 99, max] 634.533µs, 1.203ms, 1.189ms, 1.388ms, 1.473ms, 1.859ms, 25.911ms +Bytes In [total, mean] 4835915, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 Error Set: ``` -![gradual-scale-up-https-oss.png](gradual-scale-up-https-oss.png) +![gradual-scale-up-http-oss.png](gradual-scale-up-http-oss.png) ### Scale Down Gradually @@ -190,9 +190,9 @@ Error Set: ```text Requests [total, rate, throughput] 96000, 100.00, 100.00 -Duration [total, attack, wait] 16m0s, 16m0s, 1.096ms -Latencies [min, mean, 50, 90, 95, 99, max] 470.107µs, 956.329µs, 946.839µs, 1.102ms, 1.167ms, 1.419ms, 22.663ms -Bytes In [total, mean] 14889791, 155.10 +Duration [total, attack, wait] 16m0s, 16m0s, 1.373ms +Latencies [min, mean, 50, 90, 95, 99, max] 675.608µs, 1.264ms, 1.249ms, 1.441ms, 1.512ms, 1.792ms, 50.861ms +Bytes In [total, mean] 14899194, 155.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:96000 @@ -205,9 +205,9 @@ Error Set: ```text Requests [total, rate, throughput] 96000, 100.00, 100.00 -Duration [total, attack, wait] 16m0s, 16m0s, 909.045µs -Latencies [min, mean, 50, 90, 95, 99, max] 438.459µs, 925.315µs, 923.188µs, 1.065ms, 1.12ms, 1.373ms, 21.282ms -Bytes In [total, mean] 15456075, 161.00 +Duration [total, attack, wait] 16m0s, 16m0s, 1.127ms +Latencies [min, mean, 50, 90, 95, 99, max] 648.252µs, 1.205ms, 1.199ms, 1.387ms, 1.453ms, 1.718ms, 50.561ms +Bytes In [total, mean] 15475157, 161.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:96000 @@ -222,9 +222,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 1.1ms -Latencies [min, mean, 50, 90, 95, 99, max] 534.726µs, 973.229µs, 963.738µs, 1.127ms, 1.189ms, 1.369ms, 7.262ms -Bytes In [total, mean] 1861155, 155.10 +Duration [total, attack, wait] 2m0s, 2m0s, 967.985µs +Latencies [min, mean, 50, 90, 95, 99, max] 741.113µs, 1.297ms, 1.255ms, 1.415ms, 1.468ms, 1.606ms, 116.955ms +Bytes In [total, mean] 1862469, 155.21 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -237,9 +237,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 912.394µs -Latencies [min, mean, 50, 90, 95, 99, max] 484.225µs, 933.174µs, 935.072µs, 1.065ms, 1.109ms, 1.305ms, 11.145ms -Bytes In [total, mean] 1932089, 161.01 +Duration [total, attack, wait] 2m0s, 2m0s, 1.185ms +Latencies [min, mean, 50, 90, 95, 99, max] 670.703µs, 1.227ms, 1.204ms, 1.374ms, 1.42ms, 1.553ms, 111.729ms +Bytes In [total, mean] 1934414, 161.20 Bytes Out [total, mean] 0, 0.00 
Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -254,9 +254,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 844.643µs -Latencies [min, mean, 50, 90, 95, 99, max] 452.528µs, 904.647µs, 912.865µs, 1.037ms, 1.076ms, 1.194ms, 6.917ms -Bytes In [total, mean] 1932017, 161.00 +Duration [total, attack, wait] 2m0s, 2m0s, 1.15ms +Latencies [min, mean, 50, 90, 95, 99, max] 660.653µs, 1.213ms, 1.213ms, 1.382ms, 1.435ms, 1.57ms, 13.809ms +Bytes In [total, mean] 1934319, 161.19 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -269,9 +269,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 701.654µs -Latencies [min, mean, 50, 90, 95, 99, max] 511.224µs, 936.573µs, 932.311µs, 1.074ms, 1.128ms, 1.304ms, 7.938ms -Bytes In [total, mean] 1861196, 155.10 +Duration [total, attack, wait] 2m0s, 2m0s, 1.352ms +Latencies [min, mean, 50, 90, 95, 99, max] 713.339µs, 1.254ms, 1.252ms, 1.413ms, 1.463ms, 1.619ms, 13.187ms +Bytes In [total, mean] 1862473, 155.21 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 diff --git a/tests/results/zero-downtime-scale/edge/edge-plus.md b/tests/results/zero-downtime-scale/edge/edge-plus.md index d689897ffb..ca3534d749 100644 --- a/tests/results/zero-downtime-scale/edge/edge-plus.md +++ b/tests/results/zero-downtime-scale/edge/edge-plus.md @@ -6,21 +6,21 @@ NGINX Plus: true NGINX Gateway Fabric: -- Commit: 9155a2b6a8d3179165797ef3e789e97283f7a695 -- Date: 2025-03-15T07:17:11Z +- Commit: 635b3fcd6e643f4bd24ebbd4c901619a030c4bc0 +- Date: 2025-09-15T17:56:13Z - Dirty: false GKE Cluster: - Node count: 12 -- k8s version: v1.31.6-gke.1020000 +- k8s version: v1.33.4-gke.1036000 - vCPUs per node: 16 -- RAM per node: 65851340Ki +- RAM per node: 65851528Ki - Max pods per node: 110 - Zone: us-west1-b - Instance Type: n2d-standard-16 -## One NGF Pod runs per node Test Results +## One NGINX Pod runs per node Test Results ### Scale Up Gradually @@ -28,9 +28,9 @@ GKE Cluster: ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 766.303µs -Latencies [min, mean, 50, 90, 95, 99, max] 441.594µs, 875.579µs, 868.868µs, 997.175µs, 1.049ms, 1.357ms, 13.238ms -Bytes In [total, mean] 4673932, 155.80 +Duration [total, attack, wait] 5m0s, 5m0s, 1.327ms +Latencies [min, mean, 50, 90, 95, 99, max] 675.096µs, 1.232ms, 1.209ms, 1.429ms, 1.543ms, 1.768ms, 27.473ms +Bytes In [total, mean] 4596075, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -43,9 +43,9 @@ Error Set: ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 727.187µs -Latencies [min, mean, 50, 90, 95, 99, max] 414.641µs, 846.924µs, 846.028µs, 971.491µs, 1.017ms, 1.294ms, 11.941ms -Bytes In [total, mean] 4850987, 161.70 +Duration [total, attack, wait] 5m0s, 5m0s, 1.046ms +Latencies [min, mean, 50, 90, 95, 99, max] 663.466µs, 1.172ms, 1.152ms, 1.361ms, 1.48ms, 1.74ms, 17.181ms +Bytes In [total, mean] 4775927, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 @@ -59,14 +59,15 @@ Error Set: #### Test: Send https /tea traffic ```text -Requests [total, rate, throughput] 48000, 100.00, 100.00 -Duration [total, attack, wait] 8m0s, 8m0s, 730.887µs -Latencies [min, mean, 50, 90, 95, 99, max] 433.836µs, 
850.845µs, 848.555µs, 968.862µs, 1.013ms, 1.215ms, 8.39ms -Bytes In [total, mean] 7478267, 155.80 +Requests [total, rate, throughput] 48000, 100.00, 99.99 +Duration [total, attack, wait] 8m0s, 8m0s, 1.163ms +Latencies [min, mean, 50, 90, 95, 99, max] 305.029µs, 1.277ms, 1.217ms, 1.523ms, 1.634ms, 1.847ms, 219.704ms +Bytes In [total, mean] 7352590, 153.18 Bytes Out [total, mean] 0, 0.00 -Success [ratio] 100.00% -Status Codes [code:count] 200:48000 +Success [ratio] 99.99% +Status Codes [code:count] 0:6 200:47994 Error Set: +Get "https://cafe.example.com/tea": dial tcp 0.0.0.0:0->10.138.0.47:443: connect: network is unreachable ``` ![gradual-scale-down-affinity-https-plus.png](gradual-scale-down-affinity-https-plus.png) @@ -74,14 +75,15 @@ Error Set: #### Test: Send http /coffee traffic ```text -Requests [total, rate, throughput] 48000, 100.00, 100.00 -Duration [total, attack, wait] 8m0s, 8m0s, 715.71µs -Latencies [min, mean, 50, 90, 95, 99, max] 405.345µs, 820.868µs, 825.255µs, 941.274µs, 982.586µs, 1.188ms, 11.166ms -Bytes In [total, mean] 7761660, 161.70 +Requests [total, rate, throughput] 48000, 100.00, 99.99 +Duration [total, attack, wait] 8m0s, 8m0s, 1.045ms +Latencies [min, mean, 50, 90, 95, 99, max] 243.115µs, 1.215ms, 1.169ms, 1.465ms, 1.598ms, 1.81ms, 214.724ms +Bytes In [total, mean] 7640356, 159.17 Bytes Out [total, mean] 0, 0.00 -Success [ratio] 100.00% -Status Codes [code:count] 200:48000 +Success [ratio] 99.99% +Status Codes [code:count] 0:7 200:47993 Error Set: +Get "http://cafe.example.com/coffee": dial tcp 0.0.0.0:0->10.138.0.47:80: connect: network is unreachable ``` ![gradual-scale-down-affinity-http-plus.png](gradual-scale-down-affinity-http-plus.png) @@ -92,9 +94,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 731.03µs -Latencies [min, mean, 50, 90, 95, 99, max] 423.637µs, 822.777µs, 824.747µs, 937.12µs, 981.297µs, 1.14ms, 3.973ms -Bytes In [total, mean] 1940496, 161.71 +Duration [total, attack, wait] 2m0s, 2m0s, 1.402ms +Latencies [min, mean, 50, 90, 95, 99, max] 656.659µs, 1.129ms, 1.133ms, 1.278ms, 1.322ms, 1.507ms, 3.641ms +Bytes In [total, mean] 1910438, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -107,9 +109,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 887.292µs -Latencies [min, mean, 50, 90, 95, 99, max] 442.281µs, 858.712µs, 854.673µs, 973.029µs, 1.017ms, 1.179ms, 10.485ms -Bytes In [total, mean] 1869632, 155.80 +Duration [total, attack, wait] 2m0s, 2m0s, 1.284ms +Latencies [min, mean, 50, 90, 95, 99, max] 710.396µs, 1.192ms, 1.195ms, 1.323ms, 1.366ms, 1.579ms, 9.731ms +Bytes In [total, mean] 1838355, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -120,101 +122,101 @@ Error Set: ### Scale Down Abruptly -#### Test: Send http /coffee traffic +#### Test: Send https /tea traffic ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 804.798µs -Latencies [min, mean, 50, 90, 95, 99, max] 424.072µs, 837.824µs, 838.15µs, 963.636µs, 1.006ms, 1.123ms, 44.463ms -Bytes In [total, mean] 1940409, 161.70 +Duration [total, attack, wait] 2m0s, 2m0s, 1.314ms +Latencies [min, mean, 50, 90, 95, 99, max] 730.016µs, 1.229ms, 1.213ms, 1.343ms, 1.388ms, 1.521ms, 64.443ms +Bytes In [total, mean] 1838380, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% 
Status Codes [code:count] 200:12000 Error Set: ``` -![abrupt-scale-down-affinity-http-plus.png](abrupt-scale-down-affinity-http-plus.png) +![abrupt-scale-down-affinity-https-plus.png](abrupt-scale-down-affinity-https-plus.png) -#### Test: Send https /tea traffic +#### Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 685.714µs -Latencies [min, mean, 50, 90, 95, 99, max] 459.498µs, 865.342µs, 861.335µs, 990.99µs, 1.035ms, 1.151ms, 48.501ms -Bytes In [total, mean] 1869571, 155.80 +Duration [total, attack, wait] 2m0s, 2m0s, 1.177ms +Latencies [min, mean, 50, 90, 95, 99, max] 678.11µs, 1.171ms, 1.161ms, 1.306ms, 1.348ms, 1.474ms, 67.354ms +Bytes In [total, mean] 1910385, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 Error Set: ``` -![abrupt-scale-down-affinity-https-plus.png](abrupt-scale-down-affinity-https-plus.png) +![abrupt-scale-down-affinity-http-plus.png](abrupt-scale-down-affinity-http-plus.png) -## Multiple NGF Pods run per node Test Results +## Multiple NGINX Pods run per node Test Results ### Scale Up Gradually -#### Test: Send http /coffee traffic +#### Test: Send https /tea traffic ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 499.02µs -Latencies [min, mean, 50, 90, 95, 99, max] 404.188µs, 862.699µs, 858.402µs, 1.003ms, 1.053ms, 1.359ms, 11.022ms -Bytes In [total, mean] 4862948, 162.10 +Duration [total, attack, wait] 5m0s, 5m0s, 1.232ms +Latencies [min, mean, 50, 90, 95, 99, max] 677.29µs, 1.222ms, 1.214ms, 1.361ms, 1.417ms, 1.778ms, 29.484ms +Bytes In [total, mean] 4595877, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 Error Set: ``` -![gradual-scale-up-http-plus.png](gradual-scale-up-http-plus.png) +![gradual-scale-up-https-plus.png](gradual-scale-up-https-plus.png) -#### Test: Send https /tea traffic +#### Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 30000, 100.00, 100.00 -Duration [total, attack, wait] 5m0s, 5m0s, 917.782µs -Latencies [min, mean, 50, 90, 95, 99, max] 452.74µs, 884.958µs, 873.544µs, 1.017ms, 1.07ms, 1.42ms, 11.641ms -Bytes In [total, mean] 4682982, 156.10 +Duration [total, attack, wait] 5m0s, 5m0s, 1.116ms +Latencies [min, mean, 50, 90, 95, 99, max] 652.028µs, 1.156ms, 1.151ms, 1.31ms, 1.364ms, 1.702ms, 29.516ms +Bytes In [total, mean] 4775988, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:30000 Error Set: ``` -![gradual-scale-up-https-plus.png](gradual-scale-up-https-plus.png) +![gradual-scale-up-http-plus.png](gradual-scale-up-http-plus.png) ### Scale Down Gradually -#### Test: Send https /tea traffic +#### Test: Send http /coffee traffic ```text Requests [total, rate, throughput] 96000, 100.00, 100.00 -Duration [total, attack, wait] 16m0s, 16m0s, 760.896µs -Latencies [min, mean, 50, 90, 95, 99, max] 433.285µs, 934.463µs, 905.034µs, 1.107ms, 1.202ms, 1.549ms, 83.045ms -Bytes In [total, mean] 14985575, 156.10 +Duration [total, attack, wait] 16m0s, 16m0s, 1.136ms +Latencies [min, mean, 50, 90, 95, 99, max] 577.2µs, 1.169ms, 1.161ms, 1.316ms, 1.366ms, 1.628ms, 72.479ms +Bytes In [total, mean] 15283137, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:96000 Error Set: ``` -![gradual-scale-down-https-plus.png](gradual-scale-down-https-plus.png) 
+![gradual-scale-down-http-plus.png](gradual-scale-down-http-plus.png) -#### Test: Send http /coffee traffic +#### Test: Send https /tea traffic ```text Requests [total, rate, throughput] 96000, 100.00, 100.00 -Duration [total, attack, wait] 16m0s, 16m0s, 797.537µs -Latencies [min, mean, 50, 90, 95, 99, max] 389.802µs, 906.16µs, 872.26µs, 1.081ms, 1.271ms, 1.729ms, 78.489ms -Bytes In [total, mean] 15561579, 162.10 +Duration [total, attack, wait] 16m0s, 16m0s, 1.188ms +Latencies [min, mean, 50, 90, 95, 99, max] 687.721µs, 1.229ms, 1.216ms, 1.364ms, 1.419ms, 1.697ms, 68.011ms +Bytes In [total, mean] 14707422, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:96000 Error Set: ``` -![gradual-scale-down-http-plus.png](gradual-scale-down-http-plus.png) +![gradual-scale-down-https-plus.png](gradual-scale-down-https-plus.png) ### Scale Up Abruptly @@ -222,9 +224,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 1.038ms -Latencies [min, mean, 50, 90, 95, 99, max] 438.072µs, 859.877µs, 851.049µs, 991.439µs, 1.042ms, 1.261ms, 9.194ms -Bytes In [total, mean] 1873263, 156.11 +Duration [total, attack, wait] 2m0s, 2m0s, 1.264ms +Latencies [min, mean, 50, 90, 95, 99, max] 718.712µs, 1.247ms, 1.217ms, 1.353ms, 1.401ms, 1.716ms, 37.253ms +Bytes In [total, mean] 1838307, 153.19 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -237,9 +239,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 745.836µs -Latencies [min, mean, 50, 90, 95, 99, max] 397.717µs, 825.498µs, 823.88µs, 955.33µs, 1.002ms, 1.198ms, 9.229ms -Bytes In [total, mean] 1945082, 162.09 +Duration [total, attack, wait] 2m0s, 2m0s, 1.329ms +Latencies [min, mean, 50, 90, 95, 99, max] 670.205µs, 1.191ms, 1.169ms, 1.31ms, 1.357ms, 1.582ms, 113.243ms +Bytes In [total, mean] 1910371, 159.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 @@ -253,14 +255,15 @@ Error Set: #### Test: Send http /coffee traffic ```text -Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 853.74µs -Latencies [min, mean, 50, 90, 95, 99, max] 434.496µs, 857.503µs, 848.746µs, 975.447µs, 1.022ms, 1.187ms, 26.289ms -Bytes In [total, mean] 1945253, 162.10 +Requests [total, rate, throughput] 12000, 100.01, 91.67 +Duration [total, attack, wait] 2m0s, 2m0s, 1.262ms +Latencies [min, mean, 50, 90, 95, 99, max] 488.744µs, 1.133ms, 1.175ms, 1.329ms, 1.374ms, 1.478ms, 3.391ms +Bytes In [total, mean] 1901179, 158.43 Bytes Out [total, mean] 0, 0.00 -Success [ratio] 100.00% -Status Codes [code:count] 200:12000 +Success [ratio] 91.67% +Status Codes [code:count] 200:11000 502:1000 Error Set: +502 Bad Gateway ``` ![abrupt-scale-down-http-plus.png](abrupt-scale-down-http-plus.png) @@ -269,9 +272,9 @@ Error Set: ```text Requests [total, rate, throughput] 12000, 100.01, 100.01 -Duration [total, attack, wait] 2m0s, 2m0s, 849.564µs -Latencies [min, mean, 50, 90, 95, 99, max] 453.708µs, 899.405µs, 881.221µs, 1.024ms, 1.074ms, 1.234ms, 8.51ms -Bytes In [total, mean] 1873266, 156.11 +Duration [total, attack, wait] 2m0s, 2m0s, 1.186ms +Latencies [min, mean, 50, 90, 95, 99, max] 746.375µs, 1.23ms, 1.233ms, 1.364ms, 1.407ms, 1.537ms, 20.761ms +Bytes In [total, mean] 1838411, 153.20 Bytes Out [total, mean] 0, 0.00 Success [ratio] 100.00% Status Codes [code:count] 200:12000 diff --git 
a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-oss.png index 450efa3f68..17457326c9 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-plus.png index f09d91c088..d13a5ee457 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-oss.png index 450efa3f68..17457326c9 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-plus.png index f09d91c088..d13a5ee457 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-affinity-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-oss.png index 1956882a7a..a2372742b7 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-plus.png index 23d466b705..1d4f15c123 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-oss.png index 1956882a7a..a2372742b7 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-plus.png index 23d466b705..1d4f15c123 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-down-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-oss.png index 4d7a5204e9..7ef9180c35 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-plus.png 
b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-plus.png index 75b6e12b2c..74b0930b90 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-oss.png index 4d7a5204e9..7ef9180c35 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-plus.png index 75b6e12b2c..74b0930b90 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-affinity-https-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-oss.png index 08f89e08c5..72bf597417 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-plus.png index e0b62dc33f..e9e06c4281 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-http-plus.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-oss.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-oss.png index 08f89e08c5..72bf597417 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-oss.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-oss.png differ diff --git a/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-plus.png b/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-plus.png index e0b62dc33f..e9e06c4281 100644 Binary files a/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-plus.png and b/tests/results/zero-downtime-scale/edge/gradual-scale-up-https-plus.png differ diff --git a/tests/suite/advanced_routing_test.go b/tests/suite/advanced_routing_test.go index 1163089cc5..944a9f0832 100644 --- a/tests/suite/advanced_routing_test.go +++ b/tests/suite/advanced_routing_test.go @@ -40,7 +40,10 @@ var _ = Describe("AdvancedRouting", Ordered, Label("functional", "routing"), fun Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/client_settings_test.go b/tests/suite/client_settings_test.go index 53762e58bc..e02283ea99 100644 --- a/tests/suite/client_settings_test.go +++ b/tests/suite/client_settings_test.go @@ -47,7 +47,10 @@ var _ = Describe("ClientSettingsPolicy", 
Ordered, Label("functional", "cspolicy" Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -393,7 +396,7 @@ func waitForClientSettingsAncestorStatus( func(ctx context.Context) (bool, error) { var pol ngfAPI.ClientSettingsPolicy - if err := k8sClient.Get(ctx, policyNsname, &pol); err != nil { + if err := resourceManager.Get(ctx, policyNsname, &pol); err != nil { return false, err } diff --git a/tests/suite/dataplane_perf_test.go b/tests/suite/dataplane_perf_test.go index cce009ab03..45bd8ef295 100644 --- a/tests/suite/dataplane_perf_test.go +++ b/tests/suite/dataplane_perf_test.go @@ -66,7 +66,10 @@ var _ = Describe("Dataplane performance", Ordered, Label("nfr", "performance"), Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/graceful_recovery_test.go b/tests/suite/graceful_recovery_test.go index 01cfed2652..a5d3348958 100644 --- a/tests/suite/graceful_recovery_test.go +++ b/tests/suite/graceful_recovery_test.go @@ -74,7 +74,11 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra defer cancel() var pod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: podName}, &pod); err != nil { + if err := resourceManager.Get( + ctx, + types.NamespacedName{Namespace: namespace, Name: podName}, + &pod, + ); err != nil { return 0, fmt.Errorf("error retrieving Pod: %w", err) } @@ -95,8 +99,11 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra } if restartCount != currentRestartCount+1 { - return fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", + restartErr := fmt.Errorf("expected current restart count: %d to match incremented restart count: %d", restartCount, currentRestartCount+1) + GinkgoWriter.Printf("%s\n", restartErr) + + return restartErr } return nil @@ -107,7 +114,7 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra defer cancel() var nodes core.NodeList - if err := k8sClient.List(ctx, &nodes); err != nil { + if err := resourceManager.List(ctx, &nodes); err != nil { return nil, fmt.Errorf("error listing nodes: %w", err) } @@ -125,26 +132,39 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra defer cancel() var nginxPod core.Pod - if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: nginxPodName}, &nginxPod); err != nil { + if err := resourceManager.Get( + ctx, + types.NamespacedName{Namespace: ns.Name, Name: nginxPodName}, + &nginxPod, + ); err != nil { return nil, fmt.Errorf("error retrieving nginx Pod: %w", err) } b, err := resourceManager.GetFileContents("graceful-recovery/node-debugger-job.yaml") if err != nil { - return nil, fmt.Errorf("error processing node debugger job file: %w", err) + debugErr := 
fmt.Errorf("error processing node debugger job file: %w", err) + GinkgoWriter.Printf("%s\n", debugErr) + + return nil, debugErr } job := &v1.Job{} if err = yaml.Unmarshal(b.Bytes(), job); err != nil { - return nil, fmt.Errorf("error with yaml unmarshal: %w", err) + yamlErr := fmt.Errorf("error with yaml unmarshal: %w", err) + GinkgoWriter.Printf("%s\n", yamlErr) + + return nil, yamlErr } job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] = nginxPod.Spec.NodeName if len(job.Spec.Template.Spec.Containers) != 1 { - return nil, fmt.Errorf( + containerErr := fmt.Errorf( "expected node debugger job to contain one container, actual number: %d", len(job.Spec.Template.Spec.Containers), ) + GinkgoWriter.Printf("ERROR: %s\n", containerErr) + + return nil, containerErr } job.Namespace = ns.Name @@ -174,7 +194,7 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra // default propagation policy is metav1.DeletePropagationOrphan which does not delete the underlying // pod created through the job after the job is deleted. Setting it to metav1.DeletePropagationBackground // deletes the underlying pod after the job is deleted. - Expect(resourceManager.Delete( + Expect(resourceManager.DeleteResources( []client.Object{job}, client.PropagationPolicy(metav1.DeletePropagationBackground), )).To(Succeed()) @@ -207,7 +227,10 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra var err error Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -281,8 +304,7 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra var podNames []string Eventually( func() bool { - podNames, err = framework.GetReadyNGFPodNames( - k8sClient, + podNames, err = resourceManager.GetReadyNGFPodNames( ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout, @@ -307,7 +329,10 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra var nginxPodNames []string Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout * 2). 
@@ -352,12 +377,15 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra var lease coordination.Lease key := types.NamespacedName{Name: "ngf-test-nginx-gateway-fabric-leader-election", Namespace: ngfNamespace} - if err := k8sClient.Get(ctx, key, &lease); err != nil { + if err := resourceManager.Get(ctx, key, &lease); err != nil { return "", errors.New("could not retrieve leader election lease") } if *lease.Spec.HolderIdentity == "" { - return "", errors.New("leader election lease holder identity is empty") + leaderErr := errors.New("leader election lease holder identity is empty") + GinkgoWriter.Printf("ERROR: %s\n", leaderErr) + + return "", leaderErr } return *lease.Spec.HolderIdentity, nil @@ -381,7 +409,11 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra } BeforeAll(func() { - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout) + podNames, err := resourceManager.GetReadyNGFPodNames( + ngfNamespace, + releaseName, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(podNames).To(HaveLen(1)) @@ -397,7 +429,10 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -431,7 +466,10 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra It("recovers when nginx container is restarted", func() { restartNginxContainer(activeNginxPodName, ns.Name, nginxContainerName) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) activeNginxPodName = nginxPodNames[0] @@ -460,13 +498,12 @@ var _ = Describe("Graceful Recovery test", Ordered, FlakeAttempts(2), Label("gra ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.DeleteTimeout) defer cancel() - Expect(k8sClient.Delete(ctx, ngfPod)).To(Succeed()) + Expect(resourceManager.Delete(ctx, ngfPod, nil)).To(Succeed()) var newNGFPodNames []string Eventually( func() bool { - newNGFPodNames, err = framework.GetReadyNGFPodNames( - k8sClient, + newNGFPodNames, err = resourceManager.GetReadyNGFPodNames( ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout, diff --git a/tests/suite/longevity_test.go b/tests/suite/longevity_test.go index 8734ffa5bc..6d9cc2e37e 100644 --- a/tests/suite/longevity_test.go +++ b/tests/suite/longevity_test.go @@ -56,7 +56,7 @@ var _ = Describe("Longevity", Label("longevity-setup", "longevity-teardown"), fu Expect(resourceManager.Apply([]client.Object{&ns})).To(Succeed()) Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.ApplyFromFiles(promFile, ngfNamespace)).To(Succeed()) - Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) + Expect(resourceManager.WaitForAppsToBeReady(ns.Name, framework.WithLoggingDisabled())).To(Succeed()) }) It("collects results", Label("longevity-teardown"), func() { 
diff --git a/tests/suite/manifests/longevity/cronjob.yaml b/tests/suite/manifests/longevity/cronjob.yaml index 7302030d2e..b07466bb15 100644 --- a/tests/suite/manifests/longevity/cronjob.yaml +++ b/tests/suite/manifests/longevity/cronjob.yaml @@ -40,7 +40,7 @@ spec: serviceAccountName: rollout-mgr containers: - name: coffee-rollout-mgr - image: curlimages/curl:8.15.0 + image: curlimages/curl:8.16.0 imagePullPolicy: IfNotPresent command: - /bin/sh @@ -69,7 +69,7 @@ spec: serviceAccountName: rollout-mgr containers: - name: coffee-rollout-mgr - image: curlimages/curl:8.15.0 + image: curlimages/curl:8.16.0 imagePullPolicy: IfNotPresent command: - /bin/sh diff --git a/tests/suite/manifests/upstream-settings-policy/invalid-target-usps.yaml b/tests/suite/manifests/upstream-settings-policy/invalid-target-usps.yaml index 74d0dd4b4f..da0c8cc470 100644 --- a/tests/suite/manifests/upstream-settings-policy/invalid-target-usps.yaml +++ b/tests/suite/manifests/upstream-settings-policy/invalid-target-usps.yaml @@ -5,7 +5,8 @@ metadata: spec: gatewayClassName: nginx addresses: - - value: "10.0.0.1" + - type: Hostname + value: "foo" listeners: - name: http port: 80 diff --git a/tests/suite/nginxgateway_test.go b/tests/suite/nginxgateway_test.go index 3518c2ed69..4813661964 100644 --- a/tests/suite/nginxgateway_test.go +++ b/tests/suite/nginxgateway_test.go @@ -13,7 +13,6 @@ import ( "k8s.io/apimachinery/pkg/types" ngfAPI "github.com/nginx/nginx-gateway-fabric/v2/apis/v1alpha1" - "github.com/nginx/nginx-gateway-fabric/v2/tests/framework" ) var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), func() { @@ -34,15 +33,8 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f var nginxGateway ngfAPI.NginxGateway - if err := k8sClient.Get(ctx, nsname, &nginxGateway); err != nil { - gatewayErr := fmt.Errorf("failed to get nginxGateway: %w", err) - GinkgoWriter.Printf( - "ERROR occurred during getting NGINX Gateway in namespace %q: %v\n", - nsname.Namespace, - gatewayErr, - ) - - return nginxGateway, gatewayErr + if err := resourceManager.Get(ctx, nsname, &nginxGateway); err != nil { + return nginxGateway, fmt.Errorf("failed to get nginxGateway: %w", err) } return nginxGateway, nil @@ -115,8 +107,7 @@ var _ = Describe("NginxGateway", Ordered, Label("functional", "nginxGateway"), f } getNGFPodName := func() (string, error) { - podNames, err := framework.GetReadyNGFPodNames( - k8sClient, + podNames, err := resourceManager.GetReadyNGFPodNames( ngfNamespace, releaseName, timeoutConfig.GetStatusTimeout, diff --git a/tests/suite/reconfig_test.go b/tests/suite/reconfig_test.go index ab990e1808..51816d0736 100644 --- a/tests/suite/reconfig_test.go +++ b/tests/suite/reconfig_test.go @@ -64,7 +64,6 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r Expect(err).ToNot(HaveOccurred()) k8sConfig := ctlr.GetConfigOrDie() - if !clusterInfo.IsGKE { Expect(promInstance.PortForward(k8sConfig, promPortForwardStopCh)).To(Succeed()) } @@ -85,6 +84,8 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r }) createUniqueResources := func(resourceCount int, fileName string) error { + GinkgoWriter.Printf("Creating %d unique resources from %s\n", resourceCount, fileName) + var appliedResources []string for i := 1; i <= resourceCount; i++ { namespace := "namespace" + strconv.Itoa(i) @@ -98,11 +99,29 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r fileString = 
strings.ReplaceAll(fileString, "tea", "tea"+namespace) data := bytes.NewBufferString(fileString) - - if err := resourceManager.ApplyFromBuffer(data, namespace); err != nil { - return fmt.Errorf("error processing manifest file: %w", err) + appliedResources = append(appliedResources, namespace) + + if err := resourceManager.ApplyFromBuffer( + data, + namespace, + framework.WithLoggingDisabled(), // disable logging to avoid huge logs for 150 resources + ); err != nil { + manifestErr := fmt.Errorf("error processing manifest file: %w", err) + GinkgoWriter.Printf( + "ERROR creating and applying unique resources; attempted so far: %v\nthe failure occurred on %q: %v\n", + appliedResources, + namespace, + manifestErr, + ) + return manifestErr } } + GinkgoWriter.Printf( + "Successfully created %d unique resources from %s: %v\n", + resourceCount, + fileName, + appliedResources, + ) return nil } @@ -117,7 +136,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r Name: "namespace" + strconv.Itoa(i), }, } - Expect(k8sClient.Create(ctx, &ns)).To(Succeed()) + Expect(resourceManager.Create(ctx, &ns)).To(Succeed()) } Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) @@ -126,7 +145,8 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r "reconfig/cafe-secret.yaml", "reconfig/reference-grant.yaml", }, - reconfigNamespace.Name)).To(Succeed()) + reconfigNamespace.Name, + )).To(Succeed()) Expect(createUniqueResources(resourceCount, "manifests/reconfig/cafe.yaml")).To(Succeed()) @@ -147,21 +167,24 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r defer cancel() var namespaces core.NamespaceList - if err := k8sClient.List(ctx, &namespaces); err != nil { + if err := resourceManager.List(ctx, &namespaces); err != nil { return fmt.Errorf("error getting namespaces: %w", err) } + GinkgoWriter.Printf("Found %d namespaces, expected at least %d\n", len(namespaces.Items), resourceCount) Expect(len(namespaces.Items)).To(BeNumerically(">=", resourceCount)) var routes v1.HTTPRouteList - if err := k8sClient.List(ctx, &routes); err != nil { + if err := resourceManager.List(ctx, &routes); err != nil { return fmt.Errorf("error getting HTTPRoutes: %w", err) } + GinkgoWriter.Printf("Found %d HTTPRoutes, expected %d\n", len(routes.Items), resourceCount*3) Expect(routes.Items).To(HaveLen(resourceCount * 3)) var pods core.PodList - if err := k8sClient.List(ctx, &pods); err != nil { + if err := resourceManager.List(ctx, &pods); err != nil { return fmt.Errorf("error getting Pods: %w", err) } + GinkgoWriter.Printf("Found %d Pods, expected at least %d\n", len(pods.Items), resourceCount*2) Expect(len(pods.Items)).To(BeNumerically(">=", resourceCount*2)) return nil @@ -175,7 +198,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r namespaces[i] = "namespace" + strconv.Itoa(i+1) } - err = resourceManager.DeleteNamespaces(namespaces) + err = resourceManager.DeleteNamespaces(namespaces, framework.WithLoggingDisabled()) Expect(resourceManager.DeleteNamespace(reconfigNamespace.Name)).To(Succeed()) return err @@ -196,7 +219,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r } // each call to ValidateNginxFieldExists takes about 1ms - if err := framework.ValidateNginxFieldExists(conf, expUpstream); err != nil { + if err := framework.ValidateNginxFieldExists(conf, expUpstream, framework.WithLoggingDisabled()); err != nil { select { case <-ctx.Done(): return 
fmt.Errorf("error validating nginx conf was generated in "+namespace+": %w", err.Error()) @@ -222,6 +245,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r if stringTimeToReadyTotal == "0" { stringTimeToReadyTotal = "< 1" } + GinkgoWriter.Printf("Calculated time to ready total for %q: %s\n", nginxPodName, stringTimeToReadyTotal) return stringTimeToReadyTotal } @@ -276,6 +300,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r q, getEndTime, noOpModifier, + framework.WithLoggingDisabled(), ), ).WithTimeout(metricExistTimeout).WithPolling(metricExistPolling).Should(Succeed()) } @@ -314,7 +339,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r cfg.nfr = true setup(cfg) - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + podNames, err := resourceManager.GetReadyNGFPodNames(ngfNamespace, releaseName, timeoutConfig.GetTimeout) Expect(err).ToNot(HaveOccurred()) Expect(podNames).To(HaveLen(1)) ngfPodName := podNames[0] @@ -323,10 +348,10 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r var nginxPodNames []string Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames( - k8sClient, + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( reconfigNamespace.Name, timeoutConfig.GetStatusTimeout, + framework.WithLoggingDisabled(), ) return len(nginxPodNames) == 1 && err == nil }). @@ -371,21 +396,26 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r cfg.nfr = true setup(cfg) - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + podNames, err := resourceManager.GetReadyNGFPodNames( + ngfNamespace, + releaseName, + timeoutConfig.GetTimeout, + framework.WithLoggingDisabled(), + ) Expect(err).ToNot(HaveOccurred()) Expect(podNames).To(HaveLen(1)) ngfPodName := podNames[0] - Expect(resourceManager.Apply([]client.Object{&reconfigNamespace})).To(Succeed()) + Expect(resourceManager.Apply([]client.Object{&reconfigNamespace}, framework.WithLoggingDisabled())).To(Succeed()) Expect(resourceManager.ApplyFromFiles([]string{"reconfig/gateway.yaml"}, reconfigNamespace.Name)).To(Succeed()) var nginxPodNames []string Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames( - k8sClient, + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( reconfigNamespace.Name, timeoutConfig.GetStatusTimeout, + framework.WithLoggingDisabled(), ) return len(nginxPodNames) == 1 && err == nil }). @@ -406,7 +436,7 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r File: "http.conf", } - return framework.ValidateNginxFieldExists(conf, defaultUpstream) == nil + return framework.ValidateNginxFieldExists(conf, defaultUpstream, framework.WithLoggingDisabled()) == nil }). WithTimeout(timeoutConfig.CreateTimeout). 
Should(BeTrue()) @@ -439,7 +469,11 @@ var _ = Describe("Reconfiguration Performance Testing", Ordered, Label("nfr", "r }) AfterEach(func() { - framework.AddNginxLogsAndEventsToReport(resourceManager, reconfigNamespace.Name) + framework.AddNginxLogsAndEventsToReport( + resourceManager, + reconfigNamespace.Name, + framework.WithLoggingDisabled(), + ) Expect(cleanupResources()).Should(Succeed()) teardown(releaseName) diff --git a/tests/suite/sample_test.go b/tests/suite/sample_test.go index a79dda5a7d..f133cc6acd 100644 --- a/tests/suite/sample_test.go +++ b/tests/suite/sample_test.go @@ -38,7 +38,10 @@ var _ = Describe("Basic test example", Label("functional"), func() { Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) diff --git a/tests/suite/scale_test.go b/tests/suite/scale_test.go index 526b27049c..adecea7e5c 100644 --- a/tests/suite/scale_test.go +++ b/tests/suite/scale_test.go @@ -114,7 +114,11 @@ var _ = Describe("Scale test", Ordered, Label("nfr", "scale"), func() { } Expect(resourceManager.Apply([]client.Object{ns})).To(Succeed()) - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + podNames, err := resourceManager.GetReadyNGFPodNames( + ngfNamespace, + releaseName, + timeoutConfig.GetTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(podNames).To(HaveLen(1)) ngfPodName = podNames[0] @@ -159,6 +163,8 @@ The logs are attached only if there are errors. writeScaleResults := func(dest io.Writer, results scaleTestResults) error { tmpl, err := template.New("results").Parse(scaleResultTemplate) if err != nil { + GinkgoWriter.Printf("ERROR creating results template: %v\n", err) + return err } @@ -245,8 +251,12 @@ The logs are attached only if there are errors. test() - // We sleep for 2 scape intervals to ensure that Prometheus scrapes the metrics after the test() finishes + // We sleep for 2 scrape intervals to ensure that Prometheus scrapes the metrics after the test() finishes // before endTime, so that we don't lose any metric values like reloads. + GinkgoWriter.Printf( + "Sleeping for %v to ensure Prometheus scrapes the metrics after the test finishes\n", + 2*scrapeInterval, + ) time.Sleep(2 * scrapeInterval) endTime := time.Now() @@ -349,7 +359,10 @@ The logs are attached only if there are errors. []string{`"logger":"usageReporter`}, // ignore usageReporter errors ) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -375,10 +388,15 @@ The logs are attached only if there are errors. 
findRestarts := func(containerName string, pod *core.Pod) int { for _, containerStatus := range pod.Status.ContainerStatuses { if containerStatus.Name == containerName { + GinkgoWriter.Printf("INFO: container %q had %d restarts\n", containerName, containerStatus.RestartCount) + return int(containerStatus.RestartCount) } } - Fail(fmt.Sprintf("container %s not found", containerName)) + fail := fmt.Sprintf("container %s not found", containerName) + GinkgoWriter.Printf("FAIL: %v\n", fail) + + Fail(fail) return 0 } @@ -420,13 +438,19 @@ The logs are attached only if there are errors. Expect(resourceManager.WaitForPodsToBeReady(ctx, namespace)).To(Succeed()) for i := range len(objects.ScaleIterationGroups) { - Expect(resourceManager.Apply(objects.ScaleIterationGroups[i])).To(Succeed()) + Expect(resourceManager.Apply( + objects.ScaleIterationGroups[i], + framework.WithLoggingDisabled(), // disable logging to avoid huge logs + )).To(Succeed()) if i == 0 { var nginxPodNames []string Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -454,7 +478,7 @@ The logs are attached only if there are errors. url, address, timeoutConfig.RequestTimeout, - framework.WithLoggingDisabled(), // disable logging to avoid huge logs + framework.WithLoggingDisabled(), // disable logging to avoid huge logs for 1000 requests ), ).WithTimeout(6 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) @@ -489,7 +513,10 @@ The logs are attached only if there are errors. var err error Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -508,7 +535,11 @@ The logs are attached only if there are errors. } Eventually( - framework.CreateResponseChecker(url, address, timeoutConfig.RequestTimeout), + framework.CreateResponseChecker( + url, + address, + timeoutConfig.RequestTimeout, + ), ).WithTimeout(5 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) Expect( @@ -521,7 +552,11 @@ The logs are attached only if there are errors. Expect(resourceManager.WaitForPodsToBeReady(ctx, namespace)).To(Succeed()) Eventually( - framework.CreateResponseChecker(url, address, timeoutConfig.RequestTimeout), + framework.CreateResponseChecker( + url, + address, + timeoutConfig.RequestTimeout, + ), ).WithTimeout(5 * timeoutConfig.RequestTimeout).WithPolling(100 * time.Millisecond).Should(Succeed()) } @@ -639,7 +674,10 @@ The logs are attached only if there are errors. var err error Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -702,7 +740,10 @@ The logs are attached only if there are errors. 
}) AfterEach(func() { - framework.AddNginxLogsAndEventsToReport(resourceManager, namespace) + framework.AddNginxLogsAndEventsToReport( + resourceManager, + namespace, + ) cleanUpPortForward() Expect(resourceManager.DeleteNamespace(namespace)).To(Succeed()) teardown(releaseName) @@ -905,7 +946,10 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim var err error Eventually( func() bool { - nginxPodNames, err = framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err = resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) return len(nginxPodNames) == 1 && err == nil }). WithTimeout(timeoutConfig.CreateTimeout). @@ -921,7 +965,10 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim }) AfterAll(func() { - framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + framework.AddNginxLogsAndEventsToReport( + resourceManager, + ns.Name, + ) cleanUpPortForward() teardown(releaseName) @@ -961,7 +1008,11 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim ctx, cancel := context.WithTimeout(context.Background(), timeoutConfig.UpdateTimeout) - Expect(resourceManager.WaitForPodsToBeReadyWithCount(ctx, ns.Name, i+numCoffeeAndTeaPods)).To(Succeed()) + Expect(resourceManager.WaitForPodsToBeReadyWithCount( + ctx, + ns.Name, + i+numCoffeeAndTeaPods, + )).To(Succeed()) Expect(resourceManager.WaitForGatewayObservedGeneration(ctx, ns.Name, "gateway", i)).To(Succeed()) cancel() @@ -1037,8 +1088,11 @@ var _ = Describe("Zero downtime scale test", Ordered, Label("nfr", "zero-downtim var gw v1.Gateway key := types.NamespacedName{Namespace: ns.Name, Name: "gateway"} - if err := resourceManager.K8sClient.Get(ctx, key, &gw); err != nil { - return fmt.Errorf("failed to get gateway: %w", err) + if err := resourceManager.Get(ctx, key, &gw); err != nil { + gatewayErr := fmt.Errorf("failed to get gateway: %w", err) + GinkgoWriter.Printf("ERROR: %v\n", gatewayErr) + + return gatewayErr } if len(gw.Status.Listeners) != num { diff --git a/tests/suite/snippets_filter_test.go b/tests/suite/snippets_filter_test.go index 78e0a592e8..af67646a7d 100644 --- a/tests/suite/snippets_filter_test.go +++ b/tests/suite/snippets_filter_test.go @@ -42,7 +42,10 @@ var _ = Describe("SnippetsFilter", Ordered, Label("functional", "snippets-filter Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -270,9 +273,7 @@ func checkHTTPRouteToHaveGatewayNotProgrammedCond(httpRouteNsName types.Namespac var hr v1.HTTPRoute var err error - if err = k8sClient.Get(ctx, httpRouteNsName, &hr); err != nil { - GinkgoWriter.Printf("ERROR: failed to get HTTPRoute: %v\n", err) - + if err = resourceManager.Get(ctx, httpRouteNsName, &hr); err != nil { return err } @@ -328,9 +329,7 @@ func checkForSnippetsFilterToBeAccepted(snippetsFilterNsNames types.NamespacedNa var sf ngfAPI.SnippetsFilter var err error - if err = k8sClient.Get(ctx, snippetsFilterNsNames, &sf); err != nil { - GinkgoWriter.Printf("ERROR: failed to get SnippetsFilter: %v\n", err) - + if err = resourceManager.Get(ctx, snippetsFilterNsNames, 
&sf); err != nil { return err } diff --git a/tests/suite/system_suite_test.go b/tests/suite/system_suite_test.go index 71d610bf5a..31ce49c0d0 100644 --- a/tests/suite/system_suite_test.go +++ b/tests/suite/system_suite_test.go @@ -124,7 +124,6 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { options := client.Options{ Scheme: scheme, } - var err error k8sClient, err = client.New(k8sConfig, options) Expect(err).ToNot(HaveOccurred()) @@ -179,8 +178,7 @@ func setup(cfg setupConfig, extraInstallArgs ...string) { installCfg := createNGFInstallConfig(cfg, extraInstallArgs...) - podNames, err := framework.GetReadyNGFPodNames( - k8sClient, + podNames, err := resourceManager.GetReadyNGFPodNames( installCfg.Namespace, installCfg.ReleaseName, timeoutConfig.CreateTimeout, @@ -263,7 +261,7 @@ func createNGFInstallConfig(cfg setupConfig, extraInstallArgs ...string) framewo } if *plusEnabled { - Expect(framework.CreateLicenseSecret(k8sClient, ngfNamespace, *plusLicenseFileName)).To(Succeed()) + Expect(framework.CreateLicenseSecret(resourceManager, ngfNamespace, *plusLicenseFileName)).To(Succeed()) } output, err = framework.InstallNGF(installCfg, extraInstallArgs...) @@ -278,7 +276,7 @@ func teardown(relName string) { Namespace: ngfNamespace, } - output, err := framework.UninstallNGF(cfg, k8sClient) + output, err := framework.UninstallNGF(cfg, resourceManager) Expect(err).ToNot(HaveOccurred(), string(output)) output, err = framework.UninstallGatewayAPI(*gatewayAPIVersion) @@ -293,7 +291,11 @@ func teardown(relName string) { true, /* poll immediately */ func(ctx context.Context) (bool, error) { key := k8sTypes.NamespacedName{Name: ngfNamespace} - if err := k8sClient.Get(ctx, key, &core.Namespace{}); err != nil && apierrors.IsNotFound(err) { + if err := resourceManager.Get( + ctx, + key, + &core.Namespace{}, + ); err != nil && apierrors.IsNotFound(err) { return true, nil } diff --git a/tests/suite/tracing_test.go b/tests/suite/tracing_test.go index bdc1abda54..9b31d47be0 100644 --- a/tests/suite/tracing_test.go +++ b/tests/suite/tracing_test.go @@ -52,11 +52,11 @@ var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "trac key := types.NamespacedName{Name: "ngf-test-proxy-config", Namespace: "nginx-gateway"} var nginxProxy ngfAPIv1alpha2.NginxProxy - Expect(k8sClient.Get(ctx, key, &nginxProxy)).To(Succeed()) + Expect(resourceManager.Get(ctx, key, &nginxProxy)).To(Succeed()) nginxProxy.Spec.Telemetry = &telemetry - Expect(k8sClient.Update(ctx, &nginxProxy)).To(Succeed()) + Expect(resourceManager.Update(ctx, &nginxProxy, nil)).To(Succeed()) } BeforeAll(func() { @@ -92,7 +92,10 @@ var _ = Describe("Tracing", FlakeAttempts(2), Ordered, Label("functional", "trac Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -240,9 +243,7 @@ func verifyGatewayClassResolvedRefs() error { defer cancel() var gc gatewayv1.GatewayClass - if err := k8sClient.Get(ctx, types.NamespacedName{Name: gatewayClassName}, &gc); err != nil { - GinkgoWriter.Printf("ERROR getting GatewayClass %s: %v\n", gatewayClassName, err) - + if err := resourceManager.Get(ctx, types.NamespacedName{Name: gatewayClassName}, &gc); err != nil { 
return err } @@ -272,9 +273,7 @@ func verifyPolicyStatus() error { var pol ngfAPIv1alpha2.ObservabilityPolicy key := types.NamespacedName{Name: "test-observability-policy", Namespace: "helloworld"} - if err := k8sClient.Get(ctx, key, &pol); err != nil { - GinkgoWriter.Printf("ERROR getting ObservabilityPolicy %q in namespace %q: %v\n", key.Name, key.Namespace, err) - + if err := resourceManager.Get(ctx, key, &pol); err != nil { return err } diff --git a/tests/suite/upgrade_test.go b/tests/suite/upgrade_test.go index cc864ff990..1636893003 100644 --- a/tests/suite/upgrade_test.go +++ b/tests/suite/upgrade_test.go @@ -67,7 +67,10 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { Expect(resourceManager.ApplyFromFiles(files, ns.Name)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(ns.Name)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, ns.Name, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + ns.Name, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -83,7 +86,10 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { }) AfterEach(func() { - framework.AddNginxLogsAndEventsToReport(resourceManager, ns.Name) + framework.AddNginxLogsAndEventsToReport( + resourceManager, + ns.Name, + ) cleanUpPortForward() Expect(resourceManager.DeleteFromFiles(files, ns.Name)).To(Succeed()) @@ -204,7 +210,7 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { Expect(resourceManager.ApplyFromFiles([]string{"ngf-upgrade/gateway-updated.yaml"}, ns.Name)).To(Succeed()) - podNames, err := framework.GetReadyNGFPodNames(k8sClient, ngfNamespace, releaseName, timeoutConfig.GetTimeout) + podNames, err := resourceManager.GetReadyNGFPodNames(ngfNamespace, releaseName, timeoutConfig.GetTimeout) Expect(err).ToNot(HaveOccurred()) Expect(podNames).ToNot(BeEmpty()) @@ -220,7 +226,7 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { true, /* poll immediately */ func(_ context.Context) (bool, error) { defer GinkgoRecover() - Expect(k8sClient.Get(leaseCtx, key, &lease)).To(Succeed()) + Expect(resourceManager.Get(leaseCtx, key, &lease)).To(Succeed()) if lease.Spec.HolderIdentity != nil { for _, podName := range podNames { @@ -245,7 +251,7 @@ var _ = Describe("Upgrade testing", Label("nfr", "upgrade"), func() { 500*time.Millisecond, true, /* poll immediately */ func(ctx context.Context) (bool, error) { - Expect(k8sClient.Get(ctx, key, &gw)).To(Succeed()) + Expect(resourceManager.Get(ctx, key, &gw)).To(Succeed()) expListenerName := "http-new" for _, listener := range gw.Status.Listeners { if listener.Name == v1.SectionName(expListenerName) { diff --git a/tests/suite/upstream_settings_test.go b/tests/suite/upstream_settings_test.go index cd70604ef0..2d53aac520 100644 --- a/tests/suite/upstream_settings_test.go +++ b/tests/suite/upstream_settings_test.go @@ -51,7 +51,10 @@ var _ = Describe("UpstreamSettingsPolicy", Ordered, Label("functional", "uspolic Expect(resourceManager.ApplyFromFiles(files, namespace)).To(Succeed()) Expect(resourceManager.WaitForAppsToBeReady(namespace)).To(Succeed()) - nginxPodNames, err := framework.GetReadyNginxPodNames(k8sClient, namespace, timeoutConfig.GetStatusTimeout) + nginxPodNames, err := resourceManager.GetReadyNginxPodNames( + namespace, + timeoutConfig.GetStatusTimeout, + ) Expect(err).ToNot(HaveOccurred()) Expect(nginxPodNames).To(HaveLen(1)) @@ -424,7 +427,7 @@ func 
usPolicyHasNoAncestors(usPolicyNsName types.NamespacedName) bool { defer cancel() var usPolicy ngfAPI.UpstreamSettingsPolicy - if err := k8sClient.Get(ctx, usPolicyNsName, &usPolicy); err != nil { + if err := resourceManager.Get(ctx, usPolicyNsName, &usPolicy); err != nil { GinkgoWriter.Printf("ERROR: Failed to get UpstreamSettingsPolicy %q: %s", usPolicyNsName, err.Error()) return false } @@ -464,9 +467,7 @@ func waitForUSPolicyStatus( var usPolicy ngfAPI.UpstreamSettingsPolicy var err error - if err := k8sClient.Get(ctx, usPolicyNsName, &usPolicy); err != nil { - GinkgoWriter.Printf("ERROR: Failed to get UpstreamSettingsPolicy %q: %s", usPolicyNsName, err.Error()) - + if err := resourceManager.Get(ctx, usPolicyNsName, &usPolicy); err != nil { return false, err }
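
The other recurring pattern across these hunks is dropping per-call `GinkgoWriter` error prints next to `k8sClient.Get` and delegating to `resourceManager.Get` instead (the zero-downtime hunk shows `resourceManager.K8sClient.Get` migrated the same way). A minimal sketch of what such a wrapper could look like, assuming a `ResourceManager` struct that holds the controller-runtime client and logs failures in one place; only the `K8sClient` field name is visible in this diff, the rest is illustrative:

```go
package framework

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ResourceManager wraps the test suite's Kubernetes client. The struct shape
// here is an assumption; only client.Client's Get signature is guaranteed.
type ResourceManager struct {
	K8sClient client.Client
}

// Get delegates to the controller-runtime client and logs the failure once,
// centrally, so callers no longer need their own GinkgoWriter error prints.
func (rm *ResourceManager) Get(ctx context.Context, key types.NamespacedName, obj client.Object) error {
	if err := rm.K8sClient.Get(ctx, key, obj); err != nil {
		ginkgo.GinkgoWriter.Printf("ERROR: failed to get %T %q: %v\n", obj, key, err)
		return err
	}
	return nil
}
```

Usage then matches the migrated call sites above, e.g. `resourceManager.Get(ctx, key, &lease)`; centralizing the logging keeps the polling closures short and makes the error format uniform across the suite.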