diff --git a/examples/prometheus/prometheus.yaml b/examples/prometheus/prometheus.yaml
index 040113f8bfce..d21f0490010a 100644
--- a/examples/prometheus/prometheus.yaml
+++ b/examples/prometheus/prometheus.yaml
@@ -40,6 +40,53 @@ objects:
       serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'
+
+# Create a service account for accessing prometheus data
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
+# Create a service account for prometheus to use to scrape other infrastructure components
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+    annotations:
+      kubernetes.io/service-account.name: prometheus-scraper
+  type: kubernetes.io/service-account-token
+
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRole
+  metadata:
+    name: prometheus-scraper
+  rules:
+  - apiGroups:
+    - route.openshift.io
+    resources:
+    - routers/metrics
+    verbs:
+    - get
+
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: prometheus-scraper
+  roleRef:
+    name: prometheus-scraper
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
 - apiVersion: authorization.openshift.io/v1
   kind: ClusterRoleBinding
   metadata:
@@ -51,6 +98,18 @@ objects:
     name: prometheus
     namespace: "${NAMESPACE}"
 
+- apiVersion: authorization.openshift.io/v1
+  kind: RoleBinding
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+  roleRef:
+    name: view
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
 # Create a fully end-to-end TLS connection to the prometheus proxy
 - apiVersion: route.openshift.io/v1
   kind: Route
@@ -229,6 +288,8 @@ objects:
           name: prometheus-config
         - mountPath: /prometheus
           name: prometheus-data
+        - mountPath: /var/run/secrets/kubernetes.io/scraper
+          name: prometheus-scraper-secret
 
       # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
       # use http port=4190 and https port=9943 to differ from prom-proxy
@@ -320,6 +381,9 @@ objects:
         configMap:
           defaultMode: 420
           name: prometheus
+      - name: prometheus-scraper-secret
+        secret:
+          secretName: prometheus-scraper
       - name: prometheus-proxy-secret
         secret:
           secretName: prometheus-proxy
@@ -603,21 +667,39 @@ objects:
       - action: labelmap
         regex: __meta_kubernetes_node_label_(.+)
 
+      # TODO: auto-generate these sections, or add a dynamic infrastructure scraper
       # Scrape config for the template service broker
       - job_name: 'openshift-template-service-broker'
         scheme: https
         tls_config:
           ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
           server_name: apiserver.openshift-template-service-broker.svc
-        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
         kubernetes_sd_configs:
         - role: endpoints
-
+          namespaces:
+            names:
+            - openshift-template-service-broker
         relabel_configs:
         - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
           action: keep
           regex: openshift-template-service-broker;apiserver;https
+      # Scrape config for the router
+      - job_name: 'openshift-router'
+        scheme: https
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+          server_name: router.default.svc
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
+        kubernetes_sd_configs:
+        - role: endpoints
+          namespaces:
+            names:
+            - default
+        relabel_configs:
+        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+          action: keep
+          regex: default;router;1936-tcp
 
       alerting:
         alertmanagers:
diff --git a/pkg/oc/admin/router/router.go b/pkg/oc/admin/router/router.go
index 28407b4903c5..cc699ab5eb63 100644
--- a/pkg/oc/admin/router/router.go
+++ b/pkg/oc/admin/router/router.go
@@ -391,6 +391,27 @@ func generateSecretsConfig(cfg *RouterConfig, namespace string, defaultCert []by
 		secrets = append(secrets, secret)
 	}
 
+	if cfg.Type == "haproxy-router" && cfg.StatsPort != 0 {
+		metricsCertName := "router-metrics-tls"
+		if len(defaultCert) == 0 {
+			// when we are generating a serving cert, we need to reuse the existing cert
+			metricsCertName = certName
+		}
+		volumes = append(volumes, kapi.Volume{
+			Name: "metrics-server-certificate",
+			VolumeSource: kapi.VolumeSource{
+				Secret: &kapi.SecretVolumeSource{
+					SecretName: metricsCertName,
+				},
+			},
+		})
+		mounts = append(mounts, kapi.VolumeMount{
+			Name:      "metrics-server-certificate",
+			ReadOnly:  true,
+			MountPath: "/etc/pki/tls/metrics/",
+		})
+	}
+
 	// The secret in this volume is either the one created for the
 	// user supplied default cert (pem format) or the secret generated
 	// by the service anotation (cert only format).
@@ -460,36 +481,6 @@ func generateReadinessProbeConfig(cfg *RouterConfig, ports []kapi.ContainerPort)
 	return probe
 }
 
-func generateMetricsExporterContainer(cfg *RouterConfig, env app.Environment) *kapi.Container {
-	containerName := "metrics-exporter"
-	if len(cfg.MetricsImage) > 0 {
-		return &kapi.Container{
-			Name:  containerName,
-			Image: cfg.MetricsImage,
-			Env:   env.List(),
-		}
-	}
-	switch cfg.Type {
-	case "haproxy-router":
-		return &kapi.Container{
-			Name:  containerName,
-			Image: "prom/haproxy-exporter:latest",
-			Env:   env.List(),
-			Args: []string{
-				fmt.Sprintf("--haproxy.scrape-uri=http://$(STATS_USERNAME):$(STATS_PASSWORD)@localhost:$(STATS_PORT)/haproxy?stats;csv"),
-			},
-			Ports: []kapi.ContainerPort{
-				{
-					Name:          "http",
-					ContainerPort: 9101,
-				},
-			},
-		}
-	default:
-		return nil
-	}
-}
-
 // RunCmdRouter contains all the necessary functionality for the
 // OpenShift CLI router command.
 func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Writer, cfg *RouterConfig, args []string) error {
@@ -696,6 +687,8 @@ func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Write
 	if cfg.Type == "haproxy-router" && cfg.StatsPort != 0 {
 		env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort)
 		env["ROUTER_METRICS_TYPE"] = "haproxy"
+		env["ROUTER_METRICS_TLS_CERT_FILE"] = "/etc/pki/tls/metrics/tls.crt"
+		env["ROUTER_METRICS_TLS_KEY_FILE"] = "/etc/pki/tls/metrics/tls.key"
 	}
 	env.Add(secretEnv)
 	if len(defaultCert) > 0 {
@@ -741,13 +734,6 @@ func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Write
 		},
 	}
 
-	if cfg.StatsPort > 0 && cfg.ExposeMetrics {
-		pc := generateMetricsExporterContainer(cfg, env)
-		if pc != nil {
-			containers = append(containers, *pc)
-		}
-	}
-
 	objects := []runtime.Object{}
 	for _, s := range secrets {
 		objects = append(objects, s)
@@ -826,6 +812,9 @@ func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Write
 			// The secret generated by the service annotaion contains a tls.crt and tls.key
 			// which ultimately need to be combined into a pem
 			t.Annotations["service.alpha.openshift.io/serving-cert-secret-name"] = certName
+		} else if cfg.Type == "haproxy-router" && cfg.StatsPort != 0 {
+			// Generate a serving cert for metrics only
+			t.Annotations["service.alpha.openshift.io/serving-cert-secret-name"] = "router-metrics-tls"
 		}
 	}
 }
diff --git a/pkg/oc/bootstrap/bindata.go b/pkg/oc/bootstrap/bindata.go
index fe20f1c15939..7b94e611df03 100644
--- a/pkg/oc/bootstrap/bindata.go
+++ b/pkg/oc/bootstrap/bindata.go
@@ -14001,6 +14001,53 @@ objects:
       serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'
+
+# Create a service account for accessing prometheus data
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
+# Create a service account for prometheus to use to scrape other infrastructure components
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+    annotations:
+      kubernetes.io/service-account.name: prometheus-scraper
+  type: kubernetes.io/service-account-token
+
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRole
+  metadata:
+    name: prometheus-scraper
+  rules:
+  - apiGroups:
+    - route.openshift.io
+    resources:
+    - routers/metrics
+    verbs:
+    - get
+
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: prometheus-scraper
+  roleRef:
+    name: prometheus-scraper
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
 - apiVersion: authorization.openshift.io/v1
   kind: ClusterRoleBinding
   metadata:
@@ -14012,6 +14059,18 @@ objects:
     name: prometheus
     namespace: "${NAMESPACE}"
 
+- apiVersion: authorization.openshift.io/v1
+  kind: RoleBinding
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+  roleRef:
+    name: view
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
 # Create a fully end-to-end TLS connection to the prometheus proxy
 - apiVersion: route.openshift.io/v1
   kind: Route
@@ -14190,6 +14249,8 @@ objects:
           name: prometheus-config
         - mountPath: /prometheus
           name: prometheus-data
+        - mountPath: /var/run/secrets/kubernetes.io/scraper
+          name: prometheus-scraper-secret
 
       # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
       # use http port=4190 and https port=9943 to differ from prom-proxy
@@ -14281,6 +14342,9 @@ objects:
         configMap:
           defaultMode: 420
           name: prometheus
+      - name: prometheus-scraper-secret
+        secret:
+          secretName: prometheus-scraper
       - name: prometheus-proxy-secret
         secret:
           secretName: prometheus-proxy
@@ -14564,21 +14628,39 @@ objects:
       - action: labelmap
         regex: __meta_kubernetes_node_label_(.+)
 
+      # TODO: auto-generate these sections, or add a dynamic infrastructure scraper
       # Scrape config for the template service broker
       - job_name: 'openshift-template-service-broker'
         scheme: https
         tls_config:
           ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
           server_name: apiserver.openshift-template-service-broker.svc
-        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
         kubernetes_sd_configs:
         - role: endpoints
-
+          namespaces:
+            names:
+            - openshift-template-service-broker
         relabel_configs:
         - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
           action: keep
           regex: openshift-template-service-broker;apiserver;https
+      # Scrape config for the router
+      - job_name: 'openshift-router'
+        scheme: https
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+          server_name: router.default.svc
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
+        kubernetes_sd_configs:
+        - role: endpoints
+          namespaces:
+            names:
+            - default
+        relabel_configs:
+        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+          action: keep
+          regex: default;router;1936-tcp
 
       alerting:
         alertmanagers:
diff --git a/test/extended/testdata/bindata.go b/test/extended/testdata/bindata.go
index 55b6f214d865..30b4d36d248e 100644
--- a/test/extended/testdata/bindata.go
+++ b/test/extended/testdata/bindata.go
@@ -25549,6 +25549,53 @@ objects:
       serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
       serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'
+
+# Create a service account for accessing prometheus data
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
+# Create a service account for prometheus to use to scrape other infrastructure components
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+    annotations:
+      kubernetes.io/service-account.name: prometheus-scraper
+  type: kubernetes.io/service-account-token
+
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRole
+  metadata:
+    name: prometheus-scraper
+  rules:
+  - apiGroups:
+    - route.openshift.io
+    resources:
+    - routers/metrics
+    verbs:
+    - get
+
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: prometheus-scraper
+  roleRef:
+    name: prometheus-scraper
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-scraper
+    namespace: "${NAMESPACE}"
+
 - apiVersion: authorization.openshift.io/v1
   kind: ClusterRoleBinding
   metadata:
@@ -25560,6 +25607,18 @@ objects:
     name: prometheus
     namespace: "${NAMESPACE}"
 
+- apiVersion: authorization.openshift.io/v1
+  kind: RoleBinding
+  metadata:
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+  roleRef:
+    name: view
+  subjects:
+  - kind: ServiceAccount
+    name: prometheus-reader
+    namespace: "${NAMESPACE}"
+
 # Create a fully end-to-end TLS connection to the prometheus proxy
 - apiVersion: route.openshift.io/v1
   kind: Route
@@ -25738,6 +25797,8 @@ objects:
           name: prometheus-config
         - mountPath: /prometheus
           name: prometheus-data
+        - mountPath: /var/run/secrets/kubernetes.io/scraper
+          name: prometheus-scraper-secret
 
       # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
       # use http port=4190 and https port=9943 to differ from prom-proxy
@@ -25829,6 +25890,9 @@ objects:
         configMap:
          defaultMode: 420
          name: prometheus
+      - name: prometheus-scraper-secret
+        secret:
+          secretName: prometheus-scraper
       - name: prometheus-proxy-secret
         secret:
           secretName: prometheus-proxy
@@ -26112,21 +26176,39 @@ objects:
       - action: labelmap
         regex: __meta_kubernetes_node_label_(.+)
 
+      # TODO: auto-generate these sections, or add a dynamic infrastructure scraper
       # Scrape config for the template service broker
      - job_name: 'openshift-template-service-broker'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
          server_name: apiserver.openshift-template-service-broker.svc
-        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
        kubernetes_sd_configs:
        - role: endpoints
-
+          namespaces:
+            names:
+            - openshift-template-service-broker
        relabel_configs:
        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
          action: keep
          regex: openshift-template-service-broker;apiserver;https
+      # Scrape config for the router
+      - job_name: 'openshift-router'
+        scheme: https
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+          server_name: router.default.svc
+        bearer_token_file: /var/run/secrets/kubernetes.io/scraper/token
+        kubernetes_sd_configs:
+        - role: endpoints
+          namespaces:
+            names:
+            - default
+        relabel_configs:
+        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+          action: keep
+          regex: default;router;1936-tcp
 
       alerting:
         alertmanagers:
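
Taken together, these changes give Prometheus a dedicated `prometheus-scraper` service account whose token is mounted at /var/run/secrets/kubernetes.io/scraper/token, grant that account `get` on `routers/metrics`, and have `oc adm router` mount a serving cert and export ROUTER_METRICS_TLS_CERT_FILE / ROUTER_METRICS_TLS_KEY_FILE so the haproxy router can serve its metrics over TLS on the stats port. The sketch below is not the router's actual metrics server; it is a minimal, self-contained Go illustration (stdlib only, hypothetical `main` package, assumed fallback paths and port) of the contract the new `openshift-router` scrape job relies on: an HTTPS /metrics endpoint backed by the mounted tls.crt/tls.key that expects a bearer token, which the real router would authorize against `routers/metrics`.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

// getenv returns the value of key, or def when the variable is unset or empty.
func getenv(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	// The env variable names are the ones set in router.go above; the
	// fallback paths and port are assumptions for a stand-alone demo.
	certFile := getenv("ROUTER_METRICS_TLS_CERT_FILE", "/etc/pki/tls/metrics/tls.crt")
	keyFile := getenv("ROUTER_METRICS_TLS_KEY_FILE", "/etc/pki/tls/metrics/tls.key")
	listenAddr := getenv("ROUTER_LISTEN_ADDR", "0.0.0.0:1936")

	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// The real router authorizes the presented token for get on
		// route.openshift.io/routers/metrics (the new ClusterRole above);
		// this sketch only checks that a bearer token was sent at all.
		if !strings.HasPrefix(r.Header.Get("Authorization"), "Bearer ") {
			http.Error(w, "bearer token required", http.StatusUnauthorized)
			return
		}
		// Serve a trivial, hard-coded sample in Prometheus text format.
		fmt.Fprintln(w, "# TYPE haproxy_up gauge")
		fmt.Fprintln(w, "haproxy_up 1")
	})

	// tls.crt / tls.key come from the router-metrics-tls secret created by the
	// service.alpha.openshift.io/serving-cert-secret-name annotation, so the
	// certificate is valid for router.default.svc, which is the server_name
	// Prometheus verifies in the new openshift-router scrape job.
	if err := http.ListenAndServeTLS(listenAddr, certFile, keyFile, mux); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

On the scraping side, the new `openshift-router` job keeps only endpoints whose namespace, service, and port name relabel to default;router;1936-tcp, validates the serving cert against router.default.svc using the service CA, and presents the scraper token from the mounted prometheus-scraper secret.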