From b192cf162dc776d2c9d40f5f9979589fc079b0a4 Mon Sep 17 00:00:00 2001 From: Jackson West Date: Fri, 25 Feb 2022 12:37:01 -0600 Subject: [PATCH] EKS-D changes to helm control plane chart Signed-off-by: Jackson West --- config/prow/cluster/crier_deployment.yaml | 118 ++++++--------- config/prow/cluster/crier_rbac.yaml | 44 +----- config/prow/cluster/deck_deployment.yaml | 135 +++++++----------- config/prow/cluster/deck_rbac.yaml | 38 +---- config/prow/cluster/deck_service.yaml | 11 +- config/prow/cluster/ghproxy.yaml | 35 +++-- config/prow/cluster/hook_deployment.yaml | 116 +++++---------- config/prow/cluster/hook_rbac.yaml | 6 +- config/prow/cluster/hook_service.yaml | 12 +- .../prow/cluster/horologium_deployment.yaml | 21 ++- config/prow/cluster/horologium_rbac.yaml | 6 +- .../prow_controller_manager_deployment.yaml | 109 ++++++-------- .../cluster/prow_controller_manager_rbac.yaml | 31 +--- config/prow/cluster/sinker_deployment.yaml | 115 ++++++--------- config/prow/cluster/sinker_rbac.yaml | 29 +--- .../cluster/statusreconciler_deployment.yaml | 29 ++-- .../prow/cluster/statusreconciler_rbac.yaml | 7 +- config/prow/cluster/tide_deployment.yaml | 38 +++-- config/prow/cluster/tide_rbac.yaml | 8 +- config/prow/cluster/tide_service.yaml | 11 +- 20 files changed, 339 insertions(+), 580 deletions(-) diff --git a/config/prow/cluster/crier_deployment.yaml b/config/prow/cluster/crier_deployment.yaml index d53ff40e9b..ecb49e4f79 100644 --- a/config/prow/cluster/crier_deployment.yaml +++ b/config/prow/cluster/crier_deployment.yaml @@ -15,7 +15,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: crier labels: app: crier @@ -26,6 +25,12 @@ spec: app: crier template: metadata: + {{- if .Values.crier.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: crier spec: @@ -33,64 +38,56 @@ spec: terminationGracePeriodSeconds: 30 containers: - name: crier - image: gcr.io/k8s-prow/crier:v20230322-fad14aa00d + image: {{ .Values.crier.image }} args: - - --blob-storage-workers=1 + - --blob-storage-workers=10 - --config-path=/etc/config/config.yaml - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - - --github-token-path=/etc/github/oauth - - --github-workers=5 + - --github-token-path=/etc/github/token + - --github-workers=10 - --job-config-path=/etc/job-config - - --kubernetes-blob-storage-workers=1 - - --slack-token-file=/etc/slack/token - - --slack-workers=1 + - --kubeconfig=/etc/kubeconfig/config + - --kubernetes-blob-storage-workers=10 + - --s3-credentials-file=/etc/s3-credentials/service-account.json env: - # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - - name: KUBECONFIG - value: "/etc/kubeconfig/config:/etc/kubeconfig-build-test-infra-trusted/kubeconfig:/etc/kubeconfig-build-k8s-prow-builds/kubeconfig:/etc/kubeconfig-build-rules-k8s/kubeconfig:/etc/kubeconfig-eks-prow-build-cluster/kubeconfig" - # AWS_ variables needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: AWS_ROLE_ARN - value: arn:aws:iam::468814281478:role/Prow-EKS-Admin - - name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/aws-iam-token/serviceaccount/token - - name: AWS_REGION - value: us-east-2 - ports: - - name: metrics - containerPort: 9090 + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - - mountPath: /etc/kubeconfig - name: kubeconfig - readOnly: true - - mountPath: /etc/kubeconfig-build-test-infra-trusted - name: kubeconfig-build-test-infra-trusted - readOnly: true - - mountPath: /etc/kubeconfig-build-k8s-prow-builds - name: kubeconfig-build-k8s-prow-builds - readOnly: true - - mountPath: /etc/kubeconfig-build-rules-k8s - name: kubeconfig-build-rules-k8s - readOnly: true - - mountPath: /etc/kubeconfig-eks-prow-build-cluster - name: kubeconfig-eks-prow-build-cluster - readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config mountPath: /etc/job-config readOnly: true - - name: oauth + - name: github-token mountPath: /etc/github readOnly: true - - name: slack - mountPath: /etc/slack + - name: s3-credentials + mountPath: /etc/s3-credentials readOnly: true - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - mountPath: /var/run/secrets/aws-iam-token/serviceaccount + - name: kubeconfig + mountPath: /etc/kubeconfig readOnly: true + - name: shared-bins + mountPath: /shared-bins + initContainers: + - name: aws-iam-authenticator + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + image: {{ .Values.awsIamAuthenticator.image }} + command: + - cp + - /aws-iam-authenticator + - /shared-bins/aws-iam-authenticator + volumeMounts: + - name: shared-bins + mountPath: /shared-bins volumes: - name: config configMap: @@ -98,38 +95,15 @@ spec: - name: job-config configMap: name: job-config - - name: oauth + - name: github-token secret: - secretName: oauth-token - - name: slack + secretName: github-token + - name: s3-credentials secret: - secretName: slack-token + secretName: s3-credentials + - name: shared-bins + emptyDir: {} - name: kubeconfig secret: - defaultMode: 420 + defaultMode: 0644 secretName: kubeconfig - - name: kubeconfig-build-test-infra-trusted - secret: - defaultMode: 420 - secretName: kubeconfig-build-test-infra-trusted - - name: kubeconfig-build-k8s-prow-builds - secret: - defaultMode: 420 - secretName: kubeconfig-build-k8s-prow-builds - - name: kubeconfig-build-rules-k8s - secret: - defaultMode: 420 - secretName: kubeconfig-build-rules-k8s - - name: kubeconfig-eks-prow-build-cluster - secret: - defaultMode: 420 - secretName: kubeconfig-eks-prow-build-cluster - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - audience: sts.amazonaws.com - expirationSeconds: 86400 - path: token diff --git a/config/prow/cluster/crier_rbac.yaml b/config/prow/cluster/crier_rbac.yaml index 53f4471080..57393297a9 100644 --- a/config/prow/cluster/crier_rbac.yaml +++ b/config/prow/cluster/crier_rbac.yaml @@ -13,18 +13,16 @@ # limitations under the License. 
--- +{{ if .Values.crier.serviceAccount.create }} kind: ServiceAccount apiVersion: v1 metadata: - annotations: - iam.gke.io/gcp-service-account: control-plane@k8s-prow.iam.gserviceaccount.com name: crier - namespace: default +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: crier rules: - apiGroups: @@ -37,46 +35,10 @@ rules: - "list" - "patch" --- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: test-pods - name: crier -rules: -- apiGroups: - - "" - resources: - - "pods" - - "events" - verbs: - - "get" - - "list" -- apiGroups: - - "" - resources: - - "pods" - verbs: - - "patch" ---- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: crier-namespaced - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: crier -subjects: -- kind: ServiceAccount name: crier - namespace: default ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: crier-namespaced - namespace: test-pods roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -84,4 +46,4 @@ roleRef: subjects: - kind: ServiceAccount name: crier - namespace: default + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/deck_deployment.yaml b/config/prow/cluster/deck_deployment.yaml index 50e7cb6966..abd0103817 100644 --- a/config/prow/cluster/deck_deployment.yaml +++ b/config/prow/cluster/deck_deployment.yaml @@ -15,12 +15,11 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: deck labels: app: deck spec: - replicas: 3 + replicas: 2 strategy: type: RollingUpdate rollingUpdate: @@ -31,6 +30,12 @@ spec: app: deck template: metadata: + {{- if .Values.deck.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: deck spec: @@ -38,77 +43,52 @@ spec: terminationGracePeriodSeconds: 30 containers: - name: deck - image: gcr.io/k8s-prow/deck:v20230322-fad14aa00d + image: {{ .Values.deck.image }} imagePullPolicy: Always ports: - name: http containerPort: 8080 - - name: metrics - containerPort: 9090 args: + - --kubeconfig=/etc/kubeconfig/config - --tide-url=http://tide/ - --hook-url=http://hook:8888/plugin-help - - --redirect-http-to=prow.k8s.io - - --oauth-url=/github-login - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config - --spyglass=true - - --rerun-creates-job - - --github-token-path=/etc/github/oauth + - --github-token-path=/etc/github/token - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - - --github-oauth-config-file=/etc/githuboauth/secret - - --cookie-secret=/etc/cookie/secret - --plugin-config=/etc/plugins/plugins.yaml + - --github-graphql-endpoint=http://ghproxy/graphql + - --s3-credentials-file=/etc/s3-credentials/service-account.json env: - # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - - name: KUBECONFIG - value: "/etc/kubeconfig/config:/etc/kubeconfig-build-test-infra-trusted/kubeconfig:/etc/kubeconfig-build-k8s-prow-builds/kubeconfig:/etc/kubeconfig-build-rules-k8s/kubeconfig:/etc/kubeconfig-eks-prow-build-cluster/kubeconfig" - # AWS_ variables needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: AWS_ROLE_ARN - value: arn:aws:iam::468814281478:role/Prow-EKS-Admin - - name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/aws-iam-token/serviceaccount/token - - name: AWS_REGION - value: us-east-2 + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - - name: oauth-config - mountPath: /etc/githuboauth - readOnly: true - - name: cookie-secret - mountPath: /etc/cookie - readOnly: true - - mountPath: /etc/kubeconfig - name: kubeconfig - readOnly: true - - mountPath: /etc/kubeconfig-build-test-infra-trusted - name: kubeconfig-build-test-infra-trusted - readOnly: true - - mountPath: /etc/kubeconfig-build-k8s-prow-builds - name: kubeconfig-build-k8s-prow-builds - readOnly: true - - mountPath: /etc/kubeconfig-build-rules-k8s - name: kubeconfig-build-rules-k8s - readOnly: true - - mountPath: /etc/kubeconfig-eks-prow-build-cluster - name: kubeconfig-eks-prow-build-cluster - readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config mountPath: /etc/job-config readOnly: true - - name: oauth-token + - name: github-token mountPath: /etc/github readOnly: true - name: plugins mountPath: /etc/plugins readOnly: true - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - mountPath: /var/run/secrets/aws-iam-token/serviceaccount + - name: s3-credentials + mountPath: /etc/s3-credentials readOnly: true + - name: kubeconfig + mountPath: /etc/kubeconfig + readOnly: true + - name: shared-bins + mountPath: /shared-bins livenessProbe: httpGet: path: /healthz @@ -122,36 +102,20 @@ spec: initialDelaySeconds: 10 periodSeconds: 3 timeoutSeconds: 600 + initContainers: + - name: aws-iam-authenticator + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + image: {{ .Values.awsIamAuthenticator.image }} + command: + - cp + - /aws-iam-authenticator + - /shared-bins/aws-iam-authenticator + volumeMounts: + - name: shared-bins + mountPath: /shared-bins volumes: - - name: oauth-config - secret: - secretName: github-oauth-config - - name: oauth-token - secret: - secretName: oauth-token - - name: cookie-secret - secret: - secretName: cookie - - name: kubeconfig - secret: - defaultMode: 420 - secretName: kubeconfig - - name: kubeconfig-build-test-infra-trusted - secret: - defaultMode: 420 - secretName: kubeconfig-build-test-infra-trusted - - name: kubeconfig-build-k8s-prow-builds - secret: - defaultMode: 420 - secretName: kubeconfig-build-k8s-prow-builds - - name: kubeconfig-build-rules-k8s - secret: - defaultMode: 420 - secretName: kubeconfig-build-rules-k8s - - name: kubeconfig-eks-prow-build-cluster - secret: - defaultMode: 420 - secretName: kubeconfig-eks-prow-build-cluster - name: config configMap: name: config @@ -161,12 +125,15 @@ spec: - name: plugins configMap: name: plugins - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: aws-iam-token - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - audience: sts.amazonaws.com - expirationSeconds: 86400 - path: token + - name: github-token + secret: + secretName: github-token + - name: s3-credentials + secret: + secretName: s3-credentials + - name: shared-bins + emptyDir: {} + - name: kubeconfig + secret: + defaultMode: 0644 + secretName: kubeconfig diff --git a/config/prow/cluster/deck_rbac.yaml b/config/prow/cluster/deck_rbac.yaml index 500d79d8dd..d1a8489a38 100644 --- a/config/prow/cluster/deck_rbac.yaml +++ b/config/prow/cluster/deck_rbac.yaml @@ -1,15 +1,13 @@ +{{ if .Values.deck.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default - annotations: - iam.gke.io/gcp-service-account: control-plane@k8s-prow.iam.gserviceaccount.com name: deck +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: deck rules: - apiGroups: @@ -20,41 +18,11 @@ rules: - get - list - watch - # Required when deck runs with `--rerun-creates-job=true` - - create - # Required to abort jobs - patch --- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: test-pods - name: deck -rules: -- apiGroups: - - "" - resources: - - pods/log - verbs: - - get ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: default - name: deck -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: deck -subjects: -- kind: ServiceAccount - name: deck ---- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: test-pods name: deck roleRef: apiGroup: rbac.authorization.k8s.io @@ -63,4 +31,4 @@ roleRef: subjects: - kind: ServiceAccount name: deck - namespace: default + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/deck_service.yaml b/config/prow/cluster/deck_service.yaml index e65a50ebcb..8a418f3b6c 100644 --- a/config/prow/cluster/deck_service.yaml +++ b/config/prow/cluster/deck_service.yaml @@ -15,19 +15,10 @@ apiVersion: v1 kind: Service metadata: - labels: - app: deck - namespace: default name: deck spec: selector: app: deck ports: - - name: main - port: 80 + - port: 80 targetPort: 8080 - protocol: TCP - - name: metrics - port: 9090 - protocol: TCP - type: NodePort diff --git a/config/prow/cluster/ghproxy.yaml b/config/prow/cluster/ghproxy.yaml index f3a882a478..855bf5652f 100644 --- a/config/prow/cluster/ghproxy.yaml +++ b/config/prow/cluster/ghproxy.yaml @@ -15,7 +15,6 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: - namespace: default labels: app: ghproxy name: ghproxy @@ -24,7 +23,7 @@ spec: - ReadWriteOnce resources: requests: - storage: 100Gi + storage: {{ .Values.ghproxy.volumeSize }}Gi # gce-ssd-retain is specified in config/prow/cluster/gce-ssd-retain_storageclass.yaml # # If you are setting up your own Prow instance you can do any of the following: @@ -32,12 +31,11 @@ spec: # 2) Specify your own storage class. # 3) If you are using GKE you can use the gce-ssd-retain storage class. 
It can be # created with: `kubectl create -f config/prow/cluster/gce-ssd-retain_storageclass.yaml - storageClassName: gce-ssd-retain + # storageClassName: gce-ssd-retain --- apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: ghproxy labels: app: ghproxy @@ -45,20 +43,36 @@ spec: selector: matchLabels: app: ghproxy + strategy: + type: Recreate + # GHProxy does not support HA replicas: 1 # TODO(fejta): this should be HA template: metadata: + {{- if .Values.ghproxy.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: ghproxy spec: containers: - name: ghproxy - image: gcr.io/k8s-prow/ghproxy:v20230322-fad14aa00d + image: {{ .Values.ghproxy.image }} args: - --cache-dir=/cache - - --cache-sizeGB=99 + - --cache-sizeGB={{ add .Values.ghproxy.volumeSize -1 }} - --push-gateway=pushgateway - --serve-metrics=true + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name ports: - name: main containerPort: 8888 @@ -71,21 +85,12 @@ spec: - name: cache persistentVolumeClaim: claimName: ghproxy - # run on our dedicated node - tolerations: - - key: "dedicated" - operator: "Equal" - value: "ghproxy" - effect: "NoSchedule" - nodeSelector: - dedicated: "ghproxy" --- apiVersion: v1 kind: Service metadata: labels: app: ghproxy - namespace: default name: ghproxy spec: ports: diff --git a/config/prow/cluster/hook_deployment.yaml b/config/prow/cluster/hook_deployment.yaml index 61732fe7c8..8333694b39 100644 --- a/config/prow/cluster/hook_deployment.yaml +++ b/config/prow/cluster/hook_deployment.yaml @@ -15,12 +15,11 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: hook labels: app: hook spec: - replicas: 4 + replicas: 2 strategy: type: RollingUpdate rollingUpdate: @@ -31,6 +30,12 @@ spec: app: hook template: metadata: + {{- if .Values.hook.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: hook spec: @@ -38,39 +43,33 @@ spec: terminationGracePeriodSeconds: 180 containers: - name: hook - image: gcr.io/k8s-prow/hook:v20230322-fad14aa00d + image: {{ .Values.hook.image }} imagePullPolicy: Always args: - - --dry-run=false - - --slack-token-file=/etc/slack/token + - --kubeconfig=/etc/kubeconfig/config + - --dry-run={{ .Values.dryRun }} - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - - --github-token-path=/etc/github/oauth + - --github-token-path=/etc/github/token - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config env: - # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - - name: KUBECONFIG - value: "/etc/kubeconfig/config:/etc/kubeconfig-build-test-infra-trusted/kubeconfig:/etc/kubeconfig-build-k8s-prow-builds/kubeconfig:/etc/kubeconfig-build-rules-k8s/kubeconfig:/etc/kubeconfig-eks-prow-build-cluster/kubeconfig" - # AWS_ variables needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: AWS_ROLE_ARN - value: arn:aws:iam::468814281478:role/Prow-EKS-Admin - - name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/aws-iam-token/serviceaccount/token - - name: AWS_REGION - value: us-east-2 + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name ports: - name: http containerPort: 8888 - name: metrics containerPort: 9090 volumeMounts: - - name: slack - mountPath: /etc/slack - name: hmac mountPath: /etc/webhook readOnly: true - - name: oauth + - name: github-token mountPath: /etc/github readOnly: true - name: config @@ -82,31 +81,11 @@ spec: - name: plugins mountPath: /etc/plugins readOnly: true - - name: cat-api - mountPath: /etc/cat-api - readOnly: true - - name: unsplash-api - mountPath: /etc/unsplash-api - readOnly: true - name: kubeconfig mountPath: /etc/kubeconfig readOnly: true - - mountPath: /etc/kubeconfig-build-test-infra-trusted - name: kubeconfig-build-test-infra-trusted - readOnly: true - - mountPath: /etc/kubeconfig-build-k8s-prow-builds - name: kubeconfig-build-k8s-prow-builds - readOnly: true - - mountPath: /etc/kubeconfig-build-rules-k8s - name: kubeconfig-build-rules-k8s - readOnly: true - - mountPath: /etc/kubeconfig-eks-prow-build-cluster - name: kubeconfig-eks-prow-build-cluster - readOnly: true - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - mountPath: /var/run/secrets/aws-iam-token/serviceaccount - name: aws-iam-token - readOnly: true + - name: shared-bins + mountPath: /shared-bins livenessProbe: httpGet: path: /healthz @@ -120,16 +99,26 @@ spec: initialDelaySeconds: 10 periodSeconds: 3 timeoutSeconds: 600 + initContainers: + - name: aws-iam-authenticator + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + image: {{ .Values.awsIamAuthenticator.image }} + command: + - cp + - /aws-iam-authenticator + - /shared-bins/aws-iam-authenticator + volumeMounts: + - name: shared-bins + mountPath: /shared-bins volumes: - - name: slack - secret: - secretName: slack-token - name: hmac secret: secretName: hmac-token - - name: oauth + - name: github-token secret: - secretName: oauth-token + secretName: github-token - name: config configMap: name: config @@ -139,38 +128,9 @@ spec: - name: plugins configMap: name: plugins - - name: cat-api - configMap: - name: cat-api-key - - name: unsplash-api - secret: - secretName: unsplash-api-key + - name: shared-bins + emptyDir: {} - name: kubeconfig secret: - defaultMode: 420 + defaultMode: 0644 secretName: kubeconfig - - name: kubeconfig-build-test-infra-trusted - secret: - defaultMode: 420 - secretName: kubeconfig-build-test-infra-trusted - - name: kubeconfig-build-k8s-prow-builds - secret: - defaultMode: 420 - secretName: kubeconfig-build-k8s-prow-builds - - name: kubeconfig-build-rules-k8s - secret: - defaultMode: 420 - secretName: kubeconfig-build-rules-k8s - - name: kubeconfig-eks-prow-build-cluster - secret: - defaultMode: 420 - secretName: kubeconfig-eks-prow-build-cluster - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: aws-iam-token - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - audience: sts.amazonaws.com - expirationSeconds: 86400 - path: token diff --git a/config/prow/cluster/hook_rbac.yaml b/config/prow/cluster/hook_rbac.yaml index 81a9e28df1..9c07a0fafc 100644 --- a/config/prow/cluster/hook_rbac.yaml +++ b/config/prow/cluster/hook_rbac.yaml @@ -1,13 +1,13 @@ +{{ if .Values.hook.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default name: "hook" +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "hook" rules: - apiGroups: @@ -31,7 +31,6 @@ rules: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "hook" roleRef: apiGroup: rbac.authorization.k8s.io @@ -40,3 +39,4 @@ roleRef: subjects: - kind: ServiceAccount name: "hook" + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/hook_service.yaml b/config/prow/cluster/hook_service.yaml index f83355a9a7..a636b60490 100644 --- a/config/prow/cluster/hook_service.yaml +++ b/config/prow/cluster/hook_service.yaml @@ -15,18 +15,10 @@ apiVersion: v1 kind: Service metadata: - labels: - app: hook - namespace: default name: hook spec: selector: app: hook ports: - - name: main - port: 8888 - protocol: TCP - - name: metrics - port: 9090 - protocol: TCP - type: NodePort + - port: 8888 + type: {{ .Values.hook.service.type }} diff --git a/config/prow/cluster/horologium_deployment.yaml b/config/prow/cluster/horologium_deployment.yaml index 62a88142a1..a15c7c30cd 100644 --- a/config/prow/cluster/horologium_deployment.yaml +++ b/config/prow/cluster/horologium_deployment.yaml @@ -15,7 +15,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: horologium labels: app: horologium @@ -28,6 +27,12 @@ spec: app: horologium template: metadata: + {{- if .Values.horologium.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: horologium spec: @@ -35,14 +40,18 @@ spec: terminationGracePeriodSeconds: 30 containers: - name: horologium - image: gcr.io/k8s-prow/horologium:v20230322-fad14aa00d + image: {{ .Values.horologium.image }} args: - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config - - --dry-run=false - ports: - - name: metrics - containerPort: 9090 + - --dry-run={{ .Values.dryRun }} + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - name: config mountPath: /etc/config diff --git a/config/prow/cluster/horologium_rbac.yaml b/config/prow/cluster/horologium_rbac.yaml index 27c3ce2228..bc61184280 100644 --- a/config/prow/cluster/horologium_rbac.yaml +++ b/config/prow/cluster/horologium_rbac.yaml @@ -1,13 +1,13 @@ +{{ if .Values.horologium.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default name: "horologium" +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "horologium" rules: - apiGroups: @@ -22,7 +22,6 @@ rules: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "horologium" roleRef: apiGroup: rbac.authorization.k8s.io @@ -31,3 +30,4 @@ roleRef: subjects: - kind: ServiceAccount name: "horologium" + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/prow_controller_manager_deployment.yaml 
b/config/prow/cluster/prow_controller_manager_deployment.yaml index dad378dda5..0399c1882e 100644 --- a/config/prow/cluster/prow_controller_manager_deployment.yaml +++ b/config/prow/cluster/prow_controller_manager_deployment.yaml @@ -15,12 +15,10 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: prow-controller-manager labels: app: prow-controller-manager spec: - # Mutually exclusive with plank. Only one of them may have more than zero replicas. replicas: 1 strategy: type: RollingUpdate @@ -33,58 +31,50 @@ spec: app: prow-controller-manager template: metadata: + {{- if .Values.prowControllerManager.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: prow-controller-manager spec: serviceAccountName: prow-controller-manager containers: - name: prow-controller-manager - image: gcr.io/k8s-prow/prow-controller-manager:v20230322-fad14aa00d + image: {{ .Values.prowControllerManager.image }} args: - --config-path=/etc/config/config.yaml - - --dry-run=false + - --dry-run={{ .Values.dryRun }} - --enable-controller=plank - --job-config-path=/etc/job-config + - --github-token-path=/etc/github/token + - --github-endpoint=http://ghproxy + - --github-endpoint=https://api.github.com + - --kubeconfig=/etc/kubeconfig/config env: - # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - - name: KUBECONFIG - value: "/etc/kubeconfig/config:/etc/kubeconfig-build-test-infra-trusted/kubeconfig:/etc/kubeconfig-build-k8s-prow-builds/kubeconfig:/etc/kubeconfig-build-rules-k8s/kubeconfig:/etc/kubeconfig-eks-prow-build-cluster/kubeconfig" - # AWS_ variables needed to assume role to access the prow-build-cluster EKS cluster. - - name: AWS_ROLE_ARN - value: arn:aws:iam::468814281478:role/Prow-EKS-Admin - - name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/aws-iam-token/serviceaccount/token - - name: AWS_REGION - value: us-east-2 - ports: - - name: metrics - containerPort: 9090 + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - - mountPath: /etc/kubeconfig - name: kubeconfig - readOnly: true - - mountPath: /etc/kubeconfig-build-test-infra-trusted - name: kubeconfig-build-test-infra-trusted - readOnly: true - - mountPath: /etc/kubeconfig-build-k8s-prow-builds - name: kubeconfig-build-k8s-prow-builds - readOnly: true - - mountPath: /etc/kubeconfig-build-rules-k8s - name: kubeconfig-build-rules-k8s - readOnly: true - - mountPath: /etc/kubeconfig-eks-prow-build-cluster - name: kubeconfig-eks-prow-build-cluster - readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config mountPath: /etc/job-config readOnly: true - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - mountPath: /var/run/secrets/aws-iam-token/serviceaccount + - name: github-token + mountPath: /etc/github readOnly: true + - name: kubeconfig + mountPath: /etc/kubeconfig + readOnly: true + - name: shared-bins + mountPath: /shared-bins livenessProbe: # Pod is killed if this fails 3 times. 
httpGet: path: /healthz @@ -97,39 +87,32 @@ spec: port: 8081 initialDelaySeconds: 10 periodSeconds: 3 + initContainers: + - name: aws-iam-authenticator + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + image: {{ .Values.awsIamAuthenticator.image }} + command: + - cp + - /aws-iam-authenticator + - /shared-bins/aws-iam-authenticator + volumeMounts: + - name: shared-bins + mountPath: /shared-bins volumes: - - name: kubeconfig + - name: github-token secret: - defaultMode: 420 - secretName: kubeconfig - - name: kubeconfig-build-test-infra-trusted - secret: - defaultMode: 420 - secretName: kubeconfig-build-test-infra-trusted - - name: kubeconfig-build-k8s-prow-builds - secret: - defaultMode: 420 - secretName: kubeconfig-build-k8s-prow-builds - - name: kubeconfig-build-rules-k8s - secret: - defaultMode: 420 - secretName: kubeconfig-build-rules-k8s - - name: kubeconfig-eks-prow-build-cluster - secret: - defaultMode: 420 - secretName: kubeconfig-eks-prow-build-cluster + secretName: github-token - name: config configMap: name: config - name: job-config configMap: name: job-config - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - audience: sts.amazonaws.com - expirationSeconds: 86400 - path: token + - name: shared-bins + emptyDir: {} + - name: kubeconfig + secret: + defaultMode: 0644 + secretName: kubeconfig diff --git a/config/prow/cluster/prow_controller_manager_rbac.yaml b/config/prow/cluster/prow_controller_manager_rbac.yaml index 63e4f5ad56..b95e6f000b 100644 --- a/config/prow/cluster/prow_controller_manager_rbac.yaml +++ b/config/prow/cluster/prow_controller_manager_rbac.yaml @@ -12,16 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+{{ if .Values.prowControllerManager.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default name: "prow-controller-manager" +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "prow-controller-manager" rules: - apiGroups: @@ -61,46 +61,21 @@ rules: - prowjobs verbs: - get - - update - list - watch - patch ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: test-pods - name: "prow-controller-manager" -rules: - apiGroups: - "" resources: - pods verbs: - - create - - delete - list - watch - get - - patch ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: default - name: "prow-controller-manager" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "prow-controller-manager" -subjects: -- kind: ServiceAccount - name: "prow-controller-manager" --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: test-pods name: "prow-controller-manager" roleRef: apiGroup: rbac.authorization.k8s.io @@ -109,4 +84,4 @@ roleRef: subjects: - kind: ServiceAccount name: "prow-controller-manager" - namespace: default + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/sinker_deployment.yaml b/config/prow/cluster/sinker_deployment.yaml index dcda551ef1..0829914b88 100644 --- a/config/prow/cluster/sinker_deployment.yaml +++ b/config/prow/cluster/sinker_deployment.yaml @@ -1,7 +1,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: sinker labels: app: sinker @@ -12,90 +11,66 @@ spec: app: sinker template: metadata: + {{- if .Values.sinker.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: sinker spec: serviceAccountName: sinker containers: - name: sinker + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: + - --kubeconfig=/etc/kubeconfig/config - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config - - --dry-run=false - image: gcr.io/k8s-prow/sinker:v20230322-fad14aa00d - env: - # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - - name: KUBECONFIG - value: "/etc/kubeconfig/config:/etc/kubeconfig-build-test-infra-trusted/kubeconfig:/etc/kubeconfig-build-k8s-prow-builds/kubeconfig:/etc/kubeconfig-build-rules-k8s/kubeconfig:/etc/kubeconfig-eks-prow-build-cluster/kubeconfig" - # AWS_ variables needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: AWS_ROLE_ARN - value: arn:aws:iam::468814281478:role/Prow-EKS-Admin - - name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/aws-iam-token/serviceaccount/token - - name: AWS_REGION - value: us-east-2 - ports: - - name: metrics - containerPort: 9090 + - --dry-run={{ .Values.dryRun }} + image: {{ .Values.sinker.image }} volumeMounts: - - mountPath: /etc/kubeconfig - name: kubeconfig - readOnly: true - - mountPath: /etc/kubeconfig-build-test-infra-trusted - name: kubeconfig-build-test-infra-trusted - readOnly: true - - mountPath: /etc/kubeconfig-build-k8s-prow-builds - name: kubeconfig-build-k8s-prow-builds - readOnly: true - - mountPath: /etc/kubeconfig-build-rules-k8s - name: kubeconfig-build-rules-k8s - readOnly: true - - mountPath: /etc/kubeconfig-eks-prow-build-cluster - name: kubeconfig-eks-prow-build-cluster - readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config mountPath: /etc/job-config readOnly: true - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. - - name: aws-iam-token - mountPath: /var/run/secrets/aws-iam-token/serviceaccount + - name: kubeconfig + mountPath: /etc/kubeconfig readOnly: true + - name: shared-bins + mountPath: /shared-bins + initContainers: + - name: aws-iam-authenticator + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + image: {{ .Values.awsIamAuthenticator.image }} + command: + - cp + - /aws-iam-authenticator + - /shared-bins/aws-iam-authenticator + volumeMounts: + - name: shared-bins + mountPath: /shared-bins volumes: - - name: kubeconfig - secret: - defaultMode: 420 - secretName: kubeconfig - - name: kubeconfig-build-test-infra-trusted - secret: - defaultMode: 420 - secretName: kubeconfig-build-test-infra-trusted - - name: kubeconfig-build-k8s-prow-builds - secret: - defaultMode: 420 - secretName: kubeconfig-build-k8s-prow-builds - - name: kubeconfig-build-rules-k8s - secret: - defaultMode: 420 - secretName: kubeconfig-build-rules-k8s - - name: kubeconfig-eks-prow-build-cluster - secret: - defaultMode: 420 - secretName: kubeconfig-eks-prow-build-cluster - - name: config - configMap: - name: config - - name: job-config - configMap: - name: job-config - # AWS IAM token needed to assume role to access the prow-build-cluster EKS cluster. 
- - name: aws-iam-token - projected: - defaultMode: 420 - sources: - - serviceAccountToken: - audience: sts.amazonaws.com - expirationSeconds: 86400 - path: token + - name: config + configMap: + name: config + - name: job-config + configMap: + name: job-config + - name: shared-bins + emptyDir: {} + - name: kubeconfig + secret: + defaultMode: 0644 + secretName: kubeconfig diff --git a/config/prow/cluster/sinker_rbac.yaml b/config/prow/cluster/sinker_rbac.yaml index 70eb9b52bc..c5792a22e0 100644 --- a/config/prow/cluster/sinker_rbac.yaml +++ b/config/prow/cluster/sinker_rbac.yaml @@ -1,13 +1,13 @@ +{{ if .Values.sinker.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default name: "sinker" +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: "sinker" rules: - apiGroups: @@ -50,41 +50,18 @@ rules: - events verbs: - create ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: test-pods - name: "sinker" -rules: - apiGroups: - "" resources: - pods verbs: - - delete - list - watch - get - - patch ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: default - name: "sinker" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "sinker" -subjects: -- kind: ServiceAccount - name: "sinker" --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: test-pods name: "sinker" roleRef: apiGroup: rbac.authorization.k8s.io @@ -93,4 +70,4 @@ roleRef: subjects: - kind: ServiceAccount name: "sinker" - namespace: default + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/statusreconciler_deployment.yaml b/config/prow/cluster/statusreconciler_deployment.yaml index 16b35ca976..6dccad51d6 100644 --- a/config/prow/cluster/statusreconciler_deployment.yaml +++ b/config/prow/cluster/statusreconciler_deployment.yaml @@ -15,7 +15,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: statusreconciler labels: app: statusreconciler @@ -33,20 +32,28 @@ spec: terminationGracePeriodSeconds: 180 containers: - name: statusreconciler - image: gcr.io/k8s-prow/status-reconciler:v20230322-fad14aa00d + image: {{ .Values.statusreconciler.image }} imagePullPolicy: Always args: - - --dry-run=false + - --dry-run={{ .Values.dryRun }} - --continue-on-error=true - --plugin-config=/etc/plugins/plugins.yaml - --config-path=/etc/config/config.yaml - - --github-token-path=/etc/github/oauth + - --github-token-path=/etc/github/token - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - --job-config-path=/etc/job-config - - --denylist=kubernetes/kubernetes + - --s3-credentials-file=/etc/s3-credentials/service-account.json + - --status-path=s3://{{ .Values.prow.tideStatusReconcilerBucketName }}/status-reconciler-status + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - - name: oauth + - name: github-token mountPath: /etc/github readOnly: true - name: config @@ -58,10 +65,13 @@ spec: - name: plugins mountPath: /etc/plugins readOnly: true + - name: s3-credentials + mountPath: /etc/s3-credentials + readOnly: true volumes: - - name: oauth + - name: github-token secret: - secretName: oauth-token + secretName: github-token - name: config configMap: name: config @@ -71,3 +81,6 @@ spec: - name: plugins configMap: name: plugins + - name: s3-credentials + secret: + secretName: 
s3-credentials diff --git a/config/prow/cluster/statusreconciler_rbac.yaml b/config/prow/cluster/statusreconciler_rbac.yaml index 847cfb47c1..2f0bbdedcb 100644 --- a/config/prow/cluster/statusreconciler_rbac.yaml +++ b/config/prow/cluster/statusreconciler_rbac.yaml @@ -1,13 +1,13 @@ +{{ if .Values.statusreconciler.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - namespace: default name: statusreconciler +{{ end }} --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: statusreconciler rules: - apiGroups: @@ -20,7 +20,6 @@ rules: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: statusreconciler roleRef: apiGroup: rbac.authorization.k8s.io @@ -28,4 +27,4 @@ roleRef: name: statusreconciler subjects: - kind: ServiceAccount - name: statusreconciler + name: {{ .Release.Namespace }} diff --git a/config/prow/cluster/tide_deployment.yaml b/config/prow/cluster/tide_deployment.yaml index 3b26a7e2dd..3f9d26f8f4 100644 --- a/config/prow/cluster/tide_deployment.yaml +++ b/config/prow/cluster/tide_deployment.yaml @@ -15,7 +15,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: default name: tide labels: app: tide @@ -28,29 +27,44 @@ spec: app: tide template: metadata: + {{- if .Values.tide.scrape_metrics }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '9090' + prometheus.io/scrape: 'true' + {{- end }} labels: app: tide spec: serviceAccountName: tide containers: - name: tide - image: gcr.io/k8s-prow/tide:v20230322-fad14aa00d + image: {{ .Values.tide.image }} args: - - --dry-run=false + - --dry-run={{ .Values.dryRun }} - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - - --github-token-path=/etc/github/oauth + - --github-token-path=/etc/github/token - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config - - --history-uri=gs://k8s-prow/tide-history.json - - --status-path=gs://k8s-prow/tide-status-checkpoint.yaml + - --history-uri=s3://{{ .Values.prow.tideStatusReconcilerBucketName }}/tide-history.json + - --status-path=s3://{{ .Values.prow.tideStatusReconcilerBucketName }}/tide-status + - --github-graphql-endpoint=http://ghproxy/graphql + - --s3-credentials-file=/etc/s3-credentials/service-account.json ports: - name: http containerPort: 8888 - name: metrics containerPort: 9090 + env: + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: AWS_ROLE_SESSION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name volumeMounts: - - name: oauth + - name: github-token mountPath: /etc/github readOnly: true - name: config @@ -59,13 +73,19 @@ spec: - name: job-config mountPath: /etc/job-config readOnly: true + - name: s3-credentials + mountPath: /etc/s3-credentials + readOnly: true volumes: - - name: oauth + - name: github-token secret: - secretName: oauth-token + secretName: github-token - name: config configMap: name: config - name: job-config configMap: name: job-config + - name: s3-credentials + secret: + secretName: s3-credentials diff --git a/config/prow/cluster/tide_rbac.yaml b/config/prow/cluster/tide_rbac.yaml index 65ce3f5704..4c294156ac 100644 --- a/config/prow/cluster/tide_rbac.yaml +++ b/config/prow/cluster/tide_rbac.yaml @@ -1,15 +1,13 @@ +{{ if .Values.tide.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - annotations: - iam.gke.io/gcp-service-account: control-plane@k8s-prow.iam.gserviceaccount.com - namespace: default name: tide +{{ end }} --- kind: Role apiVersion: 
rbac.authorization.k8s.io/v1 metadata: - namespace: default name: tide rules: - apiGroups: @@ -25,7 +23,6 @@ rules: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - namespace: default name: tide roleRef: apiGroup: rbac.authorization.k8s.io @@ -34,3 +31,4 @@ roleRef: subjects: - kind: ServiceAccount name: tide + namespace: {{ .Release.Namespace }} diff --git a/config/prow/cluster/tide_service.yaml b/config/prow/cluster/tide_service.yaml index 00ba9ae5c0..1195cb16c4 100644 --- a/config/prow/cluster/tide_service.yaml +++ b/config/prow/cluster/tide_service.yaml @@ -15,19 +15,10 @@ apiVersion: v1 kind: Service metadata: - labels: - app: tide - namespace: default name: tide spec: selector: app: tide ports: - - name: main - port: 80 + - port: 80 targetPort: 8888 - protocol: TCP - - name: metrics - port: 9090 - protocol: TCP - type: ClusterIP -- 2.38.5
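
Reviewer note (appended after the patch, not part of the diff): the templated manifests above reference a number of Helm values. The sketch below lists the `.Values.*` keys this patch appears to assume, in values.yaml form. The keys are taken directly from the templates; every concrete value (image URIs, bucket name, volume size, service type) is an illustrative placeholder, not something defined by this change. Rendering the chart with `helm template -f <values file>` against a values file like this is a quick way to sanity-check the generated manifests.

# Illustrative values.yaml sketch for the templated control-plane manifests above.
# Keys mirror the .Values.* references in this patch; all concrete values are placeholders.
dryRun: false                                 # rendered into --dry-run for hook, horologium, sinker, tide, etc.

awsIamAuthenticator:
  image: <aws-iam-authenticator image URI>    # copied into /shared-bins by the init containers

prow:
  tideStatusReconcilerBucketName: <s3 bucket for tide and status-reconciler state>

ghproxy:
  image: <ghproxy image URI>
  volumeSize: 100                             # PVC size in Gi; --cache-sizeGB is rendered as volumeSize - 1
  scrape_metrics: true                        # adds the prometheus.io scrape annotations

crier:
  image: <crier image URI>
  scrape_metrics: true
  serviceAccount:
    create: true                              # gates rendering of the ServiceAccount object

deck:
  image: <deck image URI>
  scrape_metrics: true
  serviceAccount:
    create: true

hook:
  image: <hook image URI>
  scrape_metrics: true
  serviceAccount:
    create: true
  service:
    type: ClusterIP                           # rendered into the hook Service spec.type

horologium:
  image: <horologium image URI>
  scrape_metrics: true
  serviceAccount:
    create: true

prowControllerManager:
  image: <prow-controller-manager image URI>
  scrape_metrics: true
  serviceAccount:
    create: true

sinker:
  image: <sinker image URI>
  scrape_metrics: true
  serviceAccount:
    create: true

statusreconciler:
  image: <status-reconciler image URI>
  serviceAccount:
    create: true

tide:
  image: <tide image URI>
  scrape_metrics: true
  serviceAccount:
    create: true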