apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: test-cluster
  namespace: eksa-system
spec:
  clusterNetwork:
    pods:
      cidrBlocks: [192.168.0.0/16]
    serviceDomain: cluster.local
    services:
      cidrBlocks: [10.128.0.0/12]
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: test-cluster
    namespace: eksa-system
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerCluster
    name: test-cluster
    namespace: eksa-system
  managedExternalEtcdRef:
    apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
    kind: EtcdadmCluster
    name: test-cluster-etcd
    namespace: eksa-system
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
metadata:
  name: test-cluster
  namespace: eksa-system
spec:
  loadBalancer:
    imageRepository: public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind
    imageTag: v0.11.1-eks-a-v0.0.0-dev-build.1464
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
  name: test-cluster-control-plane-template-1234567890000
  namespace: eksa-system
spec:
  template:
    spec:
      extraMounts:
      - containerPath: /var/run/docker.sock
        hostPath: /var/run/docker.sock
      customImage: public.ecr.aws/eks-distro/kubernetes-sigs/kind/node:v1.18.16-eks-1-18-4-216edda697a37f8bf16651af6c23b7e2bb7ef42f-62681885fe3a97ee4f2b110cc277e084e71230fa
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: test-cluster
  namespace: eksa-system
spec:
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: DockerMachineTemplate
      name: test-cluster-control-plane-template-1234567890000
      namespace: eksa-system
  kubeadmConfigSpec:
    clusterConfiguration:
      imageRepository: public.ecr.aws/eks-distro/kubernetes
      etcd:
        external:
          endpoints: []
          caFile: "/etc/kubernetes/pki/etcd/ca.crt"
          certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
          keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
      dns:
        imageRepository: public.ecr.aws/eks-distro/coredns
        imageTag: v1.8.0-eks-1-19-2
      apiServer:
        certSANs:
        - localhost
        - 127.0.0.1
        extraArgs:
          audit-policy-file: /etc/kubernetes/audit-policy.yaml
          audit-log-path: /var/log/kubernetes/api-audit.log
          audit-log-maxage: "30"
          audit-log-maxbackup: "10"
          audit-log-maxsize: "512"
          profiling: "false"
          oidc-client-id: my-client-id
          oidc-groups-claim: claim1
          oidc-groups-prefix: prefix-for-groups
          oidc-issuer-url: https://mydomain.com/issuer
          oidc-required-claim: sub=test
          oidc-username-claim: username-claim
          oidc-username-prefix: username-prefix
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        extraVolumes:
        - hostPath: /etc/kubernetes/audit-policy.yaml
          mountPath: /etc/kubernetes/audit-policy.yaml
          name: audit-policy
          pathType: File
          readOnly: true
        - hostPath: /var/log/kubernetes
          mountPath: /var/log/kubernetes
          name: audit-log-dir
          pathType: DirectoryOrCreate
          readOnly: false
      controllerManager:
        extraArgs:
          enable-hostpath-provisioner: "true"
          profiling: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
      scheduler:
        extraArgs:
          profiling: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    files:
    - content: |
        apiVersion: audit.k8s.io/v1beta1
        kind: Policy
        rules:
        # Log aws-auth configmap changes
        - level: RequestResponse
          namespaces: ["kube-system"]
          verbs: ["update", "patch", "delete"]
          resources:
          - group: "" # core
            resources: ["configmaps"]
            resourceNames: ["aws-auth"]
          omitStages:
          - "RequestReceived"
        # The following requests were manually identified as high-volume and low-risk,
        # so drop them.
- level: None users: ["system:kube-proxy"] verbs: ["watch"] resources: - group: "" # core resources: ["endpoints", "services", "services/status"] - level: None users: ["kubelet"] # legacy kubelet identity verbs: ["get"] resources: - group: "" # core resources: ["nodes", "nodes/status"] - level: None userGroups: ["system:nodes"] verbs: ["get"] resources: - group: "" # core resources: ["nodes", "nodes/status"] - level: None users: - system:kube-controller-manager - system:kube-scheduler - system:serviceaccount:kube-system:endpoint-controller verbs: ["get", "update"] namespaces: ["kube-system"] resources: - group: "" # core resources: ["endpoints"] - level: None users: ["system:apiserver"] verbs: ["get"] resources: - group: "" # core resources: ["namespaces", "namespaces/status", "namespaces/finalize"] # Don't log HPA fetching metrics. - level: None users: - system:kube-controller-manager verbs: ["get", "list"] resources: - group: "metrics.k8s.io" # Don't log these read-only URLs. - level: None nonResourceURLs: - /healthz* - /version - /swagger* # Don't log events requests. - level: None resources: - group: "" # core resources: ["events"] # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - level: Request users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] verbs: ["update","patch"] resources: - group: "" # core resources: ["nodes/status", "pods/status"] omitStages: - "RequestReceived" - level: Request userGroups: ["system:nodes"] verbs: ["update","patch"] resources: - group: "" # core resources: ["nodes/status", "pods/status"] omitStages: - "RequestReceived" # deletecollection calls can be large, don't log responses for expected namespace deletions - level: Request users: ["system:serviceaccount:kube-system:namespace-controller"] verbs: ["deletecollection"] omitStages: - "RequestReceived" # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, # so only log at the Metadata level. - level: Metadata resources: - group: "" # core resources: ["secrets", "configmaps"] - group: authentication.k8s.io resources: ["tokenreviews"] omitStages: - "RequestReceived" - level: Request resources: - group: "" resources: ["serviceaccounts/token"] # Get repsonses can be large; skip them. 
- level: Request verbs: ["get", "list", "watch"] resources: - group: "" # core - group: "admissionregistration.k8s.io" - group: "apiextensions.k8s.io" - group: "apiregistration.k8s.io" - group: "apps" - group: "authentication.k8s.io" - group: "authorization.k8s.io" - group: "autoscaling" - group: "batch" - group: "certificates.k8s.io" - group: "extensions" - group: "metrics.k8s.io" - group: "networking.k8s.io" - group: "policy" - group: "rbac.authorization.k8s.io" - group: "scheduling.k8s.io" - group: "settings.k8s.io" - group: "storage.k8s.io" omitStages: - "RequestReceived" # Default level for known APIs - level: RequestResponse resources: - group: "" # core - group: "admissionregistration.k8s.io" - group: "apiextensions.k8s.io" - group: "apiregistration.k8s.io" - group: "apps" - group: "authentication.k8s.io" - group: "authorization.k8s.io" - group: "autoscaling" - group: "batch" - group: "certificates.k8s.io" - group: "extensions" - group: "metrics.k8s.io" - group: "networking.k8s.io" - group: "policy" - group: "rbac.authorization.k8s.io" - group: "scheduling.k8s.io" - group: "settings.k8s.io" - group: "storage.k8s.io" omitStages: - "RequestReceived" # Default level for all other requests. - level: Metadata omitStages: - "RequestReceived" owner: root:root path: /etc/kubernetes/audit-policy.yaml initConfiguration: nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% cgroup-driver: cgroupfs tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 joinConfiguration: nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% cgroup-driver: cgroupfs tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 replicas: 3 version: v1.19.6-eks-1-19-2 --- kind: EtcdadmCluster apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 metadata: name: test-cluster-etcd namespace: eksa-system spec: replicas: 3 etcdadmConfigSpec: etcdadmBuiltin: true cloudInitConfig: version: 3.4.14 cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 infrastructureTemplate: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerMachineTemplate name: test-cluster-etcd-template-1234567890000 namespace: eksa-system --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerMachineTemplate metadata: name: test-cluster-etcd-template-1234567890000 namespace: eksa-system spec: template: spec: extraMounts: - containerPath: /var/run/docker.sock hostPath: /var/run/docker.sock customImage: public.ecr.aws/eks-distro/kubernetes-sigs/kind/node:v1.18.16-eks-1-18-4-216edda697a37f8bf16651af6c23b7e2bb7ef42f-62681885fe3a97ee4f2b110cc277e084e71230fa