apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: test
  name: test
  namespace: eksa-system
spec:
  clusterNetwork:
    pods:
      cidrBlocks: [192.168.0.0/16]
    services:
      cidrBlocks: [10.96.0.0/12]
  controlPlaneEndpoint:
    host: 1.2.3.4
    port: 6443
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: test
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
    kind: CloudStackCluster
    name: test
  managedExternalEtcdRef:
    apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
    kind: EtcdadmCluster
    name: test-etcd
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
kind: CloudStackCluster
metadata:
  name: test
  namespace: eksa-system
spec:
  controlPlaneEndpoint:
    host: 1.2.3.4
    port: 6443
  failureDomains:
  - name: default-az-0
    zone:
      id:
      name: zone1
      network:
        id:
        name: net1
    domain: domain1
    account: admin
    acsEndpoint:
      name: global
      namespace: eksa-system
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: test
  namespace: eksa-system
spec:
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
      kind: CloudStackMachineTemplate
      name: test-control-plane-template-1234567890000
  kubeadmConfigSpec:
    clusterConfiguration:
      imageRepository: public.ecr.aws/eks-distro/kubernetes
      etcd:
        external:
          endpoints: []
          caFile: "/etc/kubernetes/pki/etcd/ca.crt"
          certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
          keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
      dns:
        imageRepository: public.ecr.aws/eks-distro/coredns
        imageTag: v1.8.3-eks-1-21-4
      apiServer:
        extraArgs:
          cloud-provider: external
          audit-policy-file: /etc/kubernetes/audit-policy.yaml
          audit-log-path: /var/log/kubernetes/api-audit.log
          audit-log-maxage: "30"
          audit-log-maxbackup: "10"
          audit-log-maxsize: "512"
          profiling: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        extraVolumes:
        - hostPath: /etc/kubernetes/audit-policy.yaml
          mountPath: /etc/kubernetes/audit-policy.yaml
          name: audit-policy
          pathType: File
          readOnly: true
        - hostPath: /var/log/kubernetes
          mountPath: /var/log/kubernetes
          name: audit-log-dir
          pathType: DirectoryOrCreate
          readOnly: false
      controllerManager:
        extraArgs:
          cloud-provider: external
          profiling: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
      scheduler:
        extraArgs:
          profiling: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: null
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - args:
            - manager
            env:
            - name: vip_arp
              value: "true"
            - name: port
              value: "6443"
            - name: vip_cidr
              value: "32"
            - name: cp_enable
              value: "true"
            - name: cp_namespace
              value: kube-system
            - name: vip_ddns
              value: "false"
            - name: vip_leaderelection
              value: "true"
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            - name: address
              value: 1.2.3.4
            image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158
            imagePullPolicy: IfNotPresent
            name: kube-vip
            resources: {}
            securityContext:
              capabilities:
                add:
                - NET_ADMIN
                - NET_RAW
            volumeMounts:
            - mountPath: /etc/kubernetes/admin.conf
              name: kubeconfig
          hostNetwork: true
          volumes:
          - hostPath:
              path: /etc/kubernetes/admin.conf
            name: kubeconfig
        status: {}
      owner: root:root
      path: /etc/kubernetes/manifests/kube-vip.yaml
    - content: |
        apiVersion: audit.k8s.io/v1beta1
        kind: Policy
        rules:
        # Log aws-auth configmap changes
        - level: RequestResponse
          namespaces: ["kube-system"]
          verbs: ["update", "patch", "delete"]
          resources:
          - group: "" # core
            resources: ["configmaps"]
            resourceNames: ["aws-auth"]
          omitStages:
          - "RequestReceived"
        # The following requests were manually identified as high-volume and low-risk,
        # so drop them.
        - level: None
          users: ["system:kube-proxy"]
          verbs: ["watch"]
          resources:
          - group: "" # core
            resources: ["endpoints", "services", "services/status"]
        - level: None
          users: ["kubelet"] # legacy kubelet identity
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["nodes", "nodes/status"]
        - level: None
          userGroups: ["system:nodes"]
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["nodes", "nodes/status"]
        - level: None
          users:
          - system:kube-controller-manager
          - system:kube-scheduler
          - system:serviceaccount:kube-system:endpoint-controller
          verbs: ["get", "update"]
          namespaces: ["kube-system"]
          resources:
          - group: "" # core
            resources: ["endpoints"]
        - level: None
          users: ["system:apiserver"]
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
        # Don't log HPA fetching metrics.
        - level: None
          users:
          - system:kube-controller-manager
          verbs: ["get", "list"]
          resources:
          - group: "metrics.k8s.io"
        # Don't log these read-only URLs.
        - level: None
          nonResourceURLs:
          - /healthz*
          - /version
          - /swagger*
        # Don't log events requests.
        - level: None
          resources:
          - group: "" # core
            resources: ["events"]
        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
        - level: Request
          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
          verbs: ["update","patch"]
          resources:
          - group: "" # core
            resources: ["nodes/status", "pods/status"]
          omitStages:
          - "RequestReceived"
        - level: Request
          userGroups: ["system:nodes"]
          verbs: ["update","patch"]
          resources:
          - group: "" # core
            resources: ["nodes/status", "pods/status"]
          omitStages:
          - "RequestReceived"
        # deletecollection calls can be large, don't log responses for expected namespace deletions
        - level: Request
          users: ["system:serviceaccount:kube-system:namespace-controller"]
          verbs: ["deletecollection"]
          omitStages:
          - "RequestReceived"
        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
        # so only log at the Metadata level.
        - level: Metadata
          resources:
          - group: "" # core
            resources: ["secrets", "configmaps"]
          - group: authentication.k8s.io
            resources: ["tokenreviews"]
          omitStages:
          - "RequestReceived"
        - level: Request
          resources:
          - group: ""
            resources: ["serviceaccounts/token"]
        # Get responses can be large; skip them.
        - level: Request
          verbs: ["get", "list", "watch"]
          resources:
          - group: "" # core
          - group: "admissionregistration.k8s.io"
          - group: "apiextensions.k8s.io"
          - group: "apiregistration.k8s.io"
          - group: "apps"
          - group: "authentication.k8s.io"
          - group: "authorization.k8s.io"
          - group: "autoscaling"
          - group: "batch"
          - group: "certificates.k8s.io"
          - group: "extensions"
          - group: "metrics.k8s.io"
          - group: "networking.k8s.io"
          - group: "policy"
          - group: "rbac.authorization.k8s.io"
          - group: "scheduling.k8s.io"
          - group: "settings.k8s.io"
          - group: "storage.k8s.io"
          omitStages:
          - "RequestReceived"
        # Default level for known APIs
        - level: RequestResponse
          resources:
          - group: "" # core
          - group: "admissionregistration.k8s.io"
          - group: "apiextensions.k8s.io"
          - group: "apiregistration.k8s.io"
          - group: "apps"
          - group: "authentication.k8s.io"
          - group: "authorization.k8s.io"
          - group: "autoscaling"
          - group: "batch"
          - group: "certificates.k8s.io"
          - group: "extensions"
          - group: "metrics.k8s.io"
          - group: "networking.k8s.io"
          - group: "policy"
          - group: "rbac.authorization.k8s.io"
          - group: "scheduling.k8s.io"
          - group: "settings.k8s.io"
          - group: "storage.k8s.io"
          omitStages:
          - "RequestReceived"
        # Default level for all other requests.
        - level: Metadata
          omitStages:
          - "RequestReceived"
      owner: root:root
      path: /etc/kubernetes/audit-policy.yaml
    initConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}'
          read-only-port: "0"
          anonymous-auth: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        name: "{{ ds.meta_data.hostname }}"
    joinConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}'
          read-only-port: "0"
          anonymous-auth: "false"
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        name: "{{ ds.meta_data.hostname }}"
    preKubeadmCommands:
    - swapoff -a
    - hostname "{{ ds.meta_data.hostname }}"
    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
    - echo "127.0.0.1 localhost" >>/etc/hosts
    - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
    - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
    - >-
      if [ ! -L /var/log/kubernetes ] ;
      then mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ;
      mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ;
      else echo "/var/log/kubernetes already symlink" ;
      fi
    diskSetup:
      filesystems:
      - device: /dev/vdb1
        overwrite: false
        extraOpts:
        - -E
        - lazy_itable_init=1,lazy_journal_init=1
        filesystem: ext4
        label: data_disk
      partitions:
      - device: /dev/vdb
        layout: true
        overwrite: false
        tableType: gpt
    mounts:
    - - LABEL=data_disk
      - /data-small
    useExperimentalRetryJoin: true
    users:
    - name: mySshUsername
      sshAuthorizedKeys:
      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
      sudo: ALL=(ALL) NOPASSWD:ALL
    format: cloud-config
  replicas: 3
  version: v1.21.2-eks-1-21-4
---
kind: EtcdadmCluster
apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
metadata:
  name: test-etcd
  namespace: eksa-system
spec:
  replicas: 3
  etcdadmConfigSpec:
    etcdadmBuiltin: false
    format: cloud-config
    cloudInitConfig:
      version: 3.4.16
      installDir: "/usr/bin"
    etcdadmInstallCommands:
    - echo this line exists so that etcdadmInstallCommands is not empty
    - echo etcdadmInstallCommands can be removed once etcdadm bootstrap and controller fix the bug
    - echo that preEtcdadmCommands not run unless etcdadmBuiltin is false
    - echo https://github.com/mrajashree/etcdadm-bootstrap-provider/issues/13
    preEtcdadmCommands:
    - swapoff -a
    - hostname "{{ ds.meta_data.hostname }}"
    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
    - echo "127.0.0.1 localhost" >>/etc/hosts
    - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
    - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
    - >-
      echo "type=83" | sfdisk /dev/vdb &&
      mkfs -t ext4 /dev/vdb1 &&
      mkdir -p /data-small &&
      echo /dev/vdb1 /data-small ext4 defaults 0 2 >> /etc/fstab &&
      mount /data-small
    - >-
      if [ ! -L /var/lib/ ] ;
      then mv /var/lib/ /var/lib/-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ;
      mkdir -p /data-small/var/lib && ln -s /data-small/var/lib /var/lib/ ;
      else echo "/var/lib/ already symlink" ;
      fi
    cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    users:
    - name: mySshUsername
      sshAuthorizedKeys:
      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
      sudo: ALL=(ALL) NOPASSWD:ALL
  infrastructureTemplate:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
    kind: CloudStackMachineTemplate
    name: test-etcd-template-1234567890000
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
kind: CloudStackMachineTemplate
metadata:
  annotations:
    device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb
    filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4
    label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk
    mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small
    symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes
  creationTimestamp: null
  name: test-control-plane-template-1234567890000
  namespace: eksa-system
spec:
  template:
    spec:
      affinityGroupIDs:
      - control-plane-anti-affinity
      diskOffering:
        customSizeInGB: 0
        device: /dev/vdb
        filesystem: ext4
        label: data_disk
        mountPath: /data-small
        name: Small
      offering:
        name: m4-large
      sshKey: ""
      template:
        name: centos7-k8s-118-21
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta3
kind: CloudStackMachineTemplate
metadata:
  annotations:
    device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb
    filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4
    label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk
    mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small
    symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/lib/:/data-small/var/lib
  creationTimestamp: null
  name: test-etcd-template-1234567890000
  namespace: eksa-system
spec:
  template:
    spec:
      affinityGroupIDs:
      - etcd-affinity
      diskOffering:
        customSizeInGB: 0
        device: /dev/vdb
        filesystem: ext4
        label: data_disk
        mountPath: /data-small
        name: Small
      offering:
        name: m4-large
      sshKey: ""
      template:
        name: centos7-k8s-118-21
---