apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: "{{.clusterName}}"
  namespace: "{{.eksaSystemNamespace}}"
spec:
  prismCentral:
{{- if .nutanixAdditionalTrustBundle }}
    additionalTrustBundle:
      kind: String
      data: |
{{ .nutanixAdditionalTrustBundle | indent 8 }}
{{- end }}
    address: "{{.nutanixEndpoint}}"
    port: {{.nutanixPort}}
    insecure: {{.nutanixInsecure}}
    credentialRef:
      name: "{{.secretName}}"
      kind: Secret
  controlPlaneEndpoint:
    host: "{{.controlPlaneEndpointIp}}"
    port: 6443
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: "{{.clusterName}}"
  name: "{{.clusterName}}"
  namespace: "{{.eksaSystemNamespace}}"
spec:
  clusterNetwork:
    services:
      cidrBlocks: {{.serviceCidrs}}
    pods:
      cidrBlocks: {{.podCidrs}}
    serviceDomain: "cluster.local"
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: "{{.clusterName}}"
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: NutanixCluster
    name: "{{.clusterName}}"
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: "{{.clusterName}}"
  namespace: "{{.eksaSystemNamespace}}"
spec:
  replicas: {{.controlPlaneReplicas}}
  version: "{{.kubernetesVersion}}"
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: NutanixMachineTemplate
      name: "{{.controlPlaneTemplateName}}"
  kubeadmConfigSpec:
    clusterConfiguration:
      imageRepository: "{{.kubernetesRepository}}"
      apiServer:
        certSANs:
          - localhost
          - 127.0.0.1
          - 0.0.0.0
{{- if .apiServerExtraArgs }}
        extraArgs:
{{ .apiServerExtraArgs.ToYaml | indent 10 }}
{{- end }}
{{- if .awsIamAuth}}
        extraVolumes:
        - hostPath: /var/lib/kubeadm/aws-iam-authenticator/
          mountPath: /etc/kubernetes/aws-iam-authenticator/
          name: authconfig
          readOnly: false
        - hostPath: /var/lib/kubeadm/aws-iam-authenticator/pki/
          mountPath: /var/aws-iam-authenticator/
          name: awsiamcert
          readOnly: false
{{- end}}
      controllerManager:
        extraArgs:
          enable-hostpath-provisioner: "true"
      dns:
        imageRepository: {{.corednsRepository}}
        imageTag: {{.corednsVersion}}
      etcd:
{{- if .externalEtcd }}
        external:
          endpoints: []
          caFile: "/etc/kubernetes/pki/etcd/ca.crt"
          certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
          keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
{{- else }}
        local:
          imageRepository: {{.etcdRepository}}
          imageTag: {{.etcdImageTag}}
{{- end }}
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: null
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
            - name: kube-vip
              image: {{.kubeVipImage}}
              imagePullPolicy: IfNotPresent
              args:
                - manager
              env:
                - name: vip_arp
                  value: "true"
                - name: address
                  value: "{{.controlPlaneEndpointIp}}"
                - name: port
                  value: "6443"
                - name: vip_cidr
                  value: "32"
                - name: cp_enable
                  value: "true"
                - name: cp_namespace
                  value: kube-system
                - name: vip_ddns
                  value: "false"
                - name: vip_leaderelection
                  value: "true"
                - name: vip_leaseduration
                  value: "15"
                - name: vip_renewdeadline
                  value: "10"
                - name: vip_retryperiod
                  value: "2"
                - name: svc_enable
                  value: "{{.kubeVipSvcEnable}}"
                - name: lb_enable
                  value: "{{.kubeVipLBEnable}}"
              securityContext:
                capabilities:
                  add:
                    - NET_ADMIN
                    - SYS_TIME
                    - NET_RAW
              volumeMounts:
                - mountPath: /etc/kubernetes/admin.conf
                  name: kubeconfig
              resources: {}
          hostNetwork: true
          volumes:
            - name: kubeconfig
              hostPath:
                type: FileOrCreate
                path: /etc/kubernetes/admin.conf
        status: {}
      owner: root:root
      path: /etc/kubernetes/manifests/kube-vip.yaml
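    # The file entries below are rendered only when the corresponding template
    # values are set: a registry mirror CA certificate, a containerd HTTP proxy
    # drop-in, a containerd registry-mirror config, and the aws-iam-authenticator
    # webhook kubeconfig plus its certificate pair.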
{{- if .registryCACert }}
    - content: |
{{ .registryCACert | indent 8 }}
      owner: root:root
      path: "/etc/containerd/certs.d/{{ .mirrorBase }}/ca.crt"
{{- end }}
{{- if .proxyConfig }}
    - content: |
        [Service]
        Environment="HTTP_PROXY={{.httpProxy}}"
        Environment="HTTPS_PROXY={{.httpsProxy}}"
        Environment="NO_PROXY={{ stringsJoin .noProxy "," }}"
      owner: root:root
      path: /etc/systemd/system/containerd.service.d/http-proxy.conf
{{- end }}
{{- if .registryMirrorMap }}
    - content: |
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
{{- range $orig, $mirror := .registryMirrorMap }}
          [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ $orig }}"]
            endpoint = ["https://{{ $mirror }}"]
{{- end }}
{{- if or .registryCACert .insecureSkip }}
          [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .mirrorBase }}".tls]
{{- if .registryCACert }}
            ca_file = "/etc/containerd/certs.d/{{ .mirrorBase }}/ca.crt"
{{- end }}
{{- if .insecureSkip }}
            insecure_skip_verify = {{ .insecureSkip }}
{{- end }}
{{- end }}
{{- if .registryAuth }}
          [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .mirrorBase }}".auth]
            username = "{{.registryUsername}}"
            password = "{{.registryPassword}}"
{{- end }}
      owner: root:root
      path: "/etc/containerd/config_append.toml"
{{- end }}
{{- if .awsIamAuth}}
    - content: |
        # clusters refers to the remote service.
        clusters:
          - name: aws-iam-authenticator
            cluster:
              certificate-authority: /var/aws-iam-authenticator/cert.pem
              server: https://localhost:21362/authenticate
        # users refers to the API Server's webhook configuration
        # (we don't need to authenticate the API server).
        users:
          - name: apiserver
        # kubeconfig files require a context. Provide one for the API Server.
        current-context: webhook
        contexts:
        - name: webhook
          context:
            cluster: aws-iam-authenticator
            user: apiserver
      permissions: "0640"
      owner: root:root
      path: /var/lib/kubeadm/aws-iam-authenticator/kubeconfig.yaml
    - contentFrom:
        secret:
          name: {{.clusterName}}-aws-iam-authenticator-ca
          key: cert.pem
      permissions: "0640"
      owner: root:root
      path: /var/lib/kubeadm/aws-iam-authenticator/pki/cert.pem
    - contentFrom:
        secret:
          name: {{.clusterName}}-aws-iam-authenticator-ca
          key: key.pem
      permissions: "0640"
      owner: root:root
      path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem
{{- end}}
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
          # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
          #cgroup-driver: cgroupfs
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
{{- if .kubeletExtraArgs }}
{{ .kubeletExtraArgs.ToYaml | indent 10 }}
{{- end }}
{{- if .controlPlaneTaints }}
        taints:
{{- range .controlPlaneTaints}}
          - key: {{ .Key }}
            value: {{ .Value }}
            effect: {{ .Effect }}
{{- if .TimeAdded }}
            timeAdded: {{ .TimeAdded }}
{{- end }}
{{- end }}
{{- end }}
    joinConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
          read-only-port: "0"
          anonymous-auth: "false"
{{- if .kubeletExtraArgs }}
{{ .kubeletExtraArgs.ToYaml | indent 10 }}
{{- end }}
{{- if .controlPlaneTaints }}
        taints:
{{- range .controlPlaneTaints}}
          - key: {{ .Key }}
            value: {{ .Value }}
            effect: {{ .Effect }}
{{- if .TimeAdded }}
            timeAdded: {{ .TimeAdded }}
{{- end }}
{{- end }}
{{- end }}
        name: "{{`{{ ds.meta_data.hostname }}`}}"
    users:
    - name: "{{.controlPlaneSshUsername }}"
      lockPassword: false
      sudo: ALL=(ALL) NOPASSWD:ALL
      sshAuthorizedKeys:
      - "{{.controlPlaneSshAuthorizedKey}}"
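    # preKubeadmCommands run on each control plane node before kubeadm init/join:
    # they append the optional registry-mirror config to containerd's config,
    # restart containerd when a proxy or mirror is configured, and set the
    # hostname and /etc/hosts entries from instance metadata.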
    preKubeadmCommands:
{{- if .registryMirrorMap }}
    - cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml
{{- end }}
{{- if or .proxyConfig .registryMirrorMap }}
    - sudo systemctl daemon-reload
    - sudo systemctl restart containerd
{{- end }}
    - hostnamectl set-hostname "{{`{{ ds.meta_data.hostname }}`}}"
    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
    - echo "127.0.0.1 localhost" >>/etc/hosts
    - echo "127.0.0.1 {{`{{ ds.meta_data.hostname }}`}}" >> /etc/hosts
    postKubeadmCommands:
    - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc
    useExperimentalRetryJoin: true
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
metadata:
  name: "{{.controlPlaneTemplateName}}"
  namespace: "{{.eksaSystemNamespace}}"
spec:
  template:
    spec:
      providerID: "nutanix://{{.clusterName}}-m1"
      vcpusPerSocket: {{.vcpusPerSocket}}
      vcpuSockets: {{.vcpuSockets}}
      memorySize: {{.memorySize}}
      systemDiskSize: {{.systemDiskSize}}
      image:
{{- if (eq .imageIDType "name") }}
        type: name
        name: "{{.imageName}}"
{{ else if (eq .imageIDType "uuid") }}
        type: uuid
        uuid: "{{.imageUUID}}"
{{- end }}
      cluster:
{{- if (eq .nutanixPEClusterIDType "name") }}
        type: name
        name: "{{.nutanixPEClusterName}}"
{{- else if (eq .nutanixPEClusterIDType "uuid") }}
        type: uuid
        uuid: "{{.nutanixPEClusterUUID}}"
{{- end }}
      subnet:
{{- if (eq .subnetIDType "name") }}
        - type: name
          name: "{{.subnetName}}"
{{- else if (eq .subnetIDType "uuid") }}
        - type: uuid
          uuid: "{{.subnetUUID}}"
{{ end }}
{{- if .projectIDType}}
      project:
{{- if (eq .projectIDType "name") }}
        type: name
        name: "{{.projectName}}"
{{- else if (eq .projectIDType "uuid") }}
        type: uuid
        uuid: "{{.projectUUID}}"
{{ end }}
{{ end }}
{{- if .additionalCategories}}
      additionalCategories:
{{- range .additionalCategories}}
        - key: "{{ .Key }}"
          value: "{{ .Value }}"
{{- end }}
{{- end }}
---
{{- if .registryAuth }}
apiVersion: v1
kind: Secret
metadata:
  name: registry-credentials
  namespace: {{.eksaSystemNamespace}}
  labels:
    clusterctl.cluster.x-k8s.io/move: "true"
stringData:
  username: "{{.registryUsername}}"
  password: "{{.registryPassword}}"
{{- end }}