apiVersion: cluster.x-k8s.io/v1alpha3
kind: Cluster
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: default
  name: default
  namespace: default
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - 192.168.0.0/16
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
    kind: KubeadmControlPlane
    name: default
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: VSphereCluster
    name: default
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereCluster
metadata:
  name: default
  namespace: default
spec:
  cloudProviderConfiguration:
    global:
      secretName: cloud-provider-vsphere-credentials
      secretNamespace: kube-system
      thumbprint: ABCDEFG
    network:
      name: /SDDC-Datacenter/network/sddc-cgw-network-1
    providerConfig:
      cloud:
        controllerImage: gcr.io/cloud-provider-vsphere/cpi/release/manager:v1.18.1
    virtualCenter:
      vsphere_server:
        datacenters: SDDC-Datacenter
        thumbprint: ABCDEFG
    workspace:
      datacenter: 'SDDC-Datacenter'
      datastore: /SDDC-Datacenter/datastore/WorkloadDatastore
      folder: /SDDC-Datacenter/vm
      resourcePool: '*/Resources'
      server: vsphere_server
  controlPlaneEndpoint:
    host: 1.2.3.4
    port: 6443
  server: vsphere_server
  thumbprint: ABCDEFG
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereMachineTemplate
metadata:
  name: default
  namespace: default
spec:
  template:
    spec:
      cloneMode: linkedClone
      datacenter: 'SDDC-Datacenter'
      datastore: /SDDC-Datacenter/datastore/WorkloadDatastore
      diskGiB: 25
      folder: /SDDC-Datacenter/vm
      memoryMiB: 8192
      network:
        devices:
        - dhcp4: true
          networkName: /SDDC-Datacenter/network/sddc-cgw-network-1
      numCPUs: 2
      resourcePool: '*/Resources'
      server: vsphere_server
      storagePolicyName: "vSAN Default Storage Policy"
      template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6
      thumbprint: ABCDEFG
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
kind: KubeadmControlPlane
metadata:
  name: default
  namespace: default
spec:
  infrastructureTemplate:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: VSphereMachineTemplate
    name: default
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          cloud-provider: external
          audit-policy-file: /etc/kubernetes/audit-policy.yaml
          audit-log-path: /var/log/kubernetes/api-audit.log
          audit-log-maxage: "30"
          audit-log-maxbackup: "10"
          audit-log-maxsize: "512"
          profiling: "false"
        extraVolumes:
        - hostPath: /etc/kubernetes/audit-policy.yaml
          mountPath: /etc/kubernetes/audit-policy.yaml
          name: audit-policy
          pathType: File
          readOnly: true
        - hostPath: /var/log/kubernetes
          mountPath: /var/log/kubernetes
          name: audit-log-dir
          pathType: DirectoryOrCreate
          readOnly: false
      controllerManager:
        extraArgs:
          cloud-provider: external
          profiling: "false"
      scheduler:
        extraArgs:
          profiling: "false"
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: null
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - args:
            - start
            env:
            - name: vip_arp
              value: "true"
            - name: vip_leaderelection
              value: "true"
            - name: vip_address
              value: 1.2.3.4
            - name: vip_interface
              value: eth0
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            image: plndr/kube-vip:0.3.2
            imagePullPolicy: IfNotPresent
            name: kube-vip
            resources: {}
            securityContext:
              capabilities:
                add:
                - NET_ADMIN
                - SYS_TIME
            volumeMounts:
            - mountPath: /etc/kubernetes/admin.conf
              name: kubeconfig
          hostNetwork: true
          volumes:
          - hostPath:
              path: /etc/kubernetes/admin.conf
              type: FileOrCreate
            name: kubeconfig
        status: {}
      owner: root:root
      path: /etc/kubernetes/manifests/kube-vip.yaml
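    # The audit policy below is written to /etc/kubernetes/audit-policy.yaml on
    # each control plane node and consumed by the API server through the
    # audit-policy-file / audit-log-* extraArgs defined above.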
    - content: |
        apiVersion: audit.k8s.io/v1
        kind: Policy
        rules:
        # Log aws-auth configmap changes
        - level: RequestResponse
          namespaces: ["kube-system"]
          verbs: ["update", "patch", "delete"]
          resources:
          - group: "" # core
            resources: ["configmaps"]
            resourceNames: ["aws-auth"]
          omitStages:
          - "RequestReceived"
        # The following requests were manually identified as high-volume and low-risk,
        # so drop them.
        - level: None
          users: ["system:kube-proxy"]
          verbs: ["watch"]
          resources:
          - group: "" # core
            resources: ["endpoints", "services", "services/status"]
        - level: None
          users: ["kubelet"] # legacy kubelet identity
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["nodes", "nodes/status"]
        - level: None
          userGroups: ["system:nodes"]
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["nodes", "nodes/status"]
        - level: None
          users:
          - system:kube-controller-manager
          - system:kube-scheduler
          - system:serviceaccount:kube-system:endpoint-controller
          verbs: ["get", "update"]
          namespaces: ["kube-system"]
          resources:
          - group: "" # core
            resources: ["endpoints"]
        - level: None
          users: ["system:apiserver"]
          verbs: ["get"]
          resources:
          - group: "" # core
            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
        # Don't log HPA fetching metrics.
        - level: None
          users:
          - system:kube-controller-manager
          verbs: ["get", "list"]
          resources:
          - group: "metrics.k8s.io"
        # Don't log these read-only URLs.
        - level: None
          nonResourceURLs:
          - /healthz*
          - /version
          - /swagger*
        # Don't log events requests.
        - level: None
          resources:
          - group: "" # core
            resources: ["events"]
        # node and pod status calls from nodes are high-volume and can be large,
        # don't log responses for expected updates from nodes
        - level: Request
          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
          verbs: ["update","patch"]
          resources:
          - group: "" # core
            resources: ["nodes/status", "pods/status"]
          omitStages:
          - "RequestReceived"
        - level: Request
          userGroups: ["system:nodes"]
          verbs: ["update","patch"]
          resources:
          - group: "" # core
            resources: ["nodes/status", "pods/status"]
          omitStages:
          - "RequestReceived"
        # deletecollection calls can be large, don't log responses for expected
        # namespace deletions
        - level: Request
          users: ["system:serviceaccount:kube-system:namespace-controller"]
          verbs: ["deletecollection"]
          omitStages:
          - "RequestReceived"
        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
        # so only log at the Metadata level.
        - level: Metadata
          resources:
          - group: "" # core
            resources: ["secrets", "configmaps"]
          - group: authentication.k8s.io
            resources: ["tokenreviews"]
          omitStages:
          - "RequestReceived"
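        # TokenRequest responses carry the issued bearer token, so the rule
        # below logs the request body only (Request, not RequestResponse).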
        - level: Request
          resources:
          - group: ""
            resources: ["serviceaccounts/token"]
        # Get responses can be large; skip them.
        - level: Request
          verbs: ["get", "list", "watch"]
          resources:
          - group: "" # core
          - group: "admissionregistration.k8s.io"
          - group: "apiextensions.k8s.io"
          - group: "apiregistration.k8s.io"
          - group: "apps"
          - group: "authentication.k8s.io"
          - group: "authorization.k8s.io"
          - group: "autoscaling"
          - group: "batch"
          - group: "certificates.k8s.io"
          - group: "extensions"
          - group: "metrics.k8s.io"
          - group: "networking.k8s.io"
          - group: "policy"
          - group: "rbac.authorization.k8s.io"
          - group: "scheduling.k8s.io"
          - group: "settings.k8s.io"
          - group: "storage.k8s.io"
          omitStages:
          - "RequestReceived"
        # Default level for known APIs
        - level: RequestResponse
          resources:
          - group: "" # core
          - group: "admissionregistration.k8s.io"
          - group: "apiextensions.k8s.io"
          - group: "apiregistration.k8s.io"
          - group: "apps"
          - group: "authentication.k8s.io"
          - group: "authorization.k8s.io"
          - group: "autoscaling"
          - group: "batch"
          - group: "certificates.k8s.io"
          - group: "extensions"
          - group: "metrics.k8s.io"
          - group: "networking.k8s.io"
          - group: "policy"
          - group: "rbac.authorization.k8s.io"
          - group: "scheduling.k8s.io"
          - group: "settings.k8s.io"
          - group: "storage.k8s.io"
          omitStages:
          - "RequestReceived"
        # Default level for all other requests.
        - level: Metadata
          omitStages:
          - "RequestReceived"
      owner: root:root
      path: /etc/kubernetes/audit-policy.yaml
    initConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
          read-only-port: "0"
          anonymous-auth: "false"
        name: '{{ ds.meta_data.hostname }}'
    joinConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
          read-only-port: "0"
          anonymous-auth: "false"
        name: '{{ ds.meta_data.hostname }}'
    preKubeadmCommands:
    - hostname "{{ ds.meta_data.hostname }}"
    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
    - echo "127.0.0.1 localhost" >>/etc/hosts
    - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
    - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
    useExperimentalRetryJoin: true
    users:
    - name: capv
      sshAuthorizedKeys:
      - ssh-rsa
      sudo: ALL=(ALL) NOPASSWD:ALL
  replicas: 1
  version: v1.19.6
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: KubeadmConfigTemplate
metadata:
  name: default-md-0
  namespace: default
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          criSocket: /var/run/containerd/containerd.sock
          kubeletExtraArgs:
            cloud-provider: external
            read-only-port: "0"
            anonymous-auth: "false"
          name: '{{ ds.meta_data.hostname }}'
      preKubeadmCommands:
      - hostname "{{ ds.meta_data.hostname }}"
      - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
      - echo "127.0.0.1 localhost" >>/etc/hosts
      - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
      - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
      users:
      - name: capv
        sshAuthorizedKeys:
        - ssh-rsa
        sudo: ALL=(ALL) NOPASSWD:ALL
---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: MachineDeployment
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: default
  name: default-md-0
  namespace: default
spec:
  clusterName: default
  replicas: 0
  selector:
    matchLabels: {}
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: default
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: KubeadmConfigTemplate
          name: default-md-0
      clusterName: default
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
        kind: VSphereMachineTemplate
        name: default
      version: v1.19.6
---
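# The ClusterResourceSet below pushes the vSphere CSI driver manifests (stored
# in the Secrets and ConfigMaps that follow) into the workload cluster once it
# is reachable.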
apiVersion: addons.cluster.x-k8s.io/v1alpha3
kind: ClusterResourceSet
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: default
  name: default-crs-0
  namespace: default
spec:
  clusterSelector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: default
  resources:
  - kind: Secret
    name: vsphere-csi-controller
  - kind: ConfigMap
    name: vsphere-csi-controller-role
  - kind: ConfigMap
    name: vsphere-csi-controller-binding
  - kind: Secret
    name: csi-vsphere-config
  - kind: ConfigMap
    name: csi.vsphere.vmware.com
  - kind: ConfigMap
    name: vsphere-csi-node
  - kind: ConfigMap
    name: vsphere-csi-controller
  # Referenced so the feature-states ConfigMap defined at the end of this file
  # is applied as well.
  - kind: ConfigMap
    name: internal-feature-states.csi.vsphere.vmware.com
---
apiVersion: v1
kind: Secret
metadata:
  name: vsphere-csi-controller
  namespace: default
stringData:
  data: |
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: vsphere-csi-controller
      namespace: kube-system
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
data:
  data: |
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: vsphere-csi-controller-role
    rules:
    - apiGroups:
      - storage.k8s.io
      resources:
      - csidrivers
      verbs:
      - create
      - delete
    - apiGroups:
      - ""
      resources:
      - nodes
      - pods
      - secrets
      - configmaps
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - persistentvolumes
      verbs:
      - get
      - list
      - watch
      - update
      - create
      - delete
      - patch
    - apiGroups:
      - storage.k8s.io
      resources:
      - volumeattachments
      verbs:
      - get
      - list
      - watch
      - update
      - patch
    - apiGroups:
      - storage.k8s.io
      resources:
      - volumeattachments/status
      verbs:
      - patch
    - apiGroups:
      - ""
      resources:
      - persistentvolumeclaims
      verbs:
      - get
      - list
      - watch
      - update
    - apiGroups:
      - storage.k8s.io
      resources:
      - storageclasses
      - csinodes
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - events
      verbs:
      - list
      - watch
      - create
      - update
      - patch
    - apiGroups:
      - coordination.k8s.io
      resources:
      - leases
      verbs:
      - get
      - watch
      - list
      - delete
      - update
      - create
    - apiGroups:
      - snapshot.storage.k8s.io
      resources:
      - volumesnapshots
      verbs:
      - get
      - list
    - apiGroups:
      - snapshot.storage.k8s.io
      resources:
      - volumesnapshotcontents
      verbs:
      - get
      - list
kind: ConfigMap
metadata:
  name: vsphere-csi-controller-role
  namespace: default
---
apiVersion: v1
data:
  data: |
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: vsphere-csi-controller-binding
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: vsphere-csi-controller-role
    subjects:
    - kind: ServiceAccount
      name: vsphere-csi-controller
      namespace: kube-system
kind: ConfigMap
metadata:
  name: vsphere-csi-controller-binding
  namespace: default
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-vsphere-config
  namespace: default
stringData:
  data: |
    apiVersion: v1
    kind: Secret
    metadata:
      name: csi-vsphere-config
      namespace: kube-system
    stringData:
      csi-vsphere.conf: |+
        [Global]
        cluster-id = "default/default"

        [VirtualCenter "vsphere_server"]
        user = "cloudadmin@vmc.local"
        password = "bogus"
        datacenters = "SDDC-Datacenter"

        [Network]
        public-network = "/SDDC-Datacenter/network/sddc-cgw-network-1"

    type: Opaque
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
data:
  data: |
    apiVersion: storage.k8s.io/v1
    kind: CSIDriver
    metadata:
      name: csi.vsphere.vmware.com
    spec:
      attachRequired: true
kind: ConfigMap
metadata:
  name: csi.vsphere.vmware.com
  namespace: default
---
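# The vsphere-csi-node DaemonSet below runs the CSI driver on every node,
# alongside the node-driver-registrar and livenessprobe sidecars, and tolerates
# all NoSchedule/NoExecute taints.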
apiVersion: v1
data:
  data: |
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: vsphere-csi-node
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          app: vsphere-csi-node
      template:
        metadata:
          labels:
            app: vsphere-csi-node
            role: vsphere-csi
        spec:
          containers:
          - args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
            env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock
            image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1
            lifecycle:
              preStop:
                exec:
                  command:
                  - /bin/sh
                  - -c
                  - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock
            name: node-driver-registrar
            resources: {}
            securityContext:
              privileged: true
            volumeMounts:
            - mountPath: /csi
              name: plugin-dir
            - mountPath: /registration
              name: registration-dir
          - env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: X_CSI_MODE
              value: node
            - name: X_CSI_SPEC_REQ_VALIDATION
              value: "false"
            - name: VSPHERE_CSI_CONFIG
              value: /etc/cloud/csi-vsphere.conf
            - name: LOGGER_LEVEL
              value: PRODUCTION
            - name: X_CSI_LOG_LEVEL
              value: INFO
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /healthz
                port: healthz
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
            name: vsphere-csi-node
            ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
            resources: {}
            securityContext:
              allowPrivilegeEscalation: true
              capabilities:
                add:
                - SYS_ADMIN
              privileged: true
            volumeMounts:
            - mountPath: /etc/cloud
              name: vsphere-config-volume
            - mountPath: /csi
              name: plugin-dir
            - mountPath: /var/lib/kubelet
              mountPropagation: Bidirectional
              name: pods-mount-dir
            - mountPath: /dev
              name: device-dir
          - args:
            - --csi-address=/csi/csi.sock
            image: quay.io/k8scsi/livenessprobe:v2.1.0
            name: liveness-probe
            resources: {}
            volumeMounts:
            - mountPath: /csi
              name: plugin-dir
          dnsPolicy: Default
          tolerations:
          - effect: NoSchedule
            operator: Exists
          - effect: NoExecute
            operator: Exists
          volumes:
          - name: vsphere-config-volume
            secret:
              secretName: csi-vsphere-config
          - hostPath:
              path: /var/lib/kubelet/plugins_registry
              type: Directory
            name: registration-dir
          - hostPath:
              path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/
              type: DirectoryOrCreate
            name: plugin-dir
          - hostPath:
              path: /var/lib/kubelet
              type: Directory
            name: pods-mount-dir
          - hostPath:
              path: /dev
            name: device-dir
      updateStrategy:
        type: RollingUpdate
kind: ConfigMap
metadata:
  name: vsphere-csi-node
  namespace: default
---
apiVersion: v1
data:
  data: |
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: vsphere-csi-controller
      namespace: kube-system
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: vsphere-csi-controller
      template:
        metadata:
          labels:
            app: vsphere-csi-controller
            role: vsphere-csi
        spec:
          containers:
          - args:
            - --v=4
            - --timeout=300s
            - --csi-address=$(ADDRESS)
            - --leader-election
            env:
            - name: ADDRESS
              value: /csi/csi.sock
            image: quay.io/k8scsi/csi-attacher:v3.0.0
            name: csi-attacher
            resources: {}
            volumeMounts:
            - mountPath: /csi
              name: socket-dir
          - env:
            - name: CSI_ENDPOINT
              value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
            - name: X_CSI_MODE
              value: controller
            - name: VSPHERE_CSI_CONFIG
              value: /etc/cloud/csi-vsphere.conf
            - name: LOGGER_LEVEL
              value: PRODUCTION
            - name: X_CSI_LOG_LEVEL
              value: INFO
            image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /healthz
                port: healthz
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
            name: vsphere-csi-controller
            ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
            resources: {}
            volumeMounts:
            - mountPath: /etc/cloud
              name: vsphere-config-volume
              readOnly: true
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
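          # The livenessprobe and csi-provisioner sidecars below reach the
          # driver through the shared socket-dir volume; the syncer reads the
          # same csi-vsphere.conf mounted from the csi-vsphere-config Secret.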
          - args:
            - --csi-address=$(ADDRESS)
            env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
            image: quay.io/k8scsi/livenessprobe:v2.1.0
            name: liveness-probe
            resources: {}
            volumeMounts:
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
          - args:
            - --leader-election
            env:
            - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES
              value: "30"
            - name: LOGGER_LEVEL
              value: PRODUCTION
            - name: VSPHERE_CSI_CONFIG
              value: /etc/cloud/csi-vsphere.conf
            image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0
            name: vsphere-syncer
            resources: {}
            volumeMounts:
            - mountPath: /etc/cloud
              name: vsphere-config-volume
              readOnly: true
          - args:
            - --v=4
            - --timeout=300s
            - --csi-address=$(ADDRESS)
            - --leader-election
            - --default-fstype=ext4
            env:
            - name: ADDRESS
              value: /csi/csi.sock
            image: quay.io/k8scsi/csi-provisioner:v2.0.0
            name: csi-provisioner
            resources: {}
            volumeMounts:
            - mountPath: /csi
              name: socket-dir
          dnsPolicy: Default
          serviceAccountName: vsphere-csi-controller
          tolerations:
          - effect: NoSchedule
            key: node-role.kubernetes.io/master
            operator: Exists
          volumes:
          - name: vsphere-config-volume
            secret:
              secretName: csi-vsphere-config
          - emptyDir: {}
            name: socket-dir
kind: ConfigMap
metadata:
  name: vsphere-csi-controller
  namespace: default
---
apiVersion: v1
data:
  data: |
    apiVersion: v1
    data:
      csi-migration: "false"
    kind: ConfigMap
    metadata:
      name: internal-feature-states.csi.vsphere.vmware.com
      namespace: kube-system
kind: ConfigMap
metadata:
  name: internal-feature-states.csi.vsphere.vmware.com
  namespace: default
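# NOTE: several values above are placeholders (vsphere_server, the ABCDEFG
# thumbprint, the truncated ssh-rsa key, the "bogus" vCenter password, and the
# 1.2.3.4 control plane VIP). Replace them before applying this manifest to a
# Cluster API management cluster, e.g. with `kubectl apply -f <file>`.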