kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  etcd_endpoints: "http://etcd-a.internal.example2.cluster.k8s.local:4001"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "log_level": "info",
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
          },
          "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
  - kind: ServiceAccount
    name: calico
    namespace: kube-system

---
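# For reference: the install-cni container in the DaemonSet below runs
# /install-cni.sh, which copies the CNI plugin binaries into /opt/cni/bin and
# renders the cni_network_config template from the ConfigMap above into
# /etc/cni/net.d/10-calico.conflist, substituting the __ETCD_ENDPOINTS__,
# __KUBERNETES_SERVICE_HOST__/__KUBERNETES_SERVICE_PORT__,
# __SERVICEACCOUNT_TOKEN__ and __KUBECONFIG_FILENAME__ placeholders at install
# time. A rough sketch of the rendered file on a node (values illustrative,
# derived from the ConfigMap above):
#
#   {
#     "name": "k8s-pod-network",
#     "cniVersion": "0.3.0",
#     "plugins": [
#       {
#         "type": "calico",
#         "etcd_endpoints": "http://etcd-a.internal.example2.cluster.k8s.local:4001",
#         "policy": {"type": "k8s", "k8s_api_root": "https://<apiserver-service-ip>:443", ...},
#         ...
#       },
#       {"type": "portmap", "snat": true, "capabilities": {"portMappings": true}}
#     ]
#   }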
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
    role.kubernetes.io/networking: "1"
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
      containers:
        # Runs the calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each host.
        - name: calico-node
          image: quay.io/calico/node:v2.4.0
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: "100.96.0.0/11"
            - name: CALICO_IPV4POOL_IPIP
              value: "cross-subnet"
            # Cluster type to identify the deployment type.
            - name: CLUSTER_TYPE
              value: "kops,bgp"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.10.0
          resources:
            requests:
              cpu: 10m
          imagePullPolicy: Always
          command: ["/install-cni.sh"]
          env:
            # The name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: 10-calico.conflist
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d

---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    role.kubernetes.io/networking: "1"
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-policy-controller
      role.kubernetes.io/networking: "1"
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: calico-policy-controller
          image: quay.io/calico/kube-policy-controller:v0.7.0
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"

---
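# For reference: with the policy controller running, standard Kubernetes
# NetworkPolicy objects are enforced by Calico on every node. A minimal,
# hypothetical example (not part of this manifest, shown commented out;
# assumes a cluster version that serves networking.k8s.io/v1) that denies
# all ingress traffic to pods in the default namespace:
#
#   kind: NetworkPolicy
#   apiVersion: networking.k8s.io/v1
#   metadata:
#     name: default-deny-ingress
#     namespace: default
#   spec:
#     podSelector: {}
#     policyTypes:
#       - Ingress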
# The k8s-ec2-srcdst controller watches Node objects and disables the EC2
# source/destination check on each instance, which Calico needs in order to
# route pod traffic natively (without IP-in-IP) between nodes in the same
# subnet when CALICO_IPV4POOL_IPIP is set to "cross-subnet".
# See https://github.com/ottoyiu/k8s-ec2-srcdst
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: k8s-ec2-srcdst
  labels:
    role.kubernetes.io/networking: "1"
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
      - update
      - patch

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: k8s-ec2-srcdst
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: k8s-ec2-srcdst
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: k8s-ec2-srcdst
subjects:
  - kind: ServiceAccount
    name: k8s-ec2-srcdst
    namespace: kube-system

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: k8s-ec2-srcdst
  namespace: kube-system
  labels:
    k8s-app: k8s-ec2-srcdst
    role.kubernetes.io/networking: "1"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: k8s-ec2-srcdst
  template:
    metadata:
      labels:
        k8s-app: k8s-ec2-srcdst
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: k8s-ec2-srcdst
      containers:
        - image: ottoyiu/k8s-ec2-srcdst:v0.1.0
          name: k8s-ec2-srcdst
          resources:
            requests:
              cpu: 10m
              memory: 64Mi
          env:
            - name: AWS_REGION
              value: eu-central-1
          volumeMounts:
            - name: ssl-certs
              mountPath: "/etc/ssl/certs/ca-certificates.crt"
              readOnly: true
          imagePullPolicy: "Always"
      volumes:
        - name: ssl-certs
          hostPath:
            path: "/etc/ssl/certs/ca-certificates.crt"
      nodeSelector:
        node-role.kubernetes.io/master: ""
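# To sanity-check the rollout after applying this manifest (commands are
# illustrative):
#
#   kubectl -n kube-system rollout status daemonset/calico-node
#   kubectl -n kube-system rollout status deployment/calico-policy-controller
#   kubectl -n kube-system logs deployment/k8s-ec2-srcdst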