provider "aws" { region = local.region } provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } } provider "kubectl" { apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } data "aws_availability_zones" "available" {} locals { name = basename(path.cwd) region = "us-west-2" vpc_cidr = "10.0.0.0/16" secondary_vpc_cidr = "10.99.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) tags = { Blueprint = local.name GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } ################################################################################ # Cluster ################################################################################ #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" version = "~> 19.13" cluster_name = local.name cluster_version = "1.27" cluster_endpoint_public_access = true cluster_addons = { coredns = {} kube-proxy = {} vpc-cni = { # Specify the VPC CNI addon should be deployed before compute to ensure # the addon is configured before data plane compute resources are created # See README for further details before_compute = true most_recent = true # To ensure access to the latest settings provided configuration_values = jsonencode({ env = { # Reference https://aws.github.io/aws-eks-best-practices/reliability/docs/networkmanagement/#cni-custom-networking AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG = "true" ENI_CONFIG_LABEL_DEF = "topology.kubernetes.io/zone" # Reference docs https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html ENABLE_PREFIX_DELEGATION = "true" WARM_PREFIX_TARGET = "1" } }) } } vpc_id = module.vpc.vpc_id # We only want to assign the 10.0.* range subnets to the data plane subnet_ids = slice(module.vpc.private_subnets, 0, 3) control_plane_subnet_ids = module.vpc.intra_subnets eks_managed_node_groups = { initial = { instance_types = ["m5.large"] min_size = 1 max_size = 3 desired_size = 2 } } tags = local.tags } ################################################################################ # VPC-CNI Custom Networking ENIConfig ################################################################################ resource "kubectl_manifest" "eni_config" { for_each = zipmap(local.azs, slice(module.vpc.private_subnets, 3, 6)) yaml_body = yamlencode({ apiVersion = "crd.k8s.amazonaws.com/v1alpha1" kind = "ENIConfig" metadata = { name = each.key } spec = { securityGroups = [ module.eks.node_security_group_id, ] subnet = each.value } }) } 
################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = local.name
  cidr = local.vpc_cidr

  secondary_cidr_blocks = [local.secondary_vpc_cidr] # can add up to 5 total CIDR blocks

  azs = local.azs
  private_subnets = concat(
    [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)],
    [for k, v in local.azs : cidrsubnet(local.secondary_vpc_cidr, 2, k)]
  )
  public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
  intra_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]

  enable_nat_gateway = true
  single_nat_gateway = true

  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}
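################################################################################
# Outputs (sketch)
################################################################################

# A hypothetical convenience output, not part of the original example: it
# prints the AWS CLI command to update the local kubeconfig once the cluster
# is provisioned, so the nodes and ENIConfig objects can be inspected with
# kubectl.
output "configure_kubectl" {
  description = "Run this command to update your kubeconfig for the newly provisioned cluster"
  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
}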