provider "aws" {
  region = local.region
}

# The Kubernetes, Helm, and kubectl providers all authenticate against the EKS
# cluster created below, using a short-lived token from aws_eks_cluster_auth.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  token                  = data.aws_eks_cluster_auth.this.token
}

provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
    token                  = data.aws_eks_cluster_auth.this.token
  }
}

provider "kubectl" {
  apply_retry_count      = 10
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  load_config_file       = false
  token                  = data.aws_eks_cluster_auth.this.token
}

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_name
}

data "aws_caller_identity" "current" {}

data "aws_availability_zones" "available" {
  # FIX: without this filter, accounts with opted-in Local Zones or Wavelength
  # Zones can have those zones appear in `names`, and slice(..., 0, 3) below
  # may select them. Such zones cannot host standard EKS/VPC subnets, so
  # restrict results to regular (opt-in-not-required) Availability Zones.
  filter {
    name   = "opt-in-status"
    values = ["opt-in-not-required"]
  }
}

locals {
  # Derive the blueprint name from the working directory so the example is
  # self-labeling when copied.
  name   = basename(path.cwd)
  region = "us-west-2"

  vpc_cidr = "10.0.0.0/16"
  # First three AZs of the region; consumed by the VPC module's subnet layout.
  azs = slice(data.aws_availability_zones.available.names, 0, 3)

  tags = {
    Blueprint  = local.name
    GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
  }
}

################################################################################
# Cluster
################################################################################

#tfsec:ignore:aws-eks-enable-control-plane-logging
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.13"

  cluster_name                   = local.name
  cluster_version                = "1.27"
  cluster_endpoint_public_access = true

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  eks_managed_node_groups = {
    initial = {
      instance_types = ["m5.large"]

      min_size     = 1
      max_size     = 5
      desired_size = 2
    }
  }

  # Grant the admin and dev teams (defined below) cluster access by mapping
  # their IAM roles into the aws-auth ConfigMap.
  manage_aws_auth_configmap = true
  aws_auth_roles = flatten([
    module.eks_blueprints_admin_team.aws_auth_configmap_role,
    [for team in module.eks_blueprints_dev_teams : team.aws_auth_configmap_role],
  ])

  tags = local.tags
}

################################################################################
# EKS Blueprints Teams
################################################################################

# Admin team: cluster-wide admin access for the current caller identity.
module "eks_blueprints_admin_team" {
  source  = "aws-ia/eks-blueprints-teams/aws"
  version = "~> 1.0"

  name = "admin-team"

  enable_admin = true
  users        = [data.aws_caller_identity.current.arn]
  cluster_arn  = module.eks.cluster_arn

  tags = local.tags
}

# Dev teams: one module instance per team key ("red", "blue"), each with its
# own namespace, resource quota, and limit ranges.
module "eks_blueprints_dev_teams" {
  source  = "aws-ia/eks-blueprints-teams/aws"
  version = "~> 1.0"

  for_each = {
    red = {
      labels = {
        project = "SuperSecret"
      }
    }
    blue = {}
  }

  name = "team-${each.key}"

  users             = [data.aws_caller_identity.current.arn]
  cluster_arn       = module.eks.cluster_arn
  oidc_provider_arn = module.eks.oidc_provider_arn

  # Per-team labels, merged with any team-specific labels from for_each
  # (try() tolerates teams that declare none, e.g. "blue").
  labels = merge(
    {
      team = each.key
    },
    try(each.value.labels, {})
  )

  annotations = {
    team = each.key
  }

  namespaces = {
    "team-${each.key}" = {
      labels = {
        appName     = "${each.key}-team-app",
        projectName = "project-${each.key}",
      }

      resource_quota = {
        hard = {
          "requests.cpu"    = "2000m",
          "requests.memory" = "4Gi",
          "limits.cpu"      = "4000m",
          "limits.memory"   = "16Gi",
          "pods"            = "20",
          "secrets"         = "20",
          "services"        = "20"
        }
      }

      limit_range = {
        limit = [
          {
            type = "Pod"
            max = {
              cpu    = "200m"
              memory = "1Gi"
            }
          },
          {
            type = "PersistentVolumeClaim"
            min = {
              storage = "24M"
            }
          },
          {
            type = "Container"
            default = {
              cpu    = "50m"
              memory = "24Mi"
            }
          }
        ]
      }
    }
  }

  tags = local.tags
}

################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = local.name
  cidr = local.vpc_cidr

  azs = local.azs
  # One private /20 per AZ, and one public /24 per AZ offset into the CIDR.
  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]

  enable_nat_gateway = true
  single_nat_gateway = true

  # Subnet tags required for Kubernetes load balancer placement.
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}