/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: MIT-0
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this
 * software and associated documentation files (the "Software"), to deal in the Software
 * without restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

terraform {
  required_version = ">= 1.0.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.66.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.6.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }
}

provider "aws" {
  region = data.aws_region.current.id
  alias  = "default"
}

terraform {
  backend "local" {
    path = "local_tf_state/terraform-main.tfstate"
  }
}

data "aws_region" "current" {}

data "aws_availability_zones" "available" {}

locals {
  tenant      = "aws001"  # AWS account name or unique ID for the tenant
  environment = "preprod" # Environment area, e.g. preprod or prod
  zone        = "dev"     # Environment within one sub_tenant or business unit

  kubernetes_version = "1.21"

  vpc_cidr     = "10.0.0.0/16"
  vpc_name     = join("-", [local.tenant, local.environment, local.zone, "vpc"])
  cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"])

  terraform_version = "Terraform v1.0.1"
}

module "aws_vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "v3.2.0"

  name = local.vpc_name
  cidr = local.vpc_cidr
  azs  = data.aws_availability_zones.available.names

  public_subnets  = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k)]
  private_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k + 10)]

  enable_nat_gateway   = true
  create_igw           = true
  enable_dns_hostnames = true
  single_nat_gateway   = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = "1"
  }
}

#---------------------------------------------------------------
# Example to consume aws-eks-accelerator-for-terraform module
#---------------------------------------------------------------
module "aws-eks-accelerator-for-terraform" {
  source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"

  tenant            = local.tenant
  environment       = local.environment
  zone              = local.zone
  terraform_version = local.terraform_version

  # EKS cluster VPC and subnet mandatory config
  vpc_id             = module.aws_vpc.vpc_id
  private_subnet_ids = module.aws_vpc.private_subnets

  # EKS CONTROL PLANE VARIABLES
  create_eks         = true
  kubernetes_version = local.kubernetes_version

  self_managed_node_groups = {
    #---------------------------------------------------------#
    # ON-DEMAND Self Managed Worker Group - Worker Group - 1
    #---------------------------------------------------------#
    self_mg_4 = {
      node_group_name        = "self-managed-ondemand" # Used to create a dedicated IAM role for each node group and to register it in the aws-auth config map
      create_launch_template = true
      launch_template_os     = "amazonlinux2eks"       # amazonlinux2eks or bottlerocket or windows
      custom_ami_id          = "ami-0dfaa019a300f219c" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc.
      public_ip              = false                   # Enable only for public subnets

      pre_userdata = <<-EOT
        yum install -y amazon-ssm-agent
        systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
      EOT

      disk_size     = 20
      instance_type = "m5.large"

      desired_size = 2
      max_size     = 10
      min_size     = 2

      capacity_type = "" # Optional; set to "spot" only for Spot capacity

      k8s_labels = {
        Environment = "preprod"
        Zone        = "test"
        WorkerType  = "SELF_MANAGED_ON_DEMAND"
      }

      additional_tags = {
        ExtraTag    = "m5x-on-demand"
        Name        = "m5x-on-demand"
        subnet_type = "private"
      }

      subnet_ids = module.aws_vpc.private_subnets

      create_worker_security_group = false # Set to true to create a dedicated security group for this node group
    },
  } # END OF SELF MANAGED NODE GROUPS
}
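
#---------------------------------------------------------------
# The required_providers block above pins the kubernetes and helm
# providers, but this example does not configure them. The sketch
# below is a minimal, assumed wiring against the created cluster:
# it presumes the accelerator module exposes an "eks_cluster_id"
# output (verify against the module's outputs before using).
#---------------------------------------------------------------
data "aws_eks_cluster" "cluster" {
  name = module.aws-eks-accelerator-for-terraform.eks_cluster_id # assumed output name
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.aws-eks-accelerator-for-terraform.eks_cluster_id # assumed output name
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.cluster.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.cluster.token
  }
}

# Typical workflow from this directory:
#   terraform init
#   terraform plan
#   terraform apply
# After apply, a kubeconfig for the cluster (named "aws001-preprod-dev-eks"
# per the locals above) can be generated with:
#   aws eks update-kubeconfig --region <region> --name aws001-preprod-dev-eks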