/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: MIT-0
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this
 * software and associated documentation files (the "Software"), to deal in the Software
 * without restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

terraform {
  required_version = ">= 1.0.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.66.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.6.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }
}

provider "aws" {
  region = data.aws_region.current.id
  alias  = "default"
}

terraform {
  backend "local" {
    path = "local_tf_state/terraform-main.tfstate"
  }
}

data "aws_region" "current" {}

data "aws_availability_zones" "available" {}

locals {
  tenant      = "aws001"  # AWS account name or unique ID for the tenant
  environment = "preprod" # Environment area, e.g., preprod or prod
  zone        = "dev"     # Environment within one sub_tenant or business unit

  kubernetes_version = "1.21"

  vpc_cidr     = "10.0.0.0/16"
  vpc_name     = join("-", [local.tenant, local.environment, local.zone, "vpc"])
  cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"])

  terraform_version = "Terraform v1.0.1"
}

module "aws_vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "v3.2.0"

  name = local.vpc_name
  cidr = local.vpc_cidr
  azs  = data.aws_availability_zones.available.names

  public_subnets  = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k)]
  private_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k + 10)]

  enable_nat_gateway   = true
  create_igw           = true
  enable_dns_hostnames = true
  single_nat_gateway   = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = "1"
  }
}

#---------------------------------------------------------------
# Example to consume aws-eks-accelerator-for-terraform module
#---------------------------------------------------------------
module "aws-eks-accelerator-for-terraform" {
  source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"

  tenant            = local.tenant
  environment       = local.environment
  zone              = local.zone
  terraform_version = local.terraform_version

  # EKS Cluster VPC and Subnet mandatory config
  vpc_id             = module.aws_vpc.vpc_id
  private_subnet_ids = module.aws_vpc.private_subnets

  # EKS CONTROL PLANE VARIABLES
  create_eks         = true
  kubernetes_version = local.kubernetes_version

  # EKS MANAGED NODE GROUPS
  managed_node_groups = {
    mg_4 = {
      # 1> Node Group configuration - Part 1
      node_group_name        = "managed-ondemand" # Max 40 characters for node group name
      create_launch_template = true               # false will use the default launch template
      launch_template_os     = "amazonlinux2eks"  # amazonlinux2eks or bottlerocket
      public_ip              = false              # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates
      pre_userdata           = <<-EOT
        yum install -y amazon-ssm-agent
        systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
      EOT

      # 2> Node Group scaling configuration
      desired_size    = 3
      max_size        = 12
      min_size        = 3
      max_unavailable = 1 # or percentage = 20

      # 3> Node Group compute configuration
      ami_type       = "AL2_x86_64" # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM
      capacity_type  = "ON_DEMAND"  # ON_DEMAND or SPOT
      instance_types = ["m4.large"] # List of instances used only for SPOT type
      disk_size      = 50

      # 4> Node Group network configuration
      subnet_ids = module.aws_vpc.public_subnets

      k8s_taints = []

      k8s_labels = {
        Environment = "preprod"
        Zone        = "dev"
        WorkerType  = "ON_DEMAND"
      }

      additional_tags = {
        ExtraTag    = "m5x-on-demand"
        Name        = "m5x-on-demand"
        subnet_type = "public"
      }
    },
  }
}

module "kubernetes-addons" {
  source = "github.com/aws-samples/aws-eks-accelerator-for-terraform//modules/kubernetes-addons"

  eks_cluster_id               = module.aws-eks-accelerator-for-terraform.eks_cluster_id
  eks_worker_security_group_id = module.aws-eks-accelerator-for-terraform.worker_security_group_id

  # K8s Add-ons
  enable_metrics_server     = true
  enable_cluster_autoscaler = true

  #---------------------------------------
  # ENABLE AGONES
  #---------------------------------------
  # NOTE: Agones requires a node group in public subnets with public IP enabled
  enable_agones = true

  # Optional agones_helm_config
  agones_helm_config = {
    name               = "agones"
    chart              = "agones"
    repository         = "https://agones.dev/chart/stable"
    version            = "1.15.0"
    namespace          = "kube-system"
    gameserver_minport = 7000 # required for security group changes on worker nodes
    gameserver_maxport = 8000 # required for security group changes on worker nodes
    values = [templatefile("${path.module}/helm_values/agones-values.yaml", {
      expose_udp            = true
      gameserver_namespaces = "{${join(",", ["default", "xbox-gameservers", "xbox-gameservers"])}}"
      gameserver_minport    = 7000
      gameserver_maxport    = 8000
    })]
  }
}
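
#---------------------------------------------------------------
# Kubernetes and Helm provider configuration (sketch)
#---------------------------------------------------------------
# The kubernetes and helm providers are pinned in required_providers above but
# are not configured anywhere in this file. A minimal sketch follows, assuming
# the providers authenticate against the cluster created by the
# aws-eks-accelerator-for-terraform module; the data source names "cluster"
# below are illustrative and not part of the upstream example.
data "aws_eks_cluster" "cluster" {
  name = module.aws-eks-accelerator-for-terraform.eks_cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.aws-eks-accelerator-for-terraform.eks_cluster_id
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.cluster.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.cluster.token
  }
}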
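
#---------------------------------------------------------------
# Outputs (sketch)
#---------------------------------------------------------------
# A minimal sketch for surfacing the cluster ID after apply. The eks_cluster_id
# module output is already consumed above; the update-kubeconfig command is a
# plain AWS CLI invocation, not an output of the upstream module.
output "eks_cluster_id" {
  description = "EKS cluster name"
  value       = module.aws-eks-accelerator-for-terraform.eks_cluster_id
}

output "configure_kubectl" {
  description = "AWS CLI command to update the local kubeconfig for this cluster"
  value       = "aws eks update-kubeconfig --name ${module.aws-eks-accelerator-for-terraform.eks_cluster_id} --region ${data.aws_region.current.id}"
}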