provider "aws" { region = local.region } provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } } data "aws_caller_identity" "current" {} data "aws_availability_zones" "available" {} locals { name = basename(path.cwd) region = "us-west-2" vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) # To ensure name is consistent between whats created and the user data script second_volume_name = "/dev/xvdb" tags = { Blueprint = local.name GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } velero_s3_backup_location = "${module.velero_backup_s3_bucket.s3_bucket_arn}/backups" } ################################################################################ # Cluster ################################################################################ #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" version = "~> 19.13" cluster_name = local.name cluster_version = "1.27" cluster_endpoint_public_access = true vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets eks_managed_node_group_defaults = { iam_role_additional_policies = { # Not required, but used in the example to access the nodes to inspect mounted volumes AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } } eks_managed_node_groups = { multi-volume = { instance_types = ["c5.large"] min_size = 1 max_size = 3 desired_size = 1 block_device_mappings = { # Root volume xvda = { device_name = "/dev/xvda" ebs = { volume_type = "gp3" encrypted = true kms_key_id = module.ebs_kms_key.key_arn delete_on_termination = true } } xvdb = { # This will be used for containerd's data directory device_name = local.second_volume_name ebs = { volume_size = 24 volume_type = "gp3" iops = 3000 encrypted = true kms_key_id = module.ebs_kms_key.key_arn delete_on_termination = true } } } # This user data mounts the containerd directories to the second EBS volume which # is dedicated to just contianerd. 
      # You can read more about the practice and why
      # here https://aws.github.io/aws-eks-best-practices/scalability/docs/data-plane/#use-multiple-ebs-volumes-for-containers
      # and https://github.com/containerd/containerd/blob/main/docs/ops.md#base-configuration
      pre_bootstrap_user_data = <<-EOT
        # Wait for second volume to attach before trying to mount paths
        TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
        EC2_INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/meta-data/instance-id)

        DATA_STATE="unknown"
        until [ "$${DATA_STATE}" == "attached" ]; do
          DATA_STATE=$(aws ec2 describe-volumes \
            --region ${local.region} \
            --filters \
              Name=attachment.instance-id,Values=$${EC2_INSTANCE_ID} \
              Name=attachment.device,Values=${local.second_volume_name} \
            --query Volumes[].Attachments[].State \
            --output text)
          sleep 5
        done

        # Mount the containerd directories to the 2nd volume
        SECOND_VOL=$(lsblk -o NAME,TYPE -d | awk '/disk/ {print $1}' | sed -n '2 p')
        systemctl stop containerd
        mkfs -t ext4 /dev/$${SECOND_VOL}
        rm -rf /var/lib/containerd/*
        rm -rf /run/containerd/*
        mount /dev/$${SECOND_VOL} /var/lib/containerd/
        mount /dev/$${SECOND_VOL} /run/containerd/
        systemctl start containerd
      EOT
    }

    instance-store = {
      instance_types = ["m5ad.large"]

      min_size     = 1
      max_size     = 3
      desired_size = 1

      block_device_mappings = {
        # Root volume
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 24
            volume_type           = "gp3"
            iops                  = 3000
            encrypted             = true
            kms_key_id            = module.ebs_kms_key.key_arn
            delete_on_termination = true
          }
        }
      }

      # The virtual device name (ephemeralN). Instance store volumes are numbered
      # starting from 0. An instance type with 2 available instance store volumes
      # can specify mappings for ephemeral0 and ephemeral1. The number of available
      # instance store volumes depends on the instance type. After you connect to
      # the instance, you must mount the volume - here, we are using user data to
      # automatically mount the volume(s) during instance creation.
      #
      # NVMe instance store volumes are automatically enumerated and assigned a device
      # name. Including them in your block device mapping has no effect.
      pre_bootstrap_user_data = <<-EOT
        IDX=1
        DEVICES=$(lsblk -o NAME,TYPE -dsn | awk '/disk/ {print $1}')

        for DEV in $DEVICES
        do
          mkfs.xfs /dev/$${DEV}
          mkdir -p /local$${IDX}
          echo /dev/$${DEV} /local$${IDX} xfs defaults,noatime 1 2 >> /etc/fstab
          IDX=$(($${IDX} + 1))
        done

        mount -a
      EOT
    }
  }

  tags = local.tags
}

################################################################################
# EKS Blueprints Addons
################################################################################

module "eks_blueprints_addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  create_delay_dependencies = [for group in module.eks.eks_managed_node_groups : group.node_group_arn]

  eks_addons = {
    aws-ebs-csi-driver = {
      service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn
    }
    coredns    = {}
    vpc-cni    = {}
    kube-proxy = {}
  }

  enable_velero = true
  # An S3 Bucket ARN is required. This can be declared with or without a Prefix.
  velero = {
    s3_backup_location = local.velero_s3_backup_location
  }

  enable_aws_efs_csi_driver = true

  tags = local.tags
}

################################################################################
# Storage Classes
################################################################################

resource "kubernetes_annotations" "gp2" {
  api_version = "storage.k8s.io/v1"
  kind        = "StorageClass"
  # This is true because the resource was already created by the ebs-csi-driver addon
  force = "true"

  metadata {
    name = "gp2"
  }

  annotations = {
    # Modify annotations to remove gp2 as the default storage class while still retaining the class
    "storageclass.kubernetes.io/is-default-class" = "false"
  }

  depends_on = [
    module.eks_blueprints_addons
  ]
}

resource "kubernetes_storage_class_v1" "gp3" {
  metadata {
    name = "gp3"

    annotations = {
      # Annotation to set gp3 as the default storage class
      "storageclass.kubernetes.io/is-default-class" = "true"
    }
  }

  storage_provisioner    = "ebs.csi.aws.com"
  allow_volume_expansion = true
  reclaim_policy         = "Delete"
  volume_binding_mode    = "WaitForFirstConsumer"

  parameters = {
    encrypted = true
    fsType    = "ext4"
    type      = "gp3"
  }

  depends_on = [
    module.eks_blueprints_addons
  ]
}

resource "kubernetes_storage_class_v1" "efs" {
  metadata {
    name = "efs"
  }

  storage_provisioner = "efs.csi.aws.com"

  parameters = {
    provisioningMode = "efs-ap" # Dynamic provisioning
    fileSystemId     = module.efs.id
    directoryPerms   = "700"
  }

  mount_options = [
    "iam"
  ]

  depends_on = [
    module.eks_blueprints_addons
  ]
}

################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = local.name
  cidr = local.vpc_cidr

  azs             = local.azs
  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]

  enable_nat_gateway = true
  single_nat_gateway = true

  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}

#tfsec:ignore:*
module "velero_backup_s3_bucket" {
  source  = "terraform-aws-modules/s3-bucket/aws"
  version = "~> 3.0"

  bucket_prefix = "${local.name}-"

  # Allow deletion of non-empty bucket
  # NOTE: This is enabled for example usage only; you should not enable this for production workloads
  force_destroy = true

  attach_deny_insecure_transport_policy = true
  attach_require_latest_tls_policy      = true

  acl = "private"

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true

  control_object_ownership = true
  object_ownership         = "BucketOwnerPreferred"

  versioning = {
    status     = true
    mfa_delete = false
  }

  server_side_encryption_configuration = {
    rule = {
      apply_server_side_encryption_by_default = {
        sse_algorithm = "AES256"
      }
    }
  }

  tags = local.tags
}

module "efs" {
  source  = "terraform-aws-modules/efs/aws"
  version = "~> 1.1"

  creation_token = local.name
  name           = local.name

  # Mount targets / security group
  mount_targets = {
    for k, v in zipmap(local.azs, module.vpc.private_subnets) : k => { subnet_id = v }
  }

  security_group_description = "${local.name} EFS security group"
  security_group_vpc_id      = module.vpc.vpc_id
  security_group_rules = {
    vpc = {
      # relying on the defaults provided for EFS/NFS (2049/TCP + ingress)
      description = "NFS ingress from VPC private subnets"
      cidr_blocks = module.vpc.private_subnets_cidr_blocks
    }
  }

  tags = local.tags
}

module "ebs_kms_key" {
source = "terraform-aws-modules/kms/aws" version = "~> 1.5" description = "Customer managed key to encrypt EKS managed node group volumes" # Policy key_administrators = [data.aws_caller_identity.current.arn] key_service_roles_for_autoscaling = [ # required for the ASG to manage encrypted volumes for nodes "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the cluster / persistentvolume-controller to create encrypted PVCs module.eks.cluster_iam_role_arn, ] # Aliases aliases = ["eks/${local.name}/ebs"] tags = local.tags } module "ebs_csi_driver_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.20" role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" attach_ebs_csi_policy = true oidc_providers = { main = { provider_arn = module.eks.oidc_provider_arn namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] } } tags = local.tags }