apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig

metadata:
  name: {{cookiecutter.cluster_name}}
  region: {{cookiecutter.region}}

vpc: # Existing Amazon VPC
  clusterEndpoints:
    publicAccess: true
    privateAccess: true
  id: "{{cookiecutter.vpc_id}}"
  cidr: "{{cookiecutter.vpc_cidr}}"
  subnets: # Define all subnets where the control plane (masters) can be placed.
    private:
      # TODO: support for more regions
      {{cookiecutter.availability_zone_1}}:
        id: "{{cookiecutter.subnet_priv_1a}}"
        cidr: "{{cookiecutter.subnet_priv_1a_cidr}}"
      {{cookiecutter.availability_zone_2}}:
        id: "{{cookiecutter.subnet_priv_1b}}"
        cidr: "{{cookiecutter.subnet_priv_1b_cidr}}"
    public:
      {{cookiecutter.availability_zone_1}}:
        id: "{{cookiecutter.subnet_pub_1a}}"
        cidr: "{{cookiecutter.subnet_pub_1a_cidr}}"
      {{cookiecutter.availability_zone_2}}:
        id: "{{cookiecutter.subnet_pub_1b}}"
        cidr: "{{cookiecutter.subnet_pub_1b_cidr}}"

iam:
  serviceRoleARN: "{{cookiecutter.eks_service_role}}"

# Example of a managed node group for Amazon EKS
managedNodeGroups:
  - name: app-node-group
    instanceType: t2.medium
    minSize: 3
    desiredCapacity: 3
    maxSize: 10
    availabilityZones: ["{{cookiecutter.availability_zone_1}}", "{{cookiecutter.availability_zone_2}}"]
    volumeSize: 20
    privateNetworking: true # Provision the node instances only in private subnets
    ssh:
      allow: false
      # To enable SSH, provide the public key that will be pushed to the instances
      # publicKeyPath: ~/.ssh/ec2_id_rsa.pub
      # sourceSecurityGroupIds: [""]
    labels: {role: worker} # Labels applied at the Kubernetes level
    tags: # Tags applied to the EC2 instances
      nodegroup-role: worker
      k8s.io/cluster-autoscaler/{{cookiecutter.cluster_name}}: owned
      k8s.io/cluster-autoscaler/enabled: "true" # Must be a string, not a boolean
      Name: {{cookiecutter.cluster_name}}-nodes
    # You can provide an existing role or let eksctl create the policies for you
    iam:
      attachPolicyARNs:
        - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
        - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
        - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
      # instanceRoleARN: "arn:aws:iam::00000:role/NodeInstanceRole" # If you want to use an existing role
      withAddonPolicies: # Attach extra policies to the node role for additional cluster controllers
        imageBuilder: false
        autoScaler: true
        externalDNS: true
        certManager: true
        appMesh: true
        ebs: true
        fsx: true
        efs: true
        albIngress: true
        xRay: true
        cloudWatch: true

cloudWatch: # Export of control plane logs to CloudWatch
  clusterLogging:
    # Enable specific types of cluster control plane logs
    enableTypes: ["audit", "authenticator", "controllerManager"]
    # All supported types: "api", "audit", "authenticator", "controllerManager", "scheduler"
    # Supported special values: "*" and "all"

# These are legacy (unmanaged) node groups: plain ASGs with a specific AMI that register with Kubernetes
# nodeGroups:
#   - name: ng-1
#     instanceType: m5.large
#     desiredCapacity: 3
#     iam:
#       instanceProfileARN: "arn:aws:iam::11111:instance-profile/eks-nodes-base-role"
#       instanceRoleARN: "arn:aws:iam::1111:role/eks-nodes-base-role"
#     privateNetworking: true
#     securityGroups:
#       withShared: true
#       withLocal: true
#       attachIDs: ['sg-11111', 'sg-11112']
#     ssh:
#       publicKeyName: 'my-instance-key'
#     tags:
#       'environment:basedomain': 'example.org'
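
# Usage sketch, kept as comments so this file stays valid YAML. The template
# path and the rendered file name "cluster.yaml" below are assumptions for
# illustration, not names defined by this repo:
#
#   cookiecutter <path-to-this-template>   # render the cookiecutter variables in this file
#   eksctl create cluster -f cluster.yaml  # create the cluster from the rendered config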