#!/bin/bash

# Resets the EKS workshop environment: restores the base application,
# destroys per-lab Terraform addons, and returns the managed node group
# to its initial size so the next lab starts from a clean state.

if [ -z "$EKS_CLUSTER_NAME" ]; then
  echo "Error: The EKS_CLUSTER_NAME environment variable must be set. Please run 'use-cluster <cluster name>'"
  exit 1
fi

module=$1

repository_path="/eks-workshop/repository"
manifests_path="/eks-workshop/manifests"
base_path="$manifests_path/base-application"

set -Eeuo pipefail

mkdir -p /eks-workshop

rm -f /home/ec2-user/environment/eks-workshop
rm -rf "$manifests_path"

REPOSITORY_REF=${REPOSITORY_REF:-""}

if [ -n "${REPOSITORY_REF}" ]; then
  rm -rf "$repository_path"

  echo "Refreshing copy of workshop repository from GitHub..."
  git clone --quiet "https://github.com/$REPOSITORY_OWNER/$REPOSITORY_NAME.git" "$repository_path" > /dev/null
  (cd "$repository_path" && git checkout --quiet "${REPOSITORY_REF}" > /dev/null)
  echo ""

  cp -R "$repository_path/manifests" "$manifests_path"
elif [ -d "/manifests" ]; then
  cp -R /manifests "$manifests_path"
fi

ln -s "$manifests_path" /home/ec2-user/environment/eks-workshop

if [ -n "$module" ]; then
  if [ "$module" = "introduction/getting-started" ]; then
    exit
  fi
fi

echo "Resetting the environment, please wait"

# Run the cleanup hook left behind by the previous module, if any
if [ -f "/eks-workshop/hooks/cleanup.sh" ]; then
  bash /eks-workshop/hooks/cleanup.sh
fi

kubectl delete pod load-generator --ignore-not-found > /dev/null

# Re-apply the base application, pruning any resources a lab layered on top
kubectl apply -k "$base_path" --prune --all \
  --prune-whitelist=autoscaling/v1/HorizontalPodAutoscaler \
  --prune-whitelist=core/v1/Service \
  --prune-whitelist=core/v1/ConfigMap \
  --prune-whitelist=apps/v1/Deployment \
  --prune-whitelist=apps/v1/StatefulSet \
  --prune-whitelist=core/v1/ServiceAccount \
  --prune-whitelist=core/v1/Secret \
  --prune-whitelist=core/v1/PersistentVolumeClaim \
  --prune-whitelist=scheduling.k8s.io/v1/PriorityClass \
  --prune-whitelist=networking.k8s.io/v1/Ingress > /dev/null

# Delete any Crossplane-managed databases, but only if the CRD is installed
crossplane_crd=$(kubectl get crds | grep relationaldatabases.awsblueprints.io || [[ $? == 1 ]])

if [ -n "$crossplane_crd" ]; then
  kubectl delete relationaldatabases.awsblueprints.io -A --all > /dev/null
fi

echo "Waiting for application to become ready..."

sleep 10

kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null

# Addons
mkdir -p /eks-workshop/terraform
cp "$manifests_path/.workshop/terraform/base.tf" /eks-workshop/terraform

export TF_VAR_eks_cluster_id="$EKS_CLUSTER_NAME"

RESOURCES_PRECREATED=${RESOURCES_PRECREATED:-""}

echo "Cleaning up previous lab infrastructure..."

tf_dir=$(realpath --relative-to="$PWD" '/eks-workshop/terraform')

terraform -chdir="$tf_dir" init -upgrade > /tmp/terraform-destroy-init.log
terraform -chdir="$tf_dir" destroy --auto-approve > /tmp/terraform-destroy.log

rm -rf /eks-workshop/terraform/addon*.tf

rm -rf /eks-workshop/hooks

if [ -n "$module" ]; then
  module_path="$manifests_path/modules/$module"

  # Stage the module's cleanup hook so the next reset can undo its changes
  if [ -f "$module_path/.workshop/cleanup.sh" ]; then
    mkdir -p /eks-workshop/hooks
    cp "$module_path/.workshop/cleanup.sh" /eks-workshop/hooks
  fi

  if [ -f "$module_path/.workshop/terraform/addon.tf" ]; then
    echo "Creating infrastructure for next lab..."
    cp -R "$module_path"/.workshop/terraform/* /eks-workshop/terraform

    # When shared resources were pre-created for this workshop environment,
    # skip the module's infrastructure definitions and apply only its addons
    if [ "$RESOURCES_PRECREATED" = "true" ]; then
      rm -f /eks-workshop/terraform/addon_infrastructure.tf
    fi

    terraform -chdir="$tf_dir" init -upgrade > /tmp/terraform-apply-init.log
    terraform -chdir="$tf_dir" apply -refresh=false --auto-approve > /tmp/terraform-apply.log
  fi

  if [ -d "$module_path/.workshop/manifests" ]; then
    kubectl apply -k "$module_path/.workshop/manifests" > /dev/null
  fi
fi

# Export any environment variables published by the Terraform addons
terraform -chdir="$tf_dir" output -json | jq -r '.environment.value | select(. != null)' > ~/.bashrc.d/workshop-env.bash

# Node groups
expected_size_config="$EKS_DEFAULT_MNG_MIN $EKS_DEFAULT_MNG_MAX $EKS_DEFAULT_MNG_DESIRED"

mng_size_config=$(aws eks describe-nodegroup --cluster-name "$EKS_CLUSTER_NAME" --nodegroup-name "$EKS_DEFAULT_MNG_NAME" | jq -r '.nodegroup.scalingConfig | "\(.minSize) \(.maxSize) \(.desiredSize)"')

if [[ "$mng_size_config" != "$expected_size_config" ]]; then
  echo "Setting EKS Node Group back to initial sizing..."

  aws eks update-nodegroup-config --cluster-name "$EKS_CLUSTER_NAME" --nodegroup-name "$EKS_DEFAULT_MNG_NAME" \
    --scaling-config desiredSize=$EKS_DEFAULT_MNG_DESIRED,minSize=$EKS_DEFAULT_MNG_MIN,maxSize=$EKS_DEFAULT_MNG_MAX > /dev/null

  aws eks wait nodegroup-active --cluster-name "$EKS_CLUSTER_NAME" --nodegroup-name "$EKS_DEFAULT_MNG_NAME"

  sleep 10
fi

asg_size_config=$(aws autoscaling describe-auto-scaling-groups --filters "Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME" | jq -r '.AutoScalingGroups[0] | "\(.MinSize) \(.MaxSize) \(.DesiredCapacity)"')

if [[ "$asg_size_config" != "$expected_size_config" ]]; then
  echo "Setting ASG back to initial sizing..."

  export ASG_NAME=$(aws autoscaling describe-auto-scaling-groups --filters "Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME" --query "AutoScalingGroups[0].AutoScalingGroupName" --output text)

  aws autoscaling update-auto-scaling-group \
    --auto-scaling-group-name "$ASG_NAME" \
    --min-size "$EKS_DEFAULT_MNG_MIN" \
    --max-size "$EKS_DEFAULT_MNG_MAX" \
    --desired-capacity "$EKS_DEFAULT_MNG_DESIRED"
fi

# Wait up to 5 minutes for the cluster to scale back down to 3 default nodes
EXIT_CODE=0

timeout -s TERM 300 bash -c \
  'while [[ $(kubectl get nodes -l workshop-default=yes -o json | jq -r ".items | length") -gt 3 ]];\
  do sleep 30;\
  done' || EXIT_CODE=$?

if [ $EXIT_CODE -ne 0 ]; then
  >&2 echo "Error: Nodes did not scale back to 3"
  exit 1
fi

# Recycle workload pods in case stateful pods got restarted
kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop,app.kubernetes.io/component=service -A > /dev/null
kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null

# Finished
echo 'Environment is ready'
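
# Example usage (a sketch; the script name "reset-environment.sh" and the
# module path below are illustrative, and assume 'use-cluster' has already
# exported EKS_CLUSTER_NAME and the EKS_DEFAULT_MNG_* variables):
#
#   bash reset-environment.sh                                # reset to the base application only
#   bash reset-environment.sh introduction/getting-started   # reset, skipping lab infrastructure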