# -----------------------------------------------------------
# A CloudFormation template deploys this AWS Lambda function. The function, once
# invoked, captures CloudWatch metrics that will then be enhanced, transformed, and
# used for IOPS calculations
# -----------------------------------------------------------
import json
import boto3
from datetime import datetime
from datetime import timedelta
import random
import string
import os

ec2 = boto3.client('ec2')


def lambda_handler(event, context):
    """Scan every AWS region for io1/io2 EBS volumes and export their metrics.

    For each provisioned-IOPS (io1/io2) volume found, the last 7 days of
    VolumeReadOps and VolumeWriteOps are fetched from CloudWatch and written
    to S3 as a per-volume CSV file.

    Parameters
    ----------
    event, context : standard Lambda invocation arguments (unused here).

    Returns
    -------
    dict
        A Lambda proxy-style response with 'statusCode' 200 and a JSON body.
    """
    response = ec2.describe_regions()
    for region in response['Regions']:
        region_name = region['RegionName']
        print('Region name:', region_name)
        ec2_resource = boto3.resource('ec2', region_name=region_name)
        for vol in ec2_resource.volumes.all():
            # Only provisioned-IOPS volume types are analyzed.
            if vol.volume_type in ("io1", "io2"):
                print(
                    f"Created({vol.create_time}),AZ({vol.availability_zone})"
                    f",VolumeID({vol.volume_id}),VolumeType({vol.volume_type})"
                    f",State({vol.state}),Size({vol.size}),IOPS({vol.iops})"
                    f",IsEncrypted({vol.encrypted}),SnapshotID({vol.snapshot_id})"
                    f",KMS_KEYID({vol.kms_key_id})"
                )
                read_results = getMetricForVolume(
                    vol.volume_id, 'VolumeReadOps', region_name)
                write_results = getMetricForVolume(
                    vol.volume_id, 'VolumeWriteOps', region_name)
                writeFileToS3(read_results, write_results, region_name,
                              vol.volume_id, vol.iops)
    return {
        'statusCode': 200,
        'body': json.dumps('EBS Analyzer complete!')
    }

# CloudWatch metrics can be queried in two different ways, GetMetricData and
# GetMetricStatistics. The solution uses GetMetricData API as you can retrieve data
# faster and the returned data supports metric math and ordering and pagination.
# You can read more about choosing the right approach for your use case here:
# https://aws.amazon.com/premiumsupport/knowledge-center/cloudwatch-getmetricdata-api/
def getMetricForVolume(volumeId, metricName, region_name):
    """Fetch 7 days of one EBS CloudWatch metric, summed per 60-second period.

    Parameters
    ----------
    volumeId : str
        EBS volume id (e.g. "vol-0abc...").
    metricName : str
        AWS/EBS metric name, e.g. 'VolumeReadOps' or 'VolumeWriteOps'.
    region_name : str
        Region the volume lives in; a regional CloudWatch client is created.

    Returns
    -------
    dict
        The raw GetMetricData response; results are under 'MetricDataResults'.
    """
    print("getMetricForVolume " + volumeId + " " + metricName)
    cloudwatch = boto3.client('cloudwatch', region_name)
    # MetricDataQueries Ids must match ^[a-z][a-zA-Z0-9_]*$, so the hyphen is
    # stripped from the volume id (e.g. "vol-0abc" -> "vol0abc").
    query_id = volumeId.replace("-", "")
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                'Id': query_id,
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/EBS',
                        'MetricName': metricName,
                        'Dimensions': [
                            {
                                'Name': 'VolumeId',
                                'Value': volumeId
                            },
                        ]
                    },
                    'Period': 60,
                    'Stat': 'Sum',
                    'Unit': 'Count'
                }
            },
        ],
        # CloudWatch metrics captured for 7 days. Can edit to go beyond 7 days.
        # NOTE(review): datetime.now() is naive local time; in Lambda the
        # runtime clock is UTC so this matches CloudWatch, but consider
        # timezone-aware UTC datetimes if this ever runs elsewhere.
        StartTime=datetime.now() - timedelta(days=7),
        EndTime=datetime.now()
    )
    return response

# The function generates a unique CSV file in Amazon S3 containing metrics for the
# last 7 days for each of the io1 and io2 volumes. With minimal development effort,
# the script can be changed to cover a longer timespan or capture other metrics.
def writeFileToS3(readResults, writeResults, regionName, volumeId, volumeIops):
    """Write per-minute read/write ops and IOPS utilization for one volume to S3.

    Builds a CSV in /tmp (one row per minute where the read and write series
    share a timestamp) and uploads it to the bucket named by the
    'resultsBucketName' environment variable as "<volumeId>.csv".
    Does nothing if either metric query did not complete.

    Parameters
    ----------
    readResults, writeResults : dict
        GetMetricData responses for VolumeReadOps / VolumeWriteOps.
    regionName : str
        Region the volume lives in (first CSV column).
    volumeId : str
        EBS volume id; also used as the S3 object key prefix.
    volumeIops : int
        Provisioned IOPS, used as the denominator for utilization %.
    """
    read_data = readResults['MetricDataResults'][0]
    write_data = writeResults['MetricDataResults'][0]
    print("readStatusCode:" + read_data['StatusCode'])
    # BUG FIX: the original printed the *read* status code twice.
    print("writeStatusCode:" + write_data['StatusCode'])
    if read_data['StatusCode'] != "Complete" or write_data['StatusCode'] != "Complete":
        return

    print("writing to file " + volumeId)
    bucketName = os.environ['resultsBucketName']
    fileObjKeyName = volumeId + ".csv"
    # Random suffix keeps the /tmp scratch path unique across invocations of
    # a warm Lambda container; the S3 key itself stays deterministic.
    suffix = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
    lambdaPath = "/tmp/" + fileObjKeyName + suffix

    read_timestamps = read_data['Timestamps']
    read_values = read_data['Values']
    write_timestamps = write_data['Timestamps']
    write_values = write_data['Values']
    print("READ LEN = " + str(len(read_timestamps)))
    print("WRITE LEN = " + str(len(write_timestamps)))

    # The two series can come back with different lengths; align them by
    # truncating BOTH timestamps and values to the shorter series.
    # BUG FIX: the original trimmed only the timestamp lists (leaving the
    # value lists misaligned with them) and its read-side trim loop used a
    # stale index (pop(readLen-1) on a shrinking list), raising IndexError
    # whenever more than one element had to be removed.
    n = min(len(read_timestamps), len(write_timestamps))
    print("READ LEN AFTER MOD = " + str(n))
    print("WRITE LEN AFTER MOD = " + str(n))

    with open(lambdaPath, "x") as f:
        f.write("region, volume id,read date,read sum,write date,write sum,"
                "total,total per second,provisioned iops,utilization\n")
        rows = zip(read_timestamps[:n], read_values[:n],
                   write_timestamps[:n], write_values[:n])
        for read_ts, read_val, write_ts, write_val in rows:
            # Only emit rows where both series report the same minute.
            if read_ts.timestamp() != write_ts.timestamp():
                continue
            total_iops = read_val + write_val
            # Sum over a 60s period -> average ops per second.
            total_per_second = total_iops / 60 if total_iops > 0 else 0
            utilization = (total_per_second * 100) / volumeIops
            f.write(",".join([
                regionName,
                volumeId,
                read_ts.strftime("%m/%d/%Y %H:%M:%S"),
                str(read_val),
                write_ts.strftime("%m/%d/%Y %H:%M:%S"),
                str(write_val),
                str(total_iops),
                str(total_per_second),
                str(volumeIops),
                str(utilization),
            ]) + "\n")

    s3 = boto3.resource("s3")
    s3.meta.client.upload_file(lambdaPath, bucketName, fileObjKeyName)