'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"). You may not
use this file except in compliance with the License. A copy of the License is
located at

    http://aws.amazon.com/apache2.0/

or in the "license" file accompanying this file. This file is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions
and limitations under the License.

Modified by Suman Koduri (skkoduri@amazon.com)
'''

# take_snapshots
# This Lambda function takes a snapshot of RDS instances or Aurora clusters
# according to the environment variable INTERVAL.
# Set INTERVAL to the number of hours between backups.
# The function lists the available manual snapshots and only triggers a new
# one if the latest is older than INTERVAL hours.

import boto3
from datetime import datetime
import os
import logging

from take_snapshots_utils import *

# Initialize everything
LOGLEVEL = os.getenv('LOG_LEVEL', 'ERROR').strip()
BACKUP_INTERVAL = int(os.getenv('INTERVAL', '24'))

if os.getenv('REGION_OVERRIDE', 'NO') != 'NO':
    REGION = os.getenv('REGION_OVERRIDE').strip()
else:
    REGION = os.getenv('AWS_DEFAULT_REGION')

# S3BUCKET is mandatory; fall back to a default key if S3KEY is not specified
if 'S3BUCKET' in os.environ:
    BUCKET = os.getenv('S3BUCKET')
else:
    raise RDSBackupToolException('S3BUCKET environment variable is not set')

if 'S3KEY' in os.environ:
    KEY = os.getenv('S3KEY')
else:
    KEY = "rds_backups_list.txt"

TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M'

logger = logging.getLogger()
logger.setLevel(LOGLEVEL.upper())
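
# For reference only: a minimal sketch of the freshness check that
# requires_backup_rds (imported from take_snapshots_utils) is expected to
# perform, per the comments at the top of this file. The real implementation
# lives in take_snapshots_utils; this helper is illustrative, is never called
# by this module, and assumes filtered_snapshots is a dict of snapshot
# descriptions containing 'DBInstanceIdentifier' and 'SnapshotCreateTime'
# (naive datetimes), matching the shape of describe_db_snapshots results.
def _example_requires_backup(backup_interval, db_instance, filtered_snapshots):
    from datetime import timedelta
    latest = None
    for snapshot in filtered_snapshots.values():
        if snapshot['DBInstanceIdentifier'] != db_instance:
            continue
        created = snapshot['SnapshotCreateTime']
        if latest is None or created > latest:
            latest = created
    # Back up when no snapshot exists yet, or the newest one is stale
    return latest is None or (datetime.now() - latest) > timedelta(hours=backup_interval)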

def lambda_handler(event, context):
    client = boto3.client('rds', region_name=REGION)
    s3 = boto3.client('s3')

    # The S3 object is a CRLF-separated list of instance and cluster names
    obj = s3.get_object(Bucket=BUCKET, Key=KEY)
    rds_instances = obj['Body'].read().decode('utf-8').split('\r\n')

    now = datetime.now()
    pending_backups = 0

    # Split the configured names into RDS instances and Aurora clusters
    filtered_instances, filtered_clusters = filter_rds(rds_instances, client)

    # Collect the snapshots owned by this tool for RDS and Aurora
    filtered_snapshots_rds = get_own_snapshots_source_rds(
        filtered_instances,
        paginate_api_call(client, 'describe_db_snapshots', 'DBSnapshots'))
    filtered_snapshots_aurora = get_own_snapshots_source_aurora(
        filtered_clusters,
        paginate_api_call(client, 'describe_db_cluster_snapshots', 'DBClusterSnapshots'))

    for db_instance in filtered_instances:
        timestamp_format = now.strftime(TIMESTAMP_FORMAT)
        if requires_backup_rds(BACKUP_INTERVAL, db_instance, filtered_snapshots_rds):
            backup_age = get_latest_snapshot_ts_rds(
                db_instance, filtered_snapshots_rds)
            if backup_age is not None:
                logger.info('Backing up {}. Backed up {} minutes ago'.format(
                    db_instance, (now - backup_age).total_seconds() / 60))
            else:
                logger.info('Backing up {}. No previous backup found'.format(db_instance))
            snapshot_identifier = '{}-snapshot-{}'.format(
                db_instance, timestamp_format)
            try:
                response = client.create_db_snapshot(
                    DBSnapshotIdentifier=snapshot_identifier,
                    DBInstanceIdentifier=db_instance,
                    Tags=[{'Key': 'CreatedBy', 'Value': 'RDS Backup Tool'},
                          {'Key': 'CreatedOn', 'Value': timestamp_format},
                          {'Key': 'shareAndCopy', 'Value': 'YES'}])
                manageSnapshotRecord(
                    action='add',
                    snapshot_id=response['DBSnapshot']['DBSnapshotIdentifier'],
                    rds_name=response['DBSnapshot']['DBInstanceIdentifier'],
                    rds_engine=response['DBSnapshot']['Engine'])
            except Exception as err:
                logger.error('Could not create snapshot for {}: {}'.format(
                    db_instance, err))
                pending_backups += 1
        else:
            backup_age = get_latest_snapshot_ts_rds(
                db_instance, filtered_snapshots_rds)
            logger.info('Skipped {}. Does not require backup. Backed up {} minutes ago'.format(
                db_instance, (now - backup_age).total_seconds() / 60))

    for db_cluster in filtered_clusters:
        timestamp_format = now.strftime(TIMESTAMP_FORMAT)
        if requires_backup_aurora(BACKUP_INTERVAL, db_cluster, filtered_snapshots_aurora):
            backup_age = get_latest_snapshot_ts_aurora(
                db_cluster, filtered_snapshots_aurora)
            if backup_age is not None:
                logger.info('Backing up {}. Backed up {} minutes ago'.format(
                    db_cluster, (now - backup_age).total_seconds() / 60))
            else:
                logger.info('Backing up {}. No previous backup found'.format(db_cluster))
            snapshot_identifier = '{}-snapshot-{}'.format(
                db_cluster, timestamp_format)
            try:
                response = client.create_db_cluster_snapshot(
                    DBClusterSnapshotIdentifier=snapshot_identifier,
                    DBClusterIdentifier=db_cluster,
                    Tags=[{'Key': 'CreatedBy', 'Value': 'RDS Backup Tool'},
                          {'Key': 'CreatedOn', 'Value': timestamp_format},
                          {'Key': 'shareAndCopy', 'Value': 'YES'}])
                manageSnapshotRecord(
                    action='add',
                    snapshot_id=response['DBClusterSnapshot']['DBClusterSnapshotIdentifier'],
                    rds_name=response['DBClusterSnapshot']['DBClusterIdentifier'],
                    rds_engine=response['DBClusterSnapshot']['Engine'])
            except Exception as err:
                logger.error('Could not create snapshot for {}: {}'.format(
                    db_cluster, err))
                pending_backups += 1
        else:
            backup_age = get_latest_snapshot_ts_aurora(
                db_cluster, filtered_snapshots_aurora)
            logger.info('Skipped {}. Does not require backup. Backed up {} minutes ago'.format(
                db_cluster, (now - backup_age).total_seconds() / 60))

    if pending_backups > 0:
        log_message = 'Could not back up every instance. Backups pending: {}'.format(pending_backups)
        logger.error(log_message)
        raise RDSBackupToolException(log_message)


if __name__ == '__main__':
    lambda_handler(None, None)
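
# To exercise the handler locally (assuming this file is saved as
# take_snapshots.py), export the environment variables the module reads at
# import time; the bucket name below is a placeholder. The S3 object must be a
# CRLF-separated list of RDS instance and Aurora cluster identifiers:
#
#   export AWS_DEFAULT_REGION=us-east-1
#   export S3BUCKET=my-backup-config-bucket
#   export S3KEY=rds_backups_list.txt   # optional; this is the default
#   export INTERVAL=24                  # hours between backups
#   export LOG_LEVEL=INFO
#   python take_snapshots.py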