# Required. Where to save the extracted workload. Either an S3 location or a local directory.
workload_location: "s3://mybucketname/myworkload"

# Optional. Providing this enables automatic log retrieval from S3 and system
# table information retrieval (which allows query start and end times to be
# extracted, rather than just record times).
source_cluster_endpoint: ""

# Required only if source_cluster_endpoint is given.
master_username: "awsuser"

# Required for generating copy_replacements, and if a log location for CloudWatch logs is specified.
region: ""

# Required. Start and end time of the workload to be extracted, e.g. 2020-06-14T21:41:16+00:00
start_time: ""
end_time: ""

# Replacement S3 location and IAM role for COPY files
replacement_copy_location: ""
replacement_iam_location: ""

# Required only if ODBC-based extraction is preferred and the ODBC driver is installed.
# Otherwise, the Python driver is used.
odbc_driver: ""

# Leave blank to automatically retrieve audit logs from the source cluster.
# You can specify a local or S3 location to load the audit logs from
# another location.
log_location: ""

# Location of the SQL file containing queries to unload system tables
unload_system_table_queries: "core/replay/unload_system_tables.sql"

# Must be an S3 location. If unspecified, system tables will not be unloaded.
source_cluster_system_table_unload_location: ""

# If an IAM role is provided, UNLOAD will occur. If this is blank, UNLOAD of system tables will not occur.
source_cluster_system_table_unload_iam_role: ""

##
## The settings below probably don't need to be modified for a typical run
##

# Set the logging verbosity
log_level: info

# Number of log files to maintain
backup_count: 1

# Provide the list of Spectrum (external) schemas to exclude from modification during replay, in the format ['schema_name']
external_schemas:
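
##
## Example configuration (a hypothetical sketch, commented out so this file
## remains valid YAML). The cluster endpoint, region, time window, and schema
## name below are placeholder assumptions, not values from a real cluster;
## replace them with your own environment's settings before running an
## extraction.
##
# workload_location: "s3://mybucketname/myworkload"
# source_cluster_endpoint: "mycluster.abc123xyz789.us-east-1.redshift.amazonaws.com:5439/dev"
# master_username: "awsuser"
# region: "us-east-1"
# start_time: "2020-06-14T21:41:16+00:00"
# end_time: "2020-06-14T23:41:16+00:00"
# external_schemas: ['spectrum_schema']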