# Optional - Custom identifier for this replay run
tag: ""

# Directory location of the extracted workload, relative to the current directory
workload_location: ""

# Endpoint and username of target cluster to replay queries on
target_cluster_endpoint: ""
target_cluster_region: ""
master_username: ""

# NLB or NAT endpoint for Simple Replay to connect to. The NLB or NAT must have network connectivity to target_cluster_endpoint
nlb_nat_dns: ""

# Required only when replaying via ODBC (pyodbc)
odbc_driver: ""

# If the original driver isn't supported (e.g. JDBC), this driver is used
# instead. "psql" and "odbc" are the only valid values.
default_interface: "psql"

# Optional - Leaving it empty defers to connections.json. "all on" preserves
# time between transactions. "all off" disregards time between transactions,
# executing them as a batch.
time_interval_between_transactions: ""

# Optional - Leaving it empty defers to connections.json. "all on" preserves
# time between queries. "all off" disregards time between queries, executing
# them as a batch.
time_interval_between_queries: ""
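# Example: preserve the recorded time between transactions, but execute the
# queries within each transaction as a batch (non-empty values override
# connections.json):
#   time_interval_between_transactions: "all on"
#   time_interval_between_queries: "all off"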

# Should COPY statements be executed?
execute_copy_statements: "false"

# Should UNLOAD statements be executed?
execute_unload_statements: "false"

# Optional - Where the UNLOAD output and the system table unload go.
replay_output: ""

# Optional - Where the analysis data and summary report will be uploaded. Example: s3://bucket_name/path
analysis_output: ""

# Optional - IAM role to perform UNLOADs with. Leaving this blank means UNLOAD statements will not be replayed.
unload_iam_role: ""

# Optional - IAM role used to run the analysis; it must have UNLOAD access. Leaving this blank means analysis will not be run.
analysis_iam_role: ""

# Location of the SQL file containing queries to unload system tables
unload_system_table_queries: "core/replay/unload_system_tables.sql"

# IAM role to UNLOAD system tables from the target cluster to an S3 location
# for later analysis
target_cluster_system_table_unload_iam_role: ""

# Include filters combine as "db AND user AND pid"; exclude filters combine as "db OR user OR pid".
# When a filter has multiple values, enclose each value in single quotes.
filters:
  include:
    database_name: ['*']
    username: ['*']
    pid: ['*']
  exclude:
    database_name: []
    username: []
    pid: []
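# Example (hypothetical names): replay only queries issued against the "tpch"
# database, excluding those run by "etl_user":
#   filters:
#     include:
#       database_name: ['tpch']
#       username: ['*']
#       pid: ['*']
#     exclude:
#       database_name: []
#       username: ['etl_user']
#       pid: []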

##
## The settings below probably don't need to be modified for a typical run
##

# Set the logging verbosity (e.g. DEBUG, INFO, WARNING)
log_level: "INFO"

# Number of processes used to parallelize the work. If omitted or null,
# defaults to one fewer than the number of CPUs.
num_workers: ~
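# Example: on an 8-CPU host the default is 7 workers; to cap it explicitly:
#   num_workers: 4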

# Output a warning if a connection is not established within this number of
# seconds of its expected time.
connection_tolerance_sec: 300

# Number of TestDrive log files to retain
backup_count: 1

# Should the data returned by queries be discarded?
drop_return: true

# Should connections in the replay be throttled?
limit_concurrent_connections: ~

# Should multi-statement SQL be split into individual statements?
split_multi: true
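# e.g. with split_multi enabled, "SELECT 1; SELECT 2;" is executed as two
# separate statements rather than a single multi-statement batch.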

# For Serverless, set up a secret that stores the admin username and password, and specify the name of the secret below.
# Note: this admin username maps to the username specified as `master_username` in this file. This field will be renamed to `admin_username` in a future release.
secret_name: ""