[dev]
# Set to "huggingface", for example, if you are a huggingface developer. Default is ""
partner_developer = ""
# Please only set it to true if you are preparing an EI related PR
# Do remember to revert it back to false before merging any PR (including EI dedicated PR)
ei_mode = false
# Please only set it to true if you are preparing a NEURON related PR
# Do remember to revert it back to false before merging any PR (including NEURON dedicated PR)
neuron_mode = false
# Please only set it to true if you are preparing a NEURONX related PR
# Do remember to revert it back to false before merging any PR (including NEURONX dedicated PR)
neuronx_mode = false
# Please only set it to true if you are preparing a GRAVITON related PR
# Do remember to revert it back to false before merging any PR (including GRAVITON dedicated PR)
graviton_mode = false
# Please only set it to true if you are preparing a HABANA related PR
# Do remember to revert it back to false before merging any PR (including HABANA dedicated PR)
habana_mode = false
# Please only set it to true if you are preparing a HUGGINGFACE TRCOMP related PR
# Do remember to revert it back to false before merging any PR (including HUGGINGFACE TRCOMP dedicated PR)
# This mode is used to build TF 2.6 and PT1.11 DLC
huggingface_trcomp_mode = false
# Please only set it to true if you are preparing a TRCOMP related PR
# Do remember to revert it back to false before merging any PR (including TRCOMP dedicated PR)
# This mode is used to build PT1.12 and above DLC
trcomp_mode = false
# Please only set it to true if you are preparing a Benchmark related PR
# Do remember to revert it back to false before merging any PR (including Benchmark dedicated PR)
benchmark_mode = false

[build]
# Add in frameworks you would like to build. By default, builds are disabled unless you specify building an image.
# available frameworks - ["autogluon", "huggingface_tensorflow", "huggingface_pytorch", "huggingface_tensorflow_trcomp", "huggingface_pytorch_trcomp", "pytorch_trcomp", "tensorflow", "mxnet", "pytorch", "stabilityai_pytorch"]
build_frameworks = []
# By default we build both training and inference containers. Set true/false values to determine which to build.
build_training = true
build_inference = true
# Set to false in order to remove datetime tag on PR builds
datetime_tag = true
# Note: Need to build the images at least once with datetime_tag = false
# before disabling new builds, or tests will fail
do_build = true

[test]
### On by default
sanity_tests = true
safety_check_test = false
ecr_scan_allowlist_feature = false
ecs_tests = true
eks_tests = true
ec2_tests = true
### Set ec2_efa_tests = true to be able to run any EC2 tests on any instance type that uses EFA-capable instances by
### default. If false, these types of tests will be skipped while other tests will run as usual.
### EC2 EFA tests are run in EC2 test jobs, so ec2_tests must be true if ec2_efa_tests is true.
### Off by default (set to false)
ec2_efa_tests = false

### SM specific tests
### Off by default
sagemaker_local_tests = false

# SM remote test valid values:
# "off" --> do not trigger sagemaker remote tests (default)
# "standard" --> run standard sagemaker remote tests from test/sagemaker_tests
# "rc" --> run release_candidate_integration tests
# "efa" --> run efa sagemaker tests
sagemaker_remote_tests = "off"

# SM remote EFA test instance type
sagemaker_remote_efa_instance_type = ""

# Run CI tests for nightly images
# false by default
nightly_pr_test_mode = false

use_scheduler = false

[buildspec_override]
# Assign the path to the required buildspec file from the deep-learning-containers folder
# For example:
# dlc-pr-tensorflow-2-habana-training = "habana/tensorflow/training/buildspec-2-10.yml"
# dlc-pr-pytorch-inference = "pytorch/inference/buildspec-1-12.yml"
# Setting the buildspec file path to "" allows the image builder to choose the default buildspec file.

### TRAINING PR JOBS ###

# Standard Framework Training
dlc-pr-mxnet-training = ""
dlc-pr-pytorch-training = ""
dlc-pr-tensorflow-2-training = ""
dlc-pr-autogluon-training = ""

# HuggingFace Training
dlc-pr-huggingface-tensorflow-training = ""
dlc-pr-huggingface-pytorch-training = ""

# Training Compiler
dlc-pr-huggingface-pytorch-trcomp-training = ""
dlc-pr-huggingface-tensorflow-2-trcomp-training = ""
dlc-pr-pytorch-trcomp-training = ""

# Neuron Training
dlc-pr-mxnet-neuron-training = ""
dlc-pr-pytorch-neuron-training = ""
dlc-pr-tensorflow-2-neuron-training = ""

# Stability AI Training
dlc-pr-stabilityai-pytorch-training = ""

# Habana Training
dlc-pr-pytorch-habana-training = ""
dlc-pr-tensorflow-2-habana-training = ""

### INFERENCE PR JOBS ###

# Standard Framework Inference
dlc-pr-mxnet-inference = ""
dlc-pr-pytorch-inference = ""
dlc-pr-tensorflow-2-inference = ""
dlc-pr-autogluon-inference = ""

# Neuron Inference
dlc-pr-mxnet-neuron-inference = ""
dlc-pr-pytorch-neuron-inference = ""
dlc-pr-tensorflow-1-neuron-inference = ""
dlc-pr-tensorflow-2-neuron-inference = ""

# HuggingFace Inference
dlc-pr-huggingface-tensorflow-inference = ""
dlc-pr-huggingface-pytorch-inference = ""
dlc-pr-huggingface-pytorch-neuron-inference = ""

# Stability AI Inference
dlc-pr-stabilityai-pytorch-inference = ""

# Graviton Inference
dlc-pr-mxnet-graviton-inference = ""
dlc-pr-pytorch-graviton-inference = ""
dlc-pr-tensorflow-2-graviton-inference = ""

# EIA Inference
dlc-pr-mxnet-eia-inference = ""
dlc-pr-pytorch-eia-inference = ""
dlc-pr-tensorflow-2-eia-inference = ""
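### Illustrative example (not part of the defaults above): a developer preparing a
### PyTorch-training-only PR might set, in the corresponding sections of this file:
###   build_frameworks = ["pytorch"]
###   build_training = true
###   build_inference = false
###   dlc-pr-pytorch-training = "pytorch/training/buildspec.yml"
### The buildspec path shown here is a placeholder; point it at the actual buildspec
### file in the deep-learning-containers folder for your framework version, or leave
### it as "" to let the image builder choose the default buildspec file.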