ARG PYTHON=python3
ARG PYTHON_VERSION=3.10.8
ARG PYTHON_SHORT_VERSION=3.10
ARG MAMBA_VERSION=23.1.0-1
ARG PYTORCH_VERSION_EC2=2.0.1
ARG PYTORCH_VERSION_SM=2.0.0

# SMD binaries
ARG SMD_DATA_PARALLEL_URL=https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.0/cu118/2023-03-20/smdistributed_dataparallel-1.8.0-cp310-cp310-linux_x86_64.whl
ARG SMD_MODEL_PARALLEL_URL=https://sagemaker-distributed-model-parallel.s3.us-west-2.amazonaws.com/pytorch-2.0.0/build-artifacts/2023-04-14-20-14/smdistributed_modelparallel-1.15.0-cp310-cp310-linux_x86_64.whl

# ZeRO-2D binary
ARG ZERO_2D_URL=https://aws-deepspeed-zero-2d-binaries.s3.us-west-2.amazonaws.com/r2.0.0/20230407-184728/1ea3d4b6aa41fe66277daacbb78b3743a310d85a/deepspeed-0.6.1%2B1ea3d4b-py3-none-any.whl

FROM nvidia/cuda:11.8.0-base-ubuntu20.04 AS base_image

ENV DEBIAN_FRONTEND=noninteractive \
    LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/lib"

RUN apt-get update \
 && apt-get upgrade -y \
 && apt-get autoremove -y \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

FROM base_image AS common

LABEL maintainer="Amazon AI"
LABEL dlc_major_version="1"

ARG PYTHON
ARG PYTHON_VERSION
ARG PYTHON_SHORT_VERSION
ARG MAMBA_VERSION
ARG CUBLAS_VERSION=11.11.3.6
ARG EFA_PATH=/opt/amazon/efa

# This arg is required to stop the docker build from waiting for region configuration
# while installing tzdata on Ubuntu 20.04.
ARG DEBIAN_FRONTEND=noninteractive

# Python won't try to write .pyc or .pyo files on the import of source modules.
# Force stdin, stdout and stderr to be totally unbuffered. Good for logging.
ENV CUDA_HOME=/opt/conda/
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV LD_LIBRARY_PATH="/usr/local/lib:${LD_LIBRARY_PATH}"
ENV LD_LIBRARY_PATH="/opt/conda/lib:${LD_LIBRARY_PATH}"
ENV PYTHONIOENCODING=UTF-8
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
ENV PATH=/opt/conda/bin:$PATH
ENV TORCH_CUDA_ARCH_LIST="3.7 5.0 7.0+PTX 8.0"
ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
ENV CUDNN_VERSION=8.7.0.84
ENV NCCL_VERSION=2.16.2
ENV HOROVOD_VERSION=0.26.1
ENV EFA_VERSION=1.21.0
ENV OMPI_VERSION=4.1.5
ENV BRANCH_OFI=1.5.0-aws
ENV GDRCOPY_VERSION=2.3.1
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
ENV OPEN_MPI_PATH=/opt/amazon/openmpi
ENV DGLBACKEND=pytorch
ENV MANUAL_BUILD=0
ENV RDMAV_FORK_SAFE=1
ENV DLC_CONTAINER_TYPE=training

RUN apt-get update \
 # TODO: Remove systemd upgrade once it is updated in the base image
 && apt-get -y upgrade --only-upgrade systemd \
 && apt-get install -y --allow-change-held-packages --no-install-recommends \
    build-essential \
    ca-certificates \
    cmake \
    # cuda-toolkit-11-8 \
    # cuda-command-line-tools-11-8 \
    # cuda-nvcc-11-8 \
    # libcudnn8=$CUDNN_VERSION-1+cuda11.8 \
    curl \
    emacs \
    git \
    hwloc \
    jq \
    libcurl4-openssl-dev \
    libglib2.0-0 \
    libgl1-mesa-glx \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgomp1 \
    libibverbs-dev \
    libhwloc-dev \
    libnuma1 \
    libnuma-dev \
    libssl1.1 \
    libssl-dev \
    libtool \
    openssl \
    python3-dev \
    unzip \
    vim \
    wget \
    zlib1g-dev \
    autoconf \
    pkg-config \
    check \
    libsubunit0 \
    libsubunit-dev \
 && rm -rf /var/lib/apt/lists/* \
 && apt-get clean

# For conda SSL verification
ENV REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt

RUN curl -L -o ~/mambaforge.sh https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh \
 && chmod +x ~/mambaforge.sh \
 && ~/mambaforge.sh -b -p /opt/conda \
 && rm ~/mambaforge.sh
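# An illustrative sanity check one could add here (hypothetical, not part of the
# official recipe): confirm that mamba resolves and that the interpreter on PATH is
# the conda-provided one before layering packages on top of it.
# RUN /opt/conda/bin/mamba --version && which python && python --version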
# Install common conda packages
RUN /opt/conda/bin/mamba install -y --override-channels \
    -c https://aws-ml-conda-ec2.s3.us-west-2.amazonaws.com \
    -c nvidia/label/cuda-11.8.0 \
    pytorch-cuda=11.8 cuda-toolkit=11.8 \
 && /opt/conda/bin/mamba install -y -c conda-forge \
    pyopenssl \
    requests \
    libgcc \
    pybind11 \
    cmake \
    python=$PYTHON_VERSION \
    cython \
    mkl \
    mkl-include \
    parso \
    typing \
    conda-content-trust \
    charset-normalizer \
    packaging \
    opencv \
    awscli \
    boto3 \
    pyyaml \
    scipy \
    click \
    psutil \
    ipython \
    # Adding package for studio kernels
    ipykernel \
    # patch CVE
    "cryptography>39.0.0" \
    # patch CVE
    "pillow>=9.4" \
    # Force conda to install h5py with the openmpi build, not mpich (rubik error)
    # https://anaconda.org/conda-forge/h5py/files?page=2
    "mpi4py>=3.1.4,<3.2" \
    h5py \
 && /opt/conda/bin/mamba install -y -c dglteam/label/cu118 dgl \
 && /opt/conda/bin/mamba clean -afy \
 && rm -rf /etc/apt/sources.list.d/*

# Install NCCL
RUN cd /tmp \
 && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION}-1 \
 && cd nccl \
 && make -j64 src.build BUILDDIR=/usr/local CUDA_LIB=${CUDA_HOME}/lib \
 && rm -rf /tmp/nccl

# Install EFA alone, without AWS OpenMPI
RUN mkdir /tmp/efa \
 && cd /tmp/efa \
 && curl -O https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-${EFA_VERSION}.tar.gz \
 && tar -xf aws-efa-installer-${EFA_VERSION}.tar.gz \
 && cd aws-efa-installer \
 && apt-get update \
 && ./efa_installer.sh -y --skip-kmod -g \
 && rm -rf $OPEN_MPI_PATH \
 && rm -rf /tmp/efa \
 && rm -rf /tmp/aws-efa-installer-${EFA_VERSION}.tar.gz \
 && rm -rf /var/lib/apt/lists/* \
 && apt-get clean

# Install OpenMPI without libfabric support
RUN mkdir /tmp/openmpi \
 && cd /tmp/openmpi \
 && wget --quiet https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OMPI_VERSION}.tar.gz \
 && tar zxf openmpi-${OMPI_VERSION}.tar.gz \
 && cd openmpi-${OMPI_VERSION} \
 && ./configure --enable-orterun-prefix-by-default --prefix=$OPEN_MPI_PATH --with-cuda=${CUDA_HOME} \
 && make -j $(nproc) all \
 && make install \
 && ldconfig \
 && cd / \
 && rm -rf /tmp/openmpi \
 && ompi_info --parsable --all | grep mpi_built_with_cuda_support:value

ENV PATH="$OPEN_MPI_PATH/bin:$EFA_PATH/bin:$PATH"
ENV LD_LIBRARY_PATH=$OPEN_MPI_PATH/lib/:$EFA_PATH/lib/:$LD_LIBRARY_PATH

# Install GDRCopy, a dependency of the SM Distributed DataParallel binary.
# The test binaries require the CUDA driver library, which can be found in conda,
# so update the linker path to point at it and avoid "-lcuda not found" errors.
# Note that starting with CUDA 11.8, CUDA-related libs live in conda instead of /usr/local/cuda.
RUN cd /tmp \
 && git clone https://github.com/NVIDIA/gdrcopy.git -b v${GDRCOPY_VERSION} \
 && cd gdrcopy \
 && sed -ie '12s@$@ -L $(CUDA)/lib/stubs@' tests/Makefile \
 && CUDA=/opt/conda make install \
 && rm -rf /tmp/gdrcopy

RUN pip install --no-cache-dir --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org \
 && ln -s /opt/conda/bin/pip /usr/local/bin/pip3

WORKDIR /root

# Configure Open MPI and NCCL parameters
RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \
 && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \
 && echo "${OPEN_MPI_PATH}/bin/mpirun.real --allow-run-as-root \"\$@\"" >> $OPEN_MPI_PATH/bin/mpirun \
 && chmod a+x $OPEN_MPI_PATH/bin/mpirun \
 && echo "hwloc_base_binding_policy = none" >> $OPEN_MPI_PATH/etc/openmpi-mca-params.conf \
 && echo "rmaps_base_mapping_policy = slot" >> $OPEN_MPI_PATH/etc/openmpi-mca-params.conf \
 && echo NCCL_DEBUG=INFO >> /etc/nccl.conf \
 && echo NCCL_SOCKET_IFNAME=^docker0 >> /etc/nccl.conf
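# With the wrapper above in place, a plain invocation such as (hypothetical job):
#
#   mpirun -np 8 python train.py
#
# expands to `mpirun.real --allow-run-as-root -np 8 python train.py`, so root-launched
# MPI jobs inside the container work without spelling out the flag. The /etc/nccl.conf
# entries enable INFO-level NCCL logging and keep NCCL socket traffic off the docker0
# bridge interface.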
# Install OpenSSH for MPI to communicate between containers;
# allow OpenSSH to talk to containers without asking for confirmation
RUN apt-get update \
 && apt-get install -y --allow-downgrades --allow-change-held-packages --no-install-recommends \
    openssh-client \
    openssh-server \
 && mkdir -p /var/run/sshd \
 && cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new \
 && echo "    StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new \
 && mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config \
 && rm -rf /var/lib/apt/lists/* \
 && apt-get clean

# Configure OpenSSH so that nodes can communicate with each other
RUN mkdir -p /var/run/sshd \
 && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd

RUN rm -rf /root/.ssh/ \
 && mkdir -p /root/.ssh/ \
 && ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa \
 && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys \
 && printf "Host *\n StrictHostKeyChecking no\n" >> /root/.ssh/config

RUN mkdir -p /etc/pki/tls/certs && cp /etc/ssl/certs/ca-certificates.crt /etc/pki/tls/certs/ca-bundle.crt

RUN curl -o /license.txt https://aws-dlc-licenses.s3.amazonaws.com/pytorch-2.0/license.txt

# Remove the cache, as this is needed for security verification
RUN rm -rf /root/.cache || true

########################################################
#  _____ ____ ____    ___
# | ____/ ___|___ \  |_ _|_ __ ___   __ _  __ _  ___
# |  _|| |     __) |  | || '_ ` _ \ / _` |/ _` |/ _ \
# | |__| |___ / __/   | || | | | | | (_| | (_| |  __/
# |_____\____|_____| |___|_| |_| |_|\__,_|\__, |\___|
#                                         |___/
#  ____           _
# |  _ \ ___  ___(_)_ __   ___
# | |_) / _ \/ __| | '_ \ / _ \
# |  _ <  __/ (__| | |_) |  __/
# |_| \_\___|\___|_| .__/ \___|
#                  |_|
########################################################

FROM common AS ec2

ARG PYTHON
ARG PYTHON_VERSION
ARG PYTHON_SHORT_VERSION

# PyTorch binaries
ARG PT_EC2_TRAINING_URL
ARG PT_TORCHVISION_URL
ARG PT_TORCHAUDIO_URL
ARG PT_TORCHDATA_URL

WORKDIR /

# Install EC2 PyTorch
RUN /opt/conda/bin/mamba install -y pytorch=${PYTORCH_VERSION_EC2} \
    torchvision torchaudio torchtext aws-ofi-nccl-dlc \
    --override-channels \
    -c https://aws-ml-conda-ec2.s3.us-west-2.amazonaws.com \
    -c conda-forge

# Install PyTorch-related python packages (these depend on PyTorch)
RUN /opt/conda/bin/mamba install -y -c conda-forge \
    # needed by fastai.distributed
    accelerate \
 && /opt/conda/bin/mamba install -y -c fastai fastai \
 # fastai conda strictly depends on dataclasses
 && pip uninstall -y dataclasses \
 && pip install torchtnt \
    # explicitly install triton build-time dependencies, which won't get installed by conda for some reason
    cmake \
    lit

# Patches
RUN pip install "pillow>=9.5"

RUN /opt/conda/bin/mamba install -y -c conda-forge \
    "requests>=2.31.0"
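# An illustrative smoke test for the stack assembled above (hypothetical, not part of
# the official build): verify torch reports the pinned version against CUDA 11.8 and
# that the companion packages import.
# RUN python -c "import torch, torchvision, torchtext; print(torch.__version__, torch.version.cuda)"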
# Install Nvidia Apex (needs PyTorch)
## Pin apex commit requested by sm-model-parallel team
RUN git clone https://github.com/NVIDIA/apex \
 && cd apex \
 # recent commit; the torch._six fix is included: https://github.com/NVIDIA/apex/commit/a78ccf0b3e3f7130b3f157732dc8e8e651389922
 && git checkout 27de66c \
 && pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
 && cd .. \
 && rm -rf apex

# Install Horovod (needs PyTorch)
RUN pip uninstall -y horovod \
 # && ldconfig /usr/local/cuda-11.8/targets/x86_64-linux/lib/stubs \
 && HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_CUDA_HOME=${CUDA_HOME} HOROVOD_WITH_PYTORCH=1 pip install --no-cache-dir horovod==${HOROVOD_VERSION} \
 && ldconfig

RUN HOME_DIR=/root \
 && curl -o ${HOME_DIR}/oss_compliance.zip https://aws-dlinfra-utilities.s3.amazonaws.com/oss_compliance.zip \
 && unzip ${HOME_DIR}/oss_compliance.zip -d ${HOME_DIR}/ \
 && cp ${HOME_DIR}/oss_compliance/test/testOSSCompliance /usr/local/bin/testOSSCompliance \
 && chmod +x /usr/local/bin/testOSSCompliance \
 && chmod +x ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh \
 && ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh ${HOME_DIR} ${PYTHON} \
 && rm -rf ${HOME_DIR}/oss_compliance* \
 && rm -rf /tmp/tmp*

# Remove the cache, as this is needed for security verification
RUN rm -rf /root/.cache || true

# Starts framework
CMD ["/bin/bash"]

#################################################################
#  ____                   __  __       _
# / ___|  __ _  __ _  ___|  \/  | __ _| | _____ _ __
# \___ \ / _` |/ _` |/ _ \ |\/| |/ _` | |/ / _ \ '__|
#  ___) | (_| | (_| |  __/ |  | | (_| |   <  __/ |
# |____/ \__,_|\__, |\___|_|  |_|\__,_|_|\_\___|_|
#              |___/
#  ___                              ____           _
# |_ _|_ __ ___   __ _  __ _  ___  |  _ \ ___  ___(_)_ __   ___
#  | || '_ ` _ \ / _` |/ _` |/ _ \ | |_) / _ \/ __| | '_ \ / _ \
#  | || | | | | | (_| | (_| |  __/ |  _ <  __/ (__| | |_) |  __/
# |___|_| |_| |_|\__,_|\__, |\___| |_| \_\___|\___|_| .__/ \___|
#                      |___/                        |_|
#################################################################

FROM common AS sagemaker

LABEL maintainer="Amazon AI"
LABEL dlc_major_version="1"

ARG PYTHON
ARG PYTHON_VERSION
ARG PYTHON_SHORT_VERSION
ARG RMM_VERSION=0.15.0

# The smdebug pipeline relies on the following format to perform a string replace and
# trigger the DLC pipeline that validates the nightly builds. Therefore, when updating
# the smdebug version, please ensure that this format is not disturbed.
ARG SMDEBUG_VERSION=1.0.34

ENV SAGEMAKER_TRAINING_MODULE=sagemaker_pytorch_container.training:main

# SMD model parallel and data parallel binaries
ARG SMD_DATA_PARALLEL_URL
ARG SMD_MODEL_PARALLEL_URL

# ZeRO-2D binary
ARG ZERO_2D_URL

# PyTorch binaries
ARG PT_SM_TRAINING_URL
ARG PT_TORCHVISION_URL
ARG PT_TORCHAUDIO_URL
ARG PT_TORCHDATA_URL

# Install SM PyTorch (a force remove would drastically increase the docker image size)
RUN /opt/conda/bin/mamba install -y pytorch=${PYTORCH_VERSION_SM} \
    torchvision torchaudio torchtext aws-ofi-nccl-dlc \
    --override-channels \
    -c https://aws-ml-conda-pre-prod-ec2.s3.us-west-2.amazonaws.com \
    -c conda-forge

# Install PyTorch-related python packages (these depend on PyTorch)
RUN /opt/conda/bin/mamba install -y -c conda-forge \
    # needed by fastai.distributed
    accelerate \
 && /opt/conda/bin/mamba install -y -c fastai fastai \
 # fastai conda strictly depends on dataclasses
 && pip uninstall -y dataclasses \
 && pip install torchnet \
    # explicitly install triton build-time dependencies, which won't get installed by conda for some reason
    cmake \
    lit

# Patches
RUN pip install "pillow>=9.5"

RUN /opt/conda/bin/mamba install -y -c conda-forge \
    "requests>=2.31.0"

WORKDIR /
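# PyTorch 2.0 removed the private torch._six module, so the apex commit pinned in the
# next step must be patched before it imports cleanly. The two sed calls below amount
# to the following (illustrative) rewrite inside apex/amp/_initialize.py:
#
#   - from torch._six import string_classes
#   - isinstance(x, string_classes)
#   + isinstance(x, str)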
# Install Nvidia Apex (needs PyTorch)
## Pin apex commit requested by sm-model-parallel team
RUN git clone https://github.com/NVIDIA/apex \
 && cd apex \
 # requested by Rubik; will wait for a resolution of the illegal mem access error
 && git checkout aa756ce \
 && sed -i 's/from torch._six import string_classes//g' apex/amp/_initialize.py \
 && sed -i 's/string_classes/str/g' apex/amp/_initialize.py \
 && pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
 && cd .. \
 && rm -rf apex

# Install Horovod (needs PyTorch)
RUN pip uninstall -y horovod \
 # && ldconfig /usr/local/cuda-11.8/targets/x86_64-linux/lib/stubs \
 && HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_CUDA_HOME=${CUDA_HOME} HOROVOD_WITH_PYTORCH=1 pip install --no-cache-dir horovod==${HOROVOD_VERSION} \
 && ldconfig

# Install libboost from source. This package is needed for smdataparallel
# functionality (asynchronous network IO).
RUN wget https://sourceforge.net/projects/boost/files/boost/1.73.0/boost_1_73_0.tar.gz/download -O boost_1_73_0.tar.gz \
 && tar -xzf boost_1_73_0.tar.gz \
 && cd boost_1_73_0 \
 && ./bootstrap.sh \
 && ./b2 threading=multi --prefix=/opt/conda -j 64 cxxflags=-fPIC cflags=-fPIC install || true \
 && cd .. \
 && rm -rf boost_1_73_0.tar.gz \
 && rm -rf boost_1_73_0 \
 && cd /opt/conda/include/boost

WORKDIR /opt/pytorch

# Copy workaround script for incorrect hostname
COPY changehostname.c /
COPY start_with_right_hostname.sh /usr/local/bin/start_with_right_hostname.sh
RUN chmod +x /usr/local/bin/start_with_right_hostname.sh

WORKDIR /root

RUN pip install --no-cache-dir -U \
    smclarify \
    "sagemaker>=2,<3" \
    "sagemaker-experiments<1" \
    sagemaker-pytorch-training \
    flash-attn==0.2.8 \
    triton==2.0.0.dev20221202

# Install smdebug from source
RUN cd /tmp \
 && git clone https://github.com/awslabs/sagemaker-debugger --depth 1 --single-branch --branch ${SMDEBUG_VERSION} \
 && cd sagemaker-debugger \
 && pip install . \
 && rm -rf /tmp/*

# Install extra packages
RUN /opt/conda/bin/mamba install -y -c conda-forge \
    bokeh \
    imageio \
    numba \
    pandas \
    plotly \
    shap \
    scikit-learn \
    seaborn \
 && /opt/conda/bin/mamba clean -afy

# Install METIS
RUN rm -rf /etc/apt/sources.list.d/* \
 && git clone https://github.com/KarypisLab/GKlib \
 && cd GKlib \
 && make config \
 && make \
 && make install \
 && cd .. \
 && git clone https://github.com/KarypisLab/METIS.git \
 && cd METIS \
 && make config shared=1 cc=gcc prefix=/root/local \
 && make install \
 && cd .. \
 && rm -rf METIS GKlib \
 && rm -rf /var/lib/apt/lists/* \
 && apt-get clean
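# Hypothetical post-install check (illustrative only, not part of the official
# recipe): confirm the shared METIS build above actually landed under the
# /root/local prefix passed to `make config`.
# RUN test -f /root/local/lib/libmetis.so && test -f /root/local/include/metis.h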
# Install RAPIDS Memory Manager (RMM).
# Requires cmake>=3.14.
RUN wget -nv https://github.com/rapidsai/rmm/archive/v${RMM_VERSION}.tar.gz \
 && tar -xvf v${RMM_VERSION}.tar.gz \
 && cd rmm-${RMM_VERSION} \
 && INSTALL_PREFIX=/usr/local ./build.sh librmm \
 && cd .. \
 && rm -rf v${RMM_VERSION}.tar* \
 && rm -rf rmm-${RMM_VERSION}

# Install the ZeRO-2D binary
RUN pip install --no-cache-dir -U ${ZERO_2D_URL}

# Install the SM Distributed Modelparallel binary
RUN pip install --no-cache-dir -U ${SMD_MODEL_PARALLEL_URL}

# Install the SM Distributed DataParallel binary
RUN SMDATAPARALLEL_PT=1 pip install --no-cache-dir ${SMD_DATA_PARALLEL_URL}

ENV LD_LIBRARY_PATH="/opt/conda/lib/python${PYTHON_SHORT_VERSION}/site-packages/smdistributed/dataparallel/lib:$LD_LIBRARY_PATH"

WORKDIR /

RUN HOME_DIR=/root \
 && curl -o ${HOME_DIR}/oss_compliance.zip https://aws-dlinfra-utilities.s3.amazonaws.com/oss_compliance.zip \
 && unzip ${HOME_DIR}/oss_compliance.zip -d ${HOME_DIR}/ \
 && cp ${HOME_DIR}/oss_compliance/test/testOSSCompliance /usr/local/bin/testOSSCompliance \
 && chmod +x /usr/local/bin/testOSSCompliance \
 && chmod +x ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh \
 && ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh ${HOME_DIR} ${PYTHON} \
 && rm -rf ${HOME_DIR}/oss_compliance* \
 && rm -rf /tmp/tmp*

# Remove the cache, as this is needed for security verification
RUN rm -rf /root/.cache || true

ENTRYPOINT ["bash", "-m", "start_with_right_hostname.sh"]
CMD ["/bin/bash"]
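# Example build/run invocations for this multi-stage file (image tags are hypothetical):
#
#   docker build --target ec2       -t pytorch-training:2.0.1-gpu-py310-cu118-ec2 .
#   docker build --target sagemaker -t pytorch-training:2.0.0-gpu-py310-cu118-sagemaker .
#   docker run --rm -it --gpus all pytorch-training:2.0.1-gpu-py310-cu118-ec2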