FROM nvidia/cuda:11.2.2-base-ubuntu20.04 AS base_image

LABEL maintainer="Amazon AI"
LABEL dlc_major_version="1"
# Specify accept-bind-to-port LABEL for inference pipelines to use SAGEMAKER_BIND_TO_PORT
# https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-real-time.html
LABEL com.amazonaws.sagemaker.capabilities.accept-bind-to-port=true

ARG MMS_VERSION=1.1.8
ARG PYTHON=python3
ARG PYTHON_VERSION=3.9.10
ARG OPEN_MPI_VERSION=4.1.4
ARG MAMBA_VERSION=22.9.0-1

# HF ARGS
ARG TF_URL=https://framework-binaries.s3.us-west-2.amazonaws.com/tensorflow/r2.11_aws/gpu/2023-03-28-20-19/tensorflow_gpu-2.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
ARG TRANSFORMERS_VERSION

ENV DLC_CONTAINER_TYPE=inference
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    LD_LIBRARY_PATH="/opt/conda/lib/:${LD_LIBRARY_PATH}:/usr/local/lib" \
    PYTHONIOENCODING=UTF-8 \
    LANG=C.UTF-8 \
    LC_ALL=C.UTF-8 \
    TEMP=/home/model-server/tmp \
    DEBIAN_FRONTEND=noninteractive
ENV PATH=/opt/conda/bin:$PATH

RUN apt-get update \
# TODO: Remove upgrade statements once packages are updated in base image
 && apt-get -y upgrade --only-upgrade systemd openssl cryptsetup \
 && apt-get install -y --no-install-recommends \
    ca-certificates \
    build-essential \
    libssl1.1 \
    openssl \
    openjdk-11-jdk-headless \
    vim \
    wget \
    curl \
    emacs \
    unzip \
    git \
    libnuma1 \
    libsndfile1-dev \
    ffmpeg \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# https://github.com/docker-library/openjdk/issues/261 https://github.com/docker-library/openjdk/pull/263/files
RUN keytool -importkeystore -srckeystore /etc/ssl/certs/java/cacerts -destkeystore /etc/ssl/certs/java/cacerts.jks -deststoretype JKS -srcstorepass changeit -deststorepass changeit -noprompt; \
    mv /etc/ssl/certs/java/cacerts.jks /etc/ssl/certs/java/cacerts; \
    /var/lib/dpkg/info/ca-certificates-java.postinst configure;

# Install CondaForge miniconda
# Note: SSL verification disabled as it breaks installs due to downgrading from py3.10 to py3.9
RUN curl -L -o ~/mambaforge.sh https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh \
 && chmod +x ~/mambaforge.sh \
 && ~/mambaforge.sh -b -p /opt/conda \
 && rm ~/mambaforge.sh \
 && /opt/conda/bin/conda config --set ssl_verify False \
 && /opt/conda/bin/conda update -y conda \
 && /opt/conda/bin/conda install -c conda-forge \
    python=${PYTHON_VERSION} \
    cython \
    mkl \
    mkl-include \
    botocore \
    parso \
    typing \
    h5py \
    requests \
# Below 2 are included in the miniconda base, but not mamba, so they need to be installed explicitly
    conda-content-trust \
    charset-normalizer \
 && /opt/conda/bin/conda clean -ya

RUN pip install --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org \
 && ln -s /opt/conda/bin/pip /usr/local/bin/pip3 \
 && pip install packaging \
    enum-compat==0.0.3 \
# Cap the version number to avoid potential issues with a new major version
    "urllib3>=1.25.9" \
    cryptography==39.0.2

RUN wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OPEN_MPI_VERSION}.tar.gz \
 && gunzip -c openmpi-$OPEN_MPI_VERSION.tar.gz | tar xf - \
 && cd openmpi-$OPEN_MPI_VERSION \
 && ./configure --prefix=/home/.openmpi \
 && make all install \
 && cd .. \
 && rm openmpi-$OPEN_MPI_VERSION.tar.gz \
 && rm -rf openmpi-$OPEN_MPI_VERSION
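# The two ENV updates below put the freshly built OpenMPI on PATH and on the
# dynamic loader path, so mpirun and libmpi are visible both to any later
# build steps and to processes at container runtime.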
ENV PATH="$PATH:/home/.openmpi/bin"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/home/.openmpi/lib/"

WORKDIR /

RUN pip install --no-cache-dir \
    multi-model-server==$MMS_VERSION \
    sagemaker-inference

RUN useradd -m model-server \
 && mkdir -p /home/model-server/tmp \
 && chown -R model-server /home/model-server

COPY mms-entrypoint.py /usr/local/bin/dockerd-entrypoint.py
COPY config.properties /etc/sagemaker-mms.properties

RUN chmod +x /usr/local/bin/dockerd-entrypoint.py

COPY deep_learning_container.py /usr/local/bin/deep_learning_container.py

RUN chmod +x /usr/local/bin/deep_learning_container.py

#################################
# Hugging Face specific section #
#################################

# Install TF binary
RUN pip install --no-cache-dir -U ${TF_URL}

# We need TF_FORCE_GPU_ALLOW_GROWTH=true to prevent TF from overusing GPU memory when loading models
ENV TF_FORCE_GPU_ALLOW_GROWTH=true

# Install Hugging Face libraries and their dependencies
RUN pip install --no-cache-dir \
    transformers[sentencepiece,audio,vision]==${TRANSFORMERS_VERSION} \
    "protobuf>=3.19.5,<3.20" \
    "numpy>=1.21.5" \
    "sagemaker-huggingface-inference-toolkit<3"

RUN HOME_DIR=/root \
 && curl -o ${HOME_DIR}/oss_compliance.zip https://aws-dlinfra-utilities.s3.amazonaws.com/oss_compliance.zip \
 && unzip ${HOME_DIR}/oss_compliance.zip -d ${HOME_DIR}/ \
 && cp ${HOME_DIR}/oss_compliance/test/testOSSCompliance /usr/local/bin/testOSSCompliance \
 && chmod +x /usr/local/bin/testOSSCompliance \
 && chmod +x ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh \
 && ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh ${HOME_DIR} ${PYTHON} \
 && rm -rf ${HOME_DIR}/oss_compliance* \
 && rm -rf ${HOME_DIR}/.cache

EXPOSE 8080 8081

ENTRYPOINT ["python", "/usr/local/bin/dockerd-entrypoint.py"]
CMD ["serve"]
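
# A minimal, illustrative build-and-run sketch (the image tag is hypothetical;
# TRANSFORMERS_VERSION has no default, so it must be supplied at build time):
#   docker build --build-arg TRANSFORMERS_VERSION=<version> \
#       -t huggingface-tensorflow-inference:gpu .
#   docker run --gpus all -p 8080:8080 huggingface-tensorflow-inference:gpu serve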