# Build an image of Detectron2 with SageMaker Multi Model Server: https://github.com/awslabs/multi-model-server
# using the SageMaker PyTorch container as base image
# from https://github.com/aws/sagemaker-pytorch-serving-container/

FROM 763104351884.dkr.ecr.eu-central-1.amazonaws.com/pytorch-inference:1.6.0-gpu-py36-cu101-ubuntu16.04

############# Detectron2 pre-built binaries PyTorch default install ############

RUN pip install --upgrade torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html

############# Detectron2 section ##############

RUN pip install --no-cache-dir pycocotools~=2.0.0
RUN pip install --no-cache-dir detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.6/index.html

ENV FORCE_CUDA="1"

# Build D2 only for Turing (G4) and Volta (P3) architectures. Use P3 for batch transform jobs and G4 for inference on endpoints.
ENV TORCH_CUDA_ARCH_LIST="Turing;Volta"

# Set a fixed model cache directory; Detectron2 requires it.
ENV FVCORE_CACHE="/tmp"
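
# A minimal sketch of how this image could be built and pushed to ECR before referencing it in a
# SageMaker model. The repository name "detectron2-sagemaker" and the <account-id> placeholder are
# assumptions, not part of this project; note that pulling the base image above requires a separate
# docker login to the AWS Deep Learning Containers registry (763104351884...).
#
#   docker build -t detectron2-sagemaker:latest .
#   aws ecr get-login-password --region eu-central-1 | docker login --username AWS --password-stdin <account-id>.dkr.ecr.eu-central-1.amazonaws.com
#   docker tag detectron2-sagemaker:latest <account-id>.dkr.ecr.eu-central-1.amazonaws.com/detectron2-sagemaker:latest
#   docker push <account-id>.dkr.ecr.eu-central-1.amazonaws.com/detectron2-sagemaker:latest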