docker run --gpus all --name=test -p 7860:7860 -itd nvcr.io/nvidia/cuda:11.8.0-devel-ubuntu22.04
docker exec -it test bash
apt update && \
  apt install -y python3 python3-pip git && \
  apt-get install -y ffmpeg libsm6 libxext6 libgl1 && \
  cp /usr/bin/python3 /usr/bin/python && \
  git clone https://github.com/threestudio-project/threestudio.git && \
  cd threestudio && \
  python3 -m pip install --upgrade pip && \
  pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118 && \
  pip install -r requirements.txt && \
  pip install fastapi==0.86 && \
  pip install gradio==3.48.0 && \
  pip install pydantic && \
  pip install Schema && \
  pip install -U gradio
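Before launching, it can help to confirm that PyTorch actually sees the GPU inside the container; a quick sanity check (not part of the upstream instructions):

nvidia-smi
python -c "import torch; print(torch.__version__, torch.cuda.is_available())"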
Run the program
python gradio_app.py launch --listen
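With the -p 7860:7860 mapping from the docker run command above, the Gradio UI should then be reachable at http://<host-ip>:7860. A quick reachability check from the host (assuming curl is installed):

curl -I http://localhost:7860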
############
The Dockerfile provided by the official repository
# Reference:
# https://github.com/cvpaperchallenge/Ascender
# https://github.com/nerfstudio-project/nerfstudio
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
ARG USER_NAME=dreamer
ARG GROUP_NAME=dreamers
ARG UID=1000
ARG GID=1000
# Set compute capability for nerfacc and tiny-cuda-nn
# See https://developer.nvidia.com/cuda-gpus and limit number to speed-up build
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0+PTX"
ENV TCNN_CUDA_ARCHITECTURES=90;89;86;80;75;70;61;60
# Speed-up build for RTX 30xx
# ENV TORCH_CUDA_ARCH_LIST="8.6"
# ENV TCNN_CUDA_ARCHITECTURES=86
# Speed-up build for RTX 40xx
# ENV TORCH_CUDA_ARCH_LIST="8.9"
# ENV TCNN_CUDA_ARCHITECTURES=89
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=${CUDA_HOME}/bin:/home/${USER_NAME}/.local/bin:${PATH}
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
ENV LIBRARY_PATH=${CUDA_HOME}/lib64/stubs:${LIBRARY_PATH}
# apt install by root user
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    git \
    libegl1-mesa-dev \
    libgl1-mesa-dev \
    libgles2-mesa-dev \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender1 \
    python-is-python3 \
    python3.10-dev \
    python3-pip \
    wget \
    && rm -rf /var/lib/apt/lists/*
# Change user to non-root user
RUN groupadd -g ${GID} ${GROUP_NAME} \
    && useradd -ms /bin/sh -u ${UID} -g ${GID} ${USER_NAME}
USER ${USER_NAME}
RUN pip install --upgrade pip setuptools ninja
RUN pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118
# Install nerfacc and tiny-cuda-nn before installing requirements.txt
# because these two installations are time consuming and error prone
RUN pip install git+https://github.com/KAIR-BAIR/nerfacc.git@v0.5.2
RUN pip install git+https://github.com/NVlabs/tiny-cuda-nn.git#subdirectory=bindings/torch
COPY requirements.txt /tmp
RUN cd /tmp && pip install -r requirements.txt
WORKDIR /home/${USER_NAME}/threestudio
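If you prefer plain docker over docker-compose, a build-and-run sketch along these lines should work; the image tag and bind-mount path are my own choices, and it assumes the commands are run from the repository root so that docker/Dockerfile and requirements.txt are inside the build context:

docker build -t threestudio:dev -f docker/Dockerfile .
docker run --gpus all --shm-size=4g -p 7860:7860 -v "$(pwd)":/home/dreamer/threestudio -it threestudio:dev bash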
############
The official docker-compose.yml
services:
  threestudio:
    build:
      context: ../
      dockerfile: docker/Dockerfile
      args:
        # you can set environment variables, otherwise default values will be used
        USER_NAME: ${HOST_USER_NAME:-dreamer} # export HOST_USER_NAME=$USER
        GROUP_NAME: ${HOST_GROUP_NAME:-dreamers}
        UID: ${HOST_UID:-1000} # export HOST_UID=$(id -u)
        GID: ${HOST_GID:-1000} # export HOST_GID=$(id -g)
    shm_size: '4gb'
    environment:
      NVIDIA_DISABLE_REQUIRE: 1 # avoid wrong `nvidia-container-cli: requirement error`
    tty: true
    volumes:
      - ../:/home/${HOST_USER_NAME:-dreamer}/threestudio
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
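To bring the stack up with this compose file, something like the following should work; it assumes the file lives in the repo's docker/ directory (adjust the path if the repo ships it elsewhere) and that the Docker Compose v2 plugin is installed:

cd docker
export HOST_USER_NAME=$USER HOST_UID=$(id -u) HOST_GID=$(id -g)
docker compose up -d --build
docker compose exec threestudio bash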