FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        git \
        git-lfs \
        wget \
        curl \
        # python build dependencies
        build-essential \
        libssl-dev \
        zlib1g-dev \
        libbz2-dev \
        libreadline-dev \
        libsqlite3-dev \
        libncursesw5-dev \
        xz-utils \
        tk-dev \
        libxml2-dev \
        libxmlsec1-dev \
        libffi-dev \
        liblzma-dev \
        golang-1.22-go \
        nvidia-driver-550 \
        ffmpeg

# Run as a non-root user (UID 1000, the default for Hugging Face Spaces).
ENV USER=user
RUN useradd -m -u 1000 ${USER}
USER ${USER}
ENV HOME=/home/${USER} \
    PATH=${HOME}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH}

WORKDIR ${HOME}/app

# Install Python via pyenv, then the app's Python dependencies.
RUN curl https://pyenv.run | bash
ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
ARG PYTHON_VERSION=3.10.13
RUN pyenv install ${PYTHON_VERSION} && \
    pyenv global ${PYTHON_VERSION} && \
    pyenv rehash && \
    pip install --no-cache-dir -U pip setuptools wheel && \
    pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"

COPY --chown=1000 . ${HOME}/app

# Fetch the Ollama and llama.cpp sources, plus llama.cpp's Python
# dependencies (used by its GGUF conversion scripts).
RUN git clone https://github.com/ollama/ollama
RUN git clone https://github.com/ggerganov/llama.cpp
RUN pip install -r llama.cpp/requirements.txt

COPY groups_merged.txt ${HOME}/app/llama.cpp/.

# Runtime configuration for Gradio, Hugging Face transfers, CUDA, and Ollama.
ENV PYTHONPATH=${HOME}/app \
    PYTHONUNBUFFERED=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    TQDM_POSITION=-1 \
    TQDM_MININTERVAL=1 \
    SYSTEM=spaces \
    LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
    NVIDIA_DRIVER_CAPABILITIES=compute,utility \
    NVIDIA_VISIBLE_DEVICES=all \
    OLLAMA_HOST=0.0.0.0

# EXPOSE 11434

ENTRYPOINT ["/bin/sh", "start.sh"]
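
# A sketch of the start.sh this image expects (an assumption — the real script
# ships with the application code copied in above). Given the cloned sources
# and the env vars set here, it would plausibly build the cloned Ollama server,
# start it in the background, and then launch the Gradio app:
#
#   #!/bin/sh
#   # go 1.22 comes from the versioned Ubuntu package, so add it to PATH first
#   export PATH=/usr/lib/go-1.22/bin:$PATH
#   (cd ollama && go build -o ollama .)
#   ./ollama/ollama serve &    # serves the Ollama API on ${OLLAMA_HOST}:11434
#   python app.py              # app.py is assumed; starts the Gradio UI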