# syntax=docker/dockerfile:1
# Cran-May's picture
# Duplicate from matthoffner/falcon-40b-instruct-ggml
# 2f98890
FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04

# Make the CUDA toolchain (nvcc) visible to source builds below.
ENV PATH="/usr/local/cuda/bin:$PATH"

# Model coordinates on the Hugging Face Hub, `ggmlv3` is the branch.
# NOTE: LFS binaries must be fetched via /resolve/, not /raw/ — /raw/
# returns the small Git-LFS pointer text file instead of the model.
ENV MODEL_NAME="falcon-40b-instruct-GGML"
ENV MODEL_FILE="falcon40b-instruct.ggmlv3.q4_K_S.bin"
ENV MODEL_URL="https://huggingface.co./TheBloke/${MODEL_NAME}/resolve/ggmlv3/${MODEL_FILE}"

# OS-level build/runtime dependencies. Use apt-get (stable CLI) rather
# than apt, keep update+install in one layer so the package index can
# never go stale, and drop the lists to keep the layer small.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        python3 \
        python3-pip \
        wget && \
    rm -rf /var/lib/apt/lists/*
# Build context lives in /app while dependencies are compiled.
WORKDIR /app

# Fail a RUN if any command in a pipeline fails — the default /bin/sh
# would otherwise mask a wget error in the pipe below.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install a prebuilt CMake into /usr/local. wget is already installed in
# the apt layer above; a second bare `apt-get install` here would run
# against a stale (removed) package index and is redundant.
RUN wget -qO- "https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.tar.gz" \
    | tar --strip-components=1 -xz -C /usr/local
# Copy only the dependency manifest first so this layer stays cached
# until requirements.txt itself changes.
COPY requirements.txt ./

# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip3 install --no-cache-dir --upgrade pip && \
    pip3 install --no-cache-dir -r requirements.txt
# Build ctransformers from source with its ggllm.cpp submodule (Falcon
# GGML support). NOTE(review): `checkout master && pull` tracks a moving
# branch — pin a commit SHA here for reproducible builds.
RUN git clone --recursive https://github.com/marella/ctransformers.git /app/ctransformers
WORKDIR /app/ctransformers
RUN git submodule update --init models/submodules/ggllm.cpp && \
    git -C models/submodules/ggllm.cpp checkout master && \
    git -C models/submodules/ggllm.cpp pull

# Compile with cuBLAS support (CT_CUBLAS=1) via the forced CMake build.
RUN CT_CUBLAS=1 FORCE_CMAKE=1 pip3 install --no-cache-dir .

# Restore the app working directory for the steps that follow.
WORKDIR /app
# Optional: bake the model file into the image (very large; left
# disabled — presumably the app downloads it at runtime instead).
# RUN wget -O /app/${MODEL_FILE} ${MODEL_URL}

# Create an unprivileged runtime user (UID 1000 is the convention on
# Hugging Face Spaces — confirm against the deployment target).
RUN useradd -m -u 1000 user

# If the model was baked in above, move it to the user's app dir and
# hand over ownership:
# RUN mkdir -p /home/user/app && mv /app/${MODEL_FILE} /home/user/app
# RUN chown user:user /home/user/app/${MODEL_FILE}

# Everything from here on runs (and is owned) as the unprivileged user.
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Copy the application source; explicit user:group avoids relying on the
# default-group fallback of --chown.
COPY --chown=user:user . .
# Document the service port (EXPOSE does not publish it; map with -p at run time).
EXPOSE 7860
# Launch the demo app in exec form so python3 is PID 1 and receives SIGTERM.
# NOTE(review): earlier comment said "uvicorn", but this runs demo.py
# directly — presumably demo.py starts the server itself; confirm.
CMD ["python3", "demo.py", "--host", "0.0.0.0", "--port", "7860"]