# starchat-ggml / Dockerfile
# Duplicated from matthoffner/ggml-ctransformers-fastapi
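# Official Python base image (Debian-based, non-slim), which already ships git, curl, and wget used below;
# run Python unbuffered and serve the app from /app on port 8000.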
FROM python:latest
ENV PYTHONUNBUFFERED=1
EXPOSE 8000
WORKDIR /app
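# Install CMake 3.17.0 into /usr/local (used to configure the ggml build below).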
RUN wget -qO- "https://cmake.org/files/v3.17/cmake-3.17.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
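# Install Python dependencies first so this layer is cached independently of the app code.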
COPY requirements.txt ./
RUN pip install --upgrade pip && \
    pip install -r requirements.txt
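# Add the git-lfs apt repository and install git-lfs so the model weights can be pulled when cloning from the Hugging Face Hub.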
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
RUN apt-get install -y git-lfs && git lfs install
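# Fetch ggml and configure the build with CMake.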
RUN git clone https://github.com/ggerganov/ggml && cd ggml && mkdir build && cd build && cmake ..
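# Clone the bigcode/gpt_bigcode-santacoder model repository from the Hugging Face Hub.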
RUN git clone https://huggingface.co./bigcode/gpt_bigcode-santacoder
RUN python ggml/examples/starcoder/convert-hf-to-ggml.py ./gpt_bigcode-santacoder/
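# Build the starcoder inference and quantization binaries.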
RUN cd ggml/build && make -j4 starcoder starcoder-quantize
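# Quantize the converted model to 4-bit (type 3 = q4_1).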
RUN ggml/build/bin/starcoder-quantize models/./gpt_bigcode-santacoder/-ggml.bin ggml-model-q4_1.bin 3
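# Copy the application code (FastAPI server) into the image.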
COPY . .
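# Debug aid: list the final contents of /app in the build log.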
RUN ls -al
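# Serve the FastAPI app with uvicorn on port 8000.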
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]