# Auto-HF-To-GGUF / Dockerfile
# (The following lines are page-scrape residue from the hosting UI, preserved
#  as comments so the file parses as a valid Dockerfile:
#  HirCoir's picture / Update Dockerfile / 7c48734 verified / raw /
#  history blame contribute delete / No virus / 1.45 kB)
FROM debian:11-slim

# Environment variables (values a user is expected to modify).
#
# SECURITY: the original file baked a real Hugging Face token (base64-encoded)
# into ENV, exposing it in `docker history`, in every derived image, and in the
# running container. That token must be considered compromised and revoked.
# Supply it at build time instead (it is only needed by the build-time
# download/upload scripts, never at runtime):
#   docker build --build-arg TOKEN=$(echo -n hf_xxx | base64) .
# An ARG is visible to RUN steps of this stage but does not persist into the
# final image's environment. For full secrecy, prefer a BuildKit secret mount.
ARG TOKEN=""
ENV REPO_ID="Qwen/Qwen2-0.5B-Instruct"
ENV MODEL_HF_DIR="/models"
ENV MODEL_GGUF_DIR="/gguf"
# Optional quantization selector consumed by the conversion scripts (disabled).
#ENV QUANTIZATION_TYPES=Q8_0
# Unprivileged user the final image runs as (uid 1000, with a home dir).
RUN useradd -m -u 1000 app

# Build and runtime OS dependencies.
# `apt-get update` and `install` share one layer so the package index can never
# be stale relative to the install (hadolint DL3009); `--no-install-recommends`
# and removing the index afterwards keep the layer small. `sudo` from the
# original list was dropped: it was never used anywhere in this file.
RUN apt-get update && apt-get install -y --no-install-recommends \
        clang \
        cmake \
        curl \
        gcc \
        git \
        make \
        python3 \
        python3-dev \
        python3-pip \
    && rm -rf /var/lib/apt/lists/*
# Destination directory for the downloaded Hugging Face checkpoint.
RUN mkdir -p ${MODEL_HF_DIR}

# Clone and compile llama.cpp (provides the conversion scripts and the server
# binary used by CMD). A shallow clone is enough for a one-shot build.
# NOTE(review): the clone is unpinned — HEAD of master changes daily; consider
# checking out a fixed tag/commit for reproducible builds.
WORKDIR /root
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp

# Build the project.
WORKDIR /root/llama.cpp
RUN make
# Sanity-check of the main tool (disabled).
#RUN ./main --help

# Python dependencies for the conversion scripts.
# `--no-cache-dir` keeps pip's wheel cache out of the image layer (DL3042).
# The original's separate `pip install huggingface_hub` was redundant: the
# library is already installed as a dependency of huggingface_hub[cli].
RUN pip install --no-cache-dir -r requirements.txt \
    && pip install --no-cache-dir -U "huggingface_hub[cli]"
# Download the HF model, convert it to GGUF, upload the results, and keep only
# the Q2_K quantization for the local server.
# All of it runs in a SINGLE layer: in the original, `rm -R` in later RUN steps
# did not shrink the image — the full HF checkpoint and every extra quantization
# stayed baked into earlier layers. Here the intermediates never reach a layer.
# NOTE(review): download-model.py / export.py / upload.py presumably read
# TOKEN, REPO_ID, MODEL_HF_DIR and MODEL_GGUF_DIR from the environment — their
# sources are not in this file; verify before changing those variables.
COPY *.py ./
RUN python3 download-model.py \
    && python3 export.py \
    && rm -R ${MODEL_HF_DIR} \
    && ls -lh ${MODEL_GGUF_DIR} \
    && python3 upload.py \
    && mv ${MODEL_GGUF_DIR}/*Q2_K.gguf /home/app/model.gguf \
    && rm -R ${MODEL_GGUF_DIR} \
    && mv /root/llama.cpp/ /home/app/ \
    && chown -R app:app /home/app/
# Run the llama.cpp HTTP server as the unprivileged user.
WORKDIR /home/app
USER app
# Port the server binds below; EXPOSE is documentation only — publish with -p.
EXPOSE 7860
CMD ["llama.cpp/server", "-m", "model.gguf", "--host", "0.0.0.0", "--port", "7860"]