FROM debian:11

# Environment variables
# Variables to modify
ENV TOKEN="jipsuljWyQrQNHoqfjDupxdLvSmlHInmNN"
ENV MODEL_TO_CONVERT="cognitivecomputations/dolphin-2.5-mixtral-8x7b"
ENV DEST_REPO="HirCoir/dolphin-2.5-mixtral-8x7b-GGUF"
ENV REPO="https://github.com/ggerganov/llama.cpp"
ENV PREFIX="hf_"
ENV TOKEN_CONCATENADO="${PREFIX}${TOKEN}"
ENV MODEL_DIR="/model"
ENV MODEL_FILES="${MODEL_DIR}/*.gguf"

# Add a non-root user
RUN useradd -m -u 1000 app

# Set the working directory inside the container
WORKDIR /home/app

RUN apt update
RUN apt install -y \
    make \
    cmake \
    clang \
    gcc \
    git \
    curl \
    sudo \
    python3-dev \
    python3 \
    python3-pip \
    python3-venv \
    espeak-ng

WORKDIR /root
RUN git clone https://github.com/rhasspy/piper

# Quote version specifiers so the shell does not treat ">=" as a redirection
RUN pip install -q "cython>=0.29.0" "espeak-phonemizer>=1.1.0" "librosa>=0.9.2" "numpy>=1.19.0" "pytorch-lightning~=1.7.0" "torch~=1.11.0"
RUN pip install -q onnx onnxruntime
RUN pip install -q torchtext==0.12.0

WORKDIR /root/piper/src/python
RUN bash build_monotonic_align.sh
RUN pip install -q torchaudio==0.11.0 torchmetrics==0.11.4
RUN pip install --upgrade gdown
RUN apt install -y zip unzip wget curl
RUN pip install -U "huggingface_hub[cli]"

# Download the model into the specified directory
RUN huggingface-cli download ${MODEL_TO_CONVERT} --local-dir ${MODEL_DIR}
RUN ls; sleep 30
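
# Usage sketch (an assumption, not part of the original file): edit the
# "Variables to modify" block above (TOKEN, MODEL_TO_CONVERT, DEST_REPO),
# then build and run the image. The image tag "model-downloader" is a
# hypothetical name chosen here for illustration.
#
#   docker build -t model-downloader .
#   docker run --rm -it model-downloader ls /model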