# CHAINLIT-RAG / Dockerfile
# Source: AI-RESEARCHER-2024 (commit 601a9ff)
# Use an official slim Python runtime as a parent image
FROM python:3.10-slim

# Runtime environment: unbuffered logs, non-root home, user-local pip bin on PATH
ENV PYTHONUNBUFFERED=1
ENV HOME=/home/appuser
ENV APP_HOME=/home/appuser/app
ENV PATH="/home/appuser/.local/bin:${PATH}"

# Install system build dependencies (build-essential/cmake are needed to
# compile native wheels such as llama-cpp-python at pip-install time).
# --no-install-recommends keeps the layer small; the apt list cache is
# removed in the same layer so it never ships in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    curl \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user with a stable UID and give it the app directory
RUN useradd -m -u 1000 appuser && \
    mkdir -p $APP_HOME && \
    chown -R appuser:appuser /home/appuser

# Switch to the non-root user for all remaining steps
USER appuser
WORKDIR $APP_HOME

# Download the model EARLY: it is multi-GB and changes rarely, so it must sit
# in a cache-stable layer before requirements/source (which change often).
# Previously it was downloaded after `COPY . .`, so every source edit
# re-triggered the full download.
RUN mkdir -p $APP_HOME/models && \
    wget -O $APP_HOME/models/llama-model.gguf \
      https://huggingface.co./bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-IQ2_M.gguf

# Install Python dependencies before copying the full source tree so the
# (expensive, compiles llama-cpp-python) pip layer is only rebuilt when
# requirements.txt itself changes.
COPY --chown=appuser:appuser requirements.txt $APP_HOME/
RUN pip install --user --no-cache-dir -r requirements.txt

# Copy the application files (most frequently changing layer goes last)
COPY --chown=appuser:appuser . $APP_HOME/

# Create runtime directories Chainlit and the vector store expect
RUN mkdir -p $APP_HOME/.chainlit/files $APP_HOME/mydb

# Create the chainlit.md welcome page. printf has portable, well-defined
# newline handling, unlike echo with backslash escapes (shell-dependent).
RUN printf '%s\n\n%s\n' \
      "# Welcome to RAG Chainlit Application! 👋" \
      "This is a Retrieval-Augmented Generation application using Llama.cpp." \
      > $APP_HOME/chainlit.md

# Document the service port (publish with -p/-P at run time)
EXPOSE 8000

# Lightweight liveness probe so orchestrators can detect a wedged container.
# Generous start period: loading the GGUF model can take a while.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
  CMD curl -fsS http://localhost:8000/ || exit 1

# Start the Chainlit app (exec form: chainlit runs as PID 1 and receives SIGTERM)
CMD ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "8000"]