#!/bin/bash
# Activate the application's Python virtual environment.
source /app/venv/bin/activate

# Start the Ollama server in the background and give it a moment to come up.
echo "Starting Ollama server"
ollama serve &
sleep 1

# MODEL may hold a comma-separated list of model names to pull at startup.
if [ -n "${MODEL}" ]; then
    IFS=',' read -ra MODELS <<< "${MODEL}"
else
    MODELS=()
fi

for m in "${MODELS[@]}"; do
    echo "Pulling $m"
    ollama pull "$m"
    sleep 5
done

# Replace this shell with the Streamlit app so it receives signals directly.
exec streamlit run ./run.py
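
# Note: a more robust alternative to the fixed "sleep 1" above would be to poll
# the Ollama HTTP endpoint until the server answers before pulling models.
# This is only a sketch, not part of the original script; it assumes curl is
# available in the image and that Ollama listens on its default port 11434:
#
#   until curl -sf http://localhost:11434/ > /dev/null; do
#       echo "Waiting for Ollama server..."
#       sleep 1
#   done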