# lightrag/start.sh — container entrypoint
#!/usr/bin/env bash
# Container entrypoint: start the Ollama daemon, pre-pull the models the
# app needs, then replace this shell with the LightRAG API server.
#
# Environment variables read:
#   EMBEDDING_MODEL  embedding model to pull (default: nomic-embed-text)
#   LLM_BINDING      when set to "ollama", also pull the generation model
#   LLM_MODEL        generation model to pull (default: llama2)
set -euo pipefail

readonly OLLAMA_URL="http://127.0.0.1:11434"

# Launch the Ollama daemon in the background; the model pulls and the
# LightRAG server below talk to it over HTTP.
ollama serve &

# Wait (up to 60 s, polling once per second) for the Ollama HTTP API to
# come up before attempting any pulls.
ready=0
for _ in {1..60}; do
  if curl -fsS "${OLLAMA_URL}/api/tags" >/dev/null; then
    ready=1
    break
  fi
  sleep 1
done
if [[ "$ready" -ne 1 ]]; then
  # Keep going — the pulls below are best-effort anyway — but record why
  # they are likely to fail instead of timing out silently.
  printf 'warning: Ollama API not reachable after 60s; continuing anyway\n' >&2
fi

# Pre-pull the embedding model and, when the LLM binding is Ollama, the
# generation model. '|| true' is deliberate: a failed pull (offline, model
# already cached, registry hiccup) must not prevent the server from starting.
ollama pull "${EMBEDDING_MODEL:-nomic-embed-text}" || true
if [[ "${LLM_BINDING:-}" == "ollama" ]]; then
  ollama pull "${LLM_MODEL:-llama2}" || true
fi

# Replace this shell with the LightRAG server so it receives signals
# directly (typically PID 1 in the container). If the platform requires a
# custom port, change to: --port "${PORT:-9621}"
exec python -m lightrag.api.lightrag_server