#!/bin/bash
# ============================================================
# Installation script: ComfyUI + Wan 2.2 + Ollama
# RunPod pod: anime-op-v3 (A100 PCIe)
# Francis B. Morissette — April 2026
# ============================================================
set -euo pipefail  # exit on errors, unset variables, and failures inside pipelines

echo "============================================"
echo "  ÉTAPE 1/5 : Vérification de l'environnement"
echo "============================================"

# Detect ComfyUI location
COMFYUI_DIR=""
for dir in /workspace/ComfyUI /comfyui /opt/ComfyUI; do
    if [ -d "$dir" ]; then
        COMFYUI_DIR="$dir"
        break
    fi
done

if [ -z "$COMFYUI_DIR" ]; then
    echo "[INFO] ComfyUI non trouvé, installation dans /workspace/ComfyUI..."
    cd /workspace
    git clone https://github.com/comfyanonymous/ComfyUI.git
    cd ComfyUI
    pip install -r requirements.txt
    COMFYUI_DIR="/workspace/ComfyUI"
else
    echo "[OK] ComfyUI trouvé dans: $COMFYUI_DIR"
    cd "$COMFYUI_DIR"
    echo "[INFO] Mise à jour de ComfyUI..."
    git pull || echo "[WARN] git pull échoué, on continue..."
fi

# Ensure directories exist
mkdir -p "$COMFYUI_DIR/models/diffusion_models"
mkdir -p "$COMFYUI_DIR/models/text_encoders"
mkdir -p "$COMFYUI_DIR/models/vae"
mkdir -p "$COMFYUI_DIR/models/clip_vision"
mkdir -p "$COMFYUI_DIR/models/loras"
mkdir -p "$COMFYUI_DIR/models/controlnet"
mkdir -p "$COMFYUI_DIR/custom_nodes"

nvidia-smi --query-gpu=name,memory.total,memory.free --format=csv,noheader || echo "[WARN] nvidia-smi unavailable"
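
# Optional VRAM sanity check (a sketch, not a hard requirement: the 30000 MiB
# threshold is an assumption for running the fp8 14B models comfortably).
FREE_MIB=$(nvidia-smi --query-gpu=memory.free --format=csv,noheader,nounits 2>/dev/null | head -n1 | tr -d ' ' || true)
if [ -n "$FREE_MIB" ] && [ "$FREE_MIB" -lt 30000 ]; then
    echo "[WARN] Only ${FREE_MIB} MiB of free VRAM; fp8 14B generation may run out of memory."
fi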
echo ""

echo "============================================"
echo "  ÉTAPE 2/5 : Installation des custom nodes"
echo "============================================"

cd "$COMFYUI_DIR/custom_nodes"

# ComfyUI Manager
if [ ! -d "ComfyUI-Manager" ]; then
    echo "[INSTALL] ComfyUI Manager..."
    git clone https://github.com/ltdrdata/ComfyUI-Manager.git
else
    echo "[OK] ComfyUI Manager already installed, updating..."
    # Update in a subshell so a failed pull can't strand us in the wrong directory
    (cd ComfyUI-Manager && git pull) || echo "[WARN] git pull failed, continuing..."
fi

# comfyui-ollama
if [ ! -d "comfyui-ollama" ]; then
    echo "[INSTALL] comfyui-ollama..."
    git clone https://github.com/stavsap/comfyui-ollama.git
    # Fall back to installing the ollama client directly if requirements install fails
    pip install -r comfyui-ollama/requirements.txt 2>/dev/null || pip install ollama
else
    echo "[OK] comfyui-ollama already installed, updating..."
    (cd comfyui-ollama && git pull) || echo "[WARN] git pull failed, continuing..."
fi

# ComfyUI-IF_AI_tools
if [ ! -d "ComfyUI-IF_AI_tools" ]; then
    echo "[INSTALL] ComfyUI-IF_AI_tools..."
    git clone https://github.com/if-ai/ComfyUI-IF_AI_tools.git
    pip install -r ComfyUI-IF_AI_tools/requirements.txt 2>/dev/null || echo "[WARN] requirements.txt not found or install failed"
else
    echo "[OK] ComfyUI-IF_AI_tools already installed, updating..."
    (cd ComfyUI-IF_AI_tools && git pull) || echo "[WARN] git pull failed, continuing..."
fi

# ComfyUI-WanVideoWrapper (Kijai - advanced workflows)
if [ ! -d "ComfyUI-WanVideoWrapper" ]; then
    echo "[INSTALL] ComfyUI-WanVideoWrapper (Kijai)..."
    git clone https://github.com/kijai/ComfyUI-WanVideoWrapper.git
    pip install -r ComfyUI-WanVideoWrapper/requirements.txt 2>/dev/null || true
else
    echo "[OK] ComfyUI-WanVideoWrapper already installed, updating..."
    (cd ComfyUI-WanVideoWrapper && git pull) || echo "[WARN] git pull failed, continuing..."
fi

echo ""
echo "============================================"
echo "  ÉTAPE 3/5 : Téléchargement des modèles"
echo "============================================"

pip install -q "huggingface_hub[cli]" 2>/dev/null
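
# Optional download speed-up (an assumption that it helps on RunPod's network):
# hf_transfer is the hub's Rust-based downloader, enabled via an env var.
# Uncomment the line below to use it.
# pip install -q hf_transfer && export HF_HUB_ENABLE_HF_TRANSFER=1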

HF_REPO="Comfy-Org/Wan_2.2_ComfyUI_Repackaged"
MODELS_DIR="$COMFYUI_DIR/models"
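
# Rough free-space check before downloading (a sketch: the 60 GB figure is an
# estimate for 4 x 13.3 GB diffusion models + 6.3 GB encoder + VAE + overhead).
AVAIL_GB=$(df -BG --output=avail "$MODELS_DIR" 2>/dev/null | tail -n1 | tr -dc '0-9' || true)
if [ -n "$AVAIL_GB" ] && [ "$AVAIL_GB" -lt 60 ]; then
    echo "[WARN] Only ${AVAIL_GB} GB free at $MODELS_DIR; the downloads below need roughly 60 GB."
fi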

# Download one file from the repackaged repo into the matching models subdir,
# skipping anything already present.
download_model() {
    local subdir="$1" filename="$2" label="$3"
    echo ""
    echo "$label"
    if [ -f "$MODELS_DIR/$subdir/$filename" ]; then
        echo "  [SKIP] Already present."
        return 0
    fi
    huggingface-cli download "$HF_REPO" \
        "split_files/$subdir/$filename" \
        --local-dir /tmp/wan_download
    mv "/tmp/wan_download/split_files/$subdir/$filename" "$MODELS_DIR/$subdir/"
}

# ---- T2V 14B (Text-to-Video) — fp8 to save disk space ----
download_model diffusion_models wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors \
    "[1/6] Wan 2.2 T2V High Noise 14B (fp8) — 13.3 GB"
download_model diffusion_models wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors \
    "[2/6] Wan 2.2 T2V Low Noise 14B (fp8) — 13.3 GB"

# ---- I2V 14B (Image-to-Video) — fp8 ----
download_model diffusion_models wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors \
    "[3/6] Wan 2.2 I2V High Noise 14B (fp8) — 13.3 GB"
download_model diffusion_models wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors \
    "[4/6] Wan 2.2 I2V Low Noise 14B (fp8) — 13.3 GB"

# ---- Text encoder (UMT5-XXL fp8) ----
download_model text_encoders umt5_xxl_fp8_e4m3fn_scaled.safetensors \
    "[5/6] UMT5-XXL Text Encoder (fp8) — 6.3 GB"

# ---- VAE (the Wan 2.1 VAE is used by the 2.2 14B models) ----
download_model vae wan_2.1_vae.safetensors \
    "[6/6] Wan 2.1 VAE — 0.2 GB"

# Cleanup temp directory
rm -rf /tmp/wan_download

echo ""
echo "============================================"
echo "  ÉTAPE 4/5 : Installation d'Ollama"
echo "============================================"

if ! command -v ollama &>/dev/null; then
    echo "[INSTALL] Ollama..."
    curl -fsSL https://ollama.com/install.sh | sh
else
    echo "[OK] Ollama déjà installé."
fi

# Start Ollama in background
echo "[INFO] Démarrage d'Ollama en arrière-plan..."
nohup ollama serve > /tmp/ollama.log 2>&1 &
sleep 3

# Pull recommended models
echo "[INFO] Téléchargement des modèles Ollama recommandés..."
echo "  - llama3.2 (enrichissement de prompts)..."
ollama pull llama3.2 || echo "[WARN] llama3.2 pull échoué"
echo "  - llava:13b (vision/QA des frames)..."
ollama pull llava:13b || echo "[WARN] llava:13b pull échoué"
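
# Quick smoke test of the prompt-enrichment path (a sketch; the prompt text is
# illustrative). POST /api/generate with "stream": false is Ollama's standard endpoint.
curl -sf http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "prompt": "Rewrite as one detailed anime video prompt: a samurai duel at dawn.",
  "stream": false
}' | head -c 300 && echo "" || echo "[WARN] Ollama smoke test failed"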

echo ""
echo "============================================"
echo "  ÉTAPE 5/5 : Vérification finale"
echo "============================================"

echo ""
echo "--- Modèles diffusion ---"
ls -lh "$MODELS_DIR/diffusion_models/"*.safetensors 2>/dev/null || echo "  [WARN] Aucun modèle trouvé"
echo ""
echo "--- Text Encoders ---"
ls -lh "$MODELS_DIR/text_encoders/"*.safetensors 2>/dev/null || echo "  [WARN] Aucun encoder trouvé"
echo ""
echo "--- VAE ---"
ls -lh "$MODELS_DIR/vae/"*.safetensors 2>/dev/null || echo "  [WARN] Aucun VAE trouvé"
echo ""
echo "--- Custom Nodes ---"
ls -d "$COMFYUI_DIR/custom_nodes/"*/ 2>/dev/null || echo "  [WARN] No custom nodes found"
echo ""
echo "--- Ollama ---"
ollama list 2>/dev/null || echo "  [WARN] Ollama not reachable"

echo ""
echo "============================================"
echo "  INSTALLATION TERMINÉE!"
echo "============================================"
echo ""
echo "Pour démarrer ComfyUI :"
echo "  cd $COMFYUI_DIR"
echo "  python main.py --listen 0.0.0.0 --port 8188"
echo ""
echo "Ollama tourne sur : http://localhost:11434"
echo "ComfyUI sera sur  : http://localhost:8188"
echo ""
echo "Espace disque utilisé par les modèles :"
du -sh "$MODELS_DIR" 2>/dev/null
echo ""
