#!/usr/bin/env bash
# Minimal Cosmos-Predict2.5 example on the DROID dataset.
# Run from the repo root with: conda activate cosmos-predict && ./run_droid_example.sh
#
# Note: the 2B *distilled* model supports only Text2World (text-to-video), so for
# image/video conditioning (Image2World, Video2World) this script uses the 2B
# post-trained model. Checkpoints are downloaded automatically from Hugging Face on
# first run: accept the NVIDIA Open Model License on the model page and authenticate
# with `hf auth login` beforehand.

set -euo pipefail  # fail fast on errors, unset variables, and pipeline failures
cd "$(dirname "$0")"  # resolve paths relative to this script (the repo root)

# Cache locations; override by exporting HF_HOME / UV_CACHE_DIR before running.
export HF_HOME="${HF_HOME:-/data/cameron/vidgen/.cache/huggingface}"
export UV_CACHE_DIR="${UV_CACHE_DIR:-/data/cameron/vidgen/.cache/uv}"
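
# Preflight checks: the checkpoints are gated, so confirm Hugging Face auth up
# front and make sure the sample spec shipped with the repo is present.
# (`hf auth whoami` is assumed available alongside the `hf auth login` step
# mentioned above; the spec check is plain bash.)
if ! hf auth whoami >/dev/null 2>&1; then
  echo "Not logged in to Hugging Face; run: hf auth login" >&2
  exit 1
fi
if [[ ! -f assets/droid/droid_sample.json ]]; then
  echo "Missing input spec: assets/droid/droid_sample.json" >&2
  exit 1
fi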

# Video2World on one DROID clip (2B post-trained)
python examples/inference.py \
  -i assets/droid/droid_sample.json \
  -o outputs/droid_video2world \
  --model=2B/post-trained \
  --inference-type=video2world
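
# (Optional) Text2World with the faster 2B distilled model, per the note above.
# This is a sketch only: the "2B/distilled" model id and "text2world" inference
# type are assumptions inferred from the naming in the header, not verified flag
# values, and the same spec file is reused purely for illustration; check the
# repo docs before uncommenting.
# python examples/inference.py \
#   -i assets/droid/droid_sample.json \
#   -o outputs/droid_text2world \
#   --model=2B/distilled \
#   --inference-type=text2world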

echo "Outputs written to outputs/droid_video2world/"
