import os
import sys
from pathlib import Path

from huggingface_hub import snapshot_download

# ---------------------------------------------------------------------------
# Optional: pull checkpoints and auxiliary assets at startup. Set the
# HF_GESTURELSM_WEIGHTS_REPO environment variable in the Space settings to the
# dataset repo that hosts the pre-trained weights (e.g.
# "username/gesturelsm-assets"); the download below uses repo_type="dataset".
# Files are placed under ckpt/ so the existing config paths keep working.
# ---------------------------------------------------------------------------
BASE_DIR = Path(__file__).parent.resolve()
ROOT_DIR = BASE_DIR.parent.resolve()

# Ensure the project root is on sys.path so intra-repo imports
# (e.g. `optimizers`) resolve at runtime.
if str(ROOT_DIR) not in sys.path:
    sys.path.insert(0, str(ROOT_DIR))

# Quick sanity check in the Space logs to confirm the repo root is visible at
# runtime.
print("[GestureLSM] sys.path:")
for entry in sys.path:
    print("  ", entry)

CKPT_DIR = BASE_DIR / "ckpt"
CKPT_DIR.mkdir(parents=True, exist_ok=True)

weights_repo = os.environ.get("HF_GESTURELSM_WEIGHTS_REPO", "").strip()
if weights_repo:
    snapshot_download(
        repo_id=weights_repo,
        repo_type="dataset",
        local_dir=CKPT_DIR,
        # local_dir_use_symlinks is deprecated (and ignored) in recent
        # huggingface_hub releases; kept for compatibility with older versions.
        local_dir_use_symlinks=False,
        allow_patterns=["*.pth", "*.bin", "*.npz", "*.npy"],
    )

# Ensure expected runtime directories exist so the demo can write outputs.
for relative in ["outputs/audio2pose", "datasets/BEAT_SMPL"]:
    (BASE_DIR / relative).mkdir(parents=True, exist_ok=True)

# Reuse the existing Gradio interface defined in demo.py. Imported late so the
# sys.path fix above is in effect before demo.py's own imports run.
from demo import demo as gesture_demo  # noqa: E402

if __name__ == "__main__":
    # queue(concurrency_count=...) is the Gradio 3.x API; Gradio 4 renamed
    # this argument to default_concurrency_limit.
    gesture_demo.queue(concurrency_count=1).launch(server_name="0.0.0.0", share=False)
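
# ---------------------------------------------------------------------------
# Usage sketch (comments only, so the script stays runnable). The repo id and
# the `app.py` filename below are illustrative assumptions, not shipped
# defaults; substitute your own weights repo.
#
#   Space settings -> Variables:
#     HF_GESTURELSM_WEIGHTS_REPO = "username/gesturelsm-assets"
#
#   Local smoke test (assumes demo.py and its dependencies are installed):
#     HF_GESTURELSM_WEIGHTS_REPO="username/gesturelsm-assets" python app.py
#
# If the variable is unset, the download step is skipped and the demo relies
# on whatever already exists under ckpt/.
# ---------------------------------------------------------------------------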