Commit 48abd32: Initial Hugging Face Spaces deployment
- Docker configuration for HF Spaces
- Flask webapp with TimesFM integration
- Smart port detection (7860 for Docker, 8080 for local; see the sketch below the file list)
- Complete source library
- Sample data for testing
- .dockerignore +92 -0
- .github/workflows/deploy-to-hf-spaces.yml +67 -0
- .gitignore +209 -0
- .python-version +1 -0
- Dockerfile +66 -0
- README.md +152 -0
- README_HF_SPACES.md +152 -0
- data/sample_data.csv +201 -0
- data/sample_data_definition.json +9 -0
- docker-compose.yml +42 -0
- pyproject.toml +14 -0
- src/__init__.py +28 -0
- src/data.py +605 -0
- src/forecast.py +476 -0
- src/interactive_visualization.py +1129 -0
- src/model.py +356 -0
- src/visualization.py +618 -0
- webapp/app.py +758 -0
- webapp/requirements.txt +31 -0
- webapp/static/css/styles.css +425 -0
- webapp/static/js/app.js +1633 -0
- webapp/templates/index.html +372 -0
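
The "smart port detection" mentioned in the commit message lives in webapp/app.py, which is not reproduced on this page. The sketch below is a hypothetical illustration of the idea only (bind to the PORT that the Dockerfile sets to 7860 inside the container, fall back to 8080 locally); names and structure are assumptions, not the committed code.

```python
# Hypothetical sketch of the "smart port detection" described above.
# webapp/app.py is not shown in this diff, so everything here is an
# assumption; only the 7860/8080 behaviour comes from the commit message.
import os
from flask import Flask

app = Flask(__name__)

def resolve_port(local_default: int = 8080) -> int:
    """Use the PORT env var when present (the Dockerfile sets PORT=7860),
    otherwise fall back to the local development default of 8080."""
    return int(os.environ.get("PORT", local_default))

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the app is reachable from outside the container.
    app.run(host="0.0.0.0", port=resolve_port())
```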
.dockerignore
ADDED
@@ -0,0 +1,92 @@
+# Sapheneia TimesFM - Docker Build Exclusions
+# Optimize Docker build by excluding unnecessary files
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+*.egg-info/
+dist/
+build/
+
+# Virtual environments
+.venv/
+venv/
+ENV/
+env/
+
+# IDE and editor files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+.DS_Store
+
+# Git
+.git/
+.gitignore
+.gitattributes
+
+# Documentation (not needed in container)
+*.md
+!README.md
+SAPHENEIA.md
+GEMINI.md
+
+# CI/CD
+.github/
+
+# Local development
+local/
+.claude/
+.cursor/
+.cursorignore
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+*.log
+
+# User uploads (will be created at runtime)
+uploads/
+webapp/uploads/
+
+# Results (will be created at runtime)
+results/
+webapp/results/
+
+# Logs
+logs/
+*.log
+
+# Jupyter notebooks (not needed for webapp)
+notebooks/
+*.ipynb
+.ipynb_checkpoints/
+
+# Environment files
+.env
+.env.*
+
+# Setup scripts (not needed in container)
+setup.sh
+setup_environment.sh
+
+# Lock files (dependencies already in requirements.txt)
+uv.lock
+poetry.lock
+Pipfile.lock
+
+# macOS
+.DS_Store
+.DS_Store?
+._*
+
+# Docker files (no need to copy Docker files into Docker)
+Dockerfile
+docker-compose.yml
+.dockerignore
.github/workflows/deploy-to-hf-spaces.yml
ADDED
@@ -0,0 +1,67 @@
+name: Deploy to Hugging Face Spaces
+
+# Trigger deployment on push to huggingface branch
+on:
+  push:
+    branches:
+      - huggingface
+  # Allow manual deployment from GitHub UI
+  workflow_dispatch:
+
+jobs:
+  sync-to-hf-space:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Fetch all history for proper sync
+          lfs: true  # Enable Git LFS if using large files
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Prepare HF Spaces README
+        run: |
+          # Copy the HF Spaces README to root as README.md
+          # This will be displayed on the HF Spaces page
+          cp README_HF_SPACES.md README.md
+          echo "✅ HF Spaces README prepared"
+
+      - name: Push to Hugging Face Spaces
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          # Configure git
+          git config --global user.email "github-actions[bot]@users.noreply.github.com"
+          git config --global user.name "GitHub Actions Bot"
+
+          # Install huggingface_hub for better sync
+          pip install huggingface_hub
+
+          # Add HF Space as remote (using the format: https://USER:[email protected]/spaces/USERNAME/SPACENAME)
+          # Note: You'll need to set HF_SPACE_NAME in GitHub secrets (format: username/spacename)
+          git remote add hf-space https://USER:$HF_TOKEN@huggingface.co/spaces/${{ secrets.HF_SPACE_NAME }} || true
+
+          # Force push to HF Spaces
+          git push --force hf-space huggingface:main
+
+          echo "✅ Successfully deployed to Hugging Face Spaces!"
+          echo "🚀 Your Space: https://huggingface.co/spaces/${{ secrets.HF_SPACE_NAME }}"
+
+      - name: Deployment Summary
+        run: |
+          echo "## 🎉 Deployment Complete!" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "**Space URL:** https://huggingface.co/spaces/${{ secrets.HF_SPACE_NAME }}" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "**Branch:** huggingface" >> $GITHUB_STEP_SUMMARY
+          echo "**Commit:** ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "### Next Steps" >> $GITHUB_STEP_SUMMARY
+          echo "1. Wait for HF Spaces to rebuild (1-3 minutes)" >> $GITHUB_STEP_SUMMARY
+          echo "2. Check the Space status on Hugging Face" >> $GITHUB_STEP_SUMMARY
+          echo "3. Test the deployed application" >> $GITHUB_STEP_SUMMARY
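
The workflow installs `huggingface_hub` but performs the actual sync with a force `git push`. For comparison, the same upload could be done through the hub client; the following is a minimal sketch, not part of the commit, assuming the `HF_TOKEN` and `HF_SPACE_NAME` secrets are exposed as environment variables:

```python
# Alternative Space sync via huggingface_hub instead of `git push --force`.
# HF_TOKEN and HF_SPACE_NAME mirror the GitHub secrets used by the workflow.
import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
api.upload_folder(
    folder_path=".",                      # repo root prepared by the checkout step
    repo_id=os.environ["HF_SPACE_NAME"],  # "username/spacename"
    repo_type="space",
    commit_message="Deploy from GitHub Actions",
)
```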
.gitignore
ADDED
@@ -0,0 +1,209 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Abstra
+# Abstra is an AI-powered process automation framework.
+# Ignore directories containing user credentials, local state, and settings.
+# Learn more at https://abstra.io/docs
+.abstra/
+
+# Visual Studio Code
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# you could uncomment the following to ignore the enitre vscode folder
+# .vscode/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Cursor
+# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+# refer to https://docs.cursor.com/context/ignore-files
+.cursorignore
+.cursorindexingignore
+
+# Custom files
+GEMINI.md
+SAPHENEIA.md
+CLAUDE.md
+local/
+uploads/
+webapp/uploads/
+
+# Notebooks (not needed for HF Spaces deployment)
+notebooks/
+
+# macOS
+.DS_Store
+.DS_Store?
.python-version
ADDED
@@ -0,0 +1 @@
+3.11
Dockerfile
ADDED
@@ -0,0 +1,66 @@
+# Sapheneia TimesFM - Hugging Face Spaces Dockerfile
+# Optimized for deployment on Hugging Face Spaces with Docker SDK
+
+# Use Python 3.11 slim image for smaller size
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    curl \
+    git \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create non-root user for security (HF Spaces best practice)
+RUN useradd -m -u 1000 sapheneia
+
+# Copy requirements first for better layer caching
+COPY --chown=sapheneia:sapheneia webapp/requirements.txt /app/requirements.txt
+COPY --chown=sapheneia:sapheneia pyproject.toml /app/pyproject.toml
+
+# Install Python dependencies
+# Use PyTorch backend for best compatibility across hardware
+RUN pip install --no-cache-dir -r requirements.txt && \
+    pip install --no-cache-dir \
+    timesfm[torch]>=1.3.0 \
+    jax>=0.7.0 \
+    jaxlib>=0.7.0 \
+    plotly>=5.0.0
+
+# Copy application code
+COPY --chown=sapheneia:sapheneia src/ /app/src/
+COPY --chown=sapheneia:sapheneia webapp/ /app/webapp/
+COPY --chown=sapheneia:sapheneia data/ /app/data/
+
+# Create necessary directories with proper permissions
+# /app/.cache is used for model downloads (writable by sapheneia user)
+RUN mkdir -p /app/webapp/uploads /app/webapp/results /app/logs /app/.cache && \
+    chown -R sapheneia:sapheneia /app
+
+# Set environment variables
+# Use /app/.cache for model caching (guaranteed writable)
+# On HF Spaces with persistent storage, you can override to /data/.cache
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    HF_HOME=/app/.cache \
+    TRANSFORMERS_CACHE=/app/.cache \
+    HF_HUB_CACHE=/app/.cache \
+    FLASK_APP=webapp/app.py \
+    PORT=7860
+
+# Switch to non-root user
+USER sapheneia
+
+# Expose port 7860 (required by Hugging Face Spaces)
+EXPOSE 7860
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
+    CMD curl -f http://localhost:7860/ || exit 1
+
+# Change to webapp directory and run the application
+WORKDIR /app/webapp
+CMD ["python", "app.py"]
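
The HEALTHCHECK above probes the root URL with curl. A quick way to run the same check from the host once the container is up locally (for example via docker-compose.yml below) is sketched here; it is an illustration, not part of the commit:

```python
# Local smoke test mirroring the Dockerfile HEALTHCHECK
# (`curl -f http://localhost:7860/`). Run on the host after the container starts.
import urllib.request

def is_healthy(url: str = "http://localhost:7860/", timeout: float = 10.0) -> bool:
    """Return True when the webapp answers with a 2xx status."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return 200 <= resp.status < 300
    except OSError:
        return False

if __name__ == "__main__":
    print("healthy" if is_healthy() else "unhealthy")
```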
README.md
ADDED
@@ -0,0 +1,152 @@
+---
+title: Sapheneia TimesFM Forecasting
+emoji: 📈
+colorFrom: blue
+colorTo: green
+sdk: docker
+app_port: 7860
+pinned: false
+license: mit
+tags:
+- time-series
+- forecasting
+- timesfm
+- finance
+- machine-learning
+---
+
+# Sapheneia TimesFM: Time Series Forecasting with Google's Foundation Model
+
+A professional web application for time series forecasting powered by Google's TimesFM (Time Series Foundation Model). Built for financial forecasting and analysis with advanced covariates support.
+
+## 🚀 Quick Start
+
+1. **Upload Your Data**: Upload a CSV file with time series data
+2. **Configure Model**: Set context length, horizon, and other parameters
+3. **Add Covariates** (Optional): Enhance forecasts with exogenous variables
+4. **Generate Forecast**: Get point forecasts with prediction intervals
+5. **Visualize & Download**: Interactive charts and downloadable results
+
+## ✨ Features
+
+### Core Capabilities
+- **TimesFM 2.0-500m Model**: State-of-the-art foundation model for time series
+- **Quantile Forecasting**: Prediction intervals for uncertainty quantification
+- **Covariates Support**: Dynamic and static, numerical and categorical variables
+- **Professional Visualizations**: Publication-quality charts with Plotly
+- **Interactive Interface**: User-friendly web application
+
+### Advanced Features
+- **Multi-Series Forecasting**: Process multiple time series simultaneously
+- **Flexible Horizons**: Forecast from 1 to 128 periods ahead
+- **Customizable Context**: Use 64 to 2048 historical data points
+- **Real-time Processing**: Fast inference on CPU
+- **Export Options**: Download forecasts as CSV or HTML charts
+
+## 📊 Data Format
+
+Your CSV file should have:
+- **Date column** as the first column
+- **Time series columns** with numerical values
+- **Data definition JSON** specifying column types:
+
+```json
+{
+  "price": "target",
+  "temperature": "dynamic_numerical",
+  "day_of_week": "dynamic_categorical",
+  "store_id": "static_categorical",
+  "base_sales": "static_numerical"
+}
+```
+
+### Column Types
+- **target**: Main time series to forecast
+- **dynamic_numerical**: Time-varying numerical covariates
+- **dynamic_categorical**: Time-varying categorical covariates
+- **static_numerical**: Series-level numerical features
+- **static_categorical**: Series-level categorical features
+
+## 🎯 Use Cases
+
+### Financial Forecasting
+- Stock price prediction
+- Revenue forecasting
+- Trading volume estimation
+- Risk analysis
+
+### Business Analytics
+- Sales forecasting
+- Demand planning
+- Inventory optimization
+- Customer behavior prediction
+
+### Research & Academia
+- Economic indicators
+- Climate data analysis
+- Experimental time series
+- Comparative studies
+
+## 🔧 Model Configuration
+
+### Recommended Settings
+
+**Quick Testing**:
+- Context Length: 64
+- Horizon Length: 24
+- Backend: CPU
+
+**Production Use**:
+- Context Length: 512-2048
+- Horizon Length: 24-128
+- Backend: CPU (or GPU for faster inference)
+
+### Covariates Configuration
+When using covariates:
+- Dynamic covariates must cover context + horizon periods
+- Use `xreg_mode="xreg + timesfm"` for best results
+- Enable normalization for stability
+- Start with small ridge values (0.0-0.01)
+
+## 📚 About TimesFM
+
+TimesFM is a decoder-only foundation model for time-series forecasting, pre-trained on 100 billion real-world time points. Key features:
+
+- **Foundation Model**: Pre-trained on diverse time series data
+- **Zero-Shot Forecasting**: Works on new data without retraining
+- **Attention-Based**: Leverages transformer architecture
+- **Production-Ready**: Developed and tested by Google Research
+
+Learn more: [TimesFM Research Paper](https://research.google/blog/a-decoder-only-foundation-model-for-time-series-forecasting/)
+
+## 🛠️ Technical Stack
+
+- **Model**: Google TimesFM 2.0-500m (PyTorch)
+- **Backend**: Flask + Python 3.11
+- **Visualization**: Plotly + Matplotlib
+- **ML Libraries**: JAX, NumPy, Pandas, scikit-learn
+- **Deployment**: Docker on Hugging Face Spaces
+
+## 📖 Documentation
+
+- [GitHub Repository](https://github.com/labrem/sapheneia)
+- [Full Documentation](https://github.com/labrem/sapheneia/blob/main/README.md)
+- [TimesFM GitHub](https://github.com/google-research/timesfm)
+
+## 🤝 Contributing
+
+Contributions are welcome! This is a research project focused on advancing TimesFM capabilities for practical applications.
+
+## 📄 License
+
+MIT License - See [LICENSE](https://github.com/labrem/sapheneia/blob/main/LICENSE) for details.
+
+## 🙏 Acknowledgments
+
+- Google Research for the TimesFM foundation model
+- Hugging Face for Spaces infrastructure
+- The open-source time series forecasting community
+
+---
+
+**Note**: This application runs on CPU by default. For faster inference on large datasets, consider using GPU-enabled Spaces.
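
One of the covariate rules in the README ("dynamic covariates must cover context + horizon periods") can be checked directly against the bundled sample files. The sketch below is illustrative only; the context and horizon lengths are the README's "Quick Testing" values, not anything enforced by the app:

```python
# Verify that dynamic covariates in the sample data cover context + horizon.
import json
import pandas as pd

CONTEXT_LEN, HORIZON_LEN = 64, 24  # "Quick Testing" settings from the README

df = pd.read_csv("data/sample_data.csv", parse_dates=["date"])
with open("data/sample_data_definition.json") as f:
    definition = json.load(f)

dynamic_cols = [col for col, role in definition.items() if role.startswith("dynamic")]
required = CONTEXT_LEN + HORIZON_LEN
for col in dynamic_cols:
    available = int(df[col].notna().sum())
    assert available >= required, f"{col}: {available} values < {required} required"
print(f"{len(dynamic_cols)} dynamic covariates cover {required} periods")
```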
README_HF_SPACES.md
ADDED
@@ -0,0 +1,152 @@
+(152 lines, identical to README.md above; the deploy workflow copies this file over README.md for the Space page)
data/sample_data.csv
ADDED
@@ -0,0 +1,201 @@
+date,btc_price,eth_price,sp500_price,vix_index,quarter,asset_category,base_volatility
+2020-01-01,26013.430363827687,1588.7276515800945,3390.1508377686005,10.0,1,cryptocurrency,0.08
+2020-01-08,25100.39101182912,1548.2511933654255,3461.423742310685,17.948094686110654,1,cryptocurrency,0.08
+2020-01-15,27130.33908446903,1715.9709874877444,3506.705131765221,21.053433741879807,1,cryptocurrency,0.08
(195 further weekly rows, 2020-01-22 through 2023-10-11, omitted here)
+2023-10-18,41554.91571380742,2479.01586198241,4149.436660956926,20.105628691371656,4,cryptocurrency,0.08
+2023-10-25,38049.67711549035,2424.2761599260525,4222.000122143037,21.851732902927736,4,cryptocurrency,0.08
data/sample_data_definition.json
ADDED
@@ -0,0 +1,9 @@
{
  "btc_price": "target",
  "eth_price": "dynamic_numerical",
  "sp500_price": "dynamic_numerical",
  "vix_index": "dynamic_numerical",
  "quarter": "dynamic_categorical",
  "asset_category": "static_categorical",
  "base_volatility": "static_numerical"
}
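The definition file maps each CSV column to a TimesFM role: exactly one target, plus dynamic and static covariates that are each numerical or categorical. A minimal sketch of how the two sample files fit together; the file paths and the presence of a `date` header column are assumptions based on this commit, not verified here.

import json

import pandas as pd

# Load the role mapping shipped in this commit (column name -> role).
with open("data/sample_data_definition.json") as f:
    roles = json.load(f)

# Load the sample series; the loader in src/data.py expects a 'date' column.
df = pd.read_csv("data/sample_data.csv", parse_dates=["date"])

target_cols = [col for col, role in roles.items() if role == "target"]
print(target_cols)                      # expected: ['btc_price']
print(df[["date"] + list(roles)].tail())
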
docker-compose.yml
ADDED
@@ -0,0 +1,42 @@
# Sapheneia TimesFM - Docker Compose for Local Testing
# This file allows you to test the Docker setup locally before deploying to HF Spaces
#
# Usage:
#   docker-compose up --build    # Build and start the container
#   docker-compose down          # Stop and remove the container
#   docker-compose logs -f       # View logs

version: '3.8'

services:
  sapheneia:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: sapheneia-timesfm
    ports:
      - "7860:7860"  # Map host port 7860 to container port 7860
    environment:
      - FLASK_ENV=development
      - SECRET_KEY=dev-secret-key-change-in-production
      - HF_HOME=/app/.cache
      - TRANSFORMERS_CACHE=/app/.cache
      - HF_HUB_CACHE=/app/.cache
    volumes:
      # Mount volumes for development (optional - comment out for production-like testing)
      - ./webapp/uploads:/app/webapp/uploads
      - ./webapp/results:/app/webapp/results
      - ./logs:/app/logs
      # Persistent model cache for faster restarts
      - model-cache:/app/.cache
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7860/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

volumes:
  model-cache:
    driver: local
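The compose healthcheck simply curls the root route on port 7860. A rough Python equivalent of that probe for local testing after `docker-compose up --build`; the URL and the retry/timeout values mirror the compose settings and are assumptions for a local run, not part of this commit.

import time
import urllib.request

def wait_until_healthy(url: str = "http://localhost:7860/", retries: int = 3, delay: float = 30.0) -> bool:
    """Poll the webapp root until it answers 200, mirroring the compose healthcheck."""
    for _ in range(retries):
        try:
            with urllib.request.urlopen(url, timeout=10) as resp:
                if resp.status == 200:
                    return True
        except OSError:
            pass  # container may still be starting (start_period is 60s)
        time.sleep(delay)
    return False

if __name__ == "__main__":
    print("healthy" if wait_until_healthy() else "not responding")
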
pyproject.toml
ADDED
@@ -0,0 +1,14 @@
[project]
name = "sapheneia"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "jax>=0.7.0",
    "jaxlib>=0.7.0",
    "jupyter>=1.1.1",
    "matplotlib>=3.10.5",
    "seaborn>=0.13.2",
    "timesfm>=1.3.0",
]
src/__init__.py
ADDED
@@ -0,0 +1,28 @@
"""
Sapheneia TimesFM Library

A comprehensive TimesFM (Google's Time Series Foundation Model) implementation
for financial forecasting and time series analysis with covariates support.

This package provides:
- Model initialization and configuration
- Data processing and validation
- Forecasting with optional covariates
- Professional visualization
- Quantile forecasting capabilities
"""

__version__ = "1.0.0"
__author__ = "Sapheneia Research Team"

from .model import TimesFMModel
from .data import DataProcessor
from .forecast import Forecaster
from .visualization import Visualizer

__all__ = [
    "TimesFMModel",
    "DataProcessor",
    "Forecaster",
    "Visualizer"
]
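The package `__init__` re-exports the four main classes so the webapp and notebooks can import them from one place. A small sketch, assuming the repo root is on `sys.path` and the dependencies listed above (timesfm, matplotlib, ...) are installed.

from src import TimesFMModel, DataProcessor, Forecaster, Visualizer

# DataProcessor needs no model, so it is the cheapest class to exercise.
processor = DataProcessor()
print(type(processor).__name__)   # -> DataProcessor
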
src/data.py
ADDED
@@ -0,0 +1,605 @@
"""
Data Processing and Validation Module

This module provides comprehensive data processing capabilities for TimesFM,
including CSV loading, covariate preparation, and data validation.

Key Features:
- CSV data loading with flexible column configuration
- Automatic data type inference and conversion
- Covariates data preparation and validation
- Data structure formatting for TimesFM input requirements
- Support for dynamic and static, numerical and categorical covariates
"""

import pandas as pd
import numpy as np
import json
import logging
from typing import Dict, List, Union, Optional, Tuple, Any
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)


class DataProcessor:
    """
    Handles data loading, processing, and validation for TimesFM forecasting.

    This class provides methods to load CSV data, process covariates according
    to TimesFM requirements, and validate data structures before forecasting.

    Example:
        >>> processor = DataProcessor()
        >>> data = processor.load_csv_data("data.csv", data_definition)
        >>> inputs, covariates = processor.prepare_forecast_data(
        ...     data, context_len=100, horizon_len=24
        ... )
    """

    def __init__(self):
        """Initialize the DataProcessor."""
        self.data = None
        self.data_definition = None
        self.processed_data = None

    def load_csv_data(
        self,
        csv_file_path: str,
        data_definition: Union[str, Dict[str, str]]
    ) -> pd.DataFrame:
        """
        Load CSV data with proper column type conversion based on data definition.

        Args:
            csv_file_path: Path to the CSV file
            data_definition: Either JSON file path or dictionary defining column types

        Returns:
            Loaded and processed DataFrame

        Raises:
            FileNotFoundError: If CSV or JSON file not found
            ValueError: If data definition is invalid
        """
        logger.info(f"Loading CSV data from: {csv_file_path}")

        # Load data definition
        if isinstance(data_definition, str):
            with open(data_definition, 'r') as f:
                self.data_definition = json.load(f)
        else:
            self.data_definition = data_definition.copy()

        logger.info(f"Data definition: {self.data_definition}")

        # Load CSV
        try:
            self.data = pd.read_csv(csv_file_path).dropna(axis=0)
            logger.info(f"Loaded CSV with shape: {self.data.shape}")
            logger.info(f"Columns: {list(self.data.columns)}")
        except FileNotFoundError:
            raise FileNotFoundError(f"CSV file not found: {csv_file_path}")

        # Validate that 'date' column exists
        if 'date' not in self.data.columns:
            raise ValueError("CSV file must contain a 'date' column as the first column")

        # Convert date column
        self.data['date'] = pd.to_datetime(self.data['date'])
        logger.info(f"Date range: {self.data['date'].min()} to {self.data['date'].max()}")

        # Apply data type conversions based on definition
        self._apply_data_types()

        # Validate data definition
        self._validate_data_definition()

        logger.info("✅ CSV data loaded and processed successfully")
        return self.data.copy()

    def _apply_data_types(self) -> None:
        """Apply appropriate data types based on the data definition."""
        logger.info("Applying data type conversions...")

        for column, data_type in self.data_definition.items():
            if column not in self.data.columns:
                logger.warning(f"Column '{column}' in data definition not found in CSV")
                continue

            try:
                if data_type in ['target', 'dynamic_numerical', 'static_numerical']:
                    # Convert to float
                    self.data[column] = pd.to_numeric(self.data[column], errors='coerce')
                    logger.info(f"Converted '{column}' to numerical (float)")

                elif data_type in ['dynamic_categorical', 'static_categorical']:
                    # Convert to string
                    self.data[column] = self.data[column].astype(str)
                    logger.info(f"Converted '{column}' to categorical (string)")

                else:
                    logger.warning(f"Unknown data type '{data_type}' for column '{column}'")

            except Exception as e:
                logger.error(f"Failed to convert column '{column}': {str(e)}")
                raise

    def _validate_data_definition(self) -> None:
        """Validate the data definition against the loaded data."""
        logger.info("Validating data definition...")

        # Check for required data types
        target_columns = [col for col, dtype in self.data_definition.items() if dtype == 'target']
        if not target_columns:
            raise ValueError("Data definition must contain at least one 'target' column")

        if len(target_columns) > 1:
            logger.warning(f"Multiple target columns found: {target_columns}. Using first one for univariate forecasting.")

        # Validate column existence
        missing_columns = set(self.data_definition.keys()) - set(self.data.columns)
        if missing_columns:
            raise ValueError(f"Columns defined in data_definition but missing from CSV: {missing_columns}")

        # Check for data quality issues
        for column in target_columns:
            if self.data[column].isnull().any():
                null_count = self.data[column].isnull().sum()
                logger.warning(f"Target column '{column}' has {null_count} null values")

        logger.info("✅ Data definition validation passed")

    def prepare_forecast_data(
        self,
        data: pd.DataFrame,
        context_len: int,
        horizon_len: int,
        target_column: Optional[str] = None
    ) -> Tuple[List[float], Dict[str, Any]]:
        """
        Prepare data for TimesFM forecasting with covariates.

        Args:
            data: Input DataFrame
            context_len: Length of context window for forecasting
            horizon_len: Length of forecast horizon
            target_column: Target column name (auto-detected if None)

        Returns:
            Tuple of (target_inputs, covariates_dict)

        Raises:
            ValueError: If insufficient data or invalid configuration
        """
        logger.info(f"Preparing forecast data (context: {context_len}, horizon: {horizon_len})")

        # Auto-detect target column if not specified
        if target_column is None:
            target_columns = [col for col, dtype in self.data_definition.items() if dtype == 'target']
            if not target_columns:
                raise ValueError("No target column found in data definition")
            target_column = target_columns[0]
            logger.info(f"Using target column: {target_column}")

        # Validate data length - only need context_len for the data
        if len(data) < context_len:
            raise ValueError(f"Insufficient data: need {context_len} points, have {len(data)}")

        # Prepare target inputs using the most recent context window
        target_series = data[target_column].values
        context_start = max(0, len(data) - context_len)
        context_end = len(data)  # Use last context_len periods
        target_inputs = target_series[context_start:context_end].tolist()

        logger.info(f"Target data preparation:")
        logger.info(f" - Target column: {target_column}")
        logger.info(f" - Context start index: {context_start}")
        logger.info(f" - Context end index: {context_end}")
        logger.info(f" - Target inputs length: {len(target_inputs)}")
        logger.info(f" - Target range: {min(target_inputs):.2f} - {max(target_inputs):.2f}")

        # Prepare covariates
        covariates = self._prepare_covariates(data, context_len, horizon_len)

        logger.info(f"✅ Prepared forecast data:")
        logger.info(f"   Target inputs length: {len(target_inputs)}")
        logger.info(f"   Target range: {min(target_inputs):.2f} - {max(target_inputs):.2f}")
        logger.info(f"   Covariates: {list(covariates.keys())}")

        return target_inputs, covariates

    def _prepare_covariates(
        self,
        data: pd.DataFrame,
        context_len: int,
        horizon_len: int
    ) -> Dict[str, Dict[str, List]]:
        """
        Prepare covariates data structure for TimesFM.

        Args:
            data: Input DataFrame
            context_len: Context window length
            horizon_len: Forecast horizon length

        Returns:
            Dictionary containing organized covariates
        """
        covariates = {
            'dynamic_numerical_covariates': {},
            'dynamic_categorical_covariates': {},
            'static_numerical_covariates': {},
            'static_categorical_covariates': {}
        }

        # For dynamic covariates, we need context_len + horizon_len total periods
        # Context period: last context_len periods of available data
        # Horizon period: horizon_len periods (padded with last known values)
        total_len = context_len + horizon_len

        logger.info(f"Covariate preparation debug:")
        logger.info(f" - Data length: {len(data)}")
        logger.info(f" - Context length: {context_len}")
        logger.info(f" - Horizon length: {horizon_len}")
        logger.info(f" - Total periods needed: {total_len}")
        logger.info(f" - Data date range: {data['date'].min()} to {data['date'].max()}")
        logger.info(f" - Context period: last {context_len} periods of data")
        logger.info(f" - Horizon period: {horizon_len} periods (padded with last known values)")

        for column, data_type in self.data_definition.items():
            if column == 'date' or data_type == 'target':
                continue

            if data_type == 'dynamic_numerical':
                # Dynamic numerical: need context + horizon values
                # Context: last context_len periods of available data
                # Horizon: horizon_len periods (padded with last known value)

                if len(data) < context_len:
                    logger.warning(f"Insufficient data for dynamic covariate '{column}': need {context_len} for context, have {len(data)}")
                    continue

                # Get context values (last context_len periods)
                context_values = data[column].iloc[-context_len:].tolist()

                # Get horizon values (pad with last known value)
                last_value = context_values[-1]
                horizon_values = [last_value] * horizon_len

                # Combine context + horizon
                values = context_values + horizon_values

                covariates['dynamic_numerical_covariates'][column] = [values]
                logger.info(f"Added dynamic numerical covariate '{column}': {len(values)} values")
                logger.info(f" - Context period: {len(context_values)} values (last {context_len} periods)")
                logger.info(f" - Horizon period: {len(horizon_values)} values (padded with {last_value})")

            elif data_type == 'dynamic_categorical':
                # Dynamic categorical: need context + horizon values
                # Context: last context_len periods of available data
                # Horizon: horizon_len periods (padded with last known value)

                if len(data) < context_len:
                    logger.warning(f"Insufficient data for dynamic covariate '{column}': need {context_len} for context, have {len(data)}")
                    continue

                # Get context values (last context_len periods)
                context_values = data[column].astype(str).iloc[-context_len:].tolist()

                # Get horizon values (pad with last known value)
                last_value = context_values[-1]
                horizon_values = [last_value] * horizon_len

                # Combine context + horizon
                values = context_values + horizon_values

                covariates['dynamic_categorical_covariates'][column] = [values]
                logger.info(f"Added dynamic categorical covariate '{column}': {len(values)} values")
                logger.info(f" - Context period: {len(context_values)} values (last {context_len} periods)")
                logger.info(f" - Horizon period: {len(horizon_values)} values (padded with '{last_value}')")

            elif data_type == 'static_numerical':
                # Static numerical: single value per time series
                value = float(data[column].iloc[0])
                covariates['static_numerical_covariates'][column] = [value]
                logger.info(f"Added static numerical covariate '{column}': {value}")

            elif data_type == 'static_categorical':
                # Static categorical: single value per time series
                value = str(data[column].iloc[0])
                covariates['static_categorical_covariates'][column] = [value]
                logger.info(f"Added static categorical covariate '{column}': {value}")

        # Remove empty covariate types
        covariates = {k: v for k, v in covariates.items() if v}

        return covariates

    def get_data_summary(self) -> Dict[str, Any]:
        """
        Get a summary of the loaded data.

        Returns:
            Dictionary containing data summary statistics
        """
        if self.data is None:
            return {"status": "No data loaded"}

        summary = {
            "status": "loaded",
            "shape": self.data.shape,
            "date_range": {
                "start": str(self.data['date'].min().date()),
                "end": str(self.data['date'].max().date()),
                "total_periods": len(self.data)
            },
            "columns": list(self.data.columns),
            "data_definition": self.data_definition
        }

        # Add column-specific statistics
        column_stats = {}
        for column in self.data.columns:
            if column == 'date':
                continue

            col_data = self.data[column]
            data_type = self.data_definition.get(column, 'unknown')

            if data_type in ['target', 'dynamic_numerical', 'static_numerical']:
                column_stats[column] = {
                    "type": data_type,
                    "dtype": str(col_data.dtype),
                    "min": float(col_data.min()) if not col_data.isnull().all() else None,
                    "max": float(col_data.max()) if not col_data.isnull().all() else None,
                    "mean": float(col_data.mean()) if not col_data.isnull().all() else None,
                    "null_count": int(col_data.isnull().sum())
                }
            else:
                column_stats[column] = {
                    "type": data_type,
                    "dtype": str(col_data.dtype),
                    "unique_values": int(col_data.nunique()),
                    "null_count": int(col_data.isnull().sum()),
                    "sample_values": col_data.dropna().unique()[:5].tolist()
                }

        summary["column_statistics"] = column_stats
        return summary

    def validate_forecast_inputs(
        self,
        inputs: List[float],
        covariates: Dict[str, Any],
        context_len: int,
        horizon_len: int
    ) -> bool:
        """
        Validate that forecast inputs are properly formatted for TimesFM.

        Args:
            inputs: Target time series inputs
            covariates: Covariates dictionary
            context_len: Expected context length
            horizon_len: Expected horizon length

        Returns:
            True if validation passes

        Raises:
            ValueError: If validation fails
        """
        logger.info("Validating forecast inputs...")

        # Validate inputs length
        if len(inputs) != context_len:
            raise ValueError(f"Input length {len(inputs)} doesn't match context_len {context_len}")

        # Validate inputs are numeric
        if not all(isinstance(x, (int, float)) and not np.isnan(x) for x in inputs):
            raise ValueError("All inputs must be numeric and non-NaN")

        # Validate covariates structure
        total_len = context_len + horizon_len

        for cov_type, cov_dict in covariates.items():
            if cov_type in ['dynamic_numerical_covariates', 'dynamic_categorical_covariates']:
                for name, values_list in cov_dict.items():
                    if len(values_list) != 1:
                        raise ValueError(f"Dynamic covariate '{name}' must have exactly 1 time series")
                    if len(values_list[0]) != total_len:
                        raise ValueError(f"Dynamic covariate '{name}' must have {total_len} values, got {len(values_list[0])}")

            elif cov_type in ['static_numerical_covariates', 'static_categorical_covariates']:
                for name, values_list in cov_dict.items():
                    if len(values_list) != 1:
                        raise ValueError(f"Static covariate '{name}' must have exactly 1 value")

        logger.info("✅ Forecast inputs validation passed")
        return True

    def create_sample_data_definition(self, output_path: str) -> None:
        """
        Create a sample data definition JSON file.

        Args:
            output_path: Path where to save the sample JSON file
        """
        sample_definition = {
            "btc_price": "target",
            "eth_price": "dynamic_numerical",
            "vix_index": "dynamic_numerical",
            "sp500_price": "dynamic_numerical",
            "quarter": "dynamic_categorical",
            "asset_category": "static_categorical",
            "base_price": "static_numerical"
        }

        with open(output_path, 'w') as f:
            json.dump(sample_definition, f, indent=2)

        logger.info(f"Sample data definition saved to: {output_path}")
        print(f"Sample data definition structure:")
        print(json.dumps(sample_definition, indent=2))


def prepare_visualization_data(
    processed_data: pd.DataFrame,
    target_inputs: Union[List[float], List[List[float]]],
    target_column: str,
    context_len: int,
    horizon_len: int,
    extended_data: Optional[pd.DataFrame] = None
) -> Dict[str, Any]:
    """
    Centralized function to prepare visualization data from processed data.

    This function creates the visualization data structure used by both
    the webapp and notebook for consistent data handling.

    Args:
        processed_data: Processed DataFrame with date column
        target_inputs: Target input data used for forecasting (flattenable to a single series)
        target_column: Name of the target column
        context_len: Context length used for forecasting
        horizon_len: Horizon length for forecasting

    Returns:
        Dictionary containing visualization data with keys:
        - 'historical_data': Context window used for forecasting (chronologically ordered)
        - 'dates_historical': Corresponding historical dates
        - 'dates_future': Future dates aligned with the forecast horizon
        - 'target_name': Name of the target column
        - 'actual_future': Optional actual values for the forecast horizon (if available)
    """

    if processed_data.empty:
        return {
            'historical_data': [],
            'dates_historical': [],
            'dates_future': [],
            'target_name': target_column,
            'actual_future': []
        }

    # Work on a chronologically sorted copy to ensure alignment
    df = processed_data.dropna(axis=0).sort_values('date').reset_index(drop=True)

    # Flatten target inputs (they may arrive as List[List[float]] or List[float])
    if isinstance(target_inputs, (list, tuple)) and target_inputs:
        if isinstance(target_inputs[0], (list, tuple, np.ndarray)):
            target_inputs_flat = list(target_inputs[0])
        else:
            target_inputs_flat = list(target_inputs)
    else:
        target_inputs_flat = []

    # Respect the actual context length used
    context_len_effective = len(target_inputs_flat) or context_len
    available_len = len(df)

    # Use target_inputs as historical data to ensure exact alignment with forecasting
    # This guarantees that the historical data in visualization matches what was used for forecasting
    if target_inputs_flat:
        historical_slice = list(map(float, target_inputs_flat))

        # For dates, we need to find the corresponding dates for the target_inputs
        # Since target_inputs represents the last context_len periods used for forecasting,
        # we need to find the dates that correspond to those exact data points
        if len(df) >= context_len_effective:
            # Get the dates for the last context_len periods (same as target_inputs)
            dates_historical = df['date'].iloc[-context_len_effective:].tolist()
        else:
            # If we don't have enough data, use what we have
            dates_historical = df['date'].tolist()

        logger.info(f"Using target_inputs for historical data to ensure forecasting alignment")
    else:
        # Fallback to data-based extraction if target_inputs not available
        if len(df) >= context_len_effective:
            historical_slice = df[target_column].iloc[-context_len_effective:].astype(float).tolist()
            dates_historical = df['date'].iloc[-context_len_effective:].tolist()
        else:
            historical_slice = df[target_column].astype(float).tolist()
            dates_historical = df['date'].tolist()

        logger.info(f"Using data-based extraction for historical data")

    logger.info(f"Visualization data preparation:")
    logger.info(f" - Processed data shape: {df.shape}")
    logger.info(f" - Target column: {target_column}")
    logger.info(f" - Context length effective: {context_len_effective}")
    logger.info(f" - Historical slice length: {len(historical_slice)}")
    logger.info(f" - Target inputs flat length: {len(target_inputs_flat)}")
    logger.info(f" - Dates historical length: {len(dates_historical)}")
    logger.info(f" - Historical data range: {min(historical_slice) if historical_slice else 'N/A'} to {max(historical_slice) if historical_slice else 'N/A'}")
    if dates_historical:
        logger.info(f" - First historical date: {dates_historical[0]}")
        logger.info(f" - Last historical date: {dates_historical[-1]}")

    # For future dates, we need to generate them since we only have context data
    # Extract actual future values when present (useful for overlaying actuals)
    # The actual future values should start from the day after the last historical date
    # Use extended_data if available (includes horizon period), otherwise use df
    data_for_future_extraction = extended_data if extended_data is not None else df

    if len(data_for_future_extraction) > context_len_effective and dates_historical:
        # Find the last historical date (this is the context end date)
        last_historical_date = dates_historical[-1]

        # Find data points that come after the last historical date
        future_mask = data_for_future_extraction['date'] > last_historical_date
        future_data = data_for_future_extraction[future_mask]

        if len(future_data) > 0:
            # Take only the first horizon_len periods of future data
            future_slice = future_data[target_column].iloc[:horizon_len].astype(float).tolist()
            dates_future = future_data['date'].iloc[:horizon_len].tolist()

            logger.info(f"Actual future values extracted:")
            logger.info(f" - Data for extraction length: {len(data_for_future_extraction)}")
            logger.info(f" - Context length effective: {context_len_effective}")
            logger.info(f" - Last historical date (context end): {last_historical_date}")
            logger.info(f" - Future data available: {len(future_data)} periods")
            logger.info(f" - Future slice length: {len(future_slice)}")
            logger.info(f" - Future dates length: {len(dates_future)}")
            if future_slice and dates_future:
                logger.info(f" - Future values range: {min(future_slice):.4f} to {max(future_slice):.4f}")
                logger.info(f" - First future date: {dates_future[0]}")
                logger.info(f" - Last future date: {dates_future[-1]}")
        else:
            future_slice = []
            dates_future = []
            logger.info("No actual future values available - no data after last historical date")
    else:
        # No actual future values available
        future_slice = []
        dates_future = []
        logger.info("No actual future values available - data doesn't extend beyond context period")

    if len(dates_future) < horizon_len:
        # Generate future dates if the dataset stops at the forecast boundary
        inferred_delta: Optional[pd.Timedelta] = None
        if len(dates_historical) >= 2:
            inferred_delta = dates_historical[-1] - dates_historical[-2]
        last_date = dates_historical[-1]
        if hasattr(last_date, 'to_pydatetime'):
            last_date = last_date.to_pydatetime()
        elif isinstance(last_date, np.datetime64):
            last_date = pd.to_datetime(last_date).to_pydatetime()

        step = inferred_delta if isinstance(inferred_delta, pd.Timedelta) and inferred_delta != pd.Timedelta(0) else timedelta(days=1)
        dates_future = [last_date + step * (i + 1) for i in range(horizon_len)]
        future_slice = []  # No actual future data in this case

    visualization_data = {
        'historical_data': historical_slice,
        'dates_historical': [d.isoformat() if hasattr(d, 'isoformat') else str(d) for d in dates_historical],
        'dates_future': [d.isoformat() if hasattr(d, 'isoformat') else str(d) for d in dates_future],
        'target_name': target_column,
        'actual_future': future_slice
    }

    return visualization_data
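Taken together with the sample data above, this module supports the flow described in its docstrings: load the CSV with the role definition, slice the most recent context window for the target, and pad dynamic covariates across the horizon. A usage sketch under those assumptions; the 160/12 context and horizon sizes are arbitrary illustration values, not defaults.

from src.data import DataProcessor

processor = DataProcessor()
data = processor.load_csv_data("data/sample_data.csv", "data/sample_data_definition.json")

# The last 160 rows become the context; dynamic covariates get 160 + 12 values.
target_inputs, covariates = processor.prepare_forecast_data(
    data, context_len=160, horizon_len=12
)

processor.validate_forecast_inputs(target_inputs, covariates, context_len=160, horizon_len=12)
print(len(target_inputs), list(covariates.keys()))
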
src/forecast.py
ADDED
@@ -0,0 +1,476 @@
"""
TimesFM Forecasting Module

This module provides a simplified and robust interface for TimesFM forecasting,
handling both basic and covariates-enhanced forecasting with consistent quantile output.

Key Features:
- Single forecast method with optional covariates
- Always returns quantiles (never "maybe")
- Simplified logic: IF covariates -> use covariates, ELSE -> use basic
- Consistent return format: (point_forecast, quantile_forecast)
"""

import numpy as np
import pandas as pd
import logging
from typing import List, Dict, Optional, Tuple, Any, Union
import timesfm

logger = logging.getLogger(__name__)


class Forecaster:
    """
    Simplified TimesFM Forecaster with consistent quantile output.

    This class provides a single forecast method that handles both basic and
    covariates-enhanced forecasting, always returning quantiles.

    Example:
        >>> forecaster = Forecaster(model)
        >>> point_forecast, quantile_forecast = forecaster.forecast(
        ...     inputs=[1,2,3,4,5],
        ...     use_covariates=True,
        ...     dynamic_numerical_covariates={'feature1': [[1,2,3,4,5]]}
        ... )
    """

    def __init__(self, model: timesfm.TimesFm):
        """
        Initialize the Forecaster with a loaded TimesFM model.

        Args:
            model: Initialized TimesFM model instance
        """
        self.model = model
        self.capabilities = self._detect_capabilities()
        logger.info(f"Forecaster initialized with capabilities: {list(self.capabilities.keys())}")

    def _detect_capabilities(self) -> Dict[str, bool]:
        """Detect available forecasting capabilities of the model."""
        return {
            'basic_forecasting': True,
            'quantile_forecasting': hasattr(self.model, 'experimental_quantile_forecast'),
            'covariates_support': hasattr(self.model, 'forecast_with_covariates')
        }

    def forecast(
        self,
        inputs: Union[List[float], List[List[float]]],
        freq: Union[int, List[int]] = 0,
        dynamic_numerical_covariates: Optional[Dict[str, List[List[float]]]] = None,
        dynamic_categorical_covariates: Optional[Dict[str, List[List[str]]]] = None,
        static_numerical_covariates: Optional[Dict[str, List[float]]] = None,
        static_categorical_covariates: Optional[Dict[str, List[str]]] = None,
        use_covariates: bool = False,
        xreg_mode: str = "xreg + timesfm",
        ridge: float = 0.0,
        normalize_xreg_target_per_input: bool = True
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Perform TimesFM forecasting with optional covariates support.

        This is the main forecasting method that handles both basic and covariates-enhanced
        forecasting. Quantiles are always returned regardless of covariates usage.

        Args:
            inputs: Input time series data
            freq: Frequency indicator(s)
            dynamic_numerical_covariates: Dynamic numerical covariates (if use_covariates=True)
            dynamic_categorical_covariates: Dynamic categorical covariates (if use_covariates=True)
            static_numerical_covariates: Static numerical covariates (if use_covariates=True)
            static_categorical_covariates: Static categorical covariates (if use_covariates=True)
            use_covariates: Whether to use covariates-enhanced forecasting
            xreg_mode: Covariate integration mode ("xreg + timesfm" or "timesfm + xreg")
            ridge: Ridge regression parameter for covariates
            normalize_xreg_target_per_input: Whether to normalize covariates

        Returns:
            Tuple of (point_forecast, quantile_forecast) - both are always returned

        Raises:
            ValueError: If covariates are requested but not supported
            Exception: If forecasting fails
        """
        logger.info(f"Performing TimesFM forecasting (covariates={use_covariates})...")

        # Normalize inputs format
        if isinstance(inputs[0], (int, float)):
            # inputs is a single list of numbers
            inputs_norm = [inputs]
        else:
            # inputs is already a list of lists
            inputs_norm = inputs

        if isinstance(freq, int):
            freq_norm = [freq] * len(inputs_norm)
        else:
            freq_norm = freq

        try:
            if use_covariates and any([
                dynamic_numerical_covariates, dynamic_categorical_covariates,
                static_numerical_covariates, static_categorical_covariates
            ]):
                # Validate covariates support
                if not self.capabilities['covariates_support']:
                    raise ValueError("Model does not support covariates forecasting")

                # Validate covariates data structure
                self._validate_covariates(
                    inputs_norm, dynamic_numerical_covariates, dynamic_categorical_covariates,
                    static_numerical_covariates, static_categorical_covariates
                )

                logger.info(f"Using covariates-enhanced forecasting (mode: {xreg_mode})...")
                logger.info(f"Inputs shape: {[len(x) for x in inputs] if isinstance(inputs[0], list) else len(inputs)}")
                logger.info(f"Inputs type: {type(inputs)}")

                # Perform covariates forecasting with original mode
                covariates_result = self.model.forecast_with_covariates(
                    inputs=inputs_norm,
                    dynamic_numerical_covariates=dynamic_numerical_covariates or {},
                    dynamic_categorical_covariates=dynamic_categorical_covariates or {},
                    static_numerical_covariates=static_numerical_covariates or {},
                    static_categorical_covariates=static_categorical_covariates or {},
                    freq=freq_norm,
                    xreg_mode=xreg_mode,
                    ridge=ridge,
                    normalize_xreg_target_per_input=normalize_xreg_target_per_input
                )

                # Handle return format from forecast_with_covariates
                if isinstance(covariates_result, tuple) and len(covariates_result) == 2:
                    point_forecast, quantile_forecast = covariates_result
                    point_forecast = np.array(point_forecast)
                    quantile_forecast = np.array(quantile_forecast)

                    logger.info(f"✅ Covariates forecasting completed.")
                    logger.info(f"   Point forecast shape: {point_forecast.shape}")
                    logger.info(f"   Quantile forecast shape: {quantile_forecast.shape}")

                    # Check if we have proper quantiles (multiple quantiles, not just 1)
                    if quantile_forecast.ndim == 2 and (quantile_forecast.shape[0] == 1 or quantile_forecast.shape[1] == 1):
                        logger.warning("⚠️ Covariates forecasting returned insufficient quantiles, falling back to basic forecast for quantiles")
                        # Get quantiles from basic forecast method
                        _, quantile_forecast = self.model.forecast(inputs=inputs_norm, freq=freq_norm)
                        quantile_forecast = np.array(quantile_forecast)
                        logger.info(f"✅ Basic forecast quantiles obtained. Shape: {quantile_forecast.shape}")
                    else:
                        logger.info("✅ Using quantiles from covariates forecasting")
                else:
                    # Fallback: If forecast_with_covariates doesn't return quantiles, get them separately
                    logger.warning("⚠️ Covariates forecasting didn't return quantiles, getting them separately")
                    point_forecast = np.array(covariates_result)
                    _, quantile_forecast = self.model.forecast(inputs=inputs_norm, freq=freq_norm)
                    quantile_forecast = np.array(quantile_forecast)

            else:
                logger.info("Using basic forecasting...")

                # Perform basic forecasting - this should return (point, quantiles)
                point_forecast, quantile_forecast = self.model.forecast(inputs=inputs_norm, freq=freq_norm)
                point_forecast = np.array(point_forecast)
                quantile_forecast = np.array(quantile_forecast)

                logger.info(f"✅ Basic forecasting completed.")

            return point_forecast, quantile_forecast

        except Exception as e:
            logger.error(f"❌ Forecasting failed: {str(e)}")
            raise

    def _validate_covariates(
        self,
        inputs: List[List[float]],
        dynamic_numerical: Optional[Dict],
        dynamic_categorical: Optional[Dict],
        static_numerical: Optional[Dict],
        static_categorical: Optional[Dict]
    ) -> None:
        """Validate covariates data structure and compatibility."""
        logger.info("Validating covariates data structure...")

        # Check that all covariates have the same number of series as inputs
        num_series = len(inputs)

        for cov_type, cov_data in [
            ("dynamic_numerical", dynamic_numerical),
            ("dynamic_categorical", dynamic_categorical),
            ("static_numerical", static_numerical),
            ("static_categorical", static_categorical)
        ]:
            if cov_data:
                for name, data in cov_data.items():
                    if isinstance(data[0], (list, np.ndarray)):
                        # Dynamic covariates
                        if len(data) != num_series:
                            raise ValueError(f"Dynamic covariate '{name}' has {len(data)} series, expected {num_series}")
                    else:
                        # Static covariates
                        if len(data) != num_series:
                            raise ValueError(f"Static covariate '{name}' has {len(data)} values, expected {num_series}")

        logger.info("✅ Covariates validation passed")


def run_forecast(
    forecaster: 'Forecaster',
    target_inputs: List[List[float]],
    covariates: Optional[Dict[str, Any]] = None,
    use_covariates: bool = False,
    freq: Union[int, List[int]] = 0
) -> Dict[str, Any]:
    """
    Centralized forecasting function that handles both basic and covariates-enhanced forecasting.

    This function implements the logic to decide whether to run forecast_with_covariates
    or the basic forecast, including fallback mechanisms and proper error handling.

    Args:
        forecaster: Initialized Forecaster instance
        target_inputs: Input time series data
        covariates: Dictionary containing covariate data (if use_covariates=True)
        use_covariates: Whether to use covariates-enhanced forecasting
        freq: Frequency indicator(s)

    Returns:
        Dictionary containing forecast results with keys:
        - 'enhanced_forecast' or 'point_forecast': Main forecast array
        - 'quantile_forecast': Quantile forecast array (always present)
        - 'method': String indicating the forecasting method used
        - 'metadata': Additional forecast metadata

    Raises:
        Exception: If forecasting fails
    """
    logger.info(f"🚀 Running centralized forecast (covariates={use_covariates})...")

    try:
        results = {}

        if use_covariates and covariates:
            logger.info("Using covariates-enhanced forecasting...")

            # Extract covariate data
            dynamic_numerical = covariates.get('dynamic_numerical_covariates')
            dynamic_categorical = covariates.get('dynamic_categorical_covariates')
            static_numerical = covariates.get('static_numerical_covariates')
            static_categorical = covariates.get('static_categorical_covariates')

            # Perform covariates forecasting
            point_forecast, quantile_forecast = forecaster.forecast(
                inputs=target_inputs,
                freq=freq,
                dynamic_numerical_covariates=dynamic_numerical,
                dynamic_categorical_covariates=dynamic_categorical,
                static_numerical_covariates=static_numerical,
                static_categorical_covariates=static_categorical,
                use_covariates=True
            )

            results['point_forecast'] = point_forecast
            results['method'] = 'covariates_enhanced'

        else:
            logger.info("Using basic forecasting...")

            # Perform basic forecasting
            point_forecast, quantile_forecast = forecaster.forecast(
                inputs=target_inputs,
                freq=freq,
                use_covariates=False
            )

            results['point_forecast'] = point_forecast
            results['method'] = 'basic_timesfm'

        # Check for NaN values before returning
        if np.any(np.isnan(point_forecast)):
            logger.error(f"❌ NaN values detected in point_forecast: {np.isnan(point_forecast).sum()} out of {point_forecast.size}")
            logger.error(f"Point forecast values: {point_forecast}")
            raise ValueError(f"Forecasting produced NaN values in point forecast. This may be due to insufficient data or model issues.")

        if np.any(np.isnan(quantile_forecast)):
            logger.error(f"❌ NaN values detected in quantile_forecast: {np.isnan(quantile_forecast).sum()} out of {quantile_forecast.size}")
            logger.error(f"Quantile forecast shape: {quantile_forecast.shape}")
            raise ValueError(f"Forecasting produced NaN values in quantile forecast. This may be due to insufficient data or model issues.")

        # Quantiles are always available
        results['quantile_forecast'] = quantile_forecast
        logger.info(f"✅ Quantile forecast obtained. Shape: {quantile_forecast.shape}")

        # Add metadata
        results['metadata'] = {
            'input_series_count': len(target_inputs),
            'forecast_length': results.get('point_forecast').shape[-1],
            'covariates_used': use_covariates and covariates is not None,
            'quantiles_available': True  # Always true now
        }

        logger.info(f"✅ Centralized forecast completed successfully!")
        logger.info(f"   Method: {results['method']}")
        logger.info(f"   Forecast shape: {results['metadata']['forecast_length']}")
        logger.info(f"   Quantiles: Yes (shape: {quantile_forecast.shape})")
        logger.info(f"   Point forecast range: {np.min(point_forecast):.2f} to {np.max(point_forecast):.2f}")

        return results

    except Exception as e:
        logger.error(f"❌ Centralized forecasting failed: {str(e)}")
        raise


def process_quantile_bands(
    quantile_forecast: np.ndarray,
    selected_indices: List[int] = None
) -> Dict[str, Any]:
    """
    Centralized function to process quantile forecasts into quantile bands for visualization.

    This function contains the logic for sorting quantiles and creating the quantile band
    dictionary, as used in both the webapp and notebook.

    Args:
        quantile_forecast: Array of quantile forecasts with shape (horizon, num_quantiles) or (num_quantiles, horizon)
        selected_indices: List of quantile indices to use for bands (default: [1, 3, 5, 7, 9])

    Returns:
        Dictionary of quantile bands ready for visualization with keys:
        - 'quantile_band_0_lower', 'quantile_band_0_upper', 'quantile_band_0_label'
        - 'quantile_band_1_lower', 'quantile_band_1_upper', 'quantile_band_1_label'
        - etc.
    """
    logger.info("🔄 Processing quantile bands...")
    logger.info(f"Input quantile_forecast type: {type(quantile_forecast)}")
    logger.info(f"Input quantile_forecast shape: {quantile_forecast.shape if hasattr(quantile_forecast, 'shape') else 'N/A'}")

    if quantile_forecast is None:
        logger.warning("No quantile forecast provided")
        return {}

    try:
        # Handle quantile indices - only use default if explicitly None (not empty list)
        if selected_indices is None:
            # This means no quantile selection was made, use default
            selected_indices = [1, 3, 5, 7, 9]  # Q10, Q30, Q50, Q70, Q90
        elif selected_indices == []:
            # This means user explicitly selected no quantiles, return empty
            logger.info("No quantiles selected by user - returning empty quantile bands")
            return {}

        # Handle different array dimensions
        if quantile_forecast.ndim == 3:
            # Shape is (1, horizon, num_quantiles) - squeeze out first dimension
            q_mat = quantile_forecast.squeeze(0)
            logger.info(f"3D array detected, squeezed to shape: {q_mat.shape}")
        elif quantile_forecast.ndim == 1:
            # Shape is (horizon,) - reshape to (1, horizon)
            q_mat = quantile_forecast.reshape(1, -1)
            logger.info(f"1D array detected, reshaped to: {q_mat.shape}")
        else:
            # Shape is 2D - determine if we need to transpose
            # For quantiles, we expect (horizon, num_quantiles) format
            # If we have more horizon than quantiles, it's likely (horizon, num_quantiles) and should be kept as-is
            if quantile_forecast.shape[0] > quantile_forecast.shape[1]:
                # Shape is (horizon, num_quantiles) - keep as is
                q_mat = quantile_forecast
                logger.info(f"2D array kept as is (horizon, quantiles): {q_mat.shape}")
            else:
                # Shape is (num_quantiles, horizon) - transpose to (horizon, num_quantiles)
                q_mat = quantile_forecast.T
                logger.info(f"2D array transposed from {quantile_forecast.shape} to {q_mat.shape}")

        horizon_len, num_quantiles = q_mat.shape
        logger.info(f"📊 Available quantiles: {num_quantiles} (indices 0-{num_quantiles-1})")
        logger.info(f"📊 Note: Index 0 is legacy mean forecast, using indices 1-{num_quantiles-1} for actual quantiles")

        # Check if we have enough quantiles for band creation (need at least 3 total: 0=legacy, 1=Q10, 2=Q20)
        if num_quantiles < 3:
            logger.warning(f"Not enough quantiles for band creation. Have {num_quantiles}, need at least 3")
            return {}

        # Filter selected indices to valid range (skip index 0)
        valid_indices = [idx for idx in selected_indices if 1 <= idx < num_quantiles]  # Skip index 0
        if not valid_indices:
            logger.warning("No valid quantile indices selected (after skipping legacy index 0)")
            return {}

        # Sort quantiles by their median magnitude to ensure proper ordering
        quantile_medians = np.median(q_mat, axis=0)
        sorted_indices = np.argsort(quantile_medians)

        # Create quantile bands from selected indices
quantile_bands = {}
|
| 415 |
+
band_count = 0
|
| 416 |
+
|
| 417 |
+
for i in range(len(valid_indices) - 1):
|
| 418 |
+
lower_idx = valid_indices[i]
|
| 419 |
+
upper_idx = valid_indices[i + 1]
|
| 420 |
+
|
| 421 |
+
# Get the sorted indices for these quantiles
|
| 422 |
+
lower_sorted_idx = sorted_indices[lower_idx]
|
| 423 |
+
upper_sorted_idx = sorted_indices[upper_idx]
|
| 424 |
+
|
| 425 |
+
# Extract quantile values
|
| 426 |
+
lower_quantile = q_mat[:, lower_sorted_idx]
|
| 427 |
+
upper_quantile = q_mat[:, upper_sorted_idx]
|
| 428 |
+
|
| 429 |
+
# Create band labels
|
| 430 |
+
lower_pct = idx_to_percent(lower_idx, num_quantiles)
|
| 431 |
+
upper_pct = idx_to_percent(upper_idx, num_quantiles)
|
| 432 |
+
band_label = f"Q{lower_pct:02d}–Q{upper_pct:02d}"
|
| 433 |
+
|
| 434 |
+
# Store band data
|
| 435 |
+
quantile_bands[f'quantile_band_{band_count}_lower'] = lower_quantile.tolist()
|
| 436 |
+
quantile_bands[f'quantile_band_{band_count}_upper'] = upper_quantile.tolist()
|
| 437 |
+
quantile_bands[f'quantile_band_{band_count}_label'] = band_label
|
| 438 |
+
|
| 439 |
+
logger.info(f" Band {band_count}: {band_label} - Lower: {len(lower_quantile)}, Upper: {len(upper_quantile)}")
|
| 440 |
+
band_count += 1
|
| 441 |
+
|
| 442 |
+
logger.info(f"✅ Created {band_count} quantile bands from indices: {valid_indices}")
|
| 443 |
+
for i in range(band_count):
|
| 444 |
+
label = quantile_bands[f'quantile_band_{i}_label']
|
| 445 |
+
logger.info(f" Band {i}: {label}")
|
| 446 |
+
|
| 447 |
+
return quantile_bands
|
| 448 |
+
|
| 449 |
+
except Exception as e:
|
| 450 |
+
logger.error(f"❌ Quantile band processing failed: {str(e)}")
|
| 451 |
+
raise
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def idx_to_percent(idx: int, num_quantiles: int) -> int:
|
| 455 |
+
"""
|
| 456 |
+
Convert quantile index to percentage for labeling.
|
| 457 |
+
|
| 458 |
+
Note: Index 0 is legacy mean forecast and should be skipped.
|
| 459 |
+
Actual quantiles start at index 1: 1->Q10, 2->Q20, ..., 9->Q90
|
| 460 |
+
|
| 461 |
+
Args:
|
| 462 |
+
idx: Quantile index (1-based for actual quantiles, 0 is legacy)
|
| 463 |
+
num_quantiles: Total number of quantiles (including legacy index 0)
|
| 464 |
+
|
| 465 |
+
Returns:
|
| 466 |
+
Percentage value (e.g., 10 for Q10, 90 for Q90)
|
| 467 |
+
"""
|
| 468 |
+
if num_quantiles == 10:
|
| 469 |
+
# Special case for 10 quantiles: 1->Q10, 2->Q20, ..., 9->Q90
|
| 470 |
+
# Index 0 is legacy mean, so actual quantiles start at index 1
|
| 471 |
+
return idx * 10
|
| 472 |
+
else:
|
| 473 |
+
# General case: distribute evenly, accounting for skipped index 0
|
| 474 |
+
# If we have 10 total quantiles (0-9), actual quantiles are 1-9
|
| 475 |
+
actual_quantiles = num_quantiles - 1 # Subtract 1 for legacy index 0
|
| 476 |
+
return int(100 * idx / actual_quantiles)
|
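A minimal usage sketch for process_quantile_bands and idx_to_percent, assuming the package root is on the Python path; the array values, variable names, and selected indices are illustrative:

import numpy as np
from src.forecast import process_quantile_bands

# Synthetic quantile matrix: 24-step horizon, 10 columns
# (index 0 is the legacy mean forecast, indices 1-9 map to Q10..Q90).
quantile_forecast = np.random.default_rng(0).normal(100.0, 5.0, size=(24, 10))

# Selecting indices 1, 5 and 9 (Q10, Q50, Q90) yields two bands: Q10-Q50 and Q50-Q90.
bands = process_quantile_bands(quantile_forecast, selected_indices=[1, 5, 9])
print(bands['quantile_band_0_label'], bands['quantile_band_1_label'])
print(len(bands['quantile_band_0_lower']))  # 24 values, one per horizon step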
src/interactive_visualization.py
ADDED
|
@@ -0,0 +1,1129 @@
|
| 1 |
+
"""
|
| 2 |
+
Interactive Visualization Module for TimesFM Forecasting using Plotly
|
| 3 |
+
|
| 4 |
+
This module provides comprehensive interactive visualization capabilities for TimesFM forecasting,
|
| 5 |
+
including professional-grade plots with prediction intervals, covariate displays,
|
| 6 |
+
and publication-ready styling using Plotly for enhanced interactivity.
|
| 7 |
+
|
| 8 |
+
Key Features:
|
| 9 |
+
- Interactive forecast visualizations with seamless connections
|
| 10 |
+
- Prediction intervals with customizable confidence levels
|
| 11 |
+
- Covariates subplots integration
|
| 12 |
+
- Sapheneia-style professional formatting
|
| 13 |
+
- Interactive zoom, pan, and hover capabilities
|
| 14 |
+
- Export capabilities for presentations and publications
|
| 15 |
+
- Responsive design for web applications
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import pandas as pd
|
| 20 |
+
import plotly.graph_objects as go
|
| 21 |
+
import plotly.express as px
|
| 22 |
+
from plotly.subplots import make_subplots
|
| 23 |
+
from plotly.offline import plot
|
| 24 |
+
from datetime import datetime
|
| 25 |
+
from typing import List, Dict, Optional, Union
|
| 26 |
+
import logging
|
| 27 |
+
import json
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class InteractiveVisualizer:
|
| 33 |
+
"""
|
| 34 |
+
Interactive visualization class for TimesFM forecasting results using Plotly.
|
| 35 |
+
|
| 36 |
+
This class provides methods to create interactive, publication-quality visualizations
|
| 37 |
+
of forecasting results, including prediction intervals, covariates analysis,
|
| 38 |
+
and comprehensive time series plots with enhanced user interaction.
|
| 39 |
+
|
| 40 |
+
Example:
|
| 41 |
+
>>> viz = InteractiveVisualizer()
|
| 42 |
+
>>> fig = viz.plot_forecast_with_intervals(
|
| 43 |
+
... historical_data=historical,
|
| 44 |
+
... forecast=point_forecast,
|
| 45 |
+
... intervals=prediction_intervals,
|
| 46 |
+
... title="Bitcoin Price Forecast"
|
| 47 |
+
... )
|
| 48 |
+
>>> fig.show()
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(self, style: str = "professional", theme: str = "plotly_white"):
|
| 52 |
+
"""
|
| 53 |
+
Initialize the InteractiveVisualizer with specified styling.
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
style: Visualization style ("professional", "minimal", "presentation")
|
| 57 |
+
theme: Plotly theme ("plotly", "plotly_white", "plotly_dark", "ggplot2", "seaborn", "simple_white")
|
| 58 |
+
"""
|
| 59 |
+
self.style = style
|
| 60 |
+
self.theme = theme
|
| 61 |
+
self._setup_style()
|
| 62 |
+
logger.info(f"InteractiveVisualizer initialized with '{style}' style and '{theme}' theme")
|
| 63 |
+
|
| 64 |
+
def _setup_style(self) -> None:
|
| 65 |
+
"""Set up the visualization style and parameters."""
|
| 66 |
+
if self.style == "professional":
|
| 67 |
+
# Sapheneia professional style
|
| 68 |
+
self.colors = {
|
| 69 |
+
'historical': '#1f77b4',
|
| 70 |
+
'forecast': '#d62728',
|
| 71 |
+
'actual': '#2ca02c',
|
| 72 |
+
'interval_80': 'rgba(255, 179, 102, 0.3)',
|
| 73 |
+
'interval_50': 'rgba(255, 127, 14, 0.5)',
|
| 74 |
+
'grid': '#e0e0e0',
|
| 75 |
+
'background': '#fafafa',
|
| 76 |
+
'text': '#2c3e50',
|
| 77 |
+
'axis': '#34495e'
|
| 78 |
+
}
|
| 79 |
+
self.layout_config = {
|
| 80 |
+
'width': 1200,
|
| 81 |
+
'height': 800,
|
| 82 |
+
'margin': {'l': 60, 'r': 60, 't': 80, 'b': 60}
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
elif self.style == "minimal":
|
| 86 |
+
# Clean minimal style
|
| 87 |
+
self.colors = {
|
| 88 |
+
'historical': '#2E86AB',
|
| 89 |
+
'forecast': '#A23B72',
|
| 90 |
+
'actual': '#F18F01',
|
| 91 |
+
'interval_80': 'rgba(199, 62, 29, 0.3)',
|
| 92 |
+
'interval_50': 'rgba(241, 143, 1, 0.5)',
|
| 93 |
+
'grid': '#f0f0f0',
|
| 94 |
+
'background': 'white',
|
| 95 |
+
'text': '#2c3e50',
|
| 96 |
+
'axis': '#34495e'
|
| 97 |
+
}
|
| 98 |
+
self.layout_config = {
|
| 99 |
+
'width': 1000,
|
| 100 |
+
'height': 700,
|
| 101 |
+
'margin': {'l': 50, 'r': 50, 't': 60, 'b': 50}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
else: # presentation
|
| 105 |
+
# High contrast for presentations
|
| 106 |
+
self.colors = {
|
| 107 |
+
'historical': '#003f5c',
|
| 108 |
+
'forecast': '#ff6361',
|
| 109 |
+
'actual': '#58508d',
|
| 110 |
+
'interval_80': 'rgba(255, 166, 0, 0.3)',
|
| 111 |
+
'interval_50': 'rgba(255, 99, 97, 0.5)',
|
| 112 |
+
'grid': '#e8e8e8',
|
| 113 |
+
'background': 'white',
|
| 114 |
+
'text': '#2c3e50',
|
| 115 |
+
'axis': '#34495e'
|
| 116 |
+
}
|
| 117 |
+
self.layout_config = {
|
| 118 |
+
'width': 1400,
|
| 119 |
+
'height': 900,
|
| 120 |
+
'margin': {'l': 70, 'r': 70, 't': 100, 'b': 70}
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
def _create_base_layout(self, title: str, x_title: str = "Time", y_title: str = "Value") -> Dict:
|
| 124 |
+
"""Create base layout configuration for plots."""
|
| 125 |
+
return {
|
| 126 |
+
'title': {
|
| 127 |
+
'text': title,
|
| 128 |
+
'x': 0.5,
|
| 129 |
+
'xanchor': 'center',
|
| 130 |
+
'font': {'size': 18, 'color': self.colors['text']}
|
| 131 |
+
},
|
| 132 |
+
'xaxis': {
|
| 133 |
+
'title': {'text': x_title, 'font': {'size': 14, 'color': self.colors['axis']}},
|
| 134 |
+
'tickfont': {'size': 12, 'color': self.colors['axis']},
|
| 135 |
+
'gridcolor': self.colors['grid'],
|
| 136 |
+
'showgrid': True,
|
| 137 |
+
'zeroline': False
|
| 138 |
+
},
|
| 139 |
+
'yaxis': {
|
| 140 |
+
'title': {'text': y_title, 'font': {'size': 14, 'color': self.colors['axis']}},
|
| 141 |
+
'tickfont': {'size': 12, 'color': self.colors['axis']},
|
| 142 |
+
'gridcolor': self.colors['grid'],
|
| 143 |
+
'showgrid': True,
|
| 144 |
+
'zeroline': False
|
| 145 |
+
},
|
| 146 |
+
'plot_bgcolor': self.colors['background'],
|
| 147 |
+
'paper_bgcolor': 'white',
|
| 148 |
+
'font': {'family': 'Arial, sans-serif', 'color': self.colors['text']},
|
| 149 |
+
'showlegend': True,
|
| 150 |
+
'legend': {
|
| 151 |
+
'x': 0.02,
|
| 152 |
+
'y': 0.98,
|
| 153 |
+
'yanchor': 'top',
|
| 154 |
+
'bgcolor': 'rgba(255, 255, 255, 0.8)',
|
| 155 |
+
'bordercolor': 'rgba(0, 0, 0, 0.2)',
|
| 156 |
+
'borderwidth': 1
|
| 157 |
+
},
|
| 158 |
+
'hovermode': 'x unified',
|
| 159 |
+
**self.layout_config
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
def plot_forecast_with_intervals(
|
| 163 |
+
self,
|
| 164 |
+
historical_data: Union[List[float], np.ndarray],
|
| 165 |
+
forecast: Union[List[float], np.ndarray],
|
| 166 |
+
intervals: Optional[Dict[str, np.ndarray]] = None,
|
| 167 |
+
actual_future: Optional[Union[List[float], np.ndarray]] = None,
|
| 168 |
+
dates_historical: Optional[List[Union[str, datetime]]] = None,
|
| 169 |
+
dates_future: Optional[List[Union[str, datetime]]] = None,
|
| 170 |
+
title: str = "TimesFM Forecast with Prediction Intervals",
|
| 171 |
+
target_name: str = "Value",
|
| 172 |
+
save_path: Optional[str] = None,
|
| 173 |
+
show_figure: bool = True,
|
| 174 |
+
context_len: Optional[int] = None,
|
| 175 |
+
horizon_len: Optional[int] = None,
|
| 176 |
+
y_axis_padding: float = 0.1
|
| 177 |
+
) -> go.Figure:
|
| 178 |
+
"""
|
| 179 |
+
Create an interactive forecast visualization with prediction intervals.
|
| 180 |
+
|
| 181 |
+
Args:
|
| 182 |
+
historical_data: Historical time series data
|
| 183 |
+
forecast: Point forecast values
|
| 184 |
+
intervals: Dictionary containing prediction intervals
|
| 185 |
+
actual_future: Optional actual future values for comparison
|
| 186 |
+
dates_historical: Optional dates for historical data
|
| 187 |
+
dates_future: Optional dates for forecast period
|
| 188 |
+
title: Plot title
|
| 189 |
+
target_name: Name of the target variable
|
| 190 |
+
save_path: Optional path to save the plot (HTML format)
|
| 191 |
+
show_figure: Whether to display the figure
|
| 192 |
+
context_len: Length of context window for default view focus
|
| 193 |
+
horizon_len: Length of horizon for default view focus
|
| 194 |
+
y_axis_padding: Padding factor for focused y-axis range (0.1 = 10% padding)
|
| 195 |
+
|
| 196 |
+
Returns:
|
| 197 |
+
Plotly Figure object
|
| 198 |
+
"""
|
| 199 |
+
logger.info(f"Creating interactive forecast visualization: {title}")
|
| 200 |
+
|
| 201 |
+
# Convert to numpy arrays
|
| 202 |
+
if actual_future is not None:
|
| 203 |
+
actual_future = np.array(actual_future)
|
| 204 |
+
|
| 205 |
+
# Setup time axis
|
| 206 |
+
if dates_historical is None:
|
| 207 |
+
historical_x = np.arange(len(historical_data))
|
| 208 |
+
else:
|
| 209 |
+
historical_x = pd.to_datetime(dates_historical)
|
| 210 |
+
|
| 211 |
+
future_x = np.arange(
|
| 212 |
+
len(historical_data), len(historical_data) + len(forecast)
|
| 213 |
+
) if dates_future is None else pd.to_datetime(dates_future)
|
| 214 |
+
|
| 215 |
+
# Calculate default view range (context + horizon)
|
| 216 |
+
if context_len is not None and horizon_len is not None:
|
| 217 |
+
if dates_historical is not None:
|
| 218 |
+
start_date = historical_x[0]
|
| 219 |
+
end_date = future_x[min(horizon_len - 1, len(future_x) - 1)] if len(future_x) > 0 else historical_x[-1]
|
| 220 |
+
default_x_range = [start_date, end_date]
|
| 221 |
+
else:
|
| 222 |
+
start_idx = 0
|
| 223 |
+
end_idx = len(historical_x) + len(forecast)
|
| 224 |
+
default_x_range = [start_idx, end_idx]
|
| 225 |
+
else:
|
| 226 |
+
# No specific focus, show all data
|
| 227 |
+
if dates_historical is not None:
|
| 228 |
+
start_date = historical_x[0]
|
| 229 |
+
end_date = future_x[-1] if len(future_x) > 0 else historical_x[-1]
|
| 230 |
+
default_x_range = [start_date, end_date]
|
| 231 |
+
else:
|
| 232 |
+
start_idx = 0
|
| 233 |
+
end_idx = len(historical_x) + len(forecast)
|
| 234 |
+
default_x_range = [start_idx, end_idx]
|
| 235 |
+
|
| 236 |
+
# Calculate focused y-axis range for better visibility
|
| 237 |
+
if context_len is not None and horizon_len is not None:
|
| 238 |
+
# Focus y-axis on the context + horizon period data
|
| 239 |
+
if context_len < len(historical_data):
|
| 240 |
+
# Get the data range for context + horizon
|
| 241 |
+
context_data = historical_data[-context_len:]
|
| 242 |
+
focused_data = np.concatenate([context_data, forecast])
|
| 243 |
+
|
| 244 |
+
# Include prediction intervals in y-axis calculation
|
| 245 |
+
if intervals:
|
| 246 |
+
# Collect all interval data for y-axis range calculation
|
| 247 |
+
interval_data = []
|
| 248 |
+
|
| 249 |
+
# Add 50th percentile if available
|
| 250 |
+
if 'lower_50' in intervals and 'upper_50' in intervals:
|
| 251 |
+
interval_data.extend(intervals['lower_50'])
|
| 252 |
+
interval_data.extend(intervals['upper_50'])
|
| 253 |
+
|
| 254 |
+
# Add 80th percentile if available
|
| 255 |
+
if 'lower_80' in intervals and 'upper_80' in intervals:
|
| 256 |
+
interval_data.extend(intervals['lower_80'])
|
| 257 |
+
interval_data.extend(intervals['upper_80'])
|
| 258 |
+
|
| 259 |
+
# Add other confidence levels
|
| 260 |
+
for key in intervals.keys():
|
| 261 |
+
if key.startswith('lower_') and key not in ['lower_50', 'lower_80']:
|
| 262 |
+
interval_data.extend(intervals[key])
|
| 263 |
+
elif key.startswith('upper_') and key not in ['upper_50', 'upper_80']:
|
| 264 |
+
interval_data.extend(intervals[key])
|
| 265 |
+
|
| 266 |
+
# Add quantile bands
|
| 267 |
+
for key in intervals.keys():
|
| 268 |
+
if key.startswith('quantile_band_') and key.endswith('_lower'):
|
| 269 |
+
interval_data.extend(intervals[key])
|
| 270 |
+
elif key.startswith('quantile_band_') and key.endswith('_upper'):
|
| 271 |
+
interval_data.extend(intervals[key])
|
| 272 |
+
|
| 273 |
+
# Include interval data in range calculation
|
| 274 |
+
if interval_data:
|
| 275 |
+
interval_data = np.array(interval_data)
|
| 276 |
+
all_focused_data = np.concatenate([focused_data, interval_data])
|
| 277 |
+
else:
|
| 278 |
+
all_focused_data = focused_data
|
| 279 |
+
else:
|
| 280 |
+
all_focused_data = focused_data
|
| 281 |
+
|
| 282 |
+
# Calculate y-axis range including intervals
|
| 283 |
+
data_min = np.min(all_focused_data)
|
| 284 |
+
data_max = np.max(all_focused_data)
|
| 285 |
+
data_range = data_max - data_min
|
| 286 |
+
padding = data_range * y_axis_padding
|
| 287 |
+
|
| 288 |
+
default_y_range = [data_min - padding, data_max + padding]
|
| 289 |
+
else:
|
| 290 |
+
# If context_len >= historical_data length, use all data
|
| 291 |
+
all_data = np.concatenate([historical_data, forecast])
|
| 292 |
+
|
| 293 |
+
# Include prediction intervals in y-axis calculation
|
| 294 |
+
if intervals:
|
| 295 |
+
interval_data = []
|
| 296 |
+
|
| 297 |
+
# Add 50th percentile if available
|
| 298 |
+
if 'lower_50' in intervals and 'upper_50' in intervals:
|
| 299 |
+
interval_data.extend(intervals['lower_50'])
|
| 300 |
+
interval_data.extend(intervals['upper_50'])
|
| 301 |
+
|
| 302 |
+
# Add 80th percentile if available
|
| 303 |
+
if 'lower_80' in intervals and 'upper_80' in intervals:
|
| 304 |
+
interval_data.extend(intervals['lower_80'])
|
| 305 |
+
interval_data.extend(intervals['upper_80'])
|
| 306 |
+
|
| 307 |
+
# Add other confidence levels
|
| 308 |
+
for key in intervals.keys():
|
| 309 |
+
if key.startswith('lower_') and key not in ['lower_50', 'lower_80']:
|
| 310 |
+
interval_data.extend(intervals[key])
|
| 311 |
+
elif key.startswith('upper_') and key not in ['upper_50', 'upper_80']:
|
| 312 |
+
interval_data.extend(intervals[key])
|
| 313 |
+
|
| 314 |
+
# Add quantile bands
|
| 315 |
+
for key in intervals.keys():
|
| 316 |
+
if key.startswith('quantile_band_') and key.endswith('_lower'):
|
| 317 |
+
interval_data.extend(intervals[key])
|
| 318 |
+
elif key.startswith('quantile_band_') and key.endswith('_upper'):
|
| 319 |
+
interval_data.extend(intervals[key])
|
| 320 |
+
|
| 321 |
+
# Include interval data in range calculation
|
| 322 |
+
if interval_data:
|
| 323 |
+
interval_data = np.array(interval_data)
|
| 324 |
+
all_data = np.concatenate([all_data, interval_data])
|
| 325 |
+
|
| 326 |
+
data_min = np.min(all_data)
|
| 327 |
+
data_max = np.max(all_data)
|
| 328 |
+
data_range = data_max - data_min
|
| 329 |
+
padding = data_range * y_axis_padding
|
| 330 |
+
|
| 331 |
+
default_y_range = [data_min - padding, data_max + padding]
|
| 332 |
+
else:
|
| 333 |
+
# No focused y-axis, let Plotly auto-scale
|
| 334 |
+
default_y_range = None
|
| 335 |
+
|
| 336 |
+
# Create figure
|
| 337 |
+
fig = go.Figure()
|
| 338 |
+
|
| 339 |
+
# Debug logging for historical data
|
| 340 |
+
print(f"DEBUG: Historical data length: {len(historical_data)}")
|
| 341 |
+
print(f"DEBUG: Historical data type: {type(historical_data)}")
|
| 342 |
+
print(f"DEBUG: Historical data first 5: {historical_data[:5] if len(historical_data) > 0 else 'Empty'}")
|
| 343 |
+
print(f"DEBUG: Historical data last 5: {historical_data[-5:] if len(historical_data) > 0 else 'Empty'}")
|
| 344 |
+
print(f"DEBUG: Historical x length: {len(historical_x)}")
|
| 345 |
+
print(f"DEBUG: Historical x first 5: {historical_x[:5] if len(historical_x) > 0 else 'Empty'}")
|
| 346 |
+
|
| 347 |
+
# Validate data before plotting
|
| 348 |
+
if len(historical_data) == 0:
|
| 349 |
+
print("ERROR: Historical data is empty!")
|
| 350 |
+
return None
|
| 351 |
+
|
| 352 |
+
if len(historical_x) == 0:
|
| 353 |
+
print("ERROR: Historical x-axis data is empty!")
|
| 354 |
+
return None
|
| 355 |
+
|
| 356 |
+
if len(historical_data) != len(historical_x):
|
| 357 |
+
print(f"ERROR: Mismatch between historical data ({len(historical_data)}) and x-axis ({len(historical_x)}) lengths!")
|
| 358 |
+
return None
|
| 359 |
+
|
| 360 |
+
# Plot historical data
|
| 361 |
+
print(f"DEBUG: About to plot historical data with {len(historical_data)} points")
|
| 362 |
+
print(f"DEBUG: Historical data sample: {historical_data[:3]}...{historical_data[-3:]}")
|
| 363 |
+
print(f"DEBUG: Historical x sample: {historical_x[:3]}...{historical_x[-3:]}")
|
| 364 |
+
|
| 365 |
+
historical_trace = go.Scatter(
|
| 366 |
+
x=historical_x,
|
| 367 |
+
y=historical_data,
|
| 368 |
+
mode='lines',
|
| 369 |
+
name='Historical Data',
|
| 370 |
+
line=dict(color=self.colors['historical'], width=3),
|
| 371 |
+
hovertemplate='<b>Historical</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
print(f"DEBUG: Historical trace created: {historical_trace}")
|
| 375 |
+
fig.add_trace(historical_trace)
|
| 376 |
+
print(f"DEBUG: Historical trace added to figure. Figure has {len(fig.data)} traces")
|
| 377 |
+
|
| 378 |
+
# Create seamless connection for forecast
|
| 379 |
+
if dates_historical is None:
|
| 380 |
+
connection_x = [len(historical_x) - 1] + list(future_x)
|
| 381 |
+
else:
|
| 382 |
+
connection_x = [historical_x[-1]] + list(future_x)
|
| 383 |
+
|
| 384 |
+
# Plot quantile intervals if available
|
| 385 |
+
if intervals:
|
| 386 |
+
# Handle different types of intervals
|
| 387 |
+
if 'lower_80' in intervals and 'upper_80' in intervals:
|
| 388 |
+
# Traditional confidence intervals
|
| 389 |
+
interval_lower = [historical_data[-1]] + list(intervals['lower_80'])
|
| 390 |
+
interval_upper = [historical_data[-1]] + list(intervals['upper_80'])
|
| 391 |
+
|
| 392 |
+
fig.add_trace(go.Scatter(
|
| 393 |
+
x=connection_x,
|
| 394 |
+
y=interval_upper,
|
| 395 |
+
mode='lines',
|
| 396 |
+
line=dict(width=0),
|
| 397 |
+
showlegend=False,
|
| 398 |
+
hoverinfo='skip'
|
| 399 |
+
))
|
| 400 |
+
|
| 401 |
+
fig.add_trace(go.Scatter(
|
| 402 |
+
x=connection_x,
|
| 403 |
+
y=interval_lower,
|
| 404 |
+
mode='lines',
|
| 405 |
+
line=dict(width=0),
|
| 406 |
+
fill='tonexty',
|
| 407 |
+
fillcolor=self.colors['interval_80'],
|
| 408 |
+
name='80% Prediction Interval',
|
| 409 |
+
hovertemplate='<b>80% Interval</b><br>Time: %{x}<br>Upper: %{y:.2f}<extra></extra>'
|
| 410 |
+
))
|
| 411 |
+
|
| 412 |
+
# Add 50% interval if available
|
| 413 |
+
if 'lower_50' in intervals and 'upper_50' in intervals:
|
| 414 |
+
interval_lower_50 = [historical_data[-1]] + list(intervals['lower_50'])
|
| 415 |
+
interval_upper_50 = [historical_data[-1]] + list(intervals['upper_50'])
|
| 416 |
+
|
| 417 |
+
fig.add_trace(go.Scatter(
|
| 418 |
+
x=connection_x,
|
| 419 |
+
y=interval_upper_50,
|
| 420 |
+
mode='lines',
|
| 421 |
+
line=dict(width=0),
|
| 422 |
+
showlegend=False,
|
| 423 |
+
hoverinfo='skip'
|
| 424 |
+
))
|
| 425 |
+
|
| 426 |
+
fig.add_trace(go.Scatter(
|
| 427 |
+
x=connection_x,
|
| 428 |
+
y=interval_lower_50,
|
| 429 |
+
mode='lines',
|
| 430 |
+
line=dict(width=0),
|
| 431 |
+
fill='tonexty',
|
| 432 |
+
fillcolor=self.colors['interval_50'],
|
| 433 |
+
name='50% Prediction Interval',
|
| 434 |
+
hovertemplate='<b>50% Interval</b><br>Time: %{x}<br>Upper: %{y:.2f}<extra></extra>'
|
| 435 |
+
))
|
| 436 |
+
|
| 437 |
+
else:
|
| 438 |
+
# Check for generic confidence levels
|
| 439 |
+
conf_levels = []
|
| 440 |
+
for key in intervals.keys():
|
| 441 |
+
if key.startswith('lower_'):
|
| 442 |
+
conf_level = key.split('_')[1]
|
| 443 |
+
if f'upper_{conf_level}' in intervals:
|
| 444 |
+
conf_levels.append(int(conf_level))
|
| 445 |
+
|
| 446 |
+
conf_levels.sort(reverse=True) # Largest first for layering
|
| 447 |
+
|
| 448 |
+
for conf_level in conf_levels:
|
| 449 |
+
lower_key = f'lower_{conf_level}'
|
| 450 |
+
upper_key = f'upper_{conf_level}'
|
| 451 |
+
|
| 452 |
+
if lower_key in intervals and upper_key in intervals:
|
| 453 |
+
# Create seamless intervals
|
| 454 |
+
interval_lower = [historical_data[-1]] + list(intervals[lower_key])
|
| 455 |
+
interval_upper = [historical_data[-1]] + list(intervals[upper_key])
|
| 456 |
+
|
| 457 |
+
alpha = 0.3 if conf_level == max(conf_levels) else 0.5
|
| 458 |
+
color = self.colors['interval_80'] if conf_level >= 80 else self.colors['interval_50']
|
| 459 |
+
|
| 460 |
+
fig.add_trace(go.Scatter(
|
| 461 |
+
x=connection_x,
|
| 462 |
+
y=interval_upper,
|
| 463 |
+
mode='lines',
|
| 464 |
+
line=dict(width=0),
|
| 465 |
+
showlegend=False,
|
| 466 |
+
hoverinfo='skip'
|
| 467 |
+
))
|
| 468 |
+
|
| 469 |
+
fig.add_trace(go.Scatter(
|
| 470 |
+
x=connection_x,
|
| 471 |
+
y=interval_lower,
|
| 472 |
+
mode='lines',
|
| 473 |
+
line=dict(width=0),
|
| 474 |
+
fill='tonexty',
|
| 475 |
+
fillcolor=color,
|
| 476 |
+
name=f'{conf_level}% Prediction Interval',
|
| 477 |
+
hovertemplate=f'<b>{conf_level}% Interval</b><br>Time: %{{x}}<br>Upper: %{{y:.2f}}<extra></extra>'
|
| 478 |
+
))
|
| 479 |
+
|
| 480 |
+
# Handle quantile bands (new format)
|
| 481 |
+
quantile_bands = {}
|
| 482 |
+
for key in intervals.keys():
|
| 483 |
+
if key.startswith('quantile_band_') and key.endswith('_lower'):
|
| 484 |
+
band_name = key.replace('quantile_band_', '').replace('_lower', '')
|
| 485 |
+
upper_key = f'quantile_band_{band_name}_upper'
|
| 486 |
+
if upper_key in intervals:
|
| 487 |
+
quantile_bands[band_name] = {
|
| 488 |
+
'lower': intervals[key],
|
| 489 |
+
'upper': intervals[upper_key]
|
| 490 |
+
}
|
| 491 |
+
|
| 492 |
+
if quantile_bands:
|
| 493 |
+
# Define colors for different bands
|
| 494 |
+
band_colors = ['rgba(255, 153, 153, 0.3)', 'rgba(153, 204, 255, 0.3)',
|
| 495 |
+
'rgba(153, 255, 153, 0.3)', 'rgba(255, 204, 153, 0.3)',
|
| 496 |
+
'rgba(204, 153, 255, 0.3)', 'rgba(255, 255, 153, 0.3)']
|
| 497 |
+
|
| 498 |
+
for i, (band_name, band_data) in enumerate(sorted(quantile_bands.items())):
|
| 499 |
+
color = band_colors[i % len(band_colors)]
|
| 500 |
+
|
| 501 |
+
interval_lower = [historical_data[-1]] + list(band_data['lower'])
|
| 502 |
+
interval_upper = [historical_data[-1]] + list(band_data['upper'])
|
| 503 |
+
|
| 504 |
+
label_key = f'quantile_band_{band_name}_label'
|
| 505 |
+
label_text = intervals.get(label_key, f'Quantile Band {int(band_name)+1}')
|
| 506 |
+
|
| 507 |
+
fig.add_trace(go.Scatter(
|
| 508 |
+
x=connection_x,
|
| 509 |
+
y=interval_upper,
|
| 510 |
+
mode='lines',
|
| 511 |
+
line=dict(width=0),
|
| 512 |
+
showlegend=False,
|
| 513 |
+
hoverinfo='skip'
|
| 514 |
+
))
|
| 515 |
+
|
| 516 |
+
fig.add_trace(go.Scatter(
|
| 517 |
+
x=connection_x,
|
| 518 |
+
y=interval_lower,
|
| 519 |
+
mode='lines',
|
| 520 |
+
line=dict(width=0),
|
| 521 |
+
fill='tonexty',
|
| 522 |
+
fillcolor=color,
|
| 523 |
+
name=label_text,
|
| 524 |
+
hovertemplate=f'<b>{label_text}</b><br>Upper: %{{y:.2f}}<extra></extra>'
|
| 525 |
+
))
|
| 526 |
+
|
| 527 |
+
fig.add_trace(go.Scatter(
|
| 528 |
+
x=future_x,
|
| 529 |
+
y=forecast,
|
| 530 |
+
mode='lines',
|
| 531 |
+
name='Point Forecast',
|
| 532 |
+
line=dict(color=self.colors['forecast'], width=3, dash='dash'),
|
| 533 |
+
hovertemplate='<b>Forecast</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>',
|
| 534 |
+
legendgroup='forecast'
|
| 535 |
+
))
|
| 536 |
+
|
| 537 |
+
# 2) a 2-point seamless bridge with no hover/legend
|
| 538 |
+
fig.add_trace(go.Scatter(
|
| 539 |
+
x=[historical_x[-1], future_x[0]],
|
| 540 |
+
y=[historical_data[-1], forecast[0]],
|
| 541 |
+
mode='lines',
|
| 542 |
+
line=dict(color=self.colors['forecast'], width=3, dash='dash'),
|
| 543 |
+
hoverinfo='skip',
|
| 544 |
+
showlegend=False,
|
| 545 |
+
legendgroup='forecast'
|
| 546 |
+
))
|
| 547 |
+
|
| 548 |
+
# Plot actual future data if available
|
| 549 |
+
if actual_future is not None:
|
| 550 |
+
print(f"DEBUG: Plotting actual future values")
|
| 551 |
+
print(f"DEBUG: actual_future length: {len(actual_future)}")
|
| 552 |
+
print(f"DEBUG: actual_future sample: {actual_future[:3] if len(actual_future) > 0 else 'Empty'}")
|
| 553 |
+
print(f"DEBUG: dates_future length: {len(dates_future) if dates_future else 'None'}")
|
| 554 |
+
print(f"DEBUG: dates_future sample: {dates_future[:3] if dates_future and len(dates_future) > 0 else 'Empty'}")
|
| 555 |
+
print(f"DEBUG: historical_x last value: {historical_x[-1]}")
|
| 556 |
+
print(f"DEBUG: future_x first value: {future_x[0] if len(future_x) > 0 else 'Empty'}")
|
| 557 |
+
|
| 558 |
+
actual_connection = [historical_data[-1]] + list(actual_future)
|
| 559 |
+
fig.add_trace(go.Scatter(
|
| 560 |
+
x=connection_x,
|
| 561 |
+
y=actual_connection,
|
| 562 |
+
mode='lines+markers',
|
| 563 |
+
name='Actual Future',
|
| 564 |
+
line=dict(color=self.colors['actual'], width=3),
|
| 565 |
+
marker=dict(size=8, color=self.colors['actual'],
|
| 566 |
+
line=dict(width=2, color='white')),
|
| 567 |
+
hovertemplate='<b>Actual Future</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>',
|
| 568 |
+
legendgroup='actual'
|
| 569 |
+
))
|
| 570 |
+
print([historical_x[-1], connection_x[0]])
|
| 571 |
+
print([historical_data[-1], actual_connection[0]])
|
| 572 |
+
|
| 573 |
+
# 2) a 2-point seamless bridge with no hover/legend
|
| 574 |
+
fig.add_trace(go.Scatter(
|
| 575 |
+
x=[historical_x[-1], connection_x[1]],
|
| 576 |
+
y=[historical_data[-1], actual_connection[1]],
|
| 577 |
+
mode='lines',
|
| 578 |
+
line=dict(color=self.colors['actual'], width=3),
|
| 579 |
+
marker=dict(size=8, color=self.colors['actual'],
|
| 580 |
+
line=dict(width=2, color='white')),
|
| 581 |
+
hoverinfo='skip',
|
| 582 |
+
showlegend=False,
|
| 583 |
+
legendgroup='actual'
|
| 584 |
+
))
|
| 585 |
+
|
| 586 |
+
# Add forecast start line
|
| 587 |
+
fig.add_vline(
|
| 588 |
+
x=pd.to_datetime(historical_x[-1]).to_pydatetime(), # or .isoformat()
|
| 589 |
+
line_dash="dot", line_color="gray", line_width=1
|
| 590 |
+
)
|
| 591 |
+
fig.add_annotation(
|
| 592 |
+
x=pd.to_datetime(historical_x[-1]).to_pydatetime(),
|
| 593 |
+
y=1, # top of plotting area
|
| 594 |
+
xref="x",
|
| 595 |
+
yref="paper",
|
| 596 |
+
text="Forecast Start",
|
| 597 |
+
showarrow=False,
|
| 598 |
+
yanchor="bottom"
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
# Apply layout
|
| 602 |
+
layout = self._create_base_layout(title, "Time", target_name)
|
| 603 |
+
|
| 604 |
+
# Add default view range if specified
|
| 605 |
+
if context_len is not None and horizon_len is not None:
|
| 606 |
+
layout['xaxis']['range'] = default_x_range
|
| 607 |
+
|
| 608 |
+
# Add focused y-axis range if specified
|
| 609 |
+
if default_y_range is not None:
|
| 610 |
+
layout['yaxis']['range'] = default_y_range
|
| 611 |
+
|
| 612 |
+
# Add timestamp
|
| 613 |
+
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
|
| 614 |
+
layout['annotations'] = [{
|
| 615 |
+
'x': 1,
|
| 616 |
+
'y': -0.1,
|
| 617 |
+
'xref': 'paper',
|
| 618 |
+
'yref': 'paper',
|
| 619 |
+
'text': f'Generated: {timestamp}',
|
| 620 |
+
'showarrow': False,
|
| 621 |
+
'font': {'size': 10, 'color': 'gray'}
|
| 622 |
+
}]
|
| 623 |
+
|
| 624 |
+
fig.update_layout(**layout)
|
| 625 |
+
|
| 626 |
+
# Save if requested
|
| 627 |
+
if save_path:
|
| 628 |
+
if save_path.endswith('.html'):
|
| 629 |
+
fig.write_html(save_path)
|
| 630 |
+
else:
|
| 631 |
+
fig.write_image(save_path)
|
| 632 |
+
logger.info(f"Interactive plot saved to: {save_path}")
|
| 633 |
+
|
| 634 |
+
# Debug final figure
|
| 635 |
+
print(f"DEBUG: Final figure has {len(fig.data)} traces")
|
| 636 |
+
for i, trace in enumerate(fig.data):
|
| 637 |
+
print(f"DEBUG: Trace {i}: name='{trace.name}', type='{trace.type}', visible={trace.visible}")
|
| 638 |
+
if hasattr(trace, 'y') and trace.y is not None:
|
| 639 |
+
print(f"DEBUG: Trace {i} y-data length: {len(trace.y) if hasattr(trace.y, '__len__') else 'scalar'}")
|
| 640 |
+
|
| 641 |
+
# Show figure if requested
|
| 642 |
+
if show_figure:
|
| 643 |
+
fig.show()
|
| 644 |
+
|
| 645 |
+
logger.info("✅ Interactive forecast visualization completed")
|
| 646 |
+
return fig
|
| 647 |
+
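A minimal sketch of calling plot_forecast_with_intervals on synthetic data; the series values, interval width, and output file name are illustrative, and the 80% interval uses the lower_80/upper_80 keys handled above:

from src.interactive_visualization import InteractiveVisualizer

viz = InteractiveVisualizer(style="professional")
history = [100.0 + 0.5 * i for i in range(96)]            # toy context series
forecast = [history[-1] + 0.4 * i for i in range(1, 25)]  # 24-step point forecast
intervals = {
    'lower_80': [f - 5.0 for f in forecast],
    'upper_80': [f + 5.0 for f in forecast],
}
fig = viz.plot_forecast_with_intervals(
    historical_data=history,
    forecast=forecast,
    intervals=intervals,
    title="Toy Forecast with 80% Interval",
    show_figure=False,
)
fig.write_html("toy_forecast.html")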
|
| 648 |
+
def plot_forecast_with_covariates(
|
| 649 |
+
self,
|
| 650 |
+
historical_data: Union[List[float], np.ndarray],
|
| 651 |
+
forecast: Union[List[float], np.ndarray],
|
| 652 |
+
covariates_data: Dict[str, Dict[str, Union[List[float], float, str]]],
|
| 653 |
+
intervals: Optional[Dict[str, np.ndarray]] = None,
|
| 654 |
+
actual_future: Optional[Union[List[float], np.ndarray]] = None,
|
| 655 |
+
dates_historical: Optional[List[Union[str, datetime]]] = None,
|
| 656 |
+
dates_future: Optional[List[Union[str, datetime]]] = None,
|
| 657 |
+
title: str = "TimesFM Forecast with Covariates Analysis",
|
| 658 |
+
target_name: str = "Target Value",
|
| 659 |
+
save_path: Optional[str] = None,
|
| 660 |
+
show_figure: bool = True,
|
| 661 |
+
context_len: Optional[int] = None,
|
| 662 |
+
horizon_len: Optional[int] = None,
|
| 663 |
+
show_full_history: bool = True,
|
| 664 |
+
y_axis_padding: float = 0.1
|
| 665 |
+
) -> go.Figure:
|
| 666 |
+
"""
|
| 667 |
+
Create a comprehensive interactive visualization with main forecast and covariates subplots.
|
| 668 |
+
|
| 669 |
+
Args:
|
| 670 |
+
historical_data: Historical time series data
|
| 671 |
+
forecast: Point forecast values
|
| 672 |
+
covariates_data: Dictionary containing covariates information
|
| 673 |
+
intervals: Optional prediction intervals
|
| 674 |
+
actual_future: Optional actual future values
|
| 675 |
+
dates_historical: Optional historical dates
|
| 676 |
+
dates_future: Optional future dates
|
| 677 |
+
title: Main plot title
|
| 678 |
+
target_name: Name of target variable
|
| 679 |
+
save_path: Optional save path
|
| 680 |
+
show_figure: Whether to display the figure
|
| 681 |
+
context_len: Length of context window for default view focus
|
| 682 |
+
horizon_len: Length of horizon for default view focus
|
| 683 |
+
show_full_history: Whether to show full historical data (True) or just context (False)
|
| 684 |
+
|
| 685 |
+
Returns:
|
| 686 |
+
Plotly Figure object
|
| 687 |
+
"""
|
| 688 |
+
logger.info(f"Creating comprehensive interactive forecast with covariates: {title}")
|
| 689 |
+
|
| 690 |
+
# Count covariates for subplot layout
|
| 691 |
+
num_covariates = len([k for k, v in covariates_data.items()
|
| 692 |
+
if isinstance(v, dict) and 'historical' in v])
|
| 693 |
+
|
| 694 |
+
# Create subplot layout
|
| 695 |
+
if num_covariates == 0:
|
| 696 |
+
return self.plot_forecast_with_intervals(
|
| 697 |
+
historical_data, forecast, intervals, actual_future,
|
| 698 |
+
dates_historical, dates_future, title, target_name, save_path, show_figure,
|
| 699 |
+
context_len, horizon_len, y_axis_padding
|
| 700 |
+
)
|
| 701 |
+
|
| 702 |
+
# Determine grid layout
|
| 703 |
+
if num_covariates <= 2:
|
| 704 |
+
rows, cols = 2, 2
|
| 705 |
+
subplot_titles = [title] + [f'{name.replace("_", " ").title()}'
|
| 706 |
+
for name in list(covariates_data.keys())[:3]]
|
| 707 |
+
elif num_covariates <= 4:
|
| 708 |
+
rows, cols = 3, 2
|
| 709 |
+
subplot_titles = [title] + [f'{name.replace("_", " ").title()}'
|
| 710 |
+
for name in list(covariates_data.keys())[:5]]
|
| 711 |
+
else:
|
| 712 |
+
rows, cols = 4, 2
|
| 713 |
+
subplot_titles = [title] + [f'{name.replace("_", " ").title()}'
|
| 714 |
+
for name in list(covariates_data.keys())[:7]]
|
| 715 |
+
|
| 716 |
+
# Create subplots
|
| 717 |
+
fig = make_subplots(
|
| 718 |
+
rows=rows, cols=cols,
|
| 719 |
+
subplot_titles=subplot_titles,
|
| 720 |
+
vertical_spacing=0.08,
|
| 721 |
+
horizontal_spacing=0.1
|
| 722 |
+
)
|
| 723 |
+
|
| 724 |
+
# Convert data
|
| 725 |
+
historical_data = np.array(historical_data)
|
| 726 |
+
forecast = np.array(forecast)
|
| 727 |
+
|
| 728 |
+
# Setup time axes
|
| 729 |
+
if dates_historical is None:
|
| 730 |
+
historical_x = np.arange(len(historical_data))
|
| 731 |
+
future_x = np.arange(len(historical_data), len(historical_data) + len(forecast))
|
| 732 |
+
else:
|
| 733 |
+
historical_x = pd.to_datetime(dates_historical)
|
| 734 |
+
future_x = pd.to_datetime(dates_future) if dates_future is not None else None
|
| 735 |
+
|
| 736 |
+
# Plot main forecast (similar to single plot method)
|
| 737 |
+
# Historical data
|
| 738 |
+
fig.add_trace(go.Scatter(
|
| 739 |
+
x=historical_x,
|
| 740 |
+
y=historical_data,
|
| 741 |
+
mode='lines',
|
| 742 |
+
name='Historical Data',
|
| 743 |
+
line=dict(color=self.colors['historical'], width=3),
|
| 744 |
+
hovertemplate='<b>Historical</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
|
| 745 |
+
), row=1, col=1)
|
| 746 |
+
|
| 747 |
+
# Forecast with seamless connection
|
| 748 |
+
if dates_historical is None:
|
| 749 |
+
connection_x = [len(historical_data) - 1] + list(future_x)
|
| 750 |
+
else:
|
| 751 |
+
connection_x = [historical_x[-1]] + list(future_x)
|
| 752 |
+
connection_forecast = [historical_data[-1]] + list(forecast)
|
| 753 |
+
|
| 754 |
+
# Plot intervals if available
|
| 755 |
+
if intervals:
|
| 756 |
+
for key in intervals.keys():
|
| 757 |
+
if key.startswith('lower_'):
|
| 758 |
+
conf_level = key.split('_')[1]
|
| 759 |
+
upper_key = f'upper_{conf_level}'
|
| 760 |
+
if upper_key in intervals:
|
| 761 |
+
interval_lower = [historical_data[-1]] + list(intervals[key])
|
| 762 |
+
interval_upper = [historical_data[-1]] + list(intervals[upper_key])
|
| 763 |
+
|
| 764 |
+
alpha = 0.3 if int(conf_level) >= 80 else 0.5
|
| 765 |
+
color = self.colors['interval_80'] if int(conf_level) >= 80 else self.colors['interval_50']
|
| 766 |
+
|
| 767 |
+
fig.add_trace(go.Scatter(
|
| 768 |
+
x=connection_x,
|
| 769 |
+
y=interval_upper,
|
| 770 |
+
mode='lines',
|
| 771 |
+
line=dict(width=0),
|
| 772 |
+
showlegend=False,
|
| 773 |
+
hoverinfo='skip'
|
| 774 |
+
), row=1, col=1)
|
| 775 |
+
|
| 776 |
+
fig.add_trace(go.Scatter(
|
| 777 |
+
x=connection_x,
|
| 778 |
+
y=interval_lower,
|
| 779 |
+
mode='lines',
|
| 780 |
+
line=dict(width=0),
|
| 781 |
+
fill='tonexty',
|
| 782 |
+
fillcolor=color,
|
| 783 |
+
name=f'{conf_level}% Prediction Interval',
|
| 784 |
+
hovertemplate=f'<b>{conf_level}% Interval</b><br>Time: %{{x}}<br>Upper: %{{y:.2f}}<extra></extra>'
|
| 785 |
+
), row=1, col=1)
|
| 786 |
+
|
| 787 |
+
# Forecast line
|
| 788 |
+
fig.add_trace(go.Scatter(
|
| 789 |
+
x=connection_x,
|
| 790 |
+
y=connection_forecast,
|
| 791 |
+
mode='lines',
|
| 792 |
+
name='Point Forecast',
|
| 793 |
+
line=dict(color=self.colors['forecast'], width=3, dash='dash'),
|
| 794 |
+
hovertemplate='<b>Forecast</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
|
| 795 |
+
), row=1, col=1)
|
| 796 |
+
|
| 797 |
+
# Plot actual future if available
|
| 798 |
+
if actual_future is not None:
|
| 799 |
+
actual_future = np.array(actual_future)
|
| 800 |
+
actual_connection = [historical_data[-1]] + list(actual_future)
|
| 801 |
+
fig.add_trace(go.Scatter(
|
| 802 |
+
x=connection_x,
|
| 803 |
+
y=actual_connection,
|
| 804 |
+
mode='lines+markers',
|
| 805 |
+
name='Actual Future',
|
| 806 |
+
line=dict(color=self.colors['actual'], width=3),
|
| 807 |
+
marker=dict(size=8, color=self.colors['actual'],
|
| 808 |
+
line=dict(width=2, color='white')),
|
| 809 |
+
hovertemplate='<b>Actual Future</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
|
| 810 |
+
), row=1, col=1)
|
| 811 |
+
|
| 812 |
+
# Forecast start line (commented out due to datetime compatibility issues)
|
| 813 |
+
# forecast_start = historical_x[-1] if dates_historical is not None else len(historical_data) - 1
|
| 814 |
+
# fig.add_vline(
|
| 815 |
+
# x=forecast_start,
|
| 816 |
+
# line_dash="dot",
|
| 817 |
+
# line_color="gray",
|
| 818 |
+
# line_width=2,
|
| 819 |
+
# annotation_text="Forecast Start",
|
| 820 |
+
# annotation_position="top"
|
| 821 |
+
# )
|
| 822 |
+
|
| 823 |
+
# Create covariate subplots
|
| 824 |
+
covariate_colors = ['#9467bd', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#d62728']
|
| 825 |
+
|
| 826 |
+
plot_idx = 0
|
| 827 |
+
for cov_name, cov_data in covariates_data.items():
|
| 828 |
+
if not isinstance(cov_data, dict) or 'historical' not in cov_data:
|
| 829 |
+
continue
|
| 830 |
+
|
| 831 |
+
if plot_idx >= (rows - 1) * cols: # Don't exceed subplot capacity
|
| 832 |
+
break
|
| 833 |
+
|
| 834 |
+
# Calculate subplot position
|
| 835 |
+
row = 2 + plot_idx // cols
|
| 836 |
+
col = 1 + plot_idx % cols
|
| 837 |
+
color = covariate_colors[plot_idx % len(covariate_colors)]
|
| 838 |
+
|
| 839 |
+
# Plot historical covariate data
|
| 840 |
+
fig.add_trace(go.Scatter(
|
| 841 |
+
x=historical_x,
|
| 842 |
+
y=cov_data['historical'],
|
| 843 |
+
mode='lines',
|
| 844 |
+
name=f'{cov_name.replace("_", " ").title()} Historical',
|
| 845 |
+
line=dict(color=color, width=2.5),
|
| 846 |
+
hovertemplate=f'<b>{cov_name.replace("_", " ").title()}</b><br>Time: %{{x}}<br>Value: %{{y:.2f}}<extra></extra>',
|
| 847 |
+
showlegend=False
|
| 848 |
+
), row=row, col=col)
|
| 849 |
+
|
| 850 |
+
# Plot future covariate data if available
|
| 851 |
+
if 'future' in cov_data and future_x is not None:
|
| 852 |
+
combined_data = list(cov_data['historical']) + list(cov_data['future'])
|
| 853 |
+
if dates_historical is None:
|
| 854 |
+
combined_x = np.arange(len(combined_data))
|
| 855 |
+
else:
|
| 856 |
+
combined_x = list(historical_x) + list(future_x)
|
| 857 |
+
|
| 858 |
+
future_start_idx = len(cov_data['historical']) - 1
|
| 859 |
+
fig.add_trace(go.Scatter(
|
| 860 |
+
x=combined_x[future_start_idx:],
|
| 861 |
+
y=combined_data[future_start_idx:],
|
| 862 |
+
mode='lines+markers',
|
| 863 |
+
name=f'{cov_name.replace("_", " ").title()} Future',
|
| 864 |
+
line=dict(color=color, width=2.5, dash='dash'),
|
| 865 |
+
marker=dict(size=6, color=color),
|
| 866 |
+
hovertemplate=f'<b>{cov_name.replace("_", " ").title()} Future</b><br>Time: %{{x}}<br>Value: %{{y:.2f}}<extra></extra>',
|
| 867 |
+
showlegend=False
|
| 868 |
+
), row=row, col=col)
|
| 869 |
+
|
| 870 |
+
# Forecast start line for covariate (commented out due to datetime compatibility issues)
|
| 871 |
+
# fig.add_vline(
|
| 872 |
+
# x=forecast_start,
|
| 873 |
+
# line_dash="dot",
|
| 874 |
+
# line_color="gray",
|
| 875 |
+
# line_width=1,
|
| 876 |
+
# row=row, col=col
|
| 877 |
+
# )
|
| 878 |
+
|
| 879 |
+
plot_idx += 1
|
| 880 |
+
|
| 881 |
+
# Update layout
|
| 882 |
+
fig.update_layout(
|
| 883 |
+
title=f'TimesFM Comprehensive Forecasting Analysis',
|
| 884 |
+
title_x=0.5,
|
| 885 |
+
title_font_size=20,
|
| 886 |
+
height=800,
|
| 887 |
+
showlegend=True,
|
| 888 |
+
hovermode='x unified'
|
| 889 |
+
)
|
| 890 |
+
|
| 891 |
+
# Update axes
|
| 892 |
+
for i in range(1, rows + 1):
|
| 893 |
+
for j in range(1, cols + 1):
|
| 894 |
+
fig.update_xaxes(
|
| 895 |
+
title_text="Time" if i == 1 else "",
|
| 896 |
+
gridcolor=self.colors['grid'],
|
| 897 |
+
showgrid=True,
|
| 898 |
+
row=i, col=j
|
| 899 |
+
)
|
| 900 |
+
fig.update_yaxes(
|
| 901 |
+
title_text=target_name if i == 1 else "Value",
|
| 902 |
+
gridcolor=self.colors['grid'],
|
| 903 |
+
showgrid=True,
|
| 904 |
+
row=i, col=j
|
| 905 |
+
)
|
| 906 |
+
|
| 907 |
+
# Add timestamp
|
| 908 |
+
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
|
| 909 |
+
fig.add_annotation(
|
| 910 |
+
x=1, y=-0.1,
|
| 911 |
+
xref='paper', yref='paper',
|
| 912 |
+
text=f'Generated: {timestamp}',
|
| 913 |
+
showarrow=False,
|
| 914 |
+
font=dict(size=10, color='gray')
|
| 915 |
+
)
|
| 916 |
+
|
| 917 |
+
# Save if requested
|
| 918 |
+
if save_path:
|
| 919 |
+
if save_path.endswith('.html'):
|
| 920 |
+
fig.write_html(save_path)
|
| 921 |
+
else:
|
| 922 |
+
fig.write_image(save_path)
|
| 923 |
+
logger.info(f"Comprehensive interactive plot saved to: {save_path}")
|
| 924 |
+
|
| 925 |
+
# Show figure if requested
|
| 926 |
+
if show_figure:
|
| 927 |
+
fig.show()
|
| 928 |
+
|
| 929 |
+
logger.info("✅ Comprehensive interactive forecast visualization completed")
|
| 930 |
+
return fig
|
| 931 |
+
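The covariates_data argument expects one entry per covariate, each a dict with a 'historical' series aligned with historical_data and an optional 'future' series aligned with the forecast, as the loop above checks. Continuing the toy example from the previous sketch (names and values are illustrative):

volume_history = [1000.0 + 5.0 * i for i in range(96)]  # aligned with history
volume_future = [1480.0 + 5.0 * i for i in range(24)]   # aligned with forecast
covariates_data = {
    'trading_volume': {'historical': volume_history, 'future': volume_future},
}
fig = viz.plot_forecast_with_covariates(
    historical_data=history,
    forecast=forecast,
    covariates_data=covariates_data,
    show_figure=False,
)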
|
| 932 |
+
def plot_forecast_comparison(
|
| 933 |
+
self,
|
| 934 |
+
forecasts_dict: Dict[str, np.ndarray],
|
| 935 |
+
historical_data: Union[List[float], np.ndarray],
|
| 936 |
+
actual_future: Optional[Union[List[float], np.ndarray]] = None,
|
| 937 |
+
title: str = "Forecast Methods Comparison",
|
| 938 |
+
save_path: Optional[str] = None,
|
| 939 |
+
show_figure: bool = True
|
| 940 |
+
) -> go.Figure:
|
| 941 |
+
"""
|
| 942 |
+
Compare multiple forecasting methods in an interactive plot.
|
| 943 |
+
|
| 944 |
+
Args:
|
| 945 |
+
forecasts_dict: Dictionary of {method_name: forecast_array}
|
| 946 |
+
historical_data: Historical data for context
|
| 947 |
+
actual_future: Optional actual future values
|
| 948 |
+
title: Plot title
|
| 949 |
+
save_path: Optional save path
|
| 950 |
+
show_figure: Whether to display the figure
|
| 951 |
+
|
| 952 |
+
Returns:
|
| 953 |
+
            Plotly Figure object
        """
        logger.info(f"Creating interactive forecast comparison plot: {title}")

        fig = go.Figure()

        historical_data = np.array(historical_data)
        historical_x = np.arange(len(historical_data))

        # Plot historical data
        fig.add_trace(go.Scatter(
            x=historical_x,
            y=historical_data,
            mode='lines',
            name='Historical Data',
            line=dict(color=self.colors['historical'], width=3),
            hovertemplate='<b>Historical</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
        ))

        # Plot different forecasts
        forecast_colors = ['#d62728', '#ff7f0e', '#2ca02c', '#9467bd', '#8c564b']

        for i, (method, forecast) in enumerate(forecasts_dict.items()):
            forecast = np.array(forecast)
            future_x = np.arange(len(historical_data), len(historical_data) + len(forecast))

            # Seamless connection
            connection_x = [len(historical_data) - 1] + list(future_x)
            connection_forecast = [historical_data[-1]] + list(forecast)

            color = forecast_colors[i % len(forecast_colors)]
            linestyle = 'dash' if i == 0 else 'dot'

            fig.add_trace(go.Scatter(
                x=connection_x,
                y=connection_forecast,
                mode='lines',
                name=f'{method} Forecast',
                line=dict(color=color, width=3, dash=linestyle),
                hovertemplate=f'<b>{method} Forecast</b><br>Time: %{{x}}<br>Value: %{{y:.2f}}<extra></extra>'
            ))

        # Plot actual future if available
        if actual_future is not None:
            actual_future = np.array(actual_future)
            future_x = np.arange(len(historical_data), len(historical_data) + len(actual_future))
            connection_x = [len(historical_data) - 1] + list(future_x)
            actual_connection = [historical_data[-1]] + list(actual_future)

            fig.add_trace(go.Scatter(
                x=connection_x,
                y=actual_connection,
                mode='lines+markers',
                name='Actual Future',
                line=dict(color=self.colors['actual'], width=3),
                marker=dict(size=8, color=self.colors['actual'],
                            line=dict(width=2, color='white')),
                hovertemplate='<b>Actual Future</b><br>Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
            ))

        # Forecast start line
        fig.add_vline(
            x=len(historical_data) - 1,
            line_dash="dot",
            line_color="gray",
            line_width=2,
            annotation_text="Forecast Start",
            annotation_position="top"
        )

        # Apply layout
        layout = self._create_base_layout(title, "Time", "Value")
        fig.update_layout(**layout)

        # Add timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        fig.add_annotation(
            x=1, y=-0.1,
            xref='paper', yref='paper',
            text=f'Generated: {timestamp}',
            showarrow=False,
            font=dict(size=10, color='gray')
        )

        # Save if requested
        if save_path:
            if save_path.endswith('.html'):
                fig.write_html(save_path)
            else:
                fig.write_image(save_path)
            logger.info(f"Comparison plot saved to: {save_path}")

        # Show figure if requested
        if show_figure:
            fig.show()

        logger.info("✅ Interactive forecast comparison visualization completed")
        return fig

    def create_dashboard(
        self,
        historical_data: Union[List[float], np.ndarray],
        forecast: Union[List[float], np.ndarray],
        intervals: Optional[Dict[str, np.ndarray]] = None,
        covariates_data: Optional[Dict[str, Dict[str, Union[List[float], float, str]]]] = None,
        actual_future: Optional[Union[List[float], np.ndarray]] = None,
        dates_historical: Optional[List[Union[str, datetime]]] = None,
        dates_future: Optional[List[Union[str, datetime]]] = None,
        title: str = "TimesFM Forecasting Dashboard",
        target_name: str = "Value",
        save_path: Optional[str] = None,
        show_figure: bool = True,
        context_len: Optional[int] = None,
        horizon_len: Optional[int] = None,
        show_full_history: bool = True,
        y_axis_padding: float = 0.1
    ) -> go.Figure:
        """
        Create a comprehensive dashboard with multiple visualization panels.

        Args:
            historical_data: Historical time series data
            forecast: Point forecast values
            intervals: Optional prediction intervals
            covariates_data: Optional covariates data
            actual_future: Optional actual future values
            dates_historical: Optional historical dates
            dates_future: Optional future dates
            title: Dashboard title
            target_name: Name of target variable
            save_path: Optional save path
            show_figure: Whether to display the figure

        Returns:
            Plotly Figure object
        """
        logger.info(f"Creating interactive forecasting dashboard: {title}")

        # If covariates are provided, use the comprehensive view
        if covariates_data and len(covariates_data) > 0:
            return self.plot_forecast_with_covariates(
                historical_data, forecast, covariates_data, intervals,
                actual_future, dates_historical, dates_future,
                title, target_name, save_path, show_figure,
                context_len, horizon_len, show_full_history, y_axis_padding
            )
        else:
            # Otherwise, use the standard forecast view
            return self.plot_forecast_with_intervals(
                historical_data, forecast, intervals, actual_future,
                dates_historical, dates_future, title, target_name, save_path, show_figure,
                context_len, horizon_len, show_full_history, y_axis_padding
            )

    def export_to_json(self, fig: go.Figure, file_path: str) -> None:
        """
        Export a Plotly figure to JSON format for web integration.

        Args:
            fig: Plotly Figure object
            file_path: Path to save the JSON file
        """
        fig.write_json(file_path)
        logger.info(f"Figure exported to JSON: {file_path}")

    def get_figure_html(self, fig: go.Figure, include_plotlyjs: bool = True) -> str:
        """
        Get the HTML representation of a figure.

        Args:
            fig: Plotly Figure object
            include_plotlyjs: Whether to include Plotly.js in the HTML

        Returns:
            HTML string representation of the figure
        """
        return fig.to_html(include_plotlyjs=include_plotlyjs)
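
For orientation, a minimal usage sketch of the exporter methods above. It is not part of the committed files; the synthetic series and the results/dashboard.json path are invented, and it assumes src/ is on the import path:

import numpy as np
from interactive_visualization import InteractiveVisualizer

# Illustrative data only: a smooth synthetic history and a naive 24-step forecast.
history = 100 + 10 * np.sin(np.linspace(0, 12, 120))
forecast = np.repeat(history[-1], 24)

viz = InteractiveVisualizer(style="professional")
fig = viz.create_dashboard(history, forecast, show_figure=False)

# Two hand-off formats for the web layer: raw Plotly JSON, or an HTML snippet
# without the bundled Plotly.js (the page is assumed to load it separately).
viz.export_to_json(fig, "results/dashboard.json")
snippet = viz.get_figure_html(fig, include_plotlyjs=False)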
src/model.py
ADDED
@@ -0,0 +1,356 @@
"""
TimesFM Model Initialization and Configuration

This module provides a unified interface for initializing and configuring
Google's TimesFM foundation model for time series forecasting.

Key Features:
- Support for both HuggingFace checkpoints and local model paths
- Automatic backend detection (CPU/GPU/TPU)
- Configurable model parameters optimized for financial time series
- Built-in model validation and testing
"""

import logging
from typing import Optional, Dict, Any, Tuple
import numpy as np
import timesfm

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class TimesFMModel:
    """
    A wrapper class for TimesFM model initialization and configuration.

    This class provides a unified interface for loading TimesFM models from
    either HuggingFace checkpoints or local paths, with automatic parameter
    optimization and validation.

    Example:
        >>> model_wrapper = TimesFMModel(
        ...     backend="cpu",
        ...     context_len=512,
        ...     horizon_len=24
        ... )
        >>> model = model_wrapper.load_model()
        >>> forecast, _ = model.forecast(inputs=[[1, 2, 3, 4, 5]], freq=[0])
    """

    def __init__(
        self,
        backend: str = "cpu",
        context_len: int = 512,
        horizon_len: int = 24,
        per_core_batch_size: Optional[int] = None,
        checkpoint: Optional[str] = None,
        local_model_path: Optional[str] = None,
        num_layers: int = 50,
        use_positional_embedding: bool = False,
        input_patch_len=32,
        output_patch_len=128,
    ):
        """
        Initialize TimesFM model configuration.

        Args:
            backend: Computing backend ("cpu", "gpu", "tpu")
            context_len: Maximum context length for input time series
            horizon_len: Forecast horizon length
            per_core_batch_size: Batch size per core (auto-configured if None)
            checkpoint: HuggingFace checkpoint repo ID
            local_model_path: Path to local model checkpoint
            num_layers: Number of model layers (must match checkpoint)
            use_positional_embedding: Whether to use positional embeddings

        Raises:
            ValueError: If both checkpoint and local_model_path are specified
        """
        self.backend = backend
        self.context_len = context_len
        self.horizon_len = horizon_len
        self.num_layers = num_layers
        self.use_positional_embedding = use_positional_embedding
        self.input_patch_len = input_patch_len
        self.output_patch_len = output_patch_len

        # Validate checkpoint configuration
        if checkpoint and local_model_path:
            raise ValueError("Cannot specify both checkpoint and local_model_path")

        # Set default checkpoint if none specified
        if not checkpoint and not local_model_path:
            checkpoint = "google/timesfm-2.0-500m-pytorch"  # Default to PyTorch version

        self.checkpoint = checkpoint
        self.local_model_path = local_model_path

        # Auto-configure batch size based on backend
        if per_core_batch_size is None:
            self.per_core_batch_size = self._auto_configure_batch_size()
        else:
            self.per_core_batch_size = per_core_batch_size

        self.model = None

        logger.info(f"TimesFM Model Configuration:")
        logger.info(f"  Backend: {self.backend}")
        logger.info(f"  Context Length: {self.context_len}")
        logger.info(f"  Horizon Length: {self.horizon_len}")
        logger.info(f"  Batch Size: {self.per_core_batch_size}")
        logger.info(f"  Layers: {self.num_layers}")
        if checkpoint:
            logger.info(f"  Checkpoint: {checkpoint}")
        if local_model_path:
            logger.info(f"  Local Model: {local_model_path}")

    def _auto_configure_batch_size(self) -> int:
        """
        Automatically configure batch size based on backend and available resources.

        Returns:
            Optimal batch size for the specified backend
        """
        if self.backend == "cpu":
            return 1  # Conservative for CPU
        elif self.backend == "gpu":
            return 8  # Moderate for GPU
        elif self.backend == "tpu":
            return 32  # Aggressive for TPU
        else:
            logger.warning(f"Unknown backend '{self.backend}', using default batch size")
            return 1

    def load_model(self) -> timesfm.TimesFm:
        """
        Load and initialize the TimesFM model.

        This method creates the TimesFM model with the specified configuration,
        loads the checkpoint, and performs basic validation.

        Returns:
            Initialized TimesFM model instance

        Raises:
            Exception: If model loading fails
        """
        try:
            logger.info("Initializing TimesFM model...")

            # Create model hyperparameters
            hparams = timesfm.TimesFmHparams(
                backend=self.backend,
                per_core_batch_size=self.per_core_batch_size,
                horizon_len=self.horizon_len,
                num_layers=self.num_layers,
                use_positional_embedding=self.use_positional_embedding,
                context_len=self.context_len,
                input_patch_len=self.input_patch_len,
                output_patch_len=self.output_patch_len,
            )

            # Create checkpoint configuration
            if self.checkpoint:
                # Load from HuggingFace
                checkpoint_config = timesfm.TimesFmCheckpoint(
                    huggingface_repo_id=self.checkpoint
                )
                logger.info(f"Loading from HuggingFace: {self.checkpoint}")
            else:
                # Load from local path
                checkpoint_config = timesfm.TimesFmCheckpoint(
                    path=self.local_model_path
                )
                logger.info(f"Loading from local path: {self.local_model_path}")

            # Initialize model
            self.model = timesfm.TimesFm(
                hparams=hparams,
                checkpoint=checkpoint_config
            )

            # Validate model functionality
            # Note: Temporarily disabled validation due to shape constraints
            # self._validate_model()
            logger.info("⚠️ Model validation skipped due to TimesFM shape constraints")

            logger.info("✅ TimesFM model loaded successfully!")
            return self.model

        except Exception as e:
            logger.error(f"❌ Failed to load TimesFM model: {str(e)}")
            raise

    def _validate_model(self) -> None:
        """
        Validate that the model is working correctly with a simple test.

        Raises:
            Exception: If model validation fails
        """
        try:
            logger.info("Validating model functionality...")

            # Create test data with sufficient length (at least 32 points for reshaping)
            # Use a simple linear pattern that should work with any model architecture
            test_length = max(32, self.context_len // 4)  # Ensure minimum length
            test_data = [float(i + 1) for i in range(test_length)]
            test_inputs = [test_data]
            test_freq = [0]  # Generic frequency

            # Test basic forecasting
            forecast, _ = self.model.forecast(inputs=test_inputs, freq=test_freq)
            forecast_array = np.array(forecast)

            # Validate output shape
            expected_shape = (1, self.horizon_len)
            if forecast_array.shape != expected_shape:
                raise ValueError(f"Unexpected forecast shape: {forecast_array.shape}, expected: {expected_shape}")

            # Test quantile forecasting if available
            if hasattr(self.model, 'experimental_quantile_forecast'):
                logger.info("Testing quantile forecasting capability...")
                quantile_forecast = self.model.experimental_quantile_forecast(
                    inputs=test_inputs,
                    freq=test_freq
                )
                logger.info("✅ Quantile forecasting available")
            else:
                logger.warning("⚠️ Quantile forecasting not available")

            # Test covariates functionality if available
            if hasattr(self.model, 'forecast_with_covariates'):
                logger.info("✅ Covariates functionality available")
            else:
                logger.warning("⚠️ Covariates functionality not available")

            logger.info(f"✅ Model validation passed! Output shape: {forecast_array.shape}")

        except Exception as e:
            logger.error(f"❌ Model validation failed: {str(e)}")
            raise

    def get_model_info(self) -> Dict[str, Any]:
        """
        Get comprehensive information about the loaded model.

        Returns:
            Dictionary containing model configuration and capabilities
        """
        if not self.model:
            return {"status": "Model not loaded"}

        info = {
            "status": "loaded",
            "backend": self.backend,
            "context_len": self.context_len,
            "horizon_len": self.horizon_len,
            "batch_size": self.per_core_batch_size,
            "num_layers": self.num_layers,
            "positional_embedding": self.use_positional_embedding,
            "capabilities": {
                "basic_forecasting": True,
                "quantile_forecasting": True,
                "covariates_support": hasattr(self.model, 'forecast_with_covariates')
            }
        }

        if self.checkpoint:
            info["checkpoint"] = self.checkpoint
        if self.local_model_path:
            info["local_model_path"] = self.local_model_path

        return info

    def update_horizon(self, new_horizon: int) -> None:
        """
        Update the forecast horizon length.

        Note: This requires reloading the model to take effect.

        Args:
            new_horizon: New forecast horizon length
        """
        logger.info(f"Updating horizon length from {self.horizon_len} to {new_horizon}")
        self.horizon_len = new_horizon

        if self.model:
            logger.warning("Model needs to be reloaded for horizon change to take effect")

    def update_context(self, new_context: int) -> None:
        """
        Update the context length.

        Note: This requires reloading the model to take effect.

        Args:
            new_context: New context length
        """
        logger.info(f"Updating context length from {self.context_len} to {new_context}")
        self.context_len = new_context

        if self.model:
            logger.warning("Model needs to be reloaded for context change to take effect")


def initialize_timesfm_model(
    backend: str = "cpu",
    context_len: int = 100,
    horizon_len: int = 24,
    checkpoint: Optional[str] = None,
    local_model_path: Optional[str] = None
) -> Tuple[TimesFMModel, 'Forecaster', 'InteractiveVisualizer']:
    """
    Centralized function to initialize TimesFM model with all required components.

    This function encapsulates the complete model loading and initialization process,
    including the creation of TimesFMModel, Forecaster, and Visualizer objects.

    Args:
        backend: Computing backend ("cpu", "gpu", "tpu")
        context_len: Maximum context length for input time series
        horizon_len: Forecast horizon length
        checkpoint: HuggingFace checkpoint repo ID
        local_model_path: Path to local model checkpoint

    Returns:
        Tuple of (model_wrapper, forecaster, visualizer)

    Raises:
        Exception: If model initialization fails
    """
    logger.info("🚀 Initializing TimesFM model with centralized function...")

    try:
        # Import here to avoid circular imports
        from forecast import Forecaster
        from interactive_visualization import InteractiveVisualizer

        # Create model wrapper
        model_wrapper = TimesFMModel(
            backend=backend,
            context_len=context_len,
            horizon_len=horizon_len,
            checkpoint=checkpoint,
            local_model_path=local_model_path
        )

        # Load the actual TimesFM model
        timesfm_model = model_wrapper.load_model()

        # Create forecaster and visualizer
        forecaster = Forecaster(timesfm_model)
        visualizer = InteractiveVisualizer(style="professional")

        logger.info("✅ TimesFM model initialization completed successfully!")
        logger.info(f"  Model: {model_wrapper.checkpoint or model_wrapper.local_model_path}")
        logger.info(f"  Backend: {backend}")
        logger.info(f"  Context: {context_len}, Horizon: {horizon_len}")

        return model_wrapper, forecaster, visualizer

    except Exception as e:
        logger.error(f"❌ TimesFM model initialization failed: {str(e)}")
        raise
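
As a quick orientation on the wiring above, a sketch of calling the centralized initializer from a script. The values are illustrative only, and it assumes src/ is on sys.path (the webapp below arranges this explicitly):

from model import initialize_timesfm_model

# Build the wrapper, load the default HuggingFace checkpoint, and get the helpers.
model_wrapper, forecaster, visualizer = initialize_timesfm_model(
    backend="cpu",
    context_len=512,
    horizon_len=24,
)

# The raw TimesFM model remains reachable for direct calls.
history = [float(i) for i in range(128)]  # made-up input series
point_forecast, _ = model_wrapper.model.forecast(inputs=[history], freq=[0])
print(model_wrapper.get_model_info()["capabilities"])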
src/visualization.py
ADDED
@@ -0,0 +1,618 @@
"""
Professional Visualization Module for TimesFM Forecasting

This module provides comprehensive visualization capabilities for TimesFM forecasting,
including professional-grade plots with prediction intervals, covariates displays,
and publication-ready styling.

Key Features:
- Professional forecast visualizations with seamless connections
- Prediction intervals with customizable confidence levels
- Covariates subplots integration
- Sapheneia-style professional formatting
- Interactive and static plot options
- Export capabilities for presentations and publications
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from datetime import datetime
from typing import List, Dict, Optional, Union
import logging

logger = logging.getLogger(__name__)

# Set professional style
plt.style.use('seaborn-v0_8')
sns.set_palette("husl")


class Visualizer:
    """
    Professional visualization class for TimesFM forecasting results.

    This class provides methods to create publication-quality visualizations
    of forecasting results, including prediction intervals, covariates analysis,
    and comprehensive time series plots.

    Example:
        >>> viz = Visualizer()
        >>> fig = viz.plot_forecast_with_intervals(
        ...     historical_data=historical,
        ...     forecast=point_forecast,
        ...     intervals=prediction_intervals,
        ...     title="Bitcoin Price Forecast"
        ... )
    """

    def __init__(self, style: str = "professional"):
        """
        Initialize the Visualizer with specified styling.

        Args:
            style: Visualization style ("professional", "minimal", "presentation")
        """
        self.style = style
        self._setup_style()
        logger.info(f"Visualizer initialized with '{style}' style")

    def _setup_style(self) -> None:
        """Set up the visualization style and parameters."""
        if self.style == "professional":
            # Sapheneia professional style
            self.colors = {
                'historical': '#1f77b4',
                'forecast': '#d62728',
                'actual': '#2ca02c',
                'interval_80': '#ffb366',
                'interval_50': '#ff7f0e',
                'grid': '#e0e0e0',
                'background': '#fafafa'
            }
            self.figsize = (16, 12)

        elif self.style == "minimal":
            # Clean minimal style
            self.colors = {
                'historical': '#2E86AB',
                'forecast': '#A23B72',
                'actual': '#F18F01',
                'interval_80': '#C73E1D',
                'interval_50': '#F18F01',
                'grid': '#f0f0f0',
                'background': 'white'
            }
            self.figsize = (14, 10)

        else:  # presentation
            # High contrast for presentations
            self.colors = {
                'historical': '#003f5c',
                'forecast': '#ff6361',
                'actual': '#58508d',
                'interval_80': '#ffa600',
                'interval_50': '#ff6361',
                'grid': '#e8e8e8',
                'background': 'white'
            }
            self.figsize = (18, 14)

    def plot_forecast_with_intervals(
        self,
        historical_data: Union[List[float], np.ndarray],
        forecast: Union[List[float], np.ndarray],
        intervals: Optional[Dict[str, np.ndarray]] = None,
        actual_future: Optional[Union[List[float], np.ndarray]] = None,
        dates_historical: Optional[List[Union[str, datetime]]] = None,
        dates_future: Optional[List[Union[str, datetime]]] = None,
        title: str = "TimesFM Forecast with Prediction Intervals",
        target_name: str = "Value",
        save_path: Optional[str] = None
    ) -> plt.Figure:
        """
        Create a professional forecast visualization with prediction intervals.

        Args:
            historical_data: Historical time series data
            forecast: Point forecast values
            intervals: Dictionary containing prediction intervals
            actual_future: Optional actual future values for comparison
            dates_historical: Optional dates for historical data
            dates_future: Optional dates for forecast period
            title: Plot title
            target_name: Name of the target variable
            save_path: Optional path to save the plot

        Returns:
            Matplotlib Figure object
        """
        logger.info(f"Creating forecast visualization: {title}")

        # Convert to numpy arrays
        historical_data = np.array(historical_data)
        forecast = np.array(forecast)
        if actual_future is not None:
            actual_future = np.array(actual_future)

        # Create figure
        fig, ax = plt.subplots(figsize=self.figsize)
        ax.set_facecolor(self.colors['background'])

        # Setup time axis
        if dates_historical is None:
            historical_x = np.arange(len(historical_data))
        else:
            historical_x = pd.to_datetime(dates_historical)

        if dates_future is None:
            future_x = np.arange(len(historical_data), len(historical_data) + len(forecast))
        else:
            future_x = pd.to_datetime(dates_future)

        # Plot historical data
        ax.plot(historical_x, historical_data,
                color=self.colors['historical'], linewidth=2.5,
                label='Historical Data', zorder=5)

        # Create seamless connection for forecast
        if dates_historical is None:
            connection_x = [len(historical_data) - 1] + list(future_x)
        else:
            connection_x = [historical_x[-1]] + list(future_x)

        connection_forecast = [historical_data[-1]] + list(forecast)

        # Plot quantile intervals if available
        if intervals:
            # Handle different types of intervals
            if 'lower_80' in intervals and 'upper_80' in intervals:
                # Traditional confidence intervals
                interval_lower = [historical_data[-1]] + list(intervals['lower_80'])
                interval_upper = [historical_data[-1]] + list(intervals['upper_80'])

                ax.fill_between(connection_x, interval_lower, interval_upper,
                                alpha=0.3, color=self.colors['interval_80'],
                                label='80% Quantile Interval', zorder=1)

                # Add 50% interval if available
                if 'lower_50' in intervals and 'upper_50' in intervals:
                    interval_lower_50 = [historical_data[-1]] + list(intervals['lower_50'])
                    interval_upper_50 = [historical_data[-1]] + list(intervals['upper_50'])

                    ax.fill_between(connection_x, interval_lower_50, interval_upper_50,
                                    alpha=0.5, color=self.colors['interval_50'],
                                    label='50% Quantile Interval', zorder=2)

            else:
                # Check for generic confidence levels
                conf_levels = []
                for key in intervals.keys():
                    if key.startswith('lower_'):
                        conf_level = key.split('_')[1]
                        if f'upper_{conf_level}' in intervals:
                            conf_levels.append(int(conf_level))

                conf_levels.sort(reverse=True)  # Largest first for layering

                for conf_level in conf_levels:
                    lower_key = f'lower_{conf_level}'
                    upper_key = f'upper_{conf_level}'

                    if lower_key in intervals and upper_key in intervals:
                        # Create seamless intervals
                        interval_lower = [historical_data[-1]] + list(intervals[lower_key])
                        interval_upper = [historical_data[-1]] + list(intervals[upper_key])

                        alpha = 0.3 if conf_level == max(conf_levels) else 0.5
                        color = self.colors['interval_80'] if conf_level >= 80 else self.colors['interval_50']

                        ax.fill_between(connection_x, interval_lower, interval_upper,
                                        alpha=alpha, color=color,
                                        label=f'{conf_level}% Quantile Interval', zorder=1)

            # Handle quantile bands (new format)
            quantile_bands = {}
            for key in intervals.keys():
                if key.startswith('quantile_band_') and key.endswith('_lower'):
                    band_name = key.replace('quantile_band_', '').replace('_lower', '')
                    upper_key = f'quantile_band_{band_name}_upper'
                    if upper_key in intervals:
                        quantile_bands[band_name] = {
                            'lower': intervals[key],
                            'upper': intervals[upper_key]
                        }

            if quantile_bands:
                # Define colors for different bands
                band_colors = ['#ff9999', '#99ccff', '#99ff99', '#ffcc99', '#cc99ff', '#ffff99']

                logger.info(f"Processing {len(quantile_bands)} quantile bands")
                logger.info(f"Connection_x length: {len(connection_x)}, Forecast length: {len(forecast)}")

                for i, (band_name, band_data) in enumerate(sorted(quantile_bands.items())):
                    color = band_colors[i % len(band_colors)]
                    alpha = 0.3 + (0.2 * (1 - i / max(1, len(quantile_bands) - 1)))  # Vary alpha

                    # Ensure quantile band data matches forecast length
                    lower_values = band_data['lower']
                    upper_values = band_data['upper']

                    logger.info(f"Band {band_name}: lower length={len(lower_values)}, upper length={len(upper_values)}")

                    # Truncate or pad to match forecast length
                    if len(lower_values) > len(forecast):
                        lower_values = lower_values[:len(forecast)]
                        upper_values = upper_values[:len(forecast)]
                        logger.info(f"Truncated band {band_name} to forecast length")
                    elif len(lower_values) < len(forecast):
                        # Pad with last value if too short
                        last_lower = lower_values[-1] if lower_values else 0
                        last_upper = upper_values[-1] if upper_values else 0
                        lower_values = list(lower_values) + [last_lower] * (len(forecast) - len(lower_values))
                        upper_values = list(upper_values) + [last_upper] * (len(forecast) - len(upper_values))
                        logger.info(f"Padded band {band_name} to forecast length")

                    interval_lower = [historical_data[-1]] + list(lower_values)
                    interval_upper = [historical_data[-1]] + list(upper_values)

                    logger.info(f"Final interval lengths: lower={len(interval_lower)}, upper={len(interval_upper)}, connection_x={len(connection_x)}")

                    label_key = f'quantile_band_{band_name}_label'
                    label_text = intervals.get(label_key, f'Quantile Band {int(band_name)+1}')

                    ax.fill_between(connection_x, interval_lower, interval_upper,
                                    alpha=alpha, color=color,
                                    label=label_text, zorder=1)

        # Plot forecast line
        ax.plot(connection_x, connection_forecast,
                color=self.colors['forecast'], linestyle='--', linewidth=2.5,
                label='Point Forecast', zorder=4)

        # Plot actual future data if available
        if actual_future is not None:
            actual_connection = [historical_data[-1]] + list(actual_future)
            ax.plot(connection_x, actual_connection,
                    color=self.colors['actual'], linewidth=3,
                    marker='o', markersize=6, markeredgecolor='white',
                    markeredgewidth=1, label='Actual Future', zorder=6)

        # Add forecast start line
        forecast_start = historical_x[-1] if dates_historical else len(historical_data) - 1
        ax.axvline(x=forecast_start, color='gray', linestyle=':',
                   alpha=0.7, linewidth=1.5, label='Forecast Start')

        # Styling
        ax.set_title(title, fontsize=18, fontweight='bold', pad=20)
        ax.set_ylabel(target_name, fontsize=14, fontweight='bold')
        ax.set_xlabel('Time', fontsize=14, fontweight='bold')

        # Grid
        ax.grid(True, alpha=0.3, linestyle='-', linewidth=0.5, color=self.colors['grid'])

        # Legend
        legend = ax.legend(loc='upper left', fontsize=12, frameon=True,
                           fancybox=True, shadow=True, framealpha=0.95)
        legend.get_frame().set_facecolor('white')

        # Format axes
        ax.tick_params(labelsize=12)

        # Format dates if using datetime
        if dates_historical is not None:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
            ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        # Add timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        fig.text(0.99, 0.01, f'Generated: {timestamp}', ha='right', va='bottom',
                 fontsize=10, alpha=0.7)

        plt.tight_layout()

        # Save if requested
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', facecolor='white')
            logger.info(f"Plot saved to: {save_path}")

        logger.info("✅ Forecast visualization completed")
        return fig

    def plot_forecast_with_covariates(
        self,
        historical_data: Union[List[float], np.ndarray],
        forecast: Union[List[float], np.ndarray],
        covariates_data: Dict[str, Dict[str, Union[List[float], float, str]]],
        intervals: Optional[Dict[str, np.ndarray]] = None,
        actual_future: Optional[Union[List[float], np.ndarray]] = None,
        dates_historical: Optional[List[Union[str, datetime]]] = None,
        dates_future: Optional[List[Union[str, datetime]]] = None,
        title: str = "TimesFM Forecast with Covariates Analysis",
        target_name: str = "Target Value",
        save_path: Optional[str] = None
    ) -> plt.Figure:
        """
        Create a comprehensive visualization with main forecast and covariates subplots.

        Args:
            historical_data: Historical time series data
            forecast: Point forecast values
            covariates_data: Dictionary containing covariates information
            intervals: Optional prediction intervals
            actual_future: Optional actual future values
            dates_historical: Optional historical dates
            dates_future: Optional future dates
            title: Main plot title
            target_name: Name of target variable
            save_path: Optional save path

        Returns:
            Matplotlib Figure object
        """
        logger.info(f"Creating comprehensive forecast with covariates: {title}")

        # Count covariates for subplot layout
        num_covariates = len([k for k, v in covariates_data.items()
                              if isinstance(v, dict) and 'historical' in v])

        # Create subplot layout
        if num_covariates == 0:
            return self.plot_forecast_with_intervals(
                historical_data, forecast, intervals, actual_future,
                dates_historical, dates_future, title, target_name, save_path
            )

        # Determine grid layout
        if num_covariates <= 2:
            rows, cols = 2, 2
            height_ratios = [3, 1]
        elif num_covariates <= 4:
            rows, cols = 3, 2
            height_ratios = [3, 1, 1]
        else:
            rows, cols = 4, 2
            height_ratios = [3, 1, 1, 1]

        fig = plt.figure(figsize=(18, 14))
        gs = fig.add_gridspec(rows, cols, height_ratios=height_ratios,
                              hspace=0.35, wspace=0.25)

        # Main forecast plot (top row, full width)
        ax_main = fig.add_subplot(gs[0, :])

        # Convert data
        historical_data = np.array(historical_data)
        forecast = np.array(forecast)

        # Setup time axes
        if dates_historical is None:
            historical_x = np.arange(len(historical_data))
            future_x = np.arange(len(historical_data), len(historical_data) + len(forecast))
        else:
            historical_x = pd.to_datetime(dates_historical)
            future_x = pd.to_datetime(dates_future) if dates_future else None

        # Plot main forecast (similar to single plot method)
        ax_main.set_facecolor(self.colors['background'])
        ax_main.plot(historical_x, historical_data,
                     color=self.colors['historical'], linewidth=2.5,
                     label='Historical Data', zorder=5)

        # Forecast with seamless connection
        if dates_historical is None:
            connection_x = [len(historical_data) - 1] + list(future_x)
        else:
            connection_x = [historical_x[-1]] + list(future_x)
        connection_forecast = [historical_data[-1]] + list(forecast)

        # Plot intervals if available
        if intervals:
            for key in intervals.keys():
                if key.startswith('lower_'):
                    conf_level = key.split('_')[1]
                    upper_key = f'upper_{conf_level}'
                    if upper_key in intervals:
                        interval_lower = [historical_data[-1]] + list(intervals[key])
                        interval_upper = [historical_data[-1]] + list(intervals[upper_key])

                        alpha = 0.3 if int(conf_level) >= 80 else 0.5
                        color = self.colors['interval_80'] if int(conf_level) >= 80 else self.colors['interval_50']

                        ax_main.fill_between(connection_x, interval_lower, interval_upper,
                                             alpha=alpha, color=color,
                                             label=f'{conf_level}% Prediction Interval')

        ax_main.plot(connection_x, connection_forecast,
                     color=self.colors['forecast'], linestyle='--', linewidth=2.5,
                     label='Point Forecast', zorder=4)

        # Plot actual future if available
        if actual_future is not None:
            actual_future = np.array(actual_future)
            actual_connection = [historical_data[-1]] + list(actual_future)
            ax_main.plot(connection_x, actual_connection,
                         color=self.colors['actual'], linewidth=3,
                         marker='o', markersize=6, markeredgecolor='white',
                         markeredgewidth=1, label='Actual Future', zorder=6)

        # Forecast start line
        forecast_start = historical_x[-1] if dates_historical else len(historical_data) - 1
        ax_main.axvline(x=forecast_start, color='gray', linestyle=':',
                        alpha=0.7, linewidth=1.5, label='Forecast Start')

        # Main plot styling
        ax_main.set_title(title, fontsize=18, fontweight='bold', pad=20)
        ax_main.set_ylabel(target_name, fontsize=14, fontweight='bold')
        ax_main.grid(True, alpha=0.3, color=self.colors['grid'])
        ax_main.tick_params(labelsize=12)
        legend = ax_main.legend(loc='upper left', fontsize=12, frameon=True)
        legend.get_frame().set_facecolor('white')

        # Create covariate subplots
        covariate_colors = ['#9467bd', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#d62728']

        plot_idx = 0
        for cov_name, cov_data in covariates_data.items():
            if not isinstance(cov_data, dict) or 'historical' not in cov_data:
                continue

            if plot_idx >= (rows - 1) * cols:  # Don't exceed subplot capacity
                break

            # Calculate subplot position
            row = 1 + plot_idx // cols
            col = plot_idx % cols
            ax_cov = fig.add_subplot(gs[row, col])

            color = covariate_colors[plot_idx % len(covariate_colors)]

            # Plot historical covariate data
            ax_cov.plot(historical_x, cov_data['historical'],
                        color=color, linewidth=2.5, alpha=0.8, label='Historical')

            # Plot future covariate data if available
            if 'future' in cov_data and future_x is not None:
                combined_data = list(cov_data['historical']) + list(cov_data['future'])
                if dates_historical is None:
                    combined_x = np.arange(len(combined_data))
                else:
                    combined_x = list(historical_x) + list(future_x)

                future_start_idx = len(cov_data['historical']) - 1
                ax_cov.plot(combined_x[future_start_idx:], combined_data[future_start_idx:],
                            color=color, linewidth=2.5, linestyle='--', alpha=0.9,
                            marker='s', markersize=4, label='Future')

            # Forecast start line
            ax_cov.axvline(x=forecast_start, color='gray', linestyle=':', alpha=0.5)

            # Styling
            ax_cov.set_title(f'{cov_name.replace("_", " ").title()}',
                             fontsize=12, fontweight='bold')
            ax_cov.set_ylabel('Value', fontsize=10)
            ax_cov.grid(True, alpha=0.3, color=self.colors['grid'])
            ax_cov.tick_params(labelsize=9)
            ax_cov.legend(fontsize=8, loc='upper left')
            ax_cov.set_facecolor(self.colors['background'])

            plot_idx += 1

        # Format x-axis for dates
        if dates_historical is not None:
            for ax in fig.get_axes():
                ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d'))
                plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)

        # Overall title and timestamp
        fig.suptitle('TimesFM Comprehensive Forecasting Analysis',
                     fontsize=20, fontweight='bold', y=0.98)

        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        fig.text(0.99, 0.01, f'Generated: {timestamp}', ha='right', va='bottom',
                 fontsize=10, alpha=0.7)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', facecolor='white')
            logger.info(f"Comprehensive plot saved to: {save_path}")

        logger.info("✅ Comprehensive forecast visualization completed")
        return fig

    def plot_forecast_comparison(
        self,
        forecasts_dict: Dict[str, np.ndarray],
        historical_data: Union[List[float], np.ndarray],
        actual_future: Optional[Union[List[float], np.ndarray]] = None,
        title: str = "Forecast Methods Comparison",
        save_path: Optional[str] = None
    ) -> plt.Figure:
        """
        Compare multiple forecasting methods in a single plot.

        Args:
            forecasts_dict: Dictionary of {method_name: forecast_array}
            historical_data: Historical data for context
            actual_future: Optional actual future values
            title: Plot title
            save_path: Optional save path

        Returns:
            Matplotlib Figure object
        """
        logger.info(f"Creating forecast comparison plot: {title}")

        fig, ax = plt.subplots(figsize=self.figsize)
        ax.set_facecolor(self.colors['background'])

        historical_data = np.array(historical_data)
        historical_x = np.arange(len(historical_data))

        # Plot historical data
        ax.plot(historical_x, historical_data,
                color=self.colors['historical'], linewidth=2.5,
                label='Historical Data', zorder=5)

        # Plot different forecasts
        forecast_colors = ['#d62728', '#ff7f0e', '#2ca02c', '#9467bd', '#8c564b']

        for i, (method, forecast) in enumerate(forecasts_dict.items()):
            forecast = np.array(forecast)
            future_x = np.arange(len(historical_data), len(historical_data) + len(forecast))

            # Seamless connection
            connection_x = [len(historical_data) - 1] + list(future_x)
            connection_forecast = [historical_data[-1]] + list(forecast)

            color = forecast_colors[i % len(forecast_colors)]
            linestyle = '--' if i == 0 else '-.'

            ax.plot(connection_x, connection_forecast,
                    color=color, linestyle=linestyle, linewidth=2.5,
                    label=f'{method} Forecast', zorder=3)

        # Plot actual future if available
        if actual_future is not None:
            actual_future = np.array(actual_future)
            future_x = np.arange(len(historical_data), len(historical_data) + len(actual_future))
            connection_x = [len(historical_data) - 1] + list(future_x)
            actual_connection = [historical_data[-1]] + list(actual_future)

            ax.plot(connection_x, actual_connection,
                    color=self.colors['actual'], linewidth=3,
                    marker='o', markersize=6, markeredgecolor='white',
                    markeredgewidth=1, label='Actual Future', zorder=6)

        # Forecast start line
        ax.axvline(x=len(historical_data) - 1, color='gray', linestyle=':',
                   alpha=0.7, linewidth=1.5, label='Forecast Start')

        # Styling
        ax.set_title(title, fontsize=18, fontweight='bold', pad=20)
        ax.set_ylabel('Value', fontsize=14, fontweight='bold')
        ax.set_xlabel('Time', fontsize=14, fontweight='bold')
        ax.grid(True, alpha=0.3, color=self.colors['grid'])
        ax.tick_params(labelsize=12)

        # Legend
        legend = ax.legend(loc='upper left', fontsize=12, frameon=True)
        legend.get_frame().set_facecolor('white')

        # Timestamp
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        fig.text(0.99, 0.01, f'Generated: {timestamp}', ha='right', va='bottom',
                 fontsize=10, alpha=0.7)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', facecolor='white')
            logger.info(f"Comparison plot saved to: {save_path}")

        logger.info("✅ Forecast comparison visualization completed")
        return fig
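
A short usage sketch for the static Visualizer above. All numbers are invented, the interval keys follow the lower_80/upper_80 convention this module checks for, and the save path is hypothetical:

import numpy as np
from visualization import Visualizer

history = 100 + np.cumsum(np.random.normal(0, 1, 200))  # synthetic random walk
forecast = np.full(24, history[-1])                     # flat placeholder forecast
intervals = {
    "lower_80": forecast - 5, "upper_80": forecast + 5,
    "lower_50": forecast - 2, "upper_50": forecast + 2,
}

viz = Visualizer(style="minimal")
fig = viz.plot_forecast_with_intervals(
    historical_data=history,
    forecast=forecast,
    intervals=intervals,
    title="Synthetic TimesFM Forecast",
    save_path="results/forecast.png",
)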
webapp/app.py
ADDED
@@ -0,0 +1,758 @@
"""
Sapheneia TimesFM Web Application

A Flask-based web application for TimesFM forecasting with a professional interface.
Supports localhost deployment.

Features:
- File upload for CSV data
- Interactive parameter configuration
- Real-time forecasting with TimesFM
- Professional visualizations
- Downloadable results
- Support for covariates and quantile forecasting
"""

import os
import sys
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any

import pandas as pd
import numpy as np
from flask import Flask, render_template, request, jsonify, send_file, redirect, url_for, flash
from werkzeug.utils import secure_filename
import tempfile

# Add src to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))

# Import Sapheneia TimesFM modules
from model import TimesFMModel, initialize_timesfm_model
from data import DataProcessor, prepare_visualization_data
from forecast import Forecaster, run_forecast, process_quantile_bands

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize Flask app
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY', 'your-secret-key-change-this-in-production')

# Configuration
UPLOAD_FOLDER = 'uploads'
RESULTS_FOLDER = 'results'
ALLOWED_EXTENSIONS = {'csv'}
MAX_FILE_SIZE = 16 * 1024 * 1024  # 16MB

# Ensure directories exist
for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
    os.makedirs(folder, exist_ok=True)

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['RESULTS_FOLDER'] = RESULTS_FOLDER
app.config['MAX_CONTENT_LENGTH'] = MAX_FILE_SIZE

# Global variables for model management
current_model = None
current_forecaster = None
current_visualizer = None


def allowed_file(filename):
    """Check if file extension is allowed."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


def init_model(backend='cpu', context_len=64, horizon_len=24, checkpoint=None, local_path=None):
    """Initialize TimesFM model with given parameters using centralized function."""
    global current_model, current_forecaster, current_visualizer

    try:
        logger.info(f"Initializing model with backend={backend}, context={context_len}, horizon={horizon_len}")

        # Use centralized model initialization
        current_model, current_forecaster, current_visualizer = initialize_timesfm_model(
            backend=backend,
            context_len=context_len,
            horizon_len=horizon_len,
            checkpoint=checkpoint,
            local_model_path=local_path
        )

        logger.info("Model initialized successfully")
        return True, "Model initialized successfully"

    except Exception as e:
        logger.error(f"Model initialization failed: {str(e)}")
        return False, f"Model initialization failed: {str(e)}"


# Sample data generation removed as per requirements


@app.route('/')
def index():
    """Main page."""
    return render_template('index.html')


@app.route('/api/model/init', methods=['POST'])
def api_init_model():
    """Initialize TimesFM model via API."""
    try:
        data = request.get_json()

        backend = data.get('backend', 'cpu')
        context_len = int(data.get('context_len', 64))
        horizon_len = int(data.get('horizon_len', 24))
        checkpoint = data.get('checkpoint')
        local_path = data.get('local_path')

        # Use default checkpoint if none specified
        if not checkpoint and not local_path:
            checkpoint = "google/timesfm-2.0-500m-pytorch"

        success, message = init_model(backend, context_len, horizon_len, checkpoint, local_path)

        return jsonify({
            'success': success,
            'message': message,
            'model_info': current_model.get_model_info() if success else None
        })

    except Exception as e:
        logger.error(f"API model init error: {str(e)}")
        return jsonify({'success': False, 'message': str(e)}), 500


@app.route('/api/data/upload', methods=['POST'])
def api_upload_data():
    """Upload and process CSV data."""
    try:
        if 'file' not in request.files:
            return jsonify({'success': False, 'message': 'No file uploaded'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'success': False, 'message': 'No file selected'}), 400

        if not allowed_file(file.filename):
            return jsonify({'success': False, 'message': 'Invalid file type. Only CSV files allowed.'}), 400

        # Save uploaded file
        filename = secure_filename(file.filename)
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"{timestamp}_{filename}"
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)

        try:
            file.save(filepath)
            logger.info(f"File saved successfully: {filepath}")

            # Load and analyze data
            df = pd.read_csv(filepath)
            logger.info(f"CSV loaded successfully with shape: {df.shape}")

            # Convert data to JSON-serializable format
            df_head = df.head()
            head_records = []
            for _, row in df_head.iterrows():
                record = {}
                for col in df.columns:
                    value = row[col]
                    if pd.isna(value):
                        record[col] = None
                    elif isinstance(value, (pd.Timestamp, datetime)):
                        record[col] = str(value)
                    elif isinstance(value, (np.integer, np.floating)):
                        record[col] = float(value)
                    else:
                        record[col] = str(value)
                head_records.append(record)

            df_info = {
                'filename': filename,
                'shape': list(df.shape),  # Convert tuple to list
                'columns': df.columns.tolist(),
                'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
                'head': head_records,
                'null_counts': {col: int(count) for col, count in df.isnull().sum().items()}
            }

            # Check for date column
            has_date = 'date' in df.columns
            if has_date:
                try:
                    df['date'] = pd.to_datetime(df['date'], errors='coerce')
                    # Get all unique dates in the data
                    available_dates = df['date'].dropna().dt.date.unique()
                    available_dates = sorted([str(date) for date in available_dates])

                    df_info['date_range'] = {
                        'start': str(df['date'].min().date()) if not df['date'].isnull().all() else None,
                        'end': str(df['date'].max().date()) if not df['date'].isnull().all() else None,
                        'periods': len(df),
                        'available_dates': available_dates
                    }
                except Exception as date_error:
                    logger.warning(f"Date parsing failed: {date_error}")
                    has_date = False

            # Check if this looks like forecast output data instead of time series data
            forecast_output_indicators = [
                'period', 'point_forecast', 'quantile_forecast', 'forecast',
                'prediction', 'forecast_lower', 'forecast_upper', 'quantile'
            ]

            column_names_lower = [col.lower() for col in df.columns]
            is_forecast_output = any(indicator in ' '.join(column_names_lower) for indicator in forecast_output_indicators)

            if is_forecast_output:
                logger.warning("Detected forecast output data instead of time series data")
                return jsonify({
                    'success': False,
                    'message': 'This appears to be forecast output data, not time series input data. Please upload your original time series data with a "date" column and numeric value columns.',
                    'is_forecast_output': True,
                    'suggested_columns': ['date', 'value', 'price', 'amount', 'count', 'sales', 'revenue']
                }), 400

            logger.info(f"Data analysis completed. Has date column: {has_date}")

            # Create response
            response_data = {
                'success': True,
                'message': 'File uploaded successfully',
                'data_info': df_info,
                'has_date_column': has_date
            }

            logger.info(f"Sending response with keys: {list(response_data.keys())}")
            return jsonify(response_data)

        except Exception as processing_error:
            logger.error(f"File processing error: {processing_error}")
            if os.path.exists(filepath):
                os.remove(filepath)
            raise processing_error

    except Exception as e:
        logger.error(f"File upload error: {str(e)}")
        return jsonify({'success': False, 'message': f'Upload failed: {str(e)}'}), 500


@app.route('/api/sample_data', methods=['POST'])
def api_sample_data():
    """Generate sample time series data for testing."""
    try:
        data = request.get_json()
        data_type = data.get('type', 'financial')
        periods = int(data.get('periods', 100))

        # Generate sample data
        dates = pd.date_range(start='2020-01-01', periods=periods, freq='D')

        if data_type == 'financial':
            # Generate financial time series (like stock prices)
            np.random.seed(42)
            base_price = 100
            returns = np.random.normal(0.001, 0.02, periods)  # Daily returns
            prices = [base_price]
            for ret in returns[1:]:
                prices.append(prices[-1] * (1 + ret))

            sample_data = pd.DataFrame({
                'date': dates,
                'price': prices,
                'volume': np.random.randint(1000, 10000, periods),
                'volatility': np.random.uniform(0.1, 0.3, periods)
            })

        elif data_type == 'sales':
            # Generate sales data
            np.random.seed(42)
            trend = np.linspace(100, 150, periods)
            seasonal = 20 * np.sin(2 * np.pi * np.arange(periods) / 365.25)
            noise = np.random.normal(0, 5, periods)
            sales = trend + seasonal + noise

            sample_data = pd.DataFrame({
                'date': dates,
                'sales': sales,
                'customers': np.random.randint(50, 200, periods),
                'marketing_spend': np.random.uniform(1000, 5000, periods)
            })

        else:
            # Generate generic time series
            np.random.seed(42)
            trend = np.linspace(0, 100, periods)
            seasonal = 10 * np.sin(2 * np.pi * np.arange(periods) / 30)
            noise = np.random.normal(0, 2, periods)
            values = trend + seasonal + noise

            sample_data = pd.DataFrame({
                'date': dates,
                'value': values,
                'category': np.random.choice(['A', 'B', 'C'], periods),
                'score': np.random.uniform(0, 100, periods)
            })

        # Save sample data
        filename = f"sample_{data_type}_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        sample_data.to_csv(filepath, index=False)

        # Return data info
        df_info = {
            'filename': filename,
            'shape': list(sample_data.shape),
            'columns': sample_data.columns.tolist(),
            'dtypes': {col: str(dtype) for col, dtype in sample_data.dtypes.items()},
            'head': sample_data.head().to_dict('records'),
            'null_counts': {col: int(count) for col, count in sample_data.isnull().sum().items()},
            'date_range': {
                'start': str(sample_data['date'].min().date()),
                'end': str(sample_data['date'].max().date()),
                'periods': len(sample_data)
            }
        }

        return jsonify({
            'success': True,
            'message': f'Sample {data_type} data generated successfully',
            'data_info': df_info,
            'has_date_column': True
        })

    except Exception as e:
        logger.error(f"Sample data generation error: {str(e)}")
        return jsonify({'success': False, 'message': f'Sample data generation failed: {str(e)}'}), 500


@app.route('/api/forecast', methods=['POST'])
def api_forecast():
    """Perform forecasting with uploaded data."""
    try:
        if not current_model or not current_forecaster:
            return jsonify({'success': False, 'message': 'Model not initialized'}), 400

        data = request.get_json()
        filename = data.get('filename')
        data_definition = data.get('data_definition', {})
        use_covariates = data.get('use_covariates', False)
        use_quantiles = data.get('use_quantiles', False)
        context_len = int(data.get('context_len', 64))
        horizon_len = int(data.get('horizon_len', 24))

        if not filename:
            return jsonify({'success': False, 'message': 'No data file specified'}), 400

        # Load data
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        if not os.path.exists(filepath):
            return jsonify({'success': False, 'message': 'Data file not found'}), 400

        # Process data
        data_processor = DataProcessor()
        processed_data = data_processor.load_csv_data(filepath, data_definition)

        # Filter data based on context dates if provided
        context_start_date = data.get('context_start_date')
        context_end_date = data.get('context_end_date')

        if context_start_date and context_end_date:
            # Convert string dates to datetime
            context_start = pd.to_datetime(context_start_date)
            context_end = pd.to_datetime(context_end_date)

            # For visualization, we need data that includes both context and horizon periods
            # Calculate horizon end date (horizon_len periods after context_end)
            horizon_end = context_end + pd.Timedelta(days=horizon_len * 7)  # Assuming weekly data

            # Filter data to include both context and horizon periods for actual future values
            processed_data_for_viz = processed_data[
                (processed_data['date'] >= context_start) &
                (processed_data['date'] <= horizon_end)
            ].reset_index(drop=True)

            # For forecasting, we still only use the context period
            processed_data_for_forecast = processed_data[
                (processed_data['date'] >= context_start) &
                (processed_data['date'] <= context_end)
            ].reset_index(drop=True)

            logger.info(f"Filtered data for forecasting: {context_start_date} to {context_end_date}")
            logger.info(f"Forecast data shape: {processed_data_for_forecast.shape}")
            logger.info(f"Forecast data date range: {processed_data_for_forecast['date'].min()} to {processed_data_for_forecast['date'].max()}")
            logger.info(f"Filtered data for visualization: {context_start_date} to {horizon_end.strftime('%Y-%m-%d')}")
            logger.info(f"Visualization data shape: {processed_data_for_viz.shape}")
            logger.info(f"Visualization data date range: {processed_data_for_viz['date'].min()} to {processed_data_for_viz['date'].max()}")

            # Use forecast data for the actual forecasting
            processed_data = processed_data_for_forecast

        # Check data sufficiency - only need context_len for the data
        if len(processed_data) < context_len:
            return jsonify({
                'success': False,
                'message': f'Insufficient data. Need {context_len} periods, have {len(processed_data)}'
            }), 400

        # Prepare forecast data
        target_column = None
        for col, dtype in data_definition.items():
            if dtype == 'target':
                target_column = col
                break

        if not target_column:
            return jsonify({'success': False, 'message': 'No target column specified'}), 400

        target_inputs, covariates = data_processor.prepare_forecast_data(
            processed_data, context_len, horizon_len, target_column
        )

        # Debug: Log covariate information
        logger.info(f"Forecast data preparation completed:")
        logger.info(f"  - Context length: {context_len}")
        logger.info(f"  - Horizon length: {horizon_len}")
        logger.info(f"  - Processed data shape: {processed_data.shape}")
        logger.info(f"  - Processed data date range: {processed_data['date'].min()} to {processed_data['date'].max()}")
        logger.info(f"  - Target inputs length: {len(target_inputs)}")
        logger.info(f"  - Covariates keys: {list(covariates.keys()) if covariates else 'None'}")

        if covariates:
            for cov_type, cov_data in covariates.items():
                if isinstance(cov_data, dict):
                    logger.info(f"  - {cov_type} covariates: {len(cov_data)} items")
                    for key, value in cov_data.items():
                        if isinstance(value, list):
                            logger.info(f"    - {key}: {len(value)} values")
                        elif isinstance(value, np.ndarray):
                            logger.info(f"    - {key}: shape {value.shape}")
                        else:
                            logger.info(f"    - {key}: {type(value)}")
                else:
                    logger.info(f"  - {cov_type} covariates: {type(cov_data)}")

        # COMPREHENSIVE MODEL INPUT DEBUGGING
        logger.info("=" * 80)
        logger.info("COMPREHENSIVE MODEL INPUT DEBUGGING")
        logger.info("=" * 80)

        # Frontend parameters received
        logger.info(f"FRONTEND PARAMETERS RECEIVED:")
        logger.info(f"  - Context Start Date: {context_start_date}")
        logger.info(f"  - Context End Date: {context_end_date}")
        logger.info(f"  - Context Length: {context_len}")
        logger.info(f"  - Horizon Length: {horizon_len}")
        logger.info(f"  - Target Column: {target_column}")

        # Data filtering results
        logger.info(f"DATA FILTERING RESULTS:")
        logger.info(f"  - Original data shape: {processed_data.shape}")
        logger.info(f"  - Filtered data date range: {processed_data['date'].min()} to {processed_data['date'].max()}")
        logger.info(f"  - Available columns: {list(processed_data.columns)}")

        # Target data details
        logger.info(f"TARGET DATA DETAILS:")
        logger.info(f"  - Target column: {target_column}")
        logger.info(f"  - Target values length: {len(target_inputs)}")
        logger.info(f"  - Target values range: {min(target_inputs):.4f} to {max(target_inputs):.4f}")
        logger.info(f"  - First 5 target values: {target_inputs[:5]}")
        logger.info(f"  - Last 5 target values: {target_inputs[-5:]}")

        # Covariate details
        logger.info(f"COVARIATE DETAILS:")
        if covariates:
            for cov_type, cov_data in covariates.items():
                logger.info(f"  - {cov_type}:")
                if isinstance(cov_data, dict):
                    for key, value in cov_data.items():
                        if isinstance(value, list) and len(value) > 0:
                            if isinstance(value[0], list):  # Nested list structure
                                inner_list = value[0]
                                logger.info(f"    - {key}: {len(inner_list)} values")
                                logger.info(f"      First 5: {inner_list[:5]}")
                                logger.info(f"      Last 5: {inner_list[-5:]}")
                            else:  # Simple list
                                logger.info(f"    - {key}: {len(value)} values")
                                logger.info(f"      Values: {value}")
                        else:
                            logger.info(f"    - {key}: {value}")
                else:
                    logger.info(f"    - Raw data: {cov_data}")
        else:
            logger.info("  - No covariates provided")

        # Model configuration
        logger.info(f"MODEL CONFIGURATION:")
        logger.info(f"  - Context Length: {context_len}")
        logger.info(f"  - Horizon Length: {horizon_len}")
        logger.info(f"  - Total Length: {context_len + horizon_len}")
        logger.info(f"  - Has Covariates: {bool(covariates)}")

        logger.info("=" * 80)

        # Perform forecasting using centralized function
        try:
            # Ensure target_inputs is in the correct format (list of lists)
            if isinstance(target_inputs[0], (int, float)):
                target_inputs_formatted = [target_inputs]
            else:
                target_inputs_formatted = target_inputs

            results = run_forecast(
                forecaster=current_forecaster,
                target_inputs=target_inputs_formatted,
                covariates=covariates if use_covariates and any(covariates.values()) else None,
                use_covariates=use_covariates and any(covariates.values()),
                freq=0
            )

            # Check for NaN values before JSON serialization
            for key, value in results.items():
                if isinstance(value, np.ndarray):
                    if np.any(np.isnan(value)):
                        logger.error(f"❌ NaN values detected in {key}: {np.isnan(value).sum()} out of {value.size}")
                        return jsonify({
                            'success': False,
                            'message': f'Forecasting failed: Invalid values (NaN) detected in {key}. This may be due to insufficient data or model issues.'
                        }), 500

                    if key == 'quantile_forecast':
                        # For quantiles, keep the full array structure
                        if value.ndim == 3:
                            results[key] = value[0].tolist()  # (1, horizon, quantiles) -> (horizon, quantiles)
                        else:
                            results[key] = value.tolist()
                    elif value.ndim > 1:
                        results[key] = value[0].tolist()  # Take first series if batch
                    else:
                        results[key] = value.tolist()
                elif isinstance(value, (list, tuple)):
                    # Check for NaN in lists/tuples
                    if any(isinstance(x, float) and np.isnan(x) for x in value):
                        logger.error(f"❌ NaN values detected in {key} list")
                        return jsonify({
                            'success': False,
                            'message': f'Forecasting failed: Invalid values (NaN) detected in {key}. This may be due to insufficient data or model issues.'
                        }), 500

            logger.info(f"✅ Centralized forecasting completed. Methods: {list(results.keys())}")
            logger.info(f"Results structure: {[(k, type(v), len(v) if hasattr(v, '__len__') else 'N/A') for k, v in results.items()]}")
            if 'quantile_forecast' in results:
                shape_quantile = len(results['quantile_forecast']) if hasattr(results['quantile_forecast'], '__len__') else 'N/A'
                logger.info(f"Quantile forecast shape: {shape_quantile}")
            else:
                logger.warning("No quantile_forecast in results!")

        except Exception as e:
            logger.error(f"Centralized forecasting failed: {str(e)}")
            return jsonify({'success': False, 'message': f'Forecasting failed: {str(e)}'}), 500

        # Prepare visualization data using centralized function
        # Use forecast data for historical data (respects context end date)
        # Pass extended data separately for actual future values
        visualization_data = prepare_visualization_data(
            processed_data=processed_data,  # Use forecast data for historical data
            target_inputs=target_inputs,
            target_column=target_column,
            context_len=context_len,
            horizon_len=horizon_len,
            extended_data=processed_data_for_viz if 'processed_data_for_viz' in locals() else None
        )

        return jsonify({
            'success': True,
            'message': 'Forecasting completed successfully',
            'results': results,
            'visualization_data': visualization_data,
            'forecast_summary': {
                'methods_used': list(results.keys()),
                'context_length': context_len,
                'horizon_length': horizon_len,
                'target_column': target_column,
                'covariates_used': use_covariates and any(covariates.values())
            }
        })

    except Exception as e:
        logger.error(f"Forecasting error: {str(e)}")
        return jsonify({'success': False, 'message': f'Forecasting failed: {str(e)}'}), 500


@app.route('/api/visualize', methods=['POST'])
def api_visualize():
    """Generate an interactive visualization and return its Plotly figure JSON."""
    try:
        data = request.get_json()
        viz_data = data.get('visualization_data', {})
        results = data.get('results', {})
        selected_indices = data.get('quantile_indices', [])

        if not current_visualizer:
            return jsonify({'success': False, 'message': 'Visualizer not initialized'}), 400

        # Extract data
        historical_data = viz_data.get('historical_data', [])
        dates_historical = [pd.to_datetime(d) for d in viz_data.get('dates_historical', [])]
        dates_future = [pd.to_datetime(d) for d in viz_data.get('dates_future', [])]
        actual_future = viz_data.get('actual_future', [])
        target_name = viz_data.get('target_name', 'Value')

        # COMPREHENSIVE VISUALIZATION DEBUGGING
        logger.info("=" * 80)
        logger.info("VISUALIZATION ENDPOINT DEBUGGING")
        logger.info("=" * 80)
        logger.info(f"Visualization data received:")
        logger.info(f"  - historical_data length: {len(historical_data)}")
        logger.info(f"  - dates_historical length: {len(dates_historical)}")
        logger.info(f"  - dates_future length: {len(dates_future)}")
        logger.info(f"  - actual_future length: {len(actual_future)}")
        logger.info(f"  - target_name: {target_name}")

        if historical_data:
            logger.info(f"  - historical_data range: {min(historical_data):.4f} to {max(historical_data):.4f}")
            logger.info(f"  - first 5 historical values: {historical_data[:5]}")
            logger.info(f"  - last 5 historical values: {historical_data[-5:]}")

        if dates_historical:
            logger.info(f"  - first historical date: {dates_historical[0]}")
            logger.info(f"  - last historical date: {dates_historical[-1]}")

        logger.info(f"Results keys: {list(results.keys())}")
        logger.info("=" * 80)

        # Choose best forecast
        if 'point_forecast' in results:
            forecast = results['point_forecast']
            if results.get('method') == 'covariates_enhanced':
                title = f"{target_name} Forecast with Covariates Enhancement"
            else:
                title = f"{target_name} Forecast (TimesFM)"
        else:
            return jsonify({'success': False, 'message': 'No forecast data available'}), 400

        # Process quantile bands using centralized function
        intervals = {}
        used_quantile_intervals = False
        quantile_shape = None

        logger.info(f"Available results keys: {list(results.keys())}")
        if 'quantile_forecast' in results:
            try:
                quantiles = np.array(results['quantile_forecast'])
                quantile_shape = list(quantiles.shape)
                logger.info(f"Quantile forecast shape received for viz: {quantile_shape}")

                # Use centralized quantile processing
                intervals = process_quantile_bands(
                    quantile_forecast=quantiles,
                    selected_indices=selected_indices if selected_indices and len(selected_indices) > 0 else []
                )

                used_quantile_intervals = len(intervals) > 0
                logger.info(f"✅ Processed quantile bands using centralized function. Bands: {len(intervals)//3}")

            except Exception as e:
                logger.warning(f"Quantile band processing failed: {e}")
                intervals = {}
                used_quantile_intervals = False
        else:
            logger.warning("No quantile_forecast found in results - quantile intervals will not be displayed")

        # Generate plot
        try:
            logger.info(f"Generating plot with forecast length: {len(forecast)}")
            logger.info(f"Historical data type: {type(historical_data)}, length: {len(historical_data) if hasattr(historical_data, '__len__') else 'N/A'}")
            logger.info(f"Forecast type: {type(forecast)}, length: {len(forecast) if hasattr(forecast, '__len__') else 'N/A'}")
            logger.info(f"Intervals keys: {list(intervals.keys()) if intervals else 'None'}")
            logger.info(f"Dates historical length: {len(dates_historical)}")
            logger.info(f"Dates future length: {len(dates_future)}")

            fig = current_visualizer.plot_forecast_with_intervals(
                historical_data=historical_data,
                forecast=forecast,
                intervals=intervals if intervals else None,
                actual_future=actual_future if actual_future else None,
                dates_historical=dates_historical,
                dates_future=dates_future,
                title=title,
                target_name=target_name,
                show_figure=False
            )
            logger.info("Interactive plot generated successfully")
        except Exception as plot_error:
            logger.error(f"Plot generation failed: {str(plot_error)}")
            import traceback
            traceback.print_exc()
            raise plot_error

        figure_payload = json.loads(fig.to_json())
        plot_config = {
            'responsive': True,
            'displaylogo': False,
            'modeBarButtonsToRemove': ['lasso2d', 'select2d']
        }

        return jsonify({
            'success': True,
            'message': 'Visualization generated successfully',
            'figure': figure_payload,
            'config': plot_config,
            'used_quantile_intervals': used_quantile_intervals,
            'quantile_shape': quantile_shape
        })

    except Exception as e:
        logger.error(f"Visualization error: {str(e)}")
        return jsonify({'success': False, 'message': f'Visualization failed: {str(e)}'}), 500


@app.route('/health')
def health_check():
    """Health check endpoint."""
    return jsonify({
        'status': 'healthy',
        'timestamp': datetime.now().isoformat(),
        'model_loaded': current_model is not None
    })


@app.errorhandler(413)
def too_large(e):
    """Handle file too large error."""
    return jsonify({'success': False, 'message': 'File too large. Maximum size is 16MB.'}), 413


@app.errorhandler(500)
def internal_error(e):
    """Handle internal server error."""
    return jsonify({'success': False, 'message': 'Internal server error.'}), 500


if __name__ == '__main__':
    # Configuration for different environments
    # Detect if running in Docker/HF Spaces (port 7860) or local development (port 8080)
    is_docker = os.path.exists('/.dockerenv')
    is_hf_spaces = os.environ.get('SPACE_ID') is not None

    # Use port 7860 for Docker/HF Spaces, 8080 for local development
    default_port = 7860 if (is_docker or is_hf_spaces) else 8080
    port = int(os.environ.get('PORT', default_port))
    debug = os.environ.get('FLASK_ENV') == 'development'

    # Initialize default model for local development or HF Spaces
    if debug or is_hf_spaces:
        logger.info("Initializing default TimesFM model...")
        init_model(backend='cpu', context_len=64, horizon_len=24,
                   checkpoint="google/timesfm-2.0-500m-pytorch")

    # Run the app
    logger.info(f"Starting Sapheneia TimesFM webapp on port {port}")
    app.run(host='0.0.0.0', port=port, debug=debug)
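
A quick orientation, not part of the commit itself: the sketch below shows how a client might drive the endpoints defined above once the app is running locally. The base URL, the sample CSV path, and the use of 'price' as the target column are assumptions for illustration; the routes, payload keys, and response fields mirror the handlers in app.py.

# Hypothetical client-side walkthrough of the webapp API (not part of the repo).
# Host/port and the CSV contents are assumptions; endpoints and JSON keys come
# from the route handlers above.
import requests

BASE = "http://localhost:8080"  # 7860 when running in Docker / HF Spaces

# 1. Initialize the TimesFM model.
init = requests.post(f"{BASE}/api/model/init", json={
    "backend": "cpu",
    "context_len": 64,
    "horizon_len": 24,
    "checkpoint": "google/timesfm-2.0-500m-pytorch",
}).json()
print(init["message"])

# 2. Upload a CSV with a 'date' column and numeric value columns.
with open("data/sample_data.csv", "rb") as f:
    upload = requests.post(f"{BASE}/api/data/upload", files={"file": f}).json()
filename = upload["data_info"]["filename"]

# 3. Run a forecast, marking which column is the target.
forecast = requests.post(f"{BASE}/api/forecast", json={
    "filename": filename,
    "data_definition": {"price": "target"},  # hypothetical column name
    "use_covariates": False,
    "use_quantiles": True,
    "context_len": 64,
    "horizon_len": 24,
}).json()

# 4. Ask the server to build the interactive Plotly figure payload.
viz = requests.post(f"{BASE}/api/visualize", json={
    "visualization_data": forecast["visualization_data"],
    "results": forecast["results"],
    "quantile_indices": [],
}).json()
print(viz["used_quantile_intervals"])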
webapp/requirements.txt
ADDED
@@ -0,0 +1,31 @@
# Sapheneia TimesFM Web Application Dependencies

# Core framework
Flask==3.0.0
Werkzeug==3.0.1
Jinja2==3.1.2

# TimesFM and AI dependencies
timesfm>=1.3.0
jax>=0.7.0
jaxlib>=0.7.0
torch>=2.0.0

# Data processing
pandas>=2.0.0
numpy>=1.26.4
scikit-learn>=1.2.2

# Visualization
matplotlib>=3.10.5
seaborn>=0.13.2

# Utilities
python-dateutil>=2.8.2
typer>=0.12.3

# Production server
gunicorn==21.2.0

# Optional: For enhanced compatibility
Pillow>=9.0.0
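
Also for orientation rather than part of the commit: a minimal, hypothetical pre-flight check that the pins above resolved inside the container or virtual environment, using the distribution names exactly as listed.

# Hypothetical pre-flight check (not in the repo): confirm the pinned webapp
# dependencies resolve in the current environment before starting the server.
from importlib.metadata import PackageNotFoundError, version

REQUIRED = ["Flask", "Werkzeug", "Jinja2", "timesfm", "torch",
            "pandas", "numpy", "scikit-learn", "matplotlib", "gunicorn"]

missing = []
for dist in REQUIRED:
    try:
        print(f"{dist}=={version(dist)}")
    except PackageNotFoundError:
        missing.append(dist)

if missing:
    raise SystemExit(f"Missing dependencies: {', '.join(missing)}")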
webapp/static/css/styles.css
ADDED
@@ -0,0 +1,425 @@
/*
 * Sapheneia TimesFM Web Application Styles
 * Professional styling for time series forecasting interface
 */

:root {
    --sapheneia-primary: #0066cc;
    --sapheneia-secondary: #6c757d;
    --sapheneia-success: #28a745;
    --sapheneia-warning: #ffc107;
    --sapheneia-info: #17a2b8;
    --sapheneia-light: #f8f9fa;
    --sapheneia-dark: #343a40;
}

/* Global Styles */
body {
    background-color: var(--sapheneia-light);
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

.navbar-brand {
    font-size: 1.5rem;
    font-weight: 700;
}

/* Card Enhancements */
.card {
    border: none;
    box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);
    border-radius: 0.5rem;
}

.card-header {
    border-top-left-radius: 0.5rem !important;
    border-top-right-radius: 0.5rem !important;
    border-bottom: none;
    font-weight: 600;
}

/* Form Enhancements */
.form-control:focus,
.form-select:focus {
    border-color: var(--sapheneia-primary);
    box-shadow: 0 0 0 0.2rem rgba(0, 102, 204, 0.25);
}

.form-label {
    font-weight: 600;
    color: var(--sapheneia-dark);
}

/* Button Styles */
.btn {
    border-radius: 0.375rem;
    font-weight: 500;
    transition: all 0.2s ease-in-out;
}

.btn:hover {
    transform: translateY(-1px);
    box-shadow: 0 0.25rem 0.5rem rgba(0, 0, 0, 0.15);
}

/* Status Badge */
.badge {
    font-size: 0.875rem;
    padding: 0.5rem 0.75rem;
}

/* Column Selection Styles */
.column-checkbox {
    transform: scale(1.1);
}

.column-checkbox:checked {
    background-color: var(--sapheneia-success);
    border-color: var(--sapheneia-success);
}

.form-check-label {
    font-weight: 500;
    color: var(--sapheneia-dark);
    cursor: pointer;
    transition: color 0.2s ease-in-out;
    font-size: 0.85rem;
}

.form-check-label:hover {
    color: var(--sapheneia-primary);
}

/* Data Definition Styles */
.column-definition {
    background-color: white;
    border: 1px solid #dee2e6;
    border-radius: 0.375rem;
    padding: 1rem;
    margin-bottom: 1rem;
    transition: all 0.2s ease-in-out;
}

.column-definition:hover {
    border-color: var(--sapheneia-primary);
    box-shadow: 0 0.125rem 0.25rem rgba(0, 102, 204, 0.1);
}

.column-name {
    font-weight: 600;
    color: var(--sapheneia-primary);
    font-family: 'Courier New', monospace;
}

.column-type {
    font-size: 0.875rem;
    color: var(--sapheneia-secondary);
}

/* Data Info Table */
.data-info-table {
    background-color: white;
    border-radius: 0.375rem;
}

.data-info-table th {
    background-color: var(--sapheneia-light);
    font-weight: 600;
    border-top: none;
}

/* Alert Enhancements */
.alert {
    border: none;
    border-radius: 0.5rem;
    box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);
}

.alert-success {
    background-color: rgba(40, 167, 69, 0.1);
    color: #155724;
}

.alert-danger {
    background-color: rgba(220, 53, 69, 0.1);
    color: #721c24;
}

.alert-warning {
    background-color: rgba(255, 193, 7, 0.1);
    color: #856404;
}

.alert-info {
    background-color: rgba(23, 162, 184, 0.1);
    color: #0c5460;
}

/* Loading Styles */
.spinner-border {
    width: 3rem;
    height: 3rem;
}

/* Tab Enhancements */
.nav-tabs .nav-link {
    border: none;
    border-radius: 0.375rem 0.375rem 0 0;
    color: var(--sapheneia-secondary);
    font-weight: 500;
}

.nav-tabs .nav-link.active {
    background-color: white;
    color: var(--sapheneia-primary);
    border-bottom: 2px solid var(--sapheneia-primary);
}

/* Chart Container */
.chart-container {
    width: 100%;
    height: 70vh;
    min-height: 480px;
    max-height: 800px;
    margin: 0 auto;
    position: relative;
}

#forecastChart {
    border: 1px solid #dee2e6;
    border-radius: 0.375rem;
    background-color: white;
    width: 100%;
    height: 100%;
    resize: both;
    overflow: hidden;
}

/* Summary Cards */
.summary-card {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    border-radius: 0.5rem;
    padding: 1.5rem;
    margin-bottom: 1rem;
    text-align: center;
}

.summary-value {
    font-size: 2rem;
    font-weight: 700;
    margin-bottom: 0.5rem;
}

.summary-label {
    font-size: 0.875rem;
    opacity: 0.9;
}

/* Data Table */
.table-responsive {
    border-radius: 0.375rem;
    overflow: hidden;
}

.table {
    margin-bottom: 0;
}

.table th {
    background-color: var(--sapheneia-primary);
    color: white;
    font-weight: 600;
    border: none;
    padding: 1rem 0.75rem;
}

.table td {
    padding: 0.75rem;
    vertical-align: middle;
}

.table-striped tbody tr:nth-of-type(odd) {
    background-color: rgba(0, 102, 204, 0.05);
}

/* Responsive Adjustments */
@media (max-width: 768px) {
    .container-fluid {
        padding-left: 1rem;
        padding-right: 1rem;
    }

    .card-body {
        padding: 1rem;
    }

    .btn {
        width: 100%;
        margin-bottom: 0.5rem;
    }

    .row .col-md-3,
    .row .col-md-4,
    .row .col-md-6 {
        margin-bottom: 1rem;
    }

    /* Mobile chart adjustments */
    .chart-container {
        height: 50vh;
        min-height: 300px;
    }
}

/* Chart responsiveness for different screen sizes */
@media (min-width: 1200px) {
    .chart-container {
        height: 75vh;
        max-height: 900px;
    }
}

@media (min-width: 768px) and (max-width: 1199px) {
    .chart-container {
        height: 65vh;
        max-height: 700px;
    }
}

/* Animation Classes */
.fade-in {
    animation: fadeIn 0.5s ease-in;
}

@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px); }
    to { opacity: 1; transform: translateY(0); }
}

.slide-in {
    animation: slideIn 0.3s ease-out;
}

@keyframes slideIn {
    from { transform: translateX(-10px); opacity: 0; }
    to { transform: translateX(0); opacity: 1; }
}

/* Success/Error States */
.is-valid {
    border-color: var(--sapheneia-success);
}

.is-invalid {
    border-color: #dc3545;
}

.valid-feedback {
    color: var(--sapheneia-success);
}

.invalid-feedback {
    color: #dc3545;
}

/* Progress Indicator */
.progress-step {
    display: inline-block;
    width: 2rem;
    height: 2rem;
    border-radius: 50%;
    background-color: #e9ecef;
    color: var(--sapheneia-secondary);
    text-align: center;
    line-height: 2rem;
    font-weight: 600;
    margin-right: 0.5rem;
    transition: all 0.3s ease;
}

.progress-step.completed {
    background-color: var(--sapheneia-success);
    color: white;
}

.progress-step.active {
    background-color: var(--sapheneia-primary);
    color: white;
    box-shadow: 0 0 0 0.2rem rgba(0, 102, 204, 0.25);
}

/* Utility Classes */
.text-sapheneia {
    color: var(--sapheneia-primary) !important;
}

.bg-sapheneia {
    background-color: var(--sapheneia-primary) !important;
}

.border-sapheneia {
    border-color: var(--sapheneia-primary) !important;
}

/* File Upload Enhancement */
.form-control[type="file"] {
    padding: 0.5rem;
}

.form-control[type="file"]::-webkit-file-upload-button {
    padding: 0.375rem 0.75rem;
    margin-right: 0.5rem;
    background-color: var(--sapheneia-primary);
    color: white;
    border: none;
    border-radius: 0.25rem;
    cursor: pointer;
}

/* Error Display Styles */
.error-details pre {
    background-color: #f8f9fa !important;
    border: 1px solid #dee2e6;
    border-radius: 0.375rem;
    font-family: 'Courier New', monospace;
    font-size: 0.85rem;
    line-height: 1.4;
    max-height: 300px;
    overflow-y: auto;
}

.alert-danger .alert-heading {
    color: #721c24;
    font-weight: 600;
}

.alert-danger hr {
    border-top-color: #f5c6cb;
}

.alert-danger ul {
    margin-bottom: 0;
}

.alert-danger li {
    margin-bottom: 0.25rem;
}

/* Custom Scrollbar */
::-webkit-scrollbar {
    width: 8px;
}

::-webkit-scrollbar-track {
    background: #f1f1f1;
}

::-webkit-scrollbar-thumb {
    background: var(--sapheneia-primary);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: #0056b3;
}
webapp/static/js/app.js
ADDED
@@ -0,0 +1,1633 @@
/**
 * Sapheneia TimesFM Web Application JavaScript
 * Handles user interactions, API calls, and dynamic content updates
 */

class SapheneiaTimesFM {
  constructor() {
    this.modelInitialized = false;
    this.currentData = null;
    this.currentResults = null;
    this.currentPlotFigure = null;
    this.currentPlotConfig = null;
    this.resizeTimeout = null;
    this.init();
  }

  init() {
    this.bindEvents();
    this.setupFormValidation();
  }

  bindEvents() {
    // Model configuration
    document.getElementById('modelConfigForm').addEventListener('submit', (e) => {
      e.preventDefault();
      this.initializeModel();
    });

    document.getElementById('modelSource').addEventListener('change', (e) => {
      this.toggleModelSourceFields(e.target.value);
    });

    // Data upload
    document.getElementById('uploadBtn').addEventListener('click', () => {
      this.uploadData();
    });

    // Sample data generation removed as per requirements

    // Forecasting
    document.getElementById('forecastConfigForm').addEventListener('submit', (e) => {
      e.preventDefault();
      this.runForecast();
    });

    // Quantile selector is now always visible, no toggle needed

    // Download chart
    document.getElementById('downloadChart').addEventListener('click', () => {
      this.downloadChart();
    });

    // Download data
    document.getElementById('downloadData').addEventListener('click', () => {
      this.downloadData();
    });

    // Tab switching - preserve scroll position
    this.setupTabSwitching();

    // Bind quantile events
    this.bindQuantileEvents();
  }

  setupFormValidation() {
    // Add real-time validation for numeric inputs
    const numericInputs = ['contextLen', 'horizonLen'];
    numericInputs.forEach(id => {
      const input = document.getElementById(id);
      input.addEventListener('input', (e) => {
        this.validateNumericInput(e.target);
      });
    });
  }

  validateNumericInput(input) {
    const value = parseInt(input.value);
    const min = parseInt(input.min);
    const max = parseInt(input.max);

    if (isNaN(value) || value < min || value > max) {
      input.classList.add('is-invalid');
      return false;
    } else {
      input.classList.remove('is-invalid');
      input.classList.add('is-valid');
      return true;
    }
  }

  toggleModelSourceFields(source) {
    const huggingfaceRow = document.getElementById('huggingfaceRow');
    const localPathRow = document.getElementById('localPathRow');

    if (source === 'local') {
      huggingfaceRow.style.display = 'none';
      localPathRow.style.display = 'block';
    } else {
      huggingfaceRow.style.display = 'block';
      localPathRow.style.display = 'none';
    }
  }

  showAlert(type, title, message) {
    const alertContainer = document.getElementById('alertContainer');
    const alertId = 'alert_' + Date.now();

    const alertHtml = `
      <div class="alert alert-${type} alert-dismissible fade show slide-in" role="alert" id="${alertId}">
        <strong>${title}</strong> ${message}
        <button type="button" class="btn-close" data-bs-dismiss="alert"></button>
      </div>
    `;

    alertContainer.insertAdjacentHTML('beforeend', alertHtml);

    // Auto-dismiss after 5 seconds
    setTimeout(() => {
      const alert = document.getElementById(alertId);
      if (alert) {
        const bsAlert = new bootstrap.Alert(alert);
        bsAlert.close();
      }
    }, 5000);
  }

  showLoading(title = 'Processing...', subtitle = 'Please wait while we process your request.') {
    const loadingTextElement = document.getElementById('loadingText');
    const loadingSubtextElement = document.getElementById('loadingSubtext');
    const modalElement = document.getElementById('loadingModal');

    if (!loadingTextElement || !loadingSubtextElement || !modalElement) {
      console.error('Loading modal elements not found:', {
        loadingText: !!loadingTextElement,
        loadingSubtext: !!loadingSubtextElement,
        loadingModal: !!modalElement
      });
      return;
    }

    loadingTextElement.textContent = title;
    loadingSubtextElement.textContent = subtitle;

    const modal = new bootstrap.Modal(modalElement);
    modal.show();
  }

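  /**
   * Hides the loading modal. Bootstrap's hide() is attempted first; a delayed
   * forced cleanup then removes modal classes, backdrops and body scroll locks
   * in case the Bootstrap instance was missing or its hide did not complete.
   */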
  hideLoading() {
    console.log('hideLoading() called');

    // Multiple approaches to ensure modal is hidden
    const modalElement = document.getElementById('loadingModal');
    const modal = bootstrap.Modal.getInstance(modalElement);

    console.log('Modal instance:', modal);
    console.log('Modal element classes:', modalElement?.classList.toString());

    // Approach 1: Try Bootstrap's hide method
    if (modal) {
      modal.hide();
      console.log('Called Bootstrap modal.hide()');
    }

    // Approach 2: Force hide with a slight delay to ensure Bootstrap completes
    setTimeout(() => {
      console.log('Force hiding modal after timeout...');
      console.log('Modal element before force hide:', modalElement);
      console.log('Modal is visible?', modalElement?.offsetParent !== null);

      if (modalElement) {
        // Remove Bootstrap classes and attributes more aggressively
        modalElement.classList.remove('show', 'd-block');
        modalElement.classList.add('d-none');
        modalElement.setAttribute('aria-hidden', 'true');
        modalElement.removeAttribute('aria-modal');
        modalElement.removeAttribute('role');
        // setProperty is required here: assigning 'none !important' to style.display is invalid and would be ignored
        modalElement.style.setProperty('display', 'none', 'important');

        // Also hide the modal dialog
        const modalDialog = modalElement.querySelector('.modal-dialog');
        if (modalDialog) {
          modalDialog.style.display = 'none';
        }

        // Clean up body classes and attributes
        document.body.classList.remove('modal-open');
        document.body.style.overflow = '';
        document.body.style.paddingRight = '';

        // Remove backdrop more precisely - only actual modal backdrops
        const backdrops = document.querySelectorAll('.modal-backdrop');
        console.log('Found modal backdrops:', backdrops.length);
        backdrops.forEach((element, index) => {
          console.log(`Removing backdrop ${index}:`, element);
          element.remove();
        });

        // Double-check for any remaining modal-related elements (but don't remove them)
        const remainingModals = document.querySelectorAll('.modal.show, .modal.d-block');
        remainingModals.forEach(m => {
          // Only hide modals, don't remove them from DOM
          if (m !== modalElement) { // Don't double-process our target modal
            m.classList.remove('show', 'd-block');
            m.classList.add('d-none');
            m.style.display = 'none';
          }
        });

        console.log('Modal force hidden - classes now:', modalElement.classList.toString());
        console.log('Modal visible after force hide?', modalElement.offsetParent !== null);
        console.log('Body classes after cleanup:', document.body.classList.toString());
      }
    }, 100);
  }

  updateModelStatus(status, info = null) {
    const statusElement = document.getElementById('modelStatus');

    if (status === 'initializing') {
      statusElement.innerHTML = '<span class="badge bg-warning">Initializing...</span>';
    } else if (status === 'ready') {
      statusElement.innerHTML = '<span class="badge bg-success">Ready</span>';
      this.modelInitialized = true;
      this.updateForecastButtonState();
    } else if (status === 'error') {
      statusElement.innerHTML = '<span class="badge bg-danger">Error</span>';
      this.modelInitialized = false;
    } else {
      statusElement.innerHTML = '<span class="badge bg-secondary">Not Initialized</span>';
      this.modelInitialized = false;
    }
  }

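  /**
   * Reads the model configuration form (backend, context/horizon lengths and
   * either a Hugging Face checkpoint or a local path) and POSTs it to
   * /api/model/init, updating the status badge and alerts with the outcome.
   */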
  async initializeModel() {
    const form = document.getElementById('modelConfigForm');
    const formData = new FormData(form);

    const config = {
      backend: document.getElementById('backend').value,
      context_len: parseInt(document.getElementById('contextLen').value),
      horizon_len: parseInt(document.getElementById('horizonLen').value)
    };

    const modelSource = document.getElementById('modelSource').value;
    if (modelSource === 'local') {
      config.local_path = document.getElementById('localPath').value;
      if (!config.local_path) {
        this.showAlert('danger', 'Error', 'Please provide a local model path.');
        return;
      }
    } else {
      config.checkpoint = document.getElementById('checkpoint').value;
    }

    this.updateModelStatus('initializing');
    this.showLoading('Initializing TimesFM Model', 'This may take a few minutes on first run...');

    try {
      const response = await fetch('/api/model/init', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(config)
      });

      const result = await response.json();

      if (result.success) {
        this.updateModelStatus('ready', result.model_info);
        this.showAlert('success', 'Success', 'TimesFM model initialized successfully!');

        // Update UI with model capabilities
        if (result.model_info && result.model_info.capabilities) {
          this.updateCapabilitiesUI(result.model_info.capabilities);
        }
      } else {
        this.updateModelStatus('error');
        this.showAlert('danger', 'Initialization Failed', result.message || 'Unknown error occurred');
      }
    } catch (error) {
      this.updateModelStatus('error');
      this.showAlert('danger', 'Network Error', 'Failed to communicate with server: ' + error.message);
    } finally {
      this.hideLoading();
    }
  }

  updateCapabilitiesUI(capabilities) {
    // Quantile selector is now always visible and enabled
    // No need to update any checkboxes since they're always shown
  }

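  /**
   * Uploads the selected CSV file to /api/data/upload as multipart form data,
   * then renders the data preview, column definitions and date configuration
   * from the returned data_info.
   */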
  async uploadData() {
    const fileInput = document.getElementById('dataFile');
    const file = fileInput.files[0];

    if (!file) {
      this.showAlert('warning', 'No File Selected', 'Please select a CSV file to upload.');
      return;
    }

    if (!file.name.toLowerCase().endsWith('.csv')) {
      this.showAlert('danger', 'Invalid File Type', 'Please upload a CSV file.');
      return;
    }

    const formData = new FormData();
    formData.append('file', file);

    this.showLoading('Uploading Data', 'Processing your CSV file...');

    try {
      console.log('Starting file upload...');
      const response = await fetch('/api/data/upload', {
        method: 'POST',
        body: formData
      });

      console.log('Response received:', response.status, response.statusText);

      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }

      // Get response text first to debug potential JSON parsing issues
      const responseText = await response.text();
      console.log('Raw response text:', responseText.substring(0, 200) + '...');

      let result;
      try {
        result = JSON.parse(responseText);
        console.log('Parsed result:', result);
      } catch (jsonError) {
        console.error('JSON parsing failed:', jsonError);
        console.error('Response text that failed to parse:', responseText);
        throw new Error(`Failed to parse JSON response: ${jsonError.message}`);
      }

      if (result.success) {
        console.log('Processing successful result...');
        try {
          this.currentData = result.data_info;
          console.log('Set currentData:', this.currentData);

          this.showAlert('success', 'Upload Successful', 'Data uploaded and processed successfully!');
          console.log('Showed success alert');

          this.displayDataInfo(result.data_info);
          console.log('Displayed data info');

          this.generateDataDefinition(result.data_info.columns);
          console.log('Generated data definition');

          this.updateForecastButtonState();
          console.log('Updated forecast button state');

        } catch (processingError) {
          console.error('Error processing successful result:', processingError);
          this.showAlert('danger', 'Processing Error', `Error processing upload result: ${processingError.message}`);
        }
      } else {
        console.log('Upload failed, showing error:', result.message);

        // Handle forecast output data error specially
        if (result.is_forecast_output) {
          const suggestedColumns = result.suggested_columns ? result.suggested_columns.join(', ') : 'date, value, price, amount, count, sales, revenue';
          this.showAlert('warning', 'Wrong Data Type',
            `${result.message}<br><br><strong>Expected columns:</strong> ${suggestedColumns}<br><br>` +
            'Please upload your original time series data, not forecast output data.');
        } else {
          this.showAlert('danger', 'Upload Failed', result.message || 'Upload failed');
        }
      }
    } catch (error) {
      console.error('Upload error:', error);
      this.showAlert('danger', 'Network Error', 'Failed to upload file: ' + error.message);
    } finally {
      console.log('Upload process completed');
      this.hideLoading();
    }
  }

  // Sample data generation removed as per requirements

  displayDataInfo(dataInfo) {
    const dataInfoDiv = document.getElementById('dataInfo');
    const dataDetailsDiv = document.getElementById('dataDetails');

    let html = `
      <div class="row">
        <div class="col-md-6">
          <table class="table table-sm data-info-table">
            <tr><th>Filename</th><td>${dataInfo.filename}</td></tr>
            <tr><th>Shape</th><td>${dataInfo.shape[0]} rows × ${dataInfo.shape[1]} columns</td></tr>
            ${dataInfo.date_range ? `
            <tr><th>Date Range</th><td>${dataInfo.date_range.start} to ${dataInfo.date_range.end}</td></tr>
            <tr><th>Total Periods</th><td>${dataInfo.date_range.periods}</td></tr>
            ` : ''}
          </table>
        </div>
        <div class="col-md-6">
          <h6>Data Preview</h6>
          <div class="table-responsive">
            <table class="table table-sm table-striped">
              <thead>
                <tr>
                  ${dataInfo.columns.map(col => `<th>${col}</th>`).join('')}
                </tr>
              </thead>
              <tbody>
                ${dataInfo.head.slice(0, 3).map(row => `
                <tr>
                  ${dataInfo.columns.map(col => `<td>${this.formatValue(row[col])}</td>`).join('')}
                </tr>
                `).join('')}
              </tbody>
            </table>
          </div>
        </div>
      </div>
    `;

    if (dataDetailsDiv) {
      dataDetailsDiv.innerHTML = html;
    }
    if (dataInfoDiv) {
      dataInfoDiv.style.display = 'block';
      dataInfoDiv.classList.add('fade-in');
    }

    // Generate data definition with integrated checkboxes
    this.generateDataDefinition(dataInfo.columns);

    // Initialize date configuration
    this.initializeDateConfiguration(dataInfo);
  }

  formatValue(value) {
    if (value == null) return 'null';
    if (typeof value === 'number') {
      return value.toLocaleString(undefined, { maximumFractionDigits: 2 });
    }
    return String(value).substring(0, 20) + (String(value).length > 20 ? '...' : '');
  }

  bindColumnSelectionEvents() {
    // Select all button
    document.getElementById('selectAllColumns').addEventListener('click', () => {
      document.querySelectorAll('.column-checkbox').forEach(cb => {
        cb.checked = true;
      });
    });

    // Deselect all button
    document.getElementById('deselectAllColumns').addEventListener('click', () => {
      document.querySelectorAll('.column-checkbox').forEach(cb => {
        cb.checked = false;
      });
    });

    // Individual checkbox change events
    document.querySelectorAll('.column-checkbox').forEach(cb => {
      cb.addEventListener('change', () => {
        this.updateColumnSelectionState();
      });
    });
  }

  updateColumnSelectionState() {
    // This function can be used for any additional state updates if needed
    // Currently, the checkboxes are integrated into the data definition section
  }

  getSelectedColumns() {
    return Array.from(document.querySelectorAll('.column-checkbox:checked'))
      .map(cb => cb.value);
  }

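  /**
   * Builds the per-column definition UI: each non-date column gets an include
   * toggle plus a role selector (target, dynamic/static numerical or
   * categorical). The first non-date column defaults to the target role.
   */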
  generateDataDefinition(columns, isSampleData = false) {
    const definitionDiv = document.getElementById('dataDefinition');
    const columnsDiv = document.getElementById('columnDefinitions');

    let html = '<div class="row">';

    columns.forEach((col, index) => {
      if (col === 'date') return; // Skip date column

      // Default the first non-date column (index 1, right after the date column) to target, others to dynamic_numerical
      const defaultValue = index === 1 ? 'target' : 'dynamic_numerical';

      html += `
        <div class="col-md-6 col-lg-4">
          <div class="column-definition">
            <div class="d-flex justify-content-between align-items-center mb-2">
              <div class="column-name">${col}</div>
              <div class="form-check form-switch">
                <input class="form-check-input column-checkbox" type="checkbox"
                       id="col_${col}" value="${col}" checked>
                <label class="form-check-label" for="col_${col}">
                </label>
              </div>
            </div>
            <select class="form-select form-select-sm" id="def_${col}">
              <option value="target" ${defaultValue === 'target' ? 'selected' : ''}>Target (main forecast variable)</option>
              <option value="dynamic_numerical" ${defaultValue === 'dynamic_numerical' ? 'selected' : ''}>Dynamic Numerical</option>
              <option value="dynamic_categorical" ${defaultValue === 'dynamic_categorical' ? 'selected' : ''}>Dynamic Categorical</option>
              <option value="static_numerical" ${defaultValue === 'static_numerical' ? 'selected' : ''}>Static Numerical</option>
              <option value="static_categorical" ${defaultValue === 'static_categorical' ? 'selected' : ''}>Static Categorical</option>
            </select>
          </div>
        </div>
      `;
    });

    html += '</div>';

    // Add control buttons
    html += `
      <div class="mt-3">
        <button type="button" class="btn btn-outline-primary btn-sm" id="selectAllColumns">
          <i class="fas fa-check-square me-1"></i>Select All
        </button>
        <button type="button" class="btn btn-outline-secondary btn-sm ms-2" id="deselectAllColumns">
          <i class="fas fa-square me-1"></i>Deselect All
        </button>
      </div>
    `;

    if (columnsDiv) {
      columnsDiv.innerHTML = html;
    }
    if (definitionDiv) {
      definitionDiv.style.display = 'block';
      definitionDiv.classList.add('fade-in');
    }

    // Bind event listeners
    this.bindColumnSelectionEvents();
  }

  getDataDefinition() {
    const definition = {};
    const selectedColumns = this.getSelectedColumns();

    selectedColumns.forEach(col => {
      const select = document.getElementById(`def_${col}`);
      if (select) {
        definition[col] = select.value;
      }
    });

    return definition;
  }

  initializeDateConfiguration(dataInfo) {
    const contextDatesSection = document.getElementById('contextDatesSection');
    if (!contextDatesSection) {
      console.error('Context dates section not found');
      return;
    }

    contextDatesSection.style.display = 'block';
    contextDatesSection.classList.add('fade-in');

    // Store data range for constraints
    this.dataDateRange = dataInfo.date_range;
    this.dataPeriods = dataInfo.date_range.periods; // Actual number of periods in data
    this.availableDates = dataInfo.date_range.available_dates || []; // All available dates in data

    if (this.dataDateRange && this.dataDateRange.start && this.dataDateRange.end) {
      // Update available data information
      const availableDataLength = document.getElementById('availableDataLength');
      const availableDataRange = document.getElementById('availableDataRange');
      const contextStartDate = document.getElementById('contextStartDate');
      const contextEndDate = document.getElementById('contextEndDate');

      if (availableDataLength) availableDataLength.textContent = this.dataPeriods;
      if (availableDataRange) availableDataRange.textContent =
        `${this.dataDateRange.start} to ${this.dataDateRange.end}`;

      // Set up date constraints
      this.setupDateConstraints(contextStartDate, contextEndDate);

      // Initialize context dates to full data range
      if (contextStartDate) contextStartDate.value = this.dataDateRange.start;
      if (contextEndDate) contextEndDate.value = this.dataDateRange.end;

      // Calculate and display context length
      this.updateContextLengthFromDates();

      // Force update after a short delay to ensure elements are ready
      setTimeout(() => {
        console.log('Forcing context length update after delay');
        this.updateContextLengthFromDates();
      }, 100);
    }

    // Bind date change events
    this.bindDateChangeEvents();
  }

  setupDateConstraints(contextStartDate, contextEndDate) {
    if (!this.availableDates || this.availableDates.length === 0) {
      console.warn('No available dates found in data');
      return;
    }

    console.log('Setting up date constraints with available dates:', this.availableDates);

    // Populate select dropdowns with available dates
    if (contextStartDate) {
      this.populateDateSelect(contextStartDate, this.availableDates);
    }
    if (contextEndDate) {
      this.populateDateSelect(contextEndDate, this.availableDates);
    }
  }

  populateDateSelect(selectElement, availableDates) {
    // Clear existing options except the first placeholder
    while (selectElement.children.length > 1) {
      selectElement.removeChild(selectElement.lastChild);
    }

    // Add options for each available date
    availableDates.forEach(date => {
      const option = document.createElement('option');
      option.value = date;
      option.textContent = date;
      selectElement.appendChild(option);
    });
  }

  bindDateChangeEvents() {
    // Context start date change event - validates and updates context length
    const contextStartDate = document.getElementById('contextStartDate');
    if (contextStartDate) {
      console.log('Binding context start date event listener');
      contextStartDate.addEventListener('change', () => {
        this.updateContextFromStartDate();
      });
    } else {
      console.error('Context start date element not found');
    }

    // Context end date change event - updates context length
    const contextEndDate = document.getElementById('contextEndDate');
    if (contextEndDate) {
      console.log('Binding context end date event listener');
      contextEndDate.addEventListener('change', () => {
        this.updateContextFromEndDate();
      });
    } else {
      console.error('Context end date element not found');
    }
  }

  updateModelConfiguration(contextLen, horizonLen) {
    const contextLenElement = document.getElementById('contextLen');
    const horizonLenElement = document.getElementById('horizonLen');

    if (contextLenElement) contextLenElement.value = contextLen;
    if (horizonLenElement) horizonLenElement.value = horizonLen;
  }

  calculateDaysDifference(startDate, endDate) {
    const start = new Date(startDate);
    const end = new Date(endDate);
    const timeDiff = end.getTime() - start.getTime();
    return Math.ceil(timeDiff / (1000 * 3600 * 24)) + 1; // +1 to include both start and end dates
  }

  updateContextFromStartDate() {
    // When context start date changes, validate and update context length
    console.log('Context start date changed');
    const contextStartElement = document.getElementById('contextStartDate');
    if (!contextStartElement) return;

    const contextStart = contextStartElement.value;
    if (!contextStart) return;

    // Since we're using select dropdowns with available dates, no need for range validation
    console.log('Selected context start date:', contextStart);

    // Recalculate context length and validate constraints
    this.updateContextLengthFromDates();
  }

  updateContextFromEndDate() {
    // When context end date changes, validate and update context length
    console.log('Context end date changed');
    const contextEndElement = document.getElementById('contextEndDate');
    if (!contextEndElement) return;

    const contextEnd = contextEndElement.value;
    if (!contextEnd) return;

    // Since we're using select dropdowns with available dates, no need for range validation
    console.log('Selected context end date:', contextEnd);

    // Recalculate context length and validate constraints
    this.updateContextLengthFromDates();
  }

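  /**
   * Derives the context length from the selected start/end dates (counted in
   * data periods, inclusive), truncates it down to a multiple of 32 and checks
   * the TimesFM 2.0 limits (minimum 32 periods, context + horizon <= 4096)
   * before updating the model configuration and constraint feedback.
   */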
  updateContextLengthFromDates() {
    // Calculate context length based on actual data periods between selected dates
    console.log('updateContextLengthFromDates called');
    const contextStartElement = document.getElementById('contextStartDate');
    const contextEndElement = document.getElementById('contextEndDate');

    if (!contextStartElement || !contextEndElement || !this.availableDates) {
      console.log('Missing elements or available dates:', {
        contextStartElement: !!contextStartElement,
        contextEndElement: !!contextEndElement,
        availableDates: !!this.availableDates
      });
      return;
    }

    const contextStart = contextStartElement.value;
    const contextEnd = contextEndElement.value;

    if (!contextStart || !contextEnd) {
      console.log('Missing date values:', { contextStart, contextEnd });
      return;
    }

    // Find the indices of the selected dates in the available dates array
    const startIndex = this.availableDates.indexOf(contextStart);
    const endIndex = this.availableDates.indexOf(contextEnd);

    if (startIndex === -1 || endIndex === -1) {
      console.error('Selected dates not found in available dates');
      return;
    }

    // Calculate context length as the number of periods between the dates (inclusive)
    let contextLen = endIndex - startIndex + 1;

    console.log('Context length calculation:', {
      contextStart,
      contextEnd,
      startIndex,
      endIndex,
      contextLen,
      availableDatesCount: this.availableDates.length
    });

    // Ensure context length is positive and doesn't exceed available data
    if (contextLen <= 0) {
      console.warn('Context length is zero or negative, setting to 1');
      contextLen = 1;
    }

    if (contextLen > this.dataPeriods) {
      console.warn(`Context length (${contextLen}) exceeds available data periods (${this.dataPeriods}), adjusting`);
      contextLen = this.dataPeriods;
    }

    // Get horizon length from model configuration
    const horizonLen = parseInt(document.getElementById('horizonLen').value) || 24;

    // Apply 32-multiple truncation (truncate earlier periods)
    const truncatedContextLen = contextLen - (contextLen % 32);

    // Validate TimesFM constraints
    const totalLength = truncatedContextLen + horizonLen;
    const isMultipleOf32 = truncatedContextLen % 32 === 0;
    const isWithinLimit = totalLength <= 4096;

    if (truncatedContextLen < 32) {
      // 'danger' is the Bootstrap alert type ('error' is not a valid alert class)
      this.showAlert('danger', 'Insufficient Context',
        `Context length ${truncatedContextLen} is less than minimum 32 periods required by TimesFM.`);
      return;
    }

    if (!isWithinLimit) {
      this.showAlert('warning', 'Constraint Warning',
        `Total length ${totalLength} exceeds TimesFM limit of 4096.`);
    }

    // Update model configuration and display
    this.updateModelConfiguration(truncatedContextLen, horizonLen);

    const contextLengthDisplay = document.getElementById('contextLengthDisplay');
    if (contextLengthDisplay) {
      contextLengthDisplay.textContent = truncatedContextLen;
    }

    this.updateConstraintFeedback(truncatedContextLen, horizonLen);
  }

  updateConstraintFeedback(contextLen, horizonLen) {
    const totalLength = contextLen + horizonLen;
    const isMultipleOf32 = contextLen % 32 === 0;
    const isWithinLimit = totalLength <= 4096;

    // Update context length display with color coding
    const contextDisplay = document.getElementById('contextLengthDisplay');
    if (contextDisplay) {
      if (isMultipleOf32) {
        contextDisplay.style.color = 'green';
      } else {
        contextDisplay.style.color = 'red';
      }
    }

    // Update total length display with color coding
    const totalDisplay = document.getElementById('totalLengthDisplay');
    if (totalDisplay) {
      if (isWithinLimit) {
        totalDisplay.style.color = 'green';
      } else {
        totalDisplay.style.color = 'red';
      }
    }

    // Show warning if constraints are violated
    if (!isMultipleOf32 || !isWithinLimit) {
      this.showAlert('warning', 'Constraint Warning',
        `TimesFM 2.0 constraints: Context length must be multiple of 32 (${contextLen}), total length must be ≤ 4096 (${totalLength})`);
    }
  }

  updateForecastButtonState() {
    const forecastBtn = document.getElementById('forecastBtn');
    const canForecast = this.modelInitialized && this.currentData;

    forecastBtn.disabled = !canForecast;

    if (canForecast) {
      forecastBtn.classList.remove('btn-secondary');
      forecastBtn.classList.add('btn-warning');
    } else {
      forecastBtn.classList.add('btn-secondary');
      forecastBtn.classList.remove('btn-warning');
    }
  }

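  /**
   * Validates the current selection (model ready, data uploaded, at least one
   * column selected and one target defined), assembles the forecast request
   * including the chosen quantile indices, POSTs it to /api/forecast and
   * routes the response to the result display or the error panel.
   */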
  async runForecast() {
    if (!this.modelInitialized || !this.currentData) {
      this.showAlert('warning', 'Cannot Forecast', 'Please initialize model and upload data first.');
      return;
    }

    // Clear any previous results/errors
    this.clearForecastResults();

    const selectedColumns = this.getSelectedColumns();
    if (selectedColumns.length === 0) {
      this.showAlert('warning', 'No Variables Selected', 'Please select at least one variable for forecasting.');
      return;
    }

    const dataDefinition = this.getDataDefinition();

    // Validate that at least one target column is defined
    const hasTarget = Object.values(dataDefinition).includes('target');
    if (!hasTarget) {
      this.showAlert('warning', 'No Target Variable', 'Please define at least one selected column as the target variable.');
      return;
    }

    const config = {
      filename: this.currentData.filename,
      data_definition: dataDefinition,
      use_covariates: document.getElementById('useCovariates').checked,
      context_len: parseInt(document.getElementById('contextLen').value),
      horizon_len: parseInt(document.getElementById('horizonLen').value),
      context_start_date: document.getElementById('contextStartDate').value,
      context_end_date: document.getElementById('contextEndDate').value
    };

    // Attach user-selected quantile ticks (no default - respect user selection)
    let ticks = Array.from(document.querySelectorAll('.quantile-tick'))
      .filter(cb => cb.checked)
      .map(cb => parseInt(cb.value))
      .sort((a, b) => a - b);
    // Don't set default quantiles - pass empty array if none selected
    config.quantile_indices = ticks;

    // FRONTEND DEBUGGING - Show what we're sending to backend
    console.log("=".repeat(80));
    console.log("FRONTEND DEBUGGING - SENDING TO BACKEND");
    console.log("=".repeat(80));
    console.log("Configuration being sent to backend:");
    console.log("  - Filename:", config.filename);
    console.log("  - Use Covariates:", config.use_covariates);
    console.log("  - Context Length:", config.context_len);
    console.log("  - Horizon Length:", config.horizon_len);
    console.log("  - Context Start Date:", config.context_start_date);
    console.log("  - Context End Date:", config.context_end_date);
    console.log("  - Quantile Indices:", config.quantile_indices);

    // Show current UI state
    console.log("Current UI state:");
    console.log("  - Context Start Date element value:", document.getElementById('contextStartDate').value);
    console.log("  - Context End Date element value:", document.getElementById('contextEndDate').value);
    console.log("  - Context Length element value:", document.getElementById('contextLen').value);
    console.log("  - Horizon Length element value:", document.getElementById('horizonLen').value);
    console.log("  - Context Length display:", document.getElementById('contextLengthDisplay').textContent);
    console.log("  - Use Covariates checkbox:", document.getElementById('useCovariates').checked);

    // Show data definition
    console.log("Data definition:");
    console.log("  - Selected columns:", Object.keys(dataDefinition));
    console.log("  - Column types:", dataDefinition);

    console.log("=".repeat(80));

    this.showLoading('Running Forecast', 'TimesFM is analyzing your data and generating forecasts...');

    try {
      const response = await fetch('/api/forecast', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(config)
      });

      const result = await response.json();

      if (response.ok && result.success) {
        console.log('Forecast successful, proceeding to display and visualize...');
        this.currentResults = result;
        this.showAlert('success', 'Forecast Complete', 'Forecasting completed successfully!');

        try {
          await this.displayResults(result);
          console.log('displayResults completed successfully');
        } catch (displayError) {
          console.error('Error in displayResults:', displayError);
        }

        console.log('About to call generateVisualization...');
        try {
          this.generateVisualization(result);
        } catch (vizError) {
          console.error('Error calling generateVisualization:', vizError);
        }
      } else {
        // Handle both HTTP errors and API errors
        const errorMessage = result.message || `HTTP ${response.status}: ${response.statusText}`;
        console.log('Forecast error received:', { result, response: { status: response.status, statusText: response.statusText } });
        this.displayForecastError(errorMessage);
        this.showAlert('danger', 'Forecast Failed', 'Please check the error details below.');
      }
    } catch (error) {
      this.displayForecastError('Network Error: ' + error.message);
      this.showAlert('danger', 'Network Error', 'Please check the error details below.');
    } finally {
      this.hideLoading();
    }
  }

  displayForecastError(errorMessage) {
    console.log('Displaying forecast error:', errorMessage);
    const resultsCard = document.getElementById('resultsCard');
    const cardBody = resultsCard.querySelector('.card-body');

    console.log('Found elements:', { resultsCard, cardBody });

    // Clear any existing results and hide tabs
    const resultTabs = document.getElementById('resultTabs');
    const resultTabContent = document.getElementById('resultTabContent');

    if (resultTabs) resultTabs.style.display = 'none';
    if (resultTabContent) resultTabContent.style.display = 'none';

    // Create error display
    const errorHtml = `
      <div class="alert alert-danger" role="alert">
        <h5 class="alert-heading">
          <i class="fas fa-exclamation-triangle me-2"></i>Forecast Error
        </h5>
        <hr>
        <div class="error-details">
          <h6>Error Details:</h6>
          <pre class="bg-light p-3 rounded mt-2" style="white-space: pre-wrap; font-size: 0.9rem;">${errorMessage}</pre>
        </div>
        <div class="mt-3">
          <h6>Common Solutions:</h6>
          <ul class="mb-0">
            <li>Check that your data definition is correct (especially static vs dynamic covariates)</li>
            <li>Ensure all selected variables have valid data (no missing values in critical columns)</li>
            <li>Verify that at least one variable is defined as "Target"</li>
            <li>Check that your data has enough historical points for the selected context length</li>
          </ul>
        </div>
      </div>
    `;

    cardBody.innerHTML = errorHtml;
    resultsCard.style.display = 'block';
    resultsCard.classList.add('fade-in');
  }

  clearForecastResults() {
    const resultsCard = document.getElementById('resultsCard');

    if (resultsCard) {
      resultsCard.style.display = 'none';
      // Reset the card body to its original structure
      const cardBody = resultsCard.querySelector('.card-body');
      if (cardBody) {
        // Restore original structure
        cardBody.innerHTML = `
          <!-- Tabs for different result views -->
          <ul class="nav nav-tabs" id="resultTabs" role="tablist">
            <li class="nav-item" role="presentation">
              <button class="nav-link active" id="visualization-tab" data-bs-toggle="tab"
                      data-bs-target="#visualization" type="button" role="tab">
                <i class="fas fa-chart-area me-1"></i>
                Visualization
              </button>
            </li>
            <li class="nav-item" role="presentation">
              <button class="nav-link" id="summary-tab" data-bs-toggle="tab"
                      data-bs-target="#summary" type="button" role="tab">
                <i class="fas fa-list me-1"></i>
                Summary
              </button>
            </li>
            <li class="nav-item" role="presentation">
              <button class="nav-link" id="data-tab" data-bs-toggle="tab"
                      data-bs-target="#data" type="button" role="tab">
                <i class="fas fa-table me-1"></i>
                Data
              </button>
            </li>
          </ul>

          <!-- Tab Content -->
          <div class="tab-content mt-3" id="resultTabContent">
            <!-- Visualization Tab -->
            <div class="tab-pane fade show active" id="visualization" role="tabpanel">
              <div class="chart-container">
                <div id="forecastChart"></div>
              </div>
              <div class="mt-3">
                <button type="button" class="btn btn-outline-primary" id="downloadChart">
                  <i class="fas fa-download me-2"></i>
                  Download Chart
                </button>
                <button type="button" class="btn btn-outline-success ms-2" id="downloadData">
                  <i class="fas fa-table me-2"></i>
                  Download Data
                </button>
              </div>
              <!-- Quantile selection (always visible) -->
              <div class="row mt-3" id="quantileSelector">
                <div class="col-12">
                  <label class="form-label">Select quantiles to shade (choose lower and upper)</label>
                  <div class="d-flex flex-wrap gap-2" id="quantileCheckboxes">
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="1" id="q1" checked>
                      <label class="form-check-label" for="q1">Q10</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="2" id="q2">
                      <label class="form-check-label" for="q2">Q20</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="3" id="q3">
                      <label class="form-check-label" for="q3">Q30</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="4" id="q4">
                      <label class="form-check-label" for="q4">Q40</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="5" id="q5">
                      <label class="form-check-label" for="q5">Q50</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="6" id="q6">
                      <label class="form-check-label" for="q6">Q60</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="7" id="q7">
                      <label class="form-check-label" for="q7">Q70</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="8" id="q8">
                      <label class="form-check-label" for="q8">Q80</label>
                    </div>
                    <div class="form-check form-check-inline">
                      <input class="form-check-input quantile-tick" type="checkbox" value="9" id="q9" checked>
                      <label class="form-check-label" for="q9">Q90</label>
                    </div>
                  </div>
                </div>
              </div>
            </div>

            <!-- Summary Tab -->
            <div class="tab-pane fade" id="summary" role="tabpanel">
              <div id="forecastSummary">
                <p class="text-muted">Summary will appear here after forecasting.</p>
              </div>
            </div>

            <!-- Data Tab -->
            <div class="tab-pane fade" id="data" role="tabpanel">
              <div id="forecastData">
                <p class="text-muted">Data will appear here after forecasting.</p>
              </div>
            </div>
          </div>
        `;
      }

      // Re-bind quantile change events
      this.bindQuantileEvents();

      // Re-bind download button events
      this.bindDownloadEvents();
    }
  }

  bindQuantileEvents() {
    // Bind change events to quantile checkboxes
    document.querySelectorAll('.quantile-tick').forEach(checkbox => {
      checkbox.addEventListener('change', () => {
        this.refreshVisualization();
      });
    });
  }

  refreshVisualization() {
    // Only refresh if we have current results
    if (this.currentResults && this.currentResults.success) {
      console.log('Refreshing visualization with new quantile selection...');

      // Get selected quantiles (empty array if none selected)
      const selectedQuantiles = document.querySelectorAll('.quantile-tick:checked');
      const quantileIndices = Array.from(selectedQuantiles)
        .map(cb => parseInt(cb.value))
        .sort((a, b) => a - b);

      console.log('Selected quantile indices:', quantileIndices);
      console.log('Number of selected quantiles:', quantileIndices.length);

      // Create a copy of results with updated quantile indices
      const updatedResults = { ...this.currentResults };
      updatedResults.quantile_indices = quantileIndices;

      this.generateVisualization(updatedResults);
    }
  }

  bindDownloadEvents() {
    // Re-bind download button events after HTML recreation
    const downloadChartBtn = document.getElementById('downloadChart');
    const downloadDataBtn = document.getElementById('downloadData');

    if (downloadChartBtn) {
      downloadChartBtn.addEventListener('click', () => {
        this.downloadChart();
      });
    }

    if (downloadDataBtn) {
      downloadDataBtn.addEventListener('click', () => {
        this.downloadData();
      });
    }
  }

  async displayResults(result) {
    const resultsCard = document.getElementById('resultsCard');

    if (!resultsCard) {
      console.error('resultsCard element not found');
      return;
    }

    resultsCard.style.display = 'block';
    resultsCard.classList.add('fade-in');

    // Populate summary tab
    this.displaySummary(result.forecast_summary, result.results);

    // Populate data tab
    this.displayDataTable(result.results);
  }

  displaySummary(summary, results) {
    const summaryDiv = document.getElementById('forecastSummary');

    if (!summaryDiv) {
      console.error('forecastSummary element not found');
      return;
    }

    const methodsCount = summary.methods_used.length;
    const mainForecast = results.enhanced_forecast || results.point_forecast;
    const avgForecast = mainForecast ? (mainForecast.reduce((a, b) => a + b, 0) / mainForecast.length) : 0;

    let html = `
      <div class="row mb-4">
        <div class="col-md-3">
          <div class="summary-card bg-primary">
            <div class="summary-value">${summary.context_length}</div>
            <div class="summary-label">Context Length</div>
          </div>
        </div>
        <div class="col-md-3">
          <div class="summary-card bg-success">
            <div class="summary-value">${summary.horizon_length}</div>
            <div class="summary-label">Forecast Periods</div>
          </div>
        </div>
        <div class="col-md-3">
          <div class="summary-card bg-info">
            <div class="summary-value">${methodsCount}</div>
            <div class="summary-label">Methods Used</div>
          </div>
        </div>
        <div class="col-md-3">
          <div class="summary-card bg-warning">
            <div class="summary-value">$${avgForecast.toLocaleString(undefined, {maximumFractionDigits: 0})}</div>
            <div class="summary-label">Avg. Forecast</div>
          </div>
        </div>
      </div>

      <div class="row">
        <div class="col-md-6">
          <h6>Configuration</h6>
          <table class="table table-sm">
            <tr><th>Target Variable</th><td>${summary.target_column}</td></tr>
            <tr><th>Covariates Used</th><td>${summary.covariates_used ? 'Yes' : 'No'}</td></tr>
            <tr><th>Methods</th><td>${summary.methods_used.join(', ')}</td></tr>
          </table>
        </div>
        <div class="col-md-6">
          <h6>Forecast Methods</h6>
          <ul class="list-group list-group-flush">
    `;

    summary.methods_used.forEach(method => {
      const methodName = method.replace('_', ' ').replace(/\b\w/g, l => l.toUpperCase());
      const forecast = results[method];

      // Check if forecast is an array and has numeric values
      if (Array.isArray(forecast) && forecast.length > 0 && typeof forecast[0] === 'number') {
        const minVal = Math.min(...forecast);
        const maxVal = Math.max(...forecast);

        html += `
          <li class="list-group-item d-flex justify-content-between align-items-center">
            ${methodName}
            <span class="badge bg-secondary">$${minVal.toLocaleString()} - $${maxVal.toLocaleString()}</span>
          </li>
        `;
      } else {
        // Fallback for non-array or non-numeric data
        html += `
          <li class="list-group-item d-flex justify-content-between align-items-center">
            ${methodName}
            <span class="badge bg-secondary">Available</span>
          </li>
        `;
      }
    });

    html += `
          </ul>
        </div>
      </div>
    `;

    summaryDiv.innerHTML = html;
  }

  displayDataTable(results) {
    const dataDiv = document.getElementById('forecastData');

    if (!dataDiv) {
      console.error('forecastData element not found');
      return;
    }

    // Create table with forecast results
    const forecastLength = results.point_forecast ? results.point_forecast.length :
      results.enhanced_forecast ? results.enhanced_forecast.length : 0;

    if (forecastLength === 0) {
      dataDiv.innerHTML = '<p>No forecast data available.</p>';
      return;
    }

    let html = `
      <div class="table-responsive">
        <table class="table table-striped">
          <thead>
            <tr>
              <th>Period</th>
    `;

    Object.keys(results).forEach(method => {
      if (method !== 'prediction_intervals') {
        const methodName = method.replace('_', ' ').replace(/\b\w/g, l => l.toUpperCase());
        html += `<th>${methodName}</th>`;
      }
    });

    html += `</tr></thead><tbody>`;

    for (let i = 0; i < forecastLength; i++) {
      html += `<tr><td>${i + 1}</td>`;

      Object.entries(results).forEach(([method, values]) => {
        if (method !== 'prediction_intervals' && Array.isArray(values)) {
          const value = values[i];
          html += `<td>$${value.toLocaleString(undefined, {minimumFractionDigits: 2, maximumFractionDigits: 2})}</td>`;
        }
      });

      html += '</tr>';
    }

    html += '</tbody></table></div>';
    dataDiv.innerHTML = html;
  }

  async generateVisualization(result) {
    console.log('generateVisualization called with result:', result);
    console.log('Visualization data:', result.visualization_data);
    console.log('Historical data length:', result.visualization_data?.historical_data?.length);
    console.log('Historical dates length:', result.visualization_data?.dates_historical?.length);
    console.log('Target name:', result.visualization_data?.target_name);
|
| 1343 |
+
this.showLoading('Generating Chart', 'Creating professional forecast visualization...');
|
| 1344 |
+
|
| 1345 |
+
try {
|
| 1346 |
+
const response = await fetch('/api/visualize', {
|
| 1347 |
+
method: 'POST',
|
| 1348 |
+
headers: {
|
| 1349 |
+
'Content-Type': 'application/json',
|
| 1350 |
+
},
|
| 1351 |
+
body: JSON.stringify({
|
| 1352 |
+
visualization_data: result.visualization_data,
|
| 1353 |
+
results: result.results,
|
| 1354 |
+
quantile_indices: (function(){
|
| 1355 |
+
let t = Array.from(document.querySelectorAll('.quantile-tick'))
|
| 1356 |
+
.filter(cb => cb.checked)
|
| 1357 |
+
.map(cb => parseInt(cb.value))
|
| 1358 |
+
.sort((a,b) => a-b);
|
| 1359 |
+
console.log('generateVisualization - quantile indices being sent:', t);
|
| 1360 |
+
console.log('generateVisualization - number of quantiles:', t.length);
|
| 1361 |
+
// Don't set default quantiles - pass empty array if none selected
|
| 1362 |
+
return t;
|
| 1363 |
+
})()
|
| 1364 |
+
})
|
| 1365 |
+
});
|
| 1366 |
+
|
| 1367 |
+
const vizResult = await response.json();
|
| 1368 |
+
|
| 1369 |
+
console.log('Visualization result received:', vizResult);
|
| 1370 |
+
console.log('Visualization success:', vizResult.success);
|
| 1371 |
+
if (vizResult.figure) {
|
| 1372 |
+
console.log('Figure data type:', typeof vizResult.figure);
|
| 1373 |
+
if (typeof vizResult.figure === 'string') {
|
| 1374 |
+
console.log('Figure string length:', vizResult.figure.length);
|
| 1375 |
+
} else {
|
| 1376 |
+
console.log('Figure object keys:', Object.keys(vizResult.figure));
|
| 1377 |
+
if (vizResult.figure.data) {
|
| 1378 |
+
console.log('Figure data traces:', vizResult.figure.data.length);
|
| 1379 |
+
vizResult.figure.data.forEach((trace, i) => {
|
| 1380 |
+
console.log(`Trace ${i}: name='${trace.name}', type='${trace.type}', visible=${trace.visible}`);
|
| 1381 |
+
if (trace.y) {
|
| 1382 |
+
console.log(`Trace ${i} y-data length: ${trace.y.length || 'scalar'}`);
|
| 1383 |
+
}
|
| 1384 |
+
});
|
| 1385 |
+
}
|
| 1386 |
+
}
|
| 1387 |
+
}
|
| 1388 |
+
|
| 1389 |
+
if (vizResult.success) {
|
| 1390 |
+
if (typeof Plotly === 'undefined') {
|
| 1391 |
+
console.error('Plotly library not found on window');
|
| 1392 |
+
this.showAlert('danger', 'Visualization Error', 'Plotly library is not loaded.');
|
| 1393 |
+
return;
|
| 1394 |
+
}
|
| 1395 |
+
|
| 1396 |
+
const chartContainer = document.getElementById('forecastChart');
|
| 1397 |
+
|
| 1398 |
+
if (!chartContainer) {
|
| 1399 |
+
console.error('Chart container element not found!');
|
| 1400 |
+
this.showAlert('danger', 'Display Error', 'Chart display element not found');
|
| 1401 |
+
return;
|
| 1402 |
+
}
|
| 1403 |
+
|
| 1404 |
+
const figurePayload = typeof vizResult.figure === 'string'
|
| 1405 |
+
? JSON.parse(vizResult.figure)
|
| 1406 |
+
: (vizResult.figure || {});
|
| 1407 |
+
|
| 1408 |
+
if (!figurePayload.data || !Array.isArray(figurePayload.data) || figurePayload.data.length === 0) {
|
| 1409 |
+
console.error('Figure payload missing data:', figurePayload);
|
| 1410 |
+
this.currentPlotFigure = null;
|
| 1411 |
+
this.currentPlotConfig = null;
|
| 1412 |
+
this.showAlert('warning', 'Visualization Failed', 'Received empty plot data.');
|
| 1413 |
+
return;
|
| 1414 |
+
}
|
| 1415 |
+
|
| 1416 |
+
const defaultConfig = {
|
| 1417 |
+
responsive: true,
|
| 1418 |
+
displaylogo: false,
|
| 1419 |
+
autosizable: true
|
| 1420 |
+
};
|
| 1421 |
+
const plotConfig = Object.assign({}, defaultConfig, vizResult.config || {});
|
| 1422 |
+
const layout = Object.assign({}, figurePayload.layout || {}, {
|
| 1423 |
+
autosize: true,
|
| 1424 |
+
width: null,
|
| 1425 |
+
height: null
|
| 1426 |
+
});
|
| 1427 |
+
|
| 1428 |
+
try {
|
| 1429 |
+
await Plotly.react(chartContainer, figurePayload.data, layout, plotConfig);
|
| 1430 |
+
chartContainer.style.display = 'block';
|
| 1431 |
+
|
| 1432 |
+
// Make chart responsive to window resize with debouncing
|
| 1433 |
+
window.addEventListener('resize', () => {
|
| 1434 |
+
clearTimeout(this.resizeTimeout);
|
| 1435 |
+
this.resizeTimeout = setTimeout(() => {
|
| 1436 |
+
if (typeof Plotly !== 'undefined' && chartContainer) {
|
| 1437 |
+
Plotly.Plots.resize(chartContainer);
|
| 1438 |
+
}
|
| 1439 |
+
}, 250);
|
| 1440 |
+
});
|
| 1441 |
+
|
| 1442 |
+
// Cache the latest figure/config for downloads and refreshes
|
| 1443 |
+
this.currentPlotFigure = Object.assign({}, figurePayload, { layout });
|
| 1444 |
+
this.currentPlotConfig = plotConfig;
|
| 1445 |
+
|
| 1446 |
+
const resultsCard = document.getElementById('resultsCard');
|
| 1447 |
+
if (resultsCard) {
|
| 1448 |
+
resultsCard.scrollIntoView({ behavior: 'smooth', block: 'start' });
|
| 1449 |
+
}
|
| 1450 |
+
} catch (plotlyError) {
|
| 1451 |
+
console.error('Plotly rendering failed:', plotlyError);
|
| 1452 |
+
this.currentPlotFigure = null;
|
| 1453 |
+
this.currentPlotConfig = null;
|
| 1454 |
+
this.showAlert('danger', 'Visualization Error', 'Failed to render interactive chart.');
|
| 1455 |
+
}
|
| 1456 |
+
} else {
|
| 1457 |
+
console.error('Visualization failed:', vizResult.message);
|
| 1458 |
+
this.currentPlotFigure = null;
|
| 1459 |
+
this.currentPlotConfig = null;
|
| 1460 |
+
this.showAlert('warning', 'Visualization Failed', vizResult.message || 'Failed to generate chart');
|
| 1461 |
+
}
|
| 1462 |
+
} catch (error) {
|
| 1463 |
+
this.currentPlotFigure = null;
|
| 1464 |
+
this.currentPlotConfig = null;
|
| 1465 |
+
this.showAlert('danger', 'Chart Error', 'Failed to generate visualization: ' + error.message);
|
| 1466 |
+
} finally {
|
| 1467 |
+
this.hideLoading();
|
| 1468 |
+
}
|
| 1469 |
+
}
|
| 1470 |
+
|
| 1471 |
+
downloadChart() {
|
| 1472 |
+
const chartContainer = document.getElementById('forecastChart');
|
| 1473 |
+
|
| 1474 |
+
if (!chartContainer || !this.currentPlotFigure) {
|
| 1475 |
+
this.showAlert('warning', 'No Chart', 'No chart available for download.');
|
| 1476 |
+
return;
|
| 1477 |
+
}
|
| 1478 |
+
|
| 1479 |
+
if (typeof Plotly === 'undefined') {
|
| 1480 |
+
this.showAlert('danger', 'Download Error', 'Plotly library is not available.');
|
| 1481 |
+
return;
|
| 1482 |
+
}
|
| 1483 |
+
|
| 1484 |
+
const filename = `sapheneia_forecast_${new Date().toISOString().slice(0, 10)}`;
|
| 1485 |
+
|
| 1486 |
+
Plotly.downloadImage(chartContainer, {
|
| 1487 |
+
format: 'png',
|
| 1488 |
+
filename,
|
| 1489 |
+
width: this.currentPlotFigure.layout?.width || 1200,
|
| 1490 |
+
height: this.currentPlotFigure.layout?.height || 800
|
| 1491 |
+
})
|
| 1492 |
+
.then(() => {
|
| 1493 |
+
this.showAlert('success', 'Download Started', 'Chart download has started.');
|
| 1494 |
+
})
|
| 1495 |
+
.catch((error) => {
|
| 1496 |
+
console.error('Plotly download failed:', error);
|
| 1497 |
+
this.showAlert('danger', 'Download Error', 'Failed to download chart image.');
|
| 1498 |
+
});
|
| 1499 |
+
}
|
| 1500 |
+
|
| 1501 |
+
downloadData() {
|
| 1502 |
+
if (!this.currentResults || !this.currentResults.results) {
|
| 1503 |
+
this.showAlert('warning', 'No Data', 'No forecast data available for download.');
|
| 1504 |
+
return;
|
| 1505 |
+
}
|
| 1506 |
+
|
| 1507 |
+
const results = this.currentResults.results;
|
| 1508 |
+
const forecastLength = results.point_forecast ? results.point_forecast.length :
|
| 1509 |
+
results.enhanced_forecast ? results.enhanced_forecast.length : 0;
|
| 1510 |
+
|
| 1511 |
+
if (forecastLength === 0) {
|
| 1512 |
+
this.showAlert('warning', 'No Data', 'No forecast data available for download.');
|
| 1513 |
+
return;
|
| 1514 |
+
}
|
| 1515 |
+
|
| 1516 |
+
// Create CSV content
|
| 1517 |
+
let csvContent = 'Period,Point_Forecast';
|
| 1518 |
+
|
| 1519 |
+
// Add raw quantile columns (skip index 0 - legacy mean)
|
| 1520 |
+
if (results.quantile_forecast && Array.isArray(results.quantile_forecast)) {
|
| 1521 |
+
const numQuantiles = results.quantile_forecast[0] ? results.quantile_forecast[0].length : 0;
|
| 1522 |
+
// Skip index 0 (legacy mean), add Q10, Q20, Q30, Q40, Q50, Q60, Q70, Q80, Q90
|
| 1523 |
+
for (let i = 1; i < numQuantiles; i++) {
|
| 1524 |
+
const percentile = i * 10; // 1->Q10, 2->Q20, etc.
|
| 1525 |
+
csvContent += `,Q${percentile}`;
|
| 1526 |
+
}
|
| 1527 |
+
}
|
| 1528 |
+
|
| 1529 |
+
// Only include Period, Point_Forecast, and Q10-Q90 columns
|
| 1530 |
+
// No additional columns needed
|
| 1531 |
+
|
| 1532 |
+
csvContent += '\n';
|
| 1533 |
+
|
| 1534 |
+
// Add data rows
|
| 1535 |
+
for (let i = 0; i < forecastLength; i++) {
|
| 1536 |
+
csvContent += `${i + 1}`;
|
| 1537 |
+
|
| 1538 |
+
// Point forecast
|
| 1539 |
+
const pointForecast = results.enhanced_forecast || results.point_forecast;
|
| 1540 |
+
csvContent += `,${pointForecast[i]}`;
|
| 1541 |
+
|
| 1542 |
+
// Raw quantiles (skip index 0 - legacy mean)
|
| 1543 |
+
if (results.quantile_forecast && Array.isArray(results.quantile_forecast)) {
|
| 1544 |
+
const quantileRow = results.quantile_forecast[i];
|
| 1545 |
+
if (quantileRow && Array.isArray(quantileRow)) {
|
| 1546 |
+
// Skip index 0, add Q10, Q20, Q30, Q40, Q50, Q60, Q70, Q80, Q90
|
| 1547 |
+
for (let j = 1; j < quantileRow.length; j++) {
|
| 1548 |
+
csvContent += `,${quantileRow[j]}`;
|
| 1549 |
+
}
|
| 1550 |
+
}
|
| 1551 |
+
}
|
| 1552 |
+
|
| 1553 |
+
// Only include Period, Point_Forecast, and Q10-Q90 data
|
| 1554 |
+
// No additional data needed
|
| 1555 |
+
|
| 1556 |
+
csvContent += '\n';
|
| 1557 |
+
}
|
| 1558 |
+
|
| 1559 |
+
// Create and download file
|
| 1560 |
+
const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' });
|
| 1561 |
+
const link = document.createElement('a');
|
| 1562 |
+
const url = URL.createObjectURL(blob);
|
| 1563 |
+
link.setAttribute('href', url);
|
| 1564 |
+
link.setAttribute('download', `sapheneia_forecast_data_${new Date().toISOString().slice(0, 10)}.csv`);
|
| 1565 |
+
link.style.visibility = 'hidden';
|
| 1566 |
+
document.body.appendChild(link);
|
| 1567 |
+
link.click();
|
| 1568 |
+
document.body.removeChild(link);
|
| 1569 |
+
|
| 1570 |
+
this.showAlert('success', 'Download Started', 'Forecast data download has started.');
|
| 1571 |
+
}
|
| 1572 |
+
|
| 1573 |
+
setupTabSwitching() {
|
| 1574 |
+
// Store scroll position when switching away from visualization tab
|
| 1575 |
+
const visualizationTab = document.getElementById('visualization-tab');
|
| 1576 |
+
const summaryTab = document.getElementById('summary-tab');
|
| 1577 |
+
const dataTab = document.getElementById('data-tab');
|
| 1578 |
+
|
| 1579 |
+
// Store scroll position when leaving visualization tab
|
| 1580 |
+
visualizationTab.addEventListener('hidden.bs.tab', () => {
|
| 1581 |
+
this.visualizationScrollPosition = window.pageYOffset;
|
| 1582 |
+
});
|
| 1583 |
+
|
| 1584 |
+
// Restore scroll position when returning to visualization tab
|
| 1585 |
+
visualizationTab.addEventListener('shown.bs.tab', () => {
|
| 1586 |
+
if (this.visualizationScrollPosition !== undefined) {
|
| 1587 |
+
setTimeout(() => {
|
| 1588 |
+
window.scrollTo(0, this.visualizationScrollPosition);
|
| 1589 |
+
}, 100); // Small delay to ensure content is rendered
|
| 1590 |
+
}
|
| 1591 |
+
|
| 1592 |
+
// Resize chart when visualization tab is shown
|
| 1593 |
+
if (this.currentPlotFigure && typeof Plotly !== 'undefined') {
|
| 1594 |
+
setTimeout(() => {
|
| 1595 |
+
Plotly.Plots.resize(document.getElementById('forecastChart'));
|
| 1596 |
+
}, 200);
|
| 1597 |
+
}
|
| 1598 |
+
});
|
| 1599 |
+
|
| 1600 |
+
// Store scroll position when leaving summary tab
|
| 1601 |
+
summaryTab.addEventListener('hidden.bs.tab', () => {
|
| 1602 |
+
this.summaryScrollPosition = window.pageYOffset;
|
| 1603 |
+
});
|
| 1604 |
+
|
| 1605 |
+
// Restore scroll position when returning to summary tab
|
| 1606 |
+
summaryTab.addEventListener('shown.bs.tab', () => {
|
| 1607 |
+
if (this.summaryScrollPosition !== undefined) {
|
| 1608 |
+
setTimeout(() => {
|
| 1609 |
+
window.scrollTo(0, this.summaryScrollPosition);
|
| 1610 |
+
}, 100);
|
| 1611 |
+
}
|
| 1612 |
+
});
|
| 1613 |
+
|
| 1614 |
+
// Store scroll position when leaving data tab
|
| 1615 |
+
dataTab.addEventListener('hidden.bs.tab', () => {
|
| 1616 |
+
this.dataScrollPosition = window.pageYOffset;
|
| 1617 |
+
});
|
| 1618 |
+
|
| 1619 |
+
// Restore scroll position when returning to data tab
|
| 1620 |
+
dataTab.addEventListener('shown.bs.tab', () => {
|
| 1621 |
+
if (this.dataScrollPosition !== undefined) {
|
| 1622 |
+
setTimeout(() => {
|
| 1623 |
+
window.scrollTo(0, this.dataScrollPosition);
|
| 1624 |
+
}, 100);
|
| 1625 |
+
}
|
| 1626 |
+
});
|
| 1627 |
+
}
|
| 1628 |
+
}
|
| 1629 |
+
|
| 1630 |
+
// Initialize the application when the DOM is loaded
|
| 1631 |
+
document.addEventListener('DOMContentLoaded', function() {
|
| 1632 |
+
window.sapheneiaApp = new SapheneiaTimesFM();
|
| 1633 |
+
});
|
webapp/templates/index.html
ADDED
@@ -0,0 +1,372 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Sapheneia TimesFM - Professional Time Series Forecasting</title>

  <!-- Bootstrap CSS -->
  <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet">
  <!-- Font Awesome -->
  <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
  <!-- Custom CSS -->
  <link href="{{ url_for('static', filename='css/styles.css') }}" rel="stylesheet">
</head>
<body>
  <!-- Navigation -->
  <nav class="navbar navbar-expand-lg navbar-dark bg-dark">
    <div class="container">
      <a class="navbar-brand fw-bold" href="#">
        <i class="fas fa-chart-line me-2"></i>
        Sapheneia TimesFM
      </a>
      <span class="navbar-text">
        Professional Time Series Forecasting Platform
      </span>
    </div>
  </nav>

  <!-- Main Container -->
  <div class="container-fluid py-4">
    <!-- Alert Container -->
    <div id="alertContainer"></div>

    <!-- Data Upload Card -->
    <div class="card mb-4">
      <div class="card-header bg-success text-white">
        <h5 class="mb-0">
          <i class="fas fa-upload me-2"></i>
          Data Upload & Configuration
        </h5>
      </div>
      <div class="card-body">
        <div class="row">
          <div class="col-md-6">
            <h6>Upload CSV File</h6>
            <div class="mb-3">
              <input type="file" class="form-control" id="dataFile" accept=".csv">
              <div class="form-text">
                CSV file with 'date' column required. Max size: 16MB.<br>
                <strong>Recommended:</strong> Navigate to the <code>/local</code> directory in your project workspace.
              </div>
            </div>
            <button type="button" class="btn btn-success" id="uploadBtn">
              <i class="fas fa-upload me-2"></i>
              Upload Data
            </button>
          </div>
          <div class="col-md-6">
            <h6>Data Requirements</h6>
            <p class="text-muted">
              Your CSV file must contain:
            </p>
            <ul class="text-muted">
              <li>A 'date' column as the first column</li>
              <li>At least one target time series column</li>
              <li>Optional: covariate columns for enhanced forecasting</li>
            </ul>
          </div>
        </div>

        <!-- Data Info Section -->
        <div id="dataInfo" style="display: none;" class="mt-4">
          <hr>
          <h6>Data Information</h6>
          <div id="dataDetails"></div>
        </div>

        <!-- Data Definition Section -->
        <div id="dataDefinition" style="display: none;" class="mt-4">
          <hr>
          <h6>Data Definition</h6>
          <p class="text-muted">
            Select which variables to include in your forecast and define how each should be used:
          </p>
          <div id="columnDefinitions"></div>
        </div>
      </div>
    </div>

    <!-- Model Configuration Card -->
    <div class="card mb-4">
      <div class="card-header bg-primary text-white">
        <h5 class="mb-0">
          <i class="fas fa-cog me-2"></i>
          TimesFM Model Configuration
        </h5>
      </div>
      <div class="card-body">
        <!-- Context Dates Section -->
        <div id="contextDatesSection" style="display: none;" class="mb-4">
          <h6 class="mb-3">
            <i class="fas fa-calendar-alt me-2"></i>
            Context Dates
          </h6>
          <p class="text-muted">
            Configure the context period (historical data) for forecasting:
          </p>
          <div class="alert alert-info mb-3">
            <small>
              <strong>Available Data:</strong> <span id="availableDataLength">0</span> periods
              (<span id="availableDataRange">-</span>)
            </small>
          </div>
          <div class="row">
            <div class="col-md-6">
              <div class="mb-3">
                <label class="form-label">Start Date</label>
                <select class="form-control" id="contextStartDate">
                  <option value="">Select start date...</option>
                </select>
                <div class="form-text">Start of historical data period</div>
              </div>
            </div>
            <div class="col-md-6">
              <div class="mb-3">
                <label class="form-label">End Date</label>
                <select class="form-control" id="contextEndDate">
                  <option value="">Select end date...</option>
                </select>
                <div class="form-text">End of historical data period</div>
              </div>
            </div>
          </div>
          <div class="alert alert-info">
            <small>
              <strong>Context Length:</strong> <span id="contextLengthDisplay">0</span> periods<br>
              <strong>Constraint:</strong> Must be multiple of 32, max 4096 total
            </small>
          </div>
        </div>

        <form id="modelConfigForm">
          <div class="row">
            <div class="col-md-3">
              <label class="form-label">Backend</label>
              <select class="form-select" id="backend">
                <option value="cpu" selected>CPU</option>
                <option value="gpu">GPU</option>
                <option value="tpu">TPU</option>
              </select>
            </div>
            <div class="col-md-3">
              <label class="form-label">Context Length</label>
              <input type="number" class="form-control" id="contextLen" value="64" min="10" max="2048">
            </div>
            <div class="col-md-3">
              <label class="form-label">Horizon Length</label>
              <input type="number" class="form-control" id="horizonLen" value="24" min="1" max="128">
            </div>
            <div class="col-md-3">
              <label class="form-label">Model Source</label>
              <select class="form-select" id="modelSource">
                <option value="huggingface" selected>HuggingFace Checkpoint</option>
                <option value="local">Local Model Path</option>
              </select>
            </div>
          </div>

          <!-- HuggingFace Checkpoint Row -->
          <div class="row mt-3" id="huggingfaceRow">
            <div class="col-md-12">
              <label class="form-label">HuggingFace Checkpoint</label>
              <select class="form-select" id="checkpoint">
                <option value="google/timesfm-2.0-500m-pytorch" selected>google/timesfm-2.0-500m-pytorch</option>
              </select>
              <div class="form-text">PyTorch model for CPU/GPU inference</div>
            </div>
          </div>

          <!-- Local Model Path Row -->
          <div class="row mt-3" id="localPathRow" style="display: none;">
            <div class="col-md-12">
              <label class="form-label">Local Model Path</label>
              <input type="text" class="form-control" id="localPath" placeholder="/path/to/your/timesfm/model">
              <div class="form-text">Enter the full path to your locally stored TimesFM model</div>
            </div>
          </div>
          <div class="mt-3">
            <button type="submit" class="btn btn-primary">
              <i class="fas fa-play me-2"></i>
              Initialize Model
            </button>
            <span id="modelStatus" class="ms-3">
              <span class="badge bg-secondary">Not Initialized</span>
            </span>
          </div>
        </form>
      </div>
    </div>

    <!-- Forecasting Configuration Card -->
    <div class="card mb-4">
      <div class="card-header bg-warning text-dark">
        <h5 class="mb-0">
          <i class="fas fa-crystal-ball me-2"></i>
          Forecasting Configuration
        </h5>
      </div>
      <div class="card-body">
        <form id="forecastConfigForm">
          <div class="row">
            <div class="col-md-4">
              <div class="form-check form-switch">
                <input class="form-check-input" type="checkbox" id="useCovariates" checked>
                <label class="form-check-label" for="useCovariates">
                  Use Covariates
                </label>
              </div>
            </div>
            <div class="col-md-8"></div>
          </div>
          <div class="mt-3">
            <button type="submit" class="btn btn-warning text-dark" disabled id="forecastBtn">
              <i class="fas fa-magic me-2"></i>
              Run Forecast
            </button>
          </div>
        </form>
      </div>
    </div>

    <!-- Results Card -->
    <div class="card" id="resultsCard" style="display: none;">
      <div class="card-header bg-info text-white">
        <h5 class="mb-0">
          <i class="fas fa-chart-line me-2"></i>
          Forecasting Results
        </h5>
      </div>
      <div class="card-body">
        <!-- Tabs for different result views -->
        <ul class="nav nav-tabs" id="resultTabs" role="tablist">
          <li class="nav-item" role="presentation">
            <button class="nav-link active" id="visualization-tab" data-bs-toggle="tab"
                    data-bs-target="#visualization" type="button" role="tab">
              <i class="fas fa-chart-area me-1"></i>
              Visualization
            </button>
          </li>
          <li class="nav-item" role="presentation">
            <button class="nav-link" id="summary-tab" data-bs-toggle="tab"
                    data-bs-target="#summary" type="button" role="tab">
              <i class="fas fa-list me-1"></i>
              Summary
            </button>
          </li>
          <li class="nav-item" role="presentation">
            <button class="nav-link" id="data-tab" data-bs-toggle="tab"
                    data-bs-target="#data" type="button" role="tab">
              <i class="fas fa-table me-1"></i>
              Data
            </button>
          </li>
        </ul>

        <!-- Tab Content -->
        <div class="tab-content mt-3" id="resultTabContent">
          <!-- Visualization Tab -->
          <div class="tab-pane fade show active" id="visualization" role="tabpanel">
            <div class="chart-container">
              <div id="forecastChart"></div>
            </div>
            <div class="mt-3">
              <button type="button" class="btn btn-outline-primary" id="downloadChart">
                <i class="fas fa-download me-2"></i>
                Download Chart
              </button>
              <button type="button" class="btn btn-outline-success ms-2" id="downloadData">
                <i class="fas fa-table me-2"></i>
                Download Data
              </button>
            </div>
            <!-- Quantile selection (always visible) -->
            <div class="row mt-3" id="quantileSelector">
              <div class="col-12">
                <label class="form-label">Select quantiles to shade (choose lower and upper)</label>
                <div class="d-flex flex-wrap gap-2" id="quantileCheckboxes">
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="1" id="q1" checked>
                    <label class="form-check-label" for="q1">Q10</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="2" id="q2">
                    <label class="form-check-label" for="q2">Q20</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="3" id="q3">
                    <label class="form-check-label" for="q3">Q30</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="4" id="q4">
                    <label class="form-check-label" for="q4">Q40</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="5" id="q5">
                    <label class="form-check-label" for="q5">Q50</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="6" id="q6">
                    <label class="form-check-label" for="q6">Q60</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="7" id="q7">
                    <label class="form-check-label" for="q7">Q70</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="8" id="q8">
                    <label class="form-check-label" for="q8">Q80</label>
                  </div>
                  <div class="form-check form-check-inline">
                    <input class="form-check-input quantile-tick" type="checkbox" value="9" id="q9" checked>
                    <label class="form-check-label" for="q9">Q90</label>
                  </div>
                </div>
                <div class="form-text">Defaults to Q10-Q90 bands. Index 0 (legacy mean) is automatically skipped.</div>
              </div>
            </div>
          </div>

          <!-- Summary Tab -->
          <div class="tab-pane fade" id="summary" role="tabpanel">
            <div id="forecastSummary">
              <!-- Summary content will be populated here -->
            </div>
          </div>

          <!-- Data Tab -->
          <div class="tab-pane fade" id="data" role="tabpanel">
            <div id="forecastData">
              <!-- Data table will be populated here -->
            </div>
          </div>
        </div>
      </div>
    </div>
  </div>

  <!-- Loading Modal -->
  <div class="modal fade" id="loadingModal" tabindex="-1" data-bs-backdrop="static">
    <div class="modal-dialog modal-dialog-centered">
      <div class="modal-content">
        <div class="modal-body text-center py-4">
          <div class="spinner-border text-primary mb-3" role="status">
            <span class="visually-hidden">Loading...</span>
          </div>
          <h6 id="loadingText">Processing...</h6>
          <p class="text-muted mb-0" id="loadingSubtext">Please wait while we process your request.</p>
        </div>
      </div>
    </div>
  </div>

  <!-- Bootstrap JS -->
  <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
  <!-- Plotly JS for interactive visualizations -->
  <script src="https://cdn.plot.ly/plotly-2.26.0.min.js"></script>
  <!-- Custom JS -->
  <script src="{{ url_for('static', filename='js/app.js') }}"></script>
</body>
</html>