Spaces:
Build error
Build error
Deploy Chronos2 Forecasting API v3.0.0 with new SOLID architecture
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .dockerignore +56 -0
- Dockerfile.spaces +53 -0
- README.md +43 -164
- app/api/__init__.py +0 -0
- app/api/dependencies.py +194 -0
- app/api/middleware/__init__.py +0 -0
- app/api/routes/__init__.py +17 -0
- app/api/routes/anomaly.py +85 -0
- app/api/routes/backtest.py +83 -0
- app/api/routes/forecast.py +158 -0
- app/api/routes/health.py +87 -0
- app/application/__init__.py +7 -0
- app/application/dtos/__init__.py +36 -0
- app/application/dtos/anomaly_dtos.py +86 -0
- app/application/dtos/backtest_dtos.py +84 -0
- app/application/dtos/forecast_dtos.py +111 -0
- app/application/mappers/__init__.py +17 -0
- app/application/mappers/anomaly_mapper.py +73 -0
- app/application/mappers/backtest_mapper.py +66 -0
- app/application/mappers/forecast_mapper.py +127 -0
- app/application/use_cases/__init__.py +17 -0
- app/application/use_cases/anomaly_use_case.py +198 -0
- app/application/use_cases/backtest_use_case.py +172 -0
- app/application/use_cases/forecast_use_case.py +186 -0
- app/domain/__init__.py +0 -0
- app/domain/interfaces/__init__.py +0 -0
- app/domain/interfaces/data_transformer.py +65 -0
- app/domain/interfaces/forecast_model.py +104 -0
- app/domain/models/__init__.py +0 -0
- app/domain/models/anomaly.py +115 -0
- app/domain/models/forecast_config.py +118 -0
- app/domain/models/forecast_result.py +147 -0
- app/domain/models/time_series.py +124 -0
- app/domain/services/__init__.py +0 -0
- app/domain/services/anomaly_service.py +191 -0
- app/domain/services/backtest_service.py +243 -0
- app/domain/services/forecast_service.py +194 -0
- app/infrastructure/__init__.py +0 -0
- app/infrastructure/config/__init__.py +0 -0
- app/infrastructure/config/settings.py +75 -0
- app/infrastructure/ml/__init__.py +0 -0
- app/infrastructure/ml/chronos_model.py +129 -0
- app/infrastructure/ml/model_factory.py +172 -0
- app/main_from_hf_space.py +681 -0
- app/main_hf.py +681 -0
- app/main_v2.1.1_backup.py +717 -0
- app/main_v3.py +186 -0
- app/main_working_version.py +643 -0
- app/schemas/__init__.py +0 -0
- app/schemas/requests/__init__.py +0 -0
.dockerignore
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Git
|
| 2 |
+
.git
|
| 3 |
+
.gitignore
|
| 4 |
+
.github
|
| 5 |
+
|
| 6 |
+
# Python
|
| 7 |
+
__pycache__
|
| 8 |
+
*.pyc
|
| 9 |
+
*.pyo
|
| 10 |
+
*.pyd
|
| 11 |
+
.Python
|
| 12 |
+
*.so
|
| 13 |
+
*.egg
|
| 14 |
+
*.egg-info
|
| 15 |
+
dist
|
| 16 |
+
build
|
| 17 |
+
.pytest_cache
|
| 18 |
+
.coverage
|
| 19 |
+
htmlcov
|
| 20 |
+
.mypy_cache
|
| 21 |
+
|
| 22 |
+
# Environment
|
| 23 |
+
.env
|
| 24 |
+
.venv
|
| 25 |
+
venv
|
| 26 |
+
env
|
| 27 |
+
|
| 28 |
+
# IDE
|
| 29 |
+
.vscode
|
| 30 |
+
.idea
|
| 31 |
+
*.swp
|
| 32 |
+
*.swo
|
| 33 |
+
*~
|
| 34 |
+
|
| 35 |
+
# Documentation (large files)
|
| 36 |
+
*.md
|
| 37 |
+
!README.md
|
| 38 |
+
FASE_*.md
|
| 39 |
+
|
| 40 |
+
# Tests
|
| 41 |
+
tests/
|
| 42 |
+
test_*.py
|
| 43 |
+
|
| 44 |
+
# Logs
|
| 45 |
+
logs/
|
| 46 |
+
*.log
|
| 47 |
+
|
| 48 |
+
# Temporary
|
| 49 |
+
tmp/
|
| 50 |
+
temp/
|
| 51 |
+
.DS_Store
|
| 52 |
+
|
| 53 |
+
# Old versions
|
| 54 |
+
*_backup.py
|
| 55 |
+
*_old.py
|
| 56 |
+
*.old
|
Dockerfile.spaces
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dockerfile optimizado para HuggingFace Spaces
|
| 2 |
+
# Using new v3 architecture
|
| 3 |
+
|
| 4 |
+
FROM python:3.10-slim
|
| 5 |
+
|
| 6 |
+
# Environment variables
|
| 7 |
+
ENV PYTHONUNBUFFERED=1 \
|
| 8 |
+
PYTHONDONTWRITEBYTECODE=1 \
|
| 9 |
+
PORT=7860 \
|
| 10 |
+
MODEL_ID=amazon/chronos-2 \
|
| 11 |
+
DEVICE_MAP=cpu
|
| 12 |
+
|
| 13 |
+
# Working directory
|
| 14 |
+
WORKDIR /app
|
| 15 |
+
|
| 16 |
+
# Install system dependencies
|
| 17 |
+
RUN apt-get update && \
|
| 18 |
+
apt-get install -y --no-install-recommends \
|
| 19 |
+
build-essential \
|
| 20 |
+
curl \
|
| 21 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 22 |
+
|
| 23 |
+
# Copy requirements
|
| 24 |
+
COPY requirements.txt .
|
| 25 |
+
|
| 26 |
+
# Install Python dependencies
|
| 27 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
| 28 |
+
pip install --no-cache-dir -r requirements.txt
|
| 29 |
+
|
| 30 |
+
# Copy application code (NEW v3 architecture)
|
| 31 |
+
COPY app/ ./app/
|
| 32 |
+
|
| 33 |
+
# Copy static files (Excel Add-in)
|
| 34 |
+
COPY static/ ./static/
|
| 35 |
+
|
| 36 |
+
# Copy docs (optional but good to have)
|
| 37 |
+
COPY docs/ ./docs/ 2>/dev/null || true
|
| 38 |
+
|
| 39 |
+
# Create non-root user
|
| 40 |
+
RUN useradd -m -u 1000 user && \
|
| 41 |
+
chown -R user:user /app
|
| 42 |
+
|
| 43 |
+
USER user
|
| 44 |
+
|
| 45 |
+
# Expose port (HF Spaces uses 7860)
|
| 46 |
+
EXPOSE 7860
|
| 47 |
+
|
| 48 |
+
# Health check
|
| 49 |
+
HEALTHCHECK --interval=30s --timeout=10s --start-period=90s --retries=3 \
|
| 50 |
+
CMD curl -f http://localhost:7860/health || exit 1
|
| 51 |
+
|
| 52 |
+
# Start command - USING NEW MAIN_V3
|
| 53 |
+
CMD ["uvicorn", "app.main_v3:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]
|
README.md
CHANGED
|
@@ -1,195 +1,74 @@
|
|
| 1 |
---
|
| 2 |
-
title: Chronos2
|
| 3 |
emoji: 📊
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: green
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
app_port: 7860
|
| 8 |
-
pinned: false
|
| 9 |
-
license: mit
|
| 10 |
---
|
| 11 |
|
| 12 |
-
#
|
| 13 |
|
| 14 |
-
|
| 15 |
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
-
|
| 21 |
-
- ✅ **Detección de anomalías**: Identifica valores atípicos automáticamente
|
| 22 |
-
- ✅ **Backtesting**: Valida la precisión de tus modelos
|
| 23 |
-
- ✅ **API REST con FastAPI**: Fácil integración
|
| 24 |
-
- ✅ **Documentación interactiva**: Swagger UI incluido
|
| 25 |
|
| 26 |
-
|
| 27 |
|
| 28 |
-
|
| 29 |
-
- **
|
| 30 |
-
- **
|
| 31 |
-
- **
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
|
| 34 |
|
| 35 |
-
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
curl -X POST https://YOUR-USERNAME-chronos2-excel-forecasting-api.hf.space/forecast_univariate \
|
| 39 |
-
-H "Content-Type: application/json" \
|
| 40 |
-
-d '{
|
| 41 |
-
"series": {"values": [100, 102, 105, 103, 108, 112, 115]},
|
| 42 |
-
"prediction_length": 3,
|
| 43 |
-
"freq": "D"
|
| 44 |
-
}'
|
| 45 |
-
```
|
| 46 |
-
|
| 47 |
-
**Respuesta esperada:**
|
| 48 |
-
```json
|
| 49 |
-
{
|
| 50 |
-
"timestamps": ["t+1", "t+2", "t+3"],
|
| 51 |
-
"median": [117.5, 119.2, 121.0],
|
| 52 |
-
"quantiles": {
|
| 53 |
-
"0.1": [112.3, 113.8, 115.5],
|
| 54 |
-
"0.5": [117.5, 119.2, 121.0],
|
| 55 |
-
"0.9": [122.7, 124.6, 126.5]
|
| 56 |
-
}
|
| 57 |
-
}
|
| 58 |
-
```
|
| 59 |
-
|
| 60 |
-
### Detección de Anomalías
|
| 61 |
|
| 62 |
```bash
|
| 63 |
-
curl -X POST https://
|
| 64 |
-H "Content-Type: application/json" \
|
| 65 |
-d '{
|
| 66 |
-
"
|
| 67 |
-
"
|
| 68 |
-
"
|
| 69 |
-
}'
|
| 70 |
-
```
|
| 71 |
-
|
| 72 |
-
### Backtesting
|
| 73 |
-
|
| 74 |
-
```bash
|
| 75 |
-
curl -X POST https://YOUR-USERNAME-chronos2-excel-forecasting-api.hf.space/backtest_simple \
|
| 76 |
-
-H "Content-Type: application/json" \
|
| 77 |
-
-d '{
|
| 78 |
-
"series": {"values": [100, 102, 105, 103, 108, 112, 115, 118, 120, 122, 125, 128]},
|
| 79 |
-
"prediction_length": 7,
|
| 80 |
-
"test_length": 4
|
| 81 |
}'
|
| 82 |
```
|
| 83 |
|
| 84 |
-
##
|
| 85 |
-
|
| 86 |
-
| Endpoint | Método | Descripción |
|
| 87 |
-
|----------|--------|-------------|
|
| 88 |
-
| `/` | GET | Información de la API |
|
| 89 |
-
| `/health` | GET | Health check del servicio |
|
| 90 |
-
| `/docs` | GET | Documentación Swagger |
|
| 91 |
-
| `/forecast_univariate` | POST | Pronóstico de serie simple |
|
| 92 |
-
| `/detect_anomalies` | POST | Detectar valores atípicos |
|
| 93 |
-
| `/backtest_simple` | POST | Validar precisión del modelo |
|
| 94 |
-
| `/simple_forecast` | POST | Pronóstico rápido (testing) |
|
| 95 |
-
|
| 96 |
-
## 💻 Uso con Excel
|
| 97 |
-
|
| 98 |
-
Este API funciona perfectamente con nuestro **Office Add-in para Excel**:
|
| 99 |
-
|
| 100 |
-
1. Descarga el Add-in desde [GitHub](https://github.com/tu-usuario/chronos2-server)
|
| 101 |
-
2. Configura la URL de este Space en el Add-in
|
| 102 |
-
3. ¡Realiza pronósticos directamente desde tus hojas de cálculo!
|
| 103 |
-
|
| 104 |
-
### Ejemplo en Excel
|
| 105 |
-
|
| 106 |
-
```javascript
|
| 107 |
-
// En el Excel Add-in, configura:
|
| 108 |
-
const API_BASE_URL = 'https://YOUR-USERNAME-chronos2-excel-forecasting-api.hf.space';
|
| 109 |
-
```
|
| 110 |
-
|
| 111 |
-
## 🛠️ Tecnologías
|
| 112 |
|
| 113 |
-
|
| 114 |
-
- **
|
| 115 |
-
- **
|
| 116 |
-
- **
|
|
|
|
| 117 |
|
| 118 |
-
##
|
| 119 |
|
| 120 |
-
-
|
| 121 |
-
-
|
| 122 |
-
-
|
| 123 |
-
-
|
| 124 |
-
- 🏪 **Retail**: Planifica recursos y personal
|
| 125 |
|
| 126 |
-
##
|
| 127 |
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
Para desplegar tu propia instancia, configura:
|
| 131 |
-
|
| 132 |
-
- `HF_TOKEN`: Tu token de Hugging Face (requerido)
|
| 133 |
-
- `CHRONOS_MODEL_ID`: ID del modelo (default: `amazon/chronos-t5-large`)
|
| 134 |
-
- `PORT`: Puerto del servidor (default: `7860`)
|
| 135 |
-
|
| 136 |
-
### Crear tu propio Space
|
| 137 |
-
|
| 138 |
-
1. Fork este repositorio
|
| 139 |
-
2. Crea un nuevo Space en Hugging Face
|
| 140 |
-
3. Selecciona **Docker** como SDK
|
| 141 |
-
4. Conecta tu repositorio
|
| 142 |
-
5. Configura `HF_TOKEN` en los Secrets del Space
|
| 143 |
-
6. ¡Listo!
|
| 144 |
-
|
| 145 |
-
## 🔒 Seguridad
|
| 146 |
-
|
| 147 |
-
- ✅ CORS configurado para orígenes permitidos
|
| 148 |
-
- ✅ Validación de entrada con Pydantic
|
| 149 |
-
- ✅ Rate limiting en HuggingFace Inference API
|
| 150 |
-
- ✅ Timeouts configurados para evitar bloqueos
|
| 151 |
-
|
| 152 |
-
## 📚 Recursos
|
| 153 |
-
|
| 154 |
-
- [Documentación de Chronos-2](https://huggingface.co/amazon/chronos-t5-large)
|
| 155 |
-
- [API de HuggingFace Inference](https://huggingface.co/docs/api-inference)
|
| 156 |
-
- [FastAPI Docs](https://fastapi.tiangolo.com/)
|
| 157 |
-
- [Tutorial de Office Add-ins](https://docs.microsoft.com/en-us/office/dev/add-ins/)
|
| 158 |
-
|
| 159 |
-
## 🐛 Solución de Problemas
|
| 160 |
-
|
| 161 |
-
### "Model is loading"
|
| 162 |
-
|
| 163 |
-
La primera request puede tardar 30-60 segundos mientras el modelo se carga. Reintenta después.
|
| 164 |
-
|
| 165 |
-
### "HF_TOKEN not configured"
|
| 166 |
-
|
| 167 |
-
Asegúrate de configurar `HF_TOKEN` en los Secrets de tu Space.
|
| 168 |
-
|
| 169 |
-
### Errores de timeout
|
| 170 |
-
|
| 171 |
-
El modelo puede estar frío. Espera unos segundos y reintenta.
|
| 172 |
-
|
| 173 |
-
## 📝 Licencia
|
| 174 |
-
|
| 175 |
-
MIT License - Ver [LICENSE](LICENSE) para más detalles.
|
| 176 |
-
|
| 177 |
-
## 🤝 Contribuir
|
| 178 |
-
|
| 179 |
-
¿Quieres mejorar este proyecto?
|
| 180 |
-
|
| 181 |
-
1. Fork el repositorio
|
| 182 |
-
2. Crea una branch para tu feature (`git checkout -b feature/amazing`)
|
| 183 |
-
3. Commit tus cambios (`git commit -m 'Add amazing feature'`)
|
| 184 |
-
4. Push a la branch (`git push origin feature/amazing`)
|
| 185 |
-
5. Abre un Pull Request
|
| 186 |
-
|
| 187 |
-
## 📧 Contacto
|
| 188 |
-
|
| 189 |
-
¿Preguntas o sugerencias? Abre un [issue en GitHub](https://github.com/tu-usuario/chronos2-server/issues).
|
| 190 |
-
|
| 191 |
-
---
|
| 192 |
|
| 193 |
-
|
| 194 |
|
| 195 |
-
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Chronos2 Forecasting API
|
| 3 |
emoji: 📊
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: green
|
| 6 |
sdk: docker
|
| 7 |
+
app_file: Dockerfile.spaces
|
| 8 |
app_port: 7860
|
|
|
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# Chronos2 Excel Forecasting API
|
| 12 |
|
| 13 |
+
Time series forecasting API powered by Amazon Chronos-2 model with Excel Add-in support.
|
| 14 |
|
| 15 |
+
## Features
|
| 16 |
|
| 17 |
+
- ✅ **Univariate & Multivariate Forecasting** - Multiple time series support
|
| 18 |
+
- ✅ **Anomaly Detection** - Detect outliers in your data
|
| 19 |
+
- ✅ **Backtesting** - Validate forecast accuracy
|
| 20 |
+
- ✅ **Excel Add-in** - Direct integration with Microsoft Excel
|
| 21 |
+
- ✅ **Interactive Charts** - Visualize forecasts and anomalies
|
| 22 |
+
- ✅ **REST API** - Easy integration with any platform
|
| 23 |
|
| 24 |
+
## Quick Start
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
+
### API Endpoints
|
| 27 |
|
| 28 |
+
- **Health Check**: `GET /health`
|
| 29 |
+
- **Documentation**: `GET /docs`
|
| 30 |
+
- **Univariate Forecast**: `POST /forecast/univariate`
|
| 31 |
+
- **Multivariate Forecast**: `POST /forecast/multivariate`
|
| 32 |
+
- **Anomaly Detection**: `POST /forecast/anomaly`
|
| 33 |
+
- **Backtesting**: `POST /forecast/backtest`
|
| 34 |
|
| 35 |
+
### Excel Add-in
|
| 36 |
|
| 37 |
+
Load the add-in in Excel:
|
| 38 |
+
1. Insert → Add-ins → Upload My Add-in
|
| 39 |
+
2. Paste URL: `https://ttzzs-chronos2-excel-forecasting-api.hf.space/manifest.xml`
|
| 40 |
|
| 41 |
+
### Example API Call
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
|
| 43 |
```bash
|
| 44 |
+
curl -X POST https://ttzzs-chronos2-excel-forecasting-api.hf.space/forecast/univariate \
|
| 45 |
-H "Content-Type: application/json" \
|
| 46 |
-d '{
|
| 47 |
+
"values": [100, 102, 105, 108, 110],
|
| 48 |
+
"prediction_length": 3,
|
| 49 |
+
"model_id": "amazon/chronos-2"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
}'
|
| 51 |
```
|
| 52 |
|
| 53 |
+
## Architecture
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
+
Built with Clean Architecture principles:
|
| 56 |
+
- **Domain Layer** - Business logic and entities
|
| 57 |
+
- **Application Layer** - Use cases and services
|
| 58 |
+
- **Infrastructure Layer** - External dependencies (ML models, storage)
|
| 59 |
+
- **API Layer** - FastAPI routes and DTOs
|
| 60 |
|
| 61 |
+
## Technology Stack
|
| 62 |
|
| 63 |
+
- **Framework**: FastAPI 0.115.5
|
| 64 |
+
- **ML Model**: Amazon Chronos-2 (Transformer-based forecasting)
|
| 65 |
+
- **Python**: 3.10+
|
| 66 |
+
- **Docker**: Optimized multi-stage build
|
|
|
|
| 67 |
|
| 68 |
+
## License
|
| 69 |
|
| 70 |
+
MIT License
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
|
| 72 |
+
## Support
|
| 73 |
|
| 74 |
+
For issues and questions, visit the [GitHub repository](https://github.com/vargasjosej/aprender_ai/tree/refactor/solid-architecture/chronos2-server).
|
app/api/__init__.py
ADDED
|
File without changes
|
app/api/dependencies.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Dependency Injection para FastAPI.
|
| 3 |
+
|
| 4 |
+
Provee instancias de servicios, repositorios y casos de uso
|
| 5 |
+
usando el sistema de DI de FastAPI.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from functools import lru_cache
|
| 9 |
+
from fastapi import Depends
|
| 10 |
+
|
| 11 |
+
# Infrastructure
|
| 12 |
+
from app.infrastructure.ml.model_factory import ModelFactory
|
| 13 |
+
from app.infrastructure.config.settings import get_settings
|
| 14 |
+
|
| 15 |
+
# Domain
|
| 16 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 17 |
+
from app.domain.interfaces.data_transformer import IDataTransformer
|
| 18 |
+
from app.domain.services.forecast_service import ForecastService
|
| 19 |
+
from app.domain.services.anomaly_service import AnomalyService
|
| 20 |
+
|
| 21 |
+
# Application
|
| 22 |
+
from app.application.use_cases.forecast_use_case import (
|
| 23 |
+
ForecastUnivariateUseCase,
|
| 24 |
+
ForecastMultiSeriesUseCase
|
| 25 |
+
)
|
| 26 |
+
from app.application.use_cases.anomaly_use_case import DetectAnomaliesUseCase
|
| 27 |
+
from app.application.use_cases.backtest_use_case import BacktestUseCase
|
| 28 |
+
|
| 29 |
+
# Utils
|
| 30 |
+
from app.utils.dataframe_builder import DataFrameBuilder
|
| 31 |
+
from app.utils.logger import setup_logger
|
| 32 |
+
|
| 33 |
+
# Get settings instance
|
| 34 |
+
settings = get_settings()
|
| 35 |
+
|
| 36 |
+
logger = setup_logger(__name__)
|
| 37 |
+
|
| 38 |
+
# ============================================================================
|
| 39 |
+
# Infrastructure Layer Dependencies
|
| 40 |
+
# ============================================================================
|
| 41 |
+
|
| 42 |
+
# Singleton para el modelo de forecasting
|
| 43 |
+
_model_instance: IForecastModel = None
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def get_forecast_model() -> IForecastModel:
|
| 47 |
+
"""
|
| 48 |
+
Dependency: Modelo de forecasting (Singleton).
|
| 49 |
+
|
| 50 |
+
Usa Chronos-2 por defecto. El modelo se carga una sola vez
|
| 51 |
+
y se reutiliza en todas las requests.
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
IForecastModel: Instancia del modelo
|
| 55 |
+
"""
|
| 56 |
+
global _model_instance
|
| 57 |
+
|
| 58 |
+
if _model_instance is None:
|
| 59 |
+
logger.info("Initializing forecast model (first time)")
|
| 60 |
+
_model_instance = ModelFactory.create(
|
| 61 |
+
model_type="chronos2",
|
| 62 |
+
model_id=settings.model_id,
|
| 63 |
+
device_map=settings.device_map
|
| 64 |
+
)
|
| 65 |
+
logger.info(f"Model loaded: {_model_instance.get_model_info()}")
|
| 66 |
+
|
| 67 |
+
return _model_instance
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_data_transformer() -> IDataTransformer:
|
| 71 |
+
"""
|
| 72 |
+
Dependency: Transformador de datos.
|
| 73 |
+
|
| 74 |
+
Returns:
|
| 75 |
+
IDataTransformer: Instancia del transformador
|
| 76 |
+
"""
|
| 77 |
+
return DataFrameBuilder()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# ============================================================================
|
| 81 |
+
# Domain Layer Dependencies
|
| 82 |
+
# ============================================================================
|
| 83 |
+
|
| 84 |
+
def get_forecast_service(
|
| 85 |
+
model: IForecastModel = Depends(get_forecast_model),
|
| 86 |
+
transformer: IDataTransformer = Depends(get_data_transformer)
|
| 87 |
+
) -> ForecastService:
|
| 88 |
+
"""
|
| 89 |
+
Dependency: Servicio de dominio para forecasting.
|
| 90 |
+
|
| 91 |
+
Args:
|
| 92 |
+
model: Modelo de forecasting
|
| 93 |
+
transformer: Transformador de datos
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
ForecastService: Servicio de forecasting
|
| 97 |
+
"""
|
| 98 |
+
return ForecastService(model=model, transformer=transformer)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def get_anomaly_service(
|
| 102 |
+
model: IForecastModel = Depends(get_forecast_model),
|
| 103 |
+
transformer: IDataTransformer = Depends(get_data_transformer)
|
| 104 |
+
) -> AnomalyService:
|
| 105 |
+
"""
|
| 106 |
+
Dependency: Servicio de dominio para detección de anomalías.
|
| 107 |
+
|
| 108 |
+
Args:
|
| 109 |
+
model: Modelo de forecasting
|
| 110 |
+
transformer: Transformador de datos
|
| 111 |
+
|
| 112 |
+
Returns:
|
| 113 |
+
AnomalyService: Servicio de anomalías
|
| 114 |
+
"""
|
| 115 |
+
return AnomalyService(model=model, transformer=transformer)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# ============================================================================
|
| 119 |
+
# Application Layer Dependencies (Use Cases)
|
| 120 |
+
# ============================================================================
|
| 121 |
+
|
| 122 |
+
def get_forecast_univariate_use_case(
|
| 123 |
+
service: ForecastService = Depends(get_forecast_service)
|
| 124 |
+
) -> ForecastUnivariateUseCase:
|
| 125 |
+
"""
|
| 126 |
+
Dependency: Caso de uso de pronóstico univariado.
|
| 127 |
+
|
| 128 |
+
Args:
|
| 129 |
+
service: Servicio de forecasting
|
| 130 |
+
|
| 131 |
+
Returns:
|
| 132 |
+
ForecastUnivariateUseCase: Caso de uso
|
| 133 |
+
"""
|
| 134 |
+
return ForecastUnivariateUseCase(forecast_service=service)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def get_forecast_multi_series_use_case(
|
| 138 |
+
service: ForecastService = Depends(get_forecast_service)
|
| 139 |
+
) -> ForecastMultiSeriesUseCase:
|
| 140 |
+
"""
|
| 141 |
+
Dependency: Caso de uso de pronóstico multi-series.
|
| 142 |
+
|
| 143 |
+
Args:
|
| 144 |
+
service: Servicio de forecasting
|
| 145 |
+
|
| 146 |
+
Returns:
|
| 147 |
+
ForecastMultiSeriesUseCase: Caso de uso
|
| 148 |
+
"""
|
| 149 |
+
return ForecastMultiSeriesUseCase(forecast_service=service)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def get_detect_anomalies_use_case(
|
| 153 |
+
service: AnomalyService = Depends(get_anomaly_service)
|
| 154 |
+
) -> DetectAnomaliesUseCase:
|
| 155 |
+
"""
|
| 156 |
+
Dependency: Caso de uso de detección de anomalías.
|
| 157 |
+
|
| 158 |
+
Args:
|
| 159 |
+
service: Servicio de anomalías
|
| 160 |
+
|
| 161 |
+
Returns:
|
| 162 |
+
DetectAnomaliesUseCase: Caso de uso
|
| 163 |
+
"""
|
| 164 |
+
return DetectAnomaliesUseCase(anomaly_service=service)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_backtest_use_case(
|
| 168 |
+
service: ForecastService = Depends(get_forecast_service)
|
| 169 |
+
) -> BacktestUseCase:
|
| 170 |
+
"""
|
| 171 |
+
Dependency: Caso de uso de backtesting.
|
| 172 |
+
|
| 173 |
+
Args:
|
| 174 |
+
service: Servicio de forecasting
|
| 175 |
+
|
| 176 |
+
Returns:
|
| 177 |
+
BacktestUseCase: Caso de uso
|
| 178 |
+
"""
|
| 179 |
+
return BacktestUseCase(forecast_service=service)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
# ============================================================================
|
| 183 |
+
# Utility Functions
|
| 184 |
+
# ============================================================================
|
| 185 |
+
|
| 186 |
+
def reset_model():
|
| 187 |
+
"""
|
| 188 |
+
Resetea el modelo (útil para testing).
|
| 189 |
+
|
| 190 |
+
ADVERTENCIA: Solo usar en tests, no en producción.
|
| 191 |
+
"""
|
| 192 |
+
global _model_instance
|
| 193 |
+
_model_instance = None
|
| 194 |
+
logger.warning("Model instance reset")
|
app/api/middleware/__init__.py
ADDED
|
File without changes
|
app/api/routes/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
API Routes package.
|
| 3 |
+
|
| 4 |
+
Contiene todos los endpoints de la API organizados por funcionalidad.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from .health import router as health_router
|
| 8 |
+
from .forecast import router as forecast_router
|
| 9 |
+
from .anomaly import router as anomaly_router
|
| 10 |
+
from .backtest import router as backtest_router
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"health_router",
|
| 14 |
+
"forecast_router",
|
| 15 |
+
"anomaly_router",
|
| 16 |
+
"backtest_router"
|
| 17 |
+
]
|
app/api/routes/anomaly.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Anomaly detection API endpoints.
|
| 3 |
+
|
| 4 |
+
Responsabilidad: Manejar requests de detección de anomalías.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from fastapi import APIRouter, Depends, HTTPException, status
|
| 8 |
+
|
| 9 |
+
from app.api.dependencies import get_detect_anomalies_use_case
|
| 10 |
+
from app.application.use_cases.anomaly_use_case import DetectAnomaliesUseCase
|
| 11 |
+
from app.application.dtos.anomaly_dtos import (
|
| 12 |
+
DetectAnomaliesRequestDTO,
|
| 13 |
+
DetectAnomaliesResponseDTO
|
| 14 |
+
)
|
| 15 |
+
from app.utils.logger import setup_logger
|
| 16 |
+
|
| 17 |
+
logger = setup_logger(__name__)
|
| 18 |
+
|
| 19 |
+
router = APIRouter(prefix="/anomaly", tags=["Anomaly Detection"])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@router.post(
|
| 23 |
+
"/detect",
|
| 24 |
+
response_model=DetectAnomaliesResponseDTO,
|
| 25 |
+
status_code=status.HTTP_200_OK,
|
| 26 |
+
summary="Detectar anomalías",
|
| 27 |
+
description="Detecta anomalías comparando valores observados con pronóstico"
|
| 28 |
+
)
|
| 29 |
+
async def detect_anomalies(
|
| 30 |
+
request: DetectAnomaliesRequestDTO,
|
| 31 |
+
use_case: DetectAnomaliesUseCase = Depends(get_detect_anomalies_use_case)
|
| 32 |
+
):
|
| 33 |
+
"""
|
| 34 |
+
Detecta anomalías en serie temporal.
|
| 35 |
+
|
| 36 |
+
Compara valores observados recientes con pronóstico basado
|
| 37 |
+
en contexto histórico. Marca como anomalías los valores que
|
| 38 |
+
caen fuera de intervalos de confianza.
|
| 39 |
+
|
| 40 |
+
Args:
|
| 41 |
+
request: Contexto histórico y valores recientes a evaluar
|
| 42 |
+
use_case: Caso de uso inyectado
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
Lista de puntos con indicador de anomalía
|
| 46 |
+
|
| 47 |
+
Example:
|
| 48 |
+
```json
|
| 49 |
+
{
|
| 50 |
+
"context_values": [100, 102, 105, 103, 108],
|
| 51 |
+
"recent_observed": [112, 150, 115],
|
| 52 |
+
"quantile_low": 0.05,
|
| 53 |
+
"quantile_high": 0.95,
|
| 54 |
+
"freq": "D"
|
| 55 |
+
}
|
| 56 |
+
```
|
| 57 |
+
"""
|
| 58 |
+
try:
|
| 59 |
+
logger.info(
|
| 60 |
+
f"Anomaly detection request: {len(request.context_values)} context, "
|
| 61 |
+
f"{len(request.recent_observed)} to evaluate"
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
# Ejecutar use case
|
| 65 |
+
response = use_case.execute(request)
|
| 66 |
+
|
| 67 |
+
num_anomalies = sum(1 for p in response.anomaly_points if p.is_anomaly)
|
| 68 |
+
logger.info(
|
| 69 |
+
f"Anomaly detection completed: {num_anomalies} anomalies found"
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
return response
|
| 73 |
+
|
| 74 |
+
except ValueError as e:
|
| 75 |
+
logger.error(f"Validation error: {e}")
|
| 76 |
+
raise HTTPException(
|
| 77 |
+
status_code=status.HTTP_400_BAD_REQUEST,
|
| 78 |
+
detail=str(e)
|
| 79 |
+
)
|
| 80 |
+
except Exception as e:
|
| 81 |
+
logger.error(f"Unexpected error in anomaly detection: {e}", exc_info=True)
|
| 82 |
+
raise HTTPException(
|
| 83 |
+
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
| 84 |
+
detail="Error interno en detección de anomalías"
|
| 85 |
+
)
|
app/api/routes/backtest.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Backtesting API endpoints.
|
| 3 |
+
|
| 4 |
+
Responsabilidad: Manejar requests de backtesting (evaluación de modelos).
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from fastapi import APIRouter, Depends, HTTPException, status
|
| 8 |
+
|
| 9 |
+
from app.api.dependencies import get_backtest_use_case
|
| 10 |
+
from app.application.use_cases.backtest_use_case import BacktestUseCase
|
| 11 |
+
from app.application.dtos.backtest_dtos import (
|
| 12 |
+
BacktestRequestDTO,
|
| 13 |
+
BacktestResponseDTO
|
| 14 |
+
)
|
| 15 |
+
from app.utils.logger import setup_logger
|
| 16 |
+
|
| 17 |
+
logger = setup_logger(__name__)
|
| 18 |
+
|
| 19 |
+
router = APIRouter(prefix="/backtest", tags=["Backtesting"])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@router.post(
|
| 23 |
+
"/simple",
|
| 24 |
+
response_model=BacktestResponseDTO,
|
| 25 |
+
status_code=status.HTTP_200_OK,
|
| 26 |
+
summary="Backtesting simple",
|
| 27 |
+
description="Evalúa pronóstico comparando con valores reales"
|
| 28 |
+
)
|
| 29 |
+
async def backtest_simple(
|
| 30 |
+
request: BacktestRequestDTO,
|
| 31 |
+
use_case: BacktestUseCase = Depends(get_backtest_use_case)
|
| 32 |
+
):
|
| 33 |
+
"""
|
| 34 |
+
Backtesting simple (hold-out).
|
| 35 |
+
|
| 36 |
+
Divide la serie en train/test, genera pronóstico con train,
|
| 37 |
+
y compara con test para calcular métricas de error.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
request: Serie completa y parámetros de backtesting
|
| 41 |
+
use_case: Caso de uso inyectado
|
| 42 |
+
|
| 43 |
+
Returns:
|
| 44 |
+
Métricas de error (MAE, MAPE, RMSE) y comparación forecast vs actual
|
| 45 |
+
|
| 46 |
+
Example:
|
| 47 |
+
```json
|
| 48 |
+
{
|
| 49 |
+
"full_series": [100, 102, 105, 103, 108, 112, 115, 118],
|
| 50 |
+
"test_size": 3,
|
| 51 |
+
"freq": "D",
|
| 52 |
+
"quantile_levels": [0.1, 0.5, 0.9]
|
| 53 |
+
}
|
| 54 |
+
```
|
| 55 |
+
"""
|
| 56 |
+
try:
|
| 57 |
+
logger.info(
|
| 58 |
+
f"Backtest request: {len(request.full_series)} values, "
|
| 59 |
+
f"test_size={request.test_size}"
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
# Ejecutar use case
|
| 63 |
+
response = use_case.execute(request)
|
| 64 |
+
|
| 65 |
+
logger.info(
|
| 66 |
+
f"Backtest completed: MAE={response.metrics.mae:.2f}, "
|
| 67 |
+
f"MAPE={response.metrics.mape:.2f}%"
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
return response
|
| 71 |
+
|
| 72 |
+
except ValueError as e:
|
| 73 |
+
logger.error(f"Validation error: {e}")
|
| 74 |
+
raise HTTPException(
|
| 75 |
+
status_code=status.HTTP_400_BAD_REQUEST,
|
| 76 |
+
detail=str(e)
|
| 77 |
+
)
|
| 78 |
+
except Exception as e:
|
| 79 |
+
logger.error(f"Unexpected error in backtest: {e}", exc_info=True)
|
| 80 |
+
raise HTTPException(
|
| 81 |
+
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
| 82 |
+
detail="Error interno en backtesting"
|
| 83 |
+
)
|
app/api/routes/forecast.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Forecast API endpoints.
|
| 3 |
+
|
| 4 |
+
Responsabilidad: Manejar requests de forecasting y delegar a use cases.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from fastapi import APIRouter, Depends, HTTPException, status
|
| 8 |
+
from typing import List
|
| 9 |
+
|
| 10 |
+
from app.api.dependencies import (
|
| 11 |
+
get_forecast_univariate_use_case,
|
| 12 |
+
get_forecast_multi_series_use_case
|
| 13 |
+
)
|
| 14 |
+
from app.application.use_cases.forecast_use_case import (
|
| 15 |
+
ForecastUnivariateUseCase,
|
| 16 |
+
ForecastMultiSeriesUseCase
|
| 17 |
+
)
|
| 18 |
+
from app.application.dtos.forecast_dtos import (
|
| 19 |
+
ForecastUnivariateRequestDTO,
|
| 20 |
+
ForecastUnivariateResponseDTO,
|
| 21 |
+
ForecastMultiSeriesRequestDTO,
|
| 22 |
+
ForecastMultiSeriesResponseDTO
|
| 23 |
+
)
|
| 24 |
+
from app.utils.logger import setup_logger
|
| 25 |
+
|
| 26 |
+
logger = setup_logger(__name__)
|
| 27 |
+
|
| 28 |
+
router = APIRouter(prefix="/forecast", tags=["Forecast"])
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@router.post(
    "/univariate",
    response_model=ForecastUnivariateResponseDTO,
    status_code=status.HTTP_200_OK,
    summary="Pronóstico univariado",
    description="Genera pronóstico para una serie temporal sin covariables"
)
async def forecast_univariate(
    request: ForecastUnivariateRequestDTO,
    use_case: ForecastUnivariateUseCase = Depends(get_forecast_univariate_use_case)
):
    """Univariate forecast endpoint.

    Produces a probabilistic forecast for a single time series without
    exogenous variables.

    Args:
        request: Series data and prediction parameters.
        use_case: Injected use case.

    Returns:
        Forecast with median and quantile bands.

    Raises:
        HTTPException: 400 on validation errors, 500 on unexpected failures.

    Example:
        ```json
        {
            "values": [100, 102, 105, 103, 108, 112],
            "prediction_length": 3,
            "freq": "D",
            "quantile_levels": [0.1, 0.5, 0.9]
        }
        ```
    """
    try:
        logger.info(
            f"Forecast univariate request: {len(request.values)} values, "
            f"{request.prediction_length} steps ahead"
        )
        # The route is a thin adapter: run the use case, translate errors.
        response = use_case.execute(request)
        logger.info(f"Forecast completed: {len(response.timestamps)} predictions")
        return response
    except ValueError as e:
        # Domain validation problems become a client error.
        logger.error(f"Validation error: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except Exception as e:
        logger.error(f"Unexpected error in forecast: {e}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Error interno al generar pronóstico"
        )
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@router.post(
    "/multi-series",
    response_model=ForecastMultiSeriesResponseDTO,
    status_code=status.HTTP_200_OK,
    summary="Pronóstico multi-series",
    description="Genera pronósticos para múltiples series simultáneamente"
)
async def forecast_multi_series(
    request: ForecastMultiSeriesRequestDTO,
    use_case: ForecastMultiSeriesUseCase = Depends(get_forecast_multi_series_use_case)
):
    """Multi-series forecast endpoint.

    Generates independent forecasts for several time series in one call.

    Args:
        request: List of series and shared prediction parameters.
        use_case: Injected use case.

    Returns:
        One forecast per input series.

    Raises:
        HTTPException: 400 on validation errors, 500 on unexpected failures.

    Example:
        ```json
        {
            "series_list": [
                {"series_id": "sales", "values": [100, 102, 105]},
                {"series_id": "revenue", "values": [200, 205, 210]}
            ],
            "prediction_length": 3,
            "freq": "D"
        }
        ```
    """
    try:
        logger.info(
            f"Forecast multi-series request: {len(request.series_list)} series"
        )
        # Delegate all business logic to the injected use case.
        response = use_case.execute(request)
        logger.info(
            f"Multi-series forecast completed: "
            f"{len(response.forecasts)} forecasts"
        )
        return response
    except ValueError as e:
        logger.error(f"Validation error: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except Exception as e:
        logger.error(
            f"Unexpected error in multi-series forecast: {e}",
            exc_info=True
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Error interno al generar pronósticos"
        )
|
app/api/routes/health.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Health check and system info endpoints.
|
| 3 |
+
|
| 4 |
+
Responsabilidad: Verificar el estado de la API y servicios.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from fastapi import APIRouter, Depends
|
| 8 |
+
from typing import Dict, Any
|
| 9 |
+
|
| 10 |
+
from app.api.dependencies import get_forecast_model
|
| 11 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 12 |
+
from app.infrastructure.config.settings import get_settings
|
| 13 |
+
from app.utils.logger import setup_logger
|
| 14 |
+
|
| 15 |
+
logger = setup_logger(__name__)
|
| 16 |
+
settings = get_settings()
|
| 17 |
+
|
| 18 |
+
router = APIRouter(prefix="/health", tags=["Health"])
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@router.get("", response_model=Dict[str, Any])
async def health_check(
    model: IForecastModel = Depends(get_forecast_model)
):
    """Health check endpoint.

    Confirms the API is up and reports whether the forecasting model
    is loaded and reachable.

    Returns:
        API status plus model information, or the failure reason when
        the model cannot be queried.
    """
    try:
        model_info = model.get_model_info()
    except Exception as e:
        # Degraded mode: the API process itself is alive, but the model
        # could not be queried — report the error instead of failing.
        logger.error(f"Health check failed: {e}")
        return {
            "status": "error",
            "version": settings.api_version,
            "error": str(e),
            "message": "API is running but model is not available"
        }

    return {
        "status": "ok",
        "version": settings.api_version,
        "model": model_info,
        "message": "Chronos-2 API is running"
    }
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@router.get("/info", response_model=Dict[str, Any])
async def system_info():
    """System information endpoint.

    Returns:
        Static metadata about the API, its architecture, model
        configuration and available endpoint groups.
    """
    # Assemble each section separately for readability.
    api_section = {
        "title": settings.api_title,
        "version": settings.api_version,
        "description": settings.api_description
    }
    architecture_section = {
        "style": "Clean Architecture",
        "principles": "SOLID",
        "layers": [
            "Presentation (API)",
            "Application (Use Cases)",
            "Domain (Business Logic)",
            "Infrastructure (External Services)"
        ]
    }
    model_section = {
        "id": settings.model_id,
        "device": settings.device_map
    }
    endpoints_section = {
        "docs": "/docs",
        "health": "/health",
        "forecast": "/forecast",
        "anomaly": "/anomaly",
        "backtest": "/backtest"
    }
    return {
        "api": api_section,
        "architecture": architecture_section,
        "model": model_section,
        "endpoints": endpoints_section
    }
|
app/application/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Application Layer - Use Cases y DTOs.
|
| 3 |
+
|
| 4 |
+
Esta capa contiene la lógica de aplicación (casos de uso) que orquestan
|
| 5 |
+
las entidades y servicios del dominio para cumplir con los requisitos
|
| 6 |
+
de la aplicación.
|
| 7 |
+
"""
|
app/application/dtos/__init__.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Data Transfer Objects (DTOs).
|
| 3 |
+
|
| 4 |
+
DTOs para transferir datos entre capas, evitando acoplamiento
|
| 5 |
+
entre la capa de presentación y el dominio.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .forecast_dtos import (
|
| 9 |
+
ForecastInputDTO,
|
| 10 |
+
ForecastOutputDTO,
|
| 11 |
+
MultiForecastInputDTO,
|
| 12 |
+
MultiForecastOutputDTO
|
| 13 |
+
)
|
| 14 |
+
from .anomaly_dtos import (
|
| 15 |
+
AnomalyDetectionInputDTO,
|
| 16 |
+
AnomalyDetectionOutputDTO,
|
| 17 |
+
AnomalyPointDTO
|
| 18 |
+
)
|
| 19 |
+
from .backtest_dtos import (
|
| 20 |
+
BacktestInputDTO,
|
| 21 |
+
BacktestOutputDTO,
|
| 22 |
+
BacktestMetricsDTO
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
__all__ = [
|
| 26 |
+
"ForecastInputDTO",
|
| 27 |
+
"ForecastOutputDTO",
|
| 28 |
+
"MultiForecastInputDTO",
|
| 29 |
+
"MultiForecastOutputDTO",
|
| 30 |
+
"AnomalyDetectionInputDTO",
|
| 31 |
+
"AnomalyDetectionOutputDTO",
|
| 32 |
+
"AnomalyPointDTO",
|
| 33 |
+
"BacktestInputDTO",
|
| 34 |
+
"BacktestOutputDTO",
|
| 35 |
+
"BacktestMetricsDTO",
|
| 36 |
+
]
|
app/application/dtos/anomaly_dtos.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DTOs para casos de uso de Detección de Anomalías.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import List, Optional, Dict
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class AnomalyPointDTO:
    """A single observed point annotated with anomaly information."""

    index: int            # position of the point in the evaluated window
    value: float          # observed value
    expected: float       # expected value for this point
    lower_bound: float    # lower edge of the prediction interval
    upper_bound: float    # upper edge of the prediction interval
    is_anomaly: bool      # whether the point was flagged as anomalous
    z_score: float = 0.0  # standardized deviation score
    severity: str = "normal"  # one of: normal, low, medium, high

    def to_dict(self) -> Dict:
        """Serialize the point to a plain dict (z_score rounded to 2 decimals)."""
        payload = {
            "index": self.index,
            "value": self.value,
            "expected": self.expected,
            "lower_bound": self.lower_bound,
            "upper_bound": self.upper_bound,
            "is_anomaly": self.is_anomaly,
        }
        payload["z_score"] = round(self.z_score, 2)
        payload["severity"] = self.severity
        return payload
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class AnomalyDetectionInputDTO:
    """Input DTO for the anomaly-detection use case."""

    context_values: List[float]                    # historical context window
    recent_values: List[float]                     # points to evaluate for anomalies
    quantile_low: float = 0.05                     # lower interval quantile, in (0, 0.5)
    quantile_high: float = 0.95                    # upper interval quantile, in (0.5, 1)
    context_timestamps: Optional[List[str]] = None # optional timestamps for the context
    freq: str = "D"                                # frequency string for the series

    def validate(self) -> None:
        """Validate the input, raising ValueError on the first violation."""
        if not self.context_values:
            raise ValueError("context_values no puede estar vacío")
        if not self.recent_values:
            raise ValueError("recent_values no puede estar vacío")
        if len(self.context_values) < 3:
            raise ValueError("context_values debe tener al menos 3 puntos")
        # The low/high quantiles must sit on opposite sides of the median.
        if not (0 < self.quantile_low < 0.5):
            raise ValueError("quantile_low debe estar en (0, 0.5)")
        if not (0.5 < self.quantile_high < 1):
            raise ValueError("quantile_high debe estar en (0.5, 1)")
        # Timestamps are optional, but when given they must align 1:1.
        if self.context_timestamps and len(self.context_timestamps) != len(self.context_values):
            raise ValueError("context_timestamps y context_values deben tener la misma longitud")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@dataclass
class AnomalyDetectionOutputDTO:
    """Output DTO for the anomaly-detection use case."""

    anomalies: List[AnomalyPointDTO]  # per-point results
    total_points: int                 # number of points evaluated
    anomaly_count: int                # number of points flagged anomalous
    anomaly_rate: float               # anomaly_count / total_points
    summary: Dict                     # free-form aggregate summary

    def to_dict(self) -> Dict:
        """Serialize the result (anomaly_rate rounded to 3 decimals)."""
        serialized_points = [point.to_dict() for point in self.anomalies]
        return {
            "anomalies": serialized_points,
            "total_points": self.total_points,
            "anomaly_count": self.anomaly_count,
            "anomaly_rate": round(self.anomaly_rate, 3),
            "summary": self.summary,
        }
|
app/application/dtos/backtest_dtos.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DTOs para casos de uso de Backtesting.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import List, Optional, Dict
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class BacktestMetricsDTO:
    """Aggregate error metrics produced by a backtest run."""

    mae: float   # mean absolute error
    mape: float  # mean absolute percentage error
    rmse: float  # root mean squared error
    mse: float   # mean squared error

    def to_dict(self) -> Dict:
        """Serialize the metrics, rounding each to 4 decimal places."""
        pairs = (
            ("mae", self.mae),
            ("mape", self.mape),
            ("rmse", self.rmse),
            ("mse", self.mse),
        )
        return {name: round(metric, 4) for name, metric in pairs}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@dataclass
class BacktestInputDTO:
    """Input DTO for the backtest use case."""

    values: List[float]                    # full series (train + held-out test)
    test_size: int                         # number of trailing points held out
    quantile_levels: List[float]           # quantiles to evaluate, each in [0, 1]
    timestamps: Optional[List[str]] = None # optional timestamps aligned with values
    freq: str = "D"                        # frequency string for the series

    def validate(self) -> None:
        """Validate the input, raising ValueError on the first violation."""
        if not self.values:
            raise ValueError("values no puede estar vacío")
        if self.test_size < 1:
            raise ValueError("test_size debe ser >= 1")
        if self.test_size >= len(self.values):
            raise ValueError("test_size debe ser menor que la longitud de values")
        # What remains after holding out the test window must still be a
        # usable training context.
        if len(self.values) - self.test_size < 3:
            raise ValueError("train_size debe ser al menos 3 puntos")
        if not all(0 <= q <= 1 for q in self.quantile_levels):
            raise ValueError("quantile_levels debe estar en [0, 1]")
        if self.timestamps and len(self.timestamps) != len(self.values):
            raise ValueError("timestamps y values deben tener la misma longitud")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@dataclass
class BacktestOutputDTO:
    """Output DTO for the backtest use case."""

    forecast_values: List[float]                      # predicted values over the test window
    actual_values: List[float]                        # held-out observed values
    errors: List[float]                               # per-point errors
    metrics: BacktestMetricsDTO                       # aggregate error metrics
    timestamps: List[str]                             # timestamps of the test window
    quantiles: Optional[Dict[str, List[float]]] = None  # optional quantile forecasts

    def to_dict(self) -> Dict:
        """Serialize the result; 'quantiles' is included only when present."""
        payload = {
            "forecast_values": self.forecast_values,
            "actual_values": self.actual_values,
            "errors": self.errors,
            "metrics": self.metrics.to_dict(),
            "timestamps": self.timestamps,
        }
        if self.quantiles:
            payload["quantiles"] = self.quantiles
        return payload
|
app/application/dtos/forecast_dtos.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DTOs para casos de uso de Forecasting.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import List, Optional, Dict
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class ForecastInputDTO:
    """Input DTO for the univariate forecast use case."""

    values: List[float]                    # observed series values
    prediction_length: int                 # number of future steps to predict
    quantile_levels: List[float]           # quantiles to return, each in [0, 1]
    timestamps: Optional[List[str]] = None # optional timestamps aligned with values
    series_id: str = "series_0"            # identifier carried through to the output
    freq: str = "D"                        # frequency string for the series

    def validate(self) -> None:
        """Validate the input, raising ValueError on the first violation."""
        if not self.values:
            raise ValueError("values no puede estar vacío")
        if self.prediction_length < 1:
            raise ValueError("prediction_length debe ser >= 1")
        if not all(0 <= q <= 1 for q in self.quantile_levels):
            raise ValueError("quantile_levels debe estar en [0, 1]")
        # Timestamps are optional, but when given they must align 1:1.
        if self.timestamps and len(self.timestamps) != len(self.values):
            raise ValueError("timestamps y values deben tener la misma longitud")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@dataclass
class ForecastOutputDTO:
    """Output DTO for the univariate forecast use case."""

    timestamps: List[str]               # forecast horizon timestamps
    median: List[float]                 # median (0.5 quantile) forecast
    quantiles: Dict[str, List[float]]   # quantile level -> forecast values
    series_id: str = "series_0"         # identifier of the forecast series
    metadata: Optional[Dict] = None     # optional extra information

    def to_dict(self) -> Dict:
        """Serialize the forecast; 'metadata' is included only when present."""
        payload = {
            "timestamps": self.timestamps,
            "median": self.median,
            "quantiles": self.quantiles,
            "series_id": self.series_id,
        }
        if self.metadata:
            payload["metadata"] = self.metadata
        return payload
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@dataclass
class SeriesInputDTO:
    """DTO for one individual series within a multi-series forecast request."""

    series_id: str  # caller-supplied identifier for the series
    values: List[float]  # observed values of the series
    timestamps: Optional[List[str]] = None  # optional timestamps for `values`; presumably 1:1 aligned — confirm against callers
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@dataclass
class MultiForecastInputDTO:
    """Input DTO for the multi-series forecast use case.

    Attributes:
        series_list: Series to forecast, one entry per series.
        prediction_length: Number of future steps to predict (>= 1).
        quantile_levels: Quantiles to return, each in [0, 1].
        freq: Frequency string shared by all series (e.g. "D").
    """

    series_list: List[SeriesInputDTO]
    prediction_length: int
    quantile_levels: List[float]
    freq: str = "D"

    def validate(self) -> None:
        """Validate the input.

        Raises:
            ValueError: If the request as a whole, or any individual
                series, is invalid.
        """
        if not self.series_list:
            raise ValueError("series_list no puede estar vacío")

        if self.prediction_length < 1:
            raise ValueError("prediction_length debe ser >= 1")

        if not all(0 <= q <= 1 for q in self.quantile_levels):
            raise ValueError("quantile_levels debe estar en [0, 1]")

        # Validate each series individually.
        for series in self.series_list:
            if not series.values:
                raise ValueError(f"Serie {series.series_id} está vacía")
            # Consistency with ForecastInputDTO / BacktestInputDTO: when
            # timestamps are provided they must align 1:1 with the values.
            if series.timestamps and len(series.timestamps) != len(series.values):
                raise ValueError(
                    f"Serie {series.series_id}: timestamps y values deben tener la misma longitud"
                )
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
@dataclass
class MultiForecastOutputDTO:
    """Output DTO for the multi-series forecast use case."""

    results: List[ForecastOutputDTO]  # one forecast result per input series
    total_series: int                 # number of series requested
    successful: int                   # number of series forecast successfully
    failed: int                       # number of series that failed

    def to_dict(self) -> Dict:
        """Serialize the batch result, expanding each per-series forecast."""
        expanded = [forecast.to_dict() for forecast in self.results]
        return {
            "results": expanded,
            "total_series": self.total_series,
            "successful": self.successful,
            "failed": self.failed,
        }
|
app/application/mappers/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mappers - Conversión entre API Schemas y DTOs.
|
| 3 |
+
|
| 4 |
+
Los mappers se encargan de convertir entre la capa de presentación
|
| 5 |
+
(API schemas) y la capa de aplicación (DTOs), manteniendo las capas
|
| 6 |
+
desacopladas.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from .forecast_mapper import ForecastMapper
|
| 10 |
+
from .anomaly_mapper import AnomalyMapper
|
| 11 |
+
from .backtest_mapper import BacktestMapper
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"ForecastMapper",
|
| 15 |
+
"AnomalyMapper",
|
| 16 |
+
"BacktestMapper",
|
| 17 |
+
]
|
app/application/mappers/anomaly_mapper.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mapper para casos de uso de Detección de Anomalías.
|
| 3 |
+
|
| 4 |
+
Convierte entre API schemas (Pydantic) y DTOs de aplicación.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from app.schemas.requests.anomaly import AnomalyDetectionRequest
|
| 8 |
+
from app.schemas.responses.anomaly import AnomalyDetectionResponse, AnomalyPoint
|
| 9 |
+
from app.application.dtos.anomaly_dtos import (
|
| 10 |
+
AnomalyDetectionInputDTO,
|
| 11 |
+
AnomalyDetectionOutputDTO,
|
| 12 |
+
AnomalyPointDTO
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class AnomalyMapper:
    """Translates between anomaly API schemas and application DTOs."""

    @staticmethod
    def to_input_dto(request: AnomalyDetectionRequest) -> AnomalyDetectionInputDTO:
        """Build the use-case input DTO from an API request.

        Args:
            request: Incoming API request.

        Returns:
            AnomalyDetectionInputDTO: DTO consumed by the use case.
        """
        return AnomalyDetectionInputDTO(
            context_values=request.context.values,
            recent_values=request.recent_values,
            quantile_low=request.quantile_low,
            quantile_high=request.quantile_high,
            context_timestamps=request.context.timestamps,
            freq=request.freq
        )

    @staticmethod
    def from_output_dto(dto: AnomalyDetectionOutputDTO) -> AnomalyDetectionResponse:
        """Build the API response from the use-case output DTO.

        Args:
            dto: DTO produced by the use case.

        Returns:
            AnomalyDetectionResponse: Response returned by the API.
        """
        def convert(point: AnomalyPointDTO) -> AnomalyPoint:
            # Field-for-field copy from the application DTO to the schema.
            return AnomalyPoint(
                index=point.index,
                value=point.value,
                expected=point.expected,
                lower_bound=point.lower_bound,
                upper_bound=point.upper_bound,
                is_anomaly=point.is_anomaly,
                z_score=point.z_score,
                severity=point.severity
            )

        return AnomalyDetectionResponse(
            anomalies=[convert(point) for point in dto.anomalies],
            total_points=dto.total_points,
            anomaly_count=dto.anomaly_count,
            anomaly_rate=dto.anomaly_rate,
            summary=dto.summary
        )
|
app/application/mappers/backtest_mapper.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mapper para casos de uso de Backtesting.
|
| 3 |
+
|
| 4 |
+
Convierte entre API schemas (Pydantic) y DTOs de aplicación.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from app.schemas.requests.backtest import BacktestRequest
|
| 8 |
+
from app.schemas.responses.backtest import BacktestResponse, BacktestMetrics
|
| 9 |
+
from app.application.dtos.backtest_dtos import (
|
| 10 |
+
BacktestInputDTO,
|
| 11 |
+
BacktestOutputDTO,
|
| 12 |
+
BacktestMetricsDTO
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class BacktestMapper:
    """Translates between backtest API schemas and application DTOs."""

    @staticmethod
    def to_input_dto(request: BacktestRequest) -> BacktestInputDTO:
        """Build the use-case input DTO from an API request.

        Args:
            request: Incoming API request.

        Returns:
            BacktestInputDTO: DTO consumed by the use case.
        """
        return BacktestInputDTO(
            values=request.series.values,
            test_size=request.test_size,
            quantile_levels=request.quantile_levels,
            timestamps=request.series.timestamps,
            freq=request.freq
        )

    @staticmethod
    def from_output_dto(dto: BacktestOutputDTO) -> BacktestResponse:
        """Build the API response from the use-case output DTO.

        Args:
            dto: DTO produced by the use case.

        Returns:
            BacktestResponse: Response returned by the API.
        """
        return BacktestResponse(
            forecast_values=dto.forecast_values,
            actual_values=dto.actual_values,
            errors=dto.errors,
            # Metrics are copied field-for-field into the schema type.
            metrics=BacktestMetrics(
                mae=dto.metrics.mae,
                mape=dto.metrics.mape,
                rmse=dto.metrics.rmse,
                mse=dto.metrics.mse
            ),
            timestamps=dto.timestamps,
            quantiles=dto.quantiles
        )
|
app/application/mappers/forecast_mapper.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mapper para casos de uso de Forecasting.
|
| 3 |
+
|
| 4 |
+
Convierte entre API schemas (Pydantic) y DTOs de aplicación.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Any
|
| 8 |
+
from app.schemas.requests.forecast import (
|
| 9 |
+
ForecastUnivariateRequest,
|
| 10 |
+
ForecastMultiSeriesRequest,
|
| 11 |
+
SeriesData
|
| 12 |
+
)
|
| 13 |
+
from app.schemas.responses.forecast import (
|
| 14 |
+
ForecastUnivariateResponse,
|
| 15 |
+
ForecastMultiSeriesResponse
|
| 16 |
+
)
|
| 17 |
+
from app.application.dtos.forecast_dtos import (
|
| 18 |
+
ForecastInputDTO,
|
| 19 |
+
ForecastOutputDTO,
|
| 20 |
+
MultiForecastInputDTO,
|
| 21 |
+
MultiForecastOutputDTO,
|
| 22 |
+
SeriesInputDTO
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class ForecastMapper:
    """Translates between forecasting API schemas and application DTOs."""

    @staticmethod
    def to_univariate_input_dto(
        request: ForecastUnivariateRequest
    ) -> ForecastInputDTO:
        """Build the univariate use-case input DTO from an API request.

        Args:
            request: Incoming API request.

        Returns:
            ForecastInputDTO: DTO consumed by the use case.
        """
        return ForecastInputDTO(
            values=request.series.values,
            prediction_length=request.prediction_length,
            quantile_levels=request.quantile_levels,
            timestamps=request.series.timestamps,
            # Not every schema variant carries a series_id; fall back to the default.
            series_id=getattr(request.series, 'series_id', 'series_0'),
            freq=request.freq
        )

    @staticmethod
    def from_univariate_output_dto(
        dto: ForecastOutputDTO
    ) -> ForecastUnivariateResponse:
        """Build the univariate API response from the use-case output DTO.

        Args:
            dto: DTO produced by the use case.

        Returns:
            ForecastUnivariateResponse: Response returned by the API.
        """
        return ForecastUnivariateResponse(
            timestamps=dto.timestamps,
            median=dto.median,
            quantiles=dto.quantiles,
            series_id=dto.series_id,
            metadata=dto.metadata
        )

    @staticmethod
    def to_multi_series_input_dto(
        request: ForecastMultiSeriesRequest
    ) -> MultiForecastInputDTO:
        """Build the multi-series use-case input DTO from an API request.

        Args:
            request: Incoming API request.

        Returns:
            MultiForecastInputDTO: DTO consumed by the use case.
        """
        converted = [
            SeriesInputDTO(
                series_id=item.series_id,
                values=item.values,
                timestamps=item.timestamps
            )
            for item in request.series_list
        ]
        return MultiForecastInputDTO(
            series_list=converted,
            prediction_length=request.prediction_length,
            quantile_levels=request.quantile_levels,
            freq=request.freq
        )

    @staticmethod
    def from_multi_series_output_dto(
        dto: MultiForecastOutputDTO
    ) -> ForecastMultiSeriesResponse:
        """Build the multi-series API response from the use-case output DTO.

        Args:
            dto: DTO produced by the use case.

        Returns:
            ForecastMultiSeriesResponse: Response returned by the API.
        """
        return ForecastMultiSeriesResponse(
            results=[
                ForecastMapper.from_univariate_output_dto(item)
                for item in dto.results
            ],
            total_series=dto.total_series,
            successful=dto.successful,
            failed=dto.failed
        )
|
app/application/use_cases/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Use Cases - Lógica de Aplicación.
|
| 3 |
+
|
| 4 |
+
Los casos de uso orquestan las operaciones del dominio para cumplir
|
| 5 |
+
con los requisitos de la aplicación. Implementan el patrón Command/Query.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .forecast_use_case import ForecastUnivariateUseCase, ForecastMultiSeriesUseCase
|
| 9 |
+
from .anomaly_use_case import DetectAnomaliesUseCase
|
| 10 |
+
from .backtest_use_case import BacktestUseCase
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"ForecastUnivariateUseCase",
|
| 14 |
+
"ForecastMultiSeriesUseCase",
|
| 15 |
+
"DetectAnomaliesUseCase",
|
| 16 |
+
"BacktestUseCase",
|
| 17 |
+
]
|
app/application/use_cases/anomaly_use_case.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Caso de uso para Detección de Anomalías.
|
| 3 |
+
|
| 4 |
+
Implementa la lógica de aplicación para detectar anomalías
|
| 5 |
+
en series temporales usando pronósticos probabilísticos.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from app.domain.services.anomaly_service import AnomalyService
|
| 9 |
+
from app.domain.models.time_series import TimeSeries
|
| 10 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 11 |
+
from app.application.dtos.anomaly_dtos import (
|
| 12 |
+
AnomalyDetectionInputDTO,
|
| 13 |
+
AnomalyDetectionOutputDTO,
|
| 14 |
+
AnomalyPointDTO
|
| 15 |
+
)
|
| 16 |
+
from app.utils.logger import setup_logger
|
| 17 |
+
|
| 18 |
+
logger = setup_logger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DetectAnomaliesUseCase:
    """Use case: anomaly detection.

    Flags observed values as anomalous by comparing them against the
    quantile band of a probabilistic forecast built from preceding context.
    """

    def __init__(self, anomaly_service: AnomalyService):
        """Store the domain anomaly-detection service this use case delegates to.

        Args:
            anomaly_service: Domain service for anomaly detection.
        """
        self.anomaly_service = anomaly_service
        logger.info("DetectAnomaliesUseCase initialized")

    def execute(self, input_dto: AnomalyDetectionInputDTO) -> AnomalyDetectionOutputDTO:
        """Run anomaly detection.

        Args:
            input_dto: Input with context values and recent observed values.

        Returns:
            AnomalyDetectionOutputDTO: Detected anomaly points plus summary stats.

        Raises:
            ValueError: If the input data is invalid.
            RuntimeError: If detection fails.
        """
        logger.info(
            f"Detecting anomalies: {len(input_dto.context_values)} context points, "
            f"{len(input_dto.recent_values)} recent points"
        )

        input_dto.validate()

        # Build the domain models the service expects.
        context = TimeSeries(
            values=input_dto.context_values,
            timestamps=input_dto.context_timestamps,
            freq=input_dto.freq,
        )
        # Forecast exactly as many steps as there are recent observations,
        # bracketing the median with the requested quantile band.
        config = ForecastConfig(
            prediction_length=len(input_dto.recent_values),
            quantile_levels=[input_dto.quantile_low, 0.5, input_dto.quantile_high],
            freq=input_dto.freq,
        )

        # Delegate to the domain service; surface failures as RuntimeError.
        try:
            anomaly_points = self.anomaly_service.detect_anomalies(
                context=context,
                recent_observed=input_dto.recent_values,
                config=config,
                quantile_low=input_dto.quantile_low,
                quantile_high=input_dto.quantile_high,
            )
        except Exception as e:
            logger.error(f"Anomaly detection failed: {e}", exc_info=True)
            raise RuntimeError(f"Anomaly detection failed: {str(e)}") from e
        logger.info("Anomaly detection completed")

        # Attach a severity label to every point and wrap it in a DTO.
        anomaly_dtos = [
            AnomalyPointDTO(
                index=point.index,
                value=point.value,
                expected=point.expected,
                lower_bound=point.lower_bound,
                upper_bound=point.upper_bound,
                is_anomaly=point.is_anomaly,
                z_score=point.z_score,
                severity=self._calculate_severity(point.z_score, point.is_anomaly),
            )
            for point in anomaly_points
        ]

        # Aggregate statistics.
        total_points = len(anomaly_dtos)
        anomaly_count = sum(1 for dto in anomaly_dtos if dto.is_anomaly)
        anomaly_rate = anomaly_count / total_points if total_points > 0 else 0.0

        summary = self._create_summary(anomaly_dtos, input_dto)

        logger.info(
            f"Anomalies detected: {anomaly_count}/{total_points} "
            f"({anomaly_rate*100:.1f}%)"
        )

        return AnomalyDetectionOutputDTO(
            anomalies=anomaly_dtos,
            total_points=total_points,
            anomaly_count=anomaly_count,
            anomaly_rate=anomaly_rate,
            summary=summary,
        )

    def _calculate_severity(self, z_score: float, is_anomaly: bool) -> str:
        """Map a z-score to a severity label.

        NOTE(review): thresholds compare the raw (signed) z-score — this
        assumes the service reports non-negative z-scores; confirm upstream.

        Args:
            z_score: Z-score of the point.
            is_anomaly: Whether the point was flagged as anomalous.

        Returns:
            str: One of "normal", "low", "medium", "high".
        """
        if not is_anomaly:
            return "normal"
        # Ascending upper bounds; anything past the last bound is "high".
        for upper, label in ((1.5, "low"), (2.5, "medium")):
            if z_score < upper:
                return label
        return "high"

    def _create_summary(
        self,
        anomaly_dtos: list,
        input_dto: AnomalyDetectionInputDTO
    ) -> dict:
        """Build a statistics summary of the detection run.

        Args:
            anomaly_dtos: All evaluated points (anomalous or not).
            input_dto: Original input data (for the quantile range).

        Returns:
            dict: Summary statistics.
        """
        flagged = [dto for dto in anomaly_dtos if dto.is_anomaly]

        if not flagged:
            # No anomalies: every point carries severity "normal" by construction.
            return {
                "has_anomalies": False,
                "severity_distribution": {"normal": len(anomaly_dtos)},
                "max_z_score": 0.0,
                "avg_deviation": 0.0,
            }

        # Count points per severity label.
        severity_dist = {level: 0 for level in ("normal", "low", "medium", "high")}
        for dto in anomaly_dtos:
            severity_dist[dto.severity] += 1

        # Statistics over the flagged points only.
        max_z_score = max(dto.z_score for dto in flagged)
        avg_deviation = sum(abs(dto.value - dto.expected) for dto in flagged) / len(flagged)

        return {
            "has_anomalies": True,
            "severity_distribution": severity_dist,
            "max_z_score": round(max_z_score, 2),
            "avg_deviation": round(avg_deviation, 2),
            "quantile_range": {
                "low": input_dto.quantile_low,
                "high": input_dto.quantile_high,
            },
        }
|
app/application/use_cases/backtest_use_case.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Caso de uso para Backtesting.
|
| 3 |
+
|
| 4 |
+
Implementa la lógica de aplicación para evaluar la precisión
|
| 5 |
+
de pronósticos usando datos históricos.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import math
|
| 9 |
+
from typing import List
|
| 10 |
+
from app.domain.services.forecast_service import ForecastService
|
| 11 |
+
from app.domain.models.time_series import TimeSeries
|
| 12 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 13 |
+
from app.application.dtos.backtest_dtos import (
|
| 14 |
+
BacktestInputDTO,
|
| 15 |
+
BacktestOutputDTO,
|
| 16 |
+
BacktestMetricsDTO
|
| 17 |
+
)
|
| 18 |
+
from app.utils.logger import setup_logger
|
| 19 |
+
|
| 20 |
+
logger = setup_logger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class BacktestUseCase:
    """Use case: backtesting.

    Evaluates forecast accuracy against historical data: splits the
    series into train/test segments, forecasts the test horizon from
    the train segment, and computes error metrics.
    """

    def __init__(self, forecast_service: ForecastService):
        """Initialize the use case.

        Args:
            forecast_service: Domain service used to produce forecasts.
        """
        self.forecast_service = forecast_service
        logger.info("BacktestUseCase initialized")

    def execute(self, input_dto: BacktestInputDTO) -> BacktestOutputDTO:
        """Run the backtest.

        Args:
            input_dto: Input with the full series and the test size.

        Returns:
            BacktestOutputDTO: Backtest results with metrics.

        Raises:
            ValueError: If the input data is invalid.
            RuntimeError: If the backtest fails.
        """
        logger.info(
            f"Executing backtest: {len(input_dto.values)} total points, "
            f"{input_dto.test_size} test points"
        )

        # Validate input
        input_dto.validate()

        # Split into train/test via an explicit index. The negative-slice
        # form values[:-test_size] silently yields an EMPTY train set when
        # test_size == 0 (a[:-0] == a[:0]); the explicit split is identical
        # for test_size > 0 and safe at that boundary.
        split = len(input_dto.values) - input_dto.test_size
        train_values = input_dto.values[:split]
        test_values = input_dto.values[split:]

        train_timestamps = None
        test_timestamps = None

        if input_dto.timestamps:
            # Split timestamps by their own length to preserve behavior even
            # if (unexpectedly) len(timestamps) != len(values).
            ts_split = len(input_dto.timestamps) - input_dto.test_size
            train_timestamps = input_dto.timestamps[:ts_split]
            test_timestamps = input_dto.timestamps[ts_split:]

        logger.info(f"Train size: {len(train_values)}, Test size: {len(test_values)}")

        # Build domain models for the train segment.
        train_series = TimeSeries(
            values=train_values,
            timestamps=train_timestamps,
            freq=input_dto.freq
        )

        config = ForecastConfig(
            prediction_length=input_dto.test_size,
            quantile_levels=input_dto.quantile_levels,
            freq=input_dto.freq
        )

        # Forecast over the held-out horizon; surface failures as RuntimeError.
        try:
            result = self.forecast_service.forecast_univariate(train_series, config)
            logger.info(f"Forecast completed: {len(result.median)} predictions")
        except Exception as e:
            logger.error(f"Backtest forecast failed: {e}", exc_info=True)
            raise RuntimeError(f"Backtest forecast failed: {str(e)}") from e

        # Compare predictions against the actual held-out values.
        forecast_values = result.median
        actual_values = test_values

        # Signed errors (actual - forecast); zip truncates to the shorter
        # list if lengths ever differ — assumed equal here (TODO confirm).
        errors = [
            actual - forecast
            for actual, forecast in zip(actual_values, forecast_values)
        ]

        metrics = self._calculate_metrics(actual_values, forecast_values)

        logger.info(
            f"Backtest metrics - MAE: {metrics.mae:.2f}, "
            f"MAPE: {metrics.mape:.2f}%, RMSE: {metrics.rmse:.2f}"
        )

        # Prefer caller-supplied timestamps; otherwise use the forecast's.
        if test_timestamps:
            output_timestamps = test_timestamps
        else:
            output_timestamps = result.timestamps

        output_dto = BacktestOutputDTO(
            forecast_values=forecast_values,
            actual_values=actual_values,
            errors=errors,
            metrics=metrics,
            timestamps=output_timestamps,
            quantiles=result.quantiles if result.quantiles else None
        )

        return output_dto

    def _calculate_metrics(
        self,
        actual: List[float],
        forecast: List[float]
    ) -> BacktestMetricsDTO:
        """Compute error metrics for the backtest.

        Assumes ``actual`` is non-empty (guaranteed by input validation
        upstream); an empty list would raise ZeroDivisionError.

        Args:
            actual: Observed values.
            forecast: Predicted values.

        Returns:
            BacktestMetricsDTO: Computed metrics (MAE, MAPE, MSE, RMSE).
        """
        n = len(actual)

        # Mean Absolute Error
        mae = sum(abs(a - f) for a, f in zip(actual, forecast)) / n

        # Mean Absolute Percentage Error — zero actuals are skipped to
        # avoid division by zero; 0.0 if every actual is zero.
        mape_values = [
            abs((a - f) / a) for a, f in zip(actual, forecast) if a != 0
        ]
        mape = (sum(mape_values) / len(mape_values) * 100) if mape_values else 0.0

        # Mean Squared Error
        mse = sum((a - f) ** 2 for a, f in zip(actual, forecast)) / n

        # Root Mean Squared Error
        rmse = math.sqrt(mse)

        return BacktestMetricsDTO(
            mae=mae,
            mape=mape,
            rmse=rmse,
            mse=mse
        )
|
app/application/use_cases/forecast_use_case.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Casos de uso para Forecasting.
|
| 3 |
+
|
| 4 |
+
Implementan la lógica de aplicación para pronósticos,
|
| 5 |
+
orquestando servicios de dominio y transformando DTOs.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List
|
| 9 |
+
from app.domain.services.forecast_service import ForecastService
|
| 10 |
+
from app.domain.models.time_series import TimeSeries
|
| 11 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 12 |
+
from app.application.dtos.forecast_dtos import (
|
| 13 |
+
ForecastInputDTO,
|
| 14 |
+
ForecastOutputDTO,
|
| 15 |
+
MultiForecastInputDTO,
|
| 16 |
+
MultiForecastOutputDTO,
|
| 17 |
+
SeriesInputDTO
|
| 18 |
+
)
|
| 19 |
+
from app.utils.logger import setup_logger
|
| 20 |
+
|
| 21 |
+
logger = setup_logger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ForecastUnivariateUseCase:
    """Use case: univariate forecasting.

    Runs a probabilistic forecast for a single time series.
    """

    def __init__(self, forecast_service: ForecastService):
        """Store the domain forecasting service this use case delegates to.

        Args:
            forecast_service: Domain service for forecasting.
        """
        self.forecast_service = forecast_service
        logger.info("ForecastUnivariateUseCase initialized")

    def execute(self, input_dto: ForecastInputDTO) -> ForecastOutputDTO:
        """Run the use case.

        Args:
            input_dto: Validated input data.

        Returns:
            ForecastOutputDTO: Forecast result.

        Raises:
            ValueError: If the input data is invalid.
            RuntimeError: If the forecast fails.
        """
        logger.info(f"Executing forecast for series: {input_dto.series_id}")

        input_dto.validate()

        # Map the input DTO onto domain models.
        series = TimeSeries(
            values=input_dto.values,
            timestamps=input_dto.timestamps,
            series_id=input_dto.series_id,
            freq=input_dto.freq,
        )
        config = ForecastConfig(
            prediction_length=input_dto.prediction_length,
            quantile_levels=input_dto.quantile_levels,
            freq=input_dto.freq,
        )

        # Delegate to the domain service; surface failures as RuntimeError.
        try:
            result = self.forecast_service.forecast_univariate(series, config)
        except Exception as e:
            logger.error(f"Forecast failed: {e}", exc_info=True)
            raise RuntimeError(f"Forecast execution failed: {str(e)}") from e
        else:
            logger.info(f"Forecast completed: {len(result.timestamps)} periods")

        # Map the domain result back onto the output DTO.
        return ForecastOutputDTO(
            timestamps=result.timestamps,
            median=result.median,
            quantiles=result.quantiles,
            series_id=result.series_id,
            metadata={
                "prediction_length": config.prediction_length,
                "freq": config.freq,
                "context_length": len(series.values),
            },
        )
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class ForecastMultiSeriesUseCase:
    """Use case: multi-series forecasting.

    Runs an independent forecast for each of several time series,
    continuing past per-series failures.
    """

    def __init__(self, forecast_service: ForecastService):
        """Store the domain forecasting service this use case delegates to.

        Args:
            forecast_service: Domain service for forecasting.
        """
        self.forecast_service = forecast_service
        logger.info("ForecastMultiSeriesUseCase initialized")

    def execute(self, input_dto: MultiForecastInputDTO) -> MultiForecastOutputDTO:
        """Run the use case for multiple series.

        Args:
            input_dto: Input containing multiple series.

        Returns:
            MultiForecastOutputDTO: Results for all forecasts, with
            success/failure counts.
        """
        logger.info(f"Executing forecast for {len(input_dto.series_list)} series")

        input_dto.validate()

        # One shared configuration for every series.
        config = ForecastConfig(
            prediction_length=input_dto.prediction_length,
            quantile_levels=input_dto.quantile_levels,
            freq=input_dto.freq,
        )

        # Map each input DTO onto a domain model.
        domain_series: List[TimeSeries] = [
            TimeSeries(
                values=item.values,
                timestamps=item.timestamps,
                series_id=item.series_id,
                freq=input_dto.freq,
            )
            for item in input_dto.series_list
        ]

        # Forecast each series independently; a failure on one series
        # does not abort the rest.
        results = []
        successful = 0
        failed = 0

        for series in domain_series:
            try:
                outcome = self.forecast_service.forecast_univariate(series, config)
            except Exception as e:
                logger.error(f"Forecast failed for series {series.series_id}: {e}")
                failed += 1
                continue

            results.append(
                ForecastOutputDTO(
                    timestamps=outcome.timestamps,
                    median=outcome.median,
                    quantiles=outcome.quantiles,
                    series_id=outcome.series_id,
                    metadata={
                        "prediction_length": config.prediction_length,
                        "freq": config.freq,
                        "context_length": len(series.values),
                    },
                )
            )
            successful += 1

        logger.info(f"Multi-series forecast completed: {successful} successful, {failed} failed")

        return MultiForecastOutputDTO(
            results=results,
            total_series=len(input_dto.series_list),
            successful=successful,
            failed=failed,
        )
|
app/domain/__init__.py
ADDED
|
File without changes
|
app/domain/interfaces/__init__.py
ADDED
|
File without changes
|
app/domain/interfaces/data_transformer.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Interface para transformación de datos.
|
| 3 |
+
|
| 4 |
+
Define la abstracción para convertir datos entre diferentes formatos,
|
| 5 |
+
cumpliendo con ISP (Interface Segregation Principle).
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from abc import ABC, abstractmethod
|
| 9 |
+
from typing import List, Optional, Dict, Any
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class IDataTransformer(ABC):
    """
    Interface for time-series data transformation.

    Segregated to contain only data-transformation methods, per the
    Interface Segregation Principle (ISP).
    """

    @abstractmethod
    def build_context_df(
        self,
        values: List[float],
        timestamps: Optional[List[str]] = None,
        series_id: str = "series_0",
        freq: str = "D"
    ) -> pd.DataFrame:
        """
        Build a context DataFrame for forecasting.

        Args:
            values: Historical values.
            timestamps: Timestamps (optional; implementations generate
                them when None).
            series_id: Series identifier.
            freq: Time frequency (D=daily, H=hourly, etc.).

        Returns:
            pd.DataFrame: DataFrame with columns id, timestamp, target.

        Raises:
            ValueError: If values and timestamps have different lengths.
        """
        pass

    @abstractmethod
    def parse_prediction_result(
        self,
        pred_df: pd.DataFrame,
        quantile_levels: List[float]
    ) -> Dict[str, Any]:
        """
        Parse the prediction result into a standard format.

        Args:
            pred_df: DataFrame with the model's predictions.
            quantile_levels: Quantiles that were computed.

        Returns:
            Dict with:
                - timestamps: List of timestamps
                - median: List of median values
                - quantiles: Dict {quantile: [values]}
        """
        pass
|
app/domain/interfaces/forecast_model.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Interface para modelos de forecasting.
|
| 3 |
+
|
| 4 |
+
Este módulo define la abstracción IForecastModel que permite
|
| 5 |
+
diferentes implementaciones de modelos (Chronos, Prophet, ARIMA, etc.)
|
| 6 |
+
cumpliendo con DIP (Dependency Inversion Principle).
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from abc import ABC, abstractmethod
|
| 10 |
+
from typing import List, Dict, Any, Optional
|
| 11 |
+
import pandas as pd
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class IForecastModel(ABC):
|
| 15 |
+
"""
|
| 16 |
+
Interface para modelos de forecasting.
|
| 17 |
+
|
| 18 |
+
Esta abstracción permite que diferentes implementaciones de modelos
|
| 19 |
+
sean intercambiables sin modificar el código que las usa (DIP + LSP).
|
| 20 |
+
|
| 21 |
+
Ejemplos de implementaciones:
|
| 22 |
+
- ChronosModel (Chronos-2)
|
| 23 |
+
- ProphetModel (Facebook Prophet)
|
| 24 |
+
- ARIMAModel (ARIMA tradicional)
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
@abstractmethod
|
| 28 |
+
def predict(
|
| 29 |
+
self,
|
| 30 |
+
context_df: pd.DataFrame,
|
| 31 |
+
prediction_length: int,
|
| 32 |
+
quantile_levels: List[float],
|
| 33 |
+
**kwargs
|
| 34 |
+
) -> pd.DataFrame:
|
| 35 |
+
"""
|
| 36 |
+
Genera pronósticos probabilísticos.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
context_df: DataFrame con datos históricos.
|
| 40 |
+
Debe contener columnas: id, timestamp, target
|
| 41 |
+
prediction_length: Número de pasos a predecir
|
| 42 |
+
quantile_levels: Lista de cuantiles a calcular (ej: [0.1, 0.5, 0.9])
|
| 43 |
+
**kwargs: Parámetros adicionales específicos del modelo
|
| 44 |
+
|
| 45 |
+
Returns:
|
| 46 |
+
pd.DataFrame: Pronósticos con columnas:
|
| 47 |
+
- id: Identificador de serie
|
| 48 |
+
- timestamp: Timestamp de predicción
|
| 49 |
+
- predictions: Valor mediano
|
| 50 |
+
- {q}: Valor para cada cuantil q
|
| 51 |
+
|
| 52 |
+
Raises:
|
| 53 |
+
ValueError: Si los datos de entrada son inválidos
|
| 54 |
+
RuntimeError: Si el modelo falla al predecir
|
| 55 |
+
"""
|
| 56 |
+
pass
|
| 57 |
+
|
| 58 |
+
@abstractmethod
|
| 59 |
+
def get_model_info(self) -> Dict[str, Any]:
|
| 60 |
+
"""
|
| 61 |
+
Retorna información del modelo.
|
| 62 |
+
|
| 63 |
+
Returns:
|
| 64 |
+
Dict con información del modelo:
|
| 65 |
+
- type: Tipo de modelo (ej: "Chronos2", "Prophet")
|
| 66 |
+
- model_id: ID del modelo
|
| 67 |
+
- version: Versión del modelo
|
| 68 |
+
- device: Dispositivo usado (cpu/cuda)
|
| 69 |
+
- otros campos específicos del modelo
|
| 70 |
+
"""
|
| 71 |
+
pass
|
| 72 |
+
|
| 73 |
+
def validate_context(self, context_df: pd.DataFrame) -> bool:
    """
    Validate that a context DataFrame has the expected layout.

    The frame must be a pandas DataFrame, contain the columns
    id, timestamp and target, be non-empty, and hold no nulls
    in the 'target' column.

    Args:
        context_df: DataFrame to validate

    Returns:
        bool: True when the DataFrame is valid

    Raises:
        ValueError: if any of the checks fails
    """
    if not isinstance(context_df, pd.DataFrame):
        raise ValueError("context_df debe ser un pandas DataFrame")

    present = set(context_df.columns)
    missing_columns = {"id", "timestamp", "target"} - present
    if missing_columns:
        raise ValueError(
            f"Faltan columnas requeridas: {missing_columns}. "
            f"Se encontraron: {present}"
        )

    if context_df.empty:
        raise ValueError("context_df no puede estar vacío")

    if context_df["target"].isnull().any():
        raise ValueError("La columna 'target' contiene valores nulos")

    return True
|
app/domain/models/__init__.py
ADDED
|
File without changes
|
app/domain/models/anomaly.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Modelo de dominio para anomalías detectadas.
|
| 3 |
+
|
| 4 |
+
Este módulo define la entidad AnomalyPoint, cumpliendo con SRP.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
class AnomalyPoint:
    """
    A single observation evaluated against its forecast interval.

    Attributes:
        index: position of the point within the evaluated window
        value: observed value
        expected: expected value (forecast median)
        lower_bound: lower edge of the confidence interval
        upper_bound: upper edge of the confidence interval
        is_anomaly: whether the point was flagged as anomalous
        z_score: Z score of the point (optional)
        severity: anomaly severity ("low", "medium" or "high");
            derived from z_score when omitted and the point is anomalous

    Example:
        >>> point = AnomalyPoint(
        ...     index=5,
        ...     value=200.0,
        ...     expected=120.0,
        ...     lower_bound=115.0,
        ...     upper_bound=125.0,
        ...     is_anomaly=True,
        ...     z_score=4.5
        ... )
        >>> point.deviation
        80.0
        >>> point.severity
        'high'
    """

    index: int
    value: float
    expected: float
    lower_bound: float
    upper_bound: float
    is_anomaly: bool
    z_score: float = 0.0
    severity: Optional[str] = None

    def __post_init__(self):
        """Derive severity from z_score for anomalous points that lack one."""
        if self.is_anomaly and self.severity is None:
            self.severity = self._calculate_severity()

    @property
    def deviation(self) -> float:
        """Absolute difference between the observed and expected values."""
        return abs(self.value - self.expected)

    @property
    def deviation_percentage(self) -> float:
        """Deviation expressed as a percentage of the expected value."""
        if self.expected == 0:
            # A non-zero observation against a zero expectation is an
            # unbounded relative deviation.
            return 0.0 if self.value == 0 else float('inf')
        return (self.deviation / abs(self.expected)) * 100

    def _calculate_severity(self) -> str:
        """Map |z_score| to a bucket: >= 4 high, >= 3 medium, else low."""
        magnitude = abs(self.z_score)
        if magnitude >= 4.0:
            return "high"
        if magnitude >= 3.0:
            return "medium"
        return "low"

    def is_above_expected(self) -> bool:
        """True when the observed value exceeds the expected one."""
        return self.value > self.expected

    def is_below_expected(self) -> bool:
        """True when the observed value falls short of the expected one."""
        return self.value < self.expected

    def to_dict(self) -> dict:
        """Serialize the point, including the derived deviation fields."""
        payload = {
            "index": self.index,
            "value": self.value,
            "expected": self.expected,
            "lower_bound": self.lower_bound,
            "upper_bound": self.upper_bound,
            "is_anomaly": self.is_anomaly,
            "z_score": self.z_score,
            "severity": self.severity,
        }
        payload["deviation"] = self.deviation
        payload["deviation_percentage"] = self.deviation_percentage
        return payload
|
app/domain/models/forecast_config.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Modelo de dominio para configuración de forecasting.
|
| 3 |
+
|
| 4 |
+
Este módulo define la entidad ForecastConfig, cumpliendo con SRP.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass, field
|
| 8 |
+
from typing import List
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
class ForecastConfig:
    """
    Configuration for forecasting operations.

    Holds the parameters needed to run a forecast: prediction horizon,
    quantile levels and temporal frequency. On construction the instance
    is validated, the median quantile (0.5) is guaranteed to be present,
    and the quantile list is deduplicated and sorted ascending.

    Attributes:
        prediction_length: Number of periods to forecast (must be >= 1)
        quantile_levels: Quantiles to compute (e.g. [0.1, 0.5, 0.9]); each in [0, 1]
        freq: Temporal frequency code (D, H, M, W, Y, Q, S, T, min)

    Example:
        >>> config = ForecastConfig(
        ...     prediction_length=7,
        ...     quantile_levels=[0.1, 0.5, 0.9],
        ...     freq="D"
        ... )
        >>> config.has_median
        True
    """

    prediction_length: int
    quantile_levels: List[float] = field(default_factory=lambda: [0.1, 0.5, 0.9])
    freq: str = "D"

    def __post_init__(self):
        """Validate and normalize the configuration on construction."""
        self.validate()
        self._ensure_median()
        self._sort_quantiles()

    @property
    def has_median(self) -> bool:
        """True if quantile 0.5 (the median) is included."""
        return 0.5 in self.quantile_levels

    def validate(self) -> bool:
        """
        Validate the configuration.

        Returns:
            bool: True if valid

        Raises:
            ValueError: If the configuration is invalid
        """
        # prediction_length must be a positive horizon
        if self.prediction_length < 1:
            raise ValueError(
                f"prediction_length debe ser >= 1, recibido: {self.prediction_length}"
            )

        # at least one quantile is required
        if not self.quantile_levels:
            raise ValueError("quantile_levels no puede estar vacío")

        # every quantile must lie in [0, 1]
        for q in self.quantile_levels:
            if not 0 <= q <= 1:
                raise ValueError(
                    f"Todos los cuantiles deben estar en [0, 1], encontrado: {q}"
                )

        # frequency must be one of the recognized pandas-style codes
        valid_freqs = {"D", "H", "M", "W", "Y", "Q", "S", "T", "min"}
        if self.freq not in valid_freqs:
            raise ValueError(
                f"Frecuencia '{self.freq}' no reconocida. "
                f"Válidas: {valid_freqs}"
            )

        return True

    def _ensure_median(self):
        """Ensure the median quantile (0.5) is included.

        Rebinds the field to a new list instead of appending in place:
        the incoming list may still be owned by the caller, and mutating
        it would leak the normalization back as a surprising side effect.
        """
        if not self.has_median:
            self.quantile_levels = self.quantile_levels + [0.5]

    def _sort_quantiles(self):
        """Deduplicate and sort the quantiles in ascending order.

        Building a fresh list here also guarantees the instance never
        aliases the caller-supplied list after construction.
        """
        self.quantile_levels = sorted(set(self.quantile_levels))

    @classmethod
    def default(cls) -> "ForecastConfig":
        """
        Create a configuration with default values.

        Returns:
            ForecastConfig: default configuration
                - prediction_length: 7
                - quantile_levels: [0.1, 0.5, 0.9]
                - freq: "D"
        """
        return cls(
            prediction_length=7,
            quantile_levels=[0.1, 0.5, 0.9],
            freq="D"
        )

    def to_dict(self) -> dict:
        """Serialize the configuration to a plain dictionary."""
        return {
            "prediction_length": self.prediction_length,
            "quantile_levels": self.quantile_levels,
            "freq": self.freq
        }
|
app/domain/models/forecast_result.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Modelo de dominio para resultados de forecasting.
|
| 3 |
+
|
| 4 |
+
Este módulo define la entidad ForecastResult, cumpliendo con SRP.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from typing import List, Dict, Any
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
class ForecastResult:
    """
    Result of a forecasting operation.

    Encapsulates the generated forecasts: timestamps, median values
    and quantile series, validated for mutual consistency.

    Attributes:
        timestamps: forecast timestamps
        median: median values (quantile 0.5)
        quantiles: mapping of quantile -> values (e.g. {"0.1": [...], "0.9": [...]})
        series_id: series identifier
        metadata: additional forecast information

    Example:
        >>> result = ForecastResult(
        ...     timestamps=["2025-11-10", "2025-11-11"],
        ...     median=[120.5, 122.3],
        ...     quantiles={"0.1": [115.2, 116.8], "0.9": [125.8, 127.8]},
        ...     series_id="sales_A"
        ... )
        >>> result.length
        2
    """

    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]
    series_id: str = "series_0"
    metadata: Dict[str, Any] = None  # normalized to {} in __post_init__

    def __post_init__(self):
        """Normalize metadata and validate consistency on construction."""
        if self.metadata is None:
            self.metadata = {}
        self.validate()

    @property
    def length(self) -> int:
        """Number of forecast periods."""
        return len(self.timestamps)

    def validate(self) -> bool:
        """
        Check the internal consistency of the result.

        Returns:
            bool: True when consistent

        Raises:
            ValueError: on empty result, length mismatch, or
                non-numeric values
        """
        horizon = len(self.timestamps)

        if horizon == 0:
            raise ValueError("El resultado no puede estar vacío")

        if len(self.median) != horizon:
            raise ValueError(
                f"Median ({len(self.median)}) debe tener la misma longitud "
                f"que timestamps ({horizon})"
            )

        for level, series in self.quantiles.items():
            if len(series) != horizon:
                raise ValueError(
                    f"Cuantil {level} ({len(series)}) debe tener la misma longitud "
                    f"que timestamps ({horizon})"
                )

        def numeric(seq):
            # bool is technically an int subclass, matching the original check
            return all(isinstance(v, (int, float)) for v in seq)

        if not numeric(self.median):
            raise ValueError("Median debe contener solo valores numéricos")

        for level, series in self.quantiles.items():
            if not numeric(series):
                raise ValueError(f"Cuantil {level} debe contener solo valores numéricos")

        return True

    def get_quantile(self, level: float) -> List[float]:
        """
        Fetch the values of a specific quantile.

        Args:
            level: quantile level (e.g. 0.1, 0.5, 0.9)

        Returns:
            List[float]: quantile values

        Raises:
            KeyError: if the quantile is not present
        """
        # Keys are stored in %.3g formatting (e.g. 0.1 -> "0.1")
        key = f"{level:.3g}"
        if key in self.quantiles:
            return self.quantiles[key]
        available = list(self.quantiles.keys())
        raise KeyError(
            f"Cuantil {level} no encontrado. Disponibles: {available}"
        )

    def get_interval(self, lower: float = 0.1, upper: float = 0.9) -> Dict[str, List[float]]:
        """
        Build a prediction interval.

        Args:
            lower: lower quantile (default: 0.1)
            upper: upper quantile (default: 0.9)

        Returns:
            Dict with "lower", "median", "upper"
        """
        band = {"lower": self.get_quantile(lower)}
        band["median"] = self.median
        band["upper"] = self.get_quantile(upper)
        return band

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize the result to a dictionary.

        Returns:
            Dict representation of the result
        """
        payload = {
            "timestamps": self.timestamps,
            "median": self.median,
            "quantiles": self.quantiles,
            "series_id": self.series_id,
        }
        payload["length"] = self.length
        payload["metadata"] = self.metadata
        return payload
|
app/domain/models/time_series.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Modelo de dominio para series temporales.
|
| 3 |
+
|
| 4 |
+
Este módulo define la entidad TimeSeries, cumpliendo con SRP.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass, field
|
| 8 |
+
from typing import List, Optional, Dict, Any
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
class TimeSeries:
    """
    Domain model for a single time series.

    Wraps the numeric values of a series together with optional
    timestamps and metadata. The instance is validated on construction.

    Attributes:
        values: numeric observations of the series
        timestamps: optional timestamps (ISO strings or indices), one per value
        series_id: unique identifier of the series
        freq: temporal frequency (D=daily, H=hourly, M=monthly, etc.)
        metadata: free-form extra information

    Example:
        >>> series = TimeSeries(
        ...     values=[100, 102, 105, 103, 108],
        ...     series_id="sales_product_a",
        ...     freq="D"
        ... )
        >>> series.length
        5
        >>> series.validate()
        True
    """

    values: List[float]
    timestamps: Optional[List[str]] = None
    series_id: str = "series_0"
    freq: str = "D"
    metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        """Run consistency checks as soon as the instance is built."""
        self.validate()

    @property
    def length(self) -> int:
        """Number of observations in the series."""
        return len(self.values)

    def validate(self) -> bool:
        """
        Check the consistency of the series.

        Returns:
            bool: True when the series is valid

        Raises:
            ValueError: when the series is empty, contains non-numeric
                values, contains nulls/NaN, or timestamps do not align
        """
        if not self.values:
            raise ValueError("La serie temporal no puede estar vacía")

        if not all(isinstance(v, (int, float)) for v in self.values):
            raise ValueError("Todos los valores deben ser numéricos")

        for v in self.values:
            # NaN is the only float that compares unequal to itself.
            if v is None or (isinstance(v, float) and v != v):
                raise ValueError("La serie contiene valores nulos o NaN")

        if self.timestamps is not None and len(self.timestamps) != len(self.values):
            raise ValueError(
                f"Timestamps ({len(self.timestamps)}) y values ({len(self.values)}) "
                "deben tener la misma longitud"
            )

        return True

    def get_subset(self, start: int, end: int) -> "TimeSeries":
        """
        Return a slice of the series as a new TimeSeries.

        Args:
            start: start index (inclusive)
            end: end index (exclusive)

        Returns:
            TimeSeries: new instance holding the slice
        """
        sliced_timestamps = self.timestamps[start:end] if self.timestamps else None
        return TimeSeries(
            values=self.values[start:end],
            timestamps=sliced_timestamps,
            series_id=self.series_id,
            freq=self.freq,
            metadata=self.metadata.copy()
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize the series to a dictionary.

        Returns:
            Dict representation of the series
        """
        payload: Dict[str, Any] = {
            "values": self.values,
            "timestamps": self.timestamps,
            "series_id": self.series_id,
            "freq": self.freq,
        }
        payload["length"] = self.length
        payload["metadata"] = self.metadata
        return payload
|
app/domain/services/__init__.py
ADDED
|
File without changes
|
app/domain/services/anomaly_service.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Servicio de dominio para detección de anomalías.
|
| 3 |
+
|
| 4 |
+
Este servicio encapsula la lógica de detección de anomalías,
|
| 5 |
+
cumpliendo con SRP y DIP.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List
|
| 9 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 10 |
+
from app.domain.interfaces.data_transformer import IDataTransformer
|
| 11 |
+
from app.domain.models.time_series import TimeSeries
|
| 12 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 13 |
+
from app.domain.models.anomaly import AnomalyPoint
|
| 14 |
+
from app.utils.logger import setup_logger
|
| 15 |
+
|
| 16 |
+
logger = setup_logger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class AnomalyService:
    """
    Domain service for anomaly detection.

    Detects anomalous points by comparing observed values against the
    model's forecast, using prediction intervals.

    Attributes:
        model: forecasting model
        transformer: data transformer

    Example:
        >>> service = AnomalyService(model, transformer)
        >>> context = TimeSeries(values=[100, 102, 105, 103, 108])
        >>> recent = [107, 200, 106]  # 200 is an anomaly
        >>> anomalies = service.detect_anomalies(context, recent, config)
        >>> sum(1 for a in anomalies if a.is_anomaly)
        1
    """

    def __init__(
        self,
        model: IForecastModel,
        transformer: IDataTransformer
    ):
        """
        Initialize the service.

        Args:
            model: IForecastModel implementation
            transformer: IDataTransformer implementation
        """
        self.model = model
        self.transformer = transformer
        logger.info("AnomalyService initialized")

    def detect_anomalies(
        self,
        context: TimeSeries,
        recent_observed: List[float],
        config: ForecastConfig,
        quantile_low: float = 0.05,
        quantile_high: float = 0.95
    ) -> List[AnomalyPoint]:
        """
        Detect anomalies by comparing observations against a forecast.

        A point is considered anomalous when it falls outside the
        [quantile_low, quantile_high] interval of the forecast.

        Args:
            context: historical time series (context)
            recent_observed: recent values to evaluate
            config: forecast configuration
            quantile_low: lower quantile of the interval (default: 0.05)
            quantile_high: upper quantile of the interval (default: 0.95)

        Returns:
            List[AnomalyPoint]: points annotated with an anomaly flag

        Raises:
            ValueError: if the lengths do not match

        Example:
            >>> context = TimeSeries(values=[100, 102, 105])
            >>> recent = [106, 250, 104]  # 250 is an anomaly
            >>> config = ForecastConfig(prediction_length=3)
            >>> anomalies = service.detect_anomalies(context, recent, config)
            >>> anomalies[1].is_anomaly
            True
        """
        logger.info(
            f"Detecting anomalies in {len(recent_observed)} points "
            f"(interval: [{quantile_low}, {quantile_high}])"
        )

        # The evaluated window must match the forecast horizon exactly.
        if len(recent_observed) != config.prediction_length:
            raise ValueError(
                f"recent_observed length ({len(recent_observed)}) must equal "
                f"prediction_length ({config.prediction_length})"
            )

        # Build a config containing the interval bounds plus the median.
        quantiles = sorted(set([quantile_low, 0.5, quantile_high]))
        config_anomaly = ForecastConfig(
            prediction_length=config.prediction_length,
            quantile_levels=quantiles,
            freq=config.freq
        )

        # Build the context DataFrame expected by the model.
        context_df = self.transformer.build_context_df(
            values=context.values,
            timestamps=context.timestamps,
            series_id=context.series_id,
            freq=config.freq
        )

        # Run the forecast.
        pred_df = self.model.predict(
            context_df=context_df,
            prediction_length=config_anomaly.prediction_length,
            quantile_levels=config_anomaly.quantile_levels
        )

        # Parse the raw prediction into median/quantile series.
        result = self.transformer.parse_prediction_result(
            pred_df=pred_df,
            quantile_levels=quantiles
        )

        # Flag anomalies point by point.
        anomalies = []
        # Quantile keys use %.3g formatting (matches the parsed result).
        q_low_key = f"{quantile_low:.3g}"
        q_high_key = f"{quantile_high:.3g}"

        for i, obs in enumerate(recent_observed):
            expected = result["median"][i]
            lower = result["quantiles"][q_low_key][i]
            upper = result["quantiles"][q_high_key][i]

            # Outside the prediction interval -> anomaly.
            is_anom = (obs < lower) or (obs > upper)

            # Approximate z-score: half-width of the interval as a spread
            # proxy; the epsilon guards against division by zero.
            spread = (upper - lower) / 2
            z_score = abs(obs - expected) / (spread + 1e-8) if spread > 0 else 0

            anomalies.append(AnomalyPoint(
                index=i,
                value=obs,
                expected=expected,
                lower_bound=lower,
                upper_bound=upper,
                is_anomaly=is_anom,
                z_score=z_score
            ))

        num_anomalies = sum(1 for a in anomalies if a.is_anomaly)
        logger.info(
            f"Anomaly detection completed: {num_anomalies}/{len(anomalies)} "
            "anomalies detected"
        )

        return anomalies

    def get_anomaly_summary(self, anomalies: List[AnomalyPoint]) -> dict:
        """
        Build a summary of the detected anomalies.

        Args:
            anomalies: list of evaluated points

        Returns:
            Dict with anomaly statistics (counts, rate, severities,
            maximum deviation and maximum |z-score|)
        """
        total = len(anomalies)
        detected = sum(1 for a in anomalies if a.is_anomaly)

        severities = {"low": 0, "medium": 0, "high": 0}
        for a in anomalies:
            if a.is_anomaly and a.severity:
                severities[a.severity] += 1

        return {
            "total_points": total,
            "anomalies_detected": detected,
            "anomaly_rate": (detected / total * 100) if total > 0 else 0,
            "severities": severities,
            "max_deviation": max((a.deviation for a in anomalies), default=0),
            "max_z_score": max((abs(a.z_score) for a in anomalies), default=0)
        }
|
app/domain/services/backtest_service.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Servicio de dominio para backtesting.
|
| 3 |
+
|
| 4 |
+
Este servicio encapsula la lógica de validación de modelos,
|
| 5 |
+
cumpliendo con SRP y DIP.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import List
|
| 11 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 12 |
+
from app.domain.interfaces.data_transformer import IDataTransformer
|
| 13 |
+
from app.domain.models.time_series import TimeSeries
|
| 14 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 15 |
+
from app.utils.logger import setup_logger
|
| 16 |
+
|
| 17 |
+
logger = setup_logger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclass
class BacktestMetrics:
    """
    Evaluation metrics produced by a backtest run.

    Attributes:
        mae: Mean Absolute Error
        mape: Mean Absolute Percentage Error (%)
        rmse: Root Mean Squared Error
        wql: Weighted Quantile Loss (for quantile 0.5)
    """
    mae: float
    mape: float
    rmse: float
    wql: float

    def to_dict(self) -> dict:
        """Serialize the metrics to a plain dictionary."""
        return dict(mae=self.mae, mape=self.mape, rmse=self.rmse, wql=self.wql)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclass
class BacktestResult:
    """
    Complete result of a backtest run.

    Attributes:
        metrics: Evaluation metrics
        forecast: Forecasted values
        actuals: Actual (held-out) values
        timestamps: Timestamps of the test period
    """
    metrics: BacktestMetrics
    forecast: List[float]
    actuals: List[float]
    timestamps: List[str]

    def to_dict(self) -> dict:
        """Serialize the result to a plain dictionary."""
        payload = {"metrics": self.metrics.to_dict()}
        payload["forecast"] = self.forecast
        payload["actuals"] = self.actuals
        payload["timestamps"] = self.timestamps
        return payload
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class BacktestService:
    """
    Domain service for backtesting forecast models.

    Validates a model by splitting a series into train/test partitions,
    forecasting the held-out period from the training part only, and
    comparing the forecast against the actual values.

    Attributes:
        model: Forecasting model (implements IForecastModel)
        transformer: Data transformer (implements IDataTransformer)

    Example:
        >>> service = BacktestService(model, transformer)
        >>> series = TimeSeries(values=[100, 102, 105, 103, 108, 112, 115])
        >>> result = service.simple_backtest(series, test_length=3)
        >>> result.metrics.mae < 5  # good model
        True
    """

    def __init__(
        self,
        model: IForecastModel,
        transformer: IDataTransformer
    ):
        """
        Initialize the service with its dependencies.

        Args:
            model: Implementation of IForecastModel
            transformer: Implementation of IDataTransformer
        """
        self.model = model
        self.transformer = transformer
        logger.info("BacktestService initialized")

    def simple_backtest(
        self,
        series: TimeSeries,
        test_length: int,
        config: ForecastConfig = None
    ) -> BacktestResult:
        """
        Run a simple train/test-split backtest.

        The last ``test_length`` points of the series are held out; the
        model forecasts them from the preceding history and metrics are
        computed against the held-out actuals.

        Args:
            series: Full time series
            test_length: Number of points reserved for test (end of series)
            config: Optional forecast configuration. Only its ``freq`` is
                consumed here; the object is treated as read-only and is
                NOT mutated.

        Returns:
            BacktestResult: Metrics plus forecast/actual values

        Raises:
            ValueError: If test_length < 1 or test_length >= series length

        Example:
            >>> series = TimeSeries(values=[100, 102, 105, 103, 108])
            >>> result = service.simple_backtest(series, test_length=2)
            >>> len(result.forecast)
            2
        """
        logger.info(
            f"Running simple backtest for series '{series.series_id}' "
            f"(total_length={series.length}, test_length={test_length})"
        )

        # Validate the requested split.
        if test_length >= series.length:
            raise ValueError(
                f"test_length ({test_length}) debe ser menor que "
                f"la longitud de la serie ({series.length})"
            )
        if test_length < 1:
            raise ValueError(f"test_length debe ser >= 1, recibido: {test_length}")

        # BUG FIX: the previous implementation wrote test_length into the
        # caller-provided config (``config.prediction_length = test_length``),
        # silently mutating an object the caller may reuse elsewhere. Only
        # the frequency is actually needed here (the predict call below uses
        # test_length and quantile [0.5] directly), so read it without
        # mutating or constructing a throwaway ForecastConfig.
        freq = series.freq if config is None else config.freq

        # Train/test split: the final `test_length` points are held out.
        train_length = series.length - test_length
        train_series = series.get_subset(0, train_length)
        test_values = series.values[train_length:]

        logger.debug(f"Train length: {train_length}, Test length: {test_length}")

        # Build the model context from the training portion only.
        context_df = self.transformer.build_context_df(
            values=train_series.values,
            timestamps=train_series.timestamps,
            series_id=series.series_id,
            freq=freq
        )

        # Forecast the held-out horizon; only the median is needed for metrics.
        pred_df = self.model.predict(
            context_df=context_df,
            prediction_length=test_length,
            quantile_levels=[0.5]
        )

        result = self.transformer.parse_prediction_result(
            pred_df=pred_df,
            quantile_levels=[0.5]
        )

        forecast = np.array(result["median"], dtype=float)
        actuals = np.array(test_values, dtype=float)

        metrics = self._calculate_metrics(forecast, actuals)

        logger.info(
            f"Backtest completed: MAE={metrics.mae:.2f}, "
            f"MAPE={metrics.mape:.2f}%, RMSE={metrics.rmse:.2f}"
        )

        return BacktestResult(
            metrics=metrics,
            forecast=forecast.tolist(),
            actuals=actuals.tolist(),
            timestamps=result["timestamps"]
        )

    def _calculate_metrics(
        self,
        forecast: np.ndarray,
        actuals: np.ndarray
    ) -> BacktestMetrics:
        """
        Compute evaluation metrics for a forecast against actuals.

        Args:
            forecast: Forecasted values
            actuals: Actual observed values

        Returns:
            BacktestMetrics: MAE, MAPE, RMSE and WQL
        """
        # MAE: Mean Absolute Error
        mae = float(np.mean(np.abs(actuals - forecast)))

        # MAPE with a small epsilon to avoid division by zero; note this
        # is numerically unstable when actuals are near zero.
        eps = 1e-8
        mape = float(np.mean(np.abs((actuals - forecast) / (actuals + eps)))) * 100.0

        # RMSE: Root Mean Squared Error
        rmse = float(np.sqrt(np.mean((actuals - forecast) ** 2)))

        # WQL: pinball (quantile) loss at tau = 0.5, which equals MAE / 2.
        tau = 0.5
        diff = actuals - forecast
        wql = float(np.mean(np.maximum(tau * diff, (tau - 1) * diff)))

        return BacktestMetrics(
            mae=mae,
            mape=mape,
            rmse=rmse,
            wql=wql
        )
|
app/domain/services/forecast_service.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Servicio de dominio para forecasting.
|
| 3 |
+
|
| 4 |
+
Este servicio orquesta la lógica de negocio de forecasting,
|
| 5 |
+
cumpliendo con SRP y DIP.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List
|
| 9 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 10 |
+
from app.domain.interfaces.data_transformer import IDataTransformer
|
| 11 |
+
from app.domain.models.time_series import TimeSeries
|
| 12 |
+
from app.domain.models.forecast_config import ForecastConfig
|
| 13 |
+
from app.domain.models.forecast_result import ForecastResult
|
| 14 |
+
from app.utils.logger import setup_logger
|
| 15 |
+
|
| 16 |
+
logger = setup_logger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ForecastService:
    """
    Domain service for forecasting operations.

    Encapsulates the business logic needed to produce forecasts while
    depending only on abstractions (IForecastModel, IDataTransformer)
    rather than concrete implementations (DIP).

    Attributes:
        model: Forecasting model (implements IForecastModel)
        transformer: Data transformer (implements IDataTransformer)

    Example:
        >>> from app.infrastructure.ml.chronos_model import ChronosModel
        >>> from app.utils.dataframe_builder import DataFrameBuilder
        >>>
        >>> model = ChronosModel("amazon/chronos-2")
        >>> transformer = DataFrameBuilder()
        >>> service = ForecastService(model, transformer)
        >>>
        >>> series = TimeSeries(values=[100, 102, 105])
        >>> config = ForecastConfig(prediction_length=3)
        >>> result = service.forecast_univariate(series, config)
    """

    def __init__(
        self,
        model: IForecastModel,
        transformer: IDataTransformer
    ):
        """
        Initialize the service with its dependencies.

        Args:
            model: Implementation of IForecastModel
            transformer: Implementation of IDataTransformer
        """
        self.model = model
        self.transformer = transformer

        info = self.model.get_model_info()
        logger.info(
            f"ForecastService initialized with model: {info.get('type', 'unknown')}"
        )

    def forecast_univariate(
        self,
        series: TimeSeries,
        config: ForecastConfig
    ) -> ForecastResult:
        """
        Produce a forecast for a single (univariate) series.

        Args:
            series: Time series to forecast
            config: Forecast configuration

        Returns:
            ForecastResult: Result holding the forecasted values

        Raises:
            ValueError: If the series or configuration is invalid
            RuntimeError: If the model fails during prediction

        Example:
            >>> series = TimeSeries(values=[100, 102, 105, 103, 108])
            >>> config = ForecastConfig(prediction_length=3)
            >>> result = service.forecast_univariate(series, config)
            >>> len(result.median)
            3
        """
        logger.info(
            f"Forecasting univariate series '{series.series_id}' "
            f"(length={series.length}, horizon={config.prediction_length})"
        )

        # Fail fast on invalid input before touching the model.
        series.validate()
        config.validate()

        # Convert the domain series into the model's DataFrame format.
        history_df = self.transformer.build_context_df(
            values=series.values,
            timestamps=series.timestamps,
            series_id=series.series_id,
            freq=config.freq
        )

        logger.debug(f"Context DataFrame shape: {history_df.shape}")

        self.model.validate_context(history_df)

        # Run inference, wrapping low-level failures in a RuntimeError.
        try:
            prediction_df = self.model.predict(
                context_df=history_df,
                prediction_length=config.prediction_length,
                quantile_levels=config.quantile_levels
            )
        except Exception as e:
            logger.error(f"Model prediction failed: {e}", exc_info=True)
            raise RuntimeError(f"Error al predecir: {e}") from e

        logger.debug(f"Prediction DataFrame shape: {prediction_df.shape}")

        # Translate the raw prediction back into domain terms.
        parsed = self.transformer.parse_prediction_result(
            pred_df=prediction_df,
            quantile_levels=config.quantile_levels
        )

        forecast = ForecastResult(
            timestamps=parsed["timestamps"],
            median=parsed["median"],
            quantiles=parsed["quantiles"],
            series_id=series.series_id,
            metadata={
                "prediction_length": config.prediction_length,
                "quantile_levels": config.quantile_levels,
                "freq": config.freq,
                "model": self.model.get_model_info()
            }
        )

        logger.info(
            f"Forecast completed: {forecast.length} periods generated "
            f"for series '{series.series_id}'"
        )

        return forecast

    def forecast_multi_series(
        self,
        series_list: List[TimeSeries],
        config: ForecastConfig
    ) -> List[ForecastResult]:
        """
        Produce forecasts for several series under one shared configuration.

        Args:
            series_list: Time series to forecast
            config: Forecast configuration (applied to every series)

        Returns:
            List[ForecastResult]: One result per input series

        Raises:
            ValueError: If series_list is empty

        Example:
            >>> series1 = TimeSeries(values=[100, 102], series_id="A")
            >>> series2 = TimeSeries(values=[200, 205], series_id="B")
            >>> results = service.forecast_multi_series([series1, series2], config)
            >>> len(results)
            2
        """
        logger.info(f"Forecasting {len(series_list)} series")

        if not series_list:
            raise ValueError("series_list no puede estar vacía")

        outputs: List[ForecastResult] = []
        total = len(series_list)
        for position, current in enumerate(series_list, start=1):
            logger.debug(f"Processing series {position}/{total}: {current.series_id}")

            try:
                outputs.append(self.forecast_univariate(current, config))
            except Exception as e:
                logger.error(
                    f"Failed to forecast series '{current.series_id}': {e}",
                    exc_info=True
                )
                raise

        logger.info(f"Multi-series forecast completed: {len(outputs)} series processed")
        return outputs
|
app/infrastructure/__init__.py
ADDED
|
File without changes
|
app/infrastructure/config/__init__.py
ADDED
|
File without changes
|
app/infrastructure/config/settings.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Configuración centralizada del proyecto usando Pydantic Settings.
|
| 3 |
+
|
| 4 |
+
Este módulo implementa el patrón Singleton para la configuración,
|
| 5 |
+
cumpliendo con el principio SRP (Single Responsibility Principle).
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List
|
| 9 |
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Settings(BaseSettings):
    """
    Centralized application configuration.

    All values are loaded from environment variables (or a local ``.env``
    file, see ``model_config`` below) with the defaults given here, which
    makes it easy to configure the app per environment (dev, staging,
    production).
    """

    # API metadata / server port
    api_title: str = "Chronos-2 Forecasting API"
    api_version: str = "3.0.0"
    api_description: str = (
        "API de pronósticos con Chronos-2 + Excel Add-in. "
        "Refactorizado con Clean Architecture y principios SOLID."
    )
    api_port: int = 8000

    # Model configuration: HuggingFace model id and inference device (cpu/cuda)
    model_id: str = "amazon/chronos-2"
    device_map: str = "cpu"

    # CORS configuration
    cors_origins: List[str] = [
        "https://localhost:3000",
        "https://localhost:3001",
        "https://ttzzs-chronos2-excel-forecasting-api.hf.space",
        "*"  # Allow all origins for Office Add-ins.
             # NOTE(review): "*" makes the explicit entries above redundant and
             # typically disables credentialed CORS — confirm this is intended.
    ]

    # Logging level and record format
    log_level: str = "INFO"
    log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    # Directory served as static files
    static_dir: str = "static"

    # Pydantic settings behavior: read ".env", case-insensitive env names,
    # silently ignore unknown variables.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
        extra="ignore"
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Singleton instance
|
| 58 |
+
_settings_instance = None  # Lazily-created Settings singleton; managed by get_settings()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_settings() -> Settings:
    """
    Return the singleton Settings instance, creating it on first use.

    Returns:
        Settings: The shared configuration object
    """
    global _settings_instance
    if _settings_instance is not None:
        return _settings_instance
    _settings_instance = Settings()
    return _settings_instance
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Exportar instancia por defecto para uso directo
|
| 75 |
+
# Default module-level instance for direct use (``from ...settings import settings``)
settings = get_settings()
|
app/infrastructure/ml/__init__.py
ADDED
|
File without changes
|
app/infrastructure/ml/chronos_model.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementación concreta del modelo Chronos-2.
|
| 3 |
+
|
| 4 |
+
Este módulo implementa la interfaz IForecastModel usando Chronos2Pipeline,
|
| 5 |
+
aplicando el principio DIP (Dependency Inversion Principle).
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List, Dict, Any
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from chronos import Chronos2Pipeline
|
| 11 |
+
|
| 12 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 13 |
+
from app.utils.logger import setup_logger
|
| 14 |
+
|
| 15 |
+
logger = setup_logger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ChronosModel(IForecastModel):
    """
    Concrete IForecastModel implementation backed by Chronos-2.

    Because callers depend on the IForecastModel interface (DIP), this
    class can be swapped for another implementation (Prophet, ARIMA, ...)
    without modifying the rest of the codebase.

    Attributes:
        model_id: HuggingFace model identifier
        device_map: Inference device ("cpu" or "cuda")
        pipeline: Loaded Chronos2Pipeline instance
    """

    def __init__(self, model_id: str = "amazon/chronos-2", device_map: str = "cpu"):
        """
        Load the Chronos-2 pipeline.

        Args:
            model_id: HuggingFace model identifier
            device_map: Inference device ("cpu" or "cuda")

        Raises:
            Exception: Propagates any failure from
                Chronos2Pipeline.from_pretrained (e.g. download errors).
        """
        self.model_id = model_id
        self.device_map = device_map

        logger.info(f"Loading Chronos model: {model_id} on {device_map}")

        try:
            self.pipeline = Chronos2Pipeline.from_pretrained(
                model_id,
                device_map=device_map
            )
            logger.info("Chronos model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load Chronos model: {e}")
            raise

    def predict(
        self,
        context_df: pd.DataFrame,
        prediction_length: int,
        quantile_levels: List[float],
        **kwargs
    ) -> pd.DataFrame:
        """
        Generate probabilistic forecasts with Chronos-2.

        Args:
            context_df: DataFrame with columns [id, timestamp, target]
            prediction_length: Forecast horizon (number of steps)
            quantile_levels: Quantiles to compute (e.g. [0.1, 0.5, 0.9])
            **kwargs: Extra arguments forwarded to the pipeline

        Returns:
            DataFrame with forecasts and quantiles, sorted by (id, timestamp)

        Raises:
            ValueError: If context_df lacks the required columns
            RuntimeError: If inference fails
        """
        logger.debug(
            f"Predicting {prediction_length} steps with "
            f"{len(quantile_levels)} quantiles"
        )

        # Validate the DataFrame format before handing it to the pipeline.
        required_cols = {"id", "timestamp", "target"}
        if not required_cols.issubset(context_df.columns):
            raise ValueError(
                f"context_df debe tener columnas: {required_cols}. "
                f"Encontradas: {set(context_df.columns)}"
            )

        try:
            # Run inference.
            # NOTE(review): the keyword is `target=` while the others are
            # `id_column=`/`timestamp_column=` — confirm against the chronos
            # `predict_df` API that this asymmetric name is correct.
            pred_df = self.pipeline.predict_df(
                context_df,
                prediction_length=prediction_length,
                quantile_levels=quantile_levels,
                id_column="id",
                timestamp_column="timestamp",
                target="target",
                **kwargs
            )

            # Sort so each series' horizon is in chronological order.
            result = pred_df.sort_values(["id", "timestamp"])

            logger.debug(f"Prediction completed: {len(result)} rows")
            return result

        except Exception as e:
            logger.error(f"Prediction failed: {e}")
            raise RuntimeError(f"Error en predicción: {e}") from e

    def get_model_info(self) -> Dict[str, Any]:
        """
        Return static metadata describing this model instance.

        Returns:
            Dict with "type", "model_id", "device", "provider", "version" keys
        """
        return {
            "type": "Chronos2",
            "model_id": self.model_id,
            "device": self.device_map,
            "provider": "Amazon",
            "version": "2.0"
        }

    def __repr__(self) -> str:
        # Debug-friendly representation, e.g. used by the model factory's logs.
        return f"ChronosModel(model_id='{self.model_id}', device='{self.device_map}')"
|
app/infrastructure/ml/model_factory.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Factory para crear modelos de forecasting.
|
| 3 |
+
|
| 4 |
+
Este módulo implementa el patrón Factory aplicando OCP
|
| 5 |
+
(Open/Closed Principle) - abierto para extensión, cerrado para modificación.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Dict, Type, List
|
| 9 |
+
|
| 10 |
+
from app.domain.interfaces.forecast_model import IForecastModel
|
| 11 |
+
from app.infrastructure.ml.chronos_model import ChronosModel
|
| 12 |
+
from app.utils.logger import setup_logger
|
| 13 |
+
|
| 14 |
+
logger = setup_logger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ModelFactory:
    """
    Factory for creating forecasting models.

    New model types can be added without touching existing code (OCP):
    either extend the registry below or call :meth:`register_model` at
    runtime.

    Usage:
        >>> model = ModelFactory.create("chronos2", model_id="amazon/chronos-2")
        >>> # Future: model = ModelFactory.create("prophet", ...)
    """

    # Registry mapping type name -> model class.
    _models: Dict[str, Type[IForecastModel]] = {
        "chronos2": ChronosModel,
        # Future entries can be added here without modifying existing code:
        # "prophet": ProphetModel,
        # "arima": ARIMAModel,
        # "custom": CustomModel,
    }

    @classmethod
    def create(
        cls,
        model_type: str,
        **kwargs
    ) -> IForecastModel:
        """
        Instantiate a forecasting model by type name.

        Args:
            model_type: Model type ("chronos2", "prophet", ...)
            **kwargs: Model-specific constructor arguments

        Returns:
            An IForecastModel instance

        Raises:
            ValueError: If the model type is not registered

        Example:
            >>> model = ModelFactory.create(
            ...     "chronos2",
            ...     model_id="amazon/chronos-2",
            ...     device_map="cpu"
            ... )
        """
        if model_type not in cls._models:
            available = ", ".join(cls._models.keys())
            raise ValueError(
                f"Unknown model type: '{model_type}'. "
                f"Available: {available}"
            )

        registered = cls._models[model_type]
        logger.info(f"Creating model: {model_type}")

        try:
            instance = registered(**kwargs)
            logger.info(f"Model created: {instance}")
            return instance
        except Exception as e:
            logger.error(f"Failed to create model '{model_type}': {e}")
            raise

    @classmethod
    def register_model(
        cls,
        name: str,
        model_class: Type[IForecastModel]
    ) -> None:
        """
        Register a new model type at runtime (OCP extension point).

        Args:
            name: Registry name for the model
            model_class: Class implementing IForecastModel

        Raises:
            TypeError: If model_class does not implement IForecastModel
            ValueError: If the name is already registered

        Example:
            >>> class MyCustomModel(IForecastModel):
            ...     pass
            >>> ModelFactory.register_model("custom", MyCustomModel)
        """
        # Reject classes that do not implement the interface.
        if not issubclass(model_class, IForecastModel):
            raise TypeError(
                f"{model_class.__name__} debe implementar IForecastModel"
            )

        # Reject duplicate names.
        if name in cls._models:
            raise ValueError(
                f"Model '{name}' ya está registrado. "
                f"Use un nombre diferente o llame a unregister_model primero."
            )

        cls._models[name] = model_class
        logger.info(f"Registered new model: {name} -> {model_class.__name__}")

    @classmethod
    def unregister_model(cls, name: str) -> None:
        """
        Remove a model type from the registry.

        Args:
            name: Registry name to remove

        Raises:
            ValueError: If the model is not registered
        """
        if name not in cls._models:
            raise ValueError(f"Model '{name}' no está registrado")

        cls._models.pop(name)
        logger.info(f"Unregistered model: {name}")

    @classmethod
    def list_available_models(cls) -> List[str]:
        """
        List every registered model type.

        Returns:
            List of registry names
        """
        return [*cls._models]

    @classmethod
    def get_model_info(cls, model_type: str) -> Dict[str, str]:
        """
        Describe a registered model type.

        Args:
            model_type: Registry name of the model type

        Returns:
            Dict with the registry name plus the class and module names

        Raises:
            ValueError: If the model is not registered
        """
        if model_type not in cls._models:
            raise ValueError(f"Model '{model_type}' no está registrado")

        registered = cls._models[model_type]
        return {
            "name": model_type,
            "class": registered.__name__,
            "module": registered.__module__
        }
|
app/main_from_hf_space.py
ADDED
|
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List, Dict, Optional
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from fastapi import FastAPI, HTTPException
|
| 8 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 9 |
+
from pydantic import BaseModel, Field
|
| 10 |
+
from huggingface_hub import InferenceClient
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# =========================
# Configuration
# =========================

# Hugging Face access token; without it no inference client can be built.
HF_TOKEN = os.getenv("HF_TOKEN")
# Chronos checkpoint queried through the Inference API; overridable via env.
MODEL_ID = os.getenv("CHRONOS_MODEL_ID", "amazon/chronos-t5-large")

app = FastAPI(
    title="Chronos-2 Forecasting API (HF Inference)",
    description=(
        "API de pronósticos usando Chronos-2 via Hugging Face Inference API. "
        "Compatible con Excel Add-in."
    ),
    version="1.0.0",
)

# Configure CORS so browser clients (e.g. the Excel add-in) can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # in production, restrict to the allowed domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# HF Inference client. When the token is missing we keep `client = None` so
# the app still imports and endpoints can answer with a clear 503 instead of
# crashing at startup.
if not HF_TOKEN:
    print("⚠️ WARNING: HF_TOKEN no configurado. La API puede no funcionar correctamente.")
    print(" Configura HF_TOKEN en las variables de entorno del Space.")
    client = None
else:
    client = InferenceClient(token=HF_TOKEN)
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# =========================
|
| 48 |
+
# Modelos Pydantic
|
| 49 |
+
# =========================
|
| 50 |
+
|
| 51 |
+
class UnivariateSeries(BaseModel):
    """A single time series: raw observations, oldest first."""
    values: List[float]


class ForecastUnivariateRequest(BaseModel):
    """Request payload for the /forecast_univariate endpoint."""
    series: UnivariateSeries
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal (D, W, M, etc.)")


class ForecastUnivariateResponse(BaseModel):
    """Forecast result: point (median) forecast plus quantile bands."""
    # Synthetic step labels ("t+1", "t+2", ...) — not calendar dates.
    timestamps: List[str]
    median: List[float]
    # Keys are quantile levels as strings (e.g. "0.1"), values are the paths.
    quantiles: Dict[str, List[float]]


class AnomalyDetectionRequest(BaseModel):
    """Request payload for /detect_anomalies."""
    # Historical context used to produce the reference forecast.
    context: UnivariateSeries
    # Actually observed values for the forecast window; must have
    # exactly `prediction_length` entries (validated by the endpoint).
    recent_observed: List[float]
    prediction_length: int = 7
    quantile_low: float = 0.05
    quantile_high: float = 0.95


class AnomalyPoint(BaseModel):
    """Verdict for one observed point against the forecast band."""
    index: int
    value: float
    predicted_median: float
    lower: float
    upper: float
    is_anomaly: bool


class AnomalyDetectionResponse(BaseModel):
    """Response for /detect_anomalies: one entry per evaluated point."""
    anomalies: List[AnomalyPoint]


class BacktestRequest(BaseModel):
    """Request payload for /backtest_simple (hold-out evaluation)."""
    series: UnivariateSeries
    prediction_length: int = 7
    # Number of trailing points held out as the test set.
    test_length: int = 28


class BacktestMetrics(BaseModel):
    """Point-forecast accuracy metrics on the hold-out window."""
    mae: float
    mape: float
    rmse: float


class BacktestResponse(BaseModel):
    """Response for /backtest_simple: metrics plus forecast vs actuals."""
    metrics: BacktestMetrics
    forecast_median: List[float]
    forecast_timestamps: List[str]
    actuals: List[float]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Models for Multi-Series forecasting
class MultiSeriesItem(BaseModel):
    """One named series within a multi-series request."""
    series_id: str
    values: List[float]


class ForecastMultiIdRequest(BaseModel):
    """Request payload for /forecast_multi_id (independent series)."""
    series_list: List[MultiSeriesItem]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal (D, W, M, etc.)")


class ForecastMultiIdResponse(BaseModel):
    """One forecast per input series, in request order."""
    forecasts: List[ForecastUnivariateResponse]
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# Models for Covariates
class CovariateData(BaseModel):
    """A named exogenous variable aligned with the target series."""
    values: List[float]
    name: str = Field(..., description="Nombre de la covariable")


class ForecastWithCovariatesRequest(BaseModel):
    """Request payload for /forecast_with_covariates."""
    target_series: UnivariateSeries
    # Must be the same length as target_series (validated by the endpoint).
    covariates_history: List[CovariateData]
    # Must be prediction_length long (validated by the endpoint).
    covariates_future: List[CovariateData]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Models for Scenarios
class ScenarioData(BaseModel):
    """A single "what-if" scenario: a set of future covariate paths."""
    scenario_name: str
    # Maps covariate name -> its assumed future values.
    covariate_values: Dict[str, List[float]]


class GenerateScenariosRequest(BaseModel):
    """Request payload for /generate_scenarios."""
    target_series: UnivariateSeries
    scenarios: List[ScenarioData]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    freq: str = Field("D", description="Frecuencia temporal")


class ScenarioForecast(BaseModel):
    """Forecast produced under one scenario's assumptions."""
    scenario_name: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]


class GenerateScenariosResponse(BaseModel):
    """One forecast per requested scenario, in request order."""
    scenarios: List[ScenarioForecast]
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# Models for Multivariate forecasting
class MultivariateSeries(BaseModel):
    """One component series of a multivariate request."""
    series_name: str
    values: List[float]


class ForecastMultivariateRequest(BaseModel):
    """Request payload for /forecast_multivariate."""
    series_list: List[MultivariateSeries]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal")


class MultivariateForecast(BaseModel):
    """Forecast for one component series."""
    series_name: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]


class ForecastMultivariateResponse(BaseModel):
    """One forecast per component series, in request order."""
    forecasts: List[MultivariateForecast]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# =========================
|
| 201 |
+
# Función auxiliar para llamar a HF Inference
|
| 202 |
+
# =========================
|
| 203 |
+
|
| 204 |
+
def call_chronos_inference(series: List[float], prediction_length: int) -> Dict:
    """Call the Hugging Face Inference API for the configured Chronos model.

    Args:
        series: Historical values of the time series, oldest first.
        prediction_length: Number of future steps to forecast.

    Returns:
        The raw JSON payload returned by the inference endpoint.

    Raises:
        HTTPException: 503 when no token is configured or the model is still
            loading, 504 on timeout, the upstream status code for other API
            errors, and 500 for unexpected failures.
    """
    if client is None:
        raise HTTPException(
            status_code=503,
            detail="HF_TOKEN no configurado. Contacta al administrador del servicio."
        )

    # Local import keeps the module importable even if requests is missing;
    # hoisted out of the try so an ImportError is not masked as a 500 payload
    # error.
    import requests

    url = f"https://router.huggingface.co/hf-inference/models/{MODEL_ID}"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    payload = {
        "inputs": series,
        "parameters": {
            "prediction_length": prediction_length,
            "num_samples": 100  # multiple samples so quantiles can be derived
        }
    }

    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)

        if response.status_code == 503:
            raise HTTPException(
                status_code=503,
                detail="El modelo está cargando. Por favor, intenta de nuevo en 30-60 segundos."
            )
        elif response.status_code != 200:
            raise HTTPException(
                status_code=response.status_code,
                detail=f"Error de la API de HuggingFace: {response.text}"
            )

        return response.json()

    except HTTPException:
        # BUG FIX: the blanket `except Exception` below used to swallow the
        # HTTPExceptions raised above and re-raise them as generic 500s,
        # destroying the intended status codes. Propagate them unchanged.
        raise
    except requests.exceptions.Timeout:
        raise HTTPException(
            status_code=504,
            detail="Timeout al comunicarse con HuggingFace API. El modelo puede estar cargando."
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error inesperado: {str(e)}"
        )
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def process_chronos_output(raw_output: Dict, prediction_length: int) -> Dict:
    """Normalize raw Chronos output into ``{"median": [...], "quantiles": {...}}``.

    The inference API may answer with a bare list of point forecasts or with a
    richer ``{"forecast": {...}}`` structure; both are coerced into one shape.
    """
    # Bare list of values: treat it as the point forecast and reuse it for
    # every quantile, since no uncertainty information is available.
    if isinstance(raw_output, list):
        point = raw_output[:prediction_length]
        return {
            "median": point,
            "quantiles": {"0.1": point, "0.5": point, "0.9": point},
        }

    # Structured payload: take the median (falling back to the mean, then to
    # zeros) and pass through whatever quantiles the service reported.
    if "forecast" in raw_output:
        forecast = raw_output["forecast"]
        if "median" in forecast:
            point = forecast["median"][:prediction_length]
        else:
            point = forecast.get("mean", [0] * prediction_length)[:prediction_length]
        return {"median": point, "quantiles": forecast.get("quantiles", {})}

    # Unknown shape: emit an all-zero forecast rather than failing.
    zeros = [0] * prediction_length
    return {
        "median": zeros,
        "quantiles": {"0.1": zeros, "0.5": zeros, "0.9": zeros},
    }
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# =========================
|
| 303 |
+
# Endpoints
|
| 304 |
+
# =========================
|
| 305 |
+
|
| 306 |
+
@app.get("/")
def root():
    """Landing route: basic metadata about the running service."""
    service_info = {
        "name": "Chronos-2 Forecasting API",
        "version": "1.0.0",
        "model": MODEL_ID,
        "status": "running",
    }
    # Pointers to the interactive docs and the health probe.
    service_info["docs"] = "/docs"
    service_info["health"] = "/health"
    return service_info
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
@app.get("/health")
def health():
    """Health probe: reports whether the HF token is usable."""
    configured = HF_TOKEN is not None
    # Truthiness (not just presence) drives readiness: an empty-string token
    # is treated as not ready, matching the startup check.
    ready = bool(HF_TOKEN)
    return {
        "status": "ok" if ready else "warning",
        "model_id": MODEL_ID,
        "hf_token_configured": configured,
        "message": "Ready" if ready else "HF_TOKEN not configured",
    }
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@app.post("/forecast_univariate", response_model=ForecastUnivariateResponse)
def forecast_univariate(req: ForecastUnivariateRequest):
    """Forecast a single univariate time series.

    Compatible with the Excel add-in.
    """
    history = req.series.values

    # Guard clauses: reject empty or too-short histories up front.
    if not history:
        raise HTTPException(status_code=400, detail="La serie no puede estar vacía.")
    if len(history) < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie debe tener al menos 3 puntos históricos."
        )

    horizon = req.prediction_length
    # Query HF and coerce the payload into the common median/quantiles shape.
    processed = process_chronos_output(
        call_chronos_inference(history, horizon),
        horizon,
    )

    return ForecastUnivariateResponse(
        timestamps=[f"t+{step}" for step in range(1, horizon + 1)],
        median=processed["median"],
        quantiles=processed["quantiles"],
    )
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
@app.post("/detect_anomalies", response_model=AnomalyDetectionResponse)
def detect_anomalies(req: AnomalyDetectionRequest):
    """Flag observed points falling outside the forecast quantile band."""
    if not req.context.values:
        raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")

    if len(req.recent_observed) != req.prediction_length:
        raise HTTPException(
            status_code=400,
            detail="recent_observed debe tener la misma longitud que prediction_length."
        )

    # Forecast the same window that was actually observed.
    processed = process_chronos_output(
        call_chronos_inference(req.context.values, req.prediction_length),
        req.prediction_length,
    )

    median = processed["median"]
    # Fall back to the median itself when the requested quantiles are absent.
    band_low = processed["quantiles"].get(str(req.quantile_low), median)
    band_high = processed["quantiles"].get(str(req.quantile_high), median)

    results: List[AnomalyPoint] = []
    for idx, observed in enumerate(req.recent_observed):
        if idx >= len(median):
            continue  # no prediction available for this position
        predicted = median[idx]
        # Last-resort heuristic band of ±20% around the median.
        lo = band_low[idx] if idx < len(band_low) else predicted * 0.8
        hi = band_high[idx] if idx < len(band_high) else predicted * 1.2
        results.append(
            AnomalyPoint(
                index=idx,
                value=observed,
                predicted_median=predicted,
                lower=lo,
                upper=hi,
                is_anomaly=(observed < lo) or (observed > hi),
            )
        )

    return AnomalyDetectionResponse(anomalies=results)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
@app.post("/backtest_simple", response_model=BacktestResponse)
def backtest_simple(req: BacktestRequest):
    """Hold-out backtest: train on the head of the series, score on the tail."""
    series = np.asarray(req.series.values, dtype=float)
    total = len(series)

    if total <= req.test_length:
        raise HTTPException(
            status_code=400,
            detail="La serie debe ser más larga que test_length."
        )

    # Split: everything before the hold-out window is training context.
    split = total - req.test_length
    train_part = series[:split].tolist()
    test_part = series[split:].tolist()

    processed = process_chronos_output(
        call_chronos_inference(train_part, req.test_length),
        req.test_length,
    )

    predicted = np.asarray(processed["median"], dtype=float)
    actual = np.asarray(test_part, dtype=float)
    residual = actual - predicted

    # Point-forecast accuracy metrics.
    mae = float(np.mean(np.abs(residual)))
    rmse = float(np.sqrt(np.mean(residual ** 2)))
    eps = 1e-8  # guards against division by zero in MAPE
    mape = float(np.mean(np.abs(residual / (actual + eps)))) * 100.0

    return BacktestResponse(
        metrics=BacktestMetrics(mae=mae, mape=mape, rmse=rmse),
        forecast_median=predicted.tolist(),
        forecast_timestamps=[f"test_t{step}" for step in range(1, req.test_length + 1)],
        actuals=test_part,
    )
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
# =========================
|
| 459 |
+
# Endpoints simplificados para testing
|
| 460 |
+
# =========================
|
| 461 |
+
|
| 462 |
+
@app.post("/simple_forecast")
def simple_forecast(series: List[float], prediction_length: int = 7):
    """Minimal forecasting endpoint intended for quick manual testing."""
    if not series:
        raise HTTPException(status_code=400, detail="Serie vacía")

    processed = process_chronos_output(
        call_chronos_inference(series, prediction_length),
        prediction_length,
    )

    return {
        "input_series": series,
        "prediction_length": prediction_length,
        "forecast": processed["median"],
        "model": MODEL_ID,
    }
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
# =========================
|
| 482 |
+
# NUEVOS ENDPOINTS IMPLEMENTADOS
|
| 483 |
+
# =========================
|
| 484 |
+
|
| 485 |
+
@app.post("/forecast_multi_id", response_model=ForecastMultiIdResponse)
def forecast_multi_id(req: ForecastMultiIdRequest):
    """Forecast several independent time series, one Chronos call per series.

    Useful for multiple products, locations, etc.
    """
    if not req.series_list:
        raise HTTPException(status_code=400, detail="La lista de series no puede estar vacía.")

    horizon = req.prediction_length
    # Identical synthetic step labels for every series at this horizon.
    step_labels = [f"t+{step}" for step in range(1, horizon + 1)]
    results = []

    for item in req.series_list:
        if len(item.values) < 3:
            raise HTTPException(
                status_code=400,
                detail=f"La serie '{item.series_id}' debe tener al menos 3 puntos."
            )

        processed = process_chronos_output(
            call_chronos_inference(item.values, horizon),
            horizon,
        )
        results.append(
            ForecastUnivariateResponse(
                timestamps=step_labels,
                median=processed["median"],
                quantiles=processed["quantiles"],
            )
        )

    return ForecastMultiIdResponse(forecasts=results)
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
@app.post("/forecast_with_covariates")
def forecast_with_covariates(req: ForecastWithCovariatesRequest):
    """Forecast with exogenous covariates (approximation).

    NOTE: Chronos-2 is a purely univariate model. The covariates are validated
    and echoed back for reference, but the forecast itself is driven only by
    the target series. For real covariate-aware forecasting consider TimesFM,
    Temporal Fusion Transformer, or Prophet.
    """
    target = req.target_series.values

    if len(target) < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie objetivo debe tener al menos 3 puntos."
        )

    # Historical covariates must align 1:1 with the target history.
    for cov in req.covariates_history:
        if len(cov.values) != len(target):
            raise HTTPException(
                status_code=400,
                detail=f"La covariable '{cov.name}' debe tener la misma longitud que la serie objetivo."
            )

    # Future covariates must cover exactly the forecast horizon.
    for cov in req.covariates_future:
        if len(cov.values) != req.prediction_length:
            raise HTTPException(
                status_code=400,
                detail=f"La covariable futura '{cov.name}' debe tener longitud = prediction_length."
            )

    # APPROXIMATION: only the target series reaches the model.
    processed = process_chronos_output(
        call_chronos_inference(target, req.prediction_length),
        req.prediction_length,
    )

    return {
        "timestamps": [f"t+{step}" for step in range(1, req.prediction_length + 1)],
        "median": processed["median"],
        "quantiles": processed["quantiles"],
        "note": "Chronos-2 no usa covariables nativamente. Este forecast se basa solo en la serie objetivo.",
        "covariates_used": [cov.name for cov in req.covariates_history],
        "covariates_future": [cov.name for cov in req.covariates_future],
    }
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
@app.post("/generate_scenarios", response_model=GenerateScenariosResponse)
def generate_scenarios(req: GenerateScenariosRequest):
    """Produce one forecast per "what-if" scenario.

    NOTE: because Chronos-2 does not condition on covariates, every scenario
    currently shares the same base forecast. The endpoint exists for API
    compatibility with covariate-aware models, where scenario.covariate_values
    would drive genuinely different forecasts.
    """
    target = req.target_series.values

    if len(target) < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie objetivo debe tener al menos 3 puntos."
        )
    if not req.scenarios:
        raise HTTPException(
            status_code=400,
            detail="Debe proporcionar al menos un escenario."
        )

    # Single base forecast shared by every scenario (see docstring).
    processed = process_chronos_output(
        call_chronos_inference(target, req.prediction_length),
        req.prediction_length,
    )
    labels = [f"t+{step}" for step in range(1, req.prediction_length + 1)]

    return GenerateScenariosResponse(
        scenarios=[
            ScenarioForecast(
                scenario_name=scenario.scenario_name,
                timestamps=labels,
                median=processed["median"],
                quantiles=processed["quantiles"],
            )
            for scenario in req.scenarios
        ]
    )
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
@app.post("/forecast_multivariate", response_model=ForecastMultivariateResponse)
def forecast_multivariate(req: ForecastMultivariateRequest):
    """Forecast several related series, each processed independently.

    NOTE: Chronos-2 is fundamentally univariate, so cross-series correlations
    are not captured. For true multivariate forecasting use models such as
    Temporal Fusion Transformer, DeepAR, or Vector Autoregression (VAR).
    """
    if not req.series_list:
        raise HTTPException(
            status_code=400,
            detail="La lista de series no puede estar vacía."
        )

    horizon = req.prediction_length
    labels = [f"t+{step}" for step in range(1, horizon + 1)]
    results = []

    for item in req.series_list:
        if len(item.values) < 3:
            raise HTTPException(
                status_code=400,
                detail=f"La serie '{item.series_name}' debe tener al menos 3 puntos."
            )

        # Each series gets its own independent Chronos call.
        processed = process_chronos_output(
            call_chronos_inference(item.values, horizon),
            horizon,
        )
        results.append(
            MultivariateForecast(
                series_name=item.series_name,
                timestamps=labels,
                median=processed["median"],
                quantiles=processed["quantiles"],
            )
        )

    return ForecastMultivariateResponse(forecasts=results)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
# Local entry point: serve the app with uvicorn. HF Spaces expects port 7860
# by default; PORT overrides it.
if __name__ == "__main__":
    import uvicorn
    port = int(os.getenv("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)
|
app/main_hf.py
ADDED
|
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List, Dict, Optional
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from fastapi import FastAPI, HTTPException
|
| 8 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 9 |
+
from pydantic import BaseModel, Field
|
| 10 |
+
from huggingface_hub import InferenceClient
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# =========================
# Configuration
# =========================

# Hugging Face API token; without it the Inference client cannot be built.
HF_TOKEN = os.getenv("HF_TOKEN")
# Model to query on the HF Inference API; overridable via environment.
MODEL_ID = os.getenv("CHRONOS_MODEL_ID", "amazon/chronos-2")

app = FastAPI(
    title="Chronos-2 Forecasting API (HF Inference)",
    description=(
        "API de pronósticos usando Chronos-2 via Hugging Face Inference API. "
        "Compatible con Excel Add-in."
    ),
    version="1.0.0",
)

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, restrict to the allowed domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# HF Inference client. When no token is configured, `client` stays None and
# call_chronos_inference() answers every request with HTTP 503.
if not HF_TOKEN:
    print("⚠️ WARNING: HF_TOKEN no configurado. La API puede no funcionar correctamente.")
    print(" Configura HF_TOKEN en las variables de entorno del Space.")
    client = None
else:
    client = InferenceClient(token=HF_TOKEN)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# =========================
|
| 48 |
+
# Modelos Pydantic
|
| 49 |
+
# =========================
|
| 50 |
+
|
| 51 |
+
class UnivariateSeries(BaseModel):
    """A single time series given as an ordered list of numeric values."""
    values: List[float]
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class ForecastUnivariateRequest(BaseModel):
    """Request body for /forecast_univariate."""
    series: UnivariateSeries
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    # Pydantic deep-copies Field defaults per instance, so the list is safe here.
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal (D, W, M, etc.)")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class ForecastUnivariateResponse(BaseModel):
    """Forecast result: one timestamp and median value per horizon step,
    plus a quantile-level -> values mapping (keys are strings like "0.5")."""
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class AnomalyDetectionRequest(BaseModel):
    """Request body for /detect_anomalies.

    ``context`` is the history used to forecast; ``recent_observed`` must
    contain exactly ``prediction_length`` actual values to compare against
    the [quantile_low, quantile_high] prediction interval.
    """
    context: UnivariateSeries
    recent_observed: List[float]
    prediction_length: int = 7
    quantile_low: float = 0.05
    quantile_high: float = 0.95
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class AnomalyPoint(BaseModel):
    """Per-step anomaly verdict: the observed value against the predicted
    median and its [lower, upper] interval bounds."""
    index: int
    value: float
    predicted_median: float
    lower: float
    upper: float
    is_anomaly: bool
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class AnomalyDetectionResponse(BaseModel):
    """Response for /detect_anomalies: one AnomalyPoint per observed step."""
    anomalies: List[AnomalyPoint]
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class BacktestRequest(BaseModel):
    """Request body for /backtest_simple.

    NOTE(review): ``prediction_length`` is currently unused by the endpoint,
    which forecasts all ``test_length`` holdout steps in a single shot.
    """
    series: UnivariateSeries
    prediction_length: int = 7
    test_length: int = 28
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class BacktestMetrics(BaseModel):
    """Holdout error metrics; ``mape`` is expressed in percent."""
    mae: float
    mape: float
    rmse: float
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class BacktestResponse(BaseModel):
    """Response for /backtest_simple: metrics plus the forecast and the
    actual holdout values for side-by-side inspection."""
    metrics: BacktestMetrics
    forecast_median: List[float]
    forecast_timestamps: List[str]
    actuals: List[float]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Modelos para Multi-Series
|
| 112 |
+
# Multi-series models
class MultiSeriesItem(BaseModel):
    """One identified series in a multi-series forecast request."""
    series_id: str
    values: List[float]
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class ForecastMultiIdRequest(BaseModel):
    """Request body for /forecast_multi_id (independent series, one horizon)."""
    series_list: List[MultiSeriesItem]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal (D, W, M, etc.)")
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class ForecastMultiIdResponse(BaseModel):
    """Response for /forecast_multi_id: one forecast per input series,
    in the same order as the request's ``series_list``."""
    forecasts: List[ForecastUnivariateResponse]
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# Modelos para Covariates
|
| 132 |
+
# Covariate models
class CovariateData(BaseModel):
    """A named exogenous variable aligned with the target series."""
    values: List[float]
    name: str = Field(..., description="Nombre de la covariable")
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class ForecastWithCovariatesRequest(BaseModel):
    """Request body for /forecast_with_covariates.

    ``covariates_history`` must match the target length; ``covariates_future``
    must match ``prediction_length`` (both are validated by the endpoint).
    """
    target_series: UnivariateSeries
    covariates_history: List[CovariateData]
    covariates_future: List[CovariateData]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Modelos para Scenarios
|
| 150 |
+
# Scenario models
class ScenarioData(BaseModel):
    """A named "what-if" scenario: covariate name -> future values."""
    scenario_name: str
    covariate_values: Dict[str, List[float]]
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class GenerateScenariosRequest(BaseModel):
    """Request body for /generate_scenarios."""
    target_series: UnivariateSeries
    scenarios: List[ScenarioData]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    freq: str = Field("D", description="Frecuencia temporal")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class ScenarioForecast(BaseModel):
    """Forecast produced for one named scenario."""
    scenario_name: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class GenerateScenariosResponse(BaseModel):
    """Response for /generate_scenarios: one forecast per scenario."""
    scenarios: List[ScenarioForecast]
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# Modelos para Multivariate
|
| 174 |
+
# Multivariate models
class MultivariateSeries(BaseModel):
    """One named series in a multivariate forecast request."""
    series_name: str
    values: List[float]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class ForecastMultivariateRequest(BaseModel):
    """Request body for /forecast_multivariate."""
    series_list: List[MultivariateSeries]
    prediction_length: int = Field(7, description="Número de pasos a predecir")
    quantile_levels: Optional[List[float]] = Field(
        default=[0.1, 0.5, 0.9],
        description="Cuantiles para intervalos de confianza"
    )
    freq: str = Field("D", description="Frecuencia temporal")
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class MultivariateForecast(BaseModel):
    """Forecast for a single named series within a multivariate request."""
    series_name: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class ForecastMultivariateResponse(BaseModel):
    """Response for /forecast_multivariate: one forecast per input series."""
    forecasts: List[MultivariateForecast]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# =========================
|
| 201 |
+
# Función auxiliar para llamar a HF Inference
|
| 202 |
+
# =========================
|
| 203 |
+
|
| 204 |
+
def call_chronos_inference(series: List[float], prediction_length: int) -> Dict:
    """Call the Hugging Face Inference API for the Chronos model.

    Args:
        series: Historical values used as forecasting context.
        prediction_length: Number of future steps to predict.

    Returns:
        The raw JSON payload returned by the HF Inference API.

    Raises:
        HTTPException: 503 when no token is configured or the model is still
            loading, 504 on timeout, the upstream status code on other API
            errors, and 500 on unexpected failures.
    """
    if client is None:
        raise HTTPException(
            status_code=503,
            detail="HF_TOKEN no configurado. Contacta al administrador del servicio."
        )

    try:
        # Local import keeps module import-time light; requests is only
        # needed once an actual inference call is made.
        import requests

        url = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
        headers = {"Authorization": f"Bearer {HF_TOKEN}"}

        payload = {
            "inputs": series,
            "parameters": {
                "prediction_length": prediction_length,
                "num_samples": 100  # sampling enables quantile estimation
            }
        }

        response = requests.post(url, headers=headers, json=payload, timeout=60)

        if response.status_code == 503:
            raise HTTPException(
                status_code=503,
                detail="El modelo está cargando. Por favor, intenta de nuevo en 30-60 segundos."
            )
        elif response.status_code != 200:
            raise HTTPException(
                status_code=response.status_code,
                detail=f"Error de la API de HuggingFace: {response.text}"
            )

        return response.json()

    except requests.exceptions.Timeout:
        raise HTTPException(
            status_code=504,
            detail="Timeout al comunicarse con HuggingFace API. El modelo puede estar cargando."
        )
    except HTTPException:
        # BUG FIX: the broad handler below used to catch the HTTPExceptions
        # raised above (503 model-loading, upstream status passthrough) and
        # rewrap them all as generic 500s. Re-raise them untouched instead.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error inesperado: {str(e)}"
        )
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def process_chronos_output(raw_output: Dict, prediction_length: int) -> Dict:
    """Normalize a raw Chronos API response into ``median`` + ``quantiles``.

    The upstream API may answer in several shapes; this adapter handles a
    bare list of values, a nested ``{"forecast": {...}}`` object, and falls
    back to a zero-filled forecast for anything unrecognized.
    """
    # Case 1: a bare list is treated as the point forecast itself.
    if isinstance(raw_output, list):
        point = raw_output[:prediction_length]
        # No quantile information available: reuse the point forecast.
        return {
            "median": point,
            "quantiles": {"0.1": point, "0.5": point, "0.9": point},
        }

    # Case 2: structured payload carrying a "forecast" object.
    if "forecast" in raw_output:
        forecast = raw_output["forecast"]
        if "median" in forecast:
            point = forecast["median"][:prediction_length]
        else:
            point = forecast.get("mean", [0] * prediction_length)[:prediction_length]
        return {"median": point, "quantiles": forecast.get("quantiles", {})}

    # Case 3: unknown shape — zero-filled default so callers never crash.
    zeros = [0] * prediction_length
    return {
        "median": zeros,
        "quantiles": {"0.1": zeros, "0.5": zeros, "0.9": zeros},
    }
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# =========================
|
| 303 |
+
# Endpoints
|
| 304 |
+
# =========================
|
| 305 |
+
|
| 306 |
+
@app.get("/")
def root():
    """Basic service metadata for the API landing route."""
    info = {
        "name": "Chronos-2 Forecasting API",
        "version": "1.0.0",
        "model": MODEL_ID,
        "status": "running",
        "docs": "/docs",
        "health": "/health",
    }
    return info
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
@app.get("/health")
def health():
    """Service health check, reporting whether the HF token is present."""
    # Note: status/message use truthiness (empty-string token -> warning),
    # while hf_token_configured reports only presence, as before.
    ready = bool(HF_TOKEN)
    return {
        "status": "ok" if ready else "warning",
        "model_id": MODEL_ID,
        "hf_token_configured": HF_TOKEN is not None,
        "message": "Ready" if ready else "HF_TOKEN not configured",
    }
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@app.post("/forecast_univariate", response_model=ForecastUnivariateResponse)
def forecast_univariate(req: ForecastUnivariateRequest):
    """Forecast a single univariate time series.

    Compatible with the Excel Add-in.

    Raises:
        HTTPException: 400 for an empty/too-short series or a non-positive
            horizon; upstream errors propagate from call_chronos_inference.
    """
    values = req.series.values
    n = len(values)

    if n == 0:
        raise HTTPException(status_code=400, detail="La serie no puede estar vacía.")

    if n < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie debe tener al menos 3 puntos históricos."
        )

    # Robustness: reject non-positive horizons before hitting the upstream API.
    if req.prediction_length < 1:
        raise HTTPException(
            status_code=400,
            detail="prediction_length debe ser al menos 1."
        )

    # Query the HuggingFace Inference API and normalize its output.
    raw_output = call_chronos_inference(values, req.prediction_length)
    processed = process_chronos_output(raw_output, req.prediction_length)

    # Relative step labels (t+1, t+2, ...) — no absolute dates are known here.
    timestamps = [f"t+{i+1}" for i in range(req.prediction_length)]

    return ForecastUnivariateResponse(
        timestamps=timestamps,
        median=processed["median"],
        quantiles=processed["quantiles"]
    )
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
@app.post("/detect_anomalies", response_model=AnomalyDetectionResponse)
def detect_anomalies(req: AnomalyDetectionRequest):
    """Flag observed points falling outside the forecast interval."""
    if len(req.context.values) == 0:
        raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")

    if len(req.recent_observed) != req.prediction_length:
        raise HTTPException(
            status_code=400,
            detail="recent_observed debe tener la misma longitud que prediction_length."
        )

    # Forecast the same window that was actually observed.
    forecast = process_chronos_output(
        call_chronos_inference(req.context.values, req.prediction_length),
        req.prediction_length,
    )

    center = forecast["median"]
    # Fall back to the median when the requested quantiles are unavailable.
    band_low = forecast["quantiles"].get(str(req.quantile_low), center)
    band_high = forecast["quantiles"].get(str(req.quantile_high), center)

    flagged: List[AnomalyPoint] = []
    for idx, observed in enumerate(req.recent_observed):
        if idx >= len(center):
            continue
        # ±20% heuristic band when a quantile series is shorter than expected.
        lo = band_low[idx] if idx < len(band_low) else center[idx] * 0.8
        hi = band_high[idx] if idx < len(band_high) else center[idx] * 1.2
        flagged.append(
            AnomalyPoint(
                index=idx,
                value=observed,
                predicted_median=center[idx],
                lower=lo,
                upper=hi,
                is_anomaly=(observed < lo) or (observed > hi),
            )
        )

    return AnomalyDetectionResponse(anomalies=flagged)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
@app.post("/backtest_simple", response_model=BacktestResponse)
def backtest_simple(req: BacktestRequest):
    """Simple holdout backtest: train on the head, evaluate on the tail.

    Splits the series into train / last ``test_length`` points, forecasts
    the holdout in one shot, and reports MAE, RMSE and MAPE (percent).
    """
    values = np.array(req.series.values, dtype=float)
    n = len(values)

    if n <= req.test_length:
        raise HTTPException(
            status_code=400,
            detail="La serie debe ser más larga que test_length."
        )

    # Train/test split: the last `test_length` points are held out.
    train = values[: n - req.test_length].tolist()
    test = values[n - req.test_length :].tolist()

    # Forecast the whole holdout window at once.
    raw_output = call_chronos_inference(train, req.test_length)
    processed = process_chronos_output(raw_output, req.test_length)

    forecast = np.array(processed["median"], dtype=float)
    test_arr = np.array(test, dtype=float)

    # Error metrics
    mae = float(np.mean(np.abs(test_arr - forecast)))
    rmse = float(np.sqrt(np.mean((test_arr - forecast) ** 2)))

    # BUG FIX: use |actual| in the MAPE denominator. The previous
    # `test_arr + eps` yielded wrong (even negative-denominator) values for
    # negative actuals; eps still guards against division by zero.
    eps = 1e-8
    mape = float(np.mean(np.abs(test_arr - forecast) / (np.abs(test_arr) + eps))) * 100.0

    timestamps = [f"test_t{i+1}" for i in range(req.test_length)]

    metrics = BacktestMetrics(mae=mae, mape=mape, rmse=rmse)

    return BacktestResponse(
        metrics=metrics,
        forecast_median=forecast.tolist(),
        forecast_timestamps=timestamps,
        actuals=test,
    )
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
# =========================
|
| 459 |
+
# Endpoints simplificados para testing
|
| 460 |
+
# =========================
|
| 461 |
+
|
| 462 |
+
@app.post("/simple_forecast")
def simple_forecast(series: List[float], prediction_length: int = 7):
    """Minimal forecasting endpoint intended for quick smoke tests."""
    if not series:
        raise HTTPException(status_code=400, detail="Serie vacía")

    outcome = process_chronos_output(
        call_chronos_inference(series, prediction_length),
        prediction_length,
    )

    return {
        "input_series": series,
        "prediction_length": prediction_length,
        "forecast": outcome["median"],
        "model": MODEL_ID,
    }
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
# =========================
|
| 482 |
+
# NUEVOS ENDPOINTS IMPLEMENTADOS
|
| 483 |
+
# =========================
|
| 484 |
+
|
| 485 |
+
@app.post("/forecast_multi_id", response_model=ForecastMultiIdResponse)
def forecast_multi_id(req: ForecastMultiIdRequest):
    """Forecast several independent series (products, locations, ...).

    Each series is forecast separately with the shared horizon; results
    come back in request order.
    """
    if not req.series_list:
        raise HTTPException(status_code=400, detail="La lista de series no puede estar vacía.")

    horizon = req.prediction_length
    # Step labels are identical for every series, so build them once.
    labels = [f"t+{step + 1}" for step in range(horizon)]
    results = []

    for item in req.series_list:
        if len(item.values) < 3:
            raise HTTPException(
                status_code=400,
                detail=f"La serie '{item.series_id}' debe tener al menos 3 puntos."
            )

        outcome = process_chronos_output(
            call_chronos_inference(item.values, horizon),
            horizon,
        )
        results.append(
            ForecastUnivariateResponse(
                timestamps=labels,
                median=outcome["median"],
                quantiles=outcome["quantiles"],
            )
        )

    return ForecastMultiIdResponse(forecasts=results)
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
@app.post("/forecast_with_covariates")
def forecast_with_covariates(req: ForecastWithCovariatesRequest):
    """Forecast with exogenous covariates (approximation).

    NOTE: Chronos-2 is a purely univariate model. Covariates are validated
    and echoed back for reference, but the forecast itself is driven only by
    the target series. For real covariate support consider TimesFM, Temporal
    Fusion Transformer, or Prophet.
    """
    target = req.target_series.values

    if len(target) < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie objetivo debe tener al menos 3 puntos."
        )

    # Historical covariates must align 1:1 with the target history.
    for cov in req.covariates_history:
        if len(cov.values) != len(target):
            raise HTTPException(
                status_code=400,
                detail=f"La covariable '{cov.name}' debe tener la misma longitud que la serie objetivo."
            )

    # Future covariates must cover exactly the forecast horizon.
    for cov in req.covariates_future:
        if len(cov.values) != req.prediction_length:
            raise HTTPException(
                status_code=400,
                detail=f"La covariable futura '{cov.name}' debe tener longitud = prediction_length."
            )

    # APPROXIMATION: only the target series feeds the model.
    outcome = process_chronos_output(
        call_chronos_inference(target, req.prediction_length),
        req.prediction_length,
    )

    return {
        "timestamps": [f"t+{step + 1}" for step in range(req.prediction_length)],
        "median": outcome["median"],
        "quantiles": outcome["quantiles"],
        "note": "Chronos-2 no usa covariables nativamente. Este forecast se basa solo en la serie objetivo.",
        "covariates_used": [cov.name for cov in req.covariates_history],
        "covariates_future": [cov.name for cov in req.covariates_future]
    }
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
@app.post("/generate_scenarios", response_model=GenerateScenariosResponse)
def generate_scenarios(req: GenerateScenariosRequest):
    """Produce one forecast per named "what-if" scenario.

    NOTE: because Chronos-2 ignores covariates, every scenario currently
    shares the same base forecast; the endpoint exists for sensitivity /
    planning workflows with covariate-aware models.
    """
    history = req.target_series.values

    if len(history) < 3:
        raise HTTPException(
            status_code=400,
            detail="La serie objetivo debe tener al menos 3 puntos."
        )

    if not req.scenarios:
        raise HTTPException(
            status_code=400,
            detail="Debe proporcionar al menos un escenario."
        )

    # A single base forecast shared by all scenarios.
    base = process_chronos_output(
        call_chronos_inference(history, req.prediction_length),
        req.prediction_length,
    )
    labels = [f"t+{step + 1}" for step in range(req.prediction_length)]

    # In a covariate-aware model, scenario.covariate_values would drive a
    # distinct forecast per scenario; here each one reuses the base result.
    rendered = [
        ScenarioForecast(
            scenario_name=scenario.scenario_name,
            timestamps=labels,
            median=base["median"],
            quantiles=base["quantiles"],
        )
        for scenario in req.scenarios
    ]

    return GenerateScenariosResponse(scenarios=rendered)
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
@app.post("/forecast_multivariate", response_model=ForecastMultivariateResponse)
def forecast_multivariate(req: ForecastMultivariateRequest):
    """Forecast several related series, each processed independently.

    NOTE: Chronos-2 is fundamentally univariate, so correlations between
    series are not captured. For true multivariate forecasting use models
    like Temporal Fusion Transformer, DeepAR, or VAR.
    """
    if not req.series_list:
        raise HTTPException(
            status_code=400,
            detail="La lista de series no puede estar vacía."
        )

    horizon = req.prediction_length
    labels = [f"t+{step + 1}" for step in range(horizon)]
    results = []

    for item in req.series_list:
        if len(item.values) < 3:
            raise HTTPException(
                status_code=400,
                detail=f"La serie '{item.series_name}' debe tener al menos 3 puntos."
            )

        # Each series gets its own independent inference call.
        outcome = process_chronos_output(
            call_chronos_inference(item.values, horizon),
            horizon,
        )
        results.append(
            MultivariateForecast(
                series_name=item.series_name,
                timestamps=labels,
                median=outcome["median"],
                quantiles=outcome["quantiles"],
            )
        )

    return ForecastMultivariateResponse(forecasts=results)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
if __name__ == "__main__":
    # Local/dev entry point; HF Spaces conventionally expose port 7860,
    # overridable via the PORT environment variable.
    import uvicorn
    port = int(os.getenv("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)
|
app/main_v2.1.1_backup.py
ADDED
|
@@ -0,0 +1,717 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List, Dict, Optional
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
from fastapi import FastAPI, HTTPException
|
| 7 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 8 |
+
from fastapi.staticfiles import StaticFiles
|
| 9 |
+
from fastapi.responses import FileResponse
|
| 10 |
+
from pydantic import BaseModel, Field
|
| 11 |
+
|
| 12 |
+
from chronos import Chronos2Pipeline
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# =========================
# Model configuration
# =========================

# Model to load and the device placement; both overridable via environment.
MODEL_ID = os.getenv("CHRONOS_MODEL_ID", "amazon/chronos-2")
DEVICE_MAP = os.getenv("DEVICE_MAP", "cpu")  # "cpu" or "cuda"

app = FastAPI(
    title="Chronos-2 Universal Forecasting API + Excel Add-in",
    description=(
        "Servidor para pronósticos con Chronos-2: univariante, "
        "multivariante, covariables, escenarios, anomalías y backtesting. "
        "Incluye Excel Add-in v2.1.0 con archivos estáticos."
    ),
    version="2.1.0",
)

# CORS configuration for the Excel Add-in
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "https://localhost:3001",
        "https://localhost:3000",
        "https://ttzzs-chronos2-excel-forecasting-api.hf.space",
        "*"  # Allow all origins for Office Add-ins
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the model once at process startup (heavy; shared by all requests)
pipeline = Chronos2Pipeline.from_pretrained(MODEL_ID, device_map=DEVICE_MAP)
|
| 48 |
+
|
| 49 |
+
# =========================
|
| 50 |
+
# Archivos estáticos para Excel Add-in
|
| 51 |
+
# =========================
|
| 52 |
+
|
| 53 |
+
# Mount the static add-in directories when they are bundled with the Space;
# otherwise fall back to an API-only landing route.
if os.path.exists("static"):
    app.mount("/assets", StaticFiles(directory="static/assets"), name="assets")
    app.mount("/taskpane", StaticFiles(directory="static/taskpane"), name="taskpane")
    app.mount("/commands", StaticFiles(directory="static/commands"), name="commands")

    # Endpoint for manifest.xml
    @app.get("/manifest.xml", response_class=FileResponse)
    async def get_manifest():
        """Serve the Excel Add-in manifest.xml."""
        return FileResponse("static/manifest.xml", media_type="application/xml")

    @app.get("/", tags=["Info"])
    async def root_with_addon():
        """API + Add-in information."""
        return {
            "name": "Chronos-2 Forecasting API",
            "version": "2.1.0",
            "model": MODEL_ID,
            "endpoints": {
                "api": [
                    "/health",
                    "/forecast_univariate",
                    "/forecast_multi_id",
                    "/forecast_with_covariates",
                    "/forecast_multivariate",
                    "/forecast_scenarios",
                    "/detect_anomalies",
                    "/backtest_simple"
                ],
                "add_in": [
                    "/manifest.xml",
                    "/taskpane/taskpane.html",
                    "/assets/icon-*.png"
                ]
            },
            "docs": "/docs",
            "excel_add_in": {
                "manifest_url": "https://ttzzs-chronos2-excel-forecasting-api.hf.space/manifest.xml",
                "version": "2.1.0",
                "features": [
                    "Univariate Forecast",
                    "Multi-Series Forecast",
                    "Forecast with Covariates",
                    "Scenario Analysis",
                    "Multivariate Forecast",
                    "Anomaly Detection",
                    "Backtest"
                ]
            }
        }
else:
    @app.get("/", tags=["Info"])
    async def root_api_only():
        """API information (Add-in assets not bundled)."""
        return {
            "name": "Chronos-2 Forecasting API",
            "version": "2.1.0",
            "model": MODEL_ID,
            "docs": "/docs"
        }
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# =========================
|
| 117 |
+
# Modelos Pydantic comunes
|
| 118 |
+
# =========================
|
| 119 |
+
|
| 120 |
+
class BaseForecastConfig(BaseModel):
    """Common forecast options shared by every forecasting endpoint.

    Subclassed by the per-endpoint request models; controls the horizon,
    the probabilistic quantiles and how timestamps are generated.
    """

    prediction_length: int = Field(
        7, description="Horizonte de predicción (número de pasos futuros)"
    )
    quantile_levels: List[float] = Field(
        default_factory=lambda: [0.1, 0.5, 0.9],
        description="Cuantiles para el pronóstico probabilístico",
    )
    # ISO start date of the history; when None, integer indices are used.
    start_timestamp: Optional[str] = Field(
        default=None,
        description=(
            "Fecha/hora inicial del histórico (formato ISO). "
            "Si no se especifica, se usan índices enteros."
        ),
    )
    freq: str = Field(
        "D",
        description="Frecuencia temporal (p.ej. 'D' diario, 'H' horario, 'W' semanal...).",
    )


class UnivariateSeries(BaseModel):
    """A single series of target values (no timestamps, no covariates)."""

    values: List[float]


class MultiSeriesItem(BaseModel):
    """One named series in a multi-series (multi-id) request."""

    series_id: str
    values: List[float]


class CovariatePoint(BaseModel):
    """
    Time point used both for the context (history) and for future covariates.
    """
    timestamp: Optional[str] = None  # optional when integer indices are used
    id: Optional[str] = None  # series id, defaults to 'series_0'
    target: Optional[float] = None  # target-variable value (history only)
    covariates: Dict[str, float] = Field(
        default_factory=dict,
        description="Nombre -> valor de cada covariable dinámica.",
    )
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# =========================
|
| 164 |
+
# 1) Healthcheck
|
| 165 |
+
# =========================
|
| 166 |
+
|
| 167 |
+
@app.get("/health")
def health():
    """Report basic server status plus the loaded model configuration."""
    payload = {"status": "ok"}
    payload["model_id"] = MODEL_ID
    payload["device_map"] = DEVICE_MAP
    return payload
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# =========================
|
| 180 |
+
# 2) Pronóstico univariante
|
| 181 |
+
# =========================
|
| 182 |
+
|
| 183 |
+
class ForecastUnivariateRequest(BaseForecastConfig):
    """Request body for /forecast_univariate: one series plus shared options."""

    series: UnivariateSeries


class ForecastUnivariateResponse(BaseModel):
    """Forecast for a single series; lists are aligned by position."""

    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]  # "0.1" -> [..], "0.9" -> [..]
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@app.post("/forecast_univariate", response_model=ForecastUnivariateResponse)
def forecast_univariate(req: ForecastUnivariateRequest):
    """
    Forecast a single time series (univariate, no covariates).
    Intended for direct use from Excel or other simple clients.
    """
    history = req.series.values
    length = len(history)
    if length == 0:
        raise HTTPException(status_code=400, detail="La serie no puede estar vacía.")

    # Build a long-format context frame (id, timestamp, target): real dates
    # when a start timestamp was supplied, plain integer positions otherwise.
    if req.start_timestamp:
        idx = pd.date_range(
            start=pd.to_datetime(req.start_timestamp),
            periods=length,
            freq=req.freq,
        )
    else:
        idx = pd.RangeIndex(start=0, stop=length, step=1)

    context_df = pd.DataFrame(
        {"id": ["series_0"] * length, "timestamp": idx, "target": history}
    )

    pred_df = pipeline.predict_df(
        context_df,
        prediction_length=req.prediction_length,
        quantile_levels=req.quantile_levels,
        id_column="id",
        timestamp_column="timestamp",
        target="target",
    ).sort_values("timestamp")

    # Quantile columns are named after the level ("0.1", "0.9", ...);
    # silently skip any level the model did not return.
    quantile_cols: Dict[str, List[float]] = {
        key: pred_df[key].astype(float).tolist()
        for key in (f"{q:.3g}" for q in req.quantile_levels)
        if key in pred_df.columns
    }

    return ForecastUnivariateResponse(
        timestamps=pred_df["timestamp"].astype(str).tolist(),
        median=pred_df["predictions"].astype(float).tolist(),
        quantiles=quantile_cols,
    )
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
# =========================
|
| 249 |
+
# 3) Multi-serie (multi-id)
|
| 250 |
+
# =========================
|
| 251 |
+
|
| 252 |
+
class ForecastMultiSeriesRequest(BaseForecastConfig):
    """Request body for /forecast_multi_id: several independent series."""

    series_list: List[MultiSeriesItem]


class SeriesForecast(BaseModel):
    """Forecast for one series of a multi-series request."""

    series_id: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]


class ForecastMultiSeriesResponse(BaseModel):
    """Per-series forecasts, one entry per non-empty input series."""

    forecasts: List[SeriesForecast]
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
@app.post("/forecast_multi_id", response_model=ForecastMultiSeriesResponse)
def forecast_multi_id(req: ForecastMultiSeriesRequest):
    """
    Forecast several series at once (e.g. multiple SKUs or stores).
    Empty input series are silently skipped.
    """
    if not req.series_list:
        raise HTTPException(status_code=400, detail="Debes enviar al menos una serie.")

    frames = []
    for item in req.series_list:
        length = len(item.values)
        if length == 0:
            continue
        # Same timestamp convention as the univariate endpoint: real dates
        # when start_timestamp is given, integer positions otherwise.
        if req.start_timestamp:
            idx = pd.date_range(
                start=pd.to_datetime(req.start_timestamp),
                periods=length,
                freq=req.freq,
            )
        else:
            idx = pd.RangeIndex(start=0, stop=length, step=1)
        frames.append(
            pd.DataFrame(
                {
                    "id": [item.series_id] * length,
                    "timestamp": idx,
                    "target": item.values,
                }
            )
        )

    if not frames:
        raise HTTPException(status_code=400, detail="Todas las series están vacías.")

    pred_df = pipeline.predict_df(
        pd.concat(frames, ignore_index=True),
        prediction_length=req.prediction_length,
        quantile_levels=req.quantile_levels,
        id_column="id",
        timestamp_column="timestamp",
        target="target",
    )

    forecasts: List[SeriesForecast] = []
    for series_id, group in pred_df.groupby("id"):
        ordered = group.sort_values("timestamp")
        quantile_cols: Dict[str, List[float]] = {
            key: ordered[key].astype(float).tolist()
            for key in (f"{q:.3g}" for q in req.quantile_levels)
            if key in ordered.columns
        }
        forecasts.append(
            SeriesForecast(
                series_id=series_id,
                timestamps=ordered["timestamp"].astype(str).tolist(),
                median=ordered["predictions"].astype(float).tolist(),
                quantiles=quantile_cols,
            )
        )

    return ForecastMultiSeriesResponse(forecasts=forecasts)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# =========================
|
| 337 |
+
# 4) Pronóstico con covariables
|
| 338 |
+
# =========================
|
| 339 |
+
|
| 340 |
+
class ForecastWithCovariatesRequest(BaseForecastConfig):
    """Request body for /forecast_with_covariates: history plus optional
    known-future covariate values."""

    context: List[CovariatePoint]
    future: Optional[List[CovariatePoint]] = None


class ForecastWithCovariatesResponse(BaseModel):
    # rows carrying every pred_df column serialized as string
    pred_df: List[Dict[str, str]]
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
@app.post("/forecast_with_covariates", response_model=ForecastWithCovariatesResponse)
def forecast_with_covariates(req: ForecastWithCovariatesRequest):
    """
    Forecast using covariate information (promotions, price, weather, ...)
    both in the history (``context``) and, optionally, over the forecast
    horizon (``future``).

    Raises HTTP 400 when the context is empty or contains no point with a
    ``target`` value.
    """
    if not req.context:
        raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")

    ctx_rows = []
    for p in req.context:
        # Points without a target carry no history and are skipped.
        if p.target is None:
            continue
        row = {
            "id": p.id or "series_0",
            "timestamp": p.timestamp,
            "target": p.target,
        }
        for k, v in p.covariates.items():
            row[k] = v
        ctx_rows.append(row)

    # Fix: an all-None-target context previously fell through to pandas /
    # the model with an empty frame and failed with an opaque error.
    if not ctx_rows:
        raise HTTPException(
            status_code=400,
            detail="El contexto no contiene puntos con 'target'.",
        )

    context_df = pd.DataFrame(ctx_rows)
    if "timestamp" not in context_df or context_df["timestamp"].isna().any():
        # No usable timestamps: fall back to integer positions.
        context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)

    future_df = None
    if req.future:
        fut_rows = []
        for p in req.future:
            row = {
                "id": p.id or "series_0",
                "timestamp": p.timestamp,
            }
            for k, v in p.covariates.items():
                row[k] = v
            fut_rows.append(row)
        future_df = pd.DataFrame(fut_rows)
        if "timestamp" not in future_df or future_df["timestamp"].isna().any():
            # NOTE(review): continuing from max()+1 assumes integer context
            # timestamps; with ISO-string timestamps this would fail —
            # confirm callers always send both kinds of timestamps or neither.
            future_df["timestamp"] = pd.RangeIndex(
                start=context_df["timestamp"].max() + 1,
                stop=context_df["timestamp"].max() + 1 + len(future_df),
                step=1,
            )

    pred_df = pipeline.predict_df(
        context_df,
        future_df=future_df,
        prediction_length=req.prediction_length,
        quantile_levels=req.quantile_levels,
        id_column="id",
        timestamp_column="timestamp",
        target="target",
    )

    # Serialize every prediction column as string so the response schema
    # stays stable regardless of which quantile columns the model returns.
    pred_df = pred_df.sort_values(["id", "timestamp"])
    out_records: List[Dict[str, str]] = []
    for _, row in pred_df.iterrows():
        out_records.append({k: str(v) for k, v in row.items()})

    return ForecastWithCovariatesResponse(pred_df=out_records)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
# =========================
|
| 415 |
+
# 5) Multivariante (varios targets)
|
| 416 |
+
# =========================
|
| 417 |
+
|
| 418 |
+
class MultivariateContextPoint(BaseModel):
    """One history point carrying several target columns plus covariates."""

    timestamp: Optional[str] = None
    id: Optional[str] = None
    targets: Dict[str, float]  # e.g. {"demand": 100, "returns": 5}
    covariates: Dict[str, float] = Field(default_factory=dict)


class ForecastMultivariateRequest(BaseForecastConfig):
    """Request body for /forecast_multivariate."""

    context: List[MultivariateContextPoint]
    target_columns: List[str]  # names of the target columns


class ForecastMultivariateResponse(BaseModel):
    """Raw prediction frame serialized row-by-row as strings."""

    pred_df: List[Dict[str, str]]
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@app.post("/forecast_multivariate", response_model=ForecastMultivariateResponse)
def forecast_multivariate(req: ForecastMultivariateRequest):
    """
    Multivariate forecast: several target columns at once
    (e.g. demand and returns).
    """
    if not req.context:
        raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")
    if not req.target_columns:
        raise HTTPException(status_code=400, detail="Debes indicar columnas objetivo.")

    # Flatten each point into one wide row: targets first, then covariates.
    records = []
    for point in req.context:
        record = {"id": point.id or "series_0", "timestamp": point.timestamp}
        record.update(point.targets)
        record.update(point.covariates)
        records.append(record)

    context_df = pd.DataFrame(records)
    if "timestamp" not in context_df or context_df["timestamp"].isna().any():
        # Fall back to integer positions when timestamps were not provided.
        context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)

    pred_df = pipeline.predict_df(
        context_df,
        prediction_length=req.prediction_length,
        quantile_levels=req.quantile_levels,
        id_column="id",
        timestamp_column="timestamp",
        target=req.target_columns,
    ).sort_values(["id", "timestamp"])

    serialized = [
        {key: str(val) for key, val in row.items()} for _, row in pred_df.iterrows()
    ]
    return ForecastMultivariateResponse(pred_df=serialized)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
# =========================
|
| 475 |
+
# 6) Escenarios (what-if)
|
| 476 |
+
# =========================
|
| 477 |
+
|
| 478 |
+
class ScenarioDefinition(BaseModel):
    """A named what-if scenario: the future covariate values to evaluate."""

    name: str
    future_covariates: List[CovariatePoint]


class ScenarioForecast(BaseModel):
    """Forecast produced for one scenario, as serialized pred_df rows."""

    name: str
    pred_df: List[Dict[str, str]]


class ForecastScenariosRequest(BaseForecastConfig):
    """Request body for /forecast_scenarios: shared history + scenarios."""

    context: List[CovariatePoint]
    scenarios: List[ScenarioDefinition]


class ForecastScenariosResponse(BaseModel):
    """One forecast per requested scenario, in request order."""

    scenarios: List[ScenarioForecast]
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
@app.post("/forecast_scenarios", response_model=ForecastScenariosResponse)
def forecast_scenarios(req: ForecastScenariosRequest):
    """
    Evaluate several what-if scenarios by varying the future covariates
    (e.g. promo ON/OFF, different prices, ...).

    The shared history (``context``) is built once; each scenario only
    re-runs the prediction with its own future covariates.

    Raises HTTP 400 when the context or scenario list is empty, or when
    no context point carries a ``target`` value.
    """

    def _point_to_row(p: CovariatePoint, include_target: bool):
        """Flatten one CovariatePoint into a long-format row (None = skip)."""
        if include_target and p.target is None:
            return None
        row = {"id": p.id or "series_0", "timestamp": p.timestamp}
        if include_target:
            row["target"] = p.target
        row.update(p.covariates)
        return row

    if not req.context:
        raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")
    if not req.scenarios:
        raise HTTPException(status_code=400, detail="Debes definir al menos un escenario.")

    ctx_rows = [
        row
        for row in (_point_to_row(p, include_target=True) for p in req.context)
        if row is not None
    ]
    # Fix: an all-None-target context previously reached the model call
    # with an empty frame and failed with an opaque error.
    if not ctx_rows:
        raise HTTPException(
            status_code=400,
            detail="El contexto no contiene puntos con 'target'.",
        )

    context_df = pd.DataFrame(ctx_rows)
    if "timestamp" not in context_df or context_df["timestamp"].isna().any():
        context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)

    results: List[ScenarioForecast] = []

    for scen in req.scenarios:
        fut_rows = [
            _point_to_row(p, include_target=False) for p in scen.future_covariates
        ]
        future_df = pd.DataFrame(fut_rows)
        if "timestamp" not in future_df or future_df["timestamp"].isna().any():
            # NOTE(review): continuing from max()+1 assumes integer context
            # timestamps — breaks on ISO-string timestamps; confirm callers.
            future_df["timestamp"] = pd.RangeIndex(
                start=context_df["timestamp"].max() + 1,
                stop=context_df["timestamp"].max() + 1 + len(future_df),
                step=1,
            )

        pred_df = pipeline.predict_df(
            context_df,
            future_df=future_df,
            prediction_length=req.prediction_length,
            quantile_levels=req.quantile_levels,
            id_column="id",
            timestamp_column="timestamp",
            target="target",
        ).sort_values(["id", "timestamp"])
        out_records = [
            {k: str(v) for k, v in row.items()} for _, row in pred_df.iterrows()
        ]

        results.append(ScenarioForecast(name=scen.name, pred_df=out_records))

    return ForecastScenariosResponse(scenarios=results)
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
# =========================
|
| 563 |
+
# 7) Detección de anomalías
|
| 564 |
+
# =========================
|
| 565 |
+
|
| 566 |
+
class AnomalyDetectionRequest(BaseModel):
    """Request body for /detect_anomalies.

    ``recent_observed`` must contain exactly ``prediction_length`` values:
    the actual observations that immediately follow ``context``.
    """

    context: UnivariateSeries
    recent_observed: List[float]
    prediction_length: int = 7
    quantile_low: float = 0.05
    quantile_high: float = 0.95


class AnomalyPoint(BaseModel):
    """Verdict for one observed point versus its forecast interval."""

    index: int
    value: float
    predicted_median: float
    lower: float
    upper: float
    is_anomaly: bool


class AnomalyDetectionResponse(BaseModel):
    """One AnomalyPoint per recent observation, in input order."""

    anomalies: List[AnomalyPoint]
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
@app.post("/detect_anomalies", response_model=AnomalyDetectionResponse)
def detect_anomalies(req: AnomalyDetectionRequest):
    """
    Flag recent observed points as anomalies when they fall outside the
    forecast interval [quantile_low, quantile_high].
    """
    history_len = len(req.context.values)
    if history_len == 0:
        raise HTTPException(status_code=400, detail="La serie histórica no puede estar vacía.")
    if len(req.recent_observed) != req.prediction_length:
        raise HTTPException(
            status_code=400,
            detail="recent_observed debe tener la misma longitud que prediction_length.",
        )

    context_df = pd.DataFrame(
        {
            "id": ["series_0"] * history_len,
            "timestamp": pd.RangeIndex(start=0, stop=history_len, step=1),
            "target": req.context.values,
        }
    )

    # Always include the median so predicted_median is available.
    pred_df = pipeline.predict_df(
        context_df,
        prediction_length=req.prediction_length,
        quantile_levels=sorted({req.quantile_low, 0.5, req.quantile_high}),
        id_column="id",
        timestamp_column="timestamp",
        target="target",
    ).sort_values("timestamp")

    low_col = f"{req.quantile_low:.3g}"
    high_col = f"{req.quantile_high:.3g}"

    anomalies: List[AnomalyPoint] = []
    for position, (observed, (_, row)) in enumerate(
        zip(req.recent_observed, pred_df.iterrows())
    ):
        low = float(row[low_col])
        high = float(row[high_col])
        anomalies.append(
            AnomalyPoint(
                index=position,
                value=observed,
                predicted_median=float(row["predictions"]),
                lower=low,
                upper=high,
                is_anomaly=(observed < low) or (observed > high),
            )
        )

    return AnomalyDetectionResponse(anomalies=anomalies)
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
# =========================
|
| 644 |
+
# 8) Backtest simple
|
| 645 |
+
# =========================
|
| 646 |
+
|
| 647 |
+
class BacktestRequest(BaseModel):
    """Request body for /backtest_simple.

    NOTE(review): ``prediction_length`` is accepted but currently unused —
    the endpoint forecasts the whole ``test_length`` span in one shot.
    """

    series: UnivariateSeries
    prediction_length: int = 7
    test_length: int = 28


class BacktestMetrics(BaseModel):
    """Accuracy metrics over the held-out test span."""

    mae: float
    mape: float
    wql: float  # approximate Weighted Quantile Loss for the 0.5 quantile


class BacktestResponse(BaseModel):
    """Metrics plus the aligned forecast/actual values for plotting."""

    metrics: BacktestMetrics
    forecast_median: List[float]
    forecast_timestamps: List[str]
    actuals: List[float]
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
@app.post("/backtest_simple", response_model=BacktestResponse)
def backtest_simple(req: BacktestRequest):
    """
    Simple backtest: hold out the final ``test_length`` points of the series,
    forecast that span in a single shot and compute MAE / MAPE / WQL.

    NOTE(review): ``req.prediction_length`` is not used here — the whole
    test span is forecast at once with ``prediction_length=test_length``.
    """
    values = np.array(req.series.values, dtype=float)
    n = len(values)
    # Fix: a non-positive test window previously slipped past the length
    # check and produced an empty test set / zero-length forecast.
    if req.test_length < 1:
        raise HTTPException(
            status_code=400,
            detail="test_length debe ser al menos 1.",
        )
    if n <= req.test_length:
        raise HTTPException(
            status_code=400,
            detail="La serie debe ser más larga que test_length.",
        )

    train = values[: n - req.test_length]
    test = values[n - req.test_length :]

    context_df = pd.DataFrame(
        {
            "id": ["series_0"] * len(train),
            "timestamp": pd.RangeIndex(start=0, stop=len(train), step=1),
            "target": train.tolist(),
        }
    )

    pred_df = pipeline.predict_df(
        context_df,
        prediction_length=req.test_length,
        quantile_levels=[0.5],
        id_column="id",
        timestamp_column="timestamp",
        target="target",
    ).sort_values("timestamp")

    forecast = pred_df["predictions"].to_numpy(dtype=float)
    timestamps = pred_df["timestamp"].astype(str).tolist()

    mae = float(np.mean(np.abs(test - forecast)))
    # eps guards against division by zero when an actual value is 0.
    eps = 1e-8
    mape = float(np.mean(np.abs((test - forecast) / (test + eps)))) * 100.0
    # Pinball (quantile) loss at tau = 0.5 — equals MAE / 2 for the median.
    tau = 0.5
    diff = test - forecast
    wql = float(np.mean(np.maximum(tau * diff, (tau - 1) * diff)))

    metrics = BacktestMetrics(mae=mae, mape=mape, wql=wql)

    return BacktestResponse(
        metrics=metrics,
        forecast_median=forecast.tolist(),
        forecast_timestamps=timestamps,
        actuals=test.tolist(),
    )
|
app/main_v3.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Chronos-2 Forecasting API - Clean Architecture Version 3.0
|
| 3 |
+
|
| 4 |
+
Este es el punto de entrada de la aplicación, refactorizado siguiendo
|
| 5 |
+
Clean Architecture y principios SOLID.
|
| 6 |
+
|
| 7 |
+
Características:
|
| 8 |
+
- Arquitectura en capas (Presentation, Application, Domain, Infrastructure)
|
| 9 |
+
- Dependency Injection completa
|
| 10 |
+
- Separación de responsabilidades
|
| 11 |
+
- Código mantenible y testeable
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from fastapi import FastAPI
|
| 15 |
+
from fastapi.staticfiles import StaticFiles
|
| 16 |
+
from fastapi.responses import FileResponse
|
| 17 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 18 |
+
import os
|
| 19 |
+
|
| 20 |
+
from app.infrastructure.config.settings import get_settings
|
| 21 |
+
from app.utils.logger import setup_logger
|
| 22 |
+
|
| 23 |
+
# Import routers
|
| 24 |
+
from app.api.routes import (
|
| 25 |
+
health_router,
|
| 26 |
+
forecast_router,
|
| 27 |
+
anomaly_router,
|
| 28 |
+
backtest_router
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
logger = setup_logger(__name__)
|
| 32 |
+
settings = get_settings()
|
| 33 |
+
|
| 34 |
+
# ============================================================================
|
| 35 |
+
# Create FastAPI App
|
| 36 |
+
# ============================================================================
|
| 37 |
+
|
| 38 |
+
app = FastAPI(
|
| 39 |
+
title=settings.api_title,
|
| 40 |
+
version=settings.api_version,
|
| 41 |
+
description=settings.api_description,
|
| 42 |
+
docs_url="/docs",
|
| 43 |
+
redoc_url="/redoc",
|
| 44 |
+
openapi_url="/openapi.json"
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
# ============================================================================
|
| 48 |
+
# Middleware
|
| 49 |
+
# ============================================================================
|
| 50 |
+
|
| 51 |
+
# CORS Middleware
|
| 52 |
+
app.add_middleware(
|
| 53 |
+
CORSMiddleware,
|
| 54 |
+
allow_origins=settings.cors_origins,
|
| 55 |
+
allow_credentials=True,
|
| 56 |
+
allow_methods=["*"],
|
| 57 |
+
allow_headers=["*"],
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
# ============================================================================
|
| 61 |
+
# API Routes
|
| 62 |
+
# ============================================================================
|
| 63 |
+
|
| 64 |
+
# Health check endpoint (temporal, será movido a routes/health.py)
|
| 65 |
+
@app.get("/health", tags=["Health"])
async def health_check():
    """Check if the API is running and model is loaded.

    Always answers 200; the ``status`` field ("ok" or "error")
    distinguishes a healthy service from a model-load failure.
    """
    # Lazy import — presumably to avoid import-time model loading or a
    # circular import; the startup hook uses the same pattern. TODO confirm.
    from app.api.dependencies import get_forecast_model

    try:
        model = get_forecast_model()
        model_info = model.get_model_info()

        return {
            "status": "ok",
            "version": settings.api_version,
            "model": model_info
        }
    except Exception as e:
        # Broad catch is deliberate: a health probe must never raise.
        logger.error(f"Health check failed: {e}")
        return {
            "status": "error",
            "version": settings.api_version,
            "error": str(e)
        }
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# Include routers
|
| 89 |
+
app.include_router(health_router)
|
| 90 |
+
app.include_router(forecast_router)
|
| 91 |
+
app.include_router(anomaly_router)
|
| 92 |
+
app.include_router(backtest_router)
|
| 93 |
+
|
| 94 |
+
# ============================================================================
|
| 95 |
+
# Static Files (Excel Add-in)
|
| 96 |
+
# ============================================================================
|
| 97 |
+
|
| 98 |
+
if os.path.exists(settings.static_dir):
|
| 99 |
+
logger.info(f"Mounting static files from: {settings.static_dir}")
|
| 100 |
+
|
| 101 |
+
# Mount subdirectories
|
| 102 |
+
for subdir in ["assets", "taskpane", "commands"]:
|
| 103 |
+
path = os.path.join(settings.static_dir, subdir)
|
| 104 |
+
if os.path.exists(path):
|
| 105 |
+
app.mount(f"/{subdir}", StaticFiles(directory=path), name=subdir)
|
| 106 |
+
logger.info(f"Mounted /{subdir}")
|
| 107 |
+
|
| 108 |
+
# Manifest file
|
| 109 |
+
manifest_path = os.path.join(settings.static_dir, "manifest.xml")
|
| 110 |
+
if os.path.exists(manifest_path):
|
| 111 |
+
@app.get("/manifest.xml")
|
| 112 |
+
async def get_manifest():
|
| 113 |
+
"""Serve Excel Add-in manifest."""
|
| 114 |
+
return FileResponse(manifest_path, media_type="application/xml")
|
| 115 |
+
logger.info("Manifest endpoint registered")
|
| 116 |
+
else:
|
| 117 |
+
logger.warning(f"Static directory not found: {settings.static_dir}")
|
| 118 |
+
|
| 119 |
+
# ============================================================================
|
| 120 |
+
# Startup/Shutdown Events
|
| 121 |
+
# ============================================================================
|
| 122 |
+
|
| 123 |
+
@app.on_event("startup")
async def startup_event():
    """Initialize resources on startup.

    Logs a configuration banner, then eagerly loads the forecast model so
    the first request does not pay the load cost.
    """
    logger.info("=" * 60)
    logger.info(f"🚀 {settings.api_title} v{settings.api_version}")
    logger.info("=" * 60)
    logger.info("Architecture: Clean Architecture (4 layers)")
    logger.info("Principles: SOLID")
    logger.info(f"Model: {settings.model_id}")
    logger.info(f"Device: {settings.device_map}")
    logger.info("=" * 60)

    # Pre-load model
    try:
        # Lazy import, matching the pattern used in the /health endpoint.
        from app.api.dependencies import get_forecast_model
        logger.info("Pre-loading forecast model...")
        model = get_forecast_model()
        logger.info(f"✅ Model loaded: {model.get_model_info()}")
    except Exception as e:
        # Startup still succeeds: the API boots, but forecast endpoints
        # will fail until the model can be loaded.
        logger.error(f"❌ Failed to load model: {e}")
        logger.error("API will start but forecasting will fail until model loads")
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@app.on_event("shutdown")
async def shutdown_event():
    """Log a framed shutdown banner; no other resources need cleanup."""
    banner = "=" * 60
    for line in (banner, "Shutting down Chronos-2 API...", banner):
        logger.info(line)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# ============================================================================
|
| 155 |
+
# Root Endpoint
|
| 156 |
+
# ============================================================================
|
| 157 |
+
|
| 158 |
+
@app.get("/", tags=["Info"])
async def root():
    """API information and documentation links."""
    layer_map = {
        "presentation": "FastAPI (app/api/)",
        "application": "Use Cases (app/application/)",
        "domain": "Business Logic (app/domain/)",
        "infrastructure": "External Services (app/infrastructure/)",
    }
    info = {
        "name": settings.api_title,
        "version": settings.api_version,
        "description": settings.api_description,
        "docs": "/docs",
        "health": "/health",
        "architecture": "Clean Architecture with SOLID principles",
    }
    info["layers"] = layer_map
    return info
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
if __name__ == "__main__":
|
| 178 |
+
import uvicorn
|
| 179 |
+
|
| 180 |
+
uvicorn.run(
|
| 181 |
+
"app.main_v3:app",
|
| 182 |
+
host="0.0.0.0",
|
| 183 |
+
port=settings.api_port,
|
| 184 |
+
reload=True,
|
| 185 |
+
log_level=settings.log_level.lower()
|
| 186 |
+
)
|
app/main_working_version.py
ADDED
|
@@ -0,0 +1,643 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from typing import List, Dict, Optional

import numpy as np
import pandas as pd
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field

from chronos import Chronos2Pipeline


# =========================
# Model configuration
# =========================

# Both settings are overridable via environment variables so the same image
# can run on CPU or GPU without code changes.
MODEL_ID = os.getenv("CHRONOS_MODEL_ID", "amazon/chronos-2")
DEVICE_MAP = os.getenv("DEVICE_MAP", "cpu")  # "cpu" or "cuda"

app = FastAPI(
    title="Chronos-2 Universal Forecasting API",
    description=(
        "Servidor local (Docker) para pronósticos con Chronos-2: univariante, "
        "multivariante, covariables, escenarios, anomalías y backtesting."
    ),
    version="1.0.0",
)

# CORS for the Excel Add-in dev servers (ports 3000/3001 over HTTPS).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://localhost:3001", "https://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Single model load at process start.
# NOTE(review): this runs at import time, so weight download / load failures
# prevent the app from starting at all — confirm that is intended.
pipeline = Chronos2Pipeline.from_pretrained(MODEL_ID, device_map=DEVICE_MAP)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# =========================
|
| 43 |
+
# Modelos Pydantic comunes
|
| 44 |
+
# =========================
|
| 45 |
+
|
| 46 |
+
class BaseForecastConfig(BaseModel):
    """Common configuration shared by all forecast request payloads."""

    # Number of future steps to predict.
    prediction_length: int = Field(
        7, description="Horizonte de predicción (número de pasos futuros)"
    )
    # Quantile levels for the probabilistic forecast.
    quantile_levels: List[float] = Field(
        default_factory=lambda: [0.1, 0.5, 0.9],
        description="Cuantiles para el pronóstico probabilístico",
    )
    # ISO start of the history; when None the endpoints use integer indices.
    start_timestamp: Optional[str] = Field(
        default=None,
        description=(
            "Fecha/hora inicial del histórico (formato ISO). "
            "Si no se especifica, se usan índices enteros."
        ),
    )
    # Pandas frequency alias used with start_timestamp.
    freq: str = Field(
        "D",
        description="Frecuencia temporal (p.ej. 'D' diario, 'H' horario, 'W' semanal...).",
    )
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class UnivariateSeries(BaseModel):
    """A single time series given as a plain list of values."""

    values: List[float]


class MultiSeriesItem(BaseModel):
    """One series of a multi-series request, identified by ``series_id``."""

    series_id: str
    values: List[float]


class CovariatePoint(BaseModel):
    """
    Time point used both for the context (history) and for future covariates.
    """
    timestamp: Optional[str] = None  # optional when integer indices are used
    id: Optional[str] = None  # series id, defaults to 'series_0'
    target: Optional[float] = None  # target value (history only)
    covariates: Dict[str, float] = Field(
        default_factory=dict,
        description="Nombre -> valor de cada covariable dinámica.",
    )
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# =========================
|
| 90 |
+
# 1) Healthcheck
|
| 91 |
+
# =========================
|
| 92 |
+
|
| 93 |
+
@app.get("/health")
|
| 94 |
+
def health():
|
| 95 |
+
"""
|
| 96 |
+
Devuelve información básica del estado del servidor y el modelo cargado.
|
| 97 |
+
"""
|
| 98 |
+
return {
|
| 99 |
+
"status": "ok",
|
| 100 |
+
"model_id": MODEL_ID,
|
| 101 |
+
"device_map": DEVICE_MAP,
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# =========================
|
| 106 |
+
# 2) Pronóstico univariante
|
| 107 |
+
# =========================
|
| 108 |
+
|
| 109 |
+
class ForecastUnivariateRequest(BaseForecastConfig):
    """Request body for /forecast_univariate."""

    series: UnivariateSeries


class ForecastUnivariateResponse(BaseModel):
    """Probabilistic forecast for a single series."""

    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]  # e.g. "0.1" -> [..], "0.9" -> [..]
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@app.post("/forecast_univariate", response_model=ForecastUnivariateResponse)
|
| 120 |
+
def forecast_univariate(req: ForecastUnivariateRequest):
|
| 121 |
+
"""
|
| 122 |
+
Pronóstico para una sola serie temporal (univariante, sin covariables).
|
| 123 |
+
Pensado para uso directo desde Excel u otras herramientas sencillas.
|
| 124 |
+
"""
|
| 125 |
+
values = req.series.values
|
| 126 |
+
n = len(values)
|
| 127 |
+
if n == 0:
|
| 128 |
+
raise HTTPException(status_code=400, detail="La serie no puede estar vacía.")
|
| 129 |
+
|
| 130 |
+
# Construimos contexto como DataFrame largo (id, timestamp, target)
|
| 131 |
+
if req.start_timestamp:
|
| 132 |
+
timestamps = pd.date_range(
|
| 133 |
+
start=pd.to_datetime(req.start_timestamp),
|
| 134 |
+
periods=n,
|
| 135 |
+
freq=req.freq,
|
| 136 |
+
)
|
| 137 |
+
else:
|
| 138 |
+
timestamps = pd.RangeIndex(start=0, stop=n, step=1)
|
| 139 |
+
|
| 140 |
+
context_df = pd.DataFrame(
|
| 141 |
+
{
|
| 142 |
+
"id": ["series_0"] * n,
|
| 143 |
+
"timestamp": timestamps,
|
| 144 |
+
"target": values,
|
| 145 |
+
}
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
pred_df = pipeline.predict_df(
|
| 149 |
+
context_df,
|
| 150 |
+
prediction_length=req.prediction_length,
|
| 151 |
+
quantile_levels=req.quantile_levels,
|
| 152 |
+
id_column="id",
|
| 153 |
+
timestamp_column="timestamp",
|
| 154 |
+
target="target",
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
pred_df = pred_df.sort_values("timestamp")
|
| 158 |
+
timestamps_out = pred_df["timestamp"].astype(str).tolist()
|
| 159 |
+
median = pred_df["predictions"].astype(float).tolist()
|
| 160 |
+
|
| 161 |
+
quantiles_dict: Dict[str, List[float]] = {}
|
| 162 |
+
for q in req.quantile_levels:
|
| 163 |
+
key = f"{q:.3g}"
|
| 164 |
+
if key in pred_df.columns:
|
| 165 |
+
quantiles_dict[key] = pred_df[key].astype(float).tolist()
|
| 166 |
+
|
| 167 |
+
return ForecastUnivariateResponse(
|
| 168 |
+
timestamps=timestamps_out,
|
| 169 |
+
median=median,
|
| 170 |
+
quantiles=quantiles_dict,
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# =========================
|
| 175 |
+
# 3) Multi-serie (multi-id)
|
| 176 |
+
# =========================
|
| 177 |
+
|
| 178 |
+
class ForecastMultiSeriesRequest(BaseForecastConfig):
    """Request body for /forecast_multi_id: one entry per series."""

    series_list: List[MultiSeriesItem]


class SeriesForecast(BaseModel):
    """Forecast for one series of a multi-series request."""

    series_id: str
    timestamps: List[str]
    median: List[float]
    quantiles: Dict[str, List[float]]


class ForecastMultiSeriesResponse(BaseModel):
    """One forecast entry per (non-empty) input series."""

    forecasts: List[SeriesForecast]
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@app.post("/forecast_multi_id", response_model=ForecastMultiSeriesResponse)
|
| 194 |
+
def forecast_multi_id(req: ForecastMultiSeriesRequest):
|
| 195 |
+
"""
|
| 196 |
+
Pronóstico para múltiples series (por ejemplo, varios SKU o tiendas).
|
| 197 |
+
"""
|
| 198 |
+
if not req.series_list:
|
| 199 |
+
raise HTTPException(status_code=400, detail="Debes enviar al menos una serie.")
|
| 200 |
+
|
| 201 |
+
frames = []
|
| 202 |
+
for item in req.series_list:
|
| 203 |
+
n = len(item.values)
|
| 204 |
+
if n == 0:
|
| 205 |
+
continue
|
| 206 |
+
if req.start_timestamp:
|
| 207 |
+
timestamps = pd.date_range(
|
| 208 |
+
start=pd.to_datetime(req.start_timestamp),
|
| 209 |
+
periods=n,
|
| 210 |
+
freq=req.freq,
|
| 211 |
+
)
|
| 212 |
+
else:
|
| 213 |
+
timestamps = pd.RangeIndex(start=0, stop=n, step=1)
|
| 214 |
+
|
| 215 |
+
frames.append(
|
| 216 |
+
pd.DataFrame(
|
| 217 |
+
{
|
| 218 |
+
"id": [item.series_id] * n,
|
| 219 |
+
"timestamp": timestamps,
|
| 220 |
+
"target": item.values,
|
| 221 |
+
}
|
| 222 |
+
)
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
if not frames:
|
| 226 |
+
raise HTTPException(status_code=400, detail="Todas las series están vacías.")
|
| 227 |
+
|
| 228 |
+
context_df = pd.concat(frames, ignore_index=True)
|
| 229 |
+
|
| 230 |
+
pred_df = pipeline.predict_df(
|
| 231 |
+
context_df,
|
| 232 |
+
prediction_length=req.prediction_length,
|
| 233 |
+
quantile_levels=req.quantile_levels,
|
| 234 |
+
id_column="id",
|
| 235 |
+
timestamp_column="timestamp",
|
| 236 |
+
target="target",
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
forecasts: List[SeriesForecast] = []
|
| 240 |
+
for series_id, group in pred_df.groupby("id"):
|
| 241 |
+
group = group.sort_values("timestamp")
|
| 242 |
+
timestamps_out = group["timestamp"].astype(str).tolist()
|
| 243 |
+
median = group["predictions"].astype(float).tolist()
|
| 244 |
+
quantiles_dict: Dict[str, List[float]] = {}
|
| 245 |
+
for q in req.quantile_levels:
|
| 246 |
+
key = f"{q:.3g}"
|
| 247 |
+
if key in group.columns:
|
| 248 |
+
quantiles_dict[key] = group[key].astype(float).tolist()
|
| 249 |
+
|
| 250 |
+
forecasts.append(
|
| 251 |
+
SeriesForecast(
|
| 252 |
+
series_id=series_id,
|
| 253 |
+
timestamps=timestamps_out,
|
| 254 |
+
median=median,
|
| 255 |
+
quantiles=quantiles_dict,
|
| 256 |
+
)
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
return ForecastMultiSeriesResponse(forecasts=forecasts)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
# =========================
|
| 263 |
+
# 4) Pronóstico con covariables
|
| 264 |
+
# =========================
|
| 265 |
+
|
| 266 |
+
class ForecastWithCovariatesRequest(BaseForecastConfig):
    """History (context) plus optional known future covariates."""

    context: List[CovariatePoint]
    future: Optional[List[CovariatePoint]] = None


class ForecastWithCovariatesResponse(BaseModel):
    # rows with all pred_df columns serialized as strings
    pred_df: List[Dict[str, str]]
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@app.post("/forecast_with_covariates", response_model=ForecastWithCovariatesResponse)
|
| 277 |
+
def forecast_with_covariates(req: ForecastWithCovariatesRequest):
|
| 278 |
+
"""
|
| 279 |
+
Pronóstico con información de covariables (promos, precio, clima...) tanto
|
| 280 |
+
en el histórico (context) como en futuros posibles (future).
|
| 281 |
+
"""
|
| 282 |
+
if not req.context:
|
| 283 |
+
raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")
|
| 284 |
+
|
| 285 |
+
ctx_rows = []
|
| 286 |
+
for p in req.context:
|
| 287 |
+
if p.target is None:
|
| 288 |
+
continue
|
| 289 |
+
row = {
|
| 290 |
+
"id": p.id or "series_0",
|
| 291 |
+
"timestamp": p.timestamp,
|
| 292 |
+
"target": p.target,
|
| 293 |
+
}
|
| 294 |
+
for k, v in p.covariates.items():
|
| 295 |
+
row[k] = v
|
| 296 |
+
ctx_rows.append(row)
|
| 297 |
+
|
| 298 |
+
context_df = pd.DataFrame(ctx_rows)
|
| 299 |
+
if "timestamp" not in context_df or context_df["timestamp"].isna().any():
|
| 300 |
+
context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
|
| 301 |
+
|
| 302 |
+
future_df = None
|
| 303 |
+
if req.future:
|
| 304 |
+
fut_rows = []
|
| 305 |
+
for p in req.future:
|
| 306 |
+
row = {
|
| 307 |
+
"id": p.id or "series_0",
|
| 308 |
+
"timestamp": p.timestamp,
|
| 309 |
+
}
|
| 310 |
+
for k, v in p.covariates.items():
|
| 311 |
+
row[k] = v
|
| 312 |
+
fut_rows.append(row)
|
| 313 |
+
future_df = pd.DataFrame(fut_rows)
|
| 314 |
+
if "timestamp" not in future_df or future_df["timestamp"].isna().any():
|
| 315 |
+
future_df["timestamp"] = pd.RangeIndex(
|
| 316 |
+
start=context_df["timestamp"].max() + 1,
|
| 317 |
+
stop=context_df["timestamp"].max() + 1 + len(future_df),
|
| 318 |
+
step=1,
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
pred_df = pipeline.predict_df(
|
| 322 |
+
context_df,
|
| 323 |
+
future_df=future_df,
|
| 324 |
+
prediction_length=req.prediction_length,
|
| 325 |
+
quantile_levels=req.quantile_levels,
|
| 326 |
+
id_column="id",
|
| 327 |
+
timestamp_column="timestamp",
|
| 328 |
+
target="target",
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
pred_df = pred_df.sort_values(["id", "timestamp"])
|
| 332 |
+
out_records: List[Dict[str, str]] = []
|
| 333 |
+
for _, row in pred_df.iterrows():
|
| 334 |
+
record = {k: str(v) for k, v in row.items()}
|
| 335 |
+
out_records.append(record)
|
| 336 |
+
|
| 337 |
+
return ForecastWithCovariatesResponse(pred_df=out_records)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# =========================
|
| 341 |
+
# 5) Multivariante (varios targets)
|
| 342 |
+
# =========================
|
| 343 |
+
|
| 344 |
+
class MultivariateContextPoint(BaseModel):
    """One time point carrying several target columns plus covariates."""

    timestamp: Optional[str] = None
    id: Optional[str] = None
    targets: Dict[str, float]  # e.g. {"demand": 100, "returns": 5}
    covariates: Dict[str, float] = Field(default_factory=dict)


class ForecastMultivariateRequest(BaseForecastConfig):
    """Request body for /forecast_multivariate."""

    context: List[MultivariateContextPoint]
    target_columns: List[str]  # names of the target columns to forecast


class ForecastMultivariateResponse(BaseModel):
    """Raw prediction rows with every value serialized as a string."""

    pred_df: List[Dict[str, str]]
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
@app.post("/forecast_multivariate", response_model=ForecastMultivariateResponse)
|
| 361 |
+
def forecast_multivariate(req: ForecastMultivariateRequest):
|
| 362 |
+
"""
|
| 363 |
+
Pronóstico multivariante: múltiples columnas objetivo (p.ej. demanda y devoluciones).
|
| 364 |
+
"""
|
| 365 |
+
if not req.context:
|
| 366 |
+
raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")
|
| 367 |
+
if not req.target_columns:
|
| 368 |
+
raise HTTPException(status_code=400, detail="Debes indicar columnas objetivo.")
|
| 369 |
+
|
| 370 |
+
rows = []
|
| 371 |
+
for p in req.context:
|
| 372 |
+
base = {
|
| 373 |
+
"id": p.id or "series_0",
|
| 374 |
+
"timestamp": p.timestamp,
|
| 375 |
+
}
|
| 376 |
+
for t_name, t_val in p.targets.items():
|
| 377 |
+
base[t_name] = t_val
|
| 378 |
+
for k, v in p.covariates.items():
|
| 379 |
+
base[k] = v
|
| 380 |
+
rows.append(base)
|
| 381 |
+
|
| 382 |
+
context_df = pd.DataFrame(rows)
|
| 383 |
+
if "timestamp" not in context_df or context_df["timestamp"].isna().any():
|
| 384 |
+
context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
|
| 385 |
+
|
| 386 |
+
pred_df = pipeline.predict_df(
|
| 387 |
+
context_df,
|
| 388 |
+
prediction_length=req.prediction_length,
|
| 389 |
+
quantile_levels=req.quantile_levels,
|
| 390 |
+
id_column="id",
|
| 391 |
+
timestamp_column="timestamp",
|
| 392 |
+
target=req.target_columns,
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
pred_df = pred_df.sort_values(["id", "timestamp"])
|
| 396 |
+
out_records = [{k: str(v) for k, v in row.items()} for _, row in pred_df.iterrows()]
|
| 397 |
+
return ForecastMultivariateResponse(pred_df=out_records)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
# =========================
|
| 401 |
+
# 6) Escenarios (what-if)
|
| 402 |
+
# =========================
|
| 403 |
+
|
| 404 |
+
class ScenarioDefinition(BaseModel):
    """A named what-if scenario described by its future covariates."""

    name: str
    future_covariates: List[CovariatePoint]


class ScenarioForecast(BaseModel):
    """Forecast rows produced for one scenario."""

    name: str
    pred_df: List[Dict[str, str]]


class ForecastScenariosRequest(BaseForecastConfig):
    """Shared history plus one or more scenarios to evaluate."""

    context: List[CovariatePoint]
    scenarios: List[ScenarioDefinition]


class ForecastScenariosResponse(BaseModel):
    """One forecast per requested scenario, in request order."""

    scenarios: List[ScenarioForecast]
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
@app.post("/forecast_scenarios", response_model=ForecastScenariosResponse)
|
| 424 |
+
def forecast_scenarios(req: ForecastScenariosRequest):
|
| 425 |
+
"""
|
| 426 |
+
Evaluación de múltiples escenarios (what-if) cambiando las covariables futuras
|
| 427 |
+
(por ejemplo, promo ON/OFF, diferentes precios, etc.).
|
| 428 |
+
"""
|
| 429 |
+
if not req.context:
|
| 430 |
+
raise HTTPException(status_code=400, detail="El contexto no puede estar vacío.")
|
| 431 |
+
if not req.scenarios:
|
| 432 |
+
raise HTTPException(status_code=400, detail="Debes definir al menos un escenario.")
|
| 433 |
+
|
| 434 |
+
ctx_rows = []
|
| 435 |
+
for p in req.context:
|
| 436 |
+
if p.target is None:
|
| 437 |
+
continue
|
| 438 |
+
row = {
|
| 439 |
+
"id": p.id or "series_0",
|
| 440 |
+
"timestamp": p.timestamp,
|
| 441 |
+
"target": p.target,
|
| 442 |
+
}
|
| 443 |
+
for k, v in p.covariates.items():
|
| 444 |
+
row[k] = v
|
| 445 |
+
ctx_rows.append(row)
|
| 446 |
+
|
| 447 |
+
context_df = pd.DataFrame(ctx_rows)
|
| 448 |
+
if "timestamp" not in context_df or context_df["timestamp"].isna().any():
|
| 449 |
+
context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
|
| 450 |
+
|
| 451 |
+
results: List[ScenarioForecast] = []
|
| 452 |
+
|
| 453 |
+
for scen in req.scenarios:
|
| 454 |
+
fut_rows = []
|
| 455 |
+
for p in scen.future_covariates:
|
| 456 |
+
row = {
|
| 457 |
+
"id": p.id or "series_0",
|
| 458 |
+
"timestamp": p.timestamp,
|
| 459 |
+
}
|
| 460 |
+
for k, v in p.covariates.items():
|
| 461 |
+
row[k] = v
|
| 462 |
+
fut_rows.append(row)
|
| 463 |
+
future_df = pd.DataFrame(fut_rows)
|
| 464 |
+
if "timestamp" not in future_df or future_df["timestamp"].isna().any():
|
| 465 |
+
future_df["timestamp"] = pd.RangeIndex(
|
| 466 |
+
start=context_df["timestamp"].max() + 1,
|
| 467 |
+
stop=context_df["timestamp"].max() + 1 + len(future_df),
|
| 468 |
+
step=1,
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
pred_df = pipeline.predict_df(
|
| 472 |
+
context_df,
|
| 473 |
+
future_df=future_df,
|
| 474 |
+
prediction_length=req.prediction_length,
|
| 475 |
+
quantile_levels=req.quantile_levels,
|
| 476 |
+
id_column="id",
|
| 477 |
+
timestamp_column="timestamp",
|
| 478 |
+
target="target",
|
| 479 |
+
)
|
| 480 |
+
pred_df = pred_df.sort_values(["id", "timestamp"])
|
| 481 |
+
out_records = [{k: str(v) for k, v in row.items()} for _, row in pred_df.iterrows()]
|
| 482 |
+
|
| 483 |
+
results.append(ScenarioForecast(name=scen.name, pred_df=out_records))
|
| 484 |
+
|
| 485 |
+
return ForecastScenariosResponse(scenarios=results)
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
# =========================
|
| 489 |
+
# 7) Detección de anomalías
|
| 490 |
+
# =========================
|
| 491 |
+
|
| 492 |
+
class AnomalyDetectionRequest(BaseModel):
    """History plus recently observed values to test against the forecast."""

    context: UnivariateSeries
    recent_observed: List[float]  # must have length == prediction_length
    prediction_length: int = 7
    quantile_low: float = 0.05
    quantile_high: float = 0.95


class AnomalyPoint(BaseModel):
    """Verdict for one observed point versus its forecast interval."""

    index: int
    value: float
    predicted_median: float
    lower: float
    upper: float
    is_anomaly: bool


class AnomalyDetectionResponse(BaseModel):
    anomalies: List[AnomalyPoint]
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
@app.post("/detect_anomalies", response_model=AnomalyDetectionResponse)
|
| 514 |
+
def detect_anomalies(req: AnomalyDetectionRequest):
|
| 515 |
+
"""
|
| 516 |
+
Marca como anomalías los puntos observados recientes que caen fuera del
|
| 517 |
+
intervalo [quantile_low, quantile_high] del pronóstico.
|
| 518 |
+
"""
|
| 519 |
+
n_hist = len(req.context.values)
|
| 520 |
+
if n_hist == 0:
|
| 521 |
+
raise HTTPException(status_code=400, detail="La serie histórica no puede estar vacía.")
|
| 522 |
+
if len(req.recent_observed) != req.prediction_length:
|
| 523 |
+
raise HTTPException(
|
| 524 |
+
status_code=400,
|
| 525 |
+
detail="recent_observed debe tener la misma longitud que prediction_length.",
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
context_df = pd.DataFrame(
|
| 529 |
+
{
|
| 530 |
+
"id": ["series_0"] * n_hist,
|
| 531 |
+
"timestamp": pd.RangeIndex(start=0, stop=n_hist, step=1),
|
| 532 |
+
"target": req.context.values,
|
| 533 |
+
}
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
quantiles = sorted({req.quantile_low, 0.5, req.quantile_high})
|
| 537 |
+
pred_df = pipeline.predict_df(
|
| 538 |
+
context_df,
|
| 539 |
+
prediction_length=req.prediction_length,
|
| 540 |
+
quantile_levels=quantiles,
|
| 541 |
+
id_column="id",
|
| 542 |
+
timestamp_column="timestamp",
|
| 543 |
+
target="target",
|
| 544 |
+
).sort_values("timestamp")
|
| 545 |
+
|
| 546 |
+
q_low_col = f"{req.quantile_low:.3g}"
|
| 547 |
+
q_high_col = f"{req.quantile_high:.3g}"
|
| 548 |
+
|
| 549 |
+
anomalies: List[AnomalyPoint] = []
|
| 550 |
+
for i, (obs, (_, row)) in enumerate(zip(req.recent_observed, pred_df.iterrows())):
|
| 551 |
+
lower = float(row[q_low_col])
|
| 552 |
+
upper = float(row[q_high_col])
|
| 553 |
+
median = float(row["predictions"])
|
| 554 |
+
is_anom = (obs < lower) or (obs > upper)
|
| 555 |
+
anomalies.append(
|
| 556 |
+
AnomalyPoint(
|
| 557 |
+
index=i,
|
| 558 |
+
value=obs,
|
| 559 |
+
predicted_median=median,
|
| 560 |
+
lower=lower,
|
| 561 |
+
upper=upper,
|
| 562 |
+
is_anomaly=is_anom,
|
| 563 |
+
)
|
| 564 |
+
)
|
| 565 |
+
|
| 566 |
+
return AnomalyDetectionResponse(anomalies=anomalies)
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
# =========================
|
| 570 |
+
# 8) Backtest simple
|
| 571 |
+
# =========================
|
| 572 |
+
|
| 573 |
+
class BacktestRequest(BaseModel):
    """Input for the simple hold-out backtest."""

    series: UnivariateSeries
    # NOTE(review): /backtest_simple forecasts `test_length` steps and never
    # reads `prediction_length` — confirm whether this field is intended.
    prediction_length: int = 7
    test_length: int = 28


class BacktestMetrics(BaseModel):
    mae: float
    mape: float
    wql: float  # approximate Weighted Quantile Loss at quantile 0.5


class BacktestResponse(BaseModel):
    metrics: BacktestMetrics
    forecast_median: List[float]
    forecast_timestamps: List[str]
    actuals: List[float]
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
@app.post("/backtest_simple", response_model=BacktestResponse)
|
| 593 |
+
def backtest_simple(req: BacktestRequest):
|
| 594 |
+
"""
|
| 595 |
+
Backtest sencillo: separamos un tramo final de la serie como test, pronosticamos
|
| 596 |
+
ese tramo y calculamos métricas MAE / MAPE / WQL.
|
| 597 |
+
"""
|
| 598 |
+
values = np.array(req.series.values, dtype=float)
|
| 599 |
+
n = len(values)
|
| 600 |
+
if n <= req.test_length:
|
| 601 |
+
raise HTTPException(
|
| 602 |
+
status_code=400,
|
| 603 |
+
detail="La serie debe ser más larga que test_length.",
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
train = values[: n - req.test_length]
|
| 607 |
+
test = values[n - req.test_length :]
|
| 608 |
+
|
| 609 |
+
context_df = pd.DataFrame(
|
| 610 |
+
{
|
| 611 |
+
"id": ["series_0"] * len(train),
|
| 612 |
+
"timestamp": pd.RangeIndex(start=0, stop=len(train), step=1),
|
| 613 |
+
"target": train.tolist(),
|
| 614 |
+
}
|
| 615 |
+
)
|
| 616 |
+
|
| 617 |
+
pred_df = pipeline.predict_df(
|
| 618 |
+
context_df,
|
| 619 |
+
prediction_length=req.test_length,
|
| 620 |
+
quantile_levels=[0.5],
|
| 621 |
+
id_column="id",
|
| 622 |
+
timestamp_column="timestamp",
|
| 623 |
+
target="target",
|
| 624 |
+
).sort_values("timestamp")
|
| 625 |
+
|
| 626 |
+
forecast = pred_df["predictions"].to_numpy(dtype=float)
|
| 627 |
+
timestamps = pred_df["timestamp"].astype(str).tolist()
|
| 628 |
+
|
| 629 |
+
mae = float(np.mean(np.abs(test - forecast)))
|
| 630 |
+
eps = 1e-8
|
| 631 |
+
mape = float(np.mean(np.abs((test - forecast) / (test + eps)))) * 100.0
|
| 632 |
+
tau = 0.5
|
| 633 |
+
diff = test - forecast
|
| 634 |
+
wql = float(np.mean(np.maximum(tau * diff, (tau - 1) * diff)))
|
| 635 |
+
|
| 636 |
+
metrics = BacktestMetrics(mae=mae, mape=mape, wql=wql)
|
| 637 |
+
|
| 638 |
+
return BacktestResponse(
|
| 639 |
+
metrics=metrics,
|
| 640 |
+
forecast_median=forecast.tolist(),
|
| 641 |
+
forecast_timestamps=timestamps,
|
| 642 |
+
actuals=test.tolist(),
|
| 643 |
+
)
|
app/schemas/__init__.py
ADDED
|
File without changes
|
app/schemas/requests/__init__.py
ADDED
|
File without changes
|