From 25dd9f432f2e593c79d80d49d092b68d4de93739 Mon Sep 17 00:00:00 2001
From: xumengwen <421269175@qq.com>
Date: Mon, 17 Jun 2024 09:52:43 +0000
Subject: [PATCH] first commit
---
.gitmodules | 12 +
CODE_OF_CONDUCT.md | 9 +
LICENSE | 21 +
README.md | 305 +++
SECURITY.md | 41 +
config/default/autoformer.yaml | 45 +
config/default/csdi.yaml | 45 +
config/default/dlinear.yaml | 32 +
config/default/gru.yaml | 34 +
config/default/gru_maf.yaml | 42 +
config/default/gru_nvp.yaml | 42 +
config/default/itransformer.yaml | 41 +
config/default/linear.yaml | 29 +
config/default/mean.yaml | 28 +
config/default/nhits.yaml | 43 +
config/default/nlinear.yaml | 31 +
config/default/patchtst.yaml | 37 +
config/default/timegrad.yaml | 40 +
config/default/timesnet.yaml | 36 +
config/default/trans_maf.yaml | 46 +
config/default/transformer.yaml | 38 +
config/default/tsdiff.yaml | 44 +
config/ltsf/electricity_ltsf/csdi.yaml | 45 +
config/ltsf/electricity_ltsf/dlinear.yaml | 35 +
config/ltsf/electricity_ltsf/gru_nvp.yaml | 45 +
config/ltsf/electricity_ltsf/patchtst.yaml | 40 +
config/ltsf/electricity_ltsf/timegrad.yaml | 44 +
config/ltsf/electricity_ltsf/timesnet.yaml | 40 +
config/ltsf/etth1/csdi.yaml | 47 +
config/ltsf/etth1/dlinear.yaml | 35 +
config/ltsf/etth1/gru_nvp.yaml | 45 +
config/ltsf/etth1/patchtst.yaml | 39 +
config/ltsf/etth1/timegrad.yaml | 44 +
config/ltsf/etth2/csdi.yaml | 47 +
config/ltsf/etth2/dlinear.yaml | 35 +
config/ltsf/etth2/gru_nvp.yaml | 46 +
config/ltsf/etth2/patchtst.yaml | 40 +
config/ltsf/etth2/timegrad.yaml | 44 +
config/ltsf/ettm1/csdi.yaml | 47 +
config/ltsf/ettm1/dlinear.yaml | 35 +
config/ltsf/ettm1/gru_nvp.yaml | 45 +
config/ltsf/ettm1/patchtst.yaml | 39 +
config/ltsf/ettm1/timegrad.yaml | 44 +
config/ltsf/ettm2/csdi.yaml | 47 +
config/ltsf/ettm2/dlinear.yaml | 35 +
config/ltsf/ettm2/gru_nvp.yaml | 45 +
config/ltsf/ettm2/patchtst.yaml | 39 +
config/ltsf/ettm2/timegrad.yaml | 44 +
config/ltsf/exchange_ltsf/csdi.yaml | 47 +
config/ltsf/exchange_ltsf/dlinear.yaml | 35 +
config/ltsf/exchange_ltsf/gru_nvp.yaml | 45 +
config/ltsf/exchange_ltsf/patchtst.yaml | 39 +
config/ltsf/exchange_ltsf/timegrad.yaml | 44 +
config/ltsf/illness_ltsf/csdi.yaml | 47 +
config/ltsf/illness_ltsf/dlinear.yaml | 35 +
config/ltsf/illness_ltsf/gru_nvp.yaml | 45 +
config/ltsf/illness_ltsf/patchtst.yaml | 40 +
config/ltsf/illness_ltsf/timegrad.yaml | 44 +
config/ltsf/traffic_ltsf/csdi.yaml | 47 +
config/ltsf/traffic_ltsf/dlinear.yaml | 35 +
config/ltsf/traffic_ltsf/gru_nvp.yaml | 45 +
config/ltsf/traffic_ltsf/patchtst.yaml | 39 +
config/ltsf/traffic_ltsf/timegrad.yaml | 44 +
config/ltsf/weather_ltsf/csdi.yaml | 47 +
config/ltsf/weather_ltsf/dlinear.yaml | 35 +
config/ltsf/weather_ltsf/gru_nvp.yaml | 45 +
config/ltsf/weather_ltsf/patchtst.yaml | 39 +
config/ltsf/weather_ltsf/timegrad.yaml | 44 +
config/m4/m4_daily/csdi.yaml | 46 +
config/m4/m4_daily/dlinear.yaml | 34 +
config/m4/m4_daily/gru_nvp.yaml | 44 +
config/m4/m4_daily/patchtst.yaml | 39 +
config/m4/m4_daily/timegrad.yaml | 44 +
config/m4/m4_weekly/csdi.yaml | 46 +
config/m4/m4_weekly/dlinear.yaml | 34 +
config/m4/m4_weekly/gru_nvp.yaml | 44 +
config/m4/m4_weekly/patchtst.yaml | 39 +
config/m4/m4_weekly/timegrad.yaml | 44 +
config/m4/m5/csdi.yaml | 46 +
config/m4/m5/dlinear.yaml | 35 +
config/m4/m5/gru_nvp.yaml | 45 +
config/m4/m5/patchtst.yaml | 40 +
config/m4/m5/timegrad.yaml | 44 +
config/m4/tourism_monthly/csdi.yaml | 46 +
config/m4/tourism_monthly/dlinear.yaml | 34 +
config/m4/tourism_monthly/gru_nvp.yaml | 44 +
config/m4/tourism_monthly/patchtst.yaml | 39 +
config/m4/tourism_monthly/timegrad.yaml | 44 +
config/pipeline_config.yaml | 53 +
config/stsf/electricity/dlinear.yaml | 32 +
config/stsf/electricity/gru.yaml | 34 +
config/stsf/electricity/gru_maf.yaml | 42 +
config/stsf/electricity/gru_nvp.yaml | 42 +
config/stsf/electricity/patchtst.yaml | 37 +
config/stsf/electricity/timegrad.yaml | 41 +
config/stsf/electricity/timesnet.yaml | 36 +
config/stsf/electricity/trans_maf.yaml | 46 +
config/stsf/electricity/transformer.yaml | 38 +
config/stsf/exchange/dlinear.yaml | 32 +
config/stsf/exchange/gru.yaml | 34 +
config/stsf/exchange/gru_maf.yaml | 42 +
config/stsf/exchange/gru_nvp.yaml | 42 +
config/stsf/exchange/patchtst.yaml | 37 +
config/stsf/exchange/timegrad.yaml | 41 +
config/stsf/exchange/timesnet.yaml | 36 +
config/stsf/exchange/trans_maf.yaml | 46 +
config/stsf/exchange/transformer.yaml | 38 +
config/stsf/solar/dlinear.yaml | 32 +
config/stsf/solar/gru.yaml | 34 +
config/stsf/solar/gru_maf.yaml | 42 +
config/stsf/solar/gru_nvp.yaml | 42 +
config/stsf/solar/patchtst.yaml | 37 +
config/stsf/solar/timegrad.yaml | 41 +
config/stsf/solar/timesnet.yaml | 36 +
config/stsf/solar/trans_maf.yaml | 46 +
config/stsf/solar/transformer.yaml | 38 +
config/stsf/traffic/csdi.yaml | 45 +
config/stsf/traffic/dlinear.yaml | 32 +
config/stsf/traffic/gru.yaml | 34 +
config/stsf/traffic/gru_maf.yaml | 42 +
config/stsf/traffic/gru_nvp.yaml | 42 +
config/stsf/traffic/patchtst.yaml | 38 +
config/stsf/traffic/timegrad.yaml | 41 +
config/stsf/traffic/timesnet.yaml | 36 +
config/stsf/traffic/trans_maf.yaml | 46 +
config/stsf/traffic/transformer.yaml | 38 +
config/stsf/wiki/csdi.yaml | 45 +
config/stsf/wiki/dlinear.yaml | 32 +
config/stsf/wiki/gru.yaml | 34 +
config/stsf/wiki/gru_maf.yaml | 42 +
config/stsf/wiki/gru_nvp.yaml | 42 +
config/stsf/wiki/patchtst.yaml | 38 +
config/stsf/wiki/timegrad.yaml | 41 +
config/stsf/wiki/timesnet.yaml | 36 +
config/stsf/wiki/trans_maf.yaml | 46 +
config/stsf/wiki/transformer.yaml | 38 +
config/tsfm/chronos.yaml | 28 +
config/tsfm/forecastpfn.yaml | 29 +
config/tsfm/lag_llama.yaml | 29 +
config/tsfm/moirai.yaml | 31 +
config/tsfm/timer.yaml | 28 +
config/tsfm/timesfm.yaml | 31 +
config/tsfm/tinytimemixer.yaml | 25 +
config/tsfm/units.yaml | 28 +
docs/benchmark/FOUNDATION_MODEL.md | 32 +
docs/benchmark/README.md | 23 +
docs/benchmark/figs/FM_dataset.jpg | Bin 0 -> 268047 bytes
docs/benchmark/figs/FM_summary.jpg | Bin 0 -> 301553 bytes
docs/benchmark/figs/fm_short_term.jpg | Bin 0 -> 262161 bytes
docs/benchmark/figs/fm_var_hor.jpg | Bin 0 -> 1242920 bytes
docs/benchmark/figs/long_bench.jpg | Bin 0 -> 3101238 bytes
docs/benchmark/figs/short_bench.jpg | Bin 0 -> 885625 bytes
docs/documentation/README.md | 215 ++
docs/figs/probts_framework.png | Bin 0 -> 430439 bytes
docs/figs/probts_logo.png | Bin 0 -> 278075 bytes
probts/__init__.py | 3 +
probts/callbacks/__init__.py | 2 +
probts/callbacks/memory_callback.py | 129 ++
probts/callbacks/time_callback.py | 76 +
probts/data/__init__.py | 3 +
probts/data/data_loader.py | 403 ++++
probts/data/data_manager.py | 308 +++
probts/data/data_module.py | 46 +
probts/data/ltsf_datasets.py | 106 +
probts/data/stsf_datasets.py | 193 ++
probts/data/time_features.py | 287 +++
probts/model/__init__.py | 1 +
probts/model/forecast_module.py | 146 ++
probts/model/forecaster/__init__.py | 3 +
probts/model/forecaster/forecaster.py | 169 ++
.../forecaster/point_forecaster/__init__.py | 42 +
.../forecaster/point_forecaster/autoformer.py | 139 ++
.../forecaster/point_forecaster/dlinear.py | 114 +
.../point_forecaster/forecastpfn.py | 167 ++
.../model/forecaster/point_forecaster/gru.py | 72 +
.../point_forecaster/itransformer.py | 112 +
.../forecaster/point_forecaster/linear.py | 56 +
.../model/forecaster/point_forecaster/mean.py | 33 +
.../forecaster/point_forecaster/nhits.py | 468 +++++
.../forecaster/point_forecaster/nlinear.py | 66 +
.../forecaster/point_forecaster/patchtst.py | 127 ++
.../forecaster/point_forecaster/timer.py | 93 +
.../forecaster/point_forecaster/timesfm.py | 85 +
.../forecaster/point_forecaster/timesnet.py | 163 ++
.../point_forecaster/tinytimemixer.py | 48 +
.../point_forecaster/transformer.py | 98 +
.../forecaster/point_forecaster/units.py | 1062 ++++++++++
.../forecaster/prob_forecaster/__init__.py | 40 +
.../forecaster/prob_forecaster/chronos.py | 66 +
.../model/forecaster/prob_forecaster/csdi.py | 200 ++
.../forecaster/prob_forecaster/gru_maf.py | 114 +
.../forecaster/prob_forecaster/gru_nvp.py | 114 +
.../forecaster/prob_forecaster/lag_llama.py | 86 +
.../forecaster/prob_forecaster/moirai.py | 62 +
.../forecaster/prob_forecaster/timegrad.py | 112 +
.../forecaster/prob_forecaster/trans_maf.py | 138 ++
.../forecaster/prob_forecaster/tsdiff.py | 242 +++
probts/model/nn/MAF.py | 197 ++
probts/model/nn/RealNVP.py | 115 ++
probts/model/nn/__init__.py | 3 +
probts/model/nn/flow_model.py | 149 ++
probts/model/nn/gaussian_diffusion.py | 424 ++++
probts/model/nn/layers/AutoCorrelation.py | 159 ++
probts/model/nn/layers/Autoformer_EncDec.py | 173 ++
probts/model/nn/layers/Conv_Blocks.py | 60 +
probts/model/nn/layers/Embed.py | 194 ++
probts/model/nn/layers/Moirai_backbone.py | 513 +++++
probts/model/nn/layers/PatchTST_backbone.py | 387 ++++
probts/model/nn/layers/PatchTST_layers.py | 129 ++
probts/model/nn/layers/RevIN.py | 69 +
.../model/nn/layers/SelfAttention_Family.py | 331 +++
probts/model/nn/layers/Transformer_EncDec.py | 134 ++
probts/model/nn/layers/__init__.py | 5 +
probts/model/nn/layers/diffusion_layers.py | 173 ++
probts/model/nn/layers/s4.py | 1840 +++++++++++++++++
probts/model/nn/layers/s4_backbones.py | 184 ++
probts/utils/__init__.py | 2 +
probts/utils/download_datasets.py | 12 +
probts/utils/evaluator.py | 90 +
probts/utils/masking.py | 26 +
probts/utils/metrics.py | 80 +
probts/utils/utils.py | 228 ++
pyproject.toml | 68 +
run.py | 170 ++
run.sh | 34 +
run_tsfm.sh | 34 +
submodules/lag_llama | 1 +
submodules/timesfm | 1 +
submodules/tsfm | 1 +
submodules/uni2ts | 1 +
230 files changed, 18169 insertions(+)
create mode 100644 .gitmodules
create mode 100644 CODE_OF_CONDUCT.md
create mode 100644 LICENSE
create mode 100644 README.md
create mode 100644 SECURITY.md
create mode 100644 config/default/autoformer.yaml
create mode 100644 config/default/csdi.yaml
create mode 100644 config/default/dlinear.yaml
create mode 100644 config/default/gru.yaml
create mode 100644 config/default/gru_maf.yaml
create mode 100644 config/default/gru_nvp.yaml
create mode 100644 config/default/itransformer.yaml
create mode 100644 config/default/linear.yaml
create mode 100644 config/default/mean.yaml
create mode 100644 config/default/nhits.yaml
create mode 100644 config/default/nlinear.yaml
create mode 100644 config/default/patchtst.yaml
create mode 100644 config/default/timegrad.yaml
create mode 100644 config/default/timesnet.yaml
create mode 100644 config/default/trans_maf.yaml
create mode 100644 config/default/transformer.yaml
create mode 100644 config/default/tsdiff.yaml
create mode 100644 config/ltsf/electricity_ltsf/csdi.yaml
create mode 100644 config/ltsf/electricity_ltsf/dlinear.yaml
create mode 100644 config/ltsf/electricity_ltsf/gru_nvp.yaml
create mode 100644 config/ltsf/electricity_ltsf/patchtst.yaml
create mode 100644 config/ltsf/electricity_ltsf/timegrad.yaml
create mode 100644 config/ltsf/electricity_ltsf/timesnet.yaml
create mode 100644 config/ltsf/etth1/csdi.yaml
create mode 100644 config/ltsf/etth1/dlinear.yaml
create mode 100644 config/ltsf/etth1/gru_nvp.yaml
create mode 100644 config/ltsf/etth1/patchtst.yaml
create mode 100644 config/ltsf/etth1/timegrad.yaml
create mode 100644 config/ltsf/etth2/csdi.yaml
create mode 100644 config/ltsf/etth2/dlinear.yaml
create mode 100644 config/ltsf/etth2/gru_nvp.yaml
create mode 100644 config/ltsf/etth2/patchtst.yaml
create mode 100644 config/ltsf/etth2/timegrad.yaml
create mode 100644 config/ltsf/ettm1/csdi.yaml
create mode 100644 config/ltsf/ettm1/dlinear.yaml
create mode 100644 config/ltsf/ettm1/gru_nvp.yaml
create mode 100644 config/ltsf/ettm1/patchtst.yaml
create mode 100644 config/ltsf/ettm1/timegrad.yaml
create mode 100644 config/ltsf/ettm2/csdi.yaml
create mode 100644 config/ltsf/ettm2/dlinear.yaml
create mode 100644 config/ltsf/ettm2/gru_nvp.yaml
create mode 100644 config/ltsf/ettm2/patchtst.yaml
create mode 100644 config/ltsf/ettm2/timegrad.yaml
create mode 100644 config/ltsf/exchange_ltsf/csdi.yaml
create mode 100644 config/ltsf/exchange_ltsf/dlinear.yaml
create mode 100644 config/ltsf/exchange_ltsf/gru_nvp.yaml
create mode 100644 config/ltsf/exchange_ltsf/patchtst.yaml
create mode 100644 config/ltsf/exchange_ltsf/timegrad.yaml
create mode 100644 config/ltsf/illness_ltsf/csdi.yaml
create mode 100644 config/ltsf/illness_ltsf/dlinear.yaml
create mode 100644 config/ltsf/illness_ltsf/gru_nvp.yaml
create mode 100644 config/ltsf/illness_ltsf/patchtst.yaml
create mode 100644 config/ltsf/illness_ltsf/timegrad.yaml
create mode 100644 config/ltsf/traffic_ltsf/csdi.yaml
create mode 100644 config/ltsf/traffic_ltsf/dlinear.yaml
create mode 100644 config/ltsf/traffic_ltsf/gru_nvp.yaml
create mode 100644 config/ltsf/traffic_ltsf/patchtst.yaml
create mode 100644 config/ltsf/traffic_ltsf/timegrad.yaml
create mode 100644 config/ltsf/weather_ltsf/csdi.yaml
create mode 100644 config/ltsf/weather_ltsf/dlinear.yaml
create mode 100644 config/ltsf/weather_ltsf/gru_nvp.yaml
create mode 100644 config/ltsf/weather_ltsf/patchtst.yaml
create mode 100644 config/ltsf/weather_ltsf/timegrad.yaml
create mode 100644 config/m4/m4_daily/csdi.yaml
create mode 100644 config/m4/m4_daily/dlinear.yaml
create mode 100644 config/m4/m4_daily/gru_nvp.yaml
create mode 100644 config/m4/m4_daily/patchtst.yaml
create mode 100644 config/m4/m4_daily/timegrad.yaml
create mode 100644 config/m4/m4_weekly/csdi.yaml
create mode 100644 config/m4/m4_weekly/dlinear.yaml
create mode 100644 config/m4/m4_weekly/gru_nvp.yaml
create mode 100644 config/m4/m4_weekly/patchtst.yaml
create mode 100644 config/m4/m4_weekly/timegrad.yaml
create mode 100644 config/m4/m5/csdi.yaml
create mode 100644 config/m4/m5/dlinear.yaml
create mode 100644 config/m4/m5/gru_nvp.yaml
create mode 100644 config/m4/m5/patchtst.yaml
create mode 100644 config/m4/m5/timegrad.yaml
create mode 100644 config/m4/tourism_monthly/csdi.yaml
create mode 100644 config/m4/tourism_monthly/dlinear.yaml
create mode 100644 config/m4/tourism_monthly/gru_nvp.yaml
create mode 100644 config/m4/tourism_monthly/patchtst.yaml
create mode 100644 config/m4/tourism_monthly/timegrad.yaml
create mode 100644 config/pipeline_config.yaml
create mode 100644 config/stsf/electricity/dlinear.yaml
create mode 100644 config/stsf/electricity/gru.yaml
create mode 100644 config/stsf/electricity/gru_maf.yaml
create mode 100644 config/stsf/electricity/gru_nvp.yaml
create mode 100644 config/stsf/electricity/patchtst.yaml
create mode 100644 config/stsf/electricity/timegrad.yaml
create mode 100644 config/stsf/electricity/timesnet.yaml
create mode 100644 config/stsf/electricity/trans_maf.yaml
create mode 100644 config/stsf/electricity/transformer.yaml
create mode 100644 config/stsf/exchange/dlinear.yaml
create mode 100644 config/stsf/exchange/gru.yaml
create mode 100644 config/stsf/exchange/gru_maf.yaml
create mode 100644 config/stsf/exchange/gru_nvp.yaml
create mode 100644 config/stsf/exchange/patchtst.yaml
create mode 100644 config/stsf/exchange/timegrad.yaml
create mode 100644 config/stsf/exchange/timesnet.yaml
create mode 100644 config/stsf/exchange/trans_maf.yaml
create mode 100644 config/stsf/exchange/transformer.yaml
create mode 100644 config/stsf/solar/dlinear.yaml
create mode 100644 config/stsf/solar/gru.yaml
create mode 100644 config/stsf/solar/gru_maf.yaml
create mode 100644 config/stsf/solar/gru_nvp.yaml
create mode 100644 config/stsf/solar/patchtst.yaml
create mode 100644 config/stsf/solar/timegrad.yaml
create mode 100644 config/stsf/solar/timesnet.yaml
create mode 100644 config/stsf/solar/trans_maf.yaml
create mode 100644 config/stsf/solar/transformer.yaml
create mode 100644 config/stsf/traffic/csdi.yaml
create mode 100644 config/stsf/traffic/dlinear.yaml
create mode 100644 config/stsf/traffic/gru.yaml
create mode 100644 config/stsf/traffic/gru_maf.yaml
create mode 100644 config/stsf/traffic/gru_nvp.yaml
create mode 100644 config/stsf/traffic/patchtst.yaml
create mode 100644 config/stsf/traffic/timegrad.yaml
create mode 100644 config/stsf/traffic/timesnet.yaml
create mode 100644 config/stsf/traffic/trans_maf.yaml
create mode 100644 config/stsf/traffic/transformer.yaml
create mode 100644 config/stsf/wiki/csdi.yaml
create mode 100644 config/stsf/wiki/dlinear.yaml
create mode 100644 config/stsf/wiki/gru.yaml
create mode 100644 config/stsf/wiki/gru_maf.yaml
create mode 100644 config/stsf/wiki/gru_nvp.yaml
create mode 100644 config/stsf/wiki/patchtst.yaml
create mode 100644 config/stsf/wiki/timegrad.yaml
create mode 100644 config/stsf/wiki/timesnet.yaml
create mode 100644 config/stsf/wiki/trans_maf.yaml
create mode 100644 config/stsf/wiki/transformer.yaml
create mode 100644 config/tsfm/chronos.yaml
create mode 100644 config/tsfm/forecastpfn.yaml
create mode 100644 config/tsfm/lag_llama.yaml
create mode 100644 config/tsfm/moirai.yaml
create mode 100644 config/tsfm/timer.yaml
create mode 100644 config/tsfm/timesfm.yaml
create mode 100644 config/tsfm/tinytimemixer.yaml
create mode 100644 config/tsfm/units.yaml
create mode 100644 docs/benchmark/FOUNDATION_MODEL.md
create mode 100644 docs/benchmark/README.md
create mode 100644 docs/benchmark/figs/FM_dataset.jpg
create mode 100644 docs/benchmark/figs/FM_summary.jpg
create mode 100644 docs/benchmark/figs/fm_short_term.jpg
create mode 100644 docs/benchmark/figs/fm_var_hor.jpg
create mode 100644 docs/benchmark/figs/long_bench.jpg
create mode 100644 docs/benchmark/figs/short_bench.jpg
create mode 100644 docs/documentation/README.md
create mode 100644 docs/figs/probts_framework.png
create mode 100644 docs/figs/probts_logo.png
create mode 100644 probts/__init__.py
create mode 100644 probts/callbacks/__init__.py
create mode 100644 probts/callbacks/memory_callback.py
create mode 100644 probts/callbacks/time_callback.py
create mode 100644 probts/data/__init__.py
create mode 100644 probts/data/data_loader.py
create mode 100644 probts/data/data_manager.py
create mode 100644 probts/data/data_module.py
create mode 100644 probts/data/ltsf_datasets.py
create mode 100644 probts/data/stsf_datasets.py
create mode 100644 probts/data/time_features.py
create mode 100644 probts/model/__init__.py
create mode 100644 probts/model/forecast_module.py
create mode 100644 probts/model/forecaster/__init__.py
create mode 100644 probts/model/forecaster/forecaster.py
create mode 100644 probts/model/forecaster/point_forecaster/__init__.py
create mode 100644 probts/model/forecaster/point_forecaster/autoformer.py
create mode 100644 probts/model/forecaster/point_forecaster/dlinear.py
create mode 100644 probts/model/forecaster/point_forecaster/forecastpfn.py
create mode 100644 probts/model/forecaster/point_forecaster/gru.py
create mode 100644 probts/model/forecaster/point_forecaster/itransformer.py
create mode 100644 probts/model/forecaster/point_forecaster/linear.py
create mode 100644 probts/model/forecaster/point_forecaster/mean.py
create mode 100644 probts/model/forecaster/point_forecaster/nhits.py
create mode 100644 probts/model/forecaster/point_forecaster/nlinear.py
create mode 100644 probts/model/forecaster/point_forecaster/patchtst.py
create mode 100644 probts/model/forecaster/point_forecaster/timer.py
create mode 100644 probts/model/forecaster/point_forecaster/timesfm.py
create mode 100644 probts/model/forecaster/point_forecaster/timesnet.py
create mode 100644 probts/model/forecaster/point_forecaster/tinytimemixer.py
create mode 100644 probts/model/forecaster/point_forecaster/transformer.py
create mode 100644 probts/model/forecaster/point_forecaster/units.py
create mode 100644 probts/model/forecaster/prob_forecaster/__init__.py
create mode 100644 probts/model/forecaster/prob_forecaster/chronos.py
create mode 100644 probts/model/forecaster/prob_forecaster/csdi.py
create mode 100644 probts/model/forecaster/prob_forecaster/gru_maf.py
create mode 100644 probts/model/forecaster/prob_forecaster/gru_nvp.py
create mode 100644 probts/model/forecaster/prob_forecaster/lag_llama.py
create mode 100644 probts/model/forecaster/prob_forecaster/moirai.py
create mode 100644 probts/model/forecaster/prob_forecaster/timegrad.py
create mode 100644 probts/model/forecaster/prob_forecaster/trans_maf.py
create mode 100644 probts/model/forecaster/prob_forecaster/tsdiff.py
create mode 100644 probts/model/nn/MAF.py
create mode 100644 probts/model/nn/RealNVP.py
create mode 100644 probts/model/nn/__init__.py
create mode 100644 probts/model/nn/flow_model.py
create mode 100644 probts/model/nn/gaussian_diffusion.py
create mode 100644 probts/model/nn/layers/AutoCorrelation.py
create mode 100644 probts/model/nn/layers/Autoformer_EncDec.py
create mode 100644 probts/model/nn/layers/Conv_Blocks.py
create mode 100644 probts/model/nn/layers/Embed.py
create mode 100644 probts/model/nn/layers/Moirai_backbone.py
create mode 100644 probts/model/nn/layers/PatchTST_backbone.py
create mode 100644 probts/model/nn/layers/PatchTST_layers.py
create mode 100644 probts/model/nn/layers/RevIN.py
create mode 100644 probts/model/nn/layers/SelfAttention_Family.py
create mode 100644 probts/model/nn/layers/Transformer_EncDec.py
create mode 100644 probts/model/nn/layers/__init__.py
create mode 100644 probts/model/nn/layers/diffusion_layers.py
create mode 100644 probts/model/nn/layers/s4.py
create mode 100644 probts/model/nn/layers/s4_backbones.py
create mode 100644 probts/utils/__init__.py
create mode 100644 probts/utils/download_datasets.py
create mode 100644 probts/utils/evaluator.py
create mode 100644 probts/utils/masking.py
create mode 100644 probts/utils/metrics.py
create mode 100644 probts/utils/utils.py
create mode 100644 pyproject.toml
create mode 100644 run.py
create mode 100644 run.sh
create mode 100644 run_tsfm.sh
create mode 160000 submodules/lag_llama
create mode 160000 submodules/timesfm
create mode 160000 submodules/tsfm
create mode 160000 submodules/uni2ts
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..ad01e60
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "submodules/uni2ts"]
+ path = submodules/uni2ts
+ url = https://github.com/SalesforceAIResearch/uni2ts.git
+[submodule "submodules/lag_llama"]
+ path = submodules/lag_llama
+ url = https://github.com/time-series-foundation-models/lag-llama.git
+[submodule "submodules/timesfm"]
+ path = submodules/timesfm
+ url = https://github.com/google-research/timesfm.git
+[submodule "submodules/tsfm"]
+ path = submodules/tsfm
+ url = https://github.com/ibm-granite/granite-tsfm.git
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..f9ba8cf
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,9 @@
+# Microsoft Open Source Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+Resources:
+
+- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
+- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9e841e7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..80b173b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,305 @@
+
+
+# ProbTS: Benchmarking Point and Distributional Forecasting across Diverse Prediction Horizons
+
+----------
+[ [Paper](https://arxiv.org/abs/2310.07446) | [Benchmarking](./docs/benchmark/README.md) | [Documentation](./docs/documentation/README.md) ]
+
+A wide range of industrial applications desire precise point and distributional forecasting for diverse prediction horizons. ProbTS serves as a benchmarking tool to aid in understanding how advanced time-series models fulfill these essential forecasting needs. It also sheds light on their advantages and disadvantages in addressing different challenges and unveils the possibilities for future research.
+
+To achieve these objectives, ProbTS provides a unified pipeline that implements [cutting-edge models](#-available-models) from different research threads, including:
+- Long-term point forecasting approaches, such as [PatchTST](https://arxiv.org/abs/2211.14730), [iTransformer](https://arxiv.org/abs/2310.06625), etc.
+- Short-term probabilistic forecasting methods, such as [TimeGrad](https://arxiv.org/abs/2101.12072), [CSDI](https://arxiv.org/abs/2107.03502), etc.
+- Recent time-series foundation models for universal forecasting, such as [TimesFM](https://arxiv.org/abs/2310.10688), [MOIRAI](https://arxiv.org/abs/2402.02592), etc.
+
+Specifically, ProbTS emphasizes the differences in their primary methodological designs, including:
+- Supporting point or distributional forecasts
+- Using autoregressive or non-autoregressive decoding schemes for multi-step outputs
+
+
+
+## Available Models 🧩
+
+ProbTS includes both classical time-series models, specializing in long-term point forecasting or short-term distributional forecasting, and recent time-series foundation models that offer zero-shot and arbitrary-horizon forecasting capabilities for new time series.
+
+### Classical Time-series Models
+
+| **Model** | **Original Eval. Horizon** | **Estimation** | **Decoding Scheme** | **Class Path** |
+| --- | --- | --- | --- | --- |
+| Linear | - | Point | Auto / Non-auto | `probts.model.forecaster.point_forecaster.LinearForecaster` |
+| [GRU](https://arxiv.org/abs/1412.3555) | - | Point | Auto / Non-auto | `probts.model.forecaster.point_forecaster.GRUForecaster` |
+| [Transformer](https://arxiv.org/abs/1706.03762) | - | Point | Auto / Non-auto | `probts.model.forecaster.point_forecaster.TransformerForecaster` |
+| [Autoformer](https://arxiv.org/abs/2106.13008) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.Autoformer` |
+| [N-HiTS](https://arxiv.org/abs/2201.12886) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.NHiTS` |
+| [NLinear](https://arxiv.org/abs/2205.13504) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.NLinear` |
+| [DLinear](https://arxiv.org/abs/2205.13504) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.DLinear` |
+| [TimesNet](https://arxiv.org/abs/2210.02186) | Short- / Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.TimesNet` |
+| [PatchTST](https://arxiv.org/abs/2211.14730) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.PatchTST` |
+| [iTransformer](https://arxiv.org/abs/2310.06625) | Long-term | Point | Non-auto | `probts.model.forecaster.point_forecaster.iTransformer` |
+| [GRU NVP](https://arxiv.org/abs/2002.06103) | Short-term | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.GRU_NVP` |
+| [GRU MAF](https://arxiv.org/abs/2002.06103) | Short-term | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.GRU_MAF` |
+| [Trans MAF](https://arxiv.org/abs/2002.06103) | Short-term | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.Trans_MAF` |
+| [TimeGrad](https://arxiv.org/abs/2101.12072) | Short-term | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.TimeGrad` |
+| [CSDI](https://arxiv.org/abs/2107.03502) | Short-term | Probabilistic | Non-auto | `probts.model.forecaster.prob_forecaster.CSDI` |
+| [TSDiff](https://arxiv.org/abs/2307.11494) | Short-term | Probabilistic | Non-auto | `probts.model.forecaster.prob_forecaster.TSDiffCond` |
+
+### Foundation Models
+
+| **Model** | **Any Horizon** | **Estimation** | **Decoding Scheme** | **Class Path** |
+| --- | --- | --- | --- | --- |
+| [Lag-Llama](https://arxiv.org/abs/2310.08278) | ✔ | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.LagLlama` |
+| [ForecastPFN](https://arxiv.org/abs/2311.01933) | ✔ | Point | Non-auto | `probts.model.forecaster.point_forecaster.ForecastPFN` |
+| [TimesFM](https://arxiv.org/abs/2310.10688) | ✔ | Point | Auto | `probts.model.forecaster.point_forecaster.TimesFM` |
+| [TTM](https://arxiv.org/abs/2401.03955) | ✘ | Point | Non-auto | `probts.model.forecaster.point_forecaster.TinyTimeMixer` |
+| [Timer](https://arxiv.org/abs/2402.02368) | ✔ | Point | Auto | `probts.model.forecaster.point_forecaster.Timer` |
+| [MOIRAI](https://arxiv.org/abs/2402.02592) | ✔ | Probabilistic | Non-auto | `probts.model.forecaster.prob_forecaster.Moirai` |
+| [UniTS](https://arxiv.org/abs/2403.00131) | ✔ | Point | Non-auto | `probts.model.forecaster.point_forecaster.UniTS` |
+| [Chronos](https://arxiv.org/abs/2403.07815) | ✔ | Probabilistic | Auto | `probts.model.forecaster.prob_forecaster.Chronos` |
+
+Stay tuned for more models to be added in the future.
+
+
+## Setup :wrench:
+
+### Environment
+
+ProbTS is developed with Python 3.10 and relies on [PyTorch Lightning](https://github.com/Lightning-AI/lightning). To set up the environment:
+
+```bash
+# Create a new conda environment
+conda create -n probts python=3.10
+conda activate probts
+
+# Install required packages
+pip install .
+pip uninstall -y probts # recommended to uninstall the root package (optional)
+```
+
+[Optional] For time-series foundation models, you need to install basic packages and additional dependencies:
+
+```bash
+# Create a new conda environment
+conda create -n probts_fm python=3.10
+conda activate probts_fm
+
+# Install required packages
+pip install .
+
+# Git submodule
+git submodule update --init --recursive
+
+# Install additional packages for foundation models
+pip install ".[tsfm]"
+pip uninstall -y probts # recommended to uninstall the root package (optional)
+
+# For MOIRAI, we fix the version of the package for better performance
+cd submodules/uni2ts
+git reset --hard fce6a6f57bc3bc1a57c7feb3abc6c7eb2f264301
+```
+
+
+
+Optional for TSFMs reproducibility
+
+```bash
+# For TimesFM, fix the version for reproducibility (optional)
+cd submodules/timesfm
+git reset --hard 5c7b905
+
+# For Lag-Llama, fix the version for reproducibility (optional)
+cd submodules/lag_llama
+git reset --hard 4ad82d9
+
+# For TinyTimeMixer, fix the version for reproducibility (optional)
+cd submodules/tsfm
+git reset --hard bb125c14a05e4231636d6b64f8951d5fe96da1dc
+```
+
+
+
+### Datasets
+
+- **Short-Term Forecasting**: We use datasets from [GluonTS](https://github.com/awslabs/gluonts).
+ Configure the datasets using `--data.data_manager.init_args.dataset {DATASET_NAME}`. You can choose from multivariate or univariate datasets as per your requirement.
+ ```bash
+ # Multivariate Datasets
+ ['exchange_rate_nips', 'electricity_nips', 'traffic_nips', 'solar_nips', 'wiki2000_nips']
+
+ # Univariate Datasets
+ ['tourism_monthly', 'tourism_quarterly', 'tourism_yearly', 'm4_hourly', 'm4_daily', 'm4_weekly', 'm4_monthly', 'm4_quarterly', 'm4_yearly', 'm5']
+ ```
+
+- **Long-Term Forecasting**: To set up the long-term forecasting datasets, please follow these steps:
+ 1. Download long-term forecasting datasets from [HERE](https://drive.google.com/drive/folders/1ZOYpTUa82_jCcxIdTmyr0LXQfvaM9vIy) and place them in `./dataset`.
+ 2. [Opt.] Download CAISO and NordPool datasets from [DEPTS](https://github.com/weifantt/DEPTS/tree/main) and place them in ```./dataset```.
+
+ Configure the datasets using `--data.data_manager.init_args.dataset {DATASET_NAME}` with the following list of available datasets:
+ ```bash
+ # Long-term Forecasting
+ ['etth1', 'etth2','ettm1','ettm2','traffic_ltsf', 'electricity_ltsf', 'exchange_ltsf', 'illness_ltsf', 'weather_ltsf', 'caiso', 'nordpool']
+ ```
+ Note: When utilizing long-term forecasting datasets, you must explicitly specify the `context_length` and `prediction_length` parameters. For example, to set a context length of 96 and a prediction length of 192, use the following command-line arguments:
+ ```bash
+ --data.data_manager.init_args.context_length 96 \
+ --data.data_manager.init_args.prediction_length 192 \
+ ```
+
+
+### Checkpoints for Foundation Models
+
+For full reproducibility, we provide the checkpoints for some foundation models as of the paper completion date. Download the checkpoints from [here](https://drive.google.com/drive/folders/1FaCk9Lj9KZGEO09gehNqC4fbTj4wnN8j?usp=sharing) and place them in the ./checkpoints folder.
+
+
+You can also download the newest checkpoints from the following repositories:
+
+- For `Timer`, download the checkpoints from its [official repository](https://github.com/thuml/Large-Time-Series-Model?tab=readme-ov-file#code-for-fine-tuning) ([Google Drive](https://drive.google.com/drive/folders/15oaiAl4OO5gFqZMJD2lOtX2fxHbpgcU8) or [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/d/235e6bfcf5fa440bb119/)) under the folder `/path/to/checkpoints/timer/`.
+- For `ForecastPFN`, download the checkpoints from its [official repository](https://github.com/abacusai/ForecastPFN#installation-) ([Google Drive](https://drive.google.com/file/d/1acp5thS7I4g_6Gw40wNFGnU1Sx14z0cU/view)) under the folder `/path/to/checkpoints/forecastpfn/`.
+- For `UniTS`, download the checkpoints `units_x128_pretrain_checkpoint.pth` from its [official repository](https://github.com/mims-harvard/UniTS/releases/tag/ckpt) under the folder `/path/to/checkpoints/units/`.
+- For `Lag-Llama`, download the checkpoints `lag-llama.ckpt` from its [huggingface repository](https://huggingface.co/time-series-foundation-models/Lag-Llama/tree/main) under the folder `/path/to/checkpoints/lag_llama/`.
+- For other models, they can be automatically downloaded from huggingface during the first run.
+
+| **Model** | **HuggingFace** |
+| --- | --- |
+| `MOIRAI` | [Link](https://huggingface.co/Salesforce/moirai-1.0-R-small) |
+| `Chronos` | [Link](https://huggingface.co/amazon/chronos-t5-large) |
+| `TinyTimeMixer` | [Link](https://huggingface.co/ibm-granite/granite-timeseries-ttm-v1) |
+| `TimesFM` | [Link](https://huggingface.co/google/timesfm-1.0-200m) |
+
+
+
+## Quick Start :rocket:
+
+Specify `--config` with a specific configuration file to reproduce results of point or probabilistic models on commonly used long- and short-term forecasting datasets. Configuration files are included in the [config](./config/) folder.
+
+To run non-universal models:
+```bash
+bash run.sh
+```
+
+To run foundation models:
+```bash
+bash run_tsfm.sh
+```
+
+### Short-term Forecasting
+
+For short-term forecasting scenarios, datasets and corresponding `context_length` and `prediction_length` are automatically obtained from [GluonTS](https://github.com/awslabs/gluonts). Use the following command:
+
+```bash
+python run.py --config config/path/to/model.yaml \
+ --data.data_manager.init_args.path /path/to/datasets/ \
+ --trainer.default_root_dir /path/to/log_dir/ \
+ --data.data_manager.init_args.dataset {DATASET_NAME}
+```
+See full `DATASET_NAME` list:
+```python
+from gluonts.dataset.repository import dataset_names
+print(dataset_names)
+```
+
+### Long-term Forecasting
+
+For long-term forecasting scenarios, `context_length` and `prediction_length` must be explicitly assigned:
+
+```bash
+python run.py --config config/path/to/model.yaml \
+ --data.data_manager.init_args.path /path/to/datasets/ \
+ --trainer.default_root_dir /path/to/log_dir/ \
+ --data.data_manager.init_args.dataset {DATASET_NAME} \
+ --data.data_manager.init_args.context_length {CTX_LEN} \
+ --data.data_manager.init_args.prediction_length {PRED_LEN}
+```
+
+`DATASET_NAME` options:
+```bash
+['etth1', 'etth2','ettm1','ettm2','traffic_ltsf', 'electricity_ltsf', 'exchange_ltsf', 'illness_ltsf', 'weather_ltsf', 'caiso', 'nordpool']
+```
+
+## Benchmarking :balance_scale:
+
+By utilizing ProbTS, we conduct a systematic comparison between studies that focus on point forecasting and those aimed at distributional estimation, employing various forecasting horizons and evaluation metrics. For more details:
+
+- [Short-term & Long-term Forecasting Benchmarking](./docs/benchmark/README.md)
+- [Evaluating Time Series Foundation Models](./docs/benchmark/FOUNDATION_MODEL.md)
+
+
+## Documentation :open_book:
+
+For detailed information on configuration parameters and model customization, please refer to the [documentation](./docs/documentation/README.md).
+
+
+### Key Configuration Parameters
+
+- Adjust model and data parameters in `run.sh`. Key parameters include:
+
+| Config Name | Type | Description |
+| --- | --- | --- |
+| `trainer.max_epochs` | integer | Maximum number of training epochs. |
+| `model.forecaster.class_path` | string | Forecaster module path (e.g., `probts.model.forecaster.point_forecaster.PatchTST`). |
+| `model.forecaster.init_args.{ARG}` | - | Model-specific hyperparameters. |
+| `model.num_samples` | integer | Number of samples per distribution during evaluation. |
+| `model.learning_rate` | float | Learning rate. |
+| `data.data_manager.init_args.dataset` | string | Dataset for training and evaluation. |
+| `data.data_manager.init_args.path` | string | Path to the dataset folder. |
+| `data.data_manager.init_args.scaler` | string | Scaler type: `identity`, `standard` (z-score normalization), or `temporal` (scale based on average temporal absolute value). |
+| `data.data_manager.init_args.context_length` | integer | Length of observation window (required for long-term forecasting). |
+| `data.data_manager.init_args.prediction_length` | integer | Forecasting horizon length (required for long-term forecasting). |
+| `data.data_manager.init_args.var_specific_norm` | boolean | If conduct per-variate normalization or not. |
+| `data.batch_size` | integer | Batch size. |
+
+
+- To print the full pipeline configuration to a file:
+
+ ```bash
+ python run.py --print_config > config/pipeline_config.yaml
+ ```
+
+## Acknowledgement 🌟
+
+Special thanks to the following repositories for their open-sourced code bases and datasets.
+
+### Tools/Packages
+
+- [GluonTS](https://github.com/awslabs/gluonts)
+- [PyTorch-TS](https://github.com/zalandoresearch/pytorch-ts)
+- [TSLib](https://github.com/libts/tslib)
+- [NeuralForecast](https://github.com/Nixtla/neuralforecast)
+
+### Official Implementations
+
+**Classical Time-series Models**
+
+- [Autoformer](https://github.com/thuml/Autoformer)
+- [N-HiTS](https://github.com/cchallu/n-hits)
+- [NLinear, DLinear](https://github.com/cure-lab/LTSF-Linear)
+- [TimesNet](https://github.com/thuml/Time-Series-Library)
+- [RevIN](https://github.com/ts-kim/RevIN)
+- [PatchTST](https://github.com/yuqinie98/PatchTST)
+- [iTransformer](https://github.com/thuml/iTransformer)
+- [GRU NVP, GRU MAF, Trans MAF, TimeGrad](https://github.com/zalandoresearch/pytorch-ts/tree/master)
+- [CSDI](https://github.com/ermongroup/CSDI)
+- [TSDiff](https://github.com/amazon-science/unconditional-time-series-diffusion)
+
+
+**Time-series Foundation Models**
+
+- [MOIRAI](https://github.com/SalesforceAIResearch/uni2ts)
+- [Chronos](https://github.com/amazon-science/chronos-forecasting)
+- [Lag-Llama](https://github.com/time-series-foundation-models/lag-llama)
+- [TimesFM](https://github.com/google-research/timesfm)
+- [Timer](https://github.com/thuml/Large-Time-Series-Model)
+- [UniTS](https://github.com/mims-harvard/UniTS)
+- [ForecastPFN](https://github.com/abacusai/ForecastPFN)
+- [TTM](https://github.com/ibm-granite/granite-tsfm)
+
+## Citing ProbTS 🌟
+
+If you have used ProbTS for research or production, please cite it as follows.
+```tex
+@article{zhang2023probts,
+ title={{ProbTS}: Benchmarking Point and Distributional Forecasting across Diverse Prediction Horizons},
+ author={Zhang, Jiawen and Wen, Xumeng and Zhang, Zhenwei and Zheng, Shun and Li, Jia and Bian, Jiang},
+ journal={arXiv preprint arXiv:2310.07446},
+ year={2023}
+}
+```
\ No newline at end of file
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..b3c89ef
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).
+
+
diff --git a/config/default/autoformer.yaml b/config/default/autoformer.yaml
new file mode 100644
index 0000000..25428ea
--- /dev/null
+++ b/config/default/autoformer.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+ # num_sanity_val_steps: 0
+ # gradient_clip_algorithm: 'norm'
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.Autoformer
+ init_args:
+ moving_avg: 25
+ factor: 1
+ n_heads: 8
+ activation: 'gelu'
+ e_layers: 2
+ d_layers: 1
+ output_attention: false
+ d_ff: 512
+ f_hidden_size: 512
+ embed: 'timeF'
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ num_samples: 1
+ learning_rate: 1e-3
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+    scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/csdi.yaml b/config/default/csdi.yaml
new file mode 100644
index 0000000..9c27587
--- /dev/null
+++ b/config/default/csdi.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 4
+ test_batch_size: 4
+ num_workers: 8
diff --git a/config/default/dlinear.yaml b/config/default/dlinear.yaml
new file mode 100644
index 0000000..3bd35fe
--- /dev/null
+++ b/config/default/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/gru.yaml b/config/default/gru.yaml
new file mode 100644
index 0000000..d1cf880
--- /dev/null
+++ b/config/default/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 40
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/default/gru_maf.yaml b/config/default/gru_maf.yaml
new file mode 100644
index 0000000..53fee8f
--- /dev/null
+++ b/config/default/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 1
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 40
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ scaler: identity # identity, standard, temporal
+ split_val: true
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/gru_nvp.yaml b/config/default/gru_nvp.yaml
new file mode 100644
index 0000000..75860b5
--- /dev/null
+++ b/config/default/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 7
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/itransformer.yaml b/config/default/itransformer.yaml
new file mode 100644
index 0000000..0e2bb33
--- /dev/null
+++ b/config/default/itransformer.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.iTransformer
+ init_args:
+ factor: 1
+ n_heads: 8
+ activation: 'gelu'
+ e_layers: 2
+ output_attention: false
+ f_hidden_size: 256
+ d_ff: 256
+ label_len: 48
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 1
+ learning_rate: 1e-4
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+    scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/linear.yaml b/config/default/linear.yaml
new file mode 100644
index 0000000..af0c832
--- /dev/null
+++ b/config/default/linear.yaml
@@ -0,0 +1,29 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 30
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.LinearForecaster
+ init_args:
+ individual: false
+ use_lags: true
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/default/mean.yaml b/config/default/mean.yaml
new file mode 100644
index 0000000..e114886
--- /dev/null
+++ b/config/default/mean.yaml
@@ -0,0 +1,28 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.MeanForecaster
+ init_args:
+ mode: global
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/nhits.yaml b/config/default/nhits.yaml
new file mode 100644
index 0000000..e6a16ba
--- /dev/null
+++ b/config/default/nhits.yaml
@@ -0,0 +1,43 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.NHiTS
+ init_args:
+ n_blocks: [1,1,1]
+ hidden_size: 512
+ pooling_mode: 'max'
+ interpolation_mode: 'linear'
+ activation: 'ReLU'
+ initialization: 'lecun_normal'
+ batch_normalization: false
+ shared_weights: false
+ naive_level:
+ dropout: 0
+ n_layers: 2
+ use_lags: false
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/default/nlinear.yaml b/config/default/nlinear.yaml
new file mode 100644
index 0000000..b9edac8
--- /dev/null
+++ b/config/default/nlinear.yaml
@@ -0,0 +1,31 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.NLinear
+ init_args:
+ individual: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/default/patchtst.yaml b/config/default/patchtst.yaml
new file mode 100644
index 0000000..f773b9d
--- /dev/null
+++ b/config/default/patchtst.yaml
@@ -0,0 +1,37 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 3
+ patch_len: 6
+ dropout: 0.1
+ f_hidden_size: 32
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/timegrad.yaml b/config/default/timegrad.yaml
new file mode 100644
index 0000000..65c1085
--- /dev/null
+++ b/config/default/timegrad.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ scaler: identity # identity, standard, temporal
+ split_val: true
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/timesnet.yaml b/config/default/timesnet.yaml
new file mode 100644
index 0000000..745a424
--- /dev/null
+++ b/config/default/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 32
+ dropout: 0.1
+ f_hidden_size: 40
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/default/trans_maf.yaml b/config/default/trans_maf.yaml
new file mode 100644
index 0000000..1641632
--- /dev/null
+++ b/config/default/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 32
+ enc_num_heads: 8
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ scaler: identity # identity, standard, temporal
+ split_val: true
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/transformer.yaml b/config/default/transformer.yaml
new file mode 100644
index 0000000..00d1fef
--- /dev/null
+++ b/config/default/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 16
+ num_heads: 4
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/default/tsdiff.yaml b/config/default/tsdiff.yaml
new file mode 100644
index 0000000..8e8c77d
--- /dev/null
+++ b/config/default/tsdiff.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+ gradient_clip_val: 0.5
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TSDiffCond
+ init_args:
+ timesteps: 100
+ hidden_dim: 64
+ step_emb: 128
+ num_residual_blocks: 3
+ dropout: 0.0
+ mode: diag # diag, nplr
+      measure: diag # used when mode is diag; options: 'diag', 'diag-lin', 'diag-inv', 'diag-legs'
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: false
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: temporal # identity, standard, temporal
+ context_length: 336
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/electricity_ltsf/csdi.yaml b/config/ltsf/electricity_ltsf/csdi.yaml
new file mode 100644
index 0000000..faa5660
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/csdi.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 3
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 64
+ emb_feature_dim: 8
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 64
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 16
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ scaler: standard # identity, standard, temporal
+ split_val: true
+ batch_size: 4
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/electricity_ltsf/dlinear.yaml b/config/ltsf/electricity_ltsf/dlinear.yaml
new file mode 100644
index 0000000..955ea0c
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 200
+ log_every_n_steps: 1
+ accumulate_grad_batches: 2
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinearEncoder
+ init_args:
+ individual: true
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/electricity_ltsf/gru_nvp.yaml b/config/ltsf/electricity_ltsf/gru_nvp.yaml
new file mode 100644
index 0000000..cd53158
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 128
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 64
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/electricity_ltsf/patchtst.yaml b/config/ltsf/electricity_ltsf/patchtst.yaml
new file mode 100644
index 0000000..80b0a54
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/patchtst.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 128
+ n_layers: 3
+ n_heads: 16
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: false
+ num_samples: 100
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/electricity_ltsf/timegrad.yaml b/config/ltsf/electricity_ltsf/timegrad.yaml
new file mode 100644
index 0000000..e3d1610
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 128
+ enc_num_layers: 3
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/electricity_ltsf/timesnet.yaml b/config/ltsf/electricity_ltsf/timesnet.yaml
new file mode 100644
index 0000000..c908a9e
--- /dev/null
+++ b/config/ltsf/electricity_ltsf/timesnet.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 200
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 2
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 512
+ dropout: 0.1
+ f_hidden_size: 256
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ num_samples: 100
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/etth1/csdi.yaml b/config/ltsf/etth1/csdi.yaml
new file mode 100644
index 0000000..b81d015
--- /dev/null
+++ b/config/ltsf/etth1/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/etth1/dlinear.yaml b/config/ltsf/etth1/dlinear.yaml
new file mode 100644
index 0000000..61c22ae
--- /dev/null
+++ b/config/ltsf/etth1/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: true
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.005
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/etth1/gru_nvp.yaml b/config/ltsf/etth1/gru_nvp.yaml
new file mode 100644
index 0000000..e133f74
--- /dev/null
+++ b/config/ltsf/etth1/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 64
+ n_hidden: 3
+ batch_norm: false
+ conditional_length: 100
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth1
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/ltsf/etth1/patchtst.yaml b/config/ltsf/etth1/patchtst.yaml
new file mode 100644
index 0000000..78eb0a9
--- /dev/null
+++ b/config/ltsf/etth1/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.3
+ f_hidden_size: 16
+ n_layers: 3
+ n_heads: 4
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/etth1/timegrad.yaml b/config/ltsf/etth1/timegrad.yaml
new file mode 100644
index 0000000..0fad3cb
--- /dev/null
+++ b/config/ltsf/etth1/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 128
+ enc_num_layers: 3
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth1
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/etth2/csdi.yaml b/config/ltsf/etth2/csdi.yaml
new file mode 100644
index 0000000..122f487
--- /dev/null
+++ b/config/ltsf/etth2/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/etth2/dlinear.yaml b/config/ltsf/etth2/dlinear.yaml
new file mode 100644
index 0000000..f72a950
--- /dev/null
+++ b/config/ltsf/etth2/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.05
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/etth2/gru_nvp.yaml b/config/ltsf/etth2/gru_nvp.yaml
new file mode 100644
index 0000000..e2db668
--- /dev/null
+++ b/config/ltsf/etth2/gru_nvp.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 128
+ n_hidden: 3
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth2
+      path: ./datasets/ # TODO: set to your local dataset root (replaced a developer-specific absolute path)
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/etth2/patchtst.yaml b/config/ltsf/etth2/patchtst.yaml
new file mode 100644
index 0000000..f8bae39
--- /dev/null
+++ b/config/ltsf/etth2/patchtst.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.3
+ f_hidden_size: 16
+ d_ff: 128
+ n_layers: 3
+ n_heads: 4
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/etth2/timegrad.yaml b/config/ltsf/etth2/timegrad.yaml
new file mode 100644
index 0000000..dad8862
--- /dev/null
+++ b/config/ltsf/etth2/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: etth2
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/ettm1/csdi.yaml b/config/ltsf/ettm1/csdi.yaml
new file mode 100644
index 0000000..ccf7b4c
--- /dev/null
+++ b/config/ltsf/ettm1/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/ettm1/dlinear.yaml b/config/ltsf/ettm1/dlinear.yaml
new file mode 100644
index 0000000..2511626
--- /dev/null
+++ b/config/ltsf/ettm1/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: true
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/ettm1/gru_nvp.yaml b/config/ltsf/ettm1/gru_nvp.yaml
new file mode 100644
index 0000000..351e97d
--- /dev/null
+++ b/config/ltsf/ettm1/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 64
+ n_hidden: 3
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm1
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/ettm1/patchtst.yaml b/config/ltsf/ettm1/patchtst.yaml
new file mode 100644
index 0000000..72c3022
--- /dev/null
+++ b/config/ltsf/ettm1/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 128
+ n_layers: 3
+ n_heads: 16
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm1
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/ettm1/timegrad.yaml b/config/ltsf/ettm1/timegrad.yaml
new file mode 100644
index 0000000..093c27a
--- /dev/null
+++ b/config/ltsf/ettm1/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 128
+ enc_num_layers: 3
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm1
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/ettm2/csdi.yaml b/config/ltsf/ettm2/csdi.yaml
new file mode 100644
index 0000000..87a147e
--- /dev/null
+++ b/config/ltsf/ettm2/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/ettm2/dlinear.yaml b/config/ltsf/ettm2/dlinear.yaml
new file mode 100644
index 0000000..3802b19
--- /dev/null
+++ b/config/ltsf/ettm2/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/ettm2/gru_nvp.yaml b/config/ltsf/ettm2/gru_nvp.yaml
new file mode 100644
index 0000000..47ab9b4
--- /dev/null
+++ b/config/ltsf/ettm2/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 128
+ n_hidden: 3
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm2
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/ettm2/patchtst.yaml b/config/ltsf/ettm2/patchtst.yaml
new file mode 100644
index 0000000..0ca1579
--- /dev/null
+++ b/config/ltsf/ettm2/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 128
+ n_layers: 3
+ n_heads: 16
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm2
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/ettm2/timegrad.yaml b/config/ltsf/ettm2/timegrad.yaml
new file mode 100644
index 0000000..cafdd05
--- /dev/null
+++ b/config/ltsf/ettm2/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 64
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: ettm2
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/exchange_ltsf/csdi.yaml b/config/ltsf/exchange_ltsf/csdi.yaml
new file mode 100644
index 0000000..f15e122
--- /dev/null
+++ b/config/ltsf/exchange_ltsf/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/exchange_ltsf/dlinear.yaml b/config/ltsf/exchange_ltsf/dlinear.yaml
new file mode 100644
index 0000000..374bd52
--- /dev/null
+++ b/config/ltsf/exchange_ltsf/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: true
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.0005
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/exchange_ltsf/gru_nvp.yaml b/config/ltsf/exchange_ltsf/gru_nvp.yaml
new file mode 100644
index 0000000..595b9e5
--- /dev/null
+++ b/config/ltsf/exchange_ltsf/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 128
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 128
+ n_hidden: 3
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/exchange_ltsf/patchtst.yaml b/config/ltsf/exchange_ltsf/patchtst.yaml
new file mode 100644
index 0000000..682503a
--- /dev/null
+++ b/config/ltsf/exchange_ltsf/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 16
+ n_layers: 3
+ n_heads: 4
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/exchange_ltsf/timegrad.yaml b/config/ltsf/exchange_ltsf/timegrad.yaml
new file mode 100644
index 0000000..ce7b6a5
--- /dev/null
+++ b/config/ltsf/exchange_ltsf/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/illness_ltsf/csdi.yaml b/config/ltsf/illness_ltsf/csdi.yaml
new file mode 100644
index 0000000..97a72eb
--- /dev/null
+++ b/config/ltsf/illness_ltsf/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: illness_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+      context_length: 96 # NOTE(review): every other illness_ltsf config uses 36 — confirm 96 is intended for CSDI
+      prediction_length: 96 # NOTE(review): every other illness_ltsf config uses 36 — confirm 96 is intended for CSDI
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/illness_ltsf/dlinear.yaml b/config/ltsf/illness_ltsf/dlinear.yaml
new file mode 100644
index 0000000..a1d52f1
--- /dev/null
+++ b/config/ltsf/illness_ltsf/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: illness_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 36
+ prediction_length: 36
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/illness_ltsf/gru_nvp.yaml b/config/ltsf/illness_ltsf/gru_nvp.yaml
new file mode 100644
index 0000000..5926412
--- /dev/null
+++ b/config/ltsf/illness_ltsf/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 128
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: illness_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 36
+ prediction_length: 36
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/illness_ltsf/patchtst.yaml b/config/ltsf/illness_ltsf/patchtst.yaml
new file mode 100644
index 0000000..729ec2f
--- /dev/null
+++ b/config/ltsf/illness_ltsf/patchtst.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 2
+ patch_len: 24
+ dropout: 0.3
+ f_hidden_size: 16
+ n_layers: 3
+ n_heads: 4
+ fc_dropout: 0.3
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0025
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: illness_ltsf
+      # path: <dataset_root>  # NOTE(review): removed hardcoded user-specific absolute path (/home/covpreduser/...); no sibling config sets `path` — supply via CLI override or a local config instead
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 36
+ prediction_length: 36
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/illness_ltsf/timegrad.yaml b/config/ltsf/illness_ltsf/timegrad.yaml
new file mode 100644
index 0000000..03fe6aa
--- /dev/null
+++ b/config/ltsf/illness_ltsf/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 64
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: illness_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 36
+ prediction_length: 36
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/traffic_ltsf/csdi.yaml b/config/ltsf/traffic_ltsf/csdi.yaml
new file mode 100644
index 0000000..9fe882f
--- /dev/null
+++ b/config/ltsf/traffic_ltsf/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 3
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 64
+ emb_feature_dim: 8
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 64
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 16
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 4
+ test_batch_size: 4
+ num_workers: 8
diff --git a/config/ltsf/traffic_ltsf/dlinear.yaml b/config/ltsf/traffic_ltsf/dlinear.yaml
new file mode 100644
index 0000000..0c3c79c
--- /dev/null
+++ b/config/ltsf/traffic_ltsf/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ accumulate_grad_batches: 4
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.05
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/traffic_ltsf/gru_nvp.yaml b/config/ltsf/traffic_ltsf/gru_nvp.yaml
new file mode 100644
index 0000000..d4cd530
--- /dev/null
+++ b/config/ltsf/traffic_ltsf/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 128
+ enc_num_layers: 3
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 128
+ n_hidden: 3
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/traffic_ltsf/patchtst.yaml b/config/ltsf/traffic_ltsf/patchtst.yaml
new file mode 100644
index 0000000..73479f2
--- /dev/null
+++ b/config/ltsf/traffic_ltsf/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 300
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 3
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 128
+ n_layers: 3
+ n_heads: 16
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/traffic_ltsf/timegrad.yaml b/config/ltsf/traffic_ltsf/timegrad.yaml
new file mode 100644
index 0000000..682500a
--- /dev/null
+++ b/config/ltsf/traffic_ltsf/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 128
+ enc_num_layers: 3
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/weather_ltsf/csdi.yaml b/config/ltsf/weather_ltsf/csdi.yaml
new file mode 100644
index 0000000..0c66d4d
--- /dev/null
+++ b/config/ltsf/weather_ltsf/csdi.yaml
@@ -0,0 +1,47 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 128
+ emb_feature_dim: 16
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 128
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: weather_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/ltsf/weather_ltsf/dlinear.yaml b/config/ltsf/weather_ltsf/dlinear.yaml
new file mode 100644
index 0000000..05834b5
--- /dev/null
+++ b/config/ltsf/weather_ltsf/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ accumulate_grad_batches: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 25
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: weather_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/ltsf/weather_ltsf/gru_nvp.yaml b/config/ltsf/weather_ltsf/gru_nvp.yaml
new file mode 100644
index 0000000..0d5a178
--- /dev/null
+++ b/config/ltsf/weather_ltsf/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 128
+ n_hidden: 3
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: weather_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/ltsf/weather_ltsf/patchtst.yaml b/config/ltsf/weather_ltsf/patchtst.yaml
new file mode 100644
index 0000000..d71cfb5
--- /dev/null
+++ b/config/ltsf/weather_ltsf/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 8
+ patch_len: 16
+ dropout: 0.2
+ f_hidden_size: 128
+ n_layers: 3
+ n_heads: 16
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: weather_ltsf
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/ltsf/weather_ltsf/timegrad.yaml b/config/ltsf/weather_ltsf/timegrad.yaml
new file mode 100644
index 0000000..ffcd86a
--- /dev/null
+++ b/config/ltsf/weather_ltsf/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 200
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: weather_ltsf
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ context_length: 96
+ prediction_length: 96
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
diff --git a/config/m4/m4_daily/csdi.yaml b/config/m4/m4_daily/csdi.yaml
new file mode 100644
index 0000000..5e5e202
--- /dev/null
+++ b/config/m4/m4_daily/csdi.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 32
+ emb_feature_dim: 4
+ channels: 16
+ n_layers: 4
+ num_heads: 4
+ num_steps: 50
+ diffusion_embedding_dim: 32
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_daily
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_daily/dlinear.yaml b/config/m4/m4_daily/dlinear.yaml
new file mode 100644
index 0000000..161b6dd
--- /dev/null
+++ b/config/m4/m4_daily/dlinear.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_daily
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_daily/gru_nvp.yaml b/config/m4/m4_daily/gru_nvp.yaml
new file mode 100644
index 0000000..09d31a5
--- /dev/null
+++ b/config/m4/m4_daily/gru_nvp.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 100
+ dequantize: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_daily
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_daily/patchtst.yaml b/config/m4/m4_daily/patchtst.yaml
new file mode 100644
index 0000000..e97e8ef
--- /dev/null
+++ b/config/m4/m4_daily/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 2
+ patch_len: 6
+ dropout: 0.3
+ f_hidden_size: 32
+ d_ff: 128
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_daily
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 128
+ num_workers: 8
\ No newline at end of file
diff --git a/config/m4/m4_daily/timegrad.yaml b/config/m4/m4_daily/timegrad.yaml
new file mode 100644
index 0000000..376f3c4
--- /dev/null
+++ b/config/m4/m4_daily/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 50
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_daily
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_weekly/csdi.yaml b/config/m4/m4_weekly/csdi.yaml
new file mode 100644
index 0000000..ad4d95c
--- /dev/null
+++ b/config/m4/m4_weekly/csdi.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 32
+ emb_feature_dim: 4
+ channels: 16
+ n_layers: 4
+ num_heads: 4
+ num_steps: 50
+ diffusion_embedding_dim: 32
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_weekly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_weekly/dlinear.yaml b/config/m4/m4_weekly/dlinear.yaml
new file mode 100644
index 0000000..3b3b918
--- /dev/null
+++ b/config/m4/m4_weekly/dlinear.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_weekly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_weekly/gru_nvp.yaml b/config/m4/m4_weekly/gru_nvp.yaml
new file mode 100644
index 0000000..2f372c0
--- /dev/null
+++ b/config/m4/m4_weekly/gru_nvp.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 100
+ dequantize: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_weekly
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m4_weekly/patchtst.yaml b/config/m4/m4_weekly/patchtst.yaml
new file mode 100644
index 0000000..2c63319
--- /dev/null
+++ b/config/m4/m4_weekly/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 3
+ patch_len: 6
+ dropout: 0.3
+ f_hidden_size: 32
+ d_ff: 128
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_weekly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 128
+ num_workers: 8
\ No newline at end of file
diff --git a/config/m4/m4_weekly/timegrad.yaml b/config/m4/m4_weekly/timegrad.yaml
new file mode 100644
index 0000000..43e873c
--- /dev/null
+++ b/config/m4/m4_weekly/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 50
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m4_weekly
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m5/csdi.yaml b/config/m4/m5/csdi.yaml
new file mode 100644
index 0000000..aa8dee7
--- /dev/null
+++ b/config/m4/m5/csdi.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 32
+ emb_feature_dim: 4
+ channels: 16
+ n_layers: 4
+ num_heads: 4
+ num_steps: 50
+ diffusion_embedding_dim: 32
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m5
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m5/dlinear.yaml b/config/m4/m5/dlinear.yaml
new file mode 100644
index 0000000..848f9a0
--- /dev/null
+++ b/config/m4/m5/dlinear.yaml
@@ -0,0 +1,35 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m5
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 256
+ num_workers: 8
diff --git a/config/m4/m5/gru_nvp.yaml b/config/m4/m5/gru_nvp.yaml
new file mode 100644
index 0000000..71a8a9a
--- /dev/null
+++ b/config/m4/m5/gru_nvp.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 100
+ dequantize: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m5
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/m5/patchtst.yaml b/config/m4/m5/patchtst.yaml
new file mode 100644
index 0000000..6fbe3b1
--- /dev/null
+++ b/config/m4/m5/patchtst.yaml
@@ -0,0 +1,40 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 2
+ patch_len: 4
+ dropout: 0.3
+ f_hidden_size: 64
+ d_ff: 128
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m5
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 128
+ num_workers: 8
\ No newline at end of file
diff --git a/config/m4/m5/timegrad.yaml b/config/m4/m5/timegrad.yaml
new file mode 100644
index 0000000..2437872
--- /dev/null
+++ b/config/m4/m5/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 30
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 50
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: m5
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 512
+ num_workers: 8
diff --git a/config/m4/tourism_monthly/csdi.yaml b/config/m4/tourism_monthly/csdi.yaml
new file mode 100644
index 0000000..ed49c8e
--- /dev/null
+++ b/config/m4/tourism_monthly/csdi.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 32
+ emb_feature_dim: 4
+ channels: 16
+ n_layers: 4
+ num_heads: 4
+ num_steps: 50
+ diffusion_embedding_dim: 32
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 64
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: tourism_monthly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/tourism_monthly/dlinear.yaml b/config/m4/tourism_monthly/dlinear.yaml
new file mode 100644
index 0000000..4181943
--- /dev/null
+++ b/config/m4/tourism_monthly/dlinear.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: tourism_monthly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/tourism_monthly/gru_nvp.yaml b/config/m4/tourism_monthly/gru_nvp.yaml
new file mode 100644
index 0000000..fae0350
--- /dev/null
+++ b/config/m4/tourism_monthly/gru_nvp.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 2
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 100
+ dequantize: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: tourism_monthly
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/m4/tourism_monthly/patchtst.yaml b/config/m4/tourism_monthly/patchtst.yaml
new file mode 100644
index 0000000..de72060
--- /dev/null
+++ b/config/m4/tourism_monthly/patchtst.yaml
@@ -0,0 +1,39 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 2
+ patch_len: 6
+ dropout: 0.3
+ f_hidden_size: 64
+ d_ff: 128
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: tourism_monthly
+ context_length_factor: 3
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 128
+ num_workers: 8
\ No newline at end of file
diff --git a/config/m4/tourism_monthly/timegrad.yaml b/config/m4/tourism_monthly/timegrad.yaml
new file mode 100644
index 0000000..848e726
--- /dev/null
+++ b/config/m4/tourism_monthly/timegrad.yaml
@@ -0,0 +1,44 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 800
+ log_every_n_steps: 2
+ check_val_every_n_epoch: 2
+ default_root_dir: ./results
+ accumulate_grad_batches: 8
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 50
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 64
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: tourism_monthly
+ context_length_factor: 3
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
diff --git a/config/pipeline_config.yaml b/config/pipeline_config.yaml
new file mode 100644
index 0000000..2dcdd3e
--- /dev/null
+++ b/config/pipeline_config.yaml
@@ -0,0 +1,53 @@
+# lightning.pytorch==2.3.0dev
+seed_everything: true
+trainer:
+ accelerator: auto
+ strategy: auto
+ devices: auto
+ num_nodes: 1
+ precision: null
+ logger: null
+ callbacks: null
+ fast_dev_run: false
+ max_epochs: null
+ min_epochs: null
+ max_steps: -1
+ min_steps: null
+ max_time: null
+ limit_train_batches: null
+ limit_val_batches: null
+ limit_test_batches: null
+ limit_predict_batches: null
+ overfit_batches: 0.0
+ val_check_interval: null
+ check_val_every_n_epoch: 1
+ num_sanity_val_steps: null
+ log_every_n_steps: null
+ enable_checkpointing: null
+ enable_progress_bar: null
+ enable_model_summary: null
+ accumulate_grad_batches: 1
+ gradient_clip_val: null
+ gradient_clip_algorithm: null
+ deterministic: null
+ benchmark: null
+ inference_mode: true
+ use_distributed_sampler: true
+ profiler: null
+ detect_anomaly: false
+ barebones: false
+ plugins: null
+ sync_batchnorm: false
+ reload_dataloaders_every_n_epochs: 0
+ default_root_dir: null
+model:
+ forecaster: null
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 10
+ load_from_ckpt: null
+data:
+ data_manager: null
+ batch_size: 64
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/stsf/electricity/dlinear.yaml b/config/stsf/electricity/dlinear.yaml
new file mode 100644
index 0000000..4e60590
--- /dev/null
+++ b/config/stsf/electricity/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: true
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/electricity/gru.yaml b/config/stsf/electricity/gru.yaml
new file mode 100644
index 0000000..24c9072
--- /dev/null
+++ b/config/stsf/electricity/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 40
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/gru_maf.yaml b/config/stsf/electricity/gru_maf.yaml
new file mode 100644
index 0000000..c39a2bf
--- /dev/null
+++ b/config/stsf/electricity/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 40
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/gru_nvp.yaml b/config/stsf/electricity/gru_nvp.yaml
new file mode 100644
index 0000000..ad2c480
--- /dev/null
+++ b/config/stsf/electricity/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/patchtst.yaml b/config/stsf/electricity/patchtst.yaml
new file mode 100644
index 0000000..5a1d4d8
--- /dev/null
+++ b/config/stsf/electricity/patchtst.yaml
@@ -0,0 +1,37 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 2
+ patch_len: 4
+ dropout: 0.1
+ f_hidden_size: 64
+ n_layers: 4
+ n_heads: 8
+ fc_dropout: 0.1
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/electricity/timegrad.yaml b/config/stsf/electricity/timegrad.yaml
new file mode 100644
index 0000000..aaecf63
--- /dev/null
+++ b/config/stsf/electricity/timegrad.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/timesnet.yaml b/config/stsf/electricity/timesnet.yaml
new file mode 100644
index 0000000..e5e0a62
--- /dev/null
+++ b/config/stsf/electricity/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 64
+ dropout: 0.1
+ f_hidden_size: 64
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/trans_maf.yaml b/config/stsf/electricity/trans_maf.yaml
new file mode 100644
index 0000000..1c61bb0
--- /dev/null
+++ b/config/stsf/electricity/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 32
+ enc_num_heads: 8
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/electricity/transformer.yaml b/config/stsf/electricity/transformer.yaml
new file mode 100644
index 0000000..667ad85
--- /dev/null
+++ b/config/stsf/electricity/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 32
+ num_heads: 8
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: electricity_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/dlinear.yaml b/config/stsf/exchange/dlinear.yaml
new file mode 100644
index 0000000..56eccc3
--- /dev/null
+++ b/config/stsf/exchange/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/exchange/gru.yaml b/config/stsf/exchange/gru.yaml
new file mode 100644
index 0000000..411cb52
--- /dev/null
+++ b/config/stsf/exchange/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 40
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/gru_maf.yaml b/config/stsf/exchange/gru_maf.yaml
new file mode 100644
index 0000000..c30ef96
--- /dev/null
+++ b/config/stsf/exchange/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 40
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/gru_nvp.yaml b/config/stsf/exchange/gru_nvp.yaml
new file mode 100644
index 0000000..4b05825
--- /dev/null
+++ b/config/stsf/exchange/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/patchtst.yaml b/config/stsf/exchange/patchtst.yaml
new file mode 100644
index 0000000..f773b9d
--- /dev/null
+++ b/config/stsf/exchange/patchtst.yaml
@@ -0,0 +1,37 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 3
+ patch_len: 6
+ dropout: 0.1
+ f_hidden_size: 32
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/exchange/timegrad.yaml b/config/stsf/exchange/timegrad.yaml
new file mode 100644
index 0000000..554e59b
--- /dev/null
+++ b/config/stsf/exchange/timegrad.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/timesnet.yaml b/config/stsf/exchange/timesnet.yaml
new file mode 100644
index 0000000..2e7838c
--- /dev/null
+++ b/config/stsf/exchange/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 64
+ dropout: 0.1
+ f_hidden_size: 64
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/exchange/trans_maf.yaml b/config/stsf/exchange/trans_maf.yaml
new file mode 100644
index 0000000..e39513e
--- /dev/null
+++ b/config/stsf/exchange/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 16
+ enc_num_heads: 8
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/exchange/transformer.yaml b/config/stsf/exchange/transformer.yaml
new file mode 100644
index 0000000..1d4ab67
--- /dev/null
+++ b/config/stsf/exchange/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 32
+ num_heads: 8
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: exchange_rate_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/solar/dlinear.yaml b/config/stsf/solar/dlinear.yaml
new file mode 100644
index 0000000..3bd35fe
--- /dev/null
+++ b/config/stsf/solar/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ learning_rate: 0.01
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/solar/gru.yaml b/config/stsf/solar/gru.yaml
new file mode 100644
index 0000000..d1cf880
--- /dev/null
+++ b/config/stsf/solar/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 40
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/solar/gru_maf.yaml b/config/stsf/solar/gru_maf.yaml
new file mode 100644
index 0000000..e974359
--- /dev/null
+++ b/config/stsf/solar/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 40
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/solar/gru_nvp.yaml b/config/stsf/solar/gru_nvp.yaml
new file mode 100644
index 0000000..7d3f8c3
--- /dev/null
+++ b/config/stsf/solar/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/solar/patchtst.yaml b/config/stsf/solar/patchtst.yaml
new file mode 100644
index 0000000..e190df7
--- /dev/null
+++ b/config/stsf/solar/patchtst.yaml
@@ -0,0 +1,37 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 3
+ patch_len: 6
+ dropout: 0.1
+ f_hidden_size: 32
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: true
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/solar/timegrad.yaml b/config/stsf/solar/timegrad.yaml
new file mode 100644
index 0000000..9681ae0
--- /dev/null
+++ b/config/stsf/solar/timegrad.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/solar/timesnet.yaml b/config/stsf/solar/timesnet.yaml
new file mode 100644
index 0000000..f23eabc
--- /dev/null
+++ b/config/stsf/solar/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 16
+ dropout: 0.1
+ f_hidden_size: 16
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/solar/trans_maf.yaml b/config/stsf/solar/trans_maf.yaml
new file mode 100644
index 0000000..191c620
--- /dev/null
+++ b/config/stsf/solar/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 32
+ enc_num_heads: 8
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: false
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/solar/transformer.yaml b/config/stsf/solar/transformer.yaml
new file mode 100644
index 0000000..00d1fef
--- /dev/null
+++ b/config/stsf/solar/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 16
+ num_heads: 4
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/csdi.yaml b/config/stsf/traffic/csdi.yaml
new file mode 100644
index 0000000..8451175
--- /dev/null
+++ b/config/stsf/traffic/csdi.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 3
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 64
+ emb_feature_dim: 8
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 64
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 16
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/stsf/traffic/dlinear.yaml b/config/stsf/traffic/dlinear.yaml
new file mode 100644
index 0000000..6bd4524
--- /dev/null
+++ b/config/stsf/traffic/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/gru.yaml b/config/stsf/traffic/gru.yaml
new file mode 100644
index 0000000..8613675
--- /dev/null
+++ b/config/stsf/traffic/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 128
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/stsf/traffic/gru_maf.yaml b/config/stsf/traffic/gru_maf.yaml
new file mode 100644
index 0000000..ea9c941
--- /dev/null
+++ b/config/stsf/traffic/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 128
+ enc_dropout: 0.3
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/gru_nvp.yaml b/config/stsf/traffic/gru_nvp.yaml
new file mode 100644
index 0000000..eef657c
--- /dev/null
+++ b/config/stsf/traffic/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 128
+ enc_num_layers: 2
+ enc_dropout: 0.3
+ n_blocks: 4
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/patchtst.yaml b/config/stsf/traffic/patchtst.yaml
new file mode 100644
index 0000000..7790448
--- /dev/null
+++ b/config/stsf/traffic/patchtst.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 1
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 3
+ patch_len: 6
+ dropout: 0.1
+ f_hidden_size: 32
+ n_layers: 3
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: false
+ num_samples: 100
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/timegrad.yaml b/config/stsf/traffic/timegrad.yaml
new file mode 100644
index 0000000..36caadf
--- /dev/null
+++ b/config/stsf/traffic/timegrad.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
diff --git a/config/stsf/traffic/timesnet.yaml b/config/stsf/traffic/timesnet.yaml
new file mode 100644
index 0000000..96b0cca
--- /dev/null
+++ b/config/stsf/traffic/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 16
+ dropout: 0.1
+ f_hidden_size: 16
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/traffic/trans_maf.yaml b/config/stsf/traffic/trans_maf.yaml
new file mode 100644
index 0000000..0ffb6fd
--- /dev/null
+++ b/config/stsf/traffic/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 128
+ enc_num_heads: 4
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: false
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/traffic/transformer.yaml b/config/stsf/traffic/transformer.yaml
new file mode 100644
index 0000000..cf89ac0
--- /dev/null
+++ b/config/stsf/traffic/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 32
+ num_heads: 8
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: traffic_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/wiki/csdi.yaml b/config/stsf/wiki/csdi.yaml
new file mode 100644
index 0000000..861f4b2
--- /dev/null
+++ b/config/stsf/wiki/csdi.yaml
@@ -0,0 +1,45 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ check_val_every_n_epoch: 3
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.CSDI
+ init_args:
+ emb_time_dim: 64
+ emb_feature_dim: 8
+ channels: 64
+ n_layers: 4
+ num_heads: 8
+ num_steps: 50
+ diffusion_embedding_dim: 64
+ beta_start: 0.001
+ beta_end: 0.5
+ sample_size: 16
+ linear_trans: false
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ feat_idx_emb_dim: 1
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 8
+ test_batch_size: 8
+ num_workers: 8
diff --git a/config/stsf/wiki/dlinear.yaml b/config/stsf/wiki/dlinear.yaml
new file mode 100644
index 0000000..9de36cc
--- /dev/null
+++ b/config/stsf/wiki/dlinear.yaml
@@ -0,0 +1,32 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.DLinear
+ init_args:
+ individual: false
+ kernel_size: 3
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 32
+ test_batch_size: 32
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/wiki/gru.yaml b/config/stsf/wiki/gru.yaml
new file mode 100644
index 0000000..9050ba2
--- /dev/null
+++ b/config/stsf/wiki/gru.yaml
@@ -0,0 +1,34 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.GRUForecaster
+ init_args:
+ f_hidden_size: 40
+ num_layers: 2
+ dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/wiki/gru_maf.yaml b/config/stsf/wiki/gru_maf.yaml
new file mode 100644
index 0000000..14a509e
--- /dev/null
+++ b/config/stsf/wiki/gru_maf.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_MAF
+ init_args:
+ enc_num_layers: 2
+ enc_hidden_size: 40
+ enc_dropout: 0.1
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/wiki/gru_nvp.yaml b/config/stsf/wiki/gru_nvp.yaml
new file mode 100644
index 0000000..968a739
--- /dev/null
+++ b/config/stsf/wiki/gru_nvp.yaml
@@ -0,0 +1,42 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.GRU_NVP
+ init_args:
+ enc_hidden_size: 40
+ enc_num_layers: 2
+ enc_dropout: 0.1
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/wiki/patchtst.yaml b/config/stsf/wiki/patchtst.yaml
new file mode 100644
index 0000000..0eb87a9
--- /dev/null
+++ b/config/stsf/wiki/patchtst.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 400
+ log_every_n_steps: 1
+ default_root_dir: ./results
+ accumulate_grad_batches: 4
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.PatchTST
+ init_args:
+ stride: 4
+ patch_len: 8
+ dropout: 0.1
+ f_hidden_size: 32
+ n_layers: 2
+ n_heads: 8
+ fc_dropout: 0.2
+ head_dropout: 0
+ individual: false
+ num_samples: 100
+ learning_rate: 0.0001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 16
+ test_batch_size: 16
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/wiki/timegrad.yaml b/config/stsf/wiki/timegrad.yaml
new file mode 100644
index 0000000..ff8a685
--- /dev/null
+++ b/config/stsf/wiki/timegrad.yaml
@@ -0,0 +1,41 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.TimeGrad
+ init_args:
+ loss_type: l2
+ diff_steps: 100
+ beta_end: 0.1
+ beta_schedule: linear
+ conditional_length: 100
+ enc_hidden_size: 128
+ enc_num_layers: 4
+ enc_dropout: 0.1
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/wiki/timesnet.yaml b/config/stsf/wiki/timesnet.yaml
new file mode 100644
index 0000000..78bc757
--- /dev/null
+++ b/config/stsf/wiki/timesnet.yaml
@@ -0,0 +1,36 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesNet
+ init_args:
+ n_layers: 2
+ num_kernels: 6
+ top_k: 5
+ d_ff: 32
+ dropout: 0.1
+ f_hidden_size: 32
+ use_lags: false
+ use_feat_idx_emb: false
+ use_time_feat: false
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
diff --git a/config/stsf/wiki/trans_maf.yaml b/config/stsf/wiki/trans_maf.yaml
new file mode 100644
index 0000000..2f5afd5
--- /dev/null
+++ b/config/stsf/wiki/trans_maf.yaml
@@ -0,0 +1,46 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Trans_MAF
+ init_args:
+ enc_hidden_size: 128
+ enc_num_heads: 4
+ enc_num_encoder_layers: 2
+ enc_num_decoder_layers: 2
+ enc_dim_feedforward_scale: 4
+ enc_dropout: 0.1
+ enc_activation: gelu
+ n_blocks: 3
+ hidden_size: 100
+ n_hidden: 2
+ batch_norm: true
+ conditional_length: 200
+ dequantize: true
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ use_scaling: true
+ num_samples: 100
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/stsf/wiki/transformer.yaml b/config/stsf/wiki/transformer.yaml
new file mode 100644
index 0000000..29f6958
--- /dev/null
+++ b/config/stsf/wiki/transformer.yaml
@@ -0,0 +1,38 @@
+# lightning==2.3.0.dev0
+seed_everything: 1
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 50
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TransformerForecaster
+ init_args:
+ f_hidden_size: 32
+ num_heads: 8
+ num_encoder_layers: 3
+ num_decoder_layers: 3
+ dim_feedforward_scale: 4
+ dropout: 0.1
+ activation: gelu
+ use_lags: true
+ use_feat_idx_emb: true
+ use_time_feat: true
+ feat_idx_emb_dim: 1
+ learning_rate: 0.001
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: wiki2000_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/chronos.yaml b/config/tsfm/chronos.yaml
new file mode 100644
index 0000000..f43a052
--- /dev/null
+++ b/config/tsfm/chronos.yaml
@@ -0,0 +1,28 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Chronos
+ init_args:
+ model_size: base
+ num_samples: 100
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/forecastpfn.yaml b/config/tsfm/forecastpfn.yaml
new file mode 100644
index 0000000..974ca5d
--- /dev/null
+++ b/config/tsfm/forecastpfn.yaml
@@ -0,0 +1,29 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.ForecastPFN
+ init_args:
+ label_len: 48
+ ckpt_path: /path/to/checkpoints/ForecastPFN/saved_weights
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ timeenc: 2 # TODO: fix bug
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/lag_llama.yaml b/config/tsfm/lag_llama.yaml
new file mode 100644
index 0000000..c87a357
--- /dev/null
+++ b/config/tsfm/lag_llama.yaml
@@ -0,0 +1,29 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.LagLlama
+ init_args:
+ use_rope_scaling: true
+ num_samples: 100
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ timeenc: 2
+ batch_size: 1
+ test_batch_size: 1
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/moirai.yaml b/config/tsfm/moirai.yaml
new file mode 100644
index 0000000..86f273a
--- /dev/null
+++ b/config/tsfm/moirai.yaml
@@ -0,0 +1,31 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.prob_forecaster.Moirai
+ init_args:
+ variate_mode: S
+ patch_size: auto
+ model_size: base
+ scaling: true
+ num_samples: 100
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: identity # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/timer.yaml b/config/tsfm/timer.yaml
new file mode 100644
index 0000000..97f051f
--- /dev/null
+++ b/config/tsfm/timer.yaml
@@ -0,0 +1,28 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.Timer
+ init_args:
+ label_len: 96
+ ckpt_path: /path/to/checkpoints/timer/Timer_67M_UTSD_4G.pt
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/timesfm.yaml b/config/tsfm/timesfm.yaml
new file mode 100644
index 0000000..b769246
--- /dev/null
+++ b/config/tsfm/timesfm.yaml
@@ -0,0 +1,31 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TimesFM
+ init_args:
+ input_patch_len: 32
+ output_patch_len: 128
+ num_layers: 20
+ model_dims: 1280
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ # var_norm: true
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/tinytimemixer.yaml b/config/tsfm/tinytimemixer.yaml
new file mode 100644
index 0000000..6ba051a
--- /dev/null
+++ b/config/tsfm/tinytimemixer.yaml
@@ -0,0 +1,25 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.TinyTimeMixer
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/config/tsfm/units.yaml b/config/tsfm/units.yaml
new file mode 100644
index 0000000..056ac80
--- /dev/null
+++ b/config/tsfm/units.yaml
@@ -0,0 +1,28 @@
+# lightning==2.3.0.dev0
+seed_everything: 0
+trainer:
+ accelerator: gpu
+ devices: 1
+ strategy: auto
+ max_epochs: 40
+ use_distributed_sampler: false
+ limit_train_batches: 100
+ log_every_n_steps: 1
+ default_root_dir: ./results
+model:
+ forecaster:
+ class_path: probts.model.forecaster.point_forecaster.UniTS
+ init_args:
+ ckpt_path: /path/to/checkpoints/units/units_x128_pretrain_checkpoint.pth
+ quantiles_num: 20
+data:
+ data_manager:
+ class_path: probts.data.data_manager.DataManager
+ init_args:
+ dataset: solar_nips
+ split_val: true
+ scaler: standard # identity, standard, temporal
+ # var_norm: true
+ batch_size: 64
+ test_batch_size: 64
+ num_workers: 8
\ No newline at end of file
diff --git a/docs/benchmark/FOUNDATION_MODEL.md b/docs/benchmark/FOUNDATION_MODEL.md
new file mode 100644
index 0000000..e489e16
--- /dev/null
+++ b/docs/benchmark/FOUNDATION_MODEL.md
@@ -0,0 +1,32 @@
+## Time Series Foundation Models Benchmarking
+
+
+
+
+We have incorporated eight recently emerged time series foundation models, namely [Lag-Llama](https://github.com/time-series-foundation-models/lag-llama), [Chronos](https://github.com/amazon-science/chronos-forecasting), [TimesFM](https://github.com/google-research/timesfm), [Timer](https://github.com/thuml/Large-Time-Series-Model), [MOIRAI](https://github.com/SalesforceAIResearch/uni2ts), [UniTS](https://github.com/mims-harvard/UniTS), [ForecastPFN](https://github.com/abacusai/ForecastPFN), and [TTM](https://github.com/ibm-granite/granite-tsfm), into our framework. These foundation models are categorized based on their capabilities, such as zero-shot forecasting, adaptability to varying prediction lengths, and support for probabilistic predictions, as well as their architectural designs, including whether they are auto-regressive and the nature of their backbone networks. Additionally, we have detailed their training processes, including the lengths of prediction horizons used during pre-training and the sizes of look-back windows.
+
+Detailed configuration files can be found in folder [config/tsfm/](../../config/tsfm/).
+
+### A Comparison of Pre-trained Time-series Foundation Models
+
+Table 1. Foundation Models for Time Series. **Zero-shot** indicates whether the original work tests zero-shot capabilities. **Any-horizon** indicates if the same pre-trained model can adapt to prediction tasks of varying lengths. **AR** denotes if the model performs auto-regressive forecasting. **Prob.** indicates if the model natively supports probabilistic forecasting. **Arch.** denotes the model's backbone architecture: D-O for decoder-only transformer, E-O for encoder-only transformer, E-D for encoder-decoder transformer, and unique for specially designed backbones. **Multi-variate** indicates if the model explicitly handles multivariate relationships. **Pre-train Horizons** specifies the forecasting task horizons during pre-training. **Look-back Window** specifies the context history length settings used in the original experiments.
+
+
+
+Table 2. Evaluation Datasets for Time-series Foundation Models. We selected several popular datasets to evaluate time-series foundation models. ✓ indicates pre-training on the dataset, $\bigcirc$ indicates zero-shot evaluation on the dataset, ‘few’ indicates few-shot evaluation on the dataset, and ✗ indicates the dataset is not mentioned in the paper or documentation. ‘*’ indicates that the data comes from the same source but may be processed differently.
+
+
+
+
+
+### Comparison of Time-series Foundation Models on Diverse Prediction Horizons
+
+Table 3. NMAE of time-series foundation models on diverse prediction horizons. The input sequence length is set to 96 if not specified. For every model, we exclude the evaluation results on its pre-trained datasets.
+
+
+
+### Comparison of Time-series Foundation Models on Short-term Probabilistic Forecasting
+
+Table 4. Results of probabilistic foundation models on short-term distributional forecasting. For every model, we exclude the evaluation results on its pre-trained datasets.
+
+
diff --git a/docs/benchmark/README.md b/docs/benchmark/README.md
new file mode 100644
index 0000000..76ca2a8
--- /dev/null
+++ b/docs/benchmark/README.md
@@ -0,0 +1,23 @@
+# Benchmarking :balance_scale:
+
+We conducted a comprehensive benchmarking and analysis of a diverse range of state-of-the-art models from different strands of research. We mainly assessed these models using NMAE and CRPS metrics across multiple forecasting horizons, repeating each experiment five times with different seeds to ensure result reliability.
+
+Results of **time series foundation models** see [HERE](./FOUNDATION_MODEL.md).
+
+## Long-term Forecasting Benchmarking
+
+Detailed configuration files can be found in folder [config/ltsf/](../../config/ltsf/).
+
+Table 1. Results ($\textrm{mean}_{\textrm{std}}$) on long-term forecasting scenarios with the best in $\textbf{bold}$ and the second $\underline{\textrm{underlined}}$, each containing five independent runs with different seeds. The input sequence length is set to 36 for the ILI-L dataset and 96 for the others. Due to the excessive time and memory consumption of CSDI in producing long-term forecasts, its results are unavailable in some datasets.
+
+
+
+
+
+## Short-term Forecasting Benchmarking
+
+Detailed configuration files can be found in folder [config/stsf/](../../config/stsf/).
+
+Table 2. Results ($\textrm{mean}_{\textrm{std}}$) on short-term forecasting scenarios with the best in $\textbf{bold}$ and the second $\underline{\textrm{underlined}}$, each containing five independent runs with different seeds.
+
+
diff --git a/docs/benchmark/figs/FM_dataset.jpg b/docs/benchmark/figs/FM_dataset.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6e2d8c751237195ea92bdd996b7cbdfbaadfacc5
GIT binary patch
literal 268047
zcmeEvcT`i`*6*fCH*};1DWVjSCa6e2q=^_oArvJ_ktRkFBuY$>-a`>Mf&zkIp-C4+
zB3(cQL0Ui{38GRHz!*&kZ+p(Y-+jkB#=Gx(3E2L}Myfd7Ck5@2SD_P+oC_V$1V004XdN1Q*v4feo809mjB0B0fRKlcc<
z6>|Nv&pGn*!!0tWnXQEd@}wi&%J#VBpQq;BXMXgEC8r1g1YHUVcC#+}_5;I0E`$3y?t*;@&%i(XPk;7<5EF
zfX8h7Jl^-uJ}2MLzUN%soMej!SF`}XZqGWSM#`Y4^cel^4=
z=&F+KB`;L)WmNF_5Fc;w*uSpt=eGdSzYhF)$9C%;IAFZz&%OU2fBc+>zfQOA@aO&I
zkb3>Eeg7KBD>V4pKi+Y0>I1-kdjGF;+}!~H@Dc#vX8miQK?(pAY5@Qw>tFkJ=>mX=
z4gj>?-FgX#@^Ov`v~h7L1Dv89T%sIXod6WP)jS-3ZvVV24o)s^o^8B*`~rgD0resP
zCkGc7CpQ-l4>zc=95=zw0d7$qv7Nf++r%Bt^D1AF(7Tm>pHJmTb(^H)JF2R_XK*CH
zfRywO8CkVm>bv)77#Qw1GCpw7;;5ySwTUGr5#S|ydHY=O_45z78WMUf41N7Z
zRCG*iTzo=eM&|9T?3_RDza;xbE>Tb}PHt{4Zr-1Aad3u#
zjZ2i9XQ%EqF>?pr^H;=`^=|P=97(@l-Nvt???{#O41OmdrD{M@qx}@^Ps#r81dIHC
zlI*_(`2F*O`rhnY&SU}?y00hy1sfEbP~famfSP=Hs&kxtIA?wLVH&PuYL;Mya$fXHXKfgU`J`Uu57
zQOuU$Wl46hu_o8SZ&~2hLIIpp2x9FqI4T!gfV2g`6iv2(_WOkI2l2nJ_4j@KtpmSx
z;I|I^)`8zT@LLCd>%eav_^kuKb>O!S{MLcrI`CTue(S(*9rSzjffZ4*b@E-#YMH
z2Y&0oZyorp1OM}N;QJP^1#pw7xMj)Nsz}-SET+9_IeT|bCqmy{;R0*H2zoB?kR|?N
zscSVfw}xaSq|hTj$BsD3VJUHvcWV%=ize;{ez)KE;D3+~aBaQYWR)XD8F+Il_SPfx
z!!9H0mm5}DTvH#;o-t0{eLne;?==&EC&>9yb+NzV&Y_jM!`NgX-csLa;qsz=ksmwgub@boY?otS0ynqyvB8VpFSwnL!~
zzp2Yky5xCFSN2d|#zQ}V6s(_tlL>hNcNnac~ro>beF1FIdhX!%4r?M0L-aTjx%it?=&Wcpd2(J|ZszBO7?B`AV7Vx=(f!hK&M+w@5
z*{4wPuLFwcOpDDt6OrRIv=_XIhKk?c*t;WEntAY*^^tVcMa3O&q%nx~Z)VWRPq$zJ
zRTBf`u!Lod40Q`2E2dX1<`h{)z8#viHHcKzJK&^ntti{Hy)~jLh2RaHh?}iyn~7%g
zjIwk{3I}y0?23tJJ_PCn$>-kAk-pY9#(j35-V@zx9FmbS#$1wgdqBd$^3Qg?pmQL3
zMxa&L#WesYX>#g3Dg3OJRqVuSsY~>b@`}waW*LzjWV7qzy1G;m5ZCcH&Ij7C_Yw
zoW@+Gj*(_MKO8W?$4~DVHi7#{QRLn3-8J%;xf1IwR(8?zP$lFAPOJdhqFc;Rrw}@E
z+t4?lwO`zL6AV*KhqSWSmX}d#iE6#7ca?8tTm|KBj5N7Qf+Uzq*U+jWz6Rb5JMzAz
zBj|bn+gtCuqXE1=?rdf9!ztN?#aR?XZ3|%iLFj;f3&m|7@<7ZO01lQC9P5PSEg%p%
z@sRW>0o5W$K_$rUO{w@&{`ji@m5?$mtp#~rkDfp-{Efz09bz8+8l#x(j^o9cFrpry
zquiTa6*Ujp-BNv9y(es^wa-agdR1=$LmSp!0Qh4Csi>|L^`rGqX_o97x^!a$P0Mlm
z2z@|_X|Y=|aA!RCg~EQu{}PP~qoIvd8x2v^HAl_KPAMof7YgrrCTBB*J`CPC1UU
z0gV1=eO?pltKb$OEdO+52W&!1)xP
zmKK4TkUFWyTJdt#p#a^G45K>2iSV8
zj{3^WlmaEO<;b>U5!ezmjx7+y6hlXTH<)8Y(fPfD9*s4JZnjuY^$lM4JNzO0MO-(b
z0lqXb8>HWHqkc6tWYZZUg|vS7WoANuSJBWL9U!Y=uam^A{#_nbuHMQjGHPj<3Z?MI
z`cMn-Eutke6DB??Aq}a5Ntlhm>tsRIIa
zA5ySAV3qSsl6!wvDstD6zDmR+?#JCXHS?)s%W+H{bbqHTE(W7bGdenAc+=Utv93Py
zi<@g{8p%PrNj*Z7yvoodfSMQxtKC~dJF$ASUQugMCuFYi{g9eFVV8fWFjpVonM?`{++jwa=q-LV~-U-p{6|S#Ns|9u@8~(qh3hWbGNNXHI<8hC5Izldgn@
zURAL-S?~7|?^UfiptS<(oTsh2>U>KcTR>q8*mahMV7Db~0VTSK2iR9LYXJma*b;mY
zw-G4|EVim+LK%msXTEfh8AwnR2z^LTF;wZ-x*d3zle8R!~bG_ni-qS
zLzumr#Vw*Y!OM?tRx8;U2r_Y&jZP~*`(^levADZk3N5X#{770=9WVD4WamIqi%vOO
zrORy~%ESUP@zs9Y8o7M|wgL}Qu~cRYl}5no;XLe7TrIK_F7Cq6e>o#s7lb~`z
zEpg6d=(bgQ^r^{pyV6B2zP1>8t5DANL);&
zEgvb&@-%;Q^A6Jw+d~k>`4z-?Xg9^R(^bf_6Sh}N76vDKBw0BP)mXBO?@2|QIRRyj
zj}eZ1z>_qNu`S^5?)7X>$0`v=$1tkMYSf$#9Ms=bdDKJw4>#8-el^{mZ+@yex#2o2s&hU>vQG#mi_E8JAjDB^=PWPy79~u|`4p3NC%+-p0zI
zS1%RkpvOM3)R`XG*ZBu&3USdGC{5FHE${Wwe5%
zm6k(~7k;QGU81xMhsuTf?^Vh;pmJ0MmAnrT{TT0c{^|V)Vjp~|hb0z8lI|o#BWn+j
zGptFsL_Hf%SWVcx)HtTqFO
z{}Jx?wICKM-zviLx3_}Fm}(8P9GlJK&*sTQx0)B=atI1WOBZhn<8)`J&hQvEH^Y_b
z=swOeeHRoWsDSFAyWOQXIA}?cKd$UO7iN>jZQn1DEdXd|zRyGk;M#;?b5L3yyZJNp
zhwvtV{r=PQQV6nG7)E(BAEPgwgwT_;i<#_!-07H8wTnZ)7=1Y7X@Ua;LlR$7?mM!x<{L
zms>zmE~KkKjO98O68Hhq?t3)H-c@HOnrHd-H<4qJYH6H{vYlP`(2h=5*JVL$BpkQox6?rM}2X!SI&3FE&e+%*zG#Bi483^raWwEI+s$;$u2HMiZwoJ7i&z*h4I9
zs&CwVrfF6oI@4u%S-BUrcY1WqrRYqCZToH+b;HfmxHj++n(Jck*aDuHf$pBT7IAoh
zLRfa9%FzQOyV&~}rP{KeP6>@RBuF$OPHEhg7I~Sv5J7tpafWu6b~dMNHH9G9YKk^D
z38m&B
z5I45t$#)Ds*G&eF=kogo>R$GFCThc_f&K2}z5|z60|pTF8~;G9lHWm@xEpDoQcZVa
zglMrc2_qw79eU83hTmcpJe1hAYVo+{DZCu
zzN|UZM|$AbCh%HvpOLpvX7cRV3U564&HS0_f`p~%w@i{p*2PM`8Lv++ubNgAi8gPo
zKbpxAei>!RLxl#5W9=kOZUIt^T(-86)zEBsURhJ##giR7rrs`3n9b#~Ps3)5pieQ-
z8jt!148lu9j%-dui=F;t2fav^+)g{)OTVsqvdA(-=-qy<^RDh{7O)LNW}xjyZ1)x*
z=|0wBl(Yrhw8Sr(fuj9AeEQY#(=6Eh@4(rgp^o)b2~_n!Qa$Gw&VjLbo1xj;ZJR|;
z^;L)A%52(sF)rEii>a^6!+5xYK@B|%YN#SZPP$!FCpuren%Z(BBe3W*(Yk)cbbt=k
zd~;20s`Oi%=apwa26>mmSVq=MVDx5*>evDpQgy#XuE>7%B^?q(2>p#-!?;3%$b3l)
zJh->yrpB3x4@wy_=|Lsu6yV&K0JbFa4ikZE+ifbuptLnVOfUuft=kn1s&CCV)HG(L
z-g{(IV3a}Yb2+-Q?LB%zM9k>1d<5hjQjq#mDK30wprO17jNRas+
zf@?Ek8PXQp%V-^xKblM>b4kTR2G7*?;rz;ldJTqO5n5c1Ms%0SsnlvJw3$tDFu`yA
zUG0BWL^7TwIXMe!TfJ3{QLbUwk`6%ydmis<-kb6u+=H?N1`lg3W1eUGP#7t7IeZ01
zjVZ{m=ukYoS>7s-w&=d$E&Dd{xBQWSZ{ZK7v58CauKi?0Cx)ai-=EIdW->T
z8O1PbrI=dPC1tw1Vc6TH(!2Whv1%JiaGB^}nx5u5j9v*!dJ-c;>xl}5i(!Bve6)$b
z7YNvWP?`#dPNtip)F`)~CQJ!GqSYv=q8tw>UIkUkCYC(>vOuzYFU8Ndab(FEOkH?i
z5lZYcpA)g-D}+oP+g%hP@Xz*%1t$*(GSrVgqp2s?huaI<*2$GVZ@s5on2f7ejX90o
z)oQj`gx!gqsG{&P0-5_*hlhz%L`ws<*62$5#lq{2ToU^yoUf}8`7a*ZX64nR{{7nK
z6O0}6C=zpK45PxdVWh6RgOA?ce15N7oZhIi*}2hbC70q<%X5n#iz4|{xzf#`Iu)TrvKVyLXLa#xzR0IzG{YKbCkKducJ8PT;G3*$A&3koCWB$Q)x
z&xgY%3Uhz>BzWif-%QGX7?9ahf^Pi#16IF^HD+9)ej{JHoeS+m34LkZJxuXZHh)aC
zxGsBuC#>jox#UxuseRTJI0alIynTS5CB0e3R$>`b4I9VnwD-@C2D;zuiry3M%QKN;
z=e=z8$CJ4X364xN==*`c5A|=)ax{`^s}q6b#~QJY(%ysSh;;}>_A`M!bgM6^AJ^gR
zy{Z|TUd*kz@AS(LX?iF0qma)ORlpk-j_t;(*M=Wjjk)1e^GxL?{+^fer)S~*DhHrh
zz@=Z&IMNo4?_@)%ad9Y?J2{Y1_d(9?*2<;I`JieGA+M{48hyAj0>6Wia%4QVjf(BP
zF`5a+$Q&ML0z5crS_3EuF+MO!u
z*hQY=r6<08ooR29#O#W;5elfTIiy!TB_Z4og7Rw&2sM26B9gDXRSbQir=7N7TbTqV
z|Hhs%VdI!8!||NpycwQ^ZptF)u4n%K@PbzR?{);IRq&J$ZK6w=A@-AcqD_2D&C5SG
zXKg&b>KhVibHTD9D)CT&{F$5s#oNqg`av)Nv&85ytw4D2FuHz5fpMz)$AvP|s?eUBCw}mEwE5k)
z%hbg25&3^0?*7|c#lxx{r~GH0!j^q}!sILtzj&CVdP$(>Kz){sjY%Y5nvy5FgGW
zqe^Mtl3R2XMq@5KU)^gk`DoSj6=6Ep;Lr#!&Db%Cb=!UKXapDQ|9%5%^lX
z&XXN3a@h-sNi@aSzVr_&;Wr3?12sT6qGpVWqxO@LDW(tx#g>soWjbaa8#D0F_S<#j
z@@1^woCjVGpEXGBvoYL!j*(^HeQ7unEE>Dp#gdN3IFt|<6i;0JXhMrcug`diHPH(p
zwugnWfgotnFu6{_$IsLZqm#&x_)w^L0AX^Bx>@zpCR6&3MnAcpsoyZt`||BJd+cq+
zSb`>OHg#sr51eizf!5~`&?Q2i7Dv*;A6FxB{_v+m%%@o&QfwbO3Wc<<`~cl7{+E`(}Yp-?WsF2()b
z78U>q4Fc6ZPR5h0?nEkS;aWO5Rsz3Y_`d%g7YkqeO%S|uk3;lZAo
zDlNs|5^uFfYqsdBAt)$@2Dm)2Dh|fG1^87#&=OE_8G$m40Ol?%f@VRMpmI48gG42lC=f+yhyf_
z&c*W~+SqDBm%}d*%wU%W7;04=_tZ(#jkGBGmFfbk_jP=3Yu_wf-2&~BY|QdjykVZ
z57+aH!qLu;aV;l8Bd6=Ct~%l1?t5TI0CTzTE}
zw9Nw&`L<}244R^0^%L$6SN(F^KjU~9lkGTBjII|2#ue~1@P^_8g^DU|Bt|d(L$PQ1
zLDI-=W)!jmOs8%4_t?%DrfdOxpLMUxUWg+LJ9m$oWKZzaPo-Nm9Z(l2^j=4f^0LHX
z?Bn4i+-CR!>_>2OA*A^;=n^#J7`JFTq;KsNoNx(@4NVdsTlZ+q^xe&aX5}w^11>2&
z&+jX&UeY8(Q`rZ==QD!Q-!-koz%lnT1P)qD9-H6i#|Q86cHsf5?9
zM1Vcc7R7imuF@vk(pcsU*D_p?pR~CBr5w|vqu(>OjZ?h8$R#U&ynG{ZV}S)0HzTcZ
zy?4MTn=U;9+X%(pJ;|HK9=wrXQpDnBWR-s&3my-UQj{6Jq`jhaEz$2nVjsd*C3I)j
zNQB1ANWNF|E!J#)8o59mJw`WSTqWaW0>0f?slAE9T+%^(%9x+`%=yMwYU`u5bmWmk
z#$`Z(C!BX#8%(i(1XJu?_FxcWv#W{|yb6C@FP;UafU1Hlwty2xr#UuIovV1n?1x1&6}gPaq!L6ESP
zKuOr+{N;zuYP`ZS^&xu7mk*@eri~I3At|kpVXPv?m-a2f!1K*{Qh`W7vrp62rq}xs
z!De~*tM%LL0UYu_!9uJRt_=j(7f`omc9=pLlMYJkv
z^sZgHfOUzy3~*m9cn<>;U2Vn$FR0A8ozGS3X#cVF_tDRWRdc&t1}-4g+QkklxZX4`
zgd82*0xr%>AUcWD1*A7zQ+=E@HBSX>oi?Mu8LG3brEAdiKo4PWZHr{(S%4m5`WGks
zZ=SJ!^A?!E$ti%a5Y$ym#t75O4gSQQ%-1!acb`93(7Ptf!u)V=e0k~`ep-W9K%|n|
znS}&Pi)G+i_EvcobhD`CFbNsMR;Z=E&D4pxxZ*+V+bMxMG(BBx^YZ2=jYL638;37u
z(7}TL09D_jSYi}XP6wD^h1F3QaI*6)fAV4Z&xrNSY`U(NM}WyrBroH(ZrgJ1Cb0P&
zHk(b@9#Rl@W2PH2(cOJh&T(*OVE+eUAODKI+B){)Sv(?z+d%aTSJH)J}C<7>3K+Ar!1Uz_JrEN%Jpi&JMEe5c;S5-mpJ$GScj
zd^fx_vWj~OV~MSUF;F5~;WI&&;MZ^a)wacaN6T=?PJQ!>*^ZC=)SGlA)2AJnU1p>s
zlG8gfo^brg`j75$0t|+bU}E?aSbSQRB&`atbg0V(u_1Rbn!dsGQ3t-_>(lhFvQ}KH
z2Tq@KSv!J11}38;&tUWzIGQj0E{>mdY*5?8y7-;|8&@;kt(Nr0I$9GvmsqUv;aW53
zyPUsIK>hd>djwirQgCb8RP8Lr;vJd#VsmobeL*<5i2qWtzD|@58>`um72teM`{8<8z>!;`Ejj6e(?-nK1jO`
zu`G+n8BrF^b{~vRMQWD0$ESXtkMxGfCx(~p;f7iuUsm9Pp|x;UUQT-zQ_=vosT3P=
zxCX3=3%F&MJoF>p?5-$3Yw6|Xm!dWNc2UB?su-*!9(i)BS9KQA**wFND5I+k8iu_?
zVbv$?O@iw4hx*uSQ&9iq<++5o=4#3BQA~f9C0G``#=0_!y8xRggBP5lB5z$-tGC^m
zb*`?tH6gLr@IyyU&qqt;8gm)-a21>TsUHgNLZM)WYK91-2ficGV4VcvC!h5Xc81Ey
z46P6kAY*Kg`-U9NmuP-cZR`jKV<{|{f+A1}l>LzydumlD-Bf)zEhfWmPqtOexVP#%
zD$Vo&aiJo0aPCA)zp&-f4sFT2d)~4G1c)}y1CU)gV*B{phZJLN-dyk2K
zlh@pCiMs@bv4Lg&^yZSJ@7ZJNs_%@xWmpP5q-~2Zb=YiS*^^u$u9-9d#j!-ZUMDn?I~erTk;&<*X!_qMi?Ow
zdIrM-CZi%P8T~35Z&x~N^~G#eGjnNp#%==Z4x$UrOSwi6L&|{SiZXty@>4M?VjQNLV%d+Gng-B
zbuT=n4#XRt)thRXnwQgTQnK%9B1Y|sfK{#jUHASSM61AN!8BK`(aY6jD1>!|0#C$>
z)zD7d_j%HB{-Vy{8;O1KQqB`Hhm7=LTIR+8=YfF*Xfkg0F0vY*yrfps#0c~$u~|5B
z#y`W}OUt)bCxgVVm|EUiILKS%higDBm63x`aO(UmnzaRC#LzvyLR*{M?405M>5BZ7
z!j;#r0mV|VQl2oQ$QEMl$b&aB$|%}K?Q08VD#VhFMe*a+lG{%!+Bm)xUq!ybb;8EC
z0Ef*q+@+Z)55>9{ISA^*c#PBi51+?|HOgP7FPzkBSP$Yj{xtjS>q1RUP2fQmatfR;
zRC@t{IkK6JRSIm?c&CjDMSE?uFBq?)7SSQUS%eu@OU-IQkIVYFV63s32?vjD4#$$
zgF?DbKfhI!Jv~3wdoSNx|B1148*x_BSV_#hXKsKcg`5rRLdktVi<6O2AxSeLezMvX
zA5xZ_9Sl2M?NpOAUcW2cE9NBUN~=E}up_d+zyOrQhy`IZ6|!7uI$ig|tX>SVc7@o6
zjAX1yM|eyPT;YDC>
z`R(O(@yXqFG%dVL1SbG|*Kr2VpTO=psE(f`HQc93O@z8sn)Yk;qJrtV1J5sYx40w&
zTy2*>O5c0EFL2pq&ep*vA*wjS>DDWy4W-l()?u)aRI9$sQ-r3&V-Yiu`eL%d-N}l&
zYp3ky_XRztq)QZc!<8*Pqpv=fAEUcdA?=FF3>@_k=nHRQ&hC3Q%7PVsX^^hmVa&9w
zud!AVGNn2@-Y3KhfxI~PBEn9>Z0Q!zh?(K6+evjNZ7-X{Sd6?=d(jdR`NJ|3)4`k8
zD2{)2@PRjhr&xW%h#oLjUm%HnTOf#mlQnw{g%Jp9L(=uIt9w4EeiNUt1c@OlZ(rM*
zmi}e)|Kvgu3~}f2ifBsL79bC*Nsvlc{?7NIWzg-6A-l7B)ASYh`0}8xHw*C<8CE`t
zCdw&~fc8{l$sbP0EF=|<`fdy(GTA*%xQ!?52-pdX9r-M8+cfmOi4!^RbKM)2V=*h2
zRv!MDU){U%AwoVAx&IgQbP7A=cpX_I-PQe-y1cdp%s5bfjFAy6@nSYVfmDU}kVK|J
z<-2;RdS;=%|91gITb!zZ9H`|kfbw?Y**YtTV3{{
zQrZ>uhm}S{&K~=5x~aC=TvcCr^4QCR`Fv9xtleBcz)1b?C9@ZCq?~SC3{s}NDij)n
z-Ca$yY{*mYx~i63$^pOI>HCc1P$9}oqvAut6JN^8bPh+Degj=g-k4md9vS;k&8$;7Q
z*dZKb126yTM+;Vq=8?6n^qZD{E0V?r@bqsuLZ
zq=NWsqdzJpFD42f6TElmb@1gUH6JYHwO*s0w!7p5Z*FE?zwZB#XpehzB73yq)sUT5
zxtQ_0ZLW9zh>+mOuze}X%EPstHqjwhXwn--f<97*KC>0w750yr?az)B3`}xrbVg})
zKdB^>N=~I(ac*!n!Y-~7ttm%Xq6`bB9;0Dl{lxQX>PYr|vdw&$d)ebsU#5_BcQolUVitA3J!M?Ly*&cy0ICpntA!XTP0hS)?SwvElh}n)pO%bF8uT7*=yVkImOhg}zkWNi9fb8!#SP
z){h#72iY6kG_`w{ecmsvO`-FR*fWi)op(4empCr-2~AL842_Z-}oZQhf~v$I@3Pz_CuEscpQLr(epBOV7tqg0B8
z$~3q0siaxE<+g%V??^wi1-a+e7T`Uz{WDa$;!@qF`QA%&PwQHR8ut^gKHJx$40~O$
zpMF|N@<=gP$<42SIUsBxt_{utlb~@k{8)tr4=&pFV$$>Hs|sJ7!^Hg?gNe3=b=hOd
z)K`Nw*(r&SciTFesb1FHDE(RJuOO^hnSycQ#8I&Fu#xd20_GI>i6F-Yt6jCI4wSeD
zTE7Pea=gUP4AXBGy`BH!>~}m~)ogM6w$C|;r|?aFWL5$7G-Tq?OE87X#VDrm#+e)r
z45F+GC@*9zNDY>U88tY+)P5bdH$vvr+Ji`ruj;>m_k{u>tO-kw@ow*Z
z-u8kW{?mIhZC!GgFJ2J)<7ij34{i8bchBG$^#ziMUkW4@MD2$GCgK!m6iBOWfd!H6
zrP>1@Yb{e!$^IEyHjVnb*LP(cIG*S_k-D^Q#!YBcUy2c`5-N~l9d6b;O{Vd^GB!zt
zkQ{!^`6F{d7Bzf)mDCzT%^jwUwHFAwqQTO*SxfAU3;uYco%{8r%HEV+=prY$-htPF
zExWa96grpBU^&pHOp9R?_o4s~^;-73cgs2zB`@lG6>ruBmx1YcZj>1!5@Nuy+0D{t
z^iyjo+A;HwskI<7-d#8R(dClbpT*h!XPTme>2I!QzhL^X`=+JG3?ahkC~|6y*AxF!
zy@8=9HDX*W+T}s|Gnoz*&K=j359%%POx?9_h{5mPCwA$^
ztlwEtkBH0EzB{`%0c=Jxd_mf`1mjJ3cqvPhmY+vXs@QLsJ%30hx*eT}U
z+?u{G6FIW<2P)B<*&sqN_JJA91@pPArz?-GP%IU?nqo=Z)qQzqT{z2koQs5TD{gSC
zHG$kOFX|PLXZt5%VMqNUQV45|QDMAOEH-K{ka9tn7-Wil0NGzfT>B|IFd-kRt2Yc;
z?O@WxnJtaxBEgrfw`WloE$XTCdu4w6-I7PMbwT>qylv&qYk$(e#K3Cm-Miko%uF_4
z&=#PCN=AOA-3M9j(sdlnk^hZT`d{4A`6@6)(35Y>h@o0{H#g#2EFLj3ehhaLOCIPi
zAcCju6k4v0UQ8=~{kmT^+fzcG)0Qg}8H_o`ctP!%ozD!aWI56d@v&MuzDxsgiCr}Zy3BZdrPs5dV8lEaN^TN#KF
zcg;wn@$aL0d&A=zGW65GBi4T|N#YUNjGEod_*$Xf#pY~z_P&LO?3zCftrr=Sl1?mz
z*Y+yMh-nsHNO&qI&^MtW^Ac8G?-MHYP$@ZT+0JQc3!n@^zDa*~`irpqvmU1@-HBBh
z#6SxJ6!*~VSqDAROU%B3q2qRy#2sF8cRt_&4tJ-#az7$0zOJl7uv2PZz>T1dAmOfx
z_jU&2M=3%XARyQyFJcact)Bk|OnOK?U9dQube)We1GT?XDqn5L^WL(@e4n|Nok>9X
zy9`*o=3|v91Z|UfgTdR56T$@+#82~6_urFBzd!zA(iY_pOiIA~Ya>achMylPS`mw5
z;kS$1~xZbk7LSyg>0(U7D7gw|H;(&KfJ;WF$$Cc`Lnn-M^-3x?NjH$*
z8aGbLgnxF~sJ(UPu%Ww+;#m#iq1`J=P1C6vT>|A7!-CFz3={G)Rz<95q4=LYOZ;#`
zsQd~4@-uP-tN{sOkHc!dC06dOz#OI-SrD?jhsZxdQBA3f3hvYfBOIb2|W
zSN$Mbp?N7v@1jl@)Jm2UgqsFhSoPribT
zo}s$HCYU6U?<3ASNJD}~Wcvi;d<$axW67_T1!9wKwc~Q9f%Pg_Bu)lZ6*3Tm)gQ()FRLi!?Z^eLLiI%U!Jm*J_6MX3
zL{X$q9e9cEVL#$%g-}?!ri9*P&mC|4>clPShi%%@w#Q5W0CG<&+kmB
zo-`N8$*}fnfAUB*b5LO9$-(?PrHeO$M)BoD3*7me;%!$@S7iHz-q{E=+oXbAUsXw^
zNMi$zwQqlyuaD8I`blQCWP-(#trD694q%60iBQTOR!$XQ`A~eA^Zf
zEwaN@nxSt?Z9-?us6DMs-S`Gjsgeqyl+UBz|6X!)0rcSrSdsnTUO|H{;64%rQ
z!ji3(9A1jP3Ljf-OfqpOLaVeBdy-kUm$eP<2@HiVHsNh%8
z=b!zyWOV<^t!!gn+{O*o-l`>DI-cQwpCt#1bq(E5Ld5}0-dKz>-G)K*@f*SPy5Mdg
zKB(a8x(WCNfW6EHiI%-;RQP7Z#?0of)RsK@{mlwf32ZMLlGI5M-Ru4Ay&>n9(l5?=
zZfo?T-OGuc+$nlSuR1m6II=Y!j$;4*bpoC+iKASqrhTISL90qaRl1$s-~J;==U8%P
zje=>nRigG2?CPB;P5kQfle|l~cEV%<9b~dT7I&q=mS(7#9l8uJ>%3RKBb93D$xMrQ
zscX%(uJW3i`^O_rBCg2AUSdguay3~(I|U13VDIQdZSAK0nw3CRNQD
zh}mx}$x|KKt-x!s=TPCs#h`L>`t?JD{kVRR8lOe3@U2vMyWYQJ
zQG8#hvRqHbW;XHwcW6}~bkc$?fG5Ew#yZE4g00eDL>N8Y*Pz<0+P+gKhCiy9p8Ae6
z8P=h7m*3KlWJc|#4UFs18%8&f4E1lFDR9?6J}k^^3#u$0&cCp3)NzIX=0SD!dyh>&
zM|@Th;N_BJEzQgpnr^K{LW`$t&Q&NKOfRSt|v+Duoo5x&(J
zV?mpVQ1bG_b|?rW?~8iOb$?sZR?`_
zZ%E|{M_E-@<>;QC)NiH;T?l?Vw>j?#R~@5yF)bM3)>J%L3r)zEpf1-;_`B&Od@a+o
z5nt&-1%+!}Q5x{E1a5+f3lKg=g5=WPB$nV_`X
zPd3FbewzucK2S?bv)eOPpxD6i#7
zVD=>MDv>=2r8*wK3PXvWIx!xS(Cwr8*G^XqzF-13OF#7+wOc@Ml1(md&JXC!SJmP?
zt0vc9LhCxxGr_3dsN&&Gq8eaezlGg
zdT%On`XdCDD2*TvcVhd^UmO@jTPjSw~x-8R9u
z|M4Jz-SFs_H4i^g;Q!tjzHEV0V2Rm+X3c(a5w}4K@>pf+;d&cbyFm`U-4T#O&llJZ
zn=OH@dBY@di=*JopO6{>0dB22D-YF_e*nQM
zcQ{V1Q;{CLJxu~letuc
zB+u;-F*9T}VMmeF2Wk~#M&W*{A0Pf0e&YkwWhO#Pwo%v
zdthuM%G?h|NvC1HRVn@ch3IJtTXG<=lo&g6DhpRTpGIRQO=$?_N1m7IZC_bLo&cHK
zwY(8qfd3+r=M6c0F-e9cSJqMP^1=1`#T5xoYQ{oGUrvC&=v#-&fs6vB#~K~nm(8o*
z;l_RG-x;@<2I&4{)W#%3_Y^Nu%u+AUa`e(L&bLPexO8znJGtjuuG)818M&}!AAsGY
zxdx{Dvt^W3|@Ox78
z=6gTiju;}ol|1?`(rPv9tnJ5ZB>W(*4j-qDK{5}s?5HDio;F{h+XJcrtIPAFN0JYZ
zJnfhgU~90YSk^S1c4&OQBI6E8Ft%Z&<-m2xDK(czIPF$+fr3|-Ug`@Om#8+7*cZ+g
z0H2y&cI4F`n?tybW)1cgf+zC(p@L{6UGK}xj{|`}WWXDni%#wDhEm~g5M4w+I%>=FVE(1YDIHzH@v$5*s?WWJc3rl9ngA|z_x(kgr3M(^dT3l%n>VwJF
zttp$f4%)4Ew50FxxreWv-ntYzWP_s=u*8Pxft#f*!p!4D2D7tObQB{)I&0egW9IZt
zx6vRI57l>)vD~SlR<{#6gEJ(ae8;3+i{w~_Nr0=@U=1@%O%!*iAQm(_f*5137I>W*
z5tAdLJtc5?UM~GgsN(6nB3@H1mpsg2V8x)ucR7hlpf^+RP1nzs%@JPkI_Xu&2
zF7E)DQN7rG_;plln{ibhXy;Q7rhI(3?Dc5Ee2F%cuu>{yY%~Jq
zh6}<^%7HoLrGm$M2_-D8S{iT1L`%HN=p13U<J#3+&79G;$7ggr9Qq)J7cjWGTt4v|V?fW+tZfw7T1c}IPu4b@m>qJ`_
zaN=!MCO7pCvd^~ytiyGTS{mUcPS{XJR#Qi8(Rsho-ni`z>&MG4j-^rEnk_vMnctVJM!}J!0
zwNdJ>p)rdwgYePe`a{N3y&`uU)NIeIyhO-XZ+zTz>}OwcfLQ00cb`8Je^w!$+GK
zF7z3}Pb5G(66}lC;
zirk3-4(y4I`nigH44kDl3&LD~E67V6*M)7AYHZ*qTQOmJ}E36_CC`X~S-kEs5Qvlv$Y{TQ_8@xVKGv8=o2TwPo2rM$AV+EiM8&Xhs8rVQJFX_7-7+5lOvEDItaZmP-LNZ(P7aLdxYJNlg5#bVMjaw
zTFh&<)t#%DW{*PK_M4dw&tB
z-kMum|NKdj!b`@SggvIhIxK@#mgFH29!QpK0mrCgZGrj^{FlF_ovmbAa#ag!dc2Mf
z6R@tpjtugyFH?{VEoAphoT=uB350@+%EhKvey;sr_D0ma&FZLX3SGYZzw8crnZS-2KjSo!|HKJ=gi2
zzt3N;E0eijulv5A>*Miws0uGh@&h*bbWdZ&f~Z}r4G^z
zB5T_HyhGaUep})DHZ*_cL7w29XhHo={~Jels*<0%Y7ua+e4ul&9MnfN3z+lX!DHH=
zNjo}72qHjKQvzZLirO0dz{FJ-{tt#^huqf~;KYo*xeJgH_}xDhG_GNW9%yUf*2}{I
z!g?0}9ra&71P(S!AH5iT8r_d&*HH(s!ARf63CF|oNb|Uui8_w5?4`R==DHe9*i3BURvQ(Uq?fMrcf^nC3nzd;q1*H3>u-&1j3*WxyoLWehY&P#a+ZSZ)-g@H$V3y>b
zF-$n^!43S@DcFfHb{Ng-?|Ygn#?`uR^pOuQw6`K5wUY5khd!-btI0L9Gbmf5b60Zg
zB>j&Wa=Z~wO+6wgxQiV#Vz7%7FcVb5gQhu=fcAQ{NP@`_{H6)9yeKa(op9hXaYf;%+3rpB&uu*;fSV6ln
znmI}GZOlAQYSaEzlk9tM1*_Nf={C;s(gwGKif_dQD2JgCVMi;9Fmu(f;p4LA2v^aw
z!@ZP$VMa|A?OS8WHiiI{M0KF4kzx`zcyA7FZY^d7+ZwLErOMYf9H~AxpW5u%XnstY
zljSGdYD$+PB%4C?DGtr}B4pH&q~57>Yy_)u|DZd%@cid%_gR~H?e)*e4s8BJpJ^r0
z<1ll@GbiZp)E<)-dh8zRaFQp_c^cnvmdt<4ck;3K4`##%CMhYot4UwKM|1osAvTbP
zji79*bchM5d+Fm1VG*8n%W77(&GOo@q;FEl&3}4oBp}f7jc2IQ1PR38Hc^Ga=Ew8F
zk?y6d0;wpwtH_Ap03LCPPrZU7JpkT>51jlm!w*=E+BbW2)PEHWsmh-`{Hf{l%Xx_;
zbPgh2*UG~7Wt4@1h=`
zk!ns#aR%ZY_1no$7)VZ_XOb8&kL~b@`OLR`T=UEA$S(Zq^Kityi`}_AI@{jqx5dbf
z7Q2U9UBO?!PP+$*lk^G5Vf4cnU{lAk$hKH1c@_Y
zI-s!^^P1)hF_D8A{OZIvqy_V#udz<11DwsT3LCJS4nI#oXWBgNmO{e1P5V~lRD>apdA230JBIY8ayk~v%tJWiCA
ztlk0VnGDR{u)9VHsGgJyGphGRT>WyzVI>$8FYsd+lDYYW=}nS4jdy^MTo<5-4I&lq
z3Jjk#&p9;RRx@$MXwPi25BmV{JpAaj$hmh6UV2`LnLnv%9-YRS7~*<&d$wPzSe2-A
zLuCn>z4Kie;3GVGc`qt{QclTWoW!)-B0XNM7L&J9(43tWv`f_)Lea&q-6x)NxaD-h
z>rCalTW?a+`TOQR@4}zs_YSfeKR9?id$H}bCiHwjb@*!cVy?SQO_S1MjS?c`S1w}}
z%||;=+UgjH5?L%E^Ihp~63*rO-DeNX)`=}^%p%13{T
zn=3(`(qi=?qvowU;79EsPm8$nk7rsN)9w3vnJ$juydujl&ZK3{y=&*+#?ttGDYg)O
z!F=MxBq1e?CQ%yL(|J&z?dq0PY}XemSwvYj2y{Bd{53u>^^(Q&M%W@ZssKm2K5!IT
zq4?3$h#xU-lf5*lsx|BEtjaY!_vF7YSNEOg=lH%$u*TLF*goZ9-`Wp*7{)}l?f|Vd
z`4bH#P%%x3YCzLUxMu8qyl!MPai~W=qwry>iOZ8Am4|l%m}Dy3nQX|YCQ*UE{BY0t
z?<(Che60yL2kR07H_H^>Xcp!Q-cr`UK6)lqeQG>_qB{6g_I8_hMXs%Z<{
z!Ygcgm6th~u&m2^yL02Ck9GZ%d>nHXL%JGjxD6b!B@i84J{sjRUWA@c?r!ixD|3?e(1O9K>8O|`<>VZ?V*MP`^sf#ZBLmdqRA|(r;
zh;YyxR$pX*4f-KH~y<012;k
z}bZ%DhfRkXlqsoSf4a
z``AM{g+^_1P2mpYse3It@vxnN=eXmw=8?0{Y6+
zBW8{g5stRWLeKFuxHa
z2PXez?YRG3I>_v`L)1M)=08FAlpyBL8fTgl9?TaCRZEq7B-VT%U_9dPq(?k
zSa=#OzL~UHZtY3(G0`?Xuq3;I_*|)ypn`SyQNMLf>d_)lP_|!>N*KP?#60SG7%YUaof6^}>ol$MeebO-LN{w-Eguc)Q`$y&d
zIffW|Bp$R%ppFmc*%F&C!{8L|d|XiqgxKnYGY2|Tj7dFR^1>$3aGt=HT|qV3?KkC8
zC-HkH2aTF({X$0U^GuUX@`=E%?*xOiiDb{2T%Ru%je-YCcYe-~y)Wvg@x*c5k!5SC
zU%tEJzJI5o@36!>ItR1`-k2q%J%>CL=y?;;berYUvR)8YZLS%X@QV1V-i!2B+D}X`
zJ&Y1(x_4_S^J)#64LvIgA8J;j-V3=k3Yc=o}HE=T1T}4
z7tkHDpEFeqJ1FUXF(jVR{W+^D;NHRWc+o(j{6fNgC8d91x1Tqu5Zj*^5-z`k!G+vF
zWQUs&FekI)616zzo2KOvU87l5Gvf|XHzP__vw1%>ymxz*KSJFju`OgrfVSg6)HC(&
z6s9I%2R%M~C0&E%j}j!R-6WYMo}6F1csJUiloX#E
zv|wNnu7{x}vk8xw;mtz>E5&x!6mC>c4~=h&Vg8B`K=&KM{TXow#tKflV!n^SaDzK@
z$qVGu1vsyDvIOHu+`lkH3IR!vk;nacVNGm-@$dhASD_&LcElq$yaC_HpbYEFkwD
z!z{o7l#`lnfuhvOTBd{2|Hjy1?(qMoUHr*5F-y{)-4{j6faF~r;8O;Nt$zVRBWmKz
zY{yg3>t(P7n%ZU!cO86ba`}KtFv-0nHvIZQ63^wtBRflvZUu1H8y*>00(?yTTtkmf
zl2AkPpMh{J?+WA|Trxi17a(_B^#K{gk+Ju~Gl%c8EV`Bj!N1IoZ!NfP?2xGjKN;$q
zts`m))Zkt<%SZS$4--n%?HpIk%*iPo|5Uk1vFiC3shn$6N-hYRB&n7(AP0GKLAEJL
z>!n}DbgLiE&!^S2sy?evuQ`os&
z)sRE|pZEtvzQ_)*zUI_|Oi7bhAZ=2AC!C{MW9vfRe5ek`k>VF;(@~O#YAb44XC+)F
zVaGD!{|Z<}ECYHKB+uB2#pUOrev158JM)hDKMf5%15d63n+7~n#s~yD9(M(Cf6_e|
zeXGobP)*8dD5xmVbip<9&$O#W1b!iOw|n!oZVa7eN~O`%YCP$6@7Qcp;)yA`EGc`w
zK8_STpB=3u5a>VSrP*;1a+2HhtR(i%Z$Bg8v$J{j~)41=u0jNo0EJCTg?L((xJ|}^VX!8c+
zp_nLB2m>N|VoZNu>-vH5|HvPdNb1qZ`zgmlE)Ukeyi1l&n6=Effs+v;-1Wjd_Tmd3
z_*PZ)sGLirsd}tWDh#RK3F7FSWPK8uxf`uiW7@B+d|^zkFeH+)0DsvhQ|YqJbyD)W
z?X6Pvxd0-0o-W%7ub1vNlQsaDJ9FaGPuyQF6tw)ZoH7Z!C>V5#ypF0vkqog>tt{0L
z=Z6qmvonHP_U}Cfo+`dGezTH1G;^_DpBKLCW?G{&C4lfo$bh)*?ccn)l>P=_?zu{#
z&NhZ{%h1p@Gj~rBVrH+^MVT6t#~P*2vf>(vC>#ccz7(hk_*K@0AvPpo9OdbUj}aVLdV*?ZGfl
z$B(9_E8*^8ThB7@b3rLZ?xJugijc(Cxg9j&^y-7tq@7NHcSCf5@hk1^FaVSHrNSY&
z734!zq$!ggH1W2-w{d%##fj
z^!xn(mbCvN^FJ-~|8F8z6-p*OkERI(;t-9HKEd(EmhteWx4gFy;hW)u!y|j|=!ELI
zv;qD=5oH4oJ7s^30=g8`pu+9gcp3
z`o)h?#F6YU`x@Ben6W(Pe_yJ~c#a9+mfYw!NEk
zRpb2E>dJPO0MP4S3T7MuACrCtaubC$)PXWP<2^9lv&@XH{6i$pjZT8kxQ`Lu|D38K
zpW3=vu;L5v0_uaG9p|~E_{5kWY4=eMkn@V2Uc7UAor=do6iDmM69G|wAh|PH76m3D
zFW84#e+q;oWj}-|Uzhy}T++%8gX0wgH`hRIJ;QkBj|Krjy9RD?7tIeAYv@Bq__)Hpj!l`L5@|e82E^yn7O+$^}o{0ZI*BMgz7n
zB4c&7reY%nX#c`Ns>q2I3ejt*X-(g}q}A?H5~o=1qIWiBFT>cP^u^^jw<7MdEmeFq
zpKxVd*C#a4lXABh+22vaa2ikPd;rK}sZxQ~J)cje9TPo6j+im~fFd4${CII@;Fz()
zw#1dBr>{FAv%V}N!q8rUoD2?Ua=}2?dBD(PanpDky+vId%3AwM|{Vd8w
z__Mc&(H|ueo**5CmJj66hcf08aP_Nb@jBB|+anW`@DHCs6RYr0dCN=g30$(zWI`uM
zIe=ntN_?(ply_E^<$3^551(fp*A$Zqt7V(xk4J2@LpnHHS
zybB;N7tM6xLx6Un!noEC63fa~;1~-_{}6B>sGUp0Za=U?(xZ0G=_05SJ|+UDwgK=Z
zyHQkrwE?n2jqxdl!xA4e65gsH(NGhB5612@h
z@=N)J{IlS%3-`m5nW(oxt8$Qb7es~qUC4vS`M8Dyh8WUgc3%UTh@@N!i~((Xm7i>z
z6*O)MhH-WmC-oF)`~|M4*+W4?ymKuNEJ8HP^&eMA)_$u}#aD1ePgo=}
z^Cbz68;czkXf;4TVsxNtvWY6>&Uv;(9~~B&6Lx+#v%G{7oOem;b6xWG(*{ExElSCg
zh$8WQb7q~9MS!zGbJC0<0pP=!AWza3p)S%UFk-kXH>8?ZEJr%obtV@tm)<3XraPeh%HG&I}ICF9`aywBd
z14N7YG$hqLba6E$sVsA2D9GzyTB32agTgGaw)4rD9;~!vIRpG@K>Y@vSMzjpCmc9OO}1}lv6opWGHz1_9?OCx#T)p1*TmD
zv$%y~bWfZ93p3P1jcCKrA<{J7hWU@k>ZO&9GB1Loo%Z29YsQ%8@S@sKuZV+TVgGm~
z3v;UZO{{sVU(1Dl!(n;V&zv;XTeSpUZ(!2TbfVaI1)M?d=;%xL-!2&%RGmj=a
zsI`nt;qd_hLvnlB0leJVxmLH1S?OAJgu!+ZdH=$a8A7DlL^M0J-l?N_B~%DV72-@Y
zq%IU35o!LGAFa$>>d$iM693xnJ~)^FVElH2!A0YEH4l&3x(oy`RDC=K-_Vy@Q=c?E
zsn<9fvnhZKRDJ#gv?a6XoCZMYsmz@r@{>5Rb7RE&Ol@8GVJBNaZ*^w^zAsamF@7?C>2037>DdZe1EnW-85UF-SZ%j4oG%}x!kj(dgy
zaD_q{^=XTv)Qb`bta_M)JRSCvtsC^DF#F0C@mLCHBxnoHRanzAaEqhpGu_)8RX;~7JY0qJO4(P%B+OFeKyV^iy2iebp^SkSX9Tr?
zoBjjb^a8suWTFonrgypkgWqjnB-t@2o{$tNfR;2NO~=?{R!1t}v(7Q4xW|
zC=$KXZe8fJexcpf=cR@dk+DnucfWpaU#>pCcpjkqjp)UIYxPV$LhG?&N5yz&rzN!K
z6u)SD;f-cvWpH&n%Z19eD-A|Llb^Ia>^FT_)Slq@aNb*!>lFJ8T{0(8l2p>EIA4ao
zoR%l!UYO=$bz*e+vAa3Uup0BIu!p5kd@EZRnkd+bEa6#ReS&R29;{IWf
zaeOS|3e_s5G|)e_@}PSs%$l9f?x+p$uUY0i^wU(g$~WHD%?)@MD=x<#Bc2nh$xW$8
z^5EuD-~#q-NFkcbyrbRkf}d&?C#F1jD=egk1RvNcF}`tLTqrjxMtjk`!KL&`#`1@l
zkgdBjRI?dkP3eykyR&AF5rq8)u0)@I*V+l7ErHG2)F+sB{Z<*8GG0fArU;qNC!k^`
zgdC>4TRr2v%f)M0%a0*{KV81dbv(=X1<%LQ#usJJ1U7=e4RRpu^^}P$4$nMN}^U0bLjwt7%>dup3*>1MvpL@uD-?(k-^<>26ti&a@ie4{uu;a
zq@S+s@CjkcJE(-z(1VsB3$&jLkzP2?)+Z#D@!nJ|@mZV0M!ZnGhNBQ^r
z37`Znm;EIL1{K~Sp%T%Y?gzIU3Z}HnmOkQleB#}H2wvxk+|UzC))R|F{q)BOV@Q^%
z0u(&-j_*!r>lEYZBMY{5G_XUgaU@0tiOq=oF<+p*5lX8t4>+xXP`@@>pGDa|HMF*N
z<4xNW9h!jy|b(g85#>YJB1+==rP=t4d~Vis>V9_Q>o+Qoh;V|
z9`?L^5G3g{^H$`t!l>Y-UGpfA=o;b<(~wYK5lwdlE$-l%fBL5n;g@{v+u>2crQjfz
z)E@2LoXkvH>mZvKpL&_j9s-nn2D?4g5d96y<*oG>hZSiTnpgKNqp#SpwP;9k_r;SJ
zCYc4+7F~Bf^9UY=g{NU{>CZ891)|dQidHGlRwjIiuvU}uLs#NP|h157U{l$)^jG5biPVEjrXV{+CuF~Ym`aND;p45cN<7)S{Z=7-t
z$}xKVAkAcVLi_BY=)XtdHvJ8l=V8|b)Yxy*E({5Uo?EaV*bsDyzRTBJZKNvYUVi2U
z-<|dVsb@SzTu5WIEP5zkD`W0N40#7!(-0(|YT4BbEuZ2x4T>tqdW3UCoROOSe6h}f
zF9eRAx&vGmj%100ql2LGLLQi{(2Hvr9^r5`hCLw)W*u=|Q{#JLof*^mPTL+g#
z){nVckL>>xQuH8B0(`uOVmsK%3w2QB^=+ym7=(?sZxnK0kX**gLOCXxaME?ygDG8r
zuI!zK&}<;Xkesk)90R+u;C~QD-lIf9OT~<@I{Z*ENuVvjNs3&Nyiq%Trmi}|o?Lv~
zuyhAlkXeDF^d
z;U+(!&2P1=Fix?dR_FF;(v`^N3B9@P;}|tyDk?JQ3$%5rKRq2p%s^NIGxlm8q;2Z$
zy6_xaZ3>NVmJ&(NVMxvZ@YYH@QDelRPv_O6iHy%PV?pI<*UNaSBihmTe=KQnT`YH=
z8vx`2N+SKW)?>C#TomfCUm40F_nd;(+mYO9o>Nt=M(HjFOiwfyS75x|p~Bz)B$Z-Hscx
zTixqb%v8RXB>?Mu`f~G{LN{gt1^@R7Zs-Z$CA#yCfmj`kMI%ep#jbFE=*z3ji>0;D7FI?l%C3@m~t|bOf+C|5N?{73RNm
zHD7aXJt?h|OdJ^VgCG7VQ!fqT}G$
zX>$H43#Xh?cd$Ynm8GE_JC;E}ZO};rHrXh$Yogp{IifX_=ckiXkVn;Ifq+Z2v`v@3
zUkvMbPURz{DCQnOkWp$#D@6Ob&}S%S&wL)Bj8=B~AiwaPovEvbob$!caZ;%=W-DEG
zlHA$TIAj0<7CK07plpHtd5-?Nm+LaygNfGR^%Ay8Eo=Fa@13ZyrmlL%rTLX})FN97
z;(k)w=};W?GyHh)Yk$Jw~@2j`EAeS$P5=p2EQE6{vFI$Z1Kyteo*9TYeH
z`ujyLod34haC^_P@A&8EtZ|vY#3qjR&GDi}d}#ax8EG2Ux@)#P>0j8}U>Vrn)))un75y!MW}F_?QY5*~h3<8g@oYJL
zYI${|K6>*?{BNR{v*(jLad?@u!nuEF3yJqyG6=xe^HNesC+d(4Jk?OgIgwEh-xeal+jmMzsGK|Kea)B~IB^xnFkhvm8b(Svp
zSQlQ2C0$RM7PqH`2n2po{^n&nk&6=IB_I5++@4EZ32>xA=;7V(UmAB(;u_&V2i^!|;bIu3=&
z))*Kar1UJlq^!I*jIRQshzK!7EtB$@@2c3YzK2zlU$6#aU7WY2TSD}ycQEasJnC)^
z=4lNiX!(*luUCyUs}ZPMM3$zk17T;6J(ys6;No5BeksB+k9`MtOC}6Hcst?aZzqYt
zk6QX~9F0QXu4R!O?WV2-7^(8U4qViqwlx;pJB(Ar{D)3RD+A&sEpN?pwx_Vr6mhLW
znfR#M1Z^Gr%wCiG4RMz7hZm*v#yG_v$UH*cEF9*W;vh*f_6v&L|9Ce|?Y+4tFYdT}
zJ#iGEo+gcL;S5&PPoQv{u)s!>L@|RPGtvSZx)NRo*BL+h`|bztR8|qn6r#Io_}RhE
zwkz%AtGNLRTi4dnkjKMflb(d83&V=zMu#Y!FY^ZWio!cm+{{9{!P~C+i;ZV5rFX_~IMV1W=_XEqW|d7>Y{wodi_UzDZQ(_|PiK*+`u1hb?=HRffaJN;z+fLH
zp&0xrL=)cyc@0ikV<^re_XGbay4qIpQL85c~p!j
z?}(P~LMWgET^_vSnb7n;ndc(@ce)9J)rvJI7DzYBlAkI>R!W7=htJ0SP?>Cjg@S~HQx94pF%$M^GCq^~^Ix#VnOx1;`S2ihBOl~cqw!+y
zyn@8VOGnR~7mrqdf0h1fxY>7(ZryG{GpZbwFDA8g$Nn0&+&MHE8r!qHzck5YA^pwd
z%VFO{>mATZV!a1}jb2FUN;w2#H?W*$Q;NJ)u;#c|mafF)rF2Q>GYY;fyKuPh4Ux;OQ_`rge?~&+)^f
zSz(MfaJ6TRHY$SNI?@6l=lKhRb#d&IQ@@v2J2ne_FH70?)n9bd4cs;~o?_|&4VEhS
zMmE(J@}-oKbXpX{>jsY6Law)yL(MIJ$xGJMomBeA@k)j7?W$$L(
zarO94ftnXzTt7EQToV<#^w69CMeS}Q^}m1WD~@;iMAQW4q0PPL{V>t*gz#EkRqX?O+>%V>GY?DPw9?M1lit&
zI6`8r$2C#`Tq(>#-8EmsdfN&$`wg>(Uj@B-UvRsBfXr{qZI*Iw4D;KC=nQsW8~G3w
z11M(jPGtPVK%9K>XtQ3q!z%f=kU`_P)$)wW
zLEOg8N{~qOPf!4x0Nb&}1ik7)axoBrF~%{Lc$(aVgN9?*8iER+=sEhTy;?NEp>S#)W7)1kBEH>KTflkrIThU7h%6^p=A@mtP_Dl4CF
zs3=@R3Xw>?WnQt4tg-IE9ygk&GmYE_(rm^=*@*r_Q)=Ifo)}xFpHyQ%BG=#OyWa<1
z;&rI;1MM2UjAr!O=RgoU3ao;(g
zjZXJ@z(rr{>|{u^jQDI-cs(#_Zu`;CckSwzufE4j5=Ha>F#U#CH_-TR3=mLV#;qLa
zpRGbMZ8LI>pEW`RZWG0-gLg5C8#M+>?(&NY7tSfH*)K_6SJ^`nt;zNa0qp}!(n+ny
z0}sLB3fPr_Qhmv>`t*sRQ+xYWB{MHcPu_9dxzCL1!xJM3ST+a;K#~ay$QYq`54)2U
zrG3Hf0kHeF$J*})IJVoMNevg*dn?wZ=Wt!ZxCD5$dNf*Wtzu<%J)h6S=k(kb7|Es}
zOwP3V$+lokLoyTKbRHrI%n$Pd?!#vacEXdv!R9q*zm7T@!us>0mR@PM567Ba617hO
z>+@t)$kdHTAZ}SHqpoO&U;ogd220u!epdv94^p@`5Y<#+BDMH~-gXxSHdqhK`M=jP
zWjQKUDEcOV{oDLMsY8?HJMeTHaf?%487E*;;AWsP?M7-_6D40XvO5OiJ|9m`A=rBg
z{F2+OomGn-3meK3duJ>e=N8n34=7?Ak*eTd0Oh!f*H
zzT68@Su5*8IVFE5bEL@Yt$WVmqj;51tgO)|o*=KEW!^4f9OVYcL*^bF1qli&08owk
zFtUFWxDybIrZX+fQsH$Nx@2|5hzE=TLj1XRnC(y$FZwf*kt?@O^nt{;Xemh0pB8?L
zS^Zr~z(O+c|2-%Nj_z%O#x!`IXS93)KXru~ZzJW&hK=n3(C}x!(!I^2B;l4KM~Hw0=yx9p=6wblXV_
zUW*Kus}Dqdv1avmD?p_W+zzSAU@i*K6&P^WUrYj~a+iamMXx}*nNMIlKW`;t=52pJ
zvyPT4Q!J=#O0hbzr}4cqsh~FSC0nvOXI5GmbE2!-_G|*!rxIM9Kse((vdz(F&hdG^
zrtO_5zkY4AEkxR;vu(lH=-OI<;Tj!nxmOjL@^x!$k}^-8-3V8Y_o}r=;QcV%<9+oV
zLF}D}-^Lo$jrClFKfT3zEiU{M{|#g??vDAmQ5T$(xT`o}y3mzWO1lfiecoKQPgMC`
z-*D>jA&RZTo&E+UKDn!hO1)v!;JK~XRs;kvjTr=ZqLWZed%jGgZT_BV^lycQIu6-b
zT*&kH@?1s_;V;rPt(Rtrtl$lLlV+ov-9b3pL~$qe(B!;AH*fbTPEXmGKHcD!sEa?{
z@1Lz1a5%|?8r6Qd#rZFTr{5YfpeiF{v{asq<~DS33>FDGOWsfJG8~%Dl%TGaf*j_7
zezGSSj6pS-m)Gn@)(1o@sGWV@_uAG&=!M6$*6N&+;n=2S@iFZqY;`bnB+dD69O{D`
z_ulHn&(^{_;YYjQ2EH_=433%TknT(_cMMA|bri*mn4fK1k+Emt9NHRJp)^1NRL8&*
z3*D_8@s28%LoUW@W(rH0B{f;PwkMVd#zpKi4q?Y9ZTN5BU>iV^Zu*QP35rA`QctH6
zO(vORfGg12BI~iMR=*D=A^2T~u|)c1i4z}Vb)UR0-+Cp++yy+PA}En5CZMW!OOmwA
z$bOT*4`Sl8zRnz1o|*T)W;r2f)2GsP&ERGKbN26H!qUs#Fpw{{U&Adzt!FX#P|bf~
zW?6N+iW^?k;6QzfIX$C1MEtEg50K?O(Lk`@-}v)xtcTA+t))N?&%^qlQ@PT3(Dc&H
zyvXAy`TzJc2cV3@f`T09E;2X(pnb=ktDmr*^km&s2q3!MKF&APr|NSf*G6)0D>kq~
z$BeCD%!^493=S*mmER2UL3pKcDKFYB3kJ_e$LpF#W;TNfTruH@nEU8w>3qr
z(i$0?gpq+A#8AEgyOk7jfu6C;Pf|Dr`n9zMy;RGYFmOCj*pB=(fiT(KT-iD-x73=_A6R(!+%+az1~2m|9^%*X`OIb9
zQ{01uS3u5)VhUjj7()z+D$;UAq5Bu%_x_q=ca3j;f7(#5+!W;+!F>A4Ij!=;hG&^~
z)+uQ4C_f-fx2RY(sR!&9yTSa+Ef<^UnzvWCZ95$11c^)2XAC)K{L{#IH&`wm4b>2k
zvALi|Xx@cAtcCPhFz4+`H7WT$G+q2r-DCT8hQNu-$1XeKrH|-ohD-m-f
zMDbmqY5U`KBrnZCWGY+ZmP(&wR#xiN_hW_+ta$nmaYDV~7DW=?3QUf(3CMa~W=a**
z2J4|&J?mKyki_W2Nyb}It<0pYjjBSL+0dU@XR=ZJrW(^bk7IAj5%FNy>X|r)?JAis
zCHLk|_KO1g#0rRt|I#P2&|>i@I0WyU$dA8I-w=8-J?<#urzx35m{-=AFTC_S?HA@U
z{0Qj4As2i)lQ;<2uuZ#_D`XbY5LW!dSgUvwzJ!c8i8R~Ohjd*oBJSX~)xo*pA}y!Q
zn@{Q!lGENy7MB&G+WnWlTzHpG8REz>W`6ry(iy^UqG;UJjI|w#aZAIe&^~8EsNr9b
zTD#{t_};HXj1X*(bCQ_?QXDfX;z+ah`^oPFjF3}1B_ULFa8&8Et2m-5uUgzljM&li
z&wZ&G&jrpXY&{?-peRYC^7mxnxqyxo4rKJ7xcjm6s&XOw_l0I#$5<=Gb_DeVCtiDt
zxnAOj`l((XGh(AS&y<90w4`hBCz{E-$+tV{@?LM%+Odi8bq&pu}5MRHhkjiT^^*&E;JM{V!O@Y4XgL9(2U#B!>pB%}Cscp*DQ=C!v|
z&5t-@cp2Mfe%R@EZco=Vhb0sqcz?K}F2UB{vovDp
zWVC&hXU=`C8lK{u&lv;H7@2=Zn@p-xOrSSZ1yC}AN2AaDo+vN-;2!mmC}FbK|CldM
z?rLM~u`RvCR90B1EA2>klXe(lmntosx)@X@bZx``5iCn!ViEy6
z9U*v&jIzgXgQT6M5lVU&$V}-Yh5o~<`czcTv`mQ1)Rrb4RrDNQ-1QjVc7WfV*M(;~
zvHc5s(He3YWNSfejO5|c-P<^sW?%x(g+Nh2(r=rSE>iMW5)OSA(+;XQi{Jf`)MLLf
z2TG;;*N2gdlEAY&f}+dMD9mjz77w$p;%Lt3)Ic#3Js3A@2lkKJVeoE%1S+lzpY=x{
z0xr)NGZMo8FD$qV(|^5U_5%&X2!C5NA0S!A9tc&l@<0aXN`LM$=5di;DS(`7IKK{_yj!hAP{?!+&M+7Qhgiy-p-YGj|yf>dNb!aC^pDGd*)P
zP3v2l(O#@^r%EV*w0%{C&JEE1QQrWs*7-h&(jAMe*Jm0EexoG@eQ4U{_GQa?AME=*
zeVG%BDz8T&3`G-*B2oQVB9$5lsV{7GW1^iTAZ=GaKTuEGu->z@H${iZyGgm*_XP<#BA_-i&K=
zJ+&XnpuMptFOCbc-nDRrZTDLK{WSiqv@ktx*}{ukda6Qkv$o0>c}Bnf(IiLU=xre3-gGd
zAu53OuO4Vl1Ogcf-#$yu9;oQMKlpvtK4QDSwvqi~=~C&TvBeXf`yXC&Zq9=KB`qDr
z0*(f!87MuttwTu=ZoX-NEyRY5eZI@$*EqPFcX>!pOuRww(eDhb!vkDN%9uTaotYG~
z;2k-KiQp8WWRnjKo5T;9x_gZiz2Rnc^o^?PQ7>c>E^Qg^Y)gxC*bG1*M^*yvpCh3k
zH=#e5oT<>D^9CHq4!xp@cb?%0?&tRXZrRH(l+W^f32U8iK75R-i!?yaz1lH)Oy!};
z2PWULb(9u;ANr{5AQBacC0TpMF4L=bZ5A%MsfNl6kG%xhf)qbRk;a?ycjNWv@NKst
z6*Ar~yHkL{*W4U@GFbD~^G|h+Y2`vnl)y!oOsOCyYmQWFcj-rPENE|F;^BXFCqTR_
z17;*mmiC)@Q%=3fy{|NB^P4g#ShOLWk
zJmO#5TtAe40q)nDe82ev$~Wh7#kWGPMb^kxk8EX|~e
zG=uY$pN+1N3>d8QzwEV1?31*_3g7lSZ6dhH{mF&B3dR_`n=2c8gD~BdcJ)o#i$&$i
zUa{oY@r=I}S=_z`9KfhALXm%A)cCc%c2GeNNGwBh1Y+h-M3FYvyOI25@@Ixr$KN>1
z*G+b{I`C>Y-}y1b!tSIxKgQd_)JF4xqUM2Wo(dRrZy2`_Y??58`uOcX9#vwz-zLIy
zlKOP6FWqyIGjT}#^P2Pb0!8p=xz^u2ykk&H#myLN{gY2iS38Lcqo@X{85CD%Da6y-@M`_qM~g13w;3$B#1E;#1|eqb~mUf)JL01DgHDYK8dBZ6H2J>@V_&-Sp}f?RH@Yfrz0uXV!nuS
z=Njf8*VqgQ-&lTP`pP{MbAE2MaLsd;?z&*b-Y6epk$KhU*A4
zrC7vVuXTUzb#D`%D^_!1`~i$KeD;b#Z|qT%J4`?`)C;*75y;EI87xMb#3LO0Z%7GqE4m2oZ)(%AJNJG;PG+19Y)i($7eoH^QNnCt>mFr2nNao8E
zS0lr&GlA1}_G1~F{cwoso8L1MgNrIQ9`!$`CmU~HCjE%Ga<82EoJ`wXn9*ewyW0P2
zz{3AIXyO0%lZiAy<11lY12|;*voRoA&@s_6O94J1!wa$mh1)FPB7jEIb4>eH%wjM4
z?7uL48GwY#mjbi*zYV$2JbveU&f|BV^WWWH{b6~}%j^BzuIqVSb$kBNwK-FGszrI5nQeq=)~HhGE!*}U
zOPB9z{N}oQY0^*nCHr|e%vmy@E((OR34WJkVxYOYNx#CfFx*^o<{PKp$cRw>>H|ki
zQq`-zY&piBHD!OEI0xg>4W+1pyPD&L=SJ(ek%TGLH8^m+^(7t0*Muhf1`gI-R}Vlr
z$Lgq^E%g5KZsH=VifkhclgRvzAlVc|p=vraH;N;Q%;zDAzOox*LF1z*oGuT}oNcsS
zvg{&IGo;m%Q9WWE1+X<7kFji6@;ojvn$MwK|Bqy3YsyCIoBF%4kI9*L{4Ot>LJmkF
z2u!gm|3uM`-YdU7t0klL{o&2>ni);57hcn8nNF7S7Oy2}o#O&0)ua#8Eg0ZW?SV}}
z)n|9R*er-W3M#45VqKeT+N;BsXd7NSU)^`h#~KOs%gOF~uVp5N-?*y!fn(X681phL
zrUOT^w27B2-|A5ujW42}V=k1qOF1ik@B%G(Z?L)zrk2`D6Yf8VBLR5-u!4=1O_BM!qJWzsX~
zfsAL($1rV3$r?x;@0H<=K@95VxSTN^lRrbT!c|?UZcDG)5U}2Y4K?$hI@}P@!MfK^
z{Tz6(xXoX3=6s;^Nu@XIHm|2FtRc=Fh9(ekAYB2(P*7GrZDM%pZfFBKAA32cPaP0$Ci^+utuht&9zH{^7ZJZcJ?e+*L;`p~M
zUw@7>_|kXRN9XLBPU%Y@d+MfHvQF!Lq9>+1K4#r^rw~ZY!g(ZBF(i53#m!M`+gH9#
z&w=|NSeUh+uX{jb`{90!0J8&ZG$1)~>o-Sm+jqx^T69r3o!
zA2Q<$TC>*^0Uf72zQn?A
zSX>zCpbv
z6}Plea!Olg4wYer!@VTG+8^)nxWZN~x!X+_{k;qQki*DEO^%VK
zgp7W=PkBn(Q%f$+q3CwjrGdWBCLG&}APWBtWh!I`2DF0Qb36#0z*ln6^M18y(Ztu0
z+t;xZNUE9`(Q>YXst4U#>ov%M5NoK3lbJg&T_}5K+2rW*du`{YzRPa4a->_dZ~!-R
zWQULcL!=}cPCiYU*wjF4l{tC~w~Ml+pJ97He8{h5`l3L?@ozdNh?#>T^LQ9^J1rJp
z(;fYbyxA79vRTvUTvu5@6dj(j6!~f{T`u7W<2`*Y{;sHN0RJg$;7d607JG%?x`_uP
zaPfk@%Ody9*Hb}3RZ6|lU{vXH}q4nH~`>d8jifuwN*S~+hAa1nj_3oPKx2yY|)
zil-XKl_5mJd@RTXak>x(HCiyRXe}#LRR0oUA^N9Yk}2ITp?p<^m3oR=LZUfPZOCdJ
zV2rLihr!S1nf1Z3I@EH5i3W*S2Q$@_adB
zx4o1VDQ}K;jjYcMnhLcF3AWg-t+@iK275A9E^xkJlE;Kv^vbb%if&qcRZ+0y#$J9w
z)qWWNChMu1;2Yw`W!tguEoF=&GLI#l9nMPTzRigr^Q^nWcr)PNty`pr-O@Z0va
z7p=vO{7jz>D~Nw|`&nVhwrWcZ3V#z)1)_x5i5Q1UMq@c$V_wQ%6Q1G_;!ww?dC6~w
zEB5m&!AO(!JTqyo01yX!!C{}LD;2aLtJaNAjtV}iCCe})UA+{0o=(YaX~Y~|jN|;h
z@2*V_s<`%B7_x2B{6UlAYpPm1Ib729!A*ZR?}C}=QQf4YRMn&+GHW+Rit0X>=T8tz
z@sk`oOJbB|XQ#jMRZ}?1Tge$!jkpfbOgZo?pF=%l^u>Q*6?>t~91R~z8nER7*C4MH
z^ao?_pcTbD2nigSHoFe8F8m@Q7NBHT(dY5DvngrzWSth(F9bRj89I@r*<*Nsu8dM=dsX<(ZbU6!+6oUJtZeRk|gkD(fG^HueH
z_iJxQDC#8?-)>E7_5*|EIXwMgMw$wo2n
zk*}Q0L1T8h7YRjVC*jUeOXsZ8^sPq^^>%cG*VXfMjQ(aBw#ZS95RAK4!uAhL10r3H
zG?>fFCR!1=Fz{=~8=o)A9IB=nw6oWJcy#usXh35}lL3)EbVKgZx(1+1hZ0Nl*GtHf
zzQSEP*81%Q2dXNH4tNjO=6y}AbnTQ}Oy}rPeb@VzllMz>l<_Dyp2dG)Ur<2jGGGka
zR+HSa`4{QBHSp?3V=*HI{7hpYW{ZE=ER&Oyz?6Z!9bIQ^xMCF=U))YVn~37K*YWQa
zfn{`=^!!evDk&5B=1>z0l(>c;#&XZ+hnp@J8K-1Sb&NdoyVX#ir4)3&!E7t(;LFEX
zrQLyw!F3>9SFq)!hoBtu0>c2CU{oTCWDV^a;bZYTeUlE`jSm%r
zI??C{WB^s`HImKSfIc8vp5PxN>Fk3!GOX#*RQ17b)^BuHLg8b)-zk4>_9r1Fv-ckE
zejVhXAZg~S0k`qhV;w}NrA+r4Qdjk1^#Eh%!xxY;nRHzTq8KqA1P@y%6~8|i9wWm?
zvfF%E%XjZ)bE5|R3z0X_c%}TM9d}3dJNxi`d5zhbbGOgoO^04L_I_1p~Q}&Imq|g@B1gpeczT!JhmLHA~erF
zQ#07eIn*lEXYB4-abRo%c)OBmDmbD$$(nxjm7(aY=AbfjaOiEplG)S4j=p(a=iV#_
z=0Er$PO7Env;g7`sG;#mbz@*Dg`K+47JAyEW9V6N+?6l1GQB20ZTPIEz?0u#7s_7}b(g*R@92|1O$yQ$y0H#=-w-U)G&{T?g`}
zik=rNQ8Pm4KVwFn
zaeS`lHVeI1A5P6orJpM_)+<{+p04vnKH#f}`<@L6s4&kgsG*`~5!5rU&<3B#Hl03J
z7B#7Qx3Bj4hLE+g*#`W@b!r~$%T7HW!nSD=07I%k|JrEpfWn9k2TB4lsylN73n->O
zUzwMh<-#DsdAvvyFm9L~Cj9AUwfihtX2JGb@8vD`yEYjOSD#x6ImjES?a71VzmE2%
zCe9B$+;eDw5(;<>&0l)Cip5-Ritnfw74Cdn+t{zTW-f0jdBbJ+)63ISQPQj?hyrlk
z9Pzsbd{WF#12{wdGqwbpi^Fg2r*;AjEJ+0OK?EuXNxFoX)NjGnY!bOZuEn>fv6ftLi(3m*;9!$TPn_htQ8mJyx{H+
zm=-ykn$hajvUk8*@KjfoaxbS#XM(*;JW?5IwZsU7t<9$q9Tu$bbx>?mmAgWJdw;Sc
zOT_);B7I9>=$rH}eL_6DF710$c@T0dn95ivXxZdhMeZ{`5XO&85SwJI&8`Hn7+CuT
zt9wZo6-EiGE0~p;EmIzMG{a#CCFW>8FKM71H|an?KA=n7_~aM=dQD$%G~_F=1XjVT
z#yxqSAhY;_`4pPVfVOVRfPpWT;S5AU6|g_+Da9m*t^zi?KbaR%!~QY8>r?OB^7|__
z?T2xv^lsd4NzLRsTA-y`k@7OH0Xw-#RS@e&C7k>fbBD=3PTlW7N{a)ndWApQ=2vE{
zrshrIQ-1Ou+}}cv-*mXuYA=3AcL_lONzcJQLas;r0Q2XpuQ)rOtx_fzxQ9xA
z7Sd=kJAc^_e~F;~nGLb`fbk0Fl@^Pr=7=q8av|TK5Jm@VKk8+b`rP@WZFSr%<2vUN
zoNE5ncYm;pI+SYa4=_6g6o3@w(c6^EUXxIQrY!B4m=t;`dU#UpYRUs1l@N=VBfKp_
zCbzmQI9AQ+0)Lq11wfmbZv?p=^kXlShGzCL}z*xhD9egK(LLNJGedaf$
zs2q?F9BD;~`jyKt$J(7z9K;09nxtFZ<-Nx?ZGvB__U&Ok-m?#fg@X;P7OEocis^fB
zc15_xj}3zh^;vtf8^g|C8l9tLarINFiAO)|9{hCwBFFawms}&smkIGP#=K#y4%8g-
ze&V_z^3@yv;9~(j0bdj|<<6T6Rp&Oon+wd%g#jE^=HD=uF*?X`p&h7!3)p^8A34zT
zv28dSXFnJ!WjK_BFF6NLD1R2_sIe|77r{%c7&9RSe{&<_6){nMbqEYY~l>zHjQqoI9>p?$=Z3YF`XlS#eDx
z*Ds)=aGv=1`|+LdM138nC-mT7&ele&aiC`c+J#?gi&*>CO|@kKCO?>jIvk{D9p
zYJx1;BmNfCSsug~F=z1y4W(#jnIkfC)F&irzjFLi^ta(ruZ1H$+LJBshQob-
zt9Z`T=@yj?KN5B539PFzyXB$w66Rfv7CM%!-@tIMkH1GTCfs2A;;pLyc!D?})H@N3gq|;WM=lwh3Y;;eOOeR@pzMtle
zIXL>T+$UhL*Uv-X90|Gc>+F+2g*zuM<$8DsSa7`jYnAmcihZ>}urCuuGRN=g&fH^m
z5y6=MtkWRJYB_}XTjnBnjj@S17w~^;(O`xNa}dlU+R`Rs+D2V9sg)6dKAt7lq^#^}
z>mr^j=E_@sU=v4W+NfWN7~7JdInupHaJSPY5nYgYO~m0EAZ;Ng5cz^9wfSzVm#;{f
znMv$Z1BFx1_Fw0c)p1u~-2u{rpkt6904g;5R|@t{dNT|d&Co4KkU8uMBgGa!DEBQQ
zE61Wuz20>yc0@}%&S$M2SwwuM)lE&m+{U)xsmdJ0h&h`H9fDXQwproo?DTT};mWw_
zf$NFK>V7+aap5uB$axa;;l`T}$J_D)_cjS>RCVlndEvO>tjq}d40iVVZlJlR$S2Wm
zsl^vJRgKP-BHT?7MbH7JVwf{fQtF2Y{;7VZ6{+)6mYT(PQU_|>e#RLafwNWJyT6
z8g4>)9u?<=R7I^D_^ezOSJNxXnK<^c+(3*B><3eceyTA7
z4qx_?nz5GFzNXD})8evVN7*mU)#K8h$}nM6Z!!GA
z8P_hkh@9kK314hA`p$ZPc*$N@2?`LH1xGscuUb)6_nF{z_CzEL(0QpVbJ%2k`!vXB
zzT8YLIpL73rDDVW6VZOM{#z4uiw|-=9-zkJ*B5Bl(L#*(hkiiww)mOpYs-OCy^;Q*
zUK%x7gtf2~QQ5AJXXEHcGe#<`tK2GB!jY>nQbO)-TLwYfSaV>Vg>6AqZ?=CvLx}Ild0rl5ed2X$pFqu4
zO~!GHp-U0f0@@arrXQFfy2gkuVB%s)PG)v%34k8E>pl~N!nthR)(t!_ZU)E?Y=)!0
zB0$R^W$jdR5H@UG2CV2XU8Nkh7Fd5`kaqvrRRM=I~7
zH!AB5238MeQpmZIj7*Ra&R|Axd_joVPh%}}Aze&AAb0F335m{Am#m5~IX<^o~Nk7ikFB_W~wWz0M?wsgviDH$BQ@F`0-
zbuwI2;#u^ya9dTPZO+%+G4dvA6i*jQZb6bBGQ0AnzTZsy+CcUtGAmWO-~(C{uaz0g
zz6);aK*l_3TX)wH6bxXHl5yIfCSI$>*vHMdY{ZAL2Le^N#lakAmxdLvi;f0H+G^Cl
z?z7RaKwYqdw%!A|bMJOFc9nw?$9M}|1p8z}tH`09$y3SBM(5@xoyyCrr+d`rC9R`m
z+vxAZZxqhr9uG_oEZFDK&SD0cI@Hd>3F~4nrtHU<^}%btzq5F(ZkONCW>bGPafr7?
z2WH8VBO!|@0jrJwqD;GjC;q1CP}3;sB#ws~hI~H0T8V~g1T|~3!BZrJQkHY1w$9UVEW?jXibd~u*7>2{2A|0=Sz}Y
z9ySM8B>Ko(l&i3DRlJouzXh{EWnMb9dqwCfC<<_u9AG*$?*b-F+;r`sU0a}tI)ys-
ztktnrefBH-V|aq`Hp`AF^q;j5L5O*TM>I`H&sTH5EzU
zgo84Shd$=L6pprlfma+{vS4t@{y)-aJ9IZpJF><;mZrrVd;vCgMI5i8Zr>VRZQIq@
zSKD^xZmy+dE
zJJI{;5r;%i3lJrn-~12lAAJhk_5awO-N*kA)3HrARGrqDfZ2}@A~|&Cl$zseNuz}#
zogIP#3hC1^(yjUe`nz|g?gDtx1h9urCjsfEK*k4SF3b->Zyc$_e`7;4=s0nyxxTR`
zYtkw(xXFx1!2QPgJI3aI`I+Dal)~szc_@+8(0AyI)Qo$&b&18tE;ZE07H7VB-e#7R
zJoD^9`sqiDpo`MR{YPa-=nCjn{(g
zyvfoV5#6%v@30o|ojSXkl50F?!5;n+-lauV(WmE!c0x2Bh$z1H2gl@M1rwBDFt2v&
zx7qDReS;HEk&l=Ud9GpZz)}n_p-H=g<%L|q~A0E
zUiS3SuYv4?LxFAWhd&sW==(W_$v
z&)w@=9c-^!1iD8lU`j|uC{dOdtSgKaJ^o<;0+cHr%>4!m=
zfmgx6u8szR*|t~Ae1rvpae<%^$U%`VLeY3wv#<+VnmVBI^O44mRPvg}X(=yF;-plj
z^^nDfjZ7DptI+}_9Pi`{mVMfqm4?th^Qnj~SE<|?FUg&}&y3to8F|Ip?>_1mY)vW}
zpFEiSDjp;&qr%z;dHL_^Yk_f)s!ze*DP161BW%Sy56wf22yet*;3sS((C%8CZdL^t
zR7nN2!uDeh-~?K`M?nr)3Lef>E=Dsd$Pq2#ou|z^(s7X3|j;p#2skz#tStoQ5y+rQ;a7~
zc4es5w+E-13L^u4ug^TlYb59L4svX3fyiL-A7$CraFhNNPcouYYf@@Xs|(_ooD!o#
zrHyOay|PL6BY!&8-R^4t)OX~5hwO^|CLXK
z8}~F@a54x87e5^lw^O~M7tY4d#o!_j&_yFCzJuE<&Mi#D7~SR_QAuT1{EkL4f3#)3
z`IA#SS6!owDUA2H+k9|}CshtOiM}W`RV2PKYso*Zv4H$oKWiDjjqr>3zN)h8HQxl>jaDpTj{1^eY|n>(H?x;@hq&N^{I9tPS}J5trrHT#&)Ykrf27h|AF9XT4=w}0Czjy$0FW{#u`pzNZnGz(8
ze8V(;uPmoK5%=%Z#L9`&MWYNPnJ8!utbJ|;8v_nt<;iY9N0W>N(};u3NVE=E~&)d+iyH1&()-oO|yQ162(Za&Fkmt4X2zV;9&K0WPe9s2?Nh%;P6Z7uE{|=!l#5
zZoXWZ^sURdN$I?je|0YWrk9IIuF{H9WMQCzi89+`n9)D5|E!I>Mr~unLAT3!C@99O
zW7x!I)vAxpgZ{Vz->weICXw9Z%N{F}+cxDpVBbaQv4D!-Kw$QHVJJ#tI@$Mb{k)fYV!bj%X2sET*e(PXDZwqjWQZeQtL7{Kx3$$G<~WPG65li{W-E7
zUI9*eu0CsNl5izlP_LuR%wBIdho_6HM*_wSNB-y2xd%FJ)ULr8IImEUDu*6BL}Bo#
znt^!88=F>zK0}Jk?9YlR_Ntq-&Ylvn4R?RAlvxyHACU9rIVH7&2+I{R`SL&cn80Nz
ze>9}>ADC?N=H9#(Guo%y_`jdduHlF-G;6d0BfD9mm#~Sb?MQncKI#-)Tcc#&(0HV=
z?B+*{G1jcRu!GuRFX03Xt^qz#%(U*EcVHyFKmhYSRC^@gI8`HvYotSX%JE8aU43BW
z?E>zgRAKWm-_c=9Mdw0+nwTE7t&`u#Lh%9IoWJ#eBO3y|T<3uDB(!c&0y4yvOa`bF
zTLIa5v9I|6G%gFa`_!l#KoHVzFW-IKfTHZ#zShB^+x1I5r*d9??B!u
zc=&<&Mf>*s3|I3qwB%rgzi#Q3dQUA+w)9U{e*eG%dn@)2e?8-gRGBt0W-{JDx3Ms0
zABtxOo`^9lxZA;Nucy*-4K;g2$sp{(Mqt(*fg~Q8{qP0cdIKq~4O!#gu>yvWxGD3l
z`)p(PYfA6E4!gBrTPtX~59)6C7`ynjRcPVsg9ZqvdG`$SGUf+>-ObZ*KJdg0D!#Mg
z&UUFaX+_fZ#BFsA%Wp#?LD;C98L&q8*YDrcTTKvC#h_z`Pt?yB$7=$GBvisDp{a?p
z!<-tmJP|o1b-cR)&K*RBM}blAk#havW9u5^O(0OOMgmEZFqU?wJczoZ5qc@vB5qXA
z`h^F(kYeWZ9CWhsv$BQT2Idm|kfaEs9Mfsc0z$ASO$A6qXLK+2%Ov`SpLA#U(-rVn
zy1b$A{J~3pEhaYzmr8cz%gk01v~o>k6)?y{=`RLaaWr8=@HW0(R{M&adksRtO_T_7
z9l^oOY<0!bl%bxozb@gGI_>y9zvw8V|3V>ihjAM-f~@mzMNOPG($h);BkOlQNBWI2
z^I}VF)Yn+-&;|z*!>=2Mx;AIhL+G~TQed&1L)c8f`curv_Lb24XQ6&usXOfh)gk*s
zHoF1s>Raj&L!J6bE~Ri8Q;r{Q_hGat%6rDkW5|hu
zx%h-;Rqyfx*>z9Ek2N~N#qK|CNe&C(R18ILxj&I?48cC9yZn1_pL^!2M14+{;j!`;
z2+MB0pSev2(R3=|r=t`8hQ-#AJ}ItV>D(t#fv9iA#1piubQI%5(>>33)SgiuDKFlW
zX33r(ohEntIKGPiv@D3)(pQe!_l_G0bQ=BfsTfwqGo**J%|z?4Pk{h6yZz|$$!nKP
zp9xKl9{2Tp;mCFImAmqFsb_V^d9q-LKF=83qTdj5Z4Z>GX64XjurHCvz2Lw|-8pz#
z==ALuquJp~C+~#%Tq^J#N$VD&mv}jB7I}x<&-FgI{`1lG;#WsMT!)W|FkVq@x^SFG
zf5b$)!%5`={WwFQ&{Q;2)W>Qg3kJ154a!I>*hMLF$c91-hkS(4m6ryzUrcvk~18
zO0W;vi;qLOjud`>)p(IY^ip3=e{T0t<$Y&@p@7=n?mw_N4MsY&{5TAhTI?Wl(5)dw
zL{0X3JK2WfN}d^rF_v2ypx7>sdq0tnjK3I9b(P@kT92gm%*IC_jmFTaa^U+g)R{R1
zYz5EXWIk`;k60Z&T3!@asM|N>@}+Po(N9pwrsnwknSCiOe4Wp-WDy^k3XDt)dl;(K
z_y`n>hu7HfQ!{J_b)9*9J{z}*`(+bD1_U{m8{@2Cfn&w@nHrq3b0ky94qu6FMWq!8
z8Y^4TJTaq}``rl1jru?)Dt;ug#>pkN#h@>1SCL1}?a<@ul(>V2=+}I&rl2%K8mX_7>#JvBS;Bq1dKEdbzHvTaU!NIw*F%28v2k=gZ1dmqqxbCJ>Y)o%57v9J7(=L4
z3kkflBqD>;7{^>4Ho(sQ<#~9Du^O{y#5qj!o3AT?obH#utyOF~=aKIdCYL{ZBcbr8
zhhtv~jP^co1=W|f4mRbpT^RY8qg1e27GFld;Yib478=FsCCBcarTjK?47VNn(xS%?
z{s9KibSET*18Rl4Mo<#81KtXNh6Fee@#`aGtpzU0#Zy9cq%5eIW8L4mo
z67lti#CF}aNtT91jgGt8`y;pXM71<=`S{6d!nb7d041J0&O$N62|V$kspFe{}!t>2tX7r%K{N?fY@Bz(zb8sPiMY#Qxgj
zUem=hxVxG%n4AS*C@8gHLlXhZ{KK@93(#B{q6XhSkOm0@)oVn2*Ux$5zmy=
zLkV*Jr6y6|dIP3FeyojNU~(k_?w1%PjPa@>*e2mtSP=HWicO&cg?#L%)bk48rNlg8
z%}Y(M<8Ld7a)|f5Gx1zaL_8i
zJhWmzS0a&vv7v*!+^+9>yZyYq*NwC*6%?K#0SUrn*Lj#_?YIwr_5ks@99r1~LPM
z1z5XP=QHEoS2+=-xN3)Po5^ZQGfAs`6W9v>sF~#@b;qzjJozViyWws0(y{(y@1hl+
zUN=2zdQnikri{hFUFagmwtuOYQ%Gac?ClqyKe>#13-tI0^2Qui*Df(PJJ^0$_OBs;
zIbG&Eh6No!PHS^H6QTJ4QV1DF&c66|-pc3jpD-8kg;xq8*bPIeOjJI^N|7#
zhvtSEL~%`ZKM9|e0b3|162>co@_(LquI^H0>=@v-aaTj%Q<-(|3bq$XRRFPuYLBWu
z6!zP;eycVu;g9*H8}0n+vVD5DBNKirU0%_^yuHZs7ap_m-#w0;q%pf2pzZg}|INc7
zf_IC9h60{|TFgw;U(D=)L&5(5XfZ*= |