diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0f8934c57..44157ceb9 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -40,4 +40,4 @@ jobs: uv pip install --system "numpy<2" ".[dev]" - name: Tests - run: nbdev_test --do_print --timing --n_workers 0 --flags polars + run: nbdev_test --do_print --timing --n_workers 0 --flags polars \ No newline at end of file diff --git a/action_files/test_models/src/evaluation.py b/action_files/test_models/src/evaluation.py index e93d0d9e9..cda6e059b 100644 --- a/action_files/test_models/src/evaluation.py +++ b/action_files/test_models/src/evaluation.py @@ -41,9 +41,12 @@ def evaluate(model: str, dataset: str, group: str): if __name__ == '__main__': groups = ['Monthly'] - models = ['AutoDilatedRNN', 'RNN', 'TCN', 'DeepAR', + models = ['AutoDilatedRNN', 'RNN', + 'TCN', + 'DeepAR', 'NHITS', 'TFT', 'AutoMLP', 'DLinear', 'VanillaTransformer', - 'BiTCN', 'TiDE', 'DeepNPTS', 'NBEATS', 'KAN'] + 'BiTCN', 'TiDE', 'DeepNPTS', 'NBEATS', 'KAN' + ] datasets = ['M3'] evaluation = [evaluate(model, dataset, group) for model, group in product(models, groups) for dataset in datasets] evaluation = [eval_ for eval_ in evaluation if eval_ is not None] diff --git a/action_files/test_models/src/models.py b/action_files/test_models/src/models.py index ec32b5a82..96a1a0a3d 100644 --- a/action_files/test_models/src/models.py +++ b/action_files/test_models/src/models.py @@ -61,21 +61,22 @@ def main(dataset: str = 'M3', group: str = 'Monthly') -> None: "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), } config_drnn = {'input_size': tune.choice([2 * horizon]), - 'encoder_hidden_size': tune.choice([124]), + 'encoder_hidden_size': tune.choice([16]), "max_steps": 300, "val_check_steps": 100, - "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),} + "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + "scaler_type": "minmax1"} models = [ AutoDilatedRNN(h=horizon, loss=MAE(), config=config_drnn, num_samples=2, cpus=1), - RNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300), - TCN(h=horizon, input_size=2 * horizon, encoder_hidden_size=20, max_steps=300), + RNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=64, max_steps=300), + TCN(h=horizon, input_size=2 * horizon, encoder_hidden_size=64, max_steps=300), NHITS(h=horizon, input_size=2 * horizon, dropout_prob_theta=0.5, loss=MAE(), max_steps=1000, val_check_steps=500), AutoMLP(h=horizon, loss=MAE(), config=config, num_samples=2, cpus=1), DLinear(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=2000, val_check_steps=500), TFT(h=horizon, input_size=2 * horizon, loss=SMAPE(), hidden_size=64, scaler_type='robust', windows_batch_size=512, max_steps=1500, val_check_steps=500), VanillaTransformer(h=horizon, input_size=2 * horizon, loss=MAE(), hidden_size=64, scaler_type='minmax1', windows_batch_size=512, max_steps=1500, val_check_steps=500), - DeepAR(h=horizon, input_size=2 * horizon, scaler_type='minmax1', max_steps=1000), + DeepAR(h=horizon, input_size=2 * horizon, scaler_type='minmax1', max_steps=500), BiTCN(h=horizon, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500), TiDE(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=1000, val_check_steps=500), DeepNPTS(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=1000, val_check_steps=500), diff --git a/action_files/test_models/src/models2.py b/action_files/test_models/src/models2.py index b309003fb..fe1fbfb6e 100644 --- 
a/action_files/test_models/src/models2.py +++ b/action_files/test_models/src/models2.py @@ -2,35 +2,39 @@ import time import fire -import numpy as np +# import numpy as np import pandas as pd -import pytorch_lightning as pl -import torch +# import pytorch_lightning as pl +# import torch -import neuralforecast +# import neuralforecast from neuralforecast.core import NeuralForecast from neuralforecast.models.gru import GRU -from neuralforecast.models.rnn import RNN -from neuralforecast.models.tcn import TCN +# from neuralforecast.models.rnn import RNN +# from neuralforecast.models.tcn import TCN from neuralforecast.models.lstm import LSTM from neuralforecast.models.dilated_rnn import DilatedRNN -from neuralforecast.models.deepar import DeepAR -from neuralforecast.models.mlp import MLP -from neuralforecast.models.nhits import NHITS -from neuralforecast.models.nbeats import NBEATS +# from neuralforecast.models.deepar import DeepAR +# from neuralforecast.models.mlp import MLP +# from neuralforecast.models.nhits import NHITS +# from neuralforecast.models.nbeats import NBEATS from neuralforecast.models.nbeatsx import NBEATSx -from neuralforecast.models.tft import TFT -from neuralforecast.models.vanillatransformer import VanillaTransformer -from neuralforecast.models.informer import Informer -from neuralforecast.models.autoformer import Autoformer -from neuralforecast.models.patchtst import PatchTST +# from neuralforecast.models.tft import TFT +# from neuralforecast.models.vanillatransformer import VanillaTransformer +# from neuralforecast.models.informer import Informer +# from neuralforecast.models.autoformer import Autoformer +# from neuralforecast.models.patchtst import PatchTST from neuralforecast.auto import ( - AutoMLP, AutoNHITS, AutoNBEATS, AutoDilatedRNN, AutoTFT + # AutoMLP, + AutoNHITS, + AutoNBEATS, + # AutoDilatedRNN, + # AutoTFT ) -from neuralforecast.losses.pytorch import SMAPE, MAE +from neuralforecast.losses.pytorch import MAE from ray import tune from src.data import get_data @@ -49,32 +53,18 @@ def main(dataset: str = 'M3', group: str = 'Monthly') -> None: "scaler_type": "minmax1", "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), } - config = { - "hidden_size": tune.choice([256, 512]), - "num_layers": tune.choice([2, 4]), - "input_size": tune.choice([2 * horizon]), - "max_steps": 1000, - "val_check_steps": 300, - "scaler_type": "minmax1", - "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - } - config_drnn = {'input_size': tune.choice([2 * horizon]), - 'encoder_hidden_size': tune.choice([124]), - "max_steps": 300, - "val_check_steps": 100, - "random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),} models = [ - LSTM(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300), - DilatedRNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300), - GRU(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300), + LSTM(h=horizon, input_size=2 * horizon, encoder_hidden_size=64, max_steps=300), + DilatedRNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=64, max_steps=300), + GRU(h=horizon, input_size=2 * horizon, encoder_hidden_size=64, max_steps=300), AutoNBEATS(h=horizon, loss=MAE(), config=config_nbeats, num_samples=2, cpus=1), AutoNHITS(h=horizon, loss=MAE(), config=config_nbeats, num_samples=2, cpus=1), NBEATSx(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=1000), - PatchTST(h=horizon, input_size=2 * horizon, patch_len=4, stride=4, loss=MAE(), scaler_type='minmax1', 
windows_batch_size=512, max_steps=1000, val_check_steps=500), + # PatchTST(h=horizon, input_size=2 * horizon, patch_len=4, stride=4, loss=MAE(), scaler_type='minmax1', windows_batch_size=512, max_steps=1000, val_check_steps=500), ] # Models - for model in models[:-1]: + for model in models: model_name = type(model).__name__ print(50*'-', model_name, 50*'-') start = time.time() diff --git a/action_files/test_models/src/multivariate_models.py b/action_files/test_models/src/multivariate_models.py index 1b1d9593b..8b1577a57 100644 --- a/action_files/test_models/src/multivariate_models.py +++ b/action_files/test_models/src/multivariate_models.py @@ -10,7 +10,7 @@ from neuralforecast.models.tsmixer import TSMixer from neuralforecast.models.tsmixerx import TSMixerx from neuralforecast.models.itransformer import iTransformer -# from neuralforecast.models.stemgnn import StemGNN +# # from neuralforecast.models.stemgnn import StemGNN from neuralforecast.models.mlpmultivariate import MLPMultivariate from neuralforecast.models.timemixer import TimeMixer @@ -26,13 +26,13 @@ def main(dataset: str = 'multivariate', group: str = 'ETTm2') -> None: train['ds'] = pd.to_datetime(train['ds']) models = [ - SOFTS(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500), - TSMixer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500), - TSMixerx(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500), - iTransformer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500), - # StemGNN(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout_rate=0.0, max_steps=1000, val_check_steps=500), - MLPMultivariate(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), max_steps=1000, val_check_steps=500), - TimeMixer(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500) + SOFTS(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=500, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64), + TSMixer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64), + TSMixerx(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64), + iTransformer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=500, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64), + # StemGNN(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout_rate=0.0, max_steps=1000, val_check_steps=500, windows_batch_size=64, inference_windows_batch_size=64), + MLPMultivariate(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), max_steps=1000, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64), + TimeMixer(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=500, val_check_steps=100, windows_batch_size=64, inference_windows_batch_size=64) ] # Models diff --git a/nbs/common.base_auto.ipynb b/nbs/common.base_auto.ipynb index e120c2f33..16db978b4 100644 --- a/nbs/common.base_auto.ipynb +++ b/nbs/common.base_auto.ipynb @@ -238,7 +238,11 @@ " self.callbacks = callbacks\n", "\n", " # Base Class attributes\n", - " 
self.SAMPLING_TYPE = cls_model.SAMPLING_TYPE\n", + " self.EXOGENOUS_FUTR = cls_model.EXOGENOUS_FUTR\n", + " self.EXOGENOUS_HIST = cls_model.EXOGENOUS_HIST\n", + " self.EXOGENOUS_STAT = cls_model.EXOGENOUS_STAT\n", + " self.MULTIVARIATE = cls_model.MULTIVARIATE \n", + " self.RECURRENT = cls_model.RECURRENT \n", "\n", " def __repr__(self):\n", " return type(self).__name__ if self.alias is None else self.alias\n", diff --git a/nbs/common.base_model.ipynb b/nbs/common.base_model.ipynb index 2ae169f8f..fae60e40c 100644 --- a/nbs/common.base_model.ipynb +++ b/nbs/common.base_model.ipynb @@ -36,19 +36,25 @@ "from contextlib import contextmanager\n", "from copy import deepcopy\n", "from dataclasses import dataclass\n", + "from typing import List, Dict, Union\n", "\n", "import fsspec\n", "import numpy as np\n", "import torch\n", "import torch.nn as nn\n", + "import torch.nn.functional as F\n", "import pytorch_lightning as pl\n", + "import neuralforecast.losses.pytorch as losses\n", + "\n", + "from neuralforecast.losses.pytorch import BasePointLoss, DistributionLoss\n", "from pytorch_lightning.callbacks.early_stopping import EarlyStopping\n", "from neuralforecast.tsdataset import (\n", " TimeSeriesDataModule,\n", " BaseTimeSeriesDataset,\n", " _DistributedTimeSeriesDataModule,\n", ")\n", - "from neuralforecast.losses.pytorch import IQLoss" + "from neuralforecast.common._scalers import TemporalNorm\n", + "from neuralforecast.utils import get_indexer_raise_missing" ] }, { @@ -112,27 +118,92 @@ "source": [ "#| export\n", "class BaseModel(pl.LightningModule):\n", - " EXOGENOUS_FUTR = True\n", - " EXOGENOUS_HIST = True\n", - " EXOGENOUS_STAT = True\n", + " EXOGENOUS_FUTR = True # If the model can handle future exogenous variables\n", + " EXOGENOUS_HIST = True # If the model can handle historical exogenous variables\n", + " EXOGENOUS_STAT = True # If the model can handle static exogenous variables\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(\n", " self,\n", - " random_seed,\n", - " loss,\n", - " valid_loss,\n", - " optimizer,\n", - " optimizer_kwargs,\n", - " lr_scheduler,\n", - " lr_scheduler_kwargs,\n", - " futr_exog_list,\n", - " hist_exog_list,\n", - " stat_exog_list,\n", - " max_steps,\n", - " early_stop_patience_steps,\n", + " h: int,\n", + " input_size: int,\n", + " loss: Union[BasePointLoss, DistributionLoss, nn.Module],\n", + " valid_loss: Union[BasePointLoss, DistributionLoss, nn.Module],\n", + " learning_rate: float,\n", + " max_steps: int,\n", + " val_check_steps: int,\n", + " batch_size: int,\n", + " valid_batch_size: Union[int, None],\n", + " windows_batch_size: int,\n", + " inference_windows_batch_size: Union[int, None],\n", + " start_padding_enabled: bool,\n", + " n_series: Union[int, None] = None,\n", + " n_samples: Union[int, None] = 100,\n", + " h_train: int = 1,\n", + " inference_input_size: Union[int, None] = None,\n", + " step_size: int = 1,\n", + " num_lr_decays: int = 0,\n", + " early_stop_patience_steps: int = -1,\n", + " scaler_type: str = 'identity',\n", + " futr_exog_list: Union[List, None] = None,\n", + " hist_exog_list: Union[List, None] = None,\n", + " stat_exog_list: Union[List, None] = None,\n", + " exclude_insample_y: Union[bool, None] = False,\n", + " num_workers_loader: Union[int, None] = 0,\n", + " drop_last_loader: Union[bool, None] = False,\n", + " random_seed: Union[int, None] = 1,\n", 
+ " alias: Union[str, None] = None,\n", + " optimizer: Union[torch.optim.Optimizer, None] = None,\n", + " optimizer_kwargs: Union[Dict, None] = None,\n", + " lr_scheduler: Union[torch.optim.lr_scheduler.LRScheduler, None] = None,\n", + " lr_scheduler_kwargs: Union[Dict, None] = None,\n", + " dataloader_kwargs=None,\n", " **trainer_kwargs,\n", " ):\n", " super().__init__()\n", + "\n", + " # Multivarariate checks\n", + " if self.MULTIVARIATE and n_series is None:\n", + " raise Exception(f'{type(self).__name__} is a multivariate model. Please set n_series to the number of unique time series in your dataset.')\n", + " if not self.MULTIVARIATE:\n", + " if n_series is not None:\n", + " warnings.warn(\n", + " f'{type(self).__name__} is a univariate model. Parameter n_series is ignored.'\n", + " )\n", + " n_series = 1\n", + " self.n_series = n_series \n", + "\n", + " # Protections for previous recurrent models\n", + " if input_size < 1:\n", + " input_size = 3 * h\n", + " warnings.warn(\n", + " f'Input size too small. Automatically setting input size to 3 * horizon = {input_size}'\n", + " )\n", + "\n", + " if inference_input_size is None:\n", + " inference_input_size = input_size \n", + " elif inference_input_size is not None and inference_input_size < 1:\n", + " inference_input_size = input_size\n", + " warnings.warn(\n", + " f'Inference input size too small. Automatically setting inference input size to input_size = {input_size}'\n", + " )\n", + "\n", + " # For recurrent models we need one additional input as we need to shift insample_y to use it as input\n", + " if self.RECURRENT:\n", + " input_size += 1\n", + " inference_input_size += 1\n", + "\n", + " # Attributes needed for recurrent models\n", + " self.horizon_backup = h\n", + " self.input_size_backup = input_size\n", + " self.n_samples = n_samples\n", + " if self.RECURRENT:\n", + " self.h_train = h_train\n", + " self.inference_input_size = inference_input_size\n", + " self.rnn_state = None\n", + " self.maintain_state = False\n", + " \n", " with warnings.catch_warnings(record=False):\n", " warnings.filterwarnings('ignore')\n", " # the following line issues a warning about the loss attribute being saved\n", @@ -147,8 +218,8 @@ " self.valid_loss = loss\n", " else:\n", " self.valid_loss = valid_loss\n", - " self.train_trajectories = []\n", - " self.valid_trajectories = []\n", + " self.train_trajectories: List = []\n", + " self.valid_trajectories: List = []\n", "\n", " # Optimization\n", " if optimizer is not None and not issubclass(optimizer, torch.optim.Optimizer):\n", @@ -162,7 +233,6 @@ " self.lr_scheduler = lr_scheduler\n", " self.lr_scheduler_kwargs = lr_scheduler_kwargs if lr_scheduler_kwargs is not None else {}\n", "\n", - "\n", " # Variables\n", " self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else []\n", " self.hist_exog_list = list(hist_exog_list) if hist_exog_list is not None else []\n", @@ -181,12 +251,28 @@ " if not self.EXOGENOUS_STAT and self.stat_exog_size > 0:\n", " raise Exception(f'{type(self).__name__} does not support static exogenous variables.')\n", "\n", - " # Implicit Quantile Loss\n", - " if isinstance(self.loss, IQLoss):\n", - " if not isinstance(self.valid_loss, IQLoss):\n", - " raise Exception('Please set valid_loss to IQLoss() when training with IQLoss')\n", - " if isinstance(self.valid_loss, IQLoss) and not isinstance(self.loss, IQLoss):\n", - " raise Exception('Please set loss to IQLoss() when validating with IQLoss') \n", + " # Protections for loss functions\n", + " if 
isinstance(self.loss, (losses.IQLoss, losses.MQLoss, losses.HuberMQLoss)):\n", + " loss_type = type(self.loss)\n", + " if not isinstance(self.valid_loss, loss_type):\n", + " raise Exception(f'Please set valid_loss={type(self.loss).__name__}() when training with {type(self.loss).__name__}')\n", + " if isinstance(self.valid_loss, losses.IQLoss):\n", + " valid_loss_type = type(self.valid_loss)\n", + " if not isinstance(self.loss, valid_loss_type):\n", + " raise Exception(f'Please set loss={type(self.valid_loss).__name__}() when validating with {type(self.valid_loss).__name__}') \n", + "\n", + " # Deny impossible loss / valid_loss combinations\n", + " if isinstance(self.loss, losses.BasePointLoss) and self.valid_loss.is_distribution_output:\n", + " raise Exception(f'Validation with distribution loss {type(self.valid_loss).__name__} is not possible when using loss={type(self.loss).__name__}. Please use a point valid_loss (MAE, MSE, ...)')\n", + " elif self.valid_loss.is_distribution_output and self.valid_loss is not loss:\n", + " # Maybe we should raise a Warning or an Exception here, but meh for now.\n", + " self.valid_loss = loss\n", + " \n", + " if isinstance(self.loss, (losses.relMSE, losses.Accuracy, losses.sCRPS)):\n", + " raise Exception(f\"{type(self.loss).__name__} cannot be used for training. Please use another loss function (MAE, MSE, ...)\")\n", + " \n", + " if isinstance(self.valid_loss, (losses.relMSE)):\n", + " raise Exception(f\"{type(self.valid_loss).__name__} cannot be used for validation. Please use another valid_loss (MAE, MSE, ...)\")\n", "\n", " ## Trainer arguments ##\n", " # Max steps, validation steps and check_val_every_n_epoch\n", @@ -217,7 +303,73 @@ " if trainer_kwargs.get('enable_checkpointing', None) is None:\n", " trainer_kwargs['enable_checkpointing'] = False\n", "\n", + " # Set other attributes\n", " self.trainer_kwargs = trainer_kwargs\n", + " self.h = h\n", + " self.input_size = input_size\n", + " self.windows_batch_size = windows_batch_size\n", + " self.start_padding_enabled = start_padding_enabled\n", + "\n", + " # Padder to complete train windows, \n", + " # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0]\n", + " if start_padding_enabled:\n", + " self.padder_train = nn.ConstantPad1d(padding=(self.input_size-1, self.h), value=0.0)\n", + " else:\n", + " self.padder_train = nn.ConstantPad1d(padding=(0, self.h), value=0.0)\n", + "\n", + " # Batch sizes\n", + " if self.MULTIVARIATE and n_series is not None:\n", + " self.batch_size = max(batch_size, n_series)\n", + " else:\n", + " self.batch_size = batch_size\n", + " if valid_batch_size is None:\n", + " self.valid_batch_size = batch_size\n", + " else:\n", + " self.valid_batch_size = valid_batch_size\n", + " if inference_windows_batch_size is None:\n", + " self.inference_windows_batch_size = windows_batch_size\n", + " else:\n", + " self.inference_windows_batch_size = inference_windows_batch_size\n", + "\n", + " # Optimization \n", + " self.learning_rate = learning_rate\n", + " self.max_steps = max_steps\n", + " self.num_lr_decays = num_lr_decays\n", + " self.lr_decay_steps = (\n", + " max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7\n", + " )\n", + " self.early_stop_patience_steps = early_stop_patience_steps\n", + " self.val_check_steps = val_check_steps\n", + " self.windows_batch_size = windows_batch_size\n", + " self.step_size = step_size\n", + " \n", + " # If the model does not support exogenous, it can't support exclude_insample_y\n", + " if exclude_insample_y and not 
(self.EXOGENOUS_FUTR or self.EXOGENOUS_HIST or self.EXOGENOUS_STAT):\n", + " raise Exception(f'{type(self).__name__} does not support `exclude_insample_y=True`. Please set `exclude_insample_y=False`')\n", + "\n", + " self.exclude_insample_y = exclude_insample_y\n", + "\n", + " # Scaler\n", + " self.scaler = TemporalNorm(\n", + " scaler_type=scaler_type,\n", + " dim=1, # Time dimension is 1.\n", + " num_features= 1 + len(self.hist_exog_list) + len(self.futr_exog_list)\n", + " )\n", + "\n", + " # Fit arguments\n", + " self.val_size = 0\n", + " self.test_size = 0\n", + "\n", + " # Model state\n", + " self.decompose_forecast = False\n", + "\n", + " # DataModule arguments\n", + " self.num_workers_loader = num_workers_loader\n", + " self.dataloader_kwargs = dataloader_kwargs\n", + " self.drop_last_loader = drop_last_loader\n", + " # used by on_validation_epoch_end hook\n", + " self.validation_step_outputs: List = []\n", + " self.alias = alias\n", "\n", " def __repr__(self):\n", " return type(self).__name__ if self.alias is None else self.alias\n", @@ -246,21 +398,11 @@ " set(temporal_cols.tolist()) & set(self.hist_exog_list + self.futr_exog_list)\n", " )\n", " \n", - " def _set_quantile_for_iqloss(self, **data_module_kwargs):\n", - " if \"quantile\" in data_module_kwargs:\n", - " if not isinstance(self.loss, IQLoss):\n", - " raise Exception(\n", - " \"Please train with loss=IQLoss() to make use of the quantile argument.\"\n", - " )\n", - " else:\n", - " self.quantile = data_module_kwargs[\"quantile\"]\n", - " data_module_kwargs.pop(\"quantile\")\n", - " self.loss.update_quantile(q=self.quantile)\n", - " elif isinstance(self.loss, IQLoss):\n", - " self.quantile = 0.5\n", - " self.loss.update_quantile(q=self.quantile)\n", - "\n", - " return data_module_kwargs\n", + " def _set_quantiles(self, quantiles=None):\n", + " if quantiles is None and isinstance(self.loss, losses.IQLoss):\n", + " self.loss.update_quantile(q=[0.5])\n", + " elif hasattr(self.loss, 'update_quantile') and callable(self.loss.update_quantile):\n", + " self.loss.update_quantile(q=quantiles)\n", "\n", " def _fit_distributed(\n", " self,\n", @@ -480,7 +622,792 @@ " model.load_state_dict(content[\"state_dict\"], strict=True, assign=True)\n", " else: # pytorch<2.1\n", " model.load_state_dict(content[\"state_dict\"], strict=True)\n", - " return model" + " return model\n", + "\n", + " def _create_windows(self, batch, step, w_idxs=None):\n", + " # Parse common data\n", + " window_size = self.input_size + self.h\n", + " temporal_cols = batch['temporal_cols']\n", + " temporal = batch['temporal'] \n", + "\n", + " if step == 'train':\n", + " if self.val_size + self.test_size > 0:\n", + " cutoff = -self.val_size - self.test_size\n", + " temporal = temporal[:, :, :cutoff]\n", + "\n", + " temporal = self.padder_train(temporal)\n", + " \n", + " if temporal.shape[-1] < window_size:\n", + " raise Exception('Time series is too short for training, consider setting a smaller input size or set start_padding_enabled=True')\n", + " \n", + " windows = temporal.unfold(dimension=-1, \n", + " size=window_size, \n", + " step=self.step_size)\n", + "\n", + " if self.MULTIVARIATE:\n", + " # [n_series, C, Ws, L + h] -> [Ws, L + h, C, n_series]\n", + " windows = windows.permute(2, 3, 1, 0)\n", + " else:\n", + " # [n_series, C, Ws, L + h] -> [Ws * n_series, L + h, C, 1]\n", + " windows_per_serie = windows.shape[2]\n", + " windows = windows.permute(0, 2, 3, 1)\n", + " windows = windows.flatten(0, 1)\n", + " windows = windows.unsqueeze(-1)\n", + "\n", + " # Sample 
and Available conditions\n", + " available_idx = temporal_cols.get_loc('available_mask') \n", + " available_condition = windows[:, :self.input_size, available_idx]\n", + " available_condition = torch.sum(available_condition, axis=(1, -1)) # Sum over time & series dimension\n", + " final_condition = (available_condition > 0)\n", + " \n", + " if self.h > 0:\n", + " sample_condition = windows[:, self.input_size:, available_idx]\n", + " sample_condition = torch.sum(sample_condition, axis=(1, -1)) # Sum over time & series dimension\n", + " final_condition = (sample_condition > 0) & (available_condition > 0)\n", + " \n", + " windows = windows[final_condition]\n", + " \n", + " # Parse Static data to match windows\n", + " static = batch.get('static', None)\n", + " static_cols=batch.get('static_cols', None)\n", + "\n", + " # Repeat static if univariate: [n_series, S] -> [Ws * n_series, S]\n", + " if static is not None and not self.MULTIVARIATE:\n", + " static = torch.repeat_interleave(static, \n", + " repeats=windows_per_serie, dim=0)\n", + " static = static[final_condition] \n", + "\n", + " # Protection of empty windows\n", + " if final_condition.sum() == 0:\n", + " raise Exception('No windows available for training')\n", + "\n", + " # Sample windows\n", + " if self.windows_batch_size is not None:\n", + " n_windows = windows.shape[0]\n", + " w_idxs = np.random.choice(n_windows, \n", + " size=self.windows_batch_size,\n", + " replace=(n_windows < self.windows_batch_size))\n", + " windows = windows[w_idxs]\n", + " \n", + " if static is not None and not self.MULTIVARIATE:\n", + " static = static[w_idxs]\n", + "\n", + " windows_batch = dict(temporal=windows,\n", + " temporal_cols=temporal_cols,\n", + " static=static,\n", + " static_cols=static_cols)\n", + " return windows_batch\n", + "\n", + " elif step in ['predict', 'val']:\n", + "\n", + " if step == 'predict':\n", + " initial_input = temporal.shape[-1] - self.test_size\n", + " if initial_input <= self.input_size: # There is not enough data to predict first timestamp\n", + " temporal = F.pad(temporal, pad=(self.input_size-initial_input, 0), mode=\"constant\", value=0.0)\n", + " predict_step_size = self.predict_step_size\n", + " cutoff = - self.input_size - self.test_size\n", + " temporal = temporal[:, :, cutoff:]\n", + "\n", + " elif step == 'val':\n", + " predict_step_size = self.step_size\n", + " cutoff = -self.input_size - self.val_size - self.test_size\n", + " if self.test_size > 0:\n", + " temporal = batch['temporal'][:, :, cutoff:-self.test_size]\n", + " else:\n", + " temporal = batch['temporal'][:, :, cutoff:]\n", + " if temporal.shape[-1] < window_size:\n", + " initial_input = temporal.shape[-1] - self.val_size\n", + " temporal = F.pad(temporal, pad=(self.input_size-initial_input, 0), mode=\"constant\", value=0.0)\n", + "\n", + " if (step=='predict') and (self.test_size==0) and (len(self.futr_exog_list)==0):\n", + " temporal = F.pad(temporal, pad=(0, self.h), mode=\"constant\", value=0.0)\n", + "\n", + " windows = temporal.unfold(dimension=-1,\n", + " size=window_size,\n", + " step=predict_step_size)\n", + "\n", + " static = batch.get('static', None)\n", + " static_cols=batch.get('static_cols', None)\n", + "\n", + " if self.MULTIVARIATE:\n", + " # [n_series, C, Ws, L + h] -> [Ws, L + h, C, n_series]\n", + " windows = windows.permute(2, 3, 1, 0)\n", + " else:\n", + " # [n_series, C, Ws, L + h] -> [Ws * n_series, L + h, C, 1]\n", + " windows_per_serie = windows.shape[2]\n", + " windows = windows.permute(0, 2, 3, 1)\n", + " windows = 
windows.flatten(0, 1)\n", + " windows = windows.unsqueeze(-1)\n", + " if static is not None:\n", + " static = torch.repeat_interleave(static, \n", + " repeats=windows_per_serie, dim=0)\n", + "\n", + " # Sample windows for batched prediction\n", + " if w_idxs is not None:\n", + " windows = windows[w_idxs]\n", + " if static is not None and not self.MULTIVARIATE:\n", + " static = static[w_idxs]\n", + "\n", + " windows_batch = dict(temporal=windows,\n", + " temporal_cols=temporal_cols,\n", + " static=static,\n", + " static_cols=static_cols)\n", + " return windows_batch\n", + " else:\n", + " raise ValueError(f'Unknown step {step}') \n", + "\n", + " def _normalization(self, windows, y_idx):\n", + " # windows are already filtered by train/validation/test\n", + " # from the `create_windows_method` nor leakage risk\n", + " temporal = windows['temporal'] # [Ws, L + h, C, n_series]\n", + " temporal_cols = windows['temporal_cols'].copy() # [Ws, L + h, C, n_series]\n", + "\n", + " # To avoid leakage uses only the lags\n", + " temporal_data_cols = self._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", + " temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols)\n", + " temporal_idxs = np.append(y_idx, temporal_idxs)\n", + " temporal_data = temporal[:, :, temporal_idxs] \n", + " temporal_mask = temporal[:, :, temporal_cols.get_loc('available_mask')].clone()\n", + " if self.h > 0:\n", + " temporal_mask[:, -self.h:] = 0.0\n", + "\n", + " # Normalize. self.scaler stores the shift and scale for inverse transform\n", + " temporal_mask = temporal_mask.unsqueeze(2) # Add channel dimension for scaler.transform.\n", + " temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask)\n", + "\n", + " # Replace values in windows dict\n", + " temporal[:, :, temporal_idxs] = temporal_data\n", + " windows['temporal'] = temporal\n", + "\n", + " return windows\n", + "\n", + " def _inv_normalization(self, y_hat, y_idx):\n", + " # Receives window predictions [Ws, h, output, n_series]\n", + " # Broadcasts scale if necessary and inverts normalization\n", + " add_channel_dim = y_hat.ndim > 3\n", + " y_loc, y_scale = self._get_loc_scale(y_idx, add_channel_dim=add_channel_dim)\n", + " y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc)\n", + "\n", + " return y_hat\n", + "\n", + " def _parse_windows(self, batch, windows):\n", + " # windows: [Ws, L + h, C, n_series]\n", + "\n", + " # Filter insample lags from outsample horizon\n", + " y_idx = batch['y_idx']\n", + " mask_idx = batch['temporal_cols'].get_loc('available_mask')\n", + "\n", + " insample_y = windows['temporal'][:, :self.input_size, y_idx]\n", + " insample_mask = windows['temporal'][:, :self.input_size, mask_idx]\n", + "\n", + " # Declare additional information\n", + " outsample_y = None\n", + " outsample_mask = None\n", + " hist_exog = None\n", + " futr_exog = None\n", + " stat_exog = None\n", + "\n", + " if self.h > 0:\n", + " outsample_y = windows['temporal'][:, self.input_size:, y_idx]\n", + " outsample_mask = windows['temporal'][:, self.input_size:, mask_idx]\n", + "\n", + " # Recurrent models at t predict t+1, so we shift the input (insample_y) by one\n", + " if self.RECURRENT:\n", + " insample_y = torch.cat((insample_y, outsample_y[:, :-1]), dim=1)\n", + " insample_mask = torch.cat((insample_mask, outsample_mask[:, :-1]), dim=1)\n", + " self.maintain_state = False\n", + "\n", + " if len(self.hist_exog_list):\n", + " hist_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], 
self.hist_exog_list)\n", + " if self.RECURRENT:\n", + " hist_exog = windows['temporal'][:, :, hist_exog_idx]\n", + " hist_exog[:, self.input_size:] = 0.0\n", + " hist_exog = hist_exog[:, 1:]\n", + " else:\n", + " hist_exog = windows['temporal'][:, :self.input_size, hist_exog_idx]\n", + " if not self.MULTIVARIATE:\n", + " hist_exog = hist_exog.squeeze(-1)\n", + " else:\n", + " hist_exog = hist_exog.swapaxes(1, 2)\n", + "\n", + " if len(self.futr_exog_list):\n", + " futr_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.futr_exog_list)\n", + " futr_exog = windows['temporal'][:, :, futr_exog_idx]\n", + " if self.RECURRENT:\n", + " futr_exog = futr_exog[:, 1:]\n", + " if not self.MULTIVARIATE:\n", + " futr_exog = futr_exog.squeeze(-1)\n", + " else:\n", + " futr_exog = futr_exog.swapaxes(1, 2) \n", + "\n", + " if len(self.stat_exog_list):\n", + " static_idx = get_indexer_raise_missing(windows['static_cols'], self.stat_exog_list)\n", + " stat_exog = windows['static'][:, static_idx]\n", + "\n", + " # TODO: think a better way of removing insample_y features\n", + " if self.exclude_insample_y:\n", + " insample_y = insample_y * 0\n", + "\n", + " return insample_y, insample_mask, outsample_y, outsample_mask, \\\n", + " hist_exog, futr_exog, stat_exog \n", + "\n", + " def _get_loc_scale(self, y_idx, add_channel_dim=False):\n", + " # [B, L, C, n_series] -> [B, L, n_series]\n", + " y_scale = self.scaler.x_scale[:, :, y_idx]\n", + " y_loc = self.scaler.x_shift[:, :, y_idx]\n", + " \n", + " # [B, L, n_series] -> [B, L, n_series, 1]\n", + " if add_channel_dim:\n", + " y_scale = y_scale.unsqueeze(-1)\n", + " y_loc = y_loc.unsqueeze(-1)\n", + "\n", + " return y_loc, y_scale\n", + "\n", + " def _compute_valid_loss(self, insample_y, outsample_y, output, outsample_mask, y_idx):\n", + " if self.loss.is_distribution_output:\n", + " y_loc, y_scale = self._get_loc_scale(y_idx)\n", + " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", + " if isinstance(self.valid_loss, (losses.sCRPS, losses.MQLoss, losses.HuberMQLoss)):\n", + " _, _, quants = self.loss.sample(distr_args=distr_args) \n", + " output = quants\n", + " elif isinstance(self.valid_loss, losses.BasePointLoss):\n", + " distr = self.loss.get_distribution(distr_args=distr_args)\n", + " output = distr.mean\n", + "\n", + " # Validation Loss evaluation\n", + " if self.valid_loss.is_distribution_output:\n", + " valid_loss = self.valid_loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", + " else:\n", + " output = self._inv_normalization(y_hat=output, y_idx=y_idx)\n", + " valid_loss = self.valid_loss(y=outsample_y, y_hat=output, y_insample=insample_y, mask=outsample_mask)\n", + " return valid_loss\n", + " \n", + " def _validate_step_recurrent_batch(self, insample_y, insample_mask, futr_exog, hist_exog, stat_exog, y_idx):\n", + " # Remember state in network and set horizon to 1\n", + " self.rnn_state = None\n", + " self.maintain_state = True\n", + " self.h = 1\n", + "\n", + " # Initialize results array\n", + " n_outputs = self.loss.outputsize_multiplier\n", + " y_hat = torch.zeros((insample_y.shape[0],\n", + " self.horizon_backup,\n", + " self.n_series * n_outputs),\n", + " device=insample_y.device,\n", + " dtype=insample_y.dtype)\n", + "\n", + " # First step prediction\n", + " tau = 0\n", + " \n", + " # Set exogenous\n", + " hist_exog_current = None\n", + " if self.hist_exog_size > 0:\n", + " hist_exog_current = hist_exog[:, :self.input_size + tau - 1]\n", + "\n", + " futr_exog_current = None\n", + " 
if self.futr_exog_size > 0:\n", + " futr_exog_current = futr_exog[:, :self.input_size + tau - 1]\n", + "\n", + " # First forecast step\n", + " y_hat[:, tau], insample_y = self._validate_step_recurrent_single(\n", + " insample_y=insample_y[:, :self.input_size + tau - 1],\n", + " insample_mask=insample_mask[:, :self.input_size + tau - 1],\n", + " hist_exog=hist_exog_current,\n", + " futr_exog=futr_exog_current,\n", + " stat_exog=stat_exog,\n", + " y_idx=y_idx,\n", + " )\n", + "\n", + " # Horizon prediction recursively\n", + " for tau in range(self.horizon_backup):\n", + " # Set exogenous\n", + " if self.hist_exog_size > 0:\n", + " hist_exog_current = hist_exog[:, self.input_size + tau - 1].unsqueeze(1)\n", + "\n", + " if self.futr_exog_size > 0:\n", + " futr_exog_current = futr_exog[:, self.input_size + tau - 1].unsqueeze(1)\n", + " \n", + " y_hat[:, tau], insample_y = self._validate_step_recurrent_single(\n", + " insample_y=insample_y,\n", + " insample_mask=None,\n", + " hist_exog=hist_exog_current,\n", + " futr_exog=futr_exog_current,\n", + " stat_exog=stat_exog,\n", + " y_idx = y_idx,\n", + " )\n", + " \n", + " # Reset state and horizon\n", + " self.maintain_state = False\n", + " self.rnn_state = None\n", + " self.h = self.horizon_backup\n", + "\n", + " return y_hat \n", + "\n", + " def _validate_step_recurrent_single(self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx):\n", + " # Input sequence\n", + " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", + " insample_mask=insample_mask, # [Ws, L, n_series]\n", + " futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series]\n", + " hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series]\n", + " stat_exog=stat_exog) # univariate: [Ws, S]; multivariate: [n_series, S]\n", + "\n", + " # Model Predictions\n", + " output_batch_unmapped = self(windows_batch)\n", + " output_batch = self.loss.domain_map(output_batch_unmapped)\n", + " \n", + " # Inverse normalization and sampling\n", + " if self.loss.is_distribution_output:\n", + " # Sample distribution\n", + " y_loc, y_scale = self._get_loc_scale(y_idx)\n", + " distr_args = self.loss.scale_decouple(output=output_batch, loc=y_loc, scale=y_scale)\n", + " # When validating, the output is the mean of the distribution which is an attribute\n", + " distr = self.loss.get_distribution(distr_args=distr_args)\n", + "\n", + " # Scale back to feed back as input\n", + " insample_y = self.scaler.scaler(distr.mean, y_loc, y_scale)\n", + " else:\n", + " # Todo: for now, we assume that in case of a BasePointLoss with ndim==4, the last dimension\n", + " # contains a set of predictions for the target (e.g. MQLoss multiple quantiles), for which we use the \n", + " # mean as feedback signal for the recurrent predictions. A more precise way is to increase the\n", + " # insample input size of the recurrent network by the number of outputs so that each output\n", + " # can be fed back to a specific input channel. 
\n", + " if output_batch.ndim == 4:\n", + " output_batch = output_batch.mean(dim=-1)\n", + "\n", + " insample_y = output_batch\n", + "\n", + " # Remove horizon dim: [B, 1, N * n_outputs] -> [B, N * n_outputs]\n", + " y_hat = output_batch_unmapped.squeeze(1)\n", + " return y_hat, insample_y\n", + "\n", + " def _predict_step_recurrent_batch(self, insample_y, insample_mask, futr_exog, hist_exog, stat_exog, y_idx):\n", + " # Remember state in network and set horizon to 1\n", + " self.rnn_state = None\n", + " self.maintain_state = True\n", + " self.h = 1\n", + "\n", + " # Initialize results array\n", + " n_outputs = len(self.loss.output_names)\n", + " y_hat = torch.zeros((insample_y.shape[0],\n", + " self.horizon_backup,\n", + " self.n_series,\n", + " n_outputs),\n", + " device=insample_y.device,\n", + " dtype=insample_y.dtype)\n", + "\n", + " # First step prediction\n", + " tau = 0\n", + " \n", + " # Set exogenous\n", + " hist_exog_current = None\n", + " if self.hist_exog_size > 0:\n", + " hist_exog_current = hist_exog[:, :self.input_size + tau - 1]\n", + "\n", + " futr_exog_current = None\n", + " if self.futr_exog_size > 0:\n", + " futr_exog_current = futr_exog[:, :self.input_size + tau - 1]\n", + "\n", + " # First forecast step\n", + " y_hat[:, tau], insample_y = self._predict_step_recurrent_single(\n", + " insample_y=insample_y[:, :self.input_size + tau - 1],\n", + " insample_mask=insample_mask[:, :self.input_size + tau - 1],\n", + " hist_exog=hist_exog_current,\n", + " futr_exog=futr_exog_current,\n", + " stat_exog=stat_exog,\n", + " y_idx=y_idx,\n", + " )\n", + "\n", + " # Horizon prediction recursively\n", + " for tau in range(self.horizon_backup):\n", + " # Set exogenous\n", + " if self.hist_exog_size > 0:\n", + " hist_exog_current = hist_exog[:, self.input_size + tau - 1].unsqueeze(1)\n", + "\n", + " if self.futr_exog_size > 0:\n", + " futr_exog_current = futr_exog[:, self.input_size + tau - 1].unsqueeze(1)\n", + " \n", + " y_hat[:, tau], insample_y = self._predict_step_recurrent_single(\n", + " insample_y=insample_y,\n", + " insample_mask=None,\n", + " hist_exog=hist_exog_current,\n", + " futr_exog=futr_exog_current,\n", + " stat_exog=stat_exog,\n", + " y_idx = y_idx,\n", + " )\n", + " \n", + " # Reset state and horizon\n", + " self.maintain_state = False\n", + " self.rnn_state = None\n", + " self.h = self.horizon_backup\n", + "\n", + " # Squeeze for univariate case\n", + " if not self.MULTIVARIATE:\n", + " y_hat = y_hat.squeeze(2)\n", + "\n", + " return y_hat \n", + "\n", + " def _predict_step_recurrent_single(self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx):\n", + " # Input sequence\n", + " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", + " insample_mask=insample_mask, # [Ws, L, n_series]\n", + " futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series]\n", + " hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series]\n", + " stat_exog=stat_exog) # univariate: [Ws, S]; multivariate: [n_series, S]\n", + "\n", + " # Model Predictions\n", + " output_batch_unmapped = self(windows_batch)\n", + " output_batch = self.loss.domain_map(output_batch_unmapped)\n", + " \n", + " # Inverse normalization and sampling\n", + " if self.loss.is_distribution_output:\n", + " # Sample distribution\n", + " y_loc, y_scale = self._get_loc_scale(y_idx)\n", + " distr_args = self.loss.scale_decouple(output=output_batch, loc=y_loc, scale=y_scale)\n", + " # When predicting, we need to sample to get the quantiles. 
The mean is an attribute.\n", + " _, _, quants = self.loss.sample(distr_args=distr_args, num_samples=self.n_samples)\n", + " mean = self.loss.distr_mean\n", + "\n", + " # Scale back to feed back as input\n", + " insample_y = self.scaler.scaler(mean, y_loc, y_scale)\n", + " \n", + " # Save predictions\n", + " y_hat = torch.concat((mean.unsqueeze(-1), quants), axis=-1)\n", + "\n", + " if self.loss.return_params:\n", + " distr_args = torch.stack(distr_args, dim=-1)\n", + " if distr_args.ndim > 4:\n", + " distr_args = distr_args.flatten(-2, -1)\n", + " y_hat = torch.concat((y_hat, distr_args), axis=-1)\n", + " else:\n", + " # Todo: for now, we assume that in case of a BasePointLoss with ndim==4, the last dimension\n", + " # contains a set of predictions for the target (e.g. MQLoss multiple quantiles), for which we use the \n", + " # mean as feedback signal for the recurrent predictions. A more precise way is to increase the\n", + " # insample input size of the recurrent network by the number of outputs so that each output\n", + " # can be fed back to a specific input channel. \n", + " if output_batch.ndim == 4:\n", + " output_batch = output_batch.mean(dim=-1)\n", + "\n", + " insample_y = output_batch\n", + " y_hat = self._inv_normalization(y_hat=output_batch, y_idx=y_idx)\n", + " y_hat = y_hat.unsqueeze(-1)\n", + "\n", + " # Remove horizon dim: [B, 1, N, n_outputs] -> [B, N, n_outputs]\n", + " y_hat = y_hat.squeeze(1)\n", + " return y_hat, insample_y\n", + "\n", + " def _predict_step_direct_batch(self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx):\n", + " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", + " insample_mask=insample_mask, # [Ws, L, n_series]\n", + " futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series]\n", + " hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series]\n", + " stat_exog=stat_exog) # univariate: [Ws, S]; multivariate: [n_series, S]\n", + "\n", + " # Model Predictions\n", + " output_batch = self(windows_batch)\n", + " output_batch = self.loss.domain_map(output_batch)\n", + "\n", + " # Inverse normalization and sampling\n", + " if self.loss.is_distribution_output:\n", + " y_loc, y_scale = self._get_loc_scale(y_idx)\n", + " distr_args = self.loss.scale_decouple(output=output_batch, loc=y_loc, scale=y_scale)\n", + " _, sample_mean, quants = self.loss.sample(distr_args=distr_args)\n", + " y_hat = torch.concat((sample_mean, quants), axis=-1)\n", + "\n", + " if self.loss.return_params:\n", + " distr_args = torch.stack(distr_args, dim=-1)\n", + " if distr_args.ndim > 4:\n", + " distr_args = distr_args.flatten(-2, -1)\n", + " y_hat = torch.concat((y_hat, distr_args), axis=-1) \n", + " else:\n", + " y_hat = self._inv_normalization(y_hat=output_batch, \n", + " y_idx=y_idx)\n", + "\n", + " return y_hat\n", + " \n", + " def training_step(self, batch, batch_idx):\n", + " # Set horizon to h_train in case of recurrent model to speed up training\n", + " if self.RECURRENT:\n", + " self.h = self.h_train\n", + " \n", + " # windows: [Ws, L + h, C, n_series] or [Ws, L + h, C]\n", + " y_idx = batch['y_idx']\n", + "\n", + " windows = self._create_windows(batch, step='train')\n", + " original_outsample_y = torch.clone(windows['temporal'][:, self.input_size:, y_idx])\n", + " windows = self._normalization(windows=windows, y_idx=y_idx)\n", + " \n", + " # Parse windows\n", + " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", + " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, 
windows)\n", + "\n", + " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", + " insample_mask=insample_mask, # [Ws, L, n_series]\n", + " futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series]\n", + " hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series]\n", + " stat_exog=stat_exog) # univariate: [Ws, S]; multivariate: [n_series, S]\n", + "\n", + " # Model Predictions\n", + " output = self(windows_batch)\n", + " output = self.loss.domain_map(output)\n", + " \n", + " if self.loss.is_distribution_output:\n", + " y_loc, y_scale = self._get_loc_scale(y_idx)\n", + " outsample_y = original_outsample_y\n", + " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", + " loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", + " else:\n", + " loss = self.loss(y=outsample_y, y_hat=output, y_insample=insample_y, mask=outsample_mask)\n", + "\n", + " if torch.isnan(loss):\n", + " print('Model Parameters', self.hparams)\n", + " print('insample_y', torch.isnan(insample_y).sum())\n", + " print('outsample_y', torch.isnan(outsample_y).sum())\n", + " raise Exception('Loss is NaN, training stopped.')\n", + "\n", + " train_loss_log = loss.detach().item()\n", + " self.log(\n", + " 'train_loss',\n", + " train_loss_log,\n", + " batch_size=outsample_y.size(0),\n", + " prog_bar=True,\n", + " on_epoch=True,\n", + " )\n", + " self.train_trajectories.append((self.global_step, train_loss_log))\n", + "\n", + " self.h = self.horizon_backup\n", + "\n", + " return loss\n", + "\n", + "\n", + " def validation_step(self, batch, batch_idx):\n", + " if self.val_size == 0:\n", + " return np.nan\n", + "\n", + " # TODO: Hack to compute number of windows\n", + " windows = self._create_windows(batch, step='val')\n", + " n_windows = len(windows['temporal'])\n", + " y_idx = batch['y_idx']\n", + "\n", + " # Number of windows in batch\n", + " windows_batch_size = self.inference_windows_batch_size\n", + " if windows_batch_size < 0:\n", + " windows_batch_size = n_windows\n", + " n_batches = int(np.ceil(n_windows / windows_batch_size))\n", + "\n", + " valid_losses = []\n", + " batch_sizes = []\n", + " for i in range(n_batches):\n", + " # Create and normalize windows [Ws, L + h, C, n_series]\n", + " w_idxs = np.arange(i*windows_batch_size, \n", + " min((i+1)*windows_batch_size, n_windows))\n", + " windows = self._create_windows(batch, step='val', w_idxs=w_idxs)\n", + " original_outsample_y = torch.clone(windows['temporal'][:, self.input_size:, y_idx])\n", + "\n", + " windows = self._normalization(windows=windows, y_idx=y_idx)\n", + "\n", + " # Parse windows\n", + " insample_y, insample_mask, _, outsample_mask, \\\n", + " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", + "\n", + " if self.RECURRENT:\n", + " output_batch = self._validate_step_recurrent_batch(insample_y=insample_y,\n", + " insample_mask=insample_mask,\n", + " futr_exog=futr_exog,\n", + " hist_exog=hist_exog,\n", + " stat_exog=stat_exog,\n", + " y_idx=y_idx)\n", + " else: \n", + " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", + " insample_mask=insample_mask, # [Ws, L, n_series]\n", + " futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series]\n", + " hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series]\n", + " stat_exog=stat_exog) # univariate: [Ws, S]; multivariate: [n_series, S]\n", + " \n", + " # Model Predictions\n", + " output_batch = 
self(windows_batch) \n", + "\n", + " output_batch = self.loss.domain_map(output_batch)\n", + " valid_loss_batch = self._compute_valid_loss(insample_y=insample_y,\n", + " outsample_y=original_outsample_y,\n", + " output=output_batch, \n", + " outsample_mask=outsample_mask,\n", + " y_idx=batch['y_idx'])\n", + " valid_losses.append(valid_loss_batch)\n", + " batch_sizes.append(len(output_batch))\n", + " \n", + " valid_loss = torch.stack(valid_losses)\n", + " batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device)\n", + " batch_size = torch.sum(batch_sizes)\n", + " valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size\n", + "\n", + " if torch.isnan(valid_loss):\n", + " raise Exception('Loss is NaN, training stopped.')\n", + "\n", + " valid_loss_log = valid_loss.detach()\n", + " self.log(\n", + " 'valid_loss',\n", + " valid_loss_log.item(),\n", + " batch_size=batch_size,\n", + " prog_bar=True,\n", + " on_epoch=True,\n", + " )\n", + " self.validation_step_outputs.append(valid_loss_log)\n", + " return valid_loss\n", + "\n", + " def predict_step(self, batch, batch_idx):\n", + " if self.RECURRENT:\n", + " self.input_size = self.inference_input_size\n", + "\n", + " # TODO: Hack to compute number of windows\n", + " windows = self._create_windows(batch, step='predict')\n", + " n_windows = len(windows['temporal'])\n", + " y_idx = batch['y_idx']\n", + "\n", + " # Number of windows in batch\n", + " windows_batch_size = self.inference_windows_batch_size\n", + " if windows_batch_size < 0:\n", + " windows_batch_size = n_windows\n", + " n_batches = int(np.ceil(n_windows / windows_batch_size))\n", + " y_hats = []\n", + " for i in range(n_batches):\n", + " # Create and normalize windows [Ws, L+H, C]\n", + " w_idxs = np.arange(i*windows_batch_size, \n", + " min((i+1)*windows_batch_size, n_windows))\n", + " windows = self._create_windows(batch, step='predict', w_idxs=w_idxs)\n", + " windows = self._normalization(windows=windows, y_idx=y_idx)\n", + "\n", + " # Parse windows\n", + " insample_y, insample_mask, _, _, \\\n", + " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", + "\n", + " if self.RECURRENT: \n", + " y_hat = self._predict_step_recurrent_batch(insample_y=insample_y,\n", + " insample_mask=insample_mask,\n", + " futr_exog=futr_exog,\n", + " hist_exog=hist_exog,\n", + " stat_exog=stat_exog,\n", + " y_idx=y_idx)\n", + " else:\n", + " y_hat = self._predict_step_direct_batch(insample_y=insample_y,\n", + " insample_mask=insample_mask,\n", + " futr_exog=futr_exog,\n", + " hist_exog=hist_exog,\n", + " stat_exog=stat_exog,\n", + " y_idx=y_idx) \n", + "\n", + "\n", + " y_hats.append(y_hat)\n", + " y_hat = torch.cat(y_hats, dim=0)\n", + " self.input_size = self.input_size_backup\n", + "\n", + " return y_hat\n", + " \n", + " def fit(self, dataset, val_size=0, test_size=0, random_seed=None, distributed_config=None):\n", + " \"\"\" Fit.\n", + "\n", + " The `fit` method, optimizes the neural network's weights using the\n", + " initialization parameters (`learning_rate`, `windows_batch_size`, ...)\n", + " and the `loss` function as defined during the initialization. 
\n", + " Within `fit` we use a PyTorch Lightning `Trainer` that\n", + " inherits the initialization's `self.trainer_kwargs`, to customize\n", + " its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", + "\n", + " The method is designed to be compatible with SKLearn-like classes\n", + " and in particular to be compatible with the StatsForecast library.\n", + "\n", + " By default the `model` is not saving training checkpoints to protect \n", + " disk memory, to get them change `enable_checkpointing=True` in `__init__`.\n", + "\n", + " **Parameters:**
\n", + " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + " `val_size`: int, validation size for temporal cross-validation.
\n", + " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + " `test_size`: int, test size for temporal cross-validation.
\n", + " \"\"\"\n", + " return self._fit(\n", + " dataset=dataset,\n", + " batch_size=self.batch_size,\n", + " valid_batch_size=self.valid_batch_size,\n", + " val_size=val_size,\n", + " test_size=test_size,\n", + " random_seed=random_seed,\n", + " distributed_config=distributed_config,\n", + " )\n", + "\n", + " def predict(self, dataset, test_size=None, step_size=1,\n", + " random_seed=None, quantiles=None, **data_module_kwargs):\n", + " \"\"\" Predict.\n", + "\n", + " Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", + "\n", + " **Parameters:**
\n", + " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + " `test_size`: int=None, test size for temporal cross-validation.
\n", + " `step_size`: int=1, Step size between each window.
\n", + " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + " `quantiles`: list of floats, optional (default=None), target quantiles to predict.
\n", + " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", + " \"\"\"\n", + " self._check_exog(dataset)\n", + " self._restart_seed(random_seed)\n", + " if \"quantile\" in data_module_kwargs:\n", + " warnings.warn(\"The 'quantile' argument will be deprecated, use 'quantiles' instead.\")\n", + " if quantiles is not None:\n", + " raise ValueError(\"You can't specify quantile and quantiles.\")\n", + " quantiles = [data_module_kwargs.pop(\"quantile\")]\n", + " self._set_quantiles(quantiles)\n", + "\n", + " self.predict_step_size = step_size\n", + " self.decompose_forecast = False\n", + " datamodule = TimeSeriesDataModule(dataset=dataset,\n", + " valid_batch_size=self.valid_batch_size,\n", + " **data_module_kwargs)\n", + "\n", + " # Protect when case of multiple gpu. PL does not support return preds with multiple gpu.\n", + " pred_trainer_kwargs = self.trainer_kwargs.copy()\n", + " if (pred_trainer_kwargs.get('accelerator', None) == \"gpu\") and (torch.cuda.device_count() > 1):\n", + " pred_trainer_kwargs['devices'] = [0]\n", + "\n", + " trainer = pl.Trainer(**pred_trainer_kwargs)\n", + " fcsts = trainer.predict(self, datamodule=datamodule) \n", + " fcsts = torch.vstack(fcsts)\n", + "\n", + " if self.MULTIVARIATE:\n", + " # [B, h, n_series (, Q)] -> [n_series, B, h (, Q)]\n", + " fcsts = fcsts.swapaxes(0, 2)\n", + " fcsts = fcsts.swapaxes(1, 2)\n", + "\n", + " fcsts = fcsts.numpy().flatten()\n", + " fcsts = fcsts.reshape(-1, len(self.loss.output_names))\n", + " return fcsts\n", + "\n", + " def decompose(self, dataset, step_size=1, random_seed=None, quantiles=None, **data_module_kwargs):\n", + " \"\"\" Decompose Predictions.\n", + "\n", + " Decompose the predictions through the network's layers.\n", + " Available methods are `ESRNN`, `NHITS`, `NBEATS`, and `NBEATSx`.\n", + "\n", + " **Parameters:**
\n", + " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation here](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + " `step_size`: int=1, step size between each window of temporal data.
\n", + " `quantiles`: list of floats, optional (default=None), target quantiles to predict.
\n", + " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", + " \"\"\"\n", + " # Restart random seed\n", + " if random_seed is None:\n", + " random_seed = self.random_seed\n", + " torch.manual_seed(random_seed)\n", + " self._set_quantiles(quantiles)\n", + "\n", + " self.predict_step_size = step_size\n", + " self.decompose_forecast = True\n", + " datamodule = TimeSeriesDataModule(dataset=dataset,\n", + " valid_batch_size=self.valid_batch_size,\n", + " **data_module_kwargs)\n", + " trainer = pl.Trainer(**self.trainer_kwargs)\n", + " fcsts = trainer.predict(self, datamodule=datamodule)\n", + " self.decompose_forecast = False # Default decomposition back to false\n", + " return torch.vstack(fcsts).numpy() " ] } ], diff --git a/nbs/common.base_multivariate.ipynb b/nbs/common.base_multivariate.ipynb deleted file mode 100644 index f1321600d..000000000 --- a/nbs/common.base_multivariate.ipynb +++ /dev/null @@ -1,625 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| default_exp common._base_multivariate" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# BaseMultivariate\n", - "\n", - "> The `BaseWindows` class contains standard methods shared across window-based multivariate neural networks; in contrast to recurrent neural networks these models commit to a fixed sequence length input." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The standard methods include data preprocessing `_normalization`, optimization utilities like parameter initialization, `training_step`, `validation_step`, and shared `fit` and `predict` methods.These shared methods enable all the `neuralforecast.models` compatibility with the `core.NeuralForecast` wrapper class. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import pytorch_lightning as pl\n", - "import neuralforecast.losses.pytorch as losses\n", - "\n", - "from neuralforecast.common._base_model import BaseModel\n", - "from neuralforecast.common._scalers import TemporalNorm\n", - "from neuralforecast.tsdataset import TimeSeriesDataModule\n", - "from neuralforecast.utils import get_indexer_raise_missing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "class BaseMultivariate(BaseModel):\n", - " \"\"\" Base Multivariate\n", - " \n", - " Base class for all multivariate models. The forecasts for all time-series are produced simultaneously \n", - " within each window, which are randomly sampled during training.\n", - " \n", - " This class implements the basic functionality for all windows-based models, including:\n", - " - PyTorch Lightning's methods training_step, validation_step, predict_step.
\n", - " - fit and predict methods used by NeuralForecast.core class.
\n", - " - sampling and wrangling methods to generate multivariate windows.\n", - " \"\"\"\n", - " def __init__(self, \n", - " h,\n", - " input_size,\n", - " loss,\n", - " valid_loss,\n", - " learning_rate,\n", - " max_steps,\n", - " val_check_steps,\n", - " n_series,\n", - " batch_size,\n", - " step_size=1,\n", - " num_lr_decays=0,\n", - " early_stop_patience_steps=-1,\n", - " scaler_type='robust',\n", - " futr_exog_list=None,\n", - " hist_exog_list=None,\n", - " stat_exog_list=None,\n", - " num_workers_loader=0,\n", - " drop_last_loader=False,\n", - " random_seed=1, \n", - " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", - " dataloader_kwargs=None,\n", - " **trainer_kwargs):\n", - " super().__init__(\n", - " random_seed=random_seed,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs, \n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", - " max_steps=max_steps,\n", - " early_stop_patience_steps=early_stop_patience_steps,\n", - " **trainer_kwargs,\n", - " )\n", - "\n", - " # Padder to complete train windows, \n", - " # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0]\n", - " self.h = h\n", - " self.input_size = input_size\n", - " self.n_series = n_series\n", - " self.padder = nn.ConstantPad1d(padding=(0, self.h), value=0.0)\n", - "\n", - " # Multivariate models do not support these loss functions yet.\n", - " unsupported_losses = (\n", - " losses.sCRPS,\n", - " losses.MQLoss,\n", - " losses.DistributionLoss,\n", - " losses.PMM,\n", - " losses.GMM,\n", - " losses.HuberMQLoss,\n", - " losses.MASE,\n", - " losses.relMSE,\n", - " losses.NBMM,\n", - " )\n", - " if isinstance(self.loss, unsupported_losses):\n", - " raise Exception(f\"{self.loss} is not supported in a Multivariate model.\") \n", - " if isinstance(self.valid_loss, unsupported_losses):\n", - " raise Exception(f\"{self.valid_loss} is not supported in a Multivariate model.\") \n", - "\n", - " self.batch_size = batch_size\n", - " \n", - " # Optimization\n", - " self.learning_rate = learning_rate\n", - " self.max_steps = max_steps\n", - " self.num_lr_decays = num_lr_decays\n", - " self.lr_decay_steps = max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7\n", - " self.early_stop_patience_steps = early_stop_patience_steps\n", - " self.val_check_steps = val_check_steps\n", - " self.step_size = step_size\n", - "\n", - " # Scaler\n", - " self.scaler = TemporalNorm(scaler_type=scaler_type, dim=2) # Time dimension is in the second axis\n", - "\n", - " # Fit arguments\n", - " self.val_size = 0\n", - " self.test_size = 0\n", - "\n", - " # Model state\n", - " self.decompose_forecast = False\n", - "\n", - " # DataModule arguments\n", - " self.num_workers_loader = num_workers_loader\n", - " self.dataloader_kwargs = dataloader_kwargs\n", - " self.drop_last_loader = drop_last_loader\n", - " # used by on_validation_epoch_end hook\n", - " self.validation_step_outputs = []\n", - " self.alias = alias\n", - "\n", - " def _create_windows(self, batch, step):\n", - " # Parse common data\n", - " window_size = self.input_size + self.h\n", - " temporal_cols = batch['temporal_cols']\n", - " temporal = batch['temporal']\n", - "\n", - " if step == 'train':\n", - " if self.val_size + self.test_size > 0:\n", - " cutoff = -self.val_size 
- self.test_size\n", - " temporal = temporal[:, :, :cutoff]\n", - "\n", - " temporal = self.padder(temporal)\n", - " windows = temporal.unfold(dimension=-1, \n", - " size=window_size, \n", - " step=self.step_size)\n", - " # [n_series, C, Ws, L+H] 0, 1, 2, 3\n", - "\n", - " # Sample and Available conditions\n", - " available_idx = temporal_cols.get_loc('available_mask')\n", - " sample_condition = windows[:, available_idx, :, -self.h:]\n", - " sample_condition = torch.sum(sample_condition, axis=2) # Sum over time\n", - " sample_condition = torch.sum(sample_condition, axis=0) # Sum over time-series\n", - " available_condition = windows[:, available_idx, :, :-self.h]\n", - " available_condition = torch.sum(available_condition, axis=2) # Sum over time\n", - " available_condition = torch.sum(available_condition, axis=0) # Sum over time-series\n", - " final_condition = (sample_condition > 0) & (available_condition > 0) # Of shape [Ws]\n", - " windows = windows[:, :, final_condition, :]\n", - "\n", - " # Get Static data\n", - " static = batch.get('static', None)\n", - " static_cols = batch.get('static_cols', None)\n", - "\n", - " # Protection of empty windows\n", - " if final_condition.sum() == 0:\n", - " raise Exception('No windows available for training')\n", - "\n", - " # Sample windows\n", - " n_windows = windows.shape[2]\n", - " if self.batch_size is not None:\n", - " w_idxs = np.random.choice(n_windows, \n", - " size=self.batch_size,\n", - " replace=(n_windows < self.batch_size))\n", - " windows = windows[:, :, w_idxs, :]\n", - "\n", - " windows = windows.permute(2, 1, 3, 0) # [Ws, C, L+H, n_series]\n", - "\n", - " windows_batch = dict(temporal=windows,\n", - " temporal_cols=temporal_cols,\n", - " static=static,\n", - " static_cols=static_cols)\n", - "\n", - " return windows_batch\n", - "\n", - " elif step in ['predict', 'val']:\n", - "\n", - " if step == 'predict':\n", - " predict_step_size = self.predict_step_size\n", - " cutoff = - self.input_size - self.test_size\n", - " temporal = batch['temporal'][:, :, cutoff:]\n", - "\n", - " elif step == 'val':\n", - " predict_step_size = self.step_size\n", - " cutoff = -self.input_size - self.val_size - self.test_size\n", - " if self.test_size > 0:\n", - " temporal = batch['temporal'][:, :, cutoff:-self.test_size]\n", - " else:\n", - " temporal = batch['temporal'][:, :, cutoff:]\n", - "\n", - " if (step=='predict') and (self.test_size==0) and (len(self.futr_exog_list)==0):\n", - " temporal = self.padder(temporal)\n", - "\n", - " windows = temporal.unfold(dimension=-1,\n", - " size=window_size,\n", - " step=predict_step_size)\n", - " # [n_series, C, Ws, L+H] -> [Ws, C, L+H, n_series]\n", - " windows = windows.permute(2, 1, 3, 0)\n", - "\n", - " # Get Static data\n", - " static = batch.get('static', None)\n", - " static_cols=batch.get('static_cols', None)\n", - "\n", - " windows_batch = dict(temporal=windows,\n", - " temporal_cols=temporal_cols,\n", - " static=static,\n", - " static_cols=static_cols)\n", - "\n", - "\n", - " return windows_batch\n", - " else:\n", - " raise ValueError(f'Unknown step {step}') \n", - "\n", - " def _normalization(self, windows, y_idx):\n", - " \n", - " # windows are already filtered by train/validation/test\n", - " # from the `create_windows_method` nor leakage risk\n", - " temporal = windows['temporal'] # [Ws, C, L+H, n_series]\n", - " temporal_cols = windows['temporal_cols'].copy() # [Ws, C, L+H, n_series]\n", - "\n", - " # To avoid leakage uses only the lags\n", - " temporal_data_cols = 
self._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", - " temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols)\n", - " temporal_idxs = np.append(y_idx, temporal_idxs)\n", - " temporal_data = temporal[:, temporal_idxs, :, :]\n", - " temporal_mask = temporal[:, temporal_cols.get_loc('available_mask'), :, :].clone()\n", - " temporal_mask[:, -self.h:, :] = 0.0\n", - "\n", - " # Normalize. self.scaler stores the shift and scale for inverse transform\n", - " temporal_mask = temporal_mask.unsqueeze(1) # Add channel dimension for scaler.transform.\n", - " temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask)\n", - " # Replace values in windows dict\n", - " temporal[:, temporal_idxs, :, :] = temporal_data\n", - " windows['temporal'] = temporal\n", - "\n", - " return windows\n", - "\n", - " def _inv_normalization(self, y_hat, temporal_cols, y_idx):\n", - " # Receives window predictions [Ws, H, n_series]\n", - " # Broadcasts outputs and inverts normalization\n", - "\n", - " # Add C dimension\n", - " # if y_hat.ndim == 2:\n", - " # remove_dimension = True\n", - " # y_hat = y_hat.unsqueeze(-1)\n", - " # else:\n", - " # remove_dimension = False\n", - " \n", - " y_scale = self.scaler.x_scale[:, [y_idx], :].squeeze(1)\n", - " y_loc = self.scaler.x_shift[:, [y_idx], :].squeeze(1)\n", - "\n", - " # y_scale = torch.repeat_interleave(y_scale, repeats=y_hat.shape[-1], dim=-1)\n", - " # y_loc = torch.repeat_interleave(y_loc, repeats=y_hat.shape[-1], dim=-1)\n", - "\n", - " y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc)\n", - "\n", - " # if remove_dimension:\n", - " # y_hat = y_hat.squeeze(-1)\n", - " # y_loc = y_loc.squeeze(-1)\n", - " # y_scale = y_scale.squeeze(-1)\n", - "\n", - " return y_hat, y_loc, y_scale\n", - "\n", - " def _parse_windows(self, batch, windows):\n", - " # Temporal: [Ws, C, L+H, n_series]\n", - "\n", - " # Filter insample lags from outsample horizon\n", - " mask_idx = batch['temporal_cols'].get_loc('available_mask')\n", - " y_idx = batch['y_idx'] \n", - " insample_y = windows['temporal'][:, y_idx, :-self.h, :]\n", - " insample_mask = windows['temporal'][:, mask_idx, :-self.h, :]\n", - " outsample_y = windows['temporal'][:, y_idx, -self.h:, :]\n", - " outsample_mask = windows['temporal'][:, mask_idx, -self.h:, :]\n", - "\n", - " # Filter historic exogenous variables\n", - " if len(self.hist_exog_list):\n", - " hist_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.hist_exog_list)\n", - " hist_exog = windows['temporal'][:, hist_exog_idx, :-self.h, :]\n", - " else:\n", - " hist_exog = None\n", - " \n", - " # Filter future exogenous variables\n", - " if len(self.futr_exog_list):\n", - " futr_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.futr_exog_list)\n", - " futr_exog = windows['temporal'][:, futr_exog_idx, :, :]\n", - " else:\n", - " futr_exog = None\n", - "\n", - " # Filter static variables\n", - " if len(self.stat_exog_list):\n", - " static_idx = get_indexer_raise_missing(windows['static_cols'], self.stat_exog_list)\n", - " stat_exog = windows['static'][:, static_idx]\n", - " else:\n", - " stat_exog = None\n", - "\n", - " return insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog\n", - "\n", - " def training_step(self, batch, batch_idx): \n", - " # Create and normalize windows [batch_size, n_series, C, L+H]\n", - " windows = self._create_windows(batch, step='train')\n", - " y_idx = batch['y_idx']\n", - " windows = 
self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", - " insample_mask=insample_mask, # [Ws, L, n_series]\n", - " futr_exog=futr_exog, # [Ws, F, L + h, n_series]\n", - " hist_exog=hist_exog, # [Ws, X, L, n_series]\n", - " stat_exog=stat_exog) # [n_series, S]\n", - "\n", - " # Model Predictions\n", - " output = self(windows_batch)\n", - " if self.loss.is_distribution_output:\n", - " outsample_y, y_loc, y_scale = self._inv_normalization(y_hat=outsample_y,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - " else:\n", - " loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "\n", - " if torch.isnan(loss):\n", - " print('Model Parameters', self.hparams)\n", - " print('insample_y', torch.isnan(insample_y).sum())\n", - " print('outsample_y', torch.isnan(outsample_y).sum())\n", - " print('output', torch.isnan(output).sum())\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'train_loss',\n", - " loss.detach().item(),\n", - " batch_size=outsample_y.size(0),\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.train_trajectories.append((self.global_step, loss.detach().item()))\n", - " return loss\n", - "\n", - " def validation_step(self, batch, batch_idx):\n", - " if self.val_size == 0:\n", - " return np.nan\n", - " \n", - " # Create and normalize windows [Ws, L+H, C]\n", - " windows = self._create_windows(batch, step='val')\n", - " y_idx = batch['y_idx']\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", - " insample_mask=insample_mask, # [Ws, L, n_series]\n", - " futr_exog=futr_exog, # [Ws, F, L + h, n_series]\n", - " hist_exog=hist_exog, # [Ws, X, L, n_series]\n", - " stat_exog=stat_exog) # [n_series, S]\n", - "\n", - " # Model Predictions\n", - " output = self(windows_batch)\n", - " if self.loss.is_distribution_output:\n", - " outsample_y, y_loc, y_scale = self._inv_normalization(y_hat=outsample_y,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - "\n", - " if str(type(self.valid_loss)) in\\\n", - " [\"\", \"\"]:\n", - " _, output = self.loss.sample(distr_args=distr_args)\n", - "\n", - " # Validation Loss evaluation\n", - " if self.valid_loss.is_distribution_output:\n", - " valid_loss = self.valid_loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - " else:\n", - " valid_loss = self.valid_loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "\n", - " if torch.isnan(valid_loss):\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'valid_loss',\n", - " valid_loss.detach().item(),\n", - " batch_size=outsample_y.size(0),\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.validation_step_outputs.append(valid_loss)\n", - 
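
The window wrangling above is easiest to follow with concrete shapes. Below is a minimal sketch of the `unfold`/`permute` mechanics used by `_create_windows`, on toy tensors; all sizes are hypothetical and the real batch comes from `TimeSeriesDataModule`:

```python
import torch

n_series, C, T = 3, 2, 48          # series, channels, timestamps (toy sizes)
input_size, h, step_size = 12, 6, 1
window_size = input_size + h

temporal = torch.randn(n_series, C, T)
windows = temporal.unfold(dimension=-1, size=window_size, step=step_size)
print(windows.shape)               # [n_series, C, Ws, L+H] = [3, 2, 31, 18]

# BaseMultivariate then permutes to [Ws, C, L+H, n_series], so each sampled
# window carries every series simultaneously:
print(windows.permute(2, 1, 3, 0).shape)   # [31, 2, 18, 3]
```
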
" return valid_loss\n", - "\n", - " def predict_step(self, batch, batch_idx): \n", - " # Create and normalize windows [Ws, L+H, C]\n", - " windows = self._create_windows(batch, step='predict')\n", - " y_idx = batch['y_idx'] \n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, _, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L, n_series]\n", - " insample_mask=insample_mask, # [Ws, L, n_series]\n", - " futr_exog=futr_exog, # [Ws, F, L + h, n_series]\n", - " hist_exog=hist_exog, # [Ws, X, L, n_series]\n", - " stat_exog=stat_exog) # [n_series, S]\n", - "\n", - " # Model Predictions\n", - " output = self(windows_batch)\n", - " if self.loss.is_distribution_output:\n", - " _, y_loc, y_scale = self._inv_normalization(y_hat=torch.empty(size=(insample_y.shape[0], \n", - " self.h, \n", - " self.n_series),\n", - " dtype=output[0].dtype,\n", - " device=output[0].device),\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " _, y_hat = self.loss.sample(distr_args=distr_args)\n", - "\n", - " if self.loss.return_params:\n", - " distr_args = torch.stack(distr_args, dim=-1)\n", - " distr_args = torch.reshape(distr_args, (len(windows[\"temporal\"]), self.h, -1))\n", - " y_hat = torch.concat((y_hat, distr_args), axis=2)\n", - " else:\n", - " y_hat, _, _ = self._inv_normalization(y_hat=output,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " return y_hat\n", - " \n", - " def fit(self, dataset, val_size=0, test_size=0, random_seed=None, distributed_config=None):\n", - " \"\"\" Fit.\n", - "\n", - " The `fit` method, optimizes the neural network's weights using the\n", - " initialization parameters (`learning_rate`, `windows_batch_size`, ...)\n", - " and the `loss` function as defined during the initialization. \n", - " Within `fit` we use a PyTorch Lightning `Trainer` that\n", - " inherits the initialization's `self.trainer_kwargs`, to customize\n", - " its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", - "\n", - " The method is designed to be compatible with SKLearn-like classes\n", - " and in particular to be compatible with the StatsForecast library.\n", - "\n", - " By default the `model` is not saving training checkpoints to protect \n", - " disk memory, to get them change `enable_checkpointing=True` in `__init__`.\n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `val_size`: int, validation size for temporal cross-validation.
\n", - " `test_size`: int, test size for temporal cross-validation.
\n", - " \"\"\"\n", - " if distributed_config is not None:\n", - " raise ValueError(\"multivariate models cannot be trained using distributed data parallel.\")\n", - " return self._fit(\n", - " dataset=dataset,\n", - " batch_size=self.n_series,\n", - " valid_batch_size=self.n_series,\n", - " val_size=val_size,\n", - " test_size=test_size,\n", - " random_seed=random_seed,\n", - " shuffle_train=False,\n", - " distributed_config=None,\n", - " )\n", - "\n", - " def predict(self, dataset, test_size=None, step_size=1, random_seed=None, **data_module_kwargs):\n", - " \"\"\" Predict.\n", - "\n", - " Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `test_size`: int=None, test size for temporal cross-validation.
\n", - " `step_size`: int=1, Step size between each window.
\n", - " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", - " \"\"\"\n", - " self._check_exog(dataset)\n", - " self._restart_seed(random_seed)\n", - " data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs)\n", - "\n", - " self.predict_step_size = step_size\n", - " self.decompose_forecast = False\n", - " datamodule = TimeSeriesDataModule(dataset=dataset, \n", - " valid_batch_size=self.n_series, \n", - " batch_size=self.n_series,\n", - " **data_module_kwargs)\n", - "\n", - " # Protect when case of multiple gpu. PL does not support return preds with multiple gpu.\n", - " pred_trainer_kwargs = self.trainer_kwargs.copy()\n", - " if (pred_trainer_kwargs.get('accelerator', None) == \"gpu\") and (torch.cuda.device_count() > 1):\n", - " pred_trainer_kwargs['devices'] = [0]\n", - "\n", - " trainer = pl.Trainer(**pred_trainer_kwargs)\n", - " fcsts = trainer.predict(self, datamodule=datamodule)\n", - " fcsts = torch.vstack(fcsts).numpy()\n", - "\n", - " fcsts = np.transpose(fcsts, (2,0,1))\n", - " fcsts = fcsts.flatten()\n", - " fcsts = fcsts.reshape(-1, len(self.loss.output_names))\n", - " return fcsts\n", - "\n", - " def decompose(self, dataset, step_size=1, random_seed=None, **data_module_kwargs):\n", - " raise NotImplementedError('decompose')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from fastcore.test import test_fail" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test unsupported losses\n", - "test_fail(\n", - " lambda: BaseMultivariate(\n", - " h=1,\n", - " input_size=1,\n", - " loss=losses.MQLoss(),\n", - " valid_loss=losses.RMSE(),\n", - " learning_rate=1,\n", - " max_steps=1,\n", - " val_check_steps=1,\n", - " n_series=1,\n", - " batch_size=1,\n", - " ),\n", - " contains='MQLoss() is not supported'\n", - ")\n", - "\n", - "test_fail(\n", - " lambda: BaseMultivariate(\n", - " h=1,\n", - " input_size=1,\n", - " loss=losses.RMSE(),\n", - " valid_loss=losses.MASE(seasonality=1),\n", - " learning_rate=1,\n", - " max_steps=1,\n", - " val_check_steps=1,\n", - " n_series=1,\n", - " batch_size=1,\n", - " ),\n", - " contains='MASE() is not supported'\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "python3", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/nbs/common.base_recurrent.ipynb b/nbs/common.base_recurrent.ipynb deleted file mode 100644 index 7b0ed5585..000000000 --- a/nbs/common.base_recurrent.ipynb +++ /dev/null @@ -1,663 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| default_exp common._base_recurrent" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# BaseRecurrent" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> The `BaseRecurrent` class contains standard methods shared across recurrent neural networks; these models possess the ability to process variable-length sequences of inputs through their internal memory states. 
The class is represented by `LSTM`, `GRU`, and `RNN`, along with other more sophisticated architectures like `MQCNN`." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The standard methods include `TemporalNorm` preprocessing, optimization utilities like parameter initialization, `training_step`, `validation_step`, and shared `fit` and `predict` methods.These shared methods enable all the `neuralforecast.models` compatibility with the `core.NeuralForecast` wrapper class." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import pytorch_lightning as pl\n", - "import neuralforecast.losses.pytorch as losses\n", - "\n", - "from neuralforecast.common._base_model import BaseModel\n", - "from neuralforecast.common._scalers import TemporalNorm\n", - "from neuralforecast.tsdataset import TimeSeriesDataModule\n", - "from neuralforecast.utils import get_indexer_raise_missing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "class BaseRecurrent(BaseModel):\n", - " \"\"\" Base Recurrent\n", - " \n", - " Base class for all recurrent-based models. The forecasts are produced sequentially between \n", - " windows.\n", - " \n", - " This class implements the basic functionality for all windows-based models, including:\n", - " - PyTorch Lightning's methods training_step, validation_step, predict_step.
\n", - " - fit and predict methods used by NeuralForecast.core class.
\n", - " - sampling and wrangling methods to sequential windows.
\n", - " \"\"\"\n", - " def __init__(self,\n", - " h,\n", - " input_size,\n", - " inference_input_size,\n", - " loss,\n", - " valid_loss,\n", - " learning_rate,\n", - " max_steps,\n", - " val_check_steps,\n", - " batch_size,\n", - " valid_batch_size,\n", - " scaler_type='robust',\n", - " num_lr_decays=0,\n", - " early_stop_patience_steps=-1,\n", - " futr_exog_list=None,\n", - " hist_exog_list=None,\n", - " stat_exog_list=None,\n", - " num_workers_loader=0,\n", - " drop_last_loader=False,\n", - " random_seed=1, \n", - " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", - " dataloader_kwargs=None,\n", - " **trainer_kwargs):\n", - " super().__init__(\n", - " random_seed=random_seed,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", - " max_steps=max_steps,\n", - " early_stop_patience_steps=early_stop_patience_steps, \n", - " **trainer_kwargs,\n", - " )\n", - "\n", - " # Padder to complete train windows, \n", - " # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0]\n", - " self.h = h\n", - " self.input_size = input_size\n", - " self.inference_input_size = inference_input_size\n", - " self.padder = nn.ConstantPad1d(padding=(0, self.h), value=0.0)\n", - "\n", - " unsupported_distributions = ['Bernoulli', 'ISQF']\n", - " if isinstance(self.loss, losses.DistributionLoss) and\\\n", - " self.loss.distribution in unsupported_distributions:\n", - " raise Exception(f'Distribution {self.loss.distribution} not available for Recurrent-based models. 
Please choose another distribution.')\n", - "\n", - " # Valid batch_size\n", - " self.batch_size = batch_size\n", - " if valid_batch_size is None:\n", - " self.valid_batch_size = batch_size\n", - " else:\n", - " self.valid_batch_size = valid_batch_size\n", - "\n", - " # Optimization\n", - " self.learning_rate = learning_rate\n", - " self.max_steps = max_steps\n", - " self.num_lr_decays = num_lr_decays\n", - " self.lr_decay_steps = max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7\n", - " self.early_stop_patience_steps = early_stop_patience_steps\n", - " self.val_check_steps = val_check_steps\n", - "\n", - " # Scaler\n", - " self.scaler = TemporalNorm(\n", - " scaler_type=scaler_type,\n", - " dim=-1, # Time dimension is -1.\n", - " num_features=1+len(self.hist_exog_list)+len(self.futr_exog_list)\n", - " )\n", - "\n", - " # Fit arguments\n", - " self.val_size = 0\n", - " self.test_size = 0\n", - "\n", - " # DataModule arguments\n", - " self.num_workers_loader = num_workers_loader\n", - " self.dataloader_kwargs = dataloader_kwargs\n", - " self.drop_last_loader = drop_last_loader\n", - " # used by on_validation_epoch_end hook\n", - " self.validation_step_outputs = []\n", - " self.alias = alias\n", - "\n", - " def _normalization(self, batch, val_size=0, test_size=0):\n", - " temporal = batch['temporal'] # B, C, T\n", - " temporal_cols = batch['temporal_cols'].copy()\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Separate data and mask\n", - " temporal_data_cols = self._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", - " temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols)\n", - " temporal_idxs = np.append(y_idx, temporal_idxs)\n", - " temporal_data = temporal[:, temporal_idxs, :]\n", - " temporal_mask = temporal[:, temporal_cols.get_loc('available_mask'), :].clone()\n", - "\n", - " # Remove validation and test set to prevent leakeage\n", - " if val_size + test_size > 0:\n", - " cutoff = val_size + test_size\n", - " temporal_mask[:, -cutoff:] = 0\n", - "\n", - " # Normalize. 
self.scaler stores the shift and scale for inverse transform\n", - " temporal_mask = temporal_mask.unsqueeze(1) # Add channel dimension for scaler.transform.\n", - " temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask)\n", - "\n", - " # Replace values in windows dict\n", - " temporal[:, temporal_idxs, :] = temporal_data\n", - " batch['temporal'] = temporal\n", - "\n", - " return batch\n", - "\n", - " def _inv_normalization(self, y_hat, temporal_cols, y_idx):\n", - " # Receives window predictions [B, seq_len, H, output]\n", - " # Broadcasts outputs and inverts normalization\n", - "\n", - " # Get 'y' scale and shift, and add W dimension\n", - " y_loc = self.scaler.x_shift[:, [y_idx], 0].flatten() #[B,C,T] -> [B] \n", - " y_scale = self.scaler.x_scale[:, [y_idx], 0].flatten() #[B,C,T] -> [B]\n", - "\n", - " # Expand scale and shift to y_hat dimensions\n", - " y_loc = y_loc.view(*y_loc.shape, *(1,)*(y_hat.ndim-1))#.expand(y_hat) \n", - " y_scale = y_scale.view(*y_scale.shape, *(1,)*(y_hat.ndim-1))#.expand(y_hat)\n", - "\n", - " y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc)\n", - "\n", - " return y_hat, y_loc, y_scale\n", - "\n", - " def _create_windows(self, batch, step):\n", - " temporal = batch['temporal']\n", - " temporal_cols = batch['temporal_cols']\n", - "\n", - " if step == 'train':\n", - " if self.val_size + self.test_size > 0:\n", - " cutoff = -self.val_size - self.test_size\n", - " temporal = temporal[:, :, :cutoff]\n", - " temporal = self.padder(temporal)\n", - "\n", - " # Truncate batch to shorter time-series \n", - " av_condition = torch.nonzero(torch.min(temporal[:, temporal_cols.get_loc('available_mask')], axis=0).values)\n", - " min_time_stamp = int(av_condition.min())\n", - " \n", - " available_ts = temporal.shape[-1] - min_time_stamp\n", - " if available_ts < 1 + self.h:\n", - " raise Exception(\n", - " 'Time series too short for given input and output size. 
\\n'\n", - " f'Available timestamps: {available_ts}'\n", - " )\n", - "\n", - " temporal = temporal[:, :, min_time_stamp:]\n", - "\n", - " if step == 'val':\n", - " if self.test_size > 0:\n", - " temporal = temporal[:, :, :-self.test_size]\n", - " temporal = self.padder(temporal)\n", - "\n", - " if step == 'predict':\n", - " if (self.test_size == 0) and (len(self.futr_exog_list)==0):\n", - " temporal = self.padder(temporal)\n", - "\n", - " # Test size covers all data, pad left one timestep with zeros\n", - " if temporal.shape[-1] == self.test_size:\n", - " padder_left = nn.ConstantPad1d(padding=(1, 0), value=0.0)\n", - " temporal = padder_left(temporal)\n", - "\n", - " # Parse batch\n", - " window_size = 1 + self.h # 1 for current t and h for future\n", - " windows = temporal.unfold(dimension=-1,\n", - " size=window_size,\n", - " step=1)\n", - "\n", - " # Truncated backprogatation/inference (shorten sequence where RNNs unroll)\n", - " n_windows = windows.shape[2]\n", - " input_size = -1\n", - " if (step == 'train') and (self.input_size>0):\n", - " input_size = self.input_size\n", - " if (input_size > 0) and (n_windows > input_size):\n", - " max_sampleable_time = n_windows-self.input_size+1\n", - " start = np.random.choice(max_sampleable_time)\n", - " windows = windows[:, :, start:(start+input_size), :]\n", - "\n", - " if (step == 'val') and (self.inference_input_size>0):\n", - " cutoff = self.inference_input_size + self.val_size\n", - " windows = windows[:, :, -cutoff:, :]\n", - "\n", - " if (step == 'predict') and (self.inference_input_size>0):\n", - " cutoff = self.inference_input_size + self.test_size\n", - " windows = windows[:, :, -cutoff:, :]\n", - " \n", - " # [B, C, input_size, 1+H]\n", - " windows_batch = dict(temporal=windows,\n", - " temporal_cols=temporal_cols,\n", - " static=batch.get('static', None),\n", - " static_cols=batch.get('static_cols', None))\n", - "\n", - " return windows_batch\n", - "\n", - " def _parse_windows(self, batch, windows):\n", - " # [B, C, seq_len, 1+H]\n", - " # Filter insample lags from outsample horizon\n", - " mask_idx = batch['temporal_cols'].get_loc('available_mask')\n", - " y_idx = batch['y_idx'] \n", - " insample_y = windows['temporal'][:, y_idx, :, :-self.h]\n", - " insample_mask = windows['temporal'][:, mask_idx, :, :-self.h]\n", - " outsample_y = windows['temporal'][:, y_idx, :, -self.h:].contiguous()\n", - " outsample_mask = windows['temporal'][:, mask_idx, :, -self.h:].contiguous()\n", - "\n", - " # Filter historic exogenous variables\n", - " if len(self.hist_exog_list):\n", - " hist_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.hist_exog_list)\n", - " hist_exog = windows['temporal'][:, hist_exog_idx, :, :-self.h]\n", - " else:\n", - " hist_exog = None\n", - " \n", - " # Filter future exogenous variables\n", - " if len(self.futr_exog_list):\n", - " futr_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.futr_exog_list)\n", - " futr_exog = windows['temporal'][:, futr_exog_idx, :, :]\n", - " else:\n", - " futr_exog = None\n", - " # Filter static variables\n", - " if len(self.stat_exog_list):\n", - " static_idx = get_indexer_raise_missing(windows['static_cols'], self.stat_exog_list)\n", - " stat_exog = windows['static'][:, static_idx]\n", - " else:\n", - " stat_exog = None\n", - "\n", - " return insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog\n", - "\n", - " def training_step(self, batch, batch_idx):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - 
" batch = self._normalization(batch, val_size=self.val_size, test_size=self.test_size)\n", - " windows = self._create_windows(batch, step='train')\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [B, seq_len, 1]\n", - " insample_mask=insample_mask, # [B, seq_len, 1]\n", - " futr_exog=futr_exog, # [B, F, seq_len, 1+H]\n", - " hist_exog=hist_exog, # [B, C, seq_len]\n", - " stat_exog=stat_exog) # [B, S]\n", - "\n", - " # Model predictions\n", - " output = self(windows_batch) # tuple([B, seq_len, H, output])\n", - " if self.loss.is_distribution_output:\n", - " outsample_y, y_loc, y_scale = self._inv_normalization(y_hat=outsample_y,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=batch['y_idx'])\n", - " B = output[0].size()[0]\n", - " T = output[0].size()[1]\n", - " H = output[0].size()[2]\n", - " output = [arg.view(-1, *(arg.size()[2:])) for arg in output]\n", - " outsample_y = outsample_y.view(B*T,H)\n", - " outsample_mask = outsample_mask.view(B*T,H)\n", - " y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - " else:\n", - " loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "\n", - " if torch.isnan(loss):\n", - " print('Model Parameters', self.hparams)\n", - " print('insample_y', torch.isnan(insample_y).sum())\n", - " print('outsample_y', torch.isnan(outsample_y).sum())\n", - " print('output', torch.isnan(output).sum())\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'train_loss',\n", - " loss.detach().item(),\n", - " batch_size=outsample_y.size(0),\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.train_trajectories.append((self.global_step, loss.detach().item()))\n", - " return loss\n", - "\n", - " def validation_step(self, batch, batch_idx):\n", - " if self.val_size == 0:\n", - " return np.nan\n", - "\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " batch = self._normalization(batch, val_size=self.val_size, test_size=self.test_size)\n", - " windows = self._create_windows(batch, step='val')\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [B, seq_len, 1]\n", - " insample_mask=insample_mask, # [B, seq_len, 1]\n", - " futr_exog=futr_exog, # [B, F, seq_len, 1+H]\n", - " hist_exog=hist_exog, # [B, C, seq_len]\n", - " stat_exog=stat_exog) # [B, S]\n", - "\n", - " # Remove train y_hat (+1 and -1 for padded last window with zeros)\n", - " # tuple([B, seq_len, H, output]) -> tuple([B, validation_size, H, output])\n", - " val_windows = (self.val_size) + 1\n", - " outsample_y = outsample_y[:, -val_windows:-1, :]\n", - " outsample_mask = outsample_mask[:, -val_windows:-1, :] \n", - "\n", - " # Model predictions\n", - " output = self(windows_batch) # tuple([B, seq_len, H, output])\n", - " if self.loss.is_distribution_output:\n", - " output = [arg[:, -val_windows:-1] for arg in output]\n", - " outsample_y, y_loc, y_scale = 
self._inv_normalization(y_hat=outsample_y,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " B = output[0].size()[0]\n", - " T = output[0].size()[1]\n", - " H = output[0].size()[2]\n", - " output = [arg.reshape(-1, *(arg.size()[2:])) for arg in output]\n", - " outsample_y = outsample_y.reshape(B*T,H)\n", - " outsample_mask = outsample_mask.reshape(B*T,H)\n", - " y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " _, sample_mean, quants = self.loss.sample(distr_args=distr_args)\n", - "\n", - " if str(type(self.valid_loss)) in\\\n", - " [\"\", \"\"]:\n", - " output = quants\n", - " elif str(type(self.valid_loss)) in [\"\"]:\n", - " output = torch.unsqueeze(sample_mean, dim=-1) # [N,H,1] -> [N,H]\n", - " \n", - " else:\n", - " output = output[:, -val_windows:-1, :]\n", - "\n", - " # Validation Loss evaluation\n", - " if self.valid_loss.is_distribution_output:\n", - " valid_loss = self.valid_loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - " else:\n", - " outsample_y, _, _ = self._inv_normalization(y_hat=outsample_y, temporal_cols=batch['temporal_cols'], y_idx=y_idx)\n", - " output, _, _ = self._inv_normalization(y_hat=output, temporal_cols=batch['temporal_cols'], y_idx=y_idx)\n", - " valid_loss = self.valid_loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "\n", - " if torch.isnan(valid_loss):\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'valid_loss',\n", - " valid_loss.detach().item(),\n", - " batch_size=outsample_y.size(0),\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.validation_step_outputs.append(valid_loss)\n", - " return valid_loss\n", - "\n", - " def predict_step(self, batch, batch_idx):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " batch = self._normalization(batch, val_size=0, test_size=self.test_size)\n", - " windows = self._create_windows(batch, step='predict')\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, _, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [B, seq_len, 1]\n", - " insample_mask=insample_mask, # [B, seq_len, 1]\n", - " futr_exog=futr_exog, # [B, F, seq_len, 1+H]\n", - " hist_exog=hist_exog, # [B, C, seq_len]\n", - " stat_exog=stat_exog) # [B, S]\n", - "\n", - " # Model Predictions\n", - " output = self(windows_batch) # tuple([B, seq_len, H], ...)\n", - " if self.loss.is_distribution_output:\n", - " _, y_loc, y_scale = self._inv_normalization(y_hat=output[0],\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " B = output[0].size()[0]\n", - " T = output[0].size()[1]\n", - " H = output[0].size()[2]\n", - " output = [arg.reshape(-1, *(arg.size()[2:])) for arg in output]\n", - " y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1)\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " _, sample_mean, quants = self.loss.sample(distr_args=distr_args)\n", - " y_hat = torch.concat((sample_mean, quants), axis=2)\n", - " y_hat = y_hat.view(B, T, H, -1)\n", - "\n", - " if self.loss.return_params:\n", - " distr_args = torch.stack(distr_args, dim=-1)\n", - " distr_args = 
torch.reshape(distr_args, (B, T, H, -1))\n", - " y_hat = torch.concat((y_hat, distr_args), axis=3)\n", - " else:\n", - " y_hat, _, _ = self._inv_normalization(y_hat=output,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " return y_hat\n", - "\n", - " def fit(self, dataset, val_size=0, test_size=0, random_seed=None, distributed_config=None):\n", - " \"\"\" Fit.\n", - "\n", - " The `fit` method, optimizes the neural network's weights using the\n", - " initialization parameters (`learning_rate`, `batch_size`, ...)\n", - " and the `loss` function as defined during the initialization. \n", - " Within `fit` we use a PyTorch Lightning `Trainer` that\n", - " inherits the initialization's `self.trainer_kwargs`, to customize\n", - " its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", - "\n", - " The method is designed to be compatible with SKLearn-like classes\n", - " and in particular to be compatible with the StatsForecast library.\n", - "\n", - " By default the `model` is not saving training checkpoints to protect \n", - " disk memory, to get them change `enable_checkpointing=True` in `__init__`. \n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `val_size`: int, validation size for temporal cross-validation.
\n", - " `test_size`: int, test size for temporal cross-validation.
\n", - " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", - " \"\"\"\n", - " return self._fit(\n", - " dataset=dataset,\n", - " batch_size=self.batch_size,\n", - " valid_batch_size=self.valid_batch_size,\n", - " val_size=val_size,\n", - " test_size=test_size,\n", - " random_seed=random_seed,\n", - " distributed_config=distributed_config,\n", - " )\n", - "\n", - " def predict(self, dataset, step_size=1,\n", - " random_seed=None, **data_module_kwargs):\n", - " \"\"\" Predict.\n", - "\n", - " Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `step_size`: int=1, Step size between each window.
\n", - " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", - " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", - " \"\"\"\n", - " self._check_exog(dataset)\n", - " self._restart_seed(random_seed)\n", - " data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs)\n", - " \n", - " if step_size > 1:\n", - " raise Exception('Recurrent models do not support step_size > 1')\n", - "\n", - " # fcsts (window, batch, h)\n", - " # Protect when case of multiple gpu. PL does not support return preds with multiple gpu.\n", - " pred_trainer_kwargs = self.trainer_kwargs.copy()\n", - " if (pred_trainer_kwargs.get('accelerator', None) == \"gpu\") and (torch.cuda.device_count() > 1):\n", - " pred_trainer_kwargs['devices'] = [0]\n", - "\n", - " trainer = pl.Trainer(**pred_trainer_kwargs)\n", - "\n", - " datamodule = TimeSeriesDataModule(\n", - " dataset=dataset,\n", - " valid_batch_size=self.valid_batch_size,\n", - " num_workers=self.num_workers_loader,\n", - " **data_module_kwargs\n", - " )\n", - " fcsts = trainer.predict(self, datamodule=datamodule)\n", - " if self.test_size > 0:\n", - " # Remove warmup windows (from train and validation)\n", - " # [N,T,H,output], avoid indexing last dim for univariate output compatibility\n", - " fcsts = torch.vstack([fcst[:, -(1+self.test_size-self.h):,:] for fcst in fcsts])\n", - " fcsts = fcsts.numpy().flatten()\n", - " fcsts = fcsts.reshape(-1, len(self.loss.output_names))\n", - " else:\n", - " fcsts = torch.vstack([fcst[:,-1:,:] for fcst in fcsts]).numpy().flatten()\n", - " fcsts = fcsts.reshape(-1, len(self.loss.output_names))\n", - " return fcsts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseRecurrent, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseRecurrent.fit, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseRecurrent.predict, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.utils import AirPassengersDF\n", - "from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesDataModule" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# add h=0,1 unit test for _parse_windows \n", - "# Declare batch\n", - "AirPassengersDF['x'] = np.array(len(AirPassengersDF))\n", - "AirPassengersDF['x2'] = np.array(len(AirPassengersDF)) * 2\n", - "dataset, indices, dates, ds = TimeSeriesDataset.from_df(df=AirPassengersDF)\n", - "data = TimeSeriesDataModule(dataset=dataset, batch_size=1, drop_last=True)\n", - "\n", - "train_loader = data.train_dataloader()\n", - "batch = next(iter(train_loader))\n", - "\n", - "# Test that hist_exog_list and futr_exog_list correctly filter data that is sent to scaler.\n", - "baserecurrent = BaseRecurrent(h=12,\n", - " input_size=117,\n", - " hist_exog_list=['x', 'x2'],\n", - " futr_exog_list=['x'],\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " learning_rate=0.001,\n", - " max_steps=1,\n", - " val_check_steps=0,\n", - " batch_size=1,\n", - " valid_batch_size=1,\n", - " windows_batch_size=10,\n", - " inference_input_size=2,\n", - " 
start_padding_enabled=True)\n", - "\n", - "windows = baserecurrent._create_windows(batch, step='train')\n", - "\n", - "temporal_cols = windows['temporal_cols'].copy() # B, L+H, C\n", - "temporal_data_cols = baserecurrent._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", - "\n", - "test_eq(set(temporal_data_cols), set(['x', 'x2']))\n", - "test_eq(windows['temporal'].shape, torch.Size([1,len(['y', 'x', 'x2', 'available_mask']),117,12+1]))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "python3", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/nbs/common.base_windows.ipynb b/nbs/common.base_windows.ipynb deleted file mode 100644 index 80f12e5f5..000000000 --- a/nbs/common.base_windows.ipynb +++ /dev/null @@ -1,897 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "524620c1", - "metadata": {}, - "outputs": [], - "source": [ - "#| default_exp common._base_windows" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15392f6f", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "markdown", - "id": "1e0f9607-d12d-44e5-b2be-91a57a0bca79", - "metadata": {}, - "source": [ - "# BaseWindows\n", - "\n", - "> The `BaseWindows` class contains standard methods shared across window-based neural networks; in contrast to recurrent neural networks these models commit to a fixed sequence length input. The class is represented by `MLP`, and other more sophisticated architectures like `NBEATS`, and `NHITS`." - ] - }, - { - "cell_type": "markdown", - "id": "1730a556-1574-40ad-92a2-23b924ceb398", - "metadata": {}, - "source": [ - "The standard methods include data preprocessing `_normalization`, optimization utilities like parameter initialization, `training_step`, `validation_step`, and shared `fit` and `predict` methods.These shared methods enable all the `neuralforecast.models` compatibility with the `core.NeuralForecast` wrapper class. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2508f7a9-1433-4ad8-8f2f-0078c6ed6c3c", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44065066-e72a-431f-938f-1528adef9fe8", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import pytorch_lightning as pl\n", - "\n", - "from neuralforecast.common._base_model import BaseModel\n", - "from neuralforecast.common._scalers import TemporalNorm\n", - "from neuralforecast.tsdataset import TimeSeriesDataModule\n", - "from neuralforecast.utils import get_indexer_raise_missing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ce70cd14-ecb1-4205-8511-fecbd26c8408", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "class BaseWindows(BaseModel):\n", - " \"\"\" Base Windows\n", - " \n", - " Base class for all windows-based models. The forecasts are produced separately \n", - " for each window, which are randomly sampled during training.\n", - " \n", - " This class implements the basic functionality for all windows-based models, including:\n", - " - PyTorch Lightning's methods training_step, validation_step, predict_step.
\n", - " - fit and predict methods used by NeuralForecast.core class.
\n", - " - sampling and wrangling methods to generate windows.\n", - " \"\"\"\n", - " def __init__(self,\n", - " h,\n", - " input_size,\n", - " loss,\n", - " valid_loss,\n", - " learning_rate,\n", - " max_steps,\n", - " val_check_steps,\n", - " batch_size,\n", - " valid_batch_size,\n", - " windows_batch_size,\n", - " inference_windows_batch_size,\n", - " start_padding_enabled,\n", - " step_size=1,\n", - " num_lr_decays=0,\n", - " early_stop_patience_steps=-1,\n", - " scaler_type='identity',\n", - " futr_exog_list=None,\n", - " hist_exog_list=None,\n", - " stat_exog_list=None,\n", - " exclude_insample_y=False,\n", - " num_workers_loader=0,\n", - " drop_last_loader=False,\n", - " random_seed=1,\n", - " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", - " dataloader_kwargs=None,\n", - " **trainer_kwargs):\n", - " super().__init__(\n", - " random_seed=random_seed,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", - " max_steps=max_steps,\n", - " early_stop_patience_steps=early_stop_patience_steps, \n", - " **trainer_kwargs,\n", - " )\n", - "\n", - " # Padder to complete train windows, \n", - " # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0]\n", - " self.h = h\n", - " self.input_size = input_size\n", - " self.windows_batch_size = windows_batch_size\n", - " self.start_padding_enabled = start_padding_enabled\n", - " if start_padding_enabled:\n", - " self.padder_train = nn.ConstantPad1d(padding=(self.input_size-1, self.h), value=0.0)\n", - " else:\n", - " self.padder_train = nn.ConstantPad1d(padding=(0, self.h), value=0.0)\n", - "\n", - " # Batch sizes\n", - " self.batch_size = batch_size\n", - " if valid_batch_size is None:\n", - " self.valid_batch_size = batch_size\n", - " else:\n", - " self.valid_batch_size = valid_batch_size\n", - " if inference_windows_batch_size is None:\n", - " self.inference_windows_batch_size = windows_batch_size\n", - " else:\n", - " self.inference_windows_batch_size = inference_windows_batch_size\n", - "\n", - " # Optimization \n", - " self.learning_rate = learning_rate\n", - " self.max_steps = max_steps\n", - " self.num_lr_decays = num_lr_decays\n", - " self.lr_decay_steps = (\n", - " max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7\n", - " )\n", - " self.early_stop_patience_steps = early_stop_patience_steps\n", - " self.val_check_steps = val_check_steps\n", - " self.windows_batch_size = windows_batch_size\n", - " self.step_size = step_size\n", - " \n", - " self.exclude_insample_y = exclude_insample_y\n", - "\n", - " # Scaler\n", - " self.scaler = TemporalNorm(\n", - " scaler_type=scaler_type,\n", - " dim=1, # Time dimension is 1.\n", - " num_features=1+len(self.hist_exog_list)+len(self.futr_exog_list)\n", - " )\n", - "\n", - " # Fit arguments\n", - " self.val_size = 0\n", - " self.test_size = 0\n", - "\n", - " # Model state\n", - " self.decompose_forecast = False\n", - "\n", - " # DataModule arguments\n", - " self.num_workers_loader = num_workers_loader\n", - " self.dataloader_kwargs = dataloader_kwargs\n", - " self.drop_last_loader = drop_last_loader\n", - " # used by on_validation_epoch_end hook\n", - " self.validation_step_outputs = []\n", - " self.alias = alias\n", - "\n", - " 
def _create_windows(self, batch, step, w_idxs=None):\n", - " # Parse common data\n", - " window_size = self.input_size + self.h\n", - " temporal_cols = batch['temporal_cols']\n", - " temporal = batch['temporal']\n", - "\n", - " if step == 'train':\n", - " if self.val_size + self.test_size > 0:\n", - " cutoff = -self.val_size - self.test_size\n", - " temporal = temporal[:, :, :cutoff]\n", - "\n", - " temporal = self.padder_train(temporal)\n", - " if temporal.shape[-1] < window_size:\n", - " raise Exception('Time series is too short for training, consider setting a smaller input size or set start_padding_enabled=True')\n", - " windows = temporal.unfold(dimension=-1, \n", - " size=window_size, \n", - " step=self.step_size)\n", - "\n", - " # [B, C, Ws, L+H] 0, 1, 2, 3\n", - " # -> [B * Ws, L+H, C] 0, 2, 3, 1\n", - " windows_per_serie = windows.shape[2]\n", - " windows = windows.permute(0, 2, 3, 1).contiguous()\n", - " windows = windows.reshape(-1, window_size, len(temporal_cols))\n", - "\n", - " # Sample and Available conditions\n", - " available_idx = temporal_cols.get_loc('available_mask')\n", - " available_condition = windows[:, :self.input_size, available_idx]\n", - " available_condition = torch.sum(available_condition, axis=1)\n", - " final_condition = (available_condition > 0)\n", - " if self.h > 0:\n", - " sample_condition = windows[:, self.input_size:, available_idx]\n", - " sample_condition = torch.sum(sample_condition, axis=1)\n", - " final_condition = (sample_condition > 0) & (available_condition > 0)\n", - " windows = windows[final_condition]\n", - "\n", - " # Parse Static data to match windows\n", - " # [B, S_in] -> [B, Ws, S_in] -> [B*Ws, S_in]\n", - " static = batch.get('static', None)\n", - " static_cols=batch.get('static_cols', None)\n", - " if static is not None:\n", - " static = torch.repeat_interleave(static, \n", - " repeats=windows_per_serie, dim=0)\n", - " static = static[final_condition]\n", - "\n", - " # Protection of empty windows\n", - " if final_condition.sum() == 0:\n", - " raise Exception('No windows available for training')\n", - "\n", - " # Sample windows\n", - " n_windows = len(windows)\n", - " if self.windows_batch_size is not None:\n", - " w_idxs = np.random.choice(n_windows, \n", - " size=self.windows_batch_size,\n", - " replace=(n_windows < self.windows_batch_size))\n", - " windows = windows[w_idxs]\n", - " \n", - " if static is not None:\n", - " static = static[w_idxs]\n", - "\n", - " # think about interaction available * sample mask\n", - " # [B, C, Ws, L+H]\n", - " windows_batch = dict(temporal=windows,\n", - " temporal_cols=temporal_cols,\n", - " static=static,\n", - " static_cols=static_cols)\n", - " return windows_batch\n", - "\n", - " elif step in ['predict', 'val']:\n", - "\n", - " if step == 'predict':\n", - " initial_input = temporal.shape[-1] - self.test_size\n", - " if initial_input <= self.input_size: # There is not enough data to predict first timestamp\n", - " padder_left = nn.ConstantPad1d(padding=(self.input_size-initial_input, 0), value=0.0)\n", - " temporal = padder_left(temporal)\n", - " predict_step_size = self.predict_step_size\n", - " cutoff = - self.input_size - self.test_size\n", - " temporal = temporal[:, :, cutoff:]\n", - "\n", - " elif step == 'val':\n", - " predict_step_size = self.step_size\n", - " cutoff = -self.input_size - self.val_size - self.test_size\n", - " if self.test_size > 0:\n", - " temporal = batch['temporal'][:, :, cutoff:-self.test_size]\n", - " else:\n", - " temporal = batch['temporal'][:, :, cutoff:]\n", - " if 
temporal.shape[-1] < window_size:\n", - " initial_input = temporal.shape[-1] - self.val_size\n", - " padder_left = nn.ConstantPad1d(padding=(self.input_size-initial_input, 0), value=0.0)\n", - " temporal = padder_left(temporal)\n", - "\n", - " if (step=='predict') and (self.test_size==0) and (len(self.futr_exog_list)==0):\n", - " padder_right = nn.ConstantPad1d(padding=(0, self.h), value=0.0)\n", - " temporal = padder_right(temporal)\n", - "\n", - " windows = temporal.unfold(dimension=-1,\n", - " size=window_size,\n", - " step=predict_step_size)\n", - "\n", - " # [batch, channels, windows, window_size] 0, 1, 2, 3\n", - " # -> [batch * windows, window_size, channels] 0, 2, 3, 1\n", - " windows_per_serie = windows.shape[2]\n", - " windows = windows.permute(0, 2, 3, 1).contiguous()\n", - " windows = windows.reshape(-1, window_size, len(temporal_cols))\n", - "\n", - " static = batch.get('static', None)\n", - " static_cols=batch.get('static_cols', None)\n", - " if static is not None:\n", - " static = torch.repeat_interleave(static, \n", - " repeats=windows_per_serie, dim=0)\n", - " \n", - " # Sample windows for batched prediction\n", - " if w_idxs is not None:\n", - " windows = windows[w_idxs]\n", - " if static is not None:\n", - " static = static[w_idxs]\n", - " \n", - " windows_batch = dict(temporal=windows,\n", - " temporal_cols=temporal_cols,\n", - " static=static,\n", - " static_cols=static_cols)\n", - " return windows_batch\n", - " else:\n", - " raise ValueError(f'Unknown step {step}')\n", - "\n", - " def _normalization(self, windows, y_idx):\n", - " # windows are already filtered by train/validation/test\n", - " # from the `create_windows_method` nor leakage risk\n", - " temporal = windows['temporal'] # B, L+H, C\n", - " temporal_cols = windows['temporal_cols'].copy() # B, L+H, C\n", - "\n", - " # To avoid leakage uses only the lags\n", - " #temporal_data_cols = temporal_cols.drop('available_mask').tolist()\n", - " temporal_data_cols = self._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", - " temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols)\n", - " temporal_idxs = np.append(y_idx, temporal_idxs)\n", - " temporal_data = temporal[:, :, temporal_idxs]\n", - " temporal_mask = temporal[:, :, temporal_cols.get_loc('available_mask')].clone()\n", - " if self.h > 0:\n", - " temporal_mask[:, -self.h:] = 0.0\n", - "\n", - " # Normalize. 
self.scaler stores the shift and scale for inverse transform\n", - " temporal_mask = temporal_mask.unsqueeze(-1) # Add channel dimension for scaler.transform.\n", - " temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask)\n", - "\n", - " # Replace values in windows dict\n", - " temporal[:, :, temporal_idxs] = temporal_data\n", - " windows['temporal'] = temporal\n", - "\n", - " return windows\n", - "\n", - " def _inv_normalization(self, y_hat, temporal_cols, y_idx):\n", - " # Receives window predictions [B, H, output]\n", - " # Broadcasts outputs and inverts normalization\n", - "\n", - " # Add C dimension\n", - " if y_hat.ndim == 2:\n", - " remove_dimension = True\n", - " y_hat = y_hat.unsqueeze(-1)\n", - " else:\n", - " remove_dimension = False\n", - "\n", - " y_scale = self.scaler.x_scale[:, :, [y_idx]]\n", - " y_loc = self.scaler.x_shift[:, :, [y_idx]]\n", - "\n", - " y_scale = torch.repeat_interleave(y_scale, repeats=y_hat.shape[-1], dim=-1).to(y_hat.device)\n", - " y_loc = torch.repeat_interleave(y_loc, repeats=y_hat.shape[-1], dim=-1).to(y_hat.device)\n", - "\n", - " y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc)\n", - " y_loc = y_loc.to(y_hat.device)\n", - " y_scale = y_scale.to(y_hat.device)\n", - " \n", - " if remove_dimension:\n", - " y_hat = y_hat.squeeze(-1)\n", - " y_loc = y_loc.squeeze(-1)\n", - " y_scale = y_scale.squeeze(-1)\n", - "\n", - " return y_hat, y_loc, y_scale\n", - "\n", - " def _parse_windows(self, batch, windows):\n", - " # Filter insample lags from outsample horizon\n", - " y_idx = batch['y_idx']\n", - " mask_idx = batch['temporal_cols'].get_loc('available_mask')\n", - "\n", - " insample_y = windows['temporal'][:, :self.input_size, y_idx]\n", - " insample_mask = windows['temporal'][:, :self.input_size, mask_idx]\n", - "\n", - " # Declare additional information\n", - " outsample_y = None\n", - " outsample_mask = None\n", - " hist_exog = None\n", - " futr_exog = None\n", - " stat_exog = None\n", - "\n", - " if self.h > 0:\n", - " outsample_y = windows['temporal'][:, self.input_size:, y_idx]\n", - " outsample_mask = windows['temporal'][:, self.input_size:, mask_idx]\n", - "\n", - " if len(self.hist_exog_list):\n", - " hist_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.hist_exog_list)\n", - " hist_exog = windows['temporal'][:, :self.input_size, hist_exog_idx]\n", - "\n", - " if len(self.futr_exog_list):\n", - " futr_exog_idx = get_indexer_raise_missing(windows['temporal_cols'], self.futr_exog_list)\n", - " futr_exog = windows['temporal'][:, :, futr_exog_idx]\n", - "\n", - " if len(self.stat_exog_list):\n", - " static_idx = get_indexer_raise_missing(windows['static_cols'], self.stat_exog_list)\n", - " stat_exog = windows['static'][:, static_idx]\n", - "\n", - " # TODO: think a better way of removing insample_y features\n", - " if self.exclude_insample_y:\n", - " insample_y = insample_y * 0\n", - "\n", - " return insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog\n", - "\n", - " def training_step(self, batch, batch_idx):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " windows = self._create_windows(batch, step='train')\n", - " y_idx = batch['y_idx']\n", - " original_outsample_y = torch.clone(windows['temporal'][:,-self.h:,y_idx])\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = 
self._parse_windows(batch, windows)\n", - "\n", - "        windows_batch = dict(insample_y=insample_y, # [Ws, L]\n", - "                             insample_mask=insample_mask, # [Ws, L]\n", - "                             futr_exog=futr_exog, # [Ws, L + h, F]\n", - "                             hist_exog=hist_exog, # [Ws, L, X]\n", - "                             stat_exog=stat_exog) # [Ws, S]\n", - "\n", - "        # Model Predictions\n", - "        output = self(windows_batch)\n", - "        if self.loss.is_distribution_output:\n", - "            _, y_loc, y_scale = self._inv_normalization(y_hat=outsample_y,\n", - "                                                        temporal_cols=batch['temporal_cols'],\n", - "                                                        y_idx=y_idx)\n", - "            outsample_y = original_outsample_y\n", - "            distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - "            loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - "        else:\n", - "            loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "\n", - "        if torch.isnan(loss):\n", - "            print('Model Parameters', self.hparams)\n", - "            print('insample_y', torch.isnan(insample_y).sum())\n", - "            print('outsample_y', torch.isnan(outsample_y).sum())\n", - "            print('output', torch.isnan(output).sum())\n", - "            raise Exception('Loss is NaN, training stopped.')\n", - "\n", - "        self.log(\n", - "            'train_loss',\n", - "            loss.detach().item(),\n", - "            batch_size=outsample_y.size(0),\n", - "            prog_bar=True,\n", - "            on_epoch=True,\n", - "        )\n", - "        self.train_trajectories.append((self.global_step, loss.detach().item()))\n", - "        return loss\n", - "\n", - "    def _compute_valid_loss(self, outsample_y, output, outsample_mask, temporal_cols, y_idx):\n", - "        if self.loss.is_distribution_output:\n", - "            _, y_loc, y_scale = self._inv_normalization(y_hat=outsample_y,\n", - "                                                        temporal_cols=temporal_cols,\n", - "                                                        y_idx=y_idx)\n", - "            distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - "            _, sample_mean, quants = self.loss.sample(distr_args=distr_args)\n", - "\n", - "            if str(type(self.valid_loss)) in\\\n", - "                [\"<class 'neuralforecast.losses.pytorch.sCRPS'>\", \"<class 'neuralforecast.losses.pytorch.MQLoss'>\"]:\n", - "                output = quants\n", - "            elif str(type(self.valid_loss)) in [\"<class 'neuralforecast.losses.pytorch.relMSE'>\"]:\n", - "                output = torch.unsqueeze(sample_mean, dim=-1) # [N,H] -> [N,H,1]\n", - "\n", - "        # Validation Loss evaluation\n", - "        if self.valid_loss.is_distribution_output:\n", - "            valid_loss = self.valid_loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask)\n", - "        else:\n", - "            output, _, _ = self._inv_normalization(y_hat=output,\n", - "                                                   temporal_cols=temporal_cols,\n", - "                                                   y_idx=y_idx)\n", - "            valid_loss = self.valid_loss(y=outsample_y, y_hat=output, mask=outsample_mask)\n", - "        return valid_loss\n", - " \n", - "    def validation_step(self, batch, batch_idx):\n", - "        if self.val_size == 0:\n", - "            return np.nan\n", - "\n", - "        # TODO: Hack to compute number of windows\n", - "        windows = self._create_windows(batch, step='val')\n", - "        n_windows = len(windows['temporal'])\n", - "        y_idx = batch['y_idx']\n", - "\n", - "        # Number of windows in batch\n", - "        windows_batch_size = self.inference_windows_batch_size\n", - "        if windows_batch_size < 0:\n", - "            windows_batch_size = n_windows\n", - "        n_batches = int(np.ceil(n_windows/windows_batch_size))\n", - "\n", - "        valid_losses = []\n", - "        batch_sizes = []\n", - "        for i in range(n_batches):\n", - "            # Create and normalize windows [Ws, L+H, C]\n", - "            w_idxs = np.arange(i*windows_batch_size, \n", - "                               min((i+1)*windows_batch_size, n_windows))\n", - "            windows = self._create_windows(batch, step='val', w_idxs=w_idxs)\n", - "            original_outsample_y = torch.clone(windows['temporal'][:,-self.h:,y_idx])\n", - "            windows = self._normalization(windows=windows, y_idx=y_idx)\n", - 
"\n", - " # Parse windows\n", - " insample_y, insample_mask, _, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L]\n", - " insample_mask=insample_mask, # [Ws, L]\n", - " futr_exog=futr_exog, # [Ws, L + h, F]\n", - " hist_exog=hist_exog, # [Ws, L, X]\n", - " stat_exog=stat_exog) # [Ws, S]\n", - " \n", - " # Model Predictions\n", - " output_batch = self(windows_batch)\n", - " valid_loss_batch = self._compute_valid_loss(outsample_y=original_outsample_y,\n", - " output=output_batch, outsample_mask=outsample_mask,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=batch['y_idx'])\n", - " valid_losses.append(valid_loss_batch)\n", - " batch_sizes.append(len(output_batch))\n", - " \n", - " valid_loss = torch.stack(valid_losses)\n", - " batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device)\n", - " batch_size = torch.sum(batch_sizes)\n", - " valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size\n", - "\n", - " if torch.isnan(valid_loss):\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'valid_loss',\n", - " valid_loss.detach().item(),\n", - " batch_size=batch_size,\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.validation_step_outputs.append(valid_loss)\n", - " return valid_loss\n", - "\n", - " def predict_step(self, batch, batch_idx):\n", - "\n", - " # TODO: Hack to compute number of windows\n", - " windows = self._create_windows(batch, step='predict')\n", - " n_windows = len(windows['temporal'])\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Number of windows in batch\n", - " windows_batch_size = self.inference_windows_batch_size\n", - " if windows_batch_size < 0:\n", - " windows_batch_size = n_windows\n", - " n_batches = int(np.ceil(n_windows/windows_batch_size))\n", - "\n", - " y_hats = []\n", - " for i in range(n_batches):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " w_idxs = np.arange(i*windows_batch_size, \n", - " min((i+1)*windows_batch_size, n_windows))\n", - " windows = self._create_windows(batch, step='predict', w_idxs=w_idxs)\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, _, \\\n", - " hist_exog, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L]\n", - " insample_mask=insample_mask, # [Ws, L]\n", - " futr_exog=futr_exog, # [Ws, L + h, F]\n", - " hist_exog=hist_exog, # [Ws, L, X]\n", - " stat_exog=stat_exog) # [Ws, S] \n", - "\n", - " # Model Predictions\n", - " output_batch = self(windows_batch)\n", - " # Inverse normalization and sampling\n", - " if self.loss.is_distribution_output:\n", - " _, y_loc, y_scale = self._inv_normalization(y_hat=torch.empty(size=(insample_y.shape[0], self.h),\n", - " dtype=output_batch[0].dtype,\n", - " device=output_batch[0].device),\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " distr_args = self.loss.scale_decouple(output=output_batch, loc=y_loc, scale=y_scale)\n", - " _, sample_mean, quants = self.loss.sample(distr_args=distr_args)\n", - " y_hat = torch.concat((sample_mean, quants), axis=2)\n", - "\n", - " if self.loss.return_params:\n", - " distr_args = torch.stack(distr_args, dim=-1)\n", - " distr_args = torch.reshape(distr_args, (len(windows[\"temporal\"]), self.h, -1))\n", - " y_hat = torch.concat((y_hat, distr_args), axis=2)\n", - " 
else:\n", - "                y_hat, _, _ = self._inv_normalization(y_hat=output_batch,\n", - "                                                      temporal_cols=batch['temporal_cols'],\n", - "                                                      y_idx=y_idx)\n", - "            y_hats.append(y_hat)\n", - "        y_hat = torch.cat(y_hats, dim=0)\n", - "        return y_hat\n", - " \n", - "    def fit(self, dataset, val_size=0, test_size=0, random_seed=None, distributed_config=None):\n", - "        \"\"\" Fit.\n", - "\n", - "        The `fit` method optimizes the neural network's weights using the\n", - "        initialization parameters (`learning_rate`, `windows_batch_size`, ...)\n", - "        and the `loss` function defined during initialization. \n", - "        Within `fit` we use a PyTorch Lightning `Trainer` that\n", - "        inherits the initialization's `self.trainer_kwargs` to customize\n", - "        its inputs; see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", - "\n", - "        The method is designed to be compatible with SKLearn-like classes,\n", - "        and in particular with the StatsForecast library.\n", - "\n", - "        By default the `model` does not save training checkpoints to conserve\n", - "        disk space; to enable them set `enable_checkpointing=True` in `__init__`.\n", - "\n", - "        **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `val_size`: int, validation size for temporal cross-validation.
\n", - " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", - " `test_size`: int, test size for temporal cross-validation.
\n", - " \"\"\"\n", - " return self._fit(\n", - " dataset=dataset,\n", - " batch_size=self.batch_size,\n", - " valid_batch_size=self.valid_batch_size,\n", - " val_size=val_size,\n", - " test_size=test_size,\n", - " random_seed=random_seed,\n", - " distributed_config=distributed_config,\n", - " )\n", - "\n", - " def predict(self, dataset, test_size=None, step_size=1,\n", - " random_seed=None, **data_module_kwargs):\n", - " \"\"\" Predict.\n", - "\n", - " Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `test_size`: int=None, test size for temporal cross-validation.
\n", - " `step_size`: int=1, Step size between each window.
\n", - " `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", - " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", - " \"\"\"\n", - " self._check_exog(dataset)\n", - " self._restart_seed(random_seed)\n", - " data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs)\n", - "\n", - " self.predict_step_size = step_size\n", - " self.decompose_forecast = False\n", - " datamodule = TimeSeriesDataModule(dataset=dataset,\n", - " valid_batch_size=self.valid_batch_size,\n", - " **data_module_kwargs)\n", - "\n", - " # Protect when case of multiple gpu. PL does not support return preds with multiple gpu.\n", - " pred_trainer_kwargs = self.trainer_kwargs.copy()\n", - " if (pred_trainer_kwargs.get('accelerator', None) == \"gpu\") and (torch.cuda.device_count() > 1):\n", - " pred_trainer_kwargs['devices'] = [0]\n", - "\n", - " trainer = pl.Trainer(**pred_trainer_kwargs)\n", - " fcsts = trainer.predict(self, datamodule=datamodule) \n", - " fcsts = torch.vstack(fcsts).numpy().flatten()\n", - " fcsts = fcsts.reshape(-1, len(self.loss.output_names))\n", - " return fcsts\n", - "\n", - " def decompose(self, dataset, step_size=1, random_seed=None, **data_module_kwargs):\n", - " \"\"\" Decompose Predictions.\n", - "\n", - " Decompose the predictions through the network's layers.\n", - " Available methods are `ESRNN`, `NHITS`, `NBEATS`, and `NBEATSx`.\n", - "\n", - " **Parameters:**
\n", - " `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation here](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", - " `step_size`: int=1, step size between each window of temporal data.
\n", - " `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).\n", - " \"\"\"\n", - " # Restart random seed\n", - " if random_seed is None:\n", - " random_seed = self.random_seed\n", - " torch.manual_seed(random_seed)\n", - " data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs)\n", - "\n", - " self.predict_step_size = step_size\n", - " self.decompose_forecast = True\n", - " datamodule = TimeSeriesDataModule(dataset=dataset,\n", - " valid_batch_size=self.valid_batch_size,\n", - " **data_module_kwargs)\n", - " trainer = pl.Trainer(**self.trainer_kwargs)\n", - " fcsts = trainer.predict(self, datamodule=datamodule)\n", - " self.decompose_forecast = False # Default decomposition back to false\n", - " return torch.vstack(fcsts).numpy()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1712ea15", - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseWindows, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48063f70", - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseWindows.fit, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75529be6", - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseWindows.predict, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a1f8315d", - "metadata": {}, - "outputs": [], - "source": [ - "show_doc(BaseWindows.decompose, title_level=3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8927f2e5-f376-4c99-bb8f-8cbb73efe01e", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.utils import AirPassengersDF\n", - "from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesDataModule" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61490e69-f014-4087-83c5-540d5bd7d458", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# add h=0,1 unit test for _parse_windows \n", - "# Declare batch\n", - "AirPassengersDF['x'] = np.array(len(AirPassengersDF))\n", - "AirPassengersDF['x2'] = np.array(len(AirPassengersDF)) * 2\n", - "dataset, indices, dates, ds = TimeSeriesDataset.from_df(df=AirPassengersDF)\n", - "data = TimeSeriesDataModule(dataset=dataset, batch_size=1, drop_last=True)\n", - "\n", - "train_loader = data.train_dataloader()\n", - "batch = next(iter(train_loader))\n", - "\n", - "# Instantiate BaseWindows to test _parse_windows method h in [0,1]\n", - "for h in [0, 1]:\n", - " basewindows = BaseWindows(h=h,\n", - " input_size=len(AirPassengersDF)-h,\n", - " hist_exog_list=['x'],\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " learning_rate=0.001,\n", - " max_steps=1,\n", - " val_check_steps=0,\n", - " batch_size=1,\n", - " valid_batch_size=1,\n", - " windows_batch_size=1,\n", - " inference_windows_batch_size=1,\n", - " start_padding_enabled=False)\n", - "\n", - " windows = basewindows._create_windows(batch, step='train')\n", - " original_outsample_y = torch.clone(windows['temporal'][:,-basewindows.h:,0])\n", - " windows = basewindows._normalization(windows=windows, y_idx=0)\n", - "\n", - " insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = basewindows._parse_windows(batch, windows)\n", - "\n", - " # Check equality of parsed and original insample_y\n", - " 
parsed_insample_y = insample_y.numpy().flatten()\n", - " original_insample_y = AirPassengersDF.y.values\n", - " test_eq(parsed_insample_y, original_insample_y[:basewindows.input_size])\n", - "\n", - " # Check equality of parsed and original hist_exog\n", - " parsed_hist_exog = hist_exog.numpy().flatten()\n", - " original_hist_exog = AirPassengersDF.x.values\n", - " test_eq(parsed_hist_exog, original_hist_exog[:basewindows.input_size])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "86ab58a9", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# Test that start_padding_enabled=True solves the problem of short series\n", - "h = 12\n", - "basewindows = BaseWindows(h=h,\n", - " input_size=500,\n", - " hist_exog_list=['x'],\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " learning_rate=0.001,\n", - " max_steps=1,\n", - " val_check_steps=0,\n", - " batch_size=1,\n", - " valid_batch_size=1,\n", - " windows_batch_size=10,\n", - " inference_windows_batch_size=2,\n", - " start_padding_enabled=True)\n", - "\n", - "windows = basewindows._create_windows(batch, step='train')\n", - "windows = basewindows._normalization(windows=windows, y_idx=0)\n", - "insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = basewindows._parse_windows(batch, windows)\n", - "\n", - "basewindows.val_size = 12\n", - "windows = basewindows._create_windows(batch, step='val')\n", - "windows = basewindows._normalization(windows=windows, y_idx=0)\n", - "insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = basewindows._parse_windows(batch, windows)\n", - "\n", - "basewindows.test_size = 12\n", - "basewindows.predict_step_size = 1\n", - "windows = basewindows._create_windows(batch, step='predict')\n", - "windows = basewindows._normalization(windows=windows, y_idx=0)\n", - "insample_y, insample_mask, outsample_y, outsample_mask, \\\n", - " hist_exog, futr_exog, stat_exog = basewindows._parse_windows(batch, windows)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54d2e850", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "\n", - "# Test that hist_exog_list and futr_exog_list correctly filter data.\n", - "# that is sent to scaler.\n", - "basewindows = BaseWindows(h=12,\n", - " input_size=500,\n", - " hist_exog_list=['x', 'x2'],\n", - " futr_exog_list=['x'],\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " learning_rate=0.001,\n", - " max_steps=1,\n", - " val_check_steps=0,\n", - " batch_size=1,\n", - " valid_batch_size=1,\n", - " windows_batch_size=10,\n", - " inference_windows_batch_size=2,\n", - " start_padding_enabled=True)\n", - "\n", - "windows = basewindows._create_windows(batch, step='train')\n", - "\n", - "temporal_cols = windows['temporal_cols'].copy() # B, L+H, C\n", - "temporal_data_cols = basewindows._get_temporal_exogenous_cols(temporal_cols=temporal_cols)\n", - "\n", - "test_eq(set(temporal_data_cols), set(['x', 'x2']))\n", - "test_eq(windows['temporal'].shape, torch.Size([10,500+12,len(['y', 'x', 'x2', 'available_mask'])]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bf493ff9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "python3", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/nbs/common.model_checks.ipynb b/nbs/common.model_checks.ipynb new file mode 100644 index 
000000000..d618c5c33 --- /dev/null +++ b/nbs/common.model_checks.ipynb @@ -0,0 +1,248 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| default_exp common._model_checks" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 1. Checks for models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This file provides a set of unit tests for all models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "import pandas as pd\n", + "import neuralforecast.losses.pytorch as losses\n", + "\n", + "from neuralforecast import NeuralForecast\n", + "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, generate_series" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "seed = 0\n", + "test_size = 14\n", + "FREQ = \"D\"\n", + "\n", + "# 1 series, no exogenous\n", + "N_SERIES_1 = 1\n", + "df = generate_series(n_series=N_SERIES_1, seed=seed, freq=FREQ, equal_ends=True)\n", + "max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ)\n", + "Y_TRAIN_DF_1 = df[df.ds < max_ds]\n", + "Y_TEST_DF_1 = df[df.ds >= max_ds]\n", + "\n", + "# 5 series, no exogenous\n", + "N_SERIES_2 = 5\n", + "df = generate_series(n_series=N_SERIES_2, seed=seed, freq=FREQ, equal_ends=True)\n", + "max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ)\n", + "Y_TRAIN_DF_2 = df[df.ds < max_ds]\n", + "Y_TEST_DF_2 = df[df.ds >= max_ds]\n", + "\n", + "# 1 series, with static and temporal exogenous\n", + "N_SERIES_3 = 1\n", + "df, STATIC_3 = generate_series(n_series=N_SERIES_3, n_static_features=2, \n", + " n_temporal_features=2, seed=seed, freq=FREQ, equal_ends=True)\n", + "max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ)\n", + "Y_TRAIN_DF_3 = df[df.ds < max_ds]\n", + "Y_TEST_DF_3 = df[df.ds >= max_ds]\n", + "\n", + "# 5 series, with static and temporal exogenous\n", + "N_SERIES_4 = 5\n", + "df, STATIC_4 = generate_series(n_series=N_SERIES_4, n_static_features=2, \n", + " n_temporal_features=2, seed=seed, freq=FREQ, equal_ends=True)\n", + "max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ)\n", + "Y_TRAIN_DF_4 = df[df.ds < max_ds]\n", + "Y_TEST_DF_4 = df[df.ds >= max_ds]\n", + "\n", + "# Generic test for a given config for a model\n", + "def _run_model_tests(model_class, config):\n", + " if model_class.RECURRENT:\n", + " config[\"inference_input_size\"] = config[\"input_size\"]\n", + "\n", + " # DF_1\n", + " if model_class.MULTIVARIATE:\n", + " config[\"n_series\"] = N_SERIES_1\n", + " if isinstance(config[\"loss\"], losses.relMSE):\n", + " config[\"loss\"].y_train = Y_TRAIN_DF_1[\"y\"].values \n", + " if isinstance(config[\"valid_loss\"], losses.relMSE):\n", + " config[\"valid_loss\"].y_train = Y_TRAIN_DF_1[\"y\"].values \n", + "\n", + " model = model_class(**config)\n", + " fcst = NeuralForecast(models=[model], freq=FREQ)\n", + " fcst.fit(df=Y_TRAIN_DF_1, val_size=24)\n", + " _ = fcst.predict(futr_df=Y_TEST_DF_1)\n", + " # DF_2\n", + " if model_class.MULTIVARIATE:\n", + " config[\"n_series\"] = N_SERIES_2\n", + " if isinstance(config[\"loss\"], losses.relMSE):\n", + " config[\"loss\"].y_train = Y_TRAIN_DF_2[\"y\"].values \n", + " if isinstance(config[\"valid_loss\"], 
losses.relMSE):\n", + "            config[\"valid_loss\"].y_train = Y_TRAIN_DF_2[\"y\"].values\n", + "    model = model_class(**config)\n", + "    fcst = NeuralForecast(models=[model], freq=FREQ)\n", + "    fcst.fit(df=Y_TRAIN_DF_2, val_size=24)\n", + "    _ = fcst.predict(futr_df=Y_TEST_DF_2)\n", + "\n", + "    if model.EXOGENOUS_STAT and model.EXOGENOUS_FUTR:\n", + "        # DF_3\n", + "        if model_class.MULTIVARIATE:\n", + "            config[\"n_series\"] = N_SERIES_3\n", + "        if isinstance(config[\"loss\"], losses.relMSE):\n", + "            config[\"loss\"].y_train = Y_TRAIN_DF_3[\"y\"].values \n", + "        if isinstance(config[\"valid_loss\"], losses.relMSE):\n", + "            config[\"valid_loss\"].y_train = Y_TRAIN_DF_3[\"y\"].values\n", + "        model = model_class(**config)\n", + "        fcst = NeuralForecast(models=[model], freq=FREQ)\n", + "        fcst.fit(df=Y_TRAIN_DF_3, static_df=STATIC_3, val_size=24)\n", + "        _ = fcst.predict(futr_df=Y_TEST_DF_3)\n", + "\n", + "        # DF_4\n", + "        if model_class.MULTIVARIATE:\n", + "            config[\"n_series\"] = N_SERIES_4\n", + "        if isinstance(config[\"loss\"], losses.relMSE):\n", + "            config[\"loss\"].y_train = Y_TRAIN_DF_4[\"y\"].values \n", + "        if isinstance(config[\"valid_loss\"], losses.relMSE):\n", + "            config[\"valid_loss\"].y_train = Y_TRAIN_DF_4[\"y\"].values \n", + "        model = model_class(**config)\n", + "        fcst = NeuralForecast(models=[model], freq=FREQ)\n", + "        fcst.fit(df=Y_TRAIN_DF_4, static_df=STATIC_4, val_size=24)\n", + "        _ = fcst.predict(futr_df=Y_TEST_DF_4) \n", + "\n", + "# Tests a model against every loss function\n", + "def check_loss_functions(model_class):\n", + "    loss_list = [losses.MAE(), losses.MSE(), losses.RMSE(), losses.MAPE(), losses.SMAPE(), losses.MASE(seasonality=7), \n", + "                 losses.QuantileLoss(q=0.5), losses.MQLoss(), losses.IQLoss(), losses.DistributionLoss(\"Normal\"), \n", + "                 losses.DistributionLoss(\"StudentT\"), losses.DistributionLoss(\"Poisson\"), losses.DistributionLoss(\"NegativeBinomial\"), \n", + "                 losses.DistributionLoss(\"Tweedie\", rho=1.5), losses.DistributionLoss(\"ISQF\"), losses.PMM(), losses.PMM(weighted=True), \n", + "                 losses.GMM(), losses.GMM(weighted=True), losses.NBMM(), losses.NBMM(weighted=True), losses.HuberLoss(), \n", + "                 losses.TukeyLoss(), losses.HuberQLoss(q=0.5), losses.HuberMQLoss()]\n", + "    for loss in loss_list:\n", + "        test_name = f\"{model_class.__name__}: checking {loss._get_name()}\"\n", + "        print(f\"{test_name}\")\n", + "        config = {'max_steps': 2,\n", + "                  'h': 7,\n", + "                  'input_size': 28,\n", + "                  'loss': loss,\n", + "                  'valid_loss': None,\n", + "                  'enable_progress_bar': False,\n", + "                  'enable_model_summary': False,\n", + "                  'val_check_steps': 2} \n", + "        try:\n", + "            _run_model_tests(model_class, config) \n", + "        except RuntimeError:\n", + "            raise Exception(f\"{test_name} failed.\")\n", + "        except Exception:\n", + "            print(f\"{test_name} skipped on raised Exception.\")\n", + "            pass\n", + "\n", + "# Tests a model against the AirPassengers dataset\n", + "def check_airpassengers(model_class):\n", + "    print(f\"{model_class.__name__}: checking forecast AirPassengers dataset\")\n", + "    Y_train_df = AirPassengersPanel[AirPassengersPanel.ds<AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 132 train\n", + "    Y_test_df = AirPassengersPanel[AirPassengersPanel.ds>=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", + "\n", + "    config = {'max_steps': 2,\n", + "              'h': 12,\n", + "              'input_size': 24,\n", + "              'enable_progress_bar': False,\n", + "              'enable_model_summary': False,\n", + "              'val_check_steps': 2,\n", + "              }\n", + "\n", + "    if model_class.MULTIVARIATE:\n", + "        config[\"n_series\"] = Y_train_df[\"unique_id\"].nunique()\n", + "    # Normal forecast\n", + "    fcst = 
NeuralForecast(models=[model_class(**config)], freq='M')\n", + " fcst.fit(df=Y_train_df, static_df=AirPassengersStatic)\n", + " _ = fcst.predict(futr_df=Y_test_df) \n", + "\n", + " # Cross-validation\n", + " fcst = NeuralForecast(models=[model_class(**config)], freq='M')\n", + " _ = fcst.cross_validation(df=AirPassengersPanel, static_df=AirPassengersStatic, n_windows=2, step_size=12)\n", + "\n", + "# Add unit test functions to this function\n", + "def check_model(model_class, checks=[\"losses\", \"airpassengers\"]):\n", + " \"\"\"\n", + " Check model with various tests. Options for checks are:
\n", + " \"losses\": test the model against all loss functions
\n", + " \"airpassengers\": test the model against the airpassengers dataset for forecasting and cross-validation
\n", + " \n", + " \"\"\"\n", + " if \"losses\" in checks:\n", + " check_loss_functions(model_class) \n", + " if \"airpassengers\" in checks:\n", + " try:\n", + " check_airpassengers(model_class) \n", + " except RuntimeError:\n", + " raise Exception(f\"{model_class.__name__}: AirPassengers forecast test failed.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| eval: false\n", + "#| hide\n", + "# Run tests in this file. This is a slow test\n", + "import warnings\n", + "import logging\n", + "from neuralforecast.models import RNN, GRU, TCN, LSTM, DeepAR, DilatedRNN, BiTCN, MLP, NBEATS, NBEATSx, NHITS, DLinear, NLinear, TiDE, DeepNPTS, TFT, VanillaTransformer, Informer, Autoformer, FEDformer, TimesNet, iTransformer, KAN, RMoK, StemGNN, TSMixer, TSMixerx, MLPMultivariate, SOFTS, TimeMixer\n", + "\n", + "models = [RNN, GRU, TCN, LSTM, DeepAR, DilatedRNN, BiTCN, MLP, NBEATS, NBEATSx, NHITS, DLinear, NLinear, TiDE, DeepNPTS, TFT, VanillaTransformer, Informer, Autoformer, FEDformer, TimesNet, iTransformer, KAN, RMoK, StemGNN, TSMixer, TSMixerx, MLPMultivariate, SOFTS, TimeMixer]\n", + "\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " for model in models:\n", + " check_model(model, checks=[\"losses\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "python3", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/nbs/common.modules.ipynb b/nbs/common.modules.ipynb index f90e936da..403a2a5d6 100644 --- a/nbs/common.modules.ipynb +++ b/nbs/common.modules.ipynb @@ -691,6 +691,66 @@ " x = x + self.mean\n", " return x" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "class RevINMultivariate(nn.Module):\n", + " \"\"\" \n", + " ReversibleInstanceNorm1d for Multivariate models\n", + " \"\"\" \n", + " def __init__(self, num_features: int, eps=1e-5, affine=False, subtract_last=False, non_norm=False):\n", + " super().__init__()\n", + " self.num_features = num_features\n", + " self.eps = eps\n", + " self.affine = affine\n", + " if self.affine:\n", + " self._init_params()\n", + "\n", + " def forward(self, x, mode: str):\n", + " if mode == 'norm':\n", + " x = self._normalize(x)\n", + " elif mode == 'denorm':\n", + " x = self._denormalize(x)\n", + " else:\n", + " raise NotImplementedError\n", + " return x\n", + "\n", + " def _init_params(self):\n", + " # initialize RevIN params: (C,)\n", + " self.affine_weight = nn.Parameter(torch.ones((1, 1, self.num_features)))\n", + " self.affine_bias = nn.Parameter(torch.zeros((1, 1, self.num_features)))\n", + "\n", + " def _normalize(self, x):\n", + " # Batch statistics\n", + " self.batch_mean = torch.mean(x, axis=1, keepdim=True).detach()\n", + " self.batch_std = torch.sqrt(torch.var(x, axis=1, keepdim=True, unbiased=False) + self.eps).detach()\n", + " \n", + " # Instance normalization\n", + " x = x - self.batch_mean\n", + " x = x / self.batch_std\n", + " \n", + " if self.affine:\n", + " x = x * self.affine_weight\n", + " x = x + self.affine_bias\n", + "\n", + " return x\n", + "\n", + " def _denormalize(self, x):\n", + " # Reverse the normalization\n", + " if self.affine:\n", + " x = x - self.affine_bias\n", + " x = x / self.affine_weight \n", + " \n", + " x = x * 
self.batch_std\n", + " x = x + self.batch_mean \n", + "\n", + " return x" + ] } ], "metadata": { diff --git a/nbs/common.scalers.ipynb b/nbs/common.scalers.ipynb index 9e6737c3c..f49714a6b 100644 --- a/nbs/common.scalers.ipynb +++ b/nbs/common.scalers.ipynb @@ -682,11 +682,11 @@ " def _init_params(self, num_features):\n", " # Initialize RevIN scaler params to broadcast:\n", " if self.dim==1: # [B,T,C] [1,1,C]\n", - " self.revin_bias = nn.Parameter(torch.zeros(1,1,num_features))\n", - " self.revin_weight = nn.Parameter(torch.ones(1,1,num_features))\n", + " self.revin_bias = nn.Parameter(torch.zeros(1, 1, num_features, 1))\n", + " self.revin_weight = nn.Parameter(torch.ones(1, 1, num_features, 1))\n", " elif self.dim==-1: # [B,C,T] [1,C,1]\n", - " self.revin_bias = nn.Parameter(torch.zeros(1,num_features,1))\n", - " self.revin_weight = nn.Parameter(torch.ones(1,num_features,1))\n", + " self.revin_bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))\n", + " self.revin_weight = nn.Parameter(torch.ones(1, num_features, 1, 1))\n", "\n", " #@torch.no_grad()\n", " def transform(self, x, mask):\n", @@ -863,8 +863,8 @@ "#| hide\n", "# Validate scalers\n", "for scaler_type in [None, 'identity', 'standard', 'robust', 'minmax', 'minmax1', 'invariant', 'revin']:\n", - " x = 1.0*torch.tensor(np_x)\n", - " mask = torch.tensor(np_mask)\n", + " x = 1.0*torch.tensor(np_x).unsqueeze(-1)\n", + " mask = torch.tensor(np_mask).unsqueeze(-1)\n", " scaler = TemporalNorm(scaler_type=scaler_type, dim=1, num_features=np_x.shape[-1])\n", " x_scaled = scaler.transform(x=x, mask=mask)\n", " x_recovered = scaler.inverse_transform(x_scaled)\n", @@ -987,14 +987,6 @@ "nf = NeuralForecast(models=[model], freq='MS')\n", "Y_hat_df = nf.cross_validation(df=Y_df, val_size=12, n_windows=1)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2f50bd8", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/nbs/core.ipynb b/nbs/core.ipynb index 3bb61dbdc..c44411f07 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -84,6 +84,7 @@ "\n", "from neuralforecast.common._base_model import DistributedConfig\n", "from neuralforecast.compat import SparkDataFrame\n", + "from neuralforecast.losses.pytorch import IQLoss\n", "from neuralforecast.tsdataset import _FilesDataset, TimeSeriesDataset, LocalFilesTimeSeriesDataset\n", "from neuralforecast.models import (\n", " GRU, LSTM, RNN, TCN, DeepAR, DilatedRNN,\n", @@ -96,7 +97,7 @@ " TimeMixer, KAN, RMoK\n", ")\n", "from neuralforecast.common._base_auto import BaseAuto, MockTrial\n", - "from neuralforecast.utils import PredictionIntervals, get_prediction_interval_method" + "from neuralforecast.utils import PredictionIntervals, get_prediction_interval_method, level_to_quantiles, quantiles_to_level" ] }, { @@ -337,6 +338,7 @@ " # Flags and attributes\n", " self._fitted = False\n", " self._reset_models()\n", + " self._add_level = False\n", "\n", " def _scalers_fit_transform(self, dataset: TimeSeriesDataset) -> None:\n", " self.scalers_ = {} \n", @@ -737,13 +739,14 @@ " names: List[str] = []\n", " count_names = {'model': 0}\n", " for model in self.models:\n", - " if add_level and model.loss.outputsize_multiplier > 1:\n", - " continue\n", - "\n", " model_name = repr(model)\n", " count_names[model_name] = count_names.get(model_name, -1) + 1\n", " if count_names[model_name] > 0:\n", " model_name += str(count_names[model_name])\n", + "\n", + " if add_level and (model.loss.outputsize_multiplier > 1 or isinstance(model.loss, IQLoss)):\n", + " continue\n", + 
"\n", " names.extend(model_name + n for n in model.loss.output_names)\n", " return names\n", "\n", @@ -863,6 +866,7 @@ " verbose: bool = False,\n", " engine = None,\n", " level: Optional[List[Union[int, float]]] = None,\n", + " quantiles: Optional[List[float]] = None,\n", " **data_kwargs\n", " ):\n", " \"\"\"Predict with core.NeuralForecast.\n", @@ -886,6 +890,8 @@ " Distributed engine for inference. Only used if df is a spark dataframe or if fit was called on a spark dataframe.\n", " level : list of ints or floats, optional (default=None)\n", " Confidence levels between 0 and 100.\n", + " quantiles : list of floats, optional (default=None)\n", + " Alternative to level, target quantiles to predict.\n", " data_kwargs : kwargs\n", " Extra arguments to be passed to the dataset within each model.\n", "\n", @@ -900,6 +906,22 @@ "\n", " if not self._fitted:\n", " raise Exception(\"You must fit the model before predicting.\")\n", + " \n", + " quantiles_ = None\n", + " level_ = None\n", + " has_level = False \n", + " if level is not None:\n", + " has_level = True\n", + " if quantiles is not None:\n", + " raise ValueError(\"You can't set both level and quantiles.\")\n", + " level_ = sorted(list(set(level)))\n", + " quantiles_ = level_to_quantiles(level_)\n", + " \n", + " if quantiles is not None:\n", + " if level is not None:\n", + " raise ValueError(\"You can't set both level and quantiles.\") \n", + " quantiles_ = sorted(list(set(quantiles)))\n", + " level_ = quantiles_to_level(quantiles_)\n", "\n", " needed_futr_exog = self._get_needed_futr_exog()\n", " if needed_futr_exog:\n", @@ -949,8 +971,6 @@ " if verbose: print('Using stored dataset.')\n", " \n", "\n", - " cols = self._get_model_names()\n", - "\n", " # Placeholder dataframe for predictions with unique_id and ds\n", " fcsts_df = ufp.make_future_dataframe(\n", " uids=uids,\n", @@ -994,24 +1014,14 @@ " )\n", " self._scalers_transform(futr_dataset)\n", " dataset = dataset.append(futr_dataset)\n", - "\n", - " col_idx = 0\n", - " fcsts = np.full((self.h * len(uids), len(cols)), fill_value=np.nan, dtype=np.float32)\n", - " for model in self.models:\n", - " old_test_size = model.get_test_size()\n", - " model.set_test_size(self.h) # To predict h steps ahead\n", - " model_fcsts = model.predict(dataset=dataset, **data_kwargs)\n", - " # Append predictions in memory placeholder\n", - " output_length = len(model.loss.output_names)\n", - " fcsts[:, col_idx : col_idx + output_length] = model_fcsts\n", - " col_idx += output_length\n", - " model.set_test_size(old_test_size) # Set back to original value\n", + " \n", + " fcsts, cols = self._generate_forecasts(dataset=dataset, uids=uids, quantiles_=quantiles_, level_=level_, has_level=has_level, **data_kwargs)\n", + " \n", " if self.scalers_:\n", " indptr = np.append(0, np.full(len(uids), self.h).cumsum())\n", " fcsts = self._scalers_target_inverse_transform(fcsts, indptr)\n", "\n", " # Declare predictions pd.DataFrame\n", - " cols = self._get_model_names() # Needed for IQLoss as column names may have changed during the call to .predict()\n", " if isinstance(fcsts_df, pl_DataFrame):\n", " fcsts = pl_DataFrame(dict(zip(cols, fcsts.T)))\n", " else:\n", @@ -1021,25 +1031,6 @@ " _warn_id_as_idx()\n", " fcsts_df = fcsts_df.set_index(self.id_col)\n", "\n", - " # add prediction intervals\n", - " if level is not None:\n", - " if self._cs_df is None or self.prediction_intervals is None:\n", - " raise Exception('You must fit the model with prediction_intervals to use level.')\n", - " else:\n", - " level_ = 
sorted(level)\n", - " model_names = self._get_model_names(add_level=True)\n", - " prediction_interval_method = get_prediction_interval_method(self.prediction_intervals.method)\n", - "\n", - " fcsts_df = prediction_interval_method(\n", - " fcsts_df,\n", - " self._cs_df,\n", - " model_names=list(model_names),\n", - " level=level_,\n", - " cs_n_windows=self.prediction_intervals.n_windows,\n", - " n_series=len(uids),\n", - " horizon=self.h,\n", - " )\n", - "\n", " return fcsts_df\n", "\n", " def _reset_models(self):\n", @@ -1085,15 +1076,6 @@ " if self.dataset.min_size < (val_size+test_size):\n", " warnings.warn('Validation and test sets are larger than the shorter time-series.')\n", "\n", - " cols = []\n", - " count_names = {'model': 0}\n", - " for model in self.models:\n", - " model_name = repr(model)\n", - " count_names[model_name] = count_names.get(model_name, -1) + 1\n", - " if count_names[model_name] > 0:\n", - " model_name += str(count_names[model_name])\n", - " cols += [model_name + n for n in model.loss.output_names]\n", - "\n", " fcsts_df = ufp.cv_times(\n", " times=self.ds,\n", " uids=self.uids,\n", @@ -1107,20 +1089,20 @@ " # the cv_times is sorted by window and then id\n", " fcsts_df = ufp.sort(fcsts_df, [id_col, 'cutoff', time_col])\n", "\n", - " col_idx = 0\n", - " fcsts = np.full((self.dataset.n_groups * self.h * n_windows, len(cols)),\n", - " np.nan, dtype=np.float32)\n", - " \n", + " fcsts_list: List = []\n", " for model in self.models:\n", + " if self._add_level and (model.loss.outputsize_multiplier > 1 or isinstance(model.loss, IQLoss)):\n", + " continue\n", + "\n", " model.fit(dataset=self.dataset,\n", " val_size=val_size, \n", " test_size=test_size)\n", " model_fcsts = model.predict(self.dataset, step_size=step_size, **data_kwargs)\n", "\n", " # Append predictions in memory placeholder\n", - " output_length = len(model.loss.output_names)\n", - " fcsts[:,col_idx:(col_idx + output_length)] = model_fcsts\n", - " col_idx += output_length\n", + " fcsts_list.append(model_fcsts)\n", + "\n", + " fcsts = np.concatenate(fcsts_list, axis=-1)\n", " # we may have allocated more space than needed\n", " # each serie can produce at most (serie.size - 1) // self.h CV windows\n", " effective_sizes = ufp.counts_by_id(fcsts_df, id_col)['counts'].to_numpy()\n", @@ -1148,6 +1130,7 @@ " self._fitted = True\n", "\n", " # Add predictions to forecasts DataFrame\n", + " cols = self._get_model_names(add_level=self._add_level)\n", " if isinstance(self.uids, pl_Series):\n", " fcsts = pl_DataFrame(dict(zip(cols, fcsts.T)))\n", " else:\n", @@ -1164,7 +1147,7 @@ " if isinstance(fcsts_df, pd.DataFrame) and _id_as_idx():\n", " _warn_id_as_idx()\n", " fcsts_df = fcsts_df.set_index(id_col)\n", - " return fcsts_df\n", + " return fcsts_df \n", "\n", " def cross_validation(\n", " self,\n", @@ -1183,6 +1166,7 @@ " target_col: str = 'y',\n", " prediction_intervals: Optional[PredictionIntervals] = None,\n", " level: Optional[List[Union[int, float]]] = None,\n", + " quantiles: Optional[List[float]] = None,\n", " **data_kwargs\n", " ) -> DataFrame:\n", " \"\"\"Temporal Cross-Validation with core.NeuralForecast.\n", @@ -1224,7 +1208,9 @@ " prediction_intervals : PredictionIntervals, optional (default=None)\n", " Configuration to calibrate prediction intervals (Conformal Prediction). \n", " level : list of ints or floats, optional (default=None)\n", - " Confidence levels between 0 and 100. Use with prediction_intervals. 
\n", + " Confidence levels between 0 and 100.\n", + " quantiles : list of floats, optional (default=None)\n", + " Alternative to level, target quantiles to predict.\n", " data_kwargs : kwargs\n", " Extra arguments to be passed to the dataset within each model.\n", "\n", @@ -1257,15 +1243,15 @@ " df = df.reset_index(id_col) \n", "\n", " # Checks for prediction intervals\n", - " if prediction_intervals is not None or level is not None:\n", - " if level is None:\n", - " warnings.warn('Level not provided, using level=[90].')\n", - " level = [90]\n", - " if prediction_intervals is None:\n", - " raise Exception('You must set prediction_intervals to use level.')\n", + " if prediction_intervals is not None:\n", + " if level is None and quantiles is None:\n", + " raise Exception('When passing prediction_intervals you need to set the level or quantiles argument.') \n", " if not refit:\n", - " raise Exception('Passing prediction_intervals and/or level is only supported with refit=True.') \n", + " raise Exception('Passing prediction_intervals is only supported with refit=True.') \n", "\n", + " if level is not None and quantiles is not None:\n", + " raise ValueError(\"You can't set both level and quantiles argument.\")\n", + " \n", " if not refit:\n", "\n", " return self._no_refit_cross_validation(\n", @@ -1326,6 +1312,7 @@ " sort_df=sort_df,\n", " verbose=verbose,\n", " level=level,\n", + " quantiles=quantiles,\n", " **data_kwargs\n", " )\n", " preds = ufp.join(preds, cutoffs, on=id_col, how='left')\n", @@ -1347,7 +1334,7 @@ " out = out.set_index(id_col)\n", " return out\n", "\n", - " def predict_insample(self, step_size: int = 1):\n", + " def predict_insample(self, step_size: int = 1, **data_kwargs):\n", " \"\"\"Predict insample with core.NeuralForecast.\n", "\n", " `core.NeuralForecast`'s `predict_insample` uses stored fitted `models`\n", @@ -1365,23 +1352,7 @@ " \"\"\"\n", " if not self._fitted:\n", " raise Exception('The models must be fitted first with `fit` or `cross_validation`.')\n", - "\n", - " for model in self.models:\n", - " if model.SAMPLING_TYPE == 'recurrent':\n", - " warnings.warn(f'Predict insample might not provide accurate predictions for \\\n", - " recurrent model {repr(model)} class yet due to scaling.')\n", - " print(f'WARNING: Predict insample might not provide accurate predictions for \\\n", - " recurrent model {repr(model)} class yet due to scaling.')\n", " \n", - " cols = []\n", - " count_names = {'model': 0}\n", - " for model in self.models:\n", - " model_name = repr(model)\n", - " count_names[model_name] = count_names.get(model_name, -1) + 1\n", - " if count_names[model_name] > 0:\n", - " model_name += str(count_names[model_name])\n", - " cols += [model_name + n for n in model.loss.output_names]\n", - "\n", " # Remove test set from dataset and last dates\n", " test_size = self.models[0].get_test_size()\n", "\n", @@ -1417,9 +1388,7 @@ " time_col=self.time_col,\n", " )\n", "\n", - " col_idx = 0\n", - " fcsts = np.full((len(fcsts_df), len(cols)), np.nan, dtype=np.float32)\n", - "\n", + " fcsts_list: List = []\n", " for model in self.models:\n", " # Test size is the number of periods to forecast (full size of trimmed dataset)\n", " model.set_test_size(test_size=trimmed_dataset.max_size)\n", @@ -1427,10 +1396,9 @@ " # Predict\n", " model_fcsts = model.predict(trimmed_dataset, step_size=step_size)\n", " # Append predictions in memory placeholder\n", - " output_length = len(model.loss.output_names)\n", - " fcsts[:,col_idx:(col_idx + output_length)] = model_fcsts\n", - " col_idx += 
output_length \n", + " fcsts_list.append(model_fcsts) \n", " model.set_test_size(test_size=test_size) # Set original test_size\n", + " fcsts = np.concatenate(fcsts_list, axis=-1)\n", "\n", " # original y\n", " original_y = {\n", @@ -1440,6 +1408,7 @@ " }\n", "\n", " # Add predictions to forecasts DataFrame\n", + " cols = self._get_model_names()\n", " if isinstance(self.uids, pl_Series):\n", " fcsts = pl_DataFrame(dict(zip(cols, fcsts.T)))\n", " Y_df = pl_DataFrame(original_y)\n", @@ -1703,6 +1672,7 @@ " \"Please reduce the number of windows, horizon or remove those series.\"\n", " )\n", " \n", + " self._add_level = True\n", " cv_results = self.cross_validation(\n", " df=df,\n", " static_df=static_df,\n", @@ -1711,7 +1681,8 @@ " time_col=time_col,\n", " target_col=target_col,\n", " )\n", - " \n", + " self._add_level = False\n", + "\n", " kept = [time_col, id_col, 'cutoff']\n", " # conformity score for each model\n", " for model in self._get_model_names(add_level=True):\n", @@ -1721,7 +1692,102 @@ " abs_err = abs(cv_results[model] - cv_results[target_col])\n", " cv_results = ufp.assign_columns(cv_results, model, abs_err)\n", " dropped = list(set(cv_results.columns) - set(kept))\n", - " return ufp.drop_columns(cv_results, dropped) " + " return ufp.drop_columns(cv_results, dropped) \n", + " \n", + " def _generate_forecasts(self, dataset: TimeSeriesDataset, uids: Series, quantiles_: Optional[List[float]] = None, level_: Optional[List[Union[int, float]]] = None, has_level: Optional[bool] = False, **data_kwargs) -> np.array:\n", + " fcsts_list: List = []\n", + " cols = []\n", + " count_names = {'model': 0}\n", + " for model in self.models:\n", + " old_test_size = model.get_test_size()\n", + " model.set_test_size(self.h) # To predict h steps ahead\n", + " \n", + " # Increment model name if the same model is used more than once\n", + " model_name = repr(model)\n", + " count_names[model_name] = count_names.get(model_name, -1) + 1\n", + " if count_names[model_name] > 0:\n", + " model_name += str(count_names[model_name])\n", + "\n", + " # Predict for every quantile or level if requested and the loss function supports it\n", + " # case 1: DistributionLoss and MixtureLosses\n", + " if quantiles_ is not None and not isinstance(model.loss, IQLoss) and hasattr(model.loss, 'update_quantile') and callable(model.loss.update_quantile):\n", + " model_fcsts = model.predict(dataset=dataset, quantiles = quantiles_, **data_kwargs)\n", + " fcsts_list.append(model_fcsts) \n", + " col_names = []\n", + " for i, quantile in enumerate(quantiles_):\n", + " col_name = self._get_column_name(model_name, quantile, has_level)\n", + " if i == 0:\n", + " col_names.extend([f\"{model_name}\", col_name])\n", + " else:\n", + " col_names.extend([col_name])\n", + " if hasattr(model.loss, 'return_params') and model.loss.return_params:\n", + " cols.extend(col_names + [model_name + param_name for param_name in model.loss.param_names])\n", + " else:\n", + " cols.extend(col_names)\n", + " # case 2: IQLoss\n", + " elif quantiles_ is not None and isinstance(model.loss, IQLoss):\n", + " # IQLoss does not give monotonically increasing quantiles, so we apply a hack: compute all quantiles, and take the quantile over the quantiles\n", + " quantiles_iqloss = np.linspace(0.01, 0.99, 20)\n", + " fcsts_list_iqloss = []\n", + " for i, quantile in enumerate(quantiles_iqloss):\n", + " model_fcsts = model.predict(dataset=dataset, quantiles = [quantile], **data_kwargs) \n", + " fcsts_list_iqloss.append(model_fcsts) \n", + " fcsts_iqloss = 
np.concatenate(fcsts_list_iqloss, axis=-1)\n", + "\n", + " # Get the actual requested quantiles\n", + " model_fcsts = np.quantile(fcsts_iqloss, quantiles_, axis=-1).T\n", + " fcsts_list.append(model_fcsts) \n", + "\n", + " # Get the right column names\n", + " col_names = []\n", + " for i, quantile in enumerate(quantiles_):\n", + " col_name = self._get_column_name(model_name, quantile, has_level)\n", + " col_names.extend([col_name]) \n", + " cols.extend(col_names)\n", + " # case 3: PointLoss via prediction intervals\n", + " elif quantiles_ is not None and model.loss.outputsize_multiplier == 1:\n", + " if self.prediction_intervals is None:\n", + " raise AttributeError(\n", + " f\"You have trained {model_name} with loss={type(model.loss).__name__}(). \\n\"\n", + " \" You then must set `prediction_intervals` during fit to use level or quantiles during predict.\") \n", + " model_fcsts = model.predict(dataset=dataset, quantiles = quantiles_, **data_kwargs)\n", + " prediction_interval_method = get_prediction_interval_method(self.prediction_intervals.method)\n", + " fcsts_with_intervals, out_cols = prediction_interval_method(\n", + " model_fcsts,\n", + " self._cs_df,\n", + " model=model_name,\n", + " level=level_ if has_level else None,\n", + " cs_n_windows=self.prediction_intervals.n_windows,\n", + " n_series=len(uids),\n", + " horizon=self.h,\n", + " quantiles=quantiles_ if not has_level else None,\n", + " ) \n", + " fcsts_list.append(fcsts_with_intervals) \n", + " cols.extend([model_name] + out_cols)\n", + " # base case: quantiles or levels are not supported or provided as arguments\n", + " else:\n", + " model_fcsts = model.predict(dataset=dataset, **data_kwargs)\n", + " fcsts_list.append(model_fcsts)\n", + " cols.extend(model_name + n for n in model.loss.output_names)\n", + " model.set_test_size(old_test_size) # Set back to original value\n", + " fcsts = np.concatenate(fcsts_list, axis=-1)\n", + "\n", + " return fcsts, cols\n", + " \n", + " @staticmethod\n", + " def _get_column_name(model_name, quantile, has_level) -> str:\n", + " if not has_level:\n", + " col_name = f\"{model_name}_ql{quantile}\" \n", + " elif quantile < 0.5:\n", + " level_lo = int(round(100 - 200 * quantile))\n", + " col_name = f\"{model_name}-lo-{level_lo}\"\n", + " elif quantile > 0.5:\n", + " level_hi = int(round(100 - 200 * (1 - quantile)))\n", + " col_name = f\"{model_name}-hi-{level_hi}\"\n", + " else:\n", + " col_name = f\"{model_name}-median\"\n", + "\n", + " return col_name\n" ] }, { @@ -1849,7 +1915,7 @@ "from neuralforecast.models.tsmixer import TSMixer\n", "from neuralforecast.models.tsmixerx import TSMixerx\n", "\n", - "from neuralforecast.losses.pytorch import MQLoss, MAE, MSE\n", + "from neuralforecast.losses.pytorch import MQLoss, MAE, MSE, DistributionLoss, IQLoss\n", "from neuralforecast.utils import AirPassengersDF, AirPassengersPanel, AirPassengersStatic\n", "\n", "from datetime import date" @@ -3465,6 +3531,71 @@ ")\n", "assert all([col in cv2.columns for col in ['NHITS-lo-30', 'NHITS-hi-30']])" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b82e7c70", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Test quantile and level argument in predict for different models and errors\n", + "prediction_intervals = PredictionIntervals(method=\"conformal_error\")\n", + "\n", + "models = []\n", + "for nf_model in [NHITS, LSTM, TSMixer]:\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 1, \"loss\": MAE()}\n", + " if nf_model.__name__ == \"TSMixer\":\n", + " 
params.update({\"n_series\": 2})\n", + " models.append(nf_model(**params))\n", + "\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 1, \"loss\": DistributionLoss(distribution=\"Normal\")}\n", + " if nf_model.__name__ == \"TSMixer\":\n", + " params.update({\"n_series\": 2})\n", + " models.append(nf_model(**params))\n", + "\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 1, \"loss\": IQLoss()}\n", + " if nf_model.__name__ == \"TSMixer\":\n", + " params.update({\"n_series\": 2})\n", + " models.append(nf_model(**params))\n", + "\n", + "nf = NeuralForecast(models=models, freq='M')\n", + "nf.fit(AirPassengersPanel_train, prediction_intervals=prediction_intervals)\n", + "# Test default prediction\n", + "preds = nf.predict(futr_df=AirPassengersPanel_test)\n", + "assert list(preds.columns) == ['unique_id', 'ds', 'NHITS', 'NHITS1', 'NHITS1-median', 'NHITS1-lo-90',\n", + " 'NHITS1-lo-80', 'NHITS1-hi-80', 'NHITS1-hi-90', 'NHITS2_ql0.5', 'LSTM',\n", + " 'LSTM1', 'LSTM1-median', 'LSTM1-lo-90', 'LSTM1-lo-80', 'LSTM1-hi-80',\n", + " 'LSTM1-hi-90', 'LSTM2_ql0.5', 'TSMixer', 'TSMixer1', 'TSMixer1-median',\n", + " 'TSMixer1-lo-90', 'TSMixer1-lo-80', 'TSMixer1-hi-80', 'TSMixer1-hi-90',\n", + " 'TSMixer2_ql0.5']\n", + "# Test quantile prediction\n", + "preds = nf.predict(futr_df=AirPassengersPanel_test, quantiles=[0.2, 0.3])\n", + "assert list(preds.columns) == ['unique_id', 'ds', 'NHITS', 'NHITS-ql0.2', 'NHITS-ql0.3', 'NHITS1',\n", + " 'NHITS1_ql0.2', 'NHITS1_ql0.3', 'NHITS2_ql0.2', 'NHITS2_ql0.3', 'LSTM',\n", + " 'LSTM-ql0.2', 'LSTM-ql0.3', 'LSTM1', 'LSTM1_ql0.2', 'LSTM1_ql0.3',\n", + " 'LSTM2_ql0.2', 'LSTM2_ql0.3', 'TSMixer', 'TSMixer-ql0.2',\n", + " 'TSMixer-ql0.3', 'TSMixer1', 'TSMixer1_ql0.2', 'TSMixer1_ql0.3',\n", + " 'TSMixer2_ql0.2', 'TSMixer2_ql0.3']\n", + "# Test level prediction\n", + "preds = nf.predict(futr_df=AirPassengersPanel_test, level=[80, 90])\n", + "assert list(preds.columns) == ['unique_id', 'ds', 'NHITS', 'NHITS-lo-90', 'NHITS-lo-80', 'NHITS-hi-80',\n", + " 'NHITS-hi-90', 'NHITS1', 'NHITS1-lo-90', 'NHITS1-lo-80', 'NHITS1-hi-80',\n", + " 'NHITS1-hi-90', 'NHITS2-lo-90', 'NHITS2-lo-80', 'NHITS2-hi-80',\n", + " 'NHITS2-hi-90', 'LSTM', 'LSTM-lo-90', 'LSTM-lo-80', 'LSTM-hi-80',\n", + " 'LSTM-hi-90', 'LSTM1', 'LSTM1-lo-90', 'LSTM1-lo-80', 'LSTM1-hi-80',\n", + " 'LSTM1-hi-90', 'LSTM2-lo-90', 'LSTM2-lo-80', 'LSTM2-hi-80',\n", + " 'LSTM2-hi-90', 'TSMixer', 'TSMixer-lo-90', 'TSMixer-lo-80',\n", + " 'TSMixer-hi-80', 'TSMixer-hi-90', 'TSMixer1', 'TSMixer1-lo-90',\n", + " 'TSMixer1-lo-80', 'TSMixer1-hi-80', 'TSMixer1-hi-90', 'TSMixer2-lo-90',\n", + " 'TSMixer2-lo-80', 'TSMixer2-hi-80', 'TSMixer2-hi-90']\n", + "# Re-Test default prediction - note that they are different from the first test (this is expected)\n", + "preds = nf.predict(futr_df=AirPassengersPanel_test)\n", + "assert list(preds.columns) == ['unique_id', 'ds', 'NHITS', 'NHITS1', 'NHITS1-median', 'NHITS2_ql0.5',\n", + " 'LSTM', 'LSTM1', 'LSTM1-median', 'LSTM2_ql0.5', 'TSMixer', 'TSMixer1',\n", + " 'TSMixer1-median', 'TSMixer2_ql0.5']" + ] } ], "metadata": { diff --git a/nbs/docs/capabilities/01_overview.ipynb b/nbs/docs/capabilities/01_overview.ipynb index 11b964a7f..de1f3e374 100644 --- a/nbs/docs/capabilities/01_overview.ipynb +++ b/nbs/docs/capabilities/01_overview.ipynb @@ -19,11 +19,11 @@ "|`BiTCN` | `AutoBiTCN` | CNN | Univariate | Direct | F/H/S | \n", "|`DeepAR` | `AutoDeepAR` | RNN | Univariate | Recursive | F/S | \n", "|`DeepNPTS` | `AutoDeepNPTS` | MLP | Univariate | Direct | F/H/S | \n", - 
"|`DilatedRNN` | `AutoDilatedRNN` | RNN | Univariate | Recursive | F/H/S | \n", + "|`DilatedRNN` | `AutoDilatedRNN` | RNN | Univariate | Direct | F/H/S | \n", "|`FEDformer` | `AutoFEDformer` | Transformer | Univariate | Direct | F | \n", "|`GRU` | `AutoGRU` | RNN | Univariate | Recursive | F/H/S | \n", "|`HINT` | `AutoHINT` | Any7 | Both7 | Both7 | F/H/S | \n", - "|`Informer` | `AutoInformer` | Transformer | Multivariate | Direct | F | \n", + "|`Informer` | `AutoInformer` | Transformer | Univariate | Direct | F | \n", "|`iTransformer` | `AutoiTransformer` | Transformer | Multivariate | Direct | - | \n", "|`KAN` | `AutoKAN` | KAN | Univariate | Direct | F/H/S | \n", "|`LSTM` | `AutoLSTM` | RNN | Univariate | Recursive | F/H/S | \n", @@ -38,7 +38,7 @@ "|`RNN` | `AutoRNN` | RNN | Univariate | Recursive | F/H/S | \n", "|`SOFTS` | `AutoSOFTS` | MLP | Multivariate | Direct | - | \n", "|`StemGNN` | `AutoStemGNN` | GNN | Multivariate | Direct | - | \n", - "|`TCN` | `AutoTCN` | CNN | Univariate | Recursive | F/H/S | \n", + "|`TCN` | `AutoTCN` | CNN | Univariate | Direct | F/H/S | \n", "|`TFT` | `AutoTFT` | Transformer | Univariate | Direct | F/H/S | \n", "|`TiDE` | `AutoTiDE` | MLP | Univariate | Direct | F/H/S | \n", "|`TimeMixer` | `AutoTimeMixer` | MLP | Multivariate | Direct | - | \n", diff --git a/nbs/losses.pytorch.ipynb b/nbs/losses.pytorch.ipynb index d8d333dd7..70cceb571 100644 --- a/nbs/losses.pytorch.ipynb +++ b/nbs/losses.pytorch.ipynb @@ -54,9 +54,8 @@ "outputs": [], "source": [ "#| export\n", - "from typing import Optional, Union, Tuple\n", + "from typing import Optional, Union, Tuple, List\n", "\n", - "import math\n", "import numpy as np\n", "import torch\n", "\n", @@ -70,6 +69,9 @@ " Poisson,\n", " NegativeBinomial,\n", " Beta,\n", + " Gamma,\n", + " MixtureSameFamily,\n", + " Categorical,\n", " AffineTransform, \n", " TransformedDistribution,\n", ")\n", @@ -140,7 +142,7 @@ " `outputsize_multiplier`: Multiplier for the output size.
\n", " `output_names`: Names of the outputs.
\n", " \"\"\"\n", - " def __init__(self, horizon_weight, outputsize_multiplier, output_names):\n", + " def __init__(self, horizon_weight=None, outputsize_multiplier=None, output_names=None):\n", " super(BasePointLoss, self).__init__()\n", " if horizon_weight is not None:\n", " horizon_weight = torch.Tensor(horizon_weight.flatten())\n", @@ -151,10 +153,13 @@ "\n", " def domain_map(self, y_hat: torch.Tensor):\n", " \"\"\"\n", - " Univariate loss operates in dimension [B,T,H]/[B,H]\n", - " This changes the network's output from [B,H,1]->[B,H]\n", + " Input:\n", + " Univariate: [B, H, 1]\n", + " Multivariate: [B, H, N]\n", + "\n", + " Output: [B, H, N]\n", " \"\"\"\n", - " return y_hat.squeeze(-1)\n", + " return y_hat\n", "\n", " def _compute_weights(self, y, mask):\n", " \"\"\"\n", @@ -163,16 +168,17 @@ " If set, check that it has the same length as the horizon in x.\n", " \"\"\"\n", " if mask is None:\n", - " mask = torch.ones_like(y, device=y.device)\n", + " mask = torch.ones_like(y)\n", "\n", " if self.horizon_weight is None:\n", - " self.horizon_weight = torch.ones(mask.shape[-1])\n", + " weights = torch.ones_like(mask)\n", " else:\n", - " assert mask.shape[-1] == len(self.horizon_weight), \\\n", + " assert mask.shape[1] == len(self.horizon_weight), \\\n", " 'horizon_weight must have same length as Y'\n", - "\n", - " weights = self.horizon_weight.clone()\n", - " weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device)\n", + " weights = self.horizon_weight.clone()\n", + " weights = weights[None, :, None].to(mask.device)\n", + " weights = torch.ones_like(mask, device=mask.device) * weights\n", + " \n", " return weights * mask" ] }, @@ -227,7 +233,8 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " mask: Union[torch.Tensor, None] = None,\n", + " y_insample: Union[torch.Tensor, None] = None) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
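A minimal standalone sketch (illustrative shapes and names, not part of the diff) of the broadcasting that the reworked `BasePointLoss._compute_weights` above performs: a length-`h` `horizon_weight` is expanded to the new `[B, h, N]` mask layout before masking.

```python
import torch

# Sketch: broadcast a per-step horizon weight to the [B, h, N] mask layout,
# mirroring the reworked BasePointLoss._compute_weights above.
B, h, N = 4, 6, 3
mask = torch.ones(B, h, N)
horizon_weight = torch.arange(1.0, h + 1)  # one weight per horizon step, length h

weights = horizon_weight[None, :, None].to(mask.device)  # [1, h, 1]
weights = torch.ones_like(mask) * weights                # broadcast to [B, h, N]
weights = weights * mask                                 # zero out masked steps

assert weights.shape == (B, h, N)
assert torch.allclose(weights[0, :, 0], horizon_weight)
```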
\n", @@ -311,7 +318,9 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
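The hunks in this file keep widening the point-loss `__call__` signature with a `y_insample` argument. A hedged sketch of the motivation: with one shared signature, a training step can pass the insample history to every loss in the same way; scale-free losses ignore it, scaled losses such as MASE consume it. The two functions below are illustrative stand-ins, not the library's classes.

```python
import torch

# Stand-in losses with the shared (y, y_hat, y_insample, mask) convention.
def mae(y, y_hat, y_insample=None, mask=None):
    return (y - y_hat).abs().mean()  # ignores y_insample

def mase_like(y, y_hat, y_insample, mask=None, seasonality=1):
    # scale by the in-sample seasonal-naive error, as MASE does
    scale = (y_insample[:, seasonality:] - y_insample[:, :-seasonality]).abs().mean()
    return ((y - y_hat).abs() / scale).mean()

y, y_hat = torch.rand(2, 12), torch.rand(2, 12)
y_insample = torch.rand(2, 24)
for loss_fn in (mae, mase_like):
    print(loss_fn(y=y, y_hat=y_hat, y_insample=y_insample))
```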
\n", @@ -398,7 +407,8 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " mask: Union[torch.Tensor, None] = None,\n", + " y_insample: Union[torch.Tensor, None] = None) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -498,7 +508,9 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -590,7 +602,8 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " mask: Union[torch.Tensor, None] = None,\n", + " y_insample: Union[torch.Tensor, None] = None) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -685,12 +698,13 @@ " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", " y_insample: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor (batch_size, output_size), Actual values.
\n", "        `y_hat`: tensor (batch_size, output_size), Predicted values.
\n", - " `y_insample`: tensor (batch_size, input_size), Actual insample Seasonal Naive predictions.
\n", + " `y_insample`: tensor (batch_size, input_size), Actual insample values.
\n", "        `mask`: tensor, Specifies date stamps per series to consider in loss.
\n", "\n", " **Returns:**
\n", "        `mase`: tensor (single value).\n", "        \"\"\"\n", "        delta_y = torch.abs(y - y_hat)\n", "        scale = torch.mean(torch.abs(y_insample[:, self.seasonality:] - \\\n", "                            y_insample[:, :-self.seasonality]), axis=1)\n", - "        losses = _divide_no_nan(delta_y, scale[:, None])\n", + "        losses = _divide_no_nan(delta_y, scale[:, None, None])\n", "        weights = self._compute_weights(y=y, mask=mask)\n", "        return _weighted_mean(losses=losses, weights=weights)" ] @@ -754,11 +768,11 @@ "    \"\"\"Relative Mean Squared Error\n", "    Computes Relative Mean Squared Error (relMSE), as proposed by Hyndman & Koehler (2006)\n", "    as an alternative to percentage errors, to avoid measure instability.\n", - "    $$ \mathrm{relMSE}(\\mathbf{y}, \\mathbf{\hat{y}}, \\mathbf{\hat{y}}^{naive1}) =\n", - "    \\frac{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}})}{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}}^{naive1})} $$\n", + "    $$ \mathrm{relMSE}(\\mathbf{y}, \\mathbf{\hat{y}}, \\mathbf{\hat{y}}^{benchmark}) =\n", + "    \\frac{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}})}{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}}^{benchmark})} $$\n", "\n", "    **Parameters:**
\n", - " `y_train`: numpy array, Training values.
\n", + "        `y_train`: numpy array, deprecated; no longer used and will be removed in a future release.
\n", " `horizon_weight`: Tensor of size h, weight for each timestamp of the forecasting window.
\n", "\n", " **References:**
\n", @@ -769,32 +783,31 @@ " \"Probabilistic Hierarchical Forecasting with Deep Poisson Mixtures. \n", " Submitted to the International Journal Forecasting, Working paper available at arxiv.](https://arxiv.org/pdf/2110.13179.pdf)\n", " \"\"\"\n", - " def __init__(self, y_train, horizon_weight=None):\n", + " def __init__(self, y_train=None, horizon_weight=None):\n", " super(relMSE, self).__init__(horizon_weight=horizon_weight,\n", " outputsize_multiplier=1,\n", " output_names=[''])\n", - " self.y_train = y_train\n", + " if y_train is not None:\n", + " raise DeprecationWarning(\"y_train will be deprecated in a future release.\")\n", " self.mse = MSE(horizon_weight=horizon_weight)\n", "\n", " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_benchmark: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor (batch_size, output_size), Actual values.
\n", "        `y_hat`: tensor (batch_size, output_size), Predicted values.
\n", - " `y_insample`: tensor (batch_size, input_size), Actual insample Seasonal Naive predictions.
\n", + " `y_benchmark`: tensor (batch_size, output_size), Benchmark predicted values.
\n", "        `mask`: tensor, Specifies date stamps per series to consider in loss.
\n", "\n", " **Returns:**
\n", " `relMSE`: tensor (single value).\n", " \"\"\"\n", - " horizon = y.shape[-1]\n", - " last_col = self.y_train[:, -1].unsqueeze(1)\n", - " y_naive = last_col.repeat(1, horizon)\n", - "\n", - " norm = self.mse(y=y, y_hat=y_naive, mask=mask) # Already weighted\n", + " norm = self.mse(y=y, y_hat=y_benchmark, mask=mask) # Already weighted\n", " norm = norm + 1e-5 # Numerical stability\n", " loss = self.mse(y=y, y_hat=y_hat, mask=mask) # Already weighted\n", " loss = _divide_no_nan(loss, norm)\n", @@ -880,7 +893,9 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
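For reference, the pinball objective behind the quantile loss whose `__call__` is updated here, as a small numeric sketch (values are illustrative):

```python
import torch

# Pinball (quantile) loss: max(q * error, (q - 1) * error), error = y - y_hat,
# averaged over the horizon.
q = 0.9
y = torch.tensor([10.0, 12.0, 14.0])
y_hat = torch.tensor([9.0, 13.0, 14.0])
error = y - y_hat
loss = torch.maximum(q * error, (q - 1) * error).mean()
print(loss)  # at q=0.9, under-forecasts cost 9x more than over-forecasts
```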
\n", @@ -1022,35 +1037,47 @@ "\n", " def domain_map(self, y_hat: torch.Tensor):\n", " \"\"\"\n", - " Identity domain map [B,T,H,Q]/[B,H,Q]\n", + " Input:\n", + " Univariate: [B, H, 1 * Q]\n", + " Multivariate: [B, H, N * Q]\n", + "\n", + " Output: [B, H, N, Q]\n", " \"\"\"\n", - " return y_hat\n", - " \n", + " output = y_hat.reshape(y_hat.shape[0],\n", + " y_hat.shape[1],\n", + " -1,\n", + " self.outputsize_multiplier)\n", + "\n", + " return output\n", + "\n", " def _compute_weights(self, y, mask):\n", " \"\"\"\n", " Compute final weights for each datapoint (based on all weights and all masks)\n", " Set horizon_weight to a ones[H] tensor if not set.\n", " If set, check that it has the same length as the horizon in x.\n", + "\n", + " y: [B, h, N, 1]\n", + " mask: [B, h, N, 1]\n", " \"\"\"\n", - " if mask is None:\n", - " mask = torch.ones_like(y, device=y.device)\n", - " else:\n", - " mask = mask.unsqueeze(1) # Add Q dimension.\n", "\n", " if self.horizon_weight is None:\n", - " self.horizon_weight = torch.ones(mask.shape[-1])\n", + " weights = torch.ones_like(mask)\n", " else:\n", - " assert mask.shape[-1] == len(self.horizon_weight), \\\n", - " 'horizon_weight must have same length as Y'\n", - " \n", - " weights = self.horizon_weight.clone()\n", - " weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device)\n", + " assert mask.shape[1] == len(self.horizon_weight), \\\n", + " 'horizon_weight must have same length as Y' \n", + " weights = self.horizon_weight.clone()\n", + " weights = weights[None, :, None, None]\n", + " weights = weights.to(mask.device)\n", + " weights = torch.ones_like(mask, device=mask.device) * weights\n", + " \n", " return weights * mask\n", "\n", " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
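A short sketch of the reshape the new `MQLoss.domain_map` above performs, assuming the network emits `[B, H, N * Q]` (shapes here are illustrative):

```python
import torch

# The flat quantile axis is split off into a trailing dimension, so the loss
# always works on [B, H, N, Q] for both univariate (N=1) and multivariate data.
B, H, N, Q = 4, 12, 3, 5
y_hat = torch.randn(B, H, N * Q)
out = y_hat.reshape(y_hat.shape[0], y_hat.shape[1], -1, Q)
assert out.shape == (B, H, N, Q)
```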
\n", @@ -1060,20 +1087,24 @@ " **Returns:**
\n", "        `mqloss`: tensor (single value).\n", "        \"\"\"\n", - "        \n", - "        error = y_hat - y.unsqueeze(-1)\n", - "        sq = torch.maximum(-error, torch.zeros_like(error))\n", - "        s1_q = torch.maximum(error, torch.zeros_like(error))\n", - "        losses = (1/len(self.quantiles))*(self.quantiles * sq + (1 - self.quantiles) * s1_q)\n", + "        # [B, h, N] -> [B, h, N, 1]\n", + "        if y_hat.ndim == 3:\n", + "            y_hat = y_hat.unsqueeze(-1)\n", + "\n", + "        y = y.unsqueeze(-1)\n", + "        if mask is not None:\n", + "            mask = mask.unsqueeze(-1)\n", + "        else:\n", + "            mask = torch.ones_like(y, device=y.device)\n", "\n", - "        if y_hat.ndim == 3: # BaseWindows\n", - "            losses = losses.swapaxes(-2,-1) # [B,H,Q] -> [B,Q,H] (needed for horizon weighting, H at the end)\n", - "        elif y_hat.ndim == 4: # BaseRecurrent\n", - "            losses = losses.swapaxes(-2,-1)\n", - "            losses = losses.swapaxes(-2,-3) # [B,seq_len,H,Q] -> [B,Q,seq_len,H] (needed for horizon weighting, H at the end)\n", + "        error = y_hat - y\n", "\n", + "        sq = torch.maximum(-error, torch.zeros_like(error))\n", + "        s1_q = torch.maximum(error, torch.zeros_like(error))\n", + "        \n", + "        quantiles = self.quantiles[None, None, None, :]\n", + "        losses = (1 / len(self.quantiles)) * (quantiles * sq + (1 - quantiles) * s1_q)\n", "        weights = self._compute_weights(y=losses, mask=mask) # Use losses for extra dim\n", - "        # NOTE: Weights do not have Q dimension.\n", "\n", "        return _weighted_mean(losses=losses, weights=weights)" ] }, @@ -1228,9 +1259,9 @@ "        self.sampling_distr = Beta(concentration0 = concentration0,\n", "                                   concentration1 = concentration1)\n", "\n", - "    def update_quantile(self, q: float = 0.5):\n", - "        self.q = q\n", - "        self.output_names = [f\"_ql{q}\"]\n", + "    def update_quantile(self, q: List[float] = [0.5]):\n", + "        self.q = q[0]\n", + "        self.output_names = [f\"_ql{q[0]}\"]\n", "        self.has_predicted = True\n", "\n", "    def domain_map(self, y_hat):\n", @@ -1239,9 +1270,8 @@ "        \n", "        Input shapes to this function:\n", "        \n", - "        base_windows: y_hat = [B, h, 1] \n", - "        base_multivariate: y_hat = [B, h, n_series]\n", - "        base_recurrent: y_hat = [B, seq_len, h, n_series]\n", + "        Univariate: y_hat = [B, h, 1] \n", + "        Multivariate: y_hat = [B, h, N]\n", "        \"\"\"\n", "        if self.eval() and self.has_predicted:\n", "            quantiles = torch.full(size=y_hat.shape, \n", @@ -1259,7 +1289,7 @@ "        emb_outputs = self.output_layer(emb_inputs)\n", "        \n", "        # Domain map\n", - "        y_hat = emb_outputs.squeeze(-1).squeeze(-1)\n", + "        y_hat = emb_outputs.squeeze(-1)\n", "\n", "        return y_hat\n" ] }, @@ -1299,7 +1329,7 @@ "\n", "# Check that quantiles are correctly updated - prediction\n", "check = IQLoss()\n", - "check.update_quantile(0.7)\n", + "check.update_quantile([0.7])\n", "test_eq(check.q, 0.7)" ] }, @@ -1357,19 +1387,6 @@ "outputs": [], "source": [ "#| exporti\n", - "def bernoulli_domain_map(input: torch.Tensor):\n", - "    \"\"\" Bernoulli Domain Map\n", - "    Maps input into distribution constraints, by construction input's \n", - "    last dimension is of matching `distr_args` length.\n", - "\n", - "    **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", - "\n", - " **Returns:**
\n", - " `(probs,)`: tuple with tensors of Poisson distribution arguments.
\n", - " \"\"\"\n", - " return (input.squeeze(-1),)\n", - "\n", "def bernoulli_scale_decouple(output, loc=None, scale=None):\n", " \"\"\" Bernoulli Scale Decouple\n", "\n", @@ -1383,21 +1400,6 @@ " probs = F.sigmoid(probs)#.clone()\n", " return (probs,)\n", "\n", - "def student_domain_map(input: torch.Tensor):\n", - " \"\"\" Student T Domain Map\n", - " Maps input into distribution constraints, by construction input's \n", - " last dimension is of matching `distr_args` length.\n", - "\n", - " **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", - " `eps`: float, helps the initialization of scale for easier optimization.
\n", - "\n", - " **Returns:**
\n", - " `(df, loc, scale)`: tuple with tensors of StudentT distribution arguments.
\n", - " \"\"\"\n", - " df, loc, scale = torch.tensor_split(input, 3, dim=-1)\n", - " return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)\n", - "\n", "def student_scale_decouple(output, loc=None, scale=None, eps: float=0.1):\n", " \"\"\" Normal Scale Decouple\n", "\n", @@ -1413,21 +1415,6 @@ " df = 3.0 + F.softplus(df)\n", " return (df, mean, tscale)\n", "\n", - "def normal_domain_map(input: torch.Tensor):\n", - " \"\"\" Normal Domain Map\n", - " Maps input into distribution constraints, by construction input's \n", - " last dimension is of matching `distr_args` length.\n", - "\n", - " **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", - " `eps`: float, helps the initialization of scale for easier optimization.
\n", - "\n", - " **Returns:**
\n", - " `(mean, std)`: tuple with tensors of Normal distribution arguments.
\n", - " \"\"\"\n", - " mean, std = torch.tensor_split(input, 2, dim=-1)\n", - " return mean.squeeze(-1), std.squeeze(-1)\n", - "\n", "def normal_scale_decouple(output, loc=None, scale=None, eps: float=0.2):\n", " \"\"\" Normal Scale Decouple\n", "\n", @@ -1442,19 +1429,6 @@ " std = (std + eps) * scale\n", " return (mean, std)\n", "\n", - "def poisson_domain_map(input: torch.Tensor):\n", - " \"\"\" Poisson Domain Map\n", - " Maps input into distribution constraints, by construction input's \n", - " last dimension is of matching `distr_args` length.\n", - "\n", - " **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", - "\n", - " **Returns:**
\n", - " `(rate,)`: tuple with tensors of Poisson distribution arguments.
\n", - " \"\"\"\n", - " return (input.squeeze(-1),)\n", - "\n", "def poisson_scale_decouple(output, loc=None, scale=None):\n", " \"\"\" Poisson Scale Decouple\n", "\n", @@ -1467,21 +1441,7 @@ " if (loc is not None) and (scale is not None):\n", " rate = (rate * scale) + loc\n", " rate = F.softplus(rate) + eps\n", - " return (rate,)\n", - "\n", - "def nbinomial_domain_map(input: torch.Tensor):\n", - " \"\"\" Negative Binomial Domain Map\n", - " Maps input into distribution constraints, by construction input's \n", - " last dimension is of matching `distr_args` length.\n", - "\n", - " **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", - "\n", - " **Returns:**
\n", - " `(total_count, alpha)`: tuple with tensors of N.Binomial distribution arguments.
\n", - " \"\"\"\n", - " mu, alpha = torch.tensor_split(input, 2, dim=-1)\n", - " return mu.squeeze(-1), alpha.squeeze(-1)\n", + " return (rate, )\n", "\n", "def nbinomial_scale_decouple(output, loc=None, scale=None):\n", " \"\"\" Negative Binomial Scale Decouple\n", @@ -1550,10 +1510,12 @@ " - [Jorgensen, B. (1987). Exponential Dispersion Models. Journal of the Royal Statistical Society. \n", " Series B (Methodological), 49(2), 127–162. http://www.jstor.org/stable/2345415](http://www.jstor.org/stable/2345415)
\n", " \"\"\"\n", + " arg_constraints = {'log_mu': constraints.real}\n", + " support = constraints.nonnegative\n", + "\n", " def __init__(self, log_mu, rho, validate_args=None):\n", " # TODO: add sigma2 dispersion\n", " # TODO add constraints\n", - " # arg_constraints = {'log_mu': constraints.real, 'rho': constraints.positive}\n", " # support = constraints.real\n", " self.log_mu = log_mu\n", " self.rho = rho\n", @@ -1587,7 +1549,7 @@ " beta = beta.expand(shape)\n", "\n", " N = torch.poisson(rate) + 1e-5\n", - " gamma = torch.distributions.gamma.Gamma(N * alpha, beta)\n", + " gamma = Gamma(N*alpha, beta)\n", " samples = gamma.sample()\n", " samples[N==0] = 0\n", "\n", @@ -1602,12 +1564,12 @@ "\n", " return a - b\n", "\n", - "def tweedie_domain_map(input: torch.Tensor):\n", + "def tweedie_domain_map(input: torch.Tensor, rho: float = 1.5):\n", " \"\"\"\n", " Maps output of neural network to domain of distribution loss\n", "\n", " \"\"\"\n", - " return (input.squeeze(-1),)\n", + " return (input, rho)\n", "\n", "def tweedie_scale_decouple(output, loc=None, scale=None):\n", " \"\"\"Tweedie Scale Decouple\n", @@ -1616,14 +1578,14 @@ " count and logits based on anchoring `loc`, `scale`.\n", " Also adds Tweedie domain protection to the distribution parameters.\n", " \"\"\"\n", - " log_mu = output[0]\n", + " log_mu, rho = output\n", " log_mu = F.softplus(log_mu)\n", " log_mu = torch.clamp(log_mu, 1e-9, 37)\n", " if (loc is not None) and (scale is not None):\n", " log_mu += torch.log(loc)\n", "\n", " log_mu = torch.clamp(log_mu, 1e-9, 37)\n", - " return (log_mu,)" + " return (log_mu, rho)" ] }, { @@ -1687,6 +1649,15 @@ " scale *= t.scale\n", " p = self.base_dist.crps(z)\n", " return p * scale\n", + " \n", + " @property\n", + " def mean(self):\n", + " \"\"\"\n", + " Function used to compute the empirical mean\n", + " \"\"\"\n", + " samples = self.sample([1000])\n", + " return samples.mean(dim=0)\n", + " \n", "\n", "class BaseISQF(Distribution):\n", " \"\"\"\n", @@ -2357,7 +2328,7 @@ " last dimension is of matching `distr_args` length.\n", "\n", " **Parameters:**
\n", - " `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
\n", + " `input`: tensor, of dimensions [B, H, N * n_outputs].
\n", " `tol`: float, tolerance.
\n", " `quantiles`: tensor, quantiles used for ISQF (i.e. x-positions for the knots).
\n", " `num_pieces`: int, num_pieces used for each quantile spline.
\n", @@ -2371,7 +2342,14 @@ " #\n", " # Because in this case the spline knots could be squeezed together\n", " # and cause overflow in spline CRPS computation\n", - " num_qk = len(quantiles) \n", + " num_qk = len(quantiles)\n", + " n_outputs = 2 * (num_qk - 1) * num_pieces + 2 + num_qk\n", + " \n", + " # Reshape: [B, h, N * n_outputs] -> [B, h, N, n_outputs]\n", + " input = input.reshape(input.shape[0],\n", + " input.shape[1],\n", + " -1,\n", + " n_outputs)\n", " start_index = 0\n", " spline_knots = input[..., start_index: start_index + (num_qk - 1) * num_pieces]\n", " start_index += (num_qk - 1) * num_pieces\n", @@ -2381,27 +2359,19 @@ " start_index += 1\n", " beta_r = input[..., start_index: start_index + 1]\n", " start_index += 1\n", - " quantile_knots = input[..., start_index: start_index + num_qk]\n", - "\n", - " qk_y = torch.cat(\n", - " [\n", - " quantile_knots[..., 0:1],\n", - " torch.abs(quantile_knots[..., 1:]) + tol,\n", - " ],\n", - " dim=-1,\n", - " )\n", - " qk_y = torch.cumsum(qk_y, dim=-1)\n", + " quantile_knots = F.softplus(input[..., start_index: start_index + num_qk]) + tol\n", + "\n", + " qk_y = torch.cumsum(quantile_knots, dim=-1)\n", "\n", " # Prevent overflow when we compute 1/beta\n", - " beta_l = torch.abs(beta_l.squeeze(-1)) + tol\n", - " beta_r = torch.abs(beta_r.squeeze(-1)) + tol\n", + " beta_l = F.softplus(beta_l.squeeze(-1)) + tol\n", + " beta_r = F.softplus(beta_r.squeeze(-1)) + tol\n", "\n", " # Reshape spline arguments\n", " batch_shape = spline_knots.shape[:-1]\n", "\n", " # repeat qk_x from (num_qk,) to (*batch_shape, num_qk)\n", - " qk_x_repeat = torch.sort(quantiles)\\\n", - " .values\\\n", + " qk_x_repeat = quantiles\\\n", " .repeat(*batch_shape, 1)\\\n", " .to(input.device)\n", "\n", @@ -2502,15 +2472,6 @@ " NegativeBinomial=NegativeBinomial,\n", " Tweedie=Tweedie,\n", " ISQF=ISQF)\n", - " domain_maps = dict(Bernoulli=bernoulli_domain_map,\n", - " Normal=normal_domain_map,\n", - " Poisson=poisson_domain_map,\n", - " StudentT=student_domain_map,\n", - " NegativeBinomial=nbinomial_domain_map,\n", - " Tweedie=tweedie_domain_map,\n", - " ISQF=partial(isqf_domain_map, \n", - " quantiles=qs, \n", - " num_pieces=num_pieces))\n", " scale_decouples = dict(\n", " Bernoulli=bernoulli_scale_decouple,\n", " Normal=normal_scale_decouple,\n", @@ -2531,9 +2492,24 @@ " [f\"-quantile_knot_{i + 1}\" for i in range(num_qk)],\n", " )\n", " assert (distribution in available_distributions.keys()), f'{distribution} not available'\n", + " if distribution == 'ISQF':\n", + " quantiles = torch.sort(qs).values\n", + " self.domain_map = partial(isqf_domain_map, \n", + " quantiles=quantiles, \n", + " num_pieces=num_pieces)\n", + " if return_params:\n", + " raise Exception(\"ISQF does not support 'return_params=True'\") \n", + " elif distribution == 'Tweedie':\n", + " rho = distribution_kwargs.pop(\"rho\")\n", + " self.domain_map = partial(tweedie_domain_map,\n", + " rho=rho)\n", + " if return_params:\n", + " raise Exception(\"Tweedie does not support 'return_params=True'\") \n", + " else:\n", + " self.domain_map = self._domain_map\n", + "\n", " self.distribution = distribution\n", " self._base_distribution = available_distributions[distribution]\n", - " self.domain_map = domain_maps[distribution]\n", " self.scale_decouple = scale_decouples[distribution]\n", " self.distribution_kwargs = distribution_kwargs\n", " self.num_samples = num_samples \n", @@ -2549,6 +2525,16 @@ "\n", " self.outputsize_multiplier = len(self.param_names)\n", " self.is_distribution_output = True\n", + " 
self.has_predicted = False\n", + "\n", + " def _domain_map(self, input: torch.Tensor):\n", + " \"\"\"\n", + " Maps output of neural network to domain of distribution loss\n", + "\n", + " \"\"\"\n", + " output = torch.tensor_split(input, self.outputsize_multiplier, dim=2)\n", + "\n", + " return output\n", "\n", " def get_distribution(self, distr_args, **distribution_kwargs) -> Distribution:\n", " \"\"\"\n", @@ -2561,10 +2547,10 @@ " **Returns**
\n", " `Distribution`: AffineTransformed distribution.
\n", " \"\"\"\n", - " # TransformedDistribution(distr, [AffineTransform(loc=loc, scale=scale)])\n", " distr = self._base_distribution(*distr_args, **distribution_kwargs)\n", + " self.distr_mean = distr.mean\n", " \n", - " if self.distribution =='Poisson':\n", + " if self.distribution in ('Poisson', 'NegativeBinomial'):\n", " distr.support = constraints.nonnegative\n", " return distr\n", "\n", @@ -2577,7 +2563,7 @@ "\n", " **Parameters**
\n", " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", - " `num_samples`: int=500, overwrite number of samples for the empirical quantiles.
\n", + "        `num_samples`: int, overwrites the number of samples for the empirical quantiles.
\n", "\n", " **Returns**
\n", " `samples`: tensor, shape [B,H,`num_samples`].
\n", @@ -2586,30 +2572,31 @@ " if num_samples is None:\n", " num_samples = self.num_samples\n", "\n", - " # print(distr_args[0].size())\n", - " B, H = distr_args[0].shape[:2]\n", - " Q = len(self.quantiles)\n", - "\n", " # Instantiate Scaled Decoupled Distribution\n", " distr = self.get_distribution(distr_args=distr_args, **self.distribution_kwargs)\n", " samples = distr.sample(sample_shape=(num_samples,))\n", - " samples = samples.permute(1,2,0) # [samples,B,H] -> [B,H,samples]\n", - " samples = samples.view(B*H, num_samples)\n", - " sample_mean = torch.mean(samples, dim=-1)\n", + " samples = samples.permute(1, 2, 3, 0) # [samples, B, H, N] -> [B, H, N, samples]\n", + "\n", + " sample_mean = torch.mean(samples, dim=-1, keepdim=True) \n", "\n", " # Compute quantiles\n", " quantiles_device = self.quantiles.to(distr_args[0].device)\n", " quants = torch.quantile(input=samples, \n", - " q=quantiles_device, dim=1)\n", - " quants = quants.permute((1,0)) # [Q, B*H] -> [B*H, Q]\n", - "\n", - " # Final reshapes\n", - " samples = samples.view(B, H, num_samples)\n", - " sample_mean = sample_mean.view(B, H, 1)\n", - " quants = quants.view(B, H, Q)\n", + " q=quantiles_device, \n", + " dim=-1)\n", + " quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q]\n", "\n", " return samples, sample_mean, quants\n", "\n", + " def update_quantile(self, q: Optional[List[float]] = None):\n", + " if q is not None:\n", + " self.quantiles = nn.Parameter(torch.tensor(q, dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\"] + [f\"_ql{q_i}\" for q_i in q] + self.return_params * self.param_names\n", + " self.has_predicted = True\n", + " elif q is None and self.has_predicted:\n", + " self.quantiles = nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\", \"-median\"] + self.return_params * self.param_names\n", + "\n", " def __call__(self,\n", " y: torch.Tensor,\n", " distr_args: torch.Tensor,\n", @@ -2626,10 +2613,6 @@ " **Parameters**
\n", " `y`: tensor, Actual values.
\n", " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", - " `loc`: Optional tensor, of the same shape as the batch_shape + event_shape\n", - " of the resulting distribution.
\n", - " `scale`: Optional tensor, of the same shape as the batch_shape+event_shape \n", - " of the resulting distribution.
\n", "        `mask`: tensor, Specifies date stamps per series to consider in loss.
\n", "\n", " **Returns**
\n", @@ -2739,7 +2722,8 @@ " \"\"\"\n", " def __init__(self, n_components=10, level=[80, 90], quantiles=None,\n", " num_samples=1000, return_params=False,\n", - " batch_correlation=False, horizon_correlation=False):\n", + " batch_correlation=False, horizon_correlation=False, \n", + " weighted=False):\n", " super(PMM, self).__init__()\n", " # Transform level to MQLoss parameters\n", " qs, self.output_names = level_to_outputs(level)\n", @@ -2753,21 +2737,37 @@ " self.num_samples = num_samples\n", " self.batch_correlation = batch_correlation\n", " self.horizon_correlation = horizon_correlation\n", + " self.weighted = weighted \n", "\n", " # If True, predict_step will return Distribution's parameters\n", " self.return_params = return_params\n", - " if self.return_params:\n", - " self.param_names = [f\"-lambda-{i}\" for i in range(1, n_components + 1)]\n", + "\n", + " lambda_names = [f\"-lambda-{i}\" for i in range(1, n_components + 1)]\n", + " if weighted:\n", + " weight_names = [f\"-weight-{i}\" for i in range(1, n_components + 1)]\n", + " self.param_names = [i for j in zip(lambda_names, weight_names) for i in j]\n", + " else:\n", + " self.param_names = lambda_names\n", + "\n", + " if self.return_params: \n", " self.output_names = self.output_names + self.param_names\n", "\n", " # Add first output entry for the sample_mean\n", " self.output_names.insert(0, \"\")\n", "\n", - " self.outputsize_multiplier = n_components\n", + " self.n_outputs = 1 + weighted\n", + " self.n_components = n_components\n", + " self.outputsize_multiplier = self.n_outputs * n_components\n", " self.is_distribution_output = True\n", + " self.has_predicted = False\n", "\n", " def domain_map(self, output: torch.Tensor):\n", - " return (output,)#, weights\n", + " output = output.reshape(output.shape[0],\n", + " output.shape[1],\n", + " -1,\n", + " self.outputsize_multiplier)\n", + " \n", + " return torch.tensor_split(output, self.n_outputs, dim=-1)\n", " \n", " def scale_decouple(self, \n", " output,\n", @@ -2779,26 +2779,62 @@ " variance and residual location based on anchoring `loc`, `scale`.\n", " Also adds domain protection to the distribution parameters.\n", " \"\"\"\n", - " lambdas = output[0]\n", + " if self.weighted:\n", + " lambdas, weights = output\n", + " weights = F.softmax(weights, dim=-1)\n", + " else:\n", + " lambdas = output[0]\n", + "\n", " if (loc is not None) and (scale is not None):\n", - " loc = loc.view(lambdas.size(dim=0), 1, -1)\n", - " scale = scale.view(lambdas.size(dim=0), 1, -1)\n", + " if loc.ndim == 3:\n", + " loc = loc.unsqueeze(-1)\n", + " scale = scale.unsqueeze(-1)\n", " lambdas = (lambdas * scale) + loc\n", - " lambdas = F.softplus(lambdas)\n", - " return (lambdas,)\n", "\n", - " def sample(self, distr_args, num_samples=None):\n", + " lambdas = F.softplus(lambdas) + 1e-3\n", + " \n", + " if self.weighted:\n", + " return (lambdas, weights)\n", + " else:\n", + " return (lambdas, )\n", + " \n", + " def get_distribution(self, distr_args) -> Distribution:\n", + " \"\"\"\n", + " Construct the associated Pytorch Distribution, given the collection of\n", + " constructor arguments and, optionally, location and scale tensors.\n", + "\n", + " **Parameters**
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + "\n", + " **Returns**
\n", + "        `Distribution`: the constructed `MixtureSameFamily` distribution.
\n", + " \"\"\"\n", + " if self.weighted:\n", + " lambdas, weights = distr_args\n", + " else:\n", + " lambdas = distr_args[0]\n", + " weights = torch.full_like(lambdas, fill_value=1 / self.n_components)\n", + "\n", + " mix = Categorical(weights)\n", + " components = Poisson(rate=lambdas)\n", + " components.support = constraints.nonnegative\n", + " distr = MixtureSameFamily(mixture_distribution=mix,\n", + " component_distribution=components) \n", + "\n", + " self.distr_mean = distr.mean\n", + " \n", + " return distr\n", + "\n", + " def sample(self,\n", + " distr_args: torch.Tensor,\n", + " num_samples: Optional[int] = None):\n", " \"\"\"\n", " Construct the empirical quantiles from the estimated Distribution,\n", " sampling from it `num_samples` independently.\n", "\n", " **Parameters**
\n", " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", - " `loc`: Optional tensor, of the same shape as the batch_shape + event_shape\n", - " of the resulting distribution.
\n", - " `scale`: Optional tensor, of the same shape as the batch_shape+event_shape \n", - " of the resulting distribution.
\n", - " `num_samples`: int=500, overwrites number of samples for the empirical quantiles.
\n", + "        `num_samples`: int, overwrites the number of samples for the empirical quantiles.
\n", "\n", " **Returns**
\n", " `samples`: tensor, shape [B,H,`num_samples`].
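PMM's new `get_distribution` above defers the mixture mechanics to torch, and the reworked `sample` below draws from it directly. A standalone sketch of that path, with uniform weights as in the `weighted=False` default (all shapes and names here are illustrative):

```python
import torch
from torch.distributions import Categorical, MixtureSameFamily, Poisson

# Uniform mixture of K Poisson components per (batch, horizon, series) cell.
B, H, N, K = 2, 12, 1, 3
lambdas = torch.rand(B, H, N, K) * 10 + 0.1
weights = torch.full_like(lambdas, 1 / K)

distr = MixtureSameFamily(
    mixture_distribution=Categorical(probs=weights),
    component_distribution=Poisson(rate=lambdas),
)
samples = distr.sample((1000,)).permute(1, 2, 3, 0)   # [B, H, N, samples]
quants = torch.quantile(samples, torch.tensor([0.1, 0.5, 0.9]), dim=-1)
print(quants.permute(1, 2, 3, 0).shape)               # [B, H, N, Q]
```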
\n", @@ -2807,93 +2843,65 @@ " if num_samples is None:\n", " num_samples = self.num_samples\n", "\n", - " lambdas = distr_args[0]\n", - " B, H, K = lambdas.size()\n", - " Q = len(self.quantiles)\n", - "\n", - " # Sample K ~ Mult(weights)\n", - " # shared across B, H\n", - " # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2)\n", - " weights = (1/K) * torch.ones_like(lambdas, device=lambdas.device)\n", - "\n", - " # Avoid loop, vectorize\n", - " weights = weights.reshape(-1, K)\n", - " lambdas = lambdas.flatten() \n", - "\n", - " # Vectorization trick to recover row_idx\n", - " sample_idxs = torch.multinomial(input=weights, \n", - " num_samples=num_samples,\n", - " replacement=True)\n", - " aux_col_idx = torch.unsqueeze(torch.arange(B * H, device=lambdas.device), -1) * K\n", - "\n", - " # To device\n", - " sample_idxs = sample_idxs.to(lambdas.device)\n", - "\n", - " sample_idxs = sample_idxs + aux_col_idx\n", - " sample_idxs = sample_idxs.flatten()\n", - "\n", - " sample_lambdas = lambdas[sample_idxs]\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " samples = distr.sample(sample_shape=(num_samples,))\n", + " samples = samples.permute(1, 2, 3, 0) # [samples, B, H, N] -> [B, H, N, samples]\n", "\n", - " # Sample y ~ Poisson(lambda) independently\n", - " samples = torch.poisson(sample_lambdas).to(lambdas.device)\n", - " samples = samples.view(B*H, num_samples)\n", - " sample_mean = torch.mean(samples, dim=-1)\n", + " sample_mean = torch.mean(samples, dim=-1, keepdim=True) \n", "\n", " # Compute quantiles\n", - " quantiles_device = self.quantiles.to(lambdas.device)\n", - " quants = torch.quantile(input=samples, q=quantiles_device, dim=1)\n", - " quants = quants.permute((1,0)) # Q, B*H\n", - "\n", - " # Final reshapes\n", - " samples = samples.view(B, H, num_samples)\n", - " sample_mean = sample_mean.view(B, H, 1)\n", - " quants = quants.view(B, H, Q)\n", + " quantiles_device = self.quantiles.to(distr_args[0].device)\n", + " quants = torch.quantile(input=samples, \n", + " q=quantiles_device, \n", + " dim=-1)\n", + " quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q]\n", "\n", " return samples, sample_mean, quants\n", " \n", - " def neglog_likelihood(self,\n", - " y: torch.Tensor,\n", - " distr_args: Tuple[torch.Tensor],\n", - " mask: Union[torch.Tensor, None] = None,):\n", - " if mask is None: \n", - " mask = (y > 0) * 1\n", - " else:\n", - " mask = mask * ((y > 0) * 1)\n", + " def update_quantile(self, q: Optional[List[float]] = None):\n", + " if q is not None:\n", + " self.quantiles = nn.Parameter(torch.tensor(q, dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\"] + [f\"_ql{q_i}\" for q_i in q] + self.return_params * self.param_names\n", + " self.has_predicted = True\n", + " elif q is None and self.has_predicted:\n", + " self.quantiles = nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=False) \n", + " self.output_names = [\"\", \"-median\"] + self.return_params * self.param_names\n", "\n", - " eps = 1e-10\n", - " lambdas = distr_args[0]\n", - " B, H, K = lambdas.size()\n", + " def __call__(self,\n", + " y: torch.Tensor,\n", + " distr_args: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None):\n", + " \"\"\"\n", + " Computes the negative log-likelihood objective function. 
\n", + "        To estimate the following predictive distribution:\n", "\n", + "        $$\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta) \\quad \mathrm{and} \\quad -\log(\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta))$$\n", "\n", + "        where $\\theta$ represents the distribution's parameters. It additionally \n", + "        summarizes the objective signal using a weighted average over the `mask` tensor. \n", "\n", + "        **Parameters**
\n", + " `y`: tensor, Actual values.
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + "        `mask`: tensor, Specifies date stamps per series to consider in loss.
\n", "\n", + " **Returns**
\n", + " `loss`: scalar, weighted loss function against which backpropagation will be performed.
\n", + " \"\"\"\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " x = distr._pad(y)\n", + " log_prob_x = distr.component_distribution.log_prob(x)\n", + " log_mix_prob = torch.log_softmax(distr.mixture_distribution.logits, dim=-1)\n", " if self.batch_correlation:\n", - " log_pi = torch.sum(log_pi, dim=0, keepdim=True)\n", - "\n", + " log_prob_x = torch.sum(log_prob_x, dim=0, keepdim=True)\n", " if self.horizon_correlation:\n", - " log_pi = torch.sum(log_pi, dim=1, keepdim=True)\n", - "\n", - " # Numerically Stable Mixture loglikelihood\n", - " loglik = torch.logsumexp((torch.log(weights) + log_pi), dim=2, keepdim=True)\n", - " loglik = loglik * mask\n", - "\n", - " mean = torch.sum(weights * lambdas, axis=-1, keepdims=True)\n", - " reglrz = torch.mean(torch.square(y - mean) * mask)\n", - " loss = -torch.mean(loglik) + 0.001 * reglrz\n", - " return loss\n", - "\n", - " def __call__(self, y: torch.Tensor,\n", - " distr_args: Tuple[torch.Tensor],\n", - " mask: Union[torch.Tensor, None] = None):\n", - "\n", - " return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask)\n" + " log_prob_x = torch.sum(log_prob_x, dim=1, keepdim=True)\n", + " \n", + " loss_values = -torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) \n", + " \n", + " return weighted_average(loss_values, weights=mask)\n" ] }, { @@ -2967,30 +2975,31 @@ "outputs": [], "source": [ "#| hide\n", - "# Create single mixture and broadcast to N,H,K\n", - "weights = torch.ones((1,3))[None, :, :]\n", - "lambdas = torch.Tensor([[5,10,15], [10,20,30]])[None, :, :]\n", + "# Create single mixture and broadcast to N,H,1,K\n", + "weights = torch.ones((1,3))[None, :, :].unsqueeze(2)\n", + "lambdas = torch.Tensor([[5,10,15], [10,20,30]])[None, :, :].unsqueeze(2)\n", "\n", "# Create repetitions for the batch dimension N.\n", "N=2\n", "weights = torch.repeat_interleave(input=weights, repeats=N, dim=0)\n", "lambdas = torch.repeat_interleave(input=lambdas, repeats=N, dim=0)\n", "\n", - "print('weights.shape (N,H,K) \\t', weights.shape)\n", - "print('lambdas.shape (N,H,K) \\t', lambdas.shape)\n", + "print('weights.shape (N,H,1,K) \\t', weights.shape)\n", + "print('lambdas.shape (N,H,1, K) \\t', lambdas.shape)\n", "\n", - "distr = PMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9])\n", - "distr_args = (lambdas,)\n", + "distr = PMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9], weighted=True)\n", + "weights = torch.ones_like(lambdas)\n", + "distr_args = (lambdas, weights)\n", "samples, sample_mean, quants = distr.sample(distr_args)\n", "\n", - "print('samples.shape (N,H,num_samples) ', samples.shape)\n", - "print('sample_mean.shape (N,H) ', sample_mean.shape)\n", - "print('quants.shape (N,H,Q) \\t\\t', quants.shape)\n", + "print('samples.shape (N,H,1,num_samples) ', samples.shape)\n", + "print('sample_mean.shape (N,H,1,1) ', sample_mean.shape)\n", + "print('quants.shape (N,H,1,Q) \\t\\t', quants.shape)\n", "\n", "# Plot synthethic data\n", "x_plot = range(quants.shape[1]) # H length\n", - "y_plot_hat = quants[0,:,:] # Filter N,G,T -> H,Q\n", - "samples_hat = samples[0,:,:] # Filter N,G,T -> H,num_samples\n", + "y_plot_hat = quants[0,:,0,:] # Filter N,G,T -> H,Q\n", + "samples_hat = samples[0,:,0,:] # Filter N,G,T -> H,num_samples\n", "\n", "# Kernel density plot for single forecast horizon \\tau = t+1\n", "fig, ax = plt.subplots(figsize=(3.7, 2.9))\n", @@ -3065,7 +3074,8 @@ " \"\"\"\n", " def __init__(self, n_components=1, level=[80, 90], quantiles=None, \n", " num_samples=1000, 
return_params=False,\n", - " batch_correlation=False, horizon_correlation=False):\n", + " batch_correlation=False, horizon_correlation=False,\n", + " weighted=False):\n", " super(GMM, self).__init__()\n", " # Transform level to MQLoss parameters\n", " qs, self.output_names = level_to_outputs(level)\n", @@ -3078,25 +3088,41 @@ " self.quantiles = torch.nn.Parameter(qs, requires_grad=False)\n", " self.num_samples = num_samples\n", " self.batch_correlation = batch_correlation\n", - " self.horizon_correlation = horizon_correlation \n", + " self.horizon_correlation = horizon_correlation \n", + " self.weighted = weighted \n", "\n", " # If True, predict_step will return Distribution's parameters\n", " self.return_params = return_params\n", + "\n", + " mu_names = [f\"-mu-{i}\" for i in range(1, n_components + 1)]\n", + " std_names = [f\"-std-{i}\" for i in range(1, n_components + 1)]\n", + " if weighted:\n", + " weight_names = [f\"-weight-{i}\" for i in range(1, n_components + 1)]\n", + " self.param_names = [\n", + " i for j in zip(mu_names, std_names, weight_names) for i in j\n", + " ]\n", + " else:\n", + " self.param_names = [i for j in zip(mu_names, std_names) for i in j]\n", + "\n", " if self.return_params:\n", - " mu_names = [f\"-mu-{i}\" for i in range(1, n_components + 1)]\n", - " std_names = [f\"-std-{i}\" for i in range(1, n_components + 1)]\n", - " mu_std_names = [i for j in zip(mu_names, std_names) for i in j]\n", - " self.output_names = self.output_names + mu_std_names\n", + " self.output_names = self.output_names + self.param_names\n", "\n", " # Add first output entry for the sample_mean\n", " self.output_names.insert(0, \"\")\n", "\n", - " self.outputsize_multiplier = 2 * n_components\n", + " self.n_outputs = 2 + weighted\n", + " self.n_components = n_components\n", + " self.outputsize_multiplier = self.n_outputs * n_components\n", " self.is_distribution_output = True\n", + " self.has_predicted = False\n", "\n", " def domain_map(self, output: torch.Tensor):\n", - " means, stds = torch.tensor_split(output, 2, dim=-1)\n", - " return (means, stds)\n", + " output = output.reshape(output.shape[0],\n", + " output.shape[1],\n", + " -1,\n", + " self.outputsize_multiplier)\n", + " \n", + " return torch.tensor_split(output, self.n_outputs, dim=-1)\n", "\n", " def scale_decouple(self, \n", " output,\n", @@ -3109,27 +3135,61 @@ " variance and residual location based on anchoring `loc`, `scale`.\n", " Also adds domain protection to the distribution parameters.\n", " \"\"\"\n", - " means, stds = output\n", + " if self.weighted:\n", + " means, stds, weights = output\n", + " weights = F.softmax(weights, dim=-1)\n", + " else:\n", + " means, stds = output\n", + " \n", " stds = F.softplus(stds)\n", " if (loc is not None) and (scale is not None):\n", - " loc = loc.view(means.size(dim=0), 1, -1)\n", - " scale = scale.view(means.size(dim=0), 1, -1) \n", + " if loc.ndim == 3:\n", + " loc = loc.unsqueeze(-1)\n", + " scale = scale.unsqueeze(-1)\n", " means = (means * scale) + loc\n", " stds = (stds + eps) * scale\n", - " return (means, stds)\n", + " \n", + " if self.weighted:\n", + " return (means, stds, weights)\n", + " else:\n", + " return (means, stds)\n", + "\n", + " def get_distribution(self, distr_args) -> Distribution:\n", + " \"\"\"\n", + " Construct the associated Pytorch Distribution, given the collection of\n", + " constructor arguments and, optionally, location and scale tensors.\n", "\n", - " def sample(self, distr_args, num_samples=None):\n", + " **Parameters**
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + "\n", + " **Returns**
\n", + "        `Distribution`: the constructed `MixtureSameFamily` distribution.
\n", + " \"\"\"\n", + " if self.weighted:\n", + " means, stds, weights = distr_args\n", + " else:\n", + " means, stds = distr_args\n", + " weights = torch.full_like(means, fill_value=1 / self.n_components)\n", + " \n", + " mix = Categorical(weights)\n", + " components = Normal(loc=means, scale=stds)\n", + " distr = MixtureSameFamily(mixture_distribution=mix,\n", + " component_distribution=components) \n", + "\n", + " self.distr_mean = distr.mean\n", + " \n", + " return distr\n", + "\n", + " def sample(self,\n", + " distr_args: torch.Tensor,\n", + " num_samples: Optional[int] = None):\n", " \"\"\"\n", " Construct the empirical quantiles from the estimated Distribution,\n", " sampling from it `num_samples` independently.\n", "\n", " **Parameters**
\n", " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", - " `loc`: Optional tensor, of the same shape as the batch_shape + event_shape\n", - " of the resulting distribution.
\n", - " `scale`: Optional tensor, of the same shape as the batch_shape+event_shape \n", - " of the resulting distribution.
\n", - " `num_samples`: int=500, number of samples for the empirical quantiles.
\n", + "        `num_samples`: int, overwrites the number of samples for the empirical quantiles.
\n", "\n", " **Returns**
\n", " `samples`: tensor, shape [B,H,`num_samples`].
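The rewritten GMM `__call__` below computes the mixture negative log-likelihood from component log-probabilities plus log mixture weights. A sketch checking that, without the correlation flags, this matches `-MixtureSameFamily.log_prob` exactly; it reuses the same private `_pad` helper the diff relies on (all other names are illustrative):

```python
import torch
from torch.distributions import Categorical, MixtureSameFamily, Normal

B, H, N, K = 2, 12, 1, 3
means, stds = torch.randn(B, H, N, K), torch.rand(B, H, N, K) + 0.1
weights = torch.full_like(means, 1 / K)
distr = MixtureSameFamily(Categorical(probs=weights), Normal(means, stds))

y = torch.randn(B, H, N)
x = distr._pad(y)                                    # align y to the component axis
log_prob_x = distr.component_distribution.log_prob(x)
log_mix = torch.log_softmax(distr.mixture_distribution.logits, dim=-1)
nll = -torch.logsumexp(log_prob_x + log_mix, dim=-1)
assert torch.allclose(nll, -distr.log_prob(y), atol=1e-5)
```

The `batch_correlation` and `horizon_correlation` flags only change where the component log-probabilities are summed before the `logsumexp`.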
\n", @@ -3137,94 +3197,65 @@ " \"\"\"\n", " if num_samples is None:\n", " num_samples = self.num_samples\n", - " \n", - " means, stds = distr_args\n", - " B, H, K = means.size()\n", - " Q = len(self.quantiles)\n", - " assert means.shape == stds.shape\n", - "\n", - " # Sample K ~ Mult(weights)\n", - " # shared across B, H\n", - " # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2)\n", - " \n", - " weights = (1/K) * torch.ones_like(means, device=means.device)\n", - " \n", - " # Avoid loop, vectorize\n", - " weights = weights.reshape(-1, K)\n", - " means = means.flatten()\n", - " stds = stds.flatten()\n", - "\n", - " # Vectorization trick to recover row_idx\n", - " sample_idxs = torch.multinomial(input=weights, \n", - " num_samples=num_samples,\n", - " replacement=True)\n", - " aux_col_idx = torch.unsqueeze(torch.arange(B * H, device=means.device),-1) * K\n", - "\n", - " # To device\n", - " sample_idxs = sample_idxs.to(means.device)\n", "\n", - " sample_idxs = sample_idxs + aux_col_idx\n", - " sample_idxs = sample_idxs.flatten()\n", - "\n", - " sample_means = means[sample_idxs]\n", - " sample_stds = stds[sample_idxs]\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " samples = distr.sample(sample_shape=(num_samples,))\n", + " samples = samples.permute(1, 2, 3, 0) # [samples, B, H, N] -> [B, H, N, samples]\n", "\n", - " # Sample y ~ Normal(mu, std) independently\n", - " samples = torch.normal(sample_means, sample_stds).to(means.device)\n", - " samples = samples.view(B*H, num_samples)\n", - " sample_mean = torch.mean(samples, dim=-1)\n", + " sample_mean = torch.mean(samples, dim=-1, keepdim=True) \n", "\n", " # Compute quantiles\n", - " quantiles_device = self.quantiles.to(means.device)\n", - " quants = torch.quantile(input=samples, q=quantiles_device, dim=1)\n", - " quants = quants.permute((1,0)) # Q, B*H\n", - "\n", - " # Final reshapes\n", - " samples = samples.view(B, H, num_samples)\n", - " sample_mean = sample_mean.view(B, H, 1)\n", - " quants = quants.view(B, H, Q)\n", + " quantiles_device = self.quantiles.to(distr_args[0].device)\n", + " quants = torch.quantile(input=samples, \n", + " q=quantiles_device, \n", + " dim=-1)\n", + " quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q]\n", "\n", " return samples, sample_mean, quants\n", + " \n", + " def update_quantile(self, q: Optional[List[float]] = None):\n", + " if q is not None:\n", + " self.quantiles = nn.Parameter(torch.tensor(q, dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\"] + [f\"_ql{q_i}\" for q_i in q] + self.return_params * self.param_names\n", + " self.has_predicted = True\n", + " elif q is None and self.has_predicted:\n", + " self.quantiles = nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=False) \n", + " self.output_names = [\"\", \"-median\"] + self.return_params * self.param_names\n", "\n", - " def neglog_likelihood(self,\n", - " y: torch.Tensor,\n", - " distr_args: Tuple[torch.Tensor, torch.Tensor],\n", - " mask: Union[torch.Tensor, None] = None):\n", - "\n", - " if mask is None: \n", - " mask = torch.ones_like(y)\n", - " \n", - " means, stds = distr_args\n", - " B, H, K = means.size()\n", - " \n", - " weights = (1/K) * torch.ones_like(means, device=means.device)\n", - " \n", - " y = y[:,:, None]\n", - " mask = mask[:,:,None]\n", - " \n", - " var = stds ** 2\n", - " log_stds = torch.log(stds)\n", - " log_pi = - ((y - means) ** 2 / (2 * var)) - log_stds \\\n", - " - 
math.log(math.sqrt(2 * math.pi))\n", "\n", - "        if self.batch_correlation:\n", - "            log_pi = torch.sum(log_pi, dim=0, keepdim=True)\n", + "    def __call__(self,\n", + "                 y: torch.Tensor,\n", + "                 distr_args: torch.Tensor,\n", + "                 mask: Union[torch.Tensor, None] = None):\n", + "        \"\"\"\n", + "        Computes the negative log-likelihood objective function. \n", + "        To estimate the following predictive distribution:\n", "\n", - "        if self.horizon_correlation: \n", - "            log_pi = torch.sum(log_pi, dim=1, keepdim=True)\n", + "        $$\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta) \\quad \mathrm{and} \\quad -\log(\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta))$$\n", "\n", - "        # Numerically Stable Mixture loglikelihood\n", - "        loglik = torch.logsumexp((torch.log(weights) + log_pi), dim=2, keepdim=True)\n", - "        loglik = loglik * mask\n", + "        where $\\theta$ represents the distribution's parameters. It additionally \n", + "        summarizes the objective signal using a weighted average over the `mask` tensor. \n", "\n", - "        loss = -torch.mean(loglik)\n", - "        return loss\n", - "    \n", - "    def __call__(self, y: torch.Tensor,\n", - "                 distr_args: Tuple[torch.Tensor, torch.Tensor],\n", - "                 mask: Union[torch.Tensor, None] = None,):\n", + "        **Parameters**
\n", + " `y`: tensor, Actual values.
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + "        `mask`: tensor, Specifies date stamps per series to consider in loss.
\n", "\n", - " return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask)" + " **Returns**
\n", + " `loss`: scalar, weighted loss function against which backpropagation will be performed.
\n", + " \"\"\"\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " x = distr._pad(y)\n", + " log_prob_x = distr.component_distribution.log_prob(x)\n", + " log_mix_prob = torch.log_softmax(distr.mixture_distribution.logits, dim=-1)\n", + " if self.batch_correlation:\n", + " log_prob_x = torch.sum(log_prob_x, dim=0, keepdim=True)\n", + " if self.horizon_correlation:\n", + " log_prob_x = torch.sum(log_prob_x, dim=1, keepdim=True)\n", + " loss_values = -torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) \n", + " \n", + " return weighted_average(loss_values, weights=mask)" ] }, { @@ -3298,8 +3329,8 @@ "outputs": [], "source": [ "#| hide\n", - "# Create single mixture and broadcast to N,H,K\n", - "means = torch.Tensor([[5,10,15], [10,20,30]])[None, :, :]\n", + "# Create single mixture and broadcast to N,H,1,K\n", + "means = torch.Tensor([[5,10,15], [10,20,30]])[None, :, :].unsqueeze(2)\n", "\n", "# # Create repetitions for the batch dimension N.\n", "N=2\n", @@ -3307,22 +3338,22 @@ "weights = torch.ones_like(means)\n", "stds = torch.ones_like(means)\n", "\n", - "print('weights.shape (N,H,K) \\t', weights.shape)\n", - "print('means.shape (N,H,K) \\t', means.shape)\n", - "print('stds.shape (N,H,K) \\t', stds.shape)\n", + "print('weights.shape (N,H,1,K) \\t', weights.shape)\n", + "print('means.shape (N,H,1,K) \\t', means.shape)\n", + "print('stds.shape (N,H,1,K) \\t', stds.shape)\n", "\n", - "distr = GMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9])\n", - "distr_args = (means, stds)\n", + "distr = GMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9], weighted=True)\n", + "distr_args = (means, stds, weights)\n", "samples, sample_mean, quants = distr.sample(distr_args)\n", "\n", - "print('samples.shape (N,H,num_samples) ', samples.shape)\n", - "print('sample_mean.shape (N,H) ', sample_mean.shape)\n", - "print('quants.shape (N,H,Q) \\t\\t', quants.shape)\n", + "print('samples.shape (N,H,1,num_samples) ', samples.shape)\n", + "print('sample_mean.shape (N,H,1,1) ', sample_mean.shape)\n", + "print('quants.shape (N,H,1, Q) \\t\\t', quants.shape)\n", "\n", "# Plot synthethic data\n", "x_plot = range(quants.shape[1]) # H length\n", - "y_plot_hat = quants[0,:,:] # Filter N,G,T -> H,Q\n", - "samples_hat = samples[0,:,:] # Filter N,G,T -> H,num_samples\n", + "y_plot_hat = quants[0,:,0,:] # Filter N,G,T -> H,Q\n", + "samples_hat = samples[0,:,0,:] # Filter N,G,T -> H,num_samples\n", "\n", "# Kernel density plot for single forecast horizon \\tau = t+1\n", "fig, ax = plt.subplots(figsize=(3.7, 2.9))\n", @@ -3396,7 +3427,7 @@ " Journal Forecasting, Working paper available at arxiv.](https://arxiv.org/pdf/2110.13179.pdf)\n", " \"\"\"\n", " def __init__(self, n_components=1, level=[80, 90], quantiles=None, \n", - " num_samples=1000, return_params=False):\n", + " num_samples=1000, return_params=False, weighted=False):\n", " super(NBMM, self).__init__()\n", " # Transform level to MQLoss parameters\n", " qs, self.output_names = level_to_outputs(level)\n", @@ -3408,24 +3439,40 @@ " qs = torch.Tensor(quantiles)\n", " self.quantiles = torch.nn.Parameter(qs, requires_grad=False)\n", " self.num_samples = num_samples\n", + " self.weighted = weighted \n", "\n", " # If True, predict_step will return Distribution's parameters\n", " self.return_params = return_params\n", + "\n", + " total_count_names = [f\"-total_count-{i}\" for i in range(1, n_components + 1)]\n", + " probs_names = [f\"-probs-{i}\" for i in range(1, n_components + 1)]\n", + " if weighted:\n", + " weight_names 
= [f\"-weight-{i}\" for i in range(1, n_components + 1)]\n", + " self.param_names = [\n", + " i for j in zip(total_count_names, probs_names, weight_names) for i in j\n", + " ]\n", + " else:\n", + " self.param_names = [i for j in zip(total_count_names, probs_names) for i in j]\n", + "\n", " if self.return_params:\n", - " total_count_names = [f\"-total_count-{i}\" for i in range(1, n_components + 1)]\n", - " probs_names = [f\"-probs-{i}\" for i in range(1, n_components + 1)]\n", - " param_names = [i for j in zip(total_count_names, probs_names) for i in j]\n", - " self.output_names = self.output_names + param_names\n", + " self.output_names = self.output_names + self.param_names\n", "\n", " # Add first output entry for the sample_mean\n", " self.output_names.insert(0, \"\") \n", "\n", - " self.outputsize_multiplier = 2 * n_components\n", + " self.n_outputs = 2 + weighted\n", + " self.n_components = n_components\n", + " self.outputsize_multiplier = self.n_outputs * n_components\n", " self.is_distribution_output = True\n", + " self.has_predicted = False\n", "\n", " def domain_map(self, output: torch.Tensor):\n", - " mu, alpha = torch.tensor_split(output, 2, dim=-1)\n", - " return (mu, alpha)\n", + " output = output.reshape(output.shape[0],\n", + " output.shape[1],\n", + " -1,\n", + " self.outputsize_multiplier)\n", + " \n", + " return torch.tensor_split(output, self.n_outputs, dim=-1)\n", "\n", " def scale_decouple(self, \n", " output,\n", @@ -3439,11 +3486,18 @@ " Also adds domain protection to the distribution parameters.\n", " \"\"\"\n", " # Efficient NBinomial parametrization\n", - " mu, alpha = output\n", + " if self.weighted:\n", + " mu, alpha, weights = output\n", + " weights = F.softmax(weights, dim=-1)\n", + " else:\n", + " mu, alpha = output\n", + "\n", " mu = F.softplus(mu) + 1e-8\n", " alpha = F.softplus(alpha) + 1e-8 # alpha = 1/total_counts\n", " if (loc is not None) and (scale is not None):\n", - " loc = loc.view(mu.size(dim=0), 1, -1)\n", + " if loc.ndim == 3:\n", + " loc = loc.unsqueeze(-1)\n", + " scale = scale.unsqueeze(-1) \n", " mu *= loc\n", " alpha /= (loc + 1.)\n", "\n", @@ -3452,20 +3506,48 @@ " # => probs = mu / [total_count * (1 + mu * (1/total_count))]\n", " total_count = 1.0 / alpha\n", " probs = (mu * alpha / (1.0 + mu * alpha)) + 1e-8 \n", - " return (total_count, probs)\n", + " if self.weighted:\n", + " return (total_count, probs, weights)\n", + " else:\n", + " return (total_count, probs)\n", + "\n", + " def get_distribution(self, distr_args) -> Distribution:\n", + " \"\"\"\n", + " Construct the associated Pytorch Distribution, given the collection of\n", + " constructor arguments and, optionally, location and scale tensors.\n", + "\n", + " **Parameters**
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + "\n", + " **Returns**
\n", + " `Distribution`: AffineTransformed distribution.
\n", + " \"\"\"\n", + " if self.weighted:\n", + " total_count, probs, weights = distr_args\n", + " else:\n", + " total_count, probs = distr_args\n", + " weights = torch.full_like(total_count, fill_value=1 / self.n_components)\n", + "\n", + " mix = Categorical(weights)\n", + " components = NegativeBinomial(total_count, probs)\n", + " components.support = constraints.nonnegative\n", + " distr = MixtureSameFamily(mixture_distribution=mix,\n", + " component_distribution=components) \n", "\n", - " def sample(self, distr_args, num_samples=None):\n", + " self.distr_mean = distr.mean\n", + " \n", + " return distr\n", + "\n", + " def sample(self,\n", + " distr_args: torch.Tensor,\n", + " num_samples: Optional[int] = None):\n", " \"\"\"\n", " Construct the empirical quantiles from the estimated Distribution,\n", " sampling from it `num_samples` independently.\n", "\n", " **Parameters**
\n", " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", - " `loc`: Optional tensor, of the same shape as the batch_shape + event_shape\n", - " of the resulting distribution.
\n", - " `scale`: Optional tensor, of the same shape as the batch_shape+event_shape \n", - " of the resulting distribution.
\n", - " `num_samples`: int=500, number of samples for the empirical quantiles.
\n", + " `num_samples`: int, overwrite number of samples for the empirical quantiles.
\n", "\n", " **Returns**
\n", " `samples`: tensor, shape [B,H,`num_samples`].
\n", @@ -3473,97 +3555,59 @@ " \"\"\"\n", " if num_samples is None:\n", " num_samples = self.num_samples\n", - " \n", - " total_count, probs = distr_args\n", - " B, H, K = total_count.size()\n", - " Q = len(self.quantiles)\n", - " assert total_count.shape == probs.shape\n", - "\n", - " # Sample K ~ Mult(weights)\n", - " # shared across B, H\n", - " # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2)\n", - " \n", - " weights = (1/K) * torch.ones_like(probs, device=probs.device)\n", - " \n", - " # Avoid loop, vectorize\n", - " weights = weights.reshape(-1, K)\n", - " total_count = total_count.flatten()\n", - " probs = probs.flatten()\n", - "\n", - " # Vectorization trick to recover row_idx\n", - " sample_idxs = torch.multinomial(input=weights, \n", - " num_samples=num_samples,\n", - " replacement=True)\n", - " aux_col_idx = torch.unsqueeze(torch.arange(B * H, device=probs.device),-1) * K\n", - "\n", - " # To device\n", - " sample_idxs = sample_idxs.to(probs.device)\n", - "\n", - " sample_idxs = sample_idxs + aux_col_idx\n", - " sample_idxs = sample_idxs.flatten()\n", "\n", - " sample_total_count = total_count[sample_idxs]\n", - " sample_probs = probs[sample_idxs]\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " samples = distr.sample(sample_shape=(num_samples,))\n", + " samples = samples.permute(1, 2, 3, 0) # [samples, B, H, N] -> [B, H, N, samples]\n", "\n", - " # Sample y ~ NBinomial(total_count, probs) independently\n", - " dist = NegativeBinomial(total_count=sample_total_count, \n", - " probs=sample_probs)\n", - " samples = dist.sample(sample_shape=(1,)).to(probs.device)[0]\n", - " samples = samples.view(B*H, num_samples)\n", - " sample_mean = torch.mean(samples, dim=-1)\n", + " sample_mean = torch.mean(samples, dim=-1, keepdim=True) \n", "\n", " # Compute quantiles\n", - " quantiles_device = self.quantiles.to(probs.device)\n", - " quants = torch.quantile(input=samples, q=quantiles_device, dim=1)\n", - " quants = quants.permute((1,0)) # Q, B*H\n", - "\n", - " # Final reshapes\n", - " samples = samples.view(B, H, num_samples)\n", - " sample_mean = sample_mean.view(B, H, 1)\n", - " quants = quants.view(B, H, Q)\n", + " quantiles_device = self.quantiles.to(distr_args[0].device)\n", + " quants = torch.quantile(input=samples, \n", + " q=quantiles_device, \n", + " dim=-1)\n", + " quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q]\n", "\n", " return samples, sample_mean, quants\n", "\n", - " def neglog_likelihood(self,\n", - " y: torch.Tensor,\n", - " distr_args: Tuple[torch.Tensor, torch.Tensor],\n", - " mask: Union[torch.Tensor, None] = None):\n", + " def update_quantile(self, q: Optional[List[float]] = None):\n", + " if q is not None:\n", + " self.quantiles = nn.Parameter(torch.tensor(q, dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\"] + [f\"_ql{q_i}\" for q_i in q] + self.return_params * self.param_names\n", + " self.has_predicted = True\n", + " elif q is None and self.has_predicted:\n", + " self.quantiles = nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=False)\n", + " self.output_names = [\"\", \"-median\"] + self.return_params * self.param_names\n", "\n", - " if mask is None: \n", - " mask = torch.ones_like(y)\n", - " \n", - " total_count, probs = distr_args\n", - " B, H, K = total_count.size()\n", - " \n", - " weights = (1/K) * torch.ones_like(probs, device=probs.device)\n", - " \n", - " y = y[:,:, None]\n", - " mask = 
mask[:,:,None]\n", - "\n", - " log_unnormalized_prob = (total_count * torch.log(1.-probs) + y * torch.log(probs))\n", - " log_normalization = (-torch.lgamma(total_count + y) + torch.lgamma(1. + y) +\n", - " torch.lgamma(total_count))\n", - " log_normalization[total_count + y == 0.] = 0.\n", - " log = log_unnormalized_prob - log_normalization\n", - "\n", - " #log = torch.sum(log, dim=0, keepdim=True) # Joint within batch/group\n", - " #log = torch.sum(log, dim=1, keepdim=True) # Joint within horizon\n", - "\n", - " # Numerical stability mixture and loglik\n", - " log_max = torch.amax(log, dim=2, keepdim=True) # [1,1,K] (collapsed joints)\n", - " lik = weights * torch.exp(log-log_max) # Take max\n", - " loglik = torch.log(torch.sum(lik, dim=2, keepdim=True)) + log_max # Return max\n", - " \n", - " loglik = loglik * mask #replace with mask\n", + " def __call__(self,\n", + " y: torch.Tensor,\n", + " distr_args: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None):\n", + " \"\"\"\n", + " Computes the negative log-likelihood objective function. \n", + " To estimate the following predictive distribution:\n", "\n", - " loss = -torch.mean(loglik)\n", - " return loss\n", - " \n", - " def __call__(self, y: torch.Tensor,\n", - " distr_args: Tuple[torch.Tensor, torch.Tensor],\n", - " mask: Union[torch.Tensor, None] = None,):\n", + " $$\\mathrm{P}(\\mathbf{y}_{\\\\tau}\\,|\\,\\\\theta) \\\\quad \\mathrm{and} \\\\quad -\\log(\\mathrm{P}(\\mathbf{y}_{\\\\tau}\\,|\\,\\\\theta))$$\n", + "\n", + " where $\\\\theta$ represents the distributions parameters. It aditionally \n", + " summarizes the objective signal using a weighted average using the `mask` tensor. \n", "\n", - " return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask)" + " **Parameters**
\n", + " `y`: tensor, Actual values.
\n", + " `distr_args`: Constructor arguments for the underlying Distribution type.
\n", + " `mask`: tensor, Specifies date stamps per serie to consider in loss.
\n", + "\n", + " **Returns**
\n", + " `loss`: scalar, weighted loss function against which backpropagation will be performed.
\n", + " \"\"\"\n", + " # Instantiate Scaled Decoupled Distribution\n", + " distr = self.get_distribution(distr_args=distr_args)\n", + " loss_values = -distr.log_prob(y)\n", + " loss_weights = mask\n", + " \n", + " return weighted_average(loss_values, weights=loss_weights)" ] }, { @@ -3604,8 +3648,8 @@ "outputs": [], "source": [ "#| hide\n", - "# Create single mixture and broadcast to N,H,K\n", - "counts = torch.Tensor([[10,20,30], [20,40,60]])[None, :, :]\n", + "# Create single mixture and broadcast to N,H,1,K\n", + "counts = torch.Tensor([[5,10,15], [10,20,30]])[None, :, :].unsqueeze(2)\n", "\n", "# # Create repetitions for the batch dimension N.\n", "N=2\n", @@ -3613,22 +3657,22 @@ "weights = torch.ones_like(counts)\n", "probs = torch.ones_like(counts) * 0.5\n", "\n", - "print('weights.shape (N,H,K) \\t', weights.shape)\n", - "print('counts.shape (N,H,K) \\t', counts.shape)\n", - "print('probs.shape (N,H,K) \\t', probs.shape)\n", + "print('weights.shape (N,H,1,K) \\t', weights.shape)\n", + "print('counts.shape (N,H,1,K) \\t', counts.shape)\n", + "print('probs.shape (N,H,1,K) \\t', probs.shape)\n", "\n", - "model = NBMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9])\n", - "distr_args = (counts, probs)\n", + "model = NBMM(quantiles=[0.1, 0.40, 0.5, 0.60, 0.9], weighted=True)\n", + "distr_args = (counts, probs, weights)\n", "samples, sample_mean, quants = model.sample(distr_args, num_samples=2000)\n", "\n", - "print('samples.shape (N,H,num_samples) ', samples.shape)\n", - "print('sample_mean.shape (N,H) ', sample_mean.shape)\n", - "print('quants.shape (N,H,Q) \\t\\t', quants.shape)\n", + "print('samples.shape (N,H,1,num_samples) ', samples.shape)\n", + "print('sample_mean.shape (N,H,1,1) ', sample_mean.shape)\n", + "print('quants.shape (N,H,1,Q) \\t\\t', quants.shape)\n", "\n", "# Plot synthethic data\n", "x_plot = range(quants.shape[1]) # H length\n", - "y_plot_hat = quants[0,:,:] # Filter N,G,T -> H,Q\n", - "samples_hat = samples[0,:,:] # Filter N,G,T -> H,num_samples\n", + "y_plot_hat = quants[0,:,0,:] # Filter N,G,T -> H,Q\n", + "samples_hat = samples[0,:,0,:] # Filter N,G,T -> H,num_samples\n", "\n", "# Kernel density plot for single forecast horizon \\tau = t+1\n", "fig, ax = plt.subplots(figsize=(3.7, 2.9))\n", @@ -3723,7 +3767,9 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -3784,7 +3830,7 @@ "outputs": [], "source": [ "#| export\n", - "class TukeyLoss(torch.nn.Module):\n", + "class TukeyLoss(BasePointLoss):\n", " \"\"\" Tukey Loss\n", "\n", " The Tukey loss function, also known as Tukey's biweight function, is a \n", @@ -3823,10 +3869,14 @@ "\n", " def domain_map(self, y_hat: torch.Tensor):\n", " \"\"\"\n", - " Univariate loss operates in dimension [B,T,H]/[B,H]\n", - " This changes the network's output from [B,H,1]->[B,H]\n", + " Input:\n", + " Univariate: [B, H, 1]\n", + " Multivariate: [B, H, N]\n", + "\n", + " Output: [B, H, N]\n", " \"\"\"\n", - " return y_hat.squeeze(-1)\n", + "\n", + " return y_hat\n", "\n", " def masked_mean(self, x, mask, dim):\n", " x_nan = x.masked_fill(mask < 1, float(\"nan\"))\n", @@ -3834,8 +3884,12 @@ " x_mean = torch.nan_to_num(x_mean, nan=0.0)\n", " return x_mean\n", "\n", - " def __call__(self, y: torch.Tensor, y_hat: torch.Tensor, \n", - " mask: Union[torch.Tensor, None] = None):\n", + " def __call__(self,\n", + " y: torch.Tensor,\n", + " y_hat: torch.Tensor,\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -3942,7 +3996,9 @@ " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -3952,6 +4008,7 @@ " **Returns:**
\n", " `huber_qloss`: tensor (single value).\n", " \"\"\"\n", + " \n", " error = y_hat - y\n", " zero_error = torch.zeros_like(error)\n", " sq = torch.maximum(-error, zero_error)\n", @@ -4051,9 +4108,18 @@ "\n", " def domain_map(self, y_hat: torch.Tensor):\n", " \"\"\"\n", - " Identity domain map [B,T,H,Q]/[B,H,Q]\n", + " Input:\n", + " Univariate: [B, H, 1 * Q]\n", + " Multivariate: [B, H, N * Q]\n", + "\n", + " Output: [B, H, N, Q]\n", " \"\"\"\n", - " return y_hat\n", + " output = y_hat.reshape(y_hat.shape[0],\n", + " y_hat.shape[1],\n", + " -1,\n", + " self.outputsize_multiplier)\n", + "\n", + " return output\n", " \n", " def _compute_weights(self, y, mask):\n", " \"\"\"\n", @@ -4061,25 +4127,24 @@ " Set horizon_weight to a ones[H] tensor if not set.\n", " If set, check that it has the same length as the horizon in x.\n", " \"\"\"\n", - " if mask is None:\n", - " mask = torch.ones_like(y, device=y.device)\n", - " else:\n", - " mask = mask.unsqueeze(1) # Add Q dimension.\n", "\n", " if self.horizon_weight is None:\n", - " self.horizon_weight = torch.ones(mask.shape[-1])\n", + " weights = torch.ones_like(mask)\n", " else:\n", - " assert mask.shape[-1] == len(self.horizon_weight), \\\n", - " 'horizon_weight must have same length as Y'\n", - " \n", - " weights = self.horizon_weight.clone()\n", - " weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device)\n", + " assert mask.shape[1] == len(self.horizon_weight), \\\n", + " 'horizon_weight must have same length as Y' \n", + " weights = self.horizon_weight.clone()\n", + " weights = weights[None, :, None, None].to(mask.device)\n", + " weights = torch.ones_like(mask, device=mask.device) * weights\n", + " \n", " return weights * mask\n", "\n", " def __call__(self,\n", " y: torch.Tensor,\n", " y_hat: torch.Tensor,\n", - " mask: Union[torch.Tensor, None] = None):\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -4089,25 +4154,27 @@ " **Returns:**
\n", " `hmqloss`: tensor (single value).\n", " \"\"\"\n", - "\n", - " error = y_hat - y.unsqueeze(-1)\n", + " y = y.unsqueeze(-1)\n", + " \n", + " if mask is not None:\n", + " mask = mask.unsqueeze(-1)\n", + " else:\n", + " mask = torch.ones_like(y, device=y.device)\n", + " \n", + " error = y_hat - y\n", + " \n", " zero_error = torch.zeros_like(error) \n", " sq = torch.maximum(-error, torch.zeros_like(error))\n", " s1_q = torch.maximum(error, torch.zeros_like(error))\n", - " losses = F.huber_loss(self.quantiles * sq, zero_error, \n", + " \n", + " quantiles = self.quantiles[None, None, None, :]\n", + " losses = F.huber_loss(quantiles * sq, zero_error, \n", " reduction='none', delta=self.delta) + \\\n", - " F.huber_loss((1 - self.quantiles) * s1_q, zero_error, \n", + " F.huber_loss((1 - quantiles) * s1_q, zero_error, \n", " reduction='none', delta=self.delta)\n", - " losses = (1/len(self.quantiles)) * losses\n", + " losses = (1 / len(quantiles)) * losses\n", "\n", - " if y_hat.ndim == 3: # BaseWindows\n", - " losses = losses.swapaxes(-2,-1) # [B,H,Q] -> [B,Q,H] (needed for horizon weighting, H at the end)\n", - " elif y_hat.ndim == 4: # BaseRecurrent\n", - " losses = losses.swapaxes(-2,-1)\n", - " losses = losses.swapaxes(-2,-3) # [B,seq_len,H,Q] -> [B,Q,seq_len,H] (needed for horizon weighting, H at the end)\n", - "\n", - " weights = self._compute_weights(y=losses, mask=mask) # Use losses for extra dim\n", - " # NOTE: Weights do not have Q dimension.\n", + " weights = self._compute_weights(y=losses, mask=mask) \n", "\n", " return _weighted_mean(losses=losses, weights=weights)" ] @@ -4167,7 +4234,7 @@ "outputs": [], "source": [ "#| export\n", - "class Accuracy(torch.nn.Module):\n", + "class Accuracy(BasePointLoss):\n", " \"\"\" Accuracy\n", "\n", " Computes the accuracy between categorical `y` and `y_hat`.\n", @@ -4180,16 +4247,25 @@ " def __init__(self,):\n", " super(Accuracy, self).__init__()\n", " self.is_distribution_output = False\n", + " self.outputsize_multiplier = 1\n", "\n", " def domain_map(self, y_hat: torch.Tensor):\n", " \"\"\"\n", - " Univariate loss operates in dimension [B,T,H]/[B,H]\n", - " This changes the network's output from [B,H,1]->[B,H]\n", + " Input:\n", + " Univariate: [B, H, 1]\n", + " Multivariate: [B, H, N]\n", + "\n", + " Output: [B, H, N]\n", " \"\"\"\n", - " return y_hat.squeeze(-1)\n", "\n", - " def __call__(self, y: torch.Tensor, y_hat: torch.Tensor, \n", - " mask: Union[torch.Tensor, None] = None):\n", + " return y_hat\n", + " \n", + " def __call__(self,\n", + " y: torch.Tensor,\n", + " y_hat: torch.Tensor,\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -4199,10 +4275,11 @@ " **Returns:**
\n", " `accuracy`: tensor (single value).\n", " \"\"\"\n", + "\n", " if mask is None:\n", " mask = torch.ones_like(y_hat)\n", "\n", - " measure = (y.unsqueeze(-1) == y_hat) * mask.unsqueeze(-1)\n", + " measure = (y == y_hat) * mask\n", " accuracy = torch.mean(measure)\n", " return accuracy" ] @@ -4244,7 +4321,7 @@ "outputs": [], "source": [ "#| export\n", - "class sCRPS(torch.nn.Module):\n", + "class sCRPS(BasePointLoss):\n", " \"\"\"Scaled Continues Ranked Probability Score\n", "\n", " Calculates a scaled variation of the CRPS, as proposed by Rangapuram (2021),\n", @@ -4279,8 +4356,12 @@ " self.mql = MQLoss(level=level, quantiles=quantiles)\n", " self.is_distribution_output = False\n", " \n", - " def __call__(self, y: torch.Tensor, y_hat: torch.Tensor, \n", - " mask: Union[torch.Tensor, None] = None):\n", + " def __call__(self,\n", + " y: torch.Tensor,\n", + " y_hat: torch.Tensor,\n", + " y_insample: torch.Tensor,\n", + " mask: Union[torch.Tensor, None] = None,\n", + " ) -> torch.Tensor:\n", " \"\"\"\n", " **Parameters:**
\n", " `y`: tensor, Actual values.
\n", @@ -4290,7 +4371,7 @@ " **Returns:**
\n", " `scrps`: tensor (single value).\n", " \"\"\"\n", - " mql = self.mql(y=y, y_hat=y_hat, mask=mask)\n", + " mql = self.mql(y=y, y_hat=y_hat, mask=mask, y_insample=y_insample)\n", " norm = torch.sum(torch.abs(y))\n", " unmean = torch.sum(mask)\n", " scrps = 2 * mql * unmean / (norm + 1e-5)\n", @@ -4326,11 +4407,11 @@ "source": [ "#| hide\n", "# Each 1 is an error, there are 6 datapoints.\n", - "y = torch.Tensor([[0,0,0],[0,0,0]])\n", - "y_hat = torch.Tensor([[0,0,1],[1,0,1]])\n", + "y = torch.Tensor([[0,0,0],[0,0,0]]).unsqueeze(-1)\n", + "y_hat = torch.Tensor([[0,0,1],[1,0,1]]).unsqueeze(-1)\n", "\n", "# Complete mask and horizon_weight\n", - "mask = torch.Tensor([[1,1,1],[1,1,1]])\n", + "mask = torch.Tensor([[1,1,1],[1,1,1]]).unsqueeze(-1)\n", "horizon_weight = torch.Tensor([1,1,1])\n", "\n", "mae = MAE(horizon_weight=horizon_weight)\n", @@ -4338,21 +4419,21 @@ "assert loss==(3/6), 'Should be 3/6'\n", "\n", "# Incomplete mask and complete horizon_weight\n", - "mask = torch.Tensor([[1,1,1],[0,1,1]]) # Only 1 error and points is masked.\n", + "mask = torch.Tensor([[1,1,1],[0,1,1]]).unsqueeze(-1) # Only 1 error and points is masked.\n", "horizon_weight = torch.Tensor([1,1,1])\n", "mae = MAE(horizon_weight=horizon_weight)\n", "loss = mae(y=y, y_hat=y_hat, mask=mask)\n", "assert loss==(2/5), 'Should be 2/5'\n", "\n", "# Complete mask and incomplete horizon_weight\n", - "mask = torch.Tensor([[1,1,1],[1,1,1]])\n", + "mask = torch.Tensor([[1,1,1],[1,1,1]]).unsqueeze(-1)\n", "horizon_weight = torch.Tensor([1,1,0]) # 2 errors and points are masked.\n", "mae = MAE(horizon_weight=horizon_weight)\n", "loss = mae(y=y, y_hat=y_hat, mask=mask)\n", "assert loss==(1/4), 'Should be 1/4'\n", "\n", "# Incomplete mask and incomplete horizon_weight\n", - "mask = torch.Tensor([[0,1,1],[1,1,1]])\n", + "mask = torch.Tensor([[0,1,1],[1,1,1]]).unsqueeze(-1)\n", "horizon_weight = torch.Tensor([1,1,0]) # 2 errors are masked, and 3 points.\n", "mae = MAE(horizon_weight=horizon_weight)\n", "loss = mae(y=y, y_hat=y_hat, mask=mask)\n", diff --git a/nbs/models.autoformer.ipynb b/nbs/models.autoformer.ipynb index 9c6567f2e..999c4ca62 100644 --- a/nbs/models.autoformer.ipynb +++ b/nbs/models.autoformer.ipynb @@ -68,7 +68,7 @@ "import torch.nn.functional as F\n", "\n", "from neuralforecast.common._modules import DataEmbedding, SeriesDecomp\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -80,8 +80,12 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", + "\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -410,7 +414,7 @@ "outputs": [], "source": [ "#| export\n", - "class Autoformer(BaseWindows):\n", + "class Autoformer(BaseModel):\n", " \"\"\" Autoformer\n", "\n", " The Autoformer model tackles the challenge of finding reliable dependencies on intricate temporal patterns of long-horizon forecasting.\n", @@ -469,10 +473,11 @@ "\t- [Wu, Haixu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. \"Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting\"](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -616,13 +621,9 @@ " def forward(self, windows_batch):\n", " # Parse windows_batch\n", " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", " futr_exog = windows_batch['futr_exog']\n", "\n", " # Parse inputs\n", - " insample_y = insample_y.unsqueeze(-1) # [Ws,L,1]\n", " if self.futr_exog_size > 0:\n", " x_mark_enc = futr_exog[:,:self.input_size,:]\n", " x_mark_dec = futr_exog[:,-(self.label_len+self.h):,:]\n", @@ -650,7 +651,8 @@ " # final\n", " dec_out = trend_part + seasonal_part\n", "\n", - " forecast = self.loss.domain_map(dec_out[:, -self.h:])\n", + " forecast = dec_out[:, -self.h:]\n", + " \n", " return forecast" ] }, @@ -681,6 +683,21 @@ "show_doc(Autoformer.predict, name='Autoformer.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(Autoformer, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", diff --git a/nbs/models.bitcn.ipynb b/nbs/models.bitcn.ipynb index cd78bb194..b7363dba4 100644 --- a/nbs/models.bitcn.ipynb +++ b/nbs/models.bitcn.ipynb @@ -55,8 +55,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -74,7 +77,7 @@ "import numpy as np\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -146,7 +149,7 @@ "outputs": [], "source": [ "#| export\n", - "class BiTCN(BaseWindows):\n", + "class BiTCN(BaseModel):\n", " \"\"\" BiTCN\n", "\n", " Bidirectional Temporal Convolutional Network (BiTCN) is a forecasting architecture based on two temporal convolutional networks (TCNs). The first network ('forward') encodes future covariates of the time series, whereas the second network ('backward') encodes past observations and covariates. This is a univariate model.\n", @@ -170,7 +173,7 @@ " `batch_size`: int=32, number of different series in each batch.
\n", " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", " `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.
\n", - " `inference_windows_batch_size`: int=-1, number of windows to sample in each inference batch, -1 uses all.
\n", + " `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.
\n", " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", @@ -190,10 +193,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int,\n", @@ -315,7 +319,7 @@ "\n", " def forward(self, windows_batch):\n", " # Parse windows_batch\n", - " x = windows_batch['insample_y'].unsqueeze(-1) # [B, L, 1]\n", + " x = windows_batch['insample_y'].contiguous() # [B, L, 1]\n", " hist_exog = windows_batch['hist_exog'] # [B, L, X]\n", " futr_exog = windows_batch['futr_exog'] # [B, L + h, F]\n", " stat_exog = windows_batch['stat_exog'] # [B, S]\n", @@ -358,11 +362,8 @@ "\n", " # Output layer to create forecasts\n", " x = x.permute(0, 2, 1) # [B, 3 * hidden_size, h] -> [B, h, 3 * hidden_size]\n", - " x = self.output_lin(x) # [B, h, 3 * hidden_size] -> [B, h, n_outputs] \n", + " forecast = self.output_lin(x) # [B, h, 3 * hidden_size] -> [B, h, n_outputs] \n", "\n", - " # Map to output domain\n", - " forecast = self.loss.domain_map(x)\n", - " \n", " return forecast" ] }, @@ -393,6 +394,21 @@ "show_doc(BiTCN.predict, name='BiTCN.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(BiTCN, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -411,8 +427,8 @@ "import matplotlib.pyplot as plt\n", "\n", "from neuralforecast import NeuralForecast\n", - "from neuralforecast.models import BiTCN\n", "from neuralforecast.losses.pytorch import GMM\n", + "from neuralforecast.models import BiTCN\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds\n", @@ -196,10 +195,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False\n", + " RECURRENT = True\n", "\n", " def __init__(self,\n", " h,\n", @@ -215,7 +215,7 @@ " stat_exog_list = None,\n", " exclude_insample_y = False,\n", " loss = DistributionLoss(distribution='StudentT', level=[80, 90], return_params=False),\n", - " valid_loss = MQLoss(level=[80, 90]),\n", + " valid_loss = MAE(),\n", " max_steps: int = 1000,\n", " learning_rate: float = 1e-3,\n", " num_lr_decays: int = 3,\n", @@ -241,15 +241,6 @@ " if exclude_insample_y:\n", " raise Exception('DeepAR has no possibility for excluding y.')\n", " \n", - " if not loss.is_distribution_output:\n", - " raise Exception('DeepAR only supports distributional outputs.')\n", - " \n", - " if str(type(valid_loss)) not in [\"\"]:\n", - " raise Exception('DeepAR only supports MQLoss as validation loss.')\n", - "\n", - " if loss.return_params:\n", - " raise Exception('DeepAR does not return distribution parameters due to Monte Carlo sampling.')\n", - " \n", " # Inherit BaseWindows class\n", " super(DeepAR, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -281,8 +272,7 @@ " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", - " 
self.horizon_backup = self.h # Used because h=0 during training\n", - " self.trajectory_samples = trajectory_samples\n", + " self.n_samples = trajectory_samples\n", "\n", " # LSTM\n", " self.encoder_n_layers = lstm_n_layers\n", @@ -293,6 +283,8 @@ " input_encoder = 1 + self.futr_exog_size + self.stat_exog_size\n", "\n", " # Instantiate model\n", + " self.rnn_state = None\n", + " self.maintain_state = False\n", " self.hist_encoder = nn.LSTM(input_size=input_encoder,\n", " hidden_size=self.encoder_hidden_size,\n", " num_layers=self.encoder_n_layers,\n", @@ -305,268 +297,38 @@ " hidden_size=decoder_hidden_size,\n", " hidden_layers=decoder_hidden_layers)\n", "\n", - " # Override BaseWindows method\n", - " def training_step(self, batch, batch_idx):\n", - "\n", - " # During training h=0 \n", - " self.h = 0\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Create and normalize windows [Ws, L, C]\n", - " windows = self._create_windows(batch, step='train')\n", - " original_insample_y = windows['temporal'][:, :, y_idx].clone() # windows: [B, L, Feature] -> [B, L]\n", - " original_insample_y = original_insample_y[:,1:] # Remove first (shift in DeepAr, cell at t outputs t+1)\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, _, _, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - "\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L]\n", - " insample_mask=insample_mask, # [Ws, L]\n", - " futr_exog=futr_exog, # [Ws, L+H]\n", - " hist_exog=None, # None\n", - " stat_exog=stat_exog,\n", - " y_idx=y_idx) # [Ws, 1]\n", - "\n", - " # Model Predictions\n", - " output = self.train_forward(windows_batch)\n", - "\n", - " if self.loss.is_distribution_output:\n", - " _, y_loc, y_scale = self._inv_normalization(y_hat=original_insample_y,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " outsample_y = original_insample_y\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " mask = insample_mask[:,1:].clone() # Remove first (shift in DeepAr, cell at t outputs t+1)\n", - " loss = self.loss(y=outsample_y, distr_args=distr_args, mask=mask)\n", - " else:\n", - " raise Exception('DeepAR only supports distributional outputs.')\n", - "\n", - " if torch.isnan(loss):\n", - " print('Model Parameters', self.hparams)\n", - " print('insample_y', torch.isnan(insample_y).sum())\n", - " print('outsample_y', torch.isnan(outsample_y).sum())\n", - " print('output', torch.isnan(output).sum())\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'train_loss',\n", - " loss.item(),\n", - " batch_size=outsample_y.size(0),\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.train_trajectories.append((self.global_step, loss.item()))\n", - "\n", - " self.h = self.horizon_backup # Restore horizon\n", - " return loss\n", - "\n", - " def validation_step(self, batch, batch_idx):\n", - "\n", - " self.h == self.horizon_backup\n", - "\n", - " if self.val_size == 0:\n", - " return np.nan\n", - "\n", - " # TODO: Hack to compute number of windows\n", - " windows = self._create_windows(batch, step='val')\n", - " n_windows = len(windows['temporal'])\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Number of windows in batch\n", - " windows_batch_size = self.inference_windows_batch_size\n", - " if windows_batch_size < 0:\n", - " windows_batch_size = n_windows\n", - " n_batches = int(np.ceil(n_windows/windows_batch_size))\n", - 
"\n", - " valid_losses = []\n", - " batch_sizes = []\n", - " for i in range(n_batches):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " w_idxs = np.arange(i*windows_batch_size, \n", - " min((i+1)*windows_batch_size, n_windows))\n", - " windows = self._create_windows(batch, step='val', w_idxs=w_idxs)\n", - " original_outsample_y = torch.clone(windows['temporal'][:,-self.h:,0])\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, outsample_mask, \\\n", - " _, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - " windows_batch = dict(insample_y=insample_y,\n", - " insample_mask=insample_mask,\n", - " futr_exog=futr_exog,\n", - " hist_exog=None,\n", - " stat_exog=stat_exog,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx) \n", - " \n", - " # Model Predictions\n", - " output_batch = self(windows_batch)\n", - " # Monte Carlo already returns y_hat with mean and quantiles\n", - " output_batch = output_batch[:,:, 1:] # Remove mean\n", - " valid_loss_batch = self.valid_loss(y=original_outsample_y, y_hat=output_batch, mask=outsample_mask)\n", - " valid_losses.append(valid_loss_batch)\n", - " batch_sizes.append(len(output_batch))\n", - "\n", - " valid_loss = torch.stack(valid_losses)\n", - " batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device)\n", - " batch_size = torch.sum(batch_sizes)\n", - " valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size\n", - "\n", - " if torch.isnan(valid_loss):\n", - " raise Exception('Loss is NaN, training stopped.')\n", - "\n", - " self.log(\n", - " 'valid_loss',\n", - " valid_loss.item(),\n", - " batch_size=batch_size,\n", - " prog_bar=True,\n", - " on_epoch=True,\n", - " )\n", - " self.validation_step_outputs.append(valid_loss)\n", - " return valid_loss\n", - "\n", - " def predict_step(self, batch, batch_idx):\n", - "\n", - " self.h == self.horizon_backup\n", - "\n", - " # TODO: Hack to compute number of windows\n", - " windows = self._create_windows(batch, step='predict')\n", - " n_windows = len(windows['temporal'])\n", - " y_idx = batch['y_idx']\n", - "\n", - " # Number of windows in batch\n", - " windows_batch_size = self.inference_windows_batch_size\n", - " if windows_batch_size < 0:\n", - " windows_batch_size = n_windows\n", - " n_batches = int(np.ceil(n_windows/windows_batch_size))\n", - "\n", - " y_hats = []\n", - " for i in range(n_batches):\n", - " # Create and normalize windows [Ws, L+H, C]\n", - " w_idxs = np.arange(i*windows_batch_size, \n", - " min((i+1)*windows_batch_size, n_windows))\n", - " windows = self._create_windows(batch, step='predict', w_idxs=w_idxs)\n", - " windows = self._normalization(windows=windows, y_idx=y_idx)\n", - "\n", - " # Parse windows\n", - " insample_y, insample_mask, _, _, _, futr_exog, stat_exog = self._parse_windows(batch, windows)\n", - " windows_batch = dict(insample_y=insample_y, # [Ws, L]\n", - " insample_mask=insample_mask, # [Ws, L]\n", - " futr_exog=futr_exog, # [Ws, L+H]\n", - " stat_exog=stat_exog,\n", - " temporal_cols=batch['temporal_cols'],\n", - " y_idx=y_idx)\n", - " \n", - " # Model Predictions\n", - " y_hat = self(windows_batch)\n", - " # Monte Carlo already returns y_hat with mean and quantiles\n", - " y_hats.append(y_hat)\n", - " y_hat = torch.cat(y_hats, dim=0)\n", - " return y_hat\n", - "\n", - " def train_forward(self, windows_batch):\n", + " def forward(self, windows_batch):\n", "\n", " # Parse windows_batch\n", - " encoder_input = 
windows_batch['insample_y'][:,:, None] # <- [B,T,1]\n", + " encoder_input = windows_batch['insample_y'] # <- [B, T, 1]\n", " futr_exog = windows_batch['futr_exog']\n", " stat_exog = windows_batch['stat_exog']\n", "\n", - " #[B, input_size-1, X]\n", - " encoder_input = encoder_input[:,:-1,:] # Remove last (shift in DeepAr, cell at t outputs t+1)\n", " _, input_size = encoder_input.shape[:2]\n", " if self.futr_exog_size > 0:\n", - " # Shift futr_exog (t predicts t+1, last output is outside insample_y)\n", - " encoder_input = torch.cat((encoder_input, futr_exog[:,1:,:]), dim=2)\n", + " encoder_input = torch.cat((encoder_input, futr_exog), dim=2)\n", + "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, input_size, 1) # [B, S] -> [B, input_size-1, S]\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, input_size, 1) # [B, S] -> [B, input_size-1, S]\n", " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", "\n", " # RNN forward\n", - " hidden_state, _ = self.hist_encoder(encoder_input) # [B, input_size-1, rnn_hidden_state]\n", + " if self.maintain_state:\n", + " rnn_state = self.rnn_state\n", + " else:\n", + " rnn_state = None\n", "\n", - " # Decoder forward\n", - " output = self.decoder(hidden_state) # [B, input_size-1, output_size]\n", - " output = self.loss.domain_map(output)\n", - " return output\n", - " \n", - " def forward(self, windows_batch):\n", + " hidden_state, rnn_state = self.hist_encoder(encoder_input, \n", + " rnn_state) # [B, input_size-1, rnn_hidden_state]\n", "\n", - " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'][:,:, None] # <- [B,L,1]\n", - " futr_exog = windows_batch['futr_exog'] # <- [B,L+H, n_f]\n", - " stat_exog = windows_batch['stat_exog']\n", - " y_idx = windows_batch['y_idx']\n", + " if self.maintain_state:\n", + " self.rnn_state = rnn_state\n", "\n", - " #[B, seq_len, X]\n", - " batch_size, input_size = encoder_input.shape[:2]\n", - " if self.futr_exog_size > 0:\n", - " futr_exog_input_window = futr_exog[:,1:input_size+1,:] # Align y_t with futr_exog_t+1\n", - " encoder_input = torch.cat((encoder_input, futr_exog_input_window), dim=2)\n", - " if self.stat_exog_size > 0:\n", - " stat_exog_input_window = stat_exog.unsqueeze(1).repeat(1, input_size, 1) # [B, S] -> [B, input_size, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog_input_window), dim=2)\n", - "\n", - " # Use input_size history to predict first h of the forecasting window\n", - " _, h_c_tuple = self.hist_encoder(encoder_input)\n", - " h_n = h_c_tuple[0] # [n_layers, B, lstm_hidden_state]\n", - " c_n = h_c_tuple[1] # [n_layers, B, lstm_hidden_state]\n", - "\n", - " # Vectorizes trajectory samples in batch dimension [1]\n", - " h_n = torch.repeat_interleave(h_n, self.trajectory_samples, 1) # [n_layers, B*trajectory_samples, rnn_hidden_state]\n", - " c_n = torch.repeat_interleave(c_n, self.trajectory_samples, 1) # [n_layers, B*trajectory_samples, rnn_hidden_state]\n", - "\n", - " # Scales for inverse normalization\n", - " y_scale = self.scaler.x_scale[:, 0, [y_idx]].squeeze(-1).to(encoder_input.device)\n", - " y_loc = self.scaler.x_shift[:, 0, [y_idx]].squeeze(-1).to(encoder_input.device)\n", - " y_scale = torch.repeat_interleave(y_scale, self.trajectory_samples, 0)\n", - " y_loc = torch.repeat_interleave(y_loc, self.trajectory_samples, 0)\n", - "\n", - " # Recursive strategy prediction\n", - " quantiles = self.loss.quantiles.to(encoder_input.device)\n", - " y_hat = torch.zeros(batch_size, self.h, len(quantiles)+1, 
device=encoder_input.device)\n", - " for tau in range(self.h):\n", - " # Decoder forward\n", - " last_layer_h = h_n[-1] # [B*trajectory_samples, lstm_hidden_state]\n", - " output = self.decoder(last_layer_h) \n", - " output = self.loss.domain_map(output)\n", - "\n", - " # Inverse normalization\n", - " distr_args = self.loss.scale_decouple(output=output, loc=y_loc, scale=y_scale)\n", - " # Add horizon (1) dimension\n", - " distr_args = list(distr_args)\n", - " for i in range(len(distr_args)):\n", - " distr_args[i] = distr_args[i].unsqueeze(-1)\n", - " distr_args = tuple(distr_args)\n", - " samples_tau, _, _ = self.loss.sample(distr_args=distr_args, num_samples=1)\n", - " samples_tau = samples_tau.reshape(batch_size, self.trajectory_samples)\n", - " sample_mean = torch.mean(samples_tau, dim=-1).to(encoder_input.device)\n", - " quants = torch.quantile(input=samples_tau, \n", - " q=quantiles, dim=-1).to(encoder_input.device)\n", - " y_hat[:,tau,0] = sample_mean\n", - " y_hat[:,tau,1:] = quants.permute((1,0)) # [Q, B] -> [B, Q]\n", - " \n", - " # Stop if already in the last step (no need to predict next step)\n", - " if tau+1 == self.h:\n", - " continue\n", - " # Normalize to use as input\n", - " encoder_input = self.scaler.scaler(samples_tau.flatten(), y_loc, y_scale) # [B*n_samples]\n", - " encoder_input = encoder_input[:, None, None] # [B*n_samples, 1, 1]\n", - "\n", - " # Update input\n", - " if self.futr_exog_size > 0:\n", - " futr_exog_tau = futr_exog[:,[input_size+tau+1],:] # [B, 1, n_f]\n", - " futr_exog_tau = torch.repeat_interleave(futr_exog_tau, self.trajectory_samples, 0) # [B*n_samples, 1, n_f]\n", - " encoder_input = torch.cat((encoder_input, futr_exog_tau), dim=2) # [B*n_samples, 1, 1+n_f]\n", - " if self.stat_exog_size > 0:\n", - " stat_exog_tau = torch.repeat_interleave(stat_exog, self.trajectory_samples, 0) # [B*n_samples, n_s]\n", - " encoder_input = torch.cat((encoder_input, stat_exog_tau[:,None,:]), dim=2) # [B*n_samples, 1, 1+n_f+n_s]\n", - " \n", - " _, h_c_tuple = self.hist_encoder(encoder_input, (h_n, c_n))\n", - " h_n = h_c_tuple[0] # [n_layers, B, rnn_hidden_state]\n", - " c_n = h_c_tuple[1] # [n_layers, B, rnn_hidden_state]\n", - "\n", - " return y_hat" + " # Decoder forward\n", + " output = self.decoder(hidden_state) # [B, input_size-1, output_size]\n", + "\n", + " # Return only horizon part\n", + " return output[:, -self.h:]" ] }, { @@ -596,6 +358,21 @@ "show_doc(DeepAR.predict, name='DeepAR.predict', title_level=3)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(DeepAR, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -616,18 +393,18 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import DeepAR\n", - "from neuralforecast.losses.pytorch import DistributionLoss\n", + "from neuralforecast.losses.pytorch import DistributionLoss, MQLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", "nf = NeuralForecast(\n", " models=[DeepAR(h=12,\n", - " input_size=48,\n", - " lstm_n_layers=3,\n", + " 
input_size=24,\n", + " lstm_n_layers=1,\n", " trajectory_samples=100,\n", - " loss=DistributionLoss(distribution='Normal', level=[80, 90], return_params=False),\n", + " loss=DistributionLoss(distribution='StudentT', level=[80, 90], return_params=True),\n", + " valid_loss=MQLoss(level=[80, 90]),\n", " learning_rate=0.005,\n", " stat_exog_list=['airline1'],\n", " futr_exog_list=['trend'],\n", @@ -635,7 +412,8 @@ " val_check_steps=10,\n", " early_stop_patience_steps=-1,\n", " scaler_type='standard',\n", - " enable_progress_bar=True),\n", + " enable_progress_bar=True,\n", + " ),\n", " ],\n", " freq='M'\n", ")\n", diff --git a/nbs/models.deepnpts.ipynb b/nbs/models.deepnpts.ipynb index 4f5e7ee9f..465dde397 100644 --- a/nbs/models.deepnpts.ipynb +++ b/nbs/models.deepnpts.ipynb @@ -51,7 +51,7 @@ "from typing import Optional\n", "\n", "\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.losses.pytorch import MAE\n" ] }, @@ -66,7 +66,8 @@ "import warnings\n", "\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -77,6 +78,7 @@ "source": [ "#| hide\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", "warnings.filterwarnings(\"ignore\")" ] }, @@ -87,7 +89,7 @@ "outputs": [], "source": [ "#| export\n", - "class DeepNPTS(BaseWindows):\n", + "class DeepNPTS(BaseModel):\n", " \"\"\" DeepNPTS\n", "\n", " Deep Non-Parametric Time Series Forecaster (`DeepNPTS`) is a baseline model for time-series forecasting. This model generates predictions by (weighted) sampling from the empirical distribution according to a learnable strategy. 
The strategy is learned by exploiting the information across multiple related time series.\n", @@ -133,10 +135,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", " \n", " def __init__(self,\n", " h,\n", @@ -176,10 +179,10 @@ " if exclude_insample_y:\n", " raise Exception('DeepNPTS has no possibility for excluding y.')\n", "\n", - " if not isinstance(loss, losses.BasePointLoss):\n", + " if loss.outputsize_multiplier > 1:\n", " raise Exception('DeepNPTS only supports point loss functions (MAE, MSE, etc) as loss function.') \n", " \n", - " if not isinstance(valid_loss, losses.BasePointLoss):\n", + " if valid_loss is not None and not isinstance(valid_loss, losses.BasePointLoss):\n", " raise Exception('DeepNPTS only supports point loss functions (MAE, MSE, etc) as valid loss function.') \n", " \n", " # Inherit BaseWindows class\n", @@ -234,13 +237,13 @@ "\n", " def forward(self, windows_batch):\n", " # Parse windows_batch\n", - " x = windows_batch['insample_y'].unsqueeze(-1) # [B, L, 1]\n", + " x = windows_batch['insample_y'] # [B, L, 1]\n", " hist_exog = windows_batch['hist_exog'] # [B, L, X]\n", " futr_exog = windows_batch['futr_exog'] # [B, L + h, F]\n", " stat_exog = windows_batch['stat_exog'] # [B, S]\n", "\n", " batch_size, seq_len = x.shape[:2] # B = batch_size, L = seq_len\n", - " insample_y = windows_batch['insample_y'].unsqueeze(-1) \n", + " insample_y = windows_batch['insample_y'] \n", " \n", " # Concatenate x_t with future exogenous of input\n", " if self.futr_exog_size > 0: \n", @@ -268,9 +271,7 @@ " # Apply softmax for weighted input predictions\n", " weights = weights.reshape(batch_size, seq_len, -1) # [B, L * h] -> [B, L, h]\n", " x = F.softmax(weights, dim=1) * insample_y # [B, L, h] * [B, L, 1] = [B, L, h]\n", - " output = torch.sum(x, dim=1).unsqueeze(-1) # [B, L, h] -> [B, h, 1]\n", - "\n", - " forecast = self.loss.domain_map(output) # [B, h, 1] -> [B, h, 1]\n", + " forecast = torch.sum(x, dim=1).unsqueeze(-1) # [B, L, h] -> [B, h, 1]\n", "\n", " return forecast" ] @@ -302,6 +303,15 @@ "show_doc(DeepNPTS.predict, name='DeepNPTS.predict', title_level=3)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "check_model(DeepNPTS, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", diff --git a/nbs/models.dilated_rnn.ipynb b/nbs/models.dilated_rnn.ipynb index 4b3bd374f..b18c5449f 100644 --- a/nbs/models.dilated_rnn.ipynb +++ b/nbs/models.dilated_rnn.ipynb @@ -13,7 +13,16 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. 
To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], "source": [ "#| hide\n", "%load_ext autoreload\n", @@ -58,8 +67,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from nbdev.showdoc import show_doc\n", - "from neuralforecast.utils import generate_series" + "from neuralforecast.utils import generate_series\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -75,7 +87,7 @@ "import torch.nn as nn\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_recurrent import BaseRecurrent\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import MLP" ] }, @@ -324,8 +336,8 @@ "\n", " blocks = [dilated_outputs[:, i * batchsize: (i + 1) * batchsize, :] for i in range(rate)]\n", "\n", - " interleaved = torch.stack((blocks)).transpose(1, 0).contiguous()\n", - " interleaved = interleaved.view(dilated_outputs.size(0) * rate,\n", + " interleaved = torch.stack((blocks)).transpose(1, 0)\n", + " interleaved = interleaved.reshape(dilated_outputs.size(0) * rate,\n", " batchsize,\n", " dilated_outputs.size(2))\n", " return interleaved\n", @@ -359,7 +371,7 @@ "outputs": [], "source": [ "#| export\n", - "class DilatedRNN(BaseRecurrent):\n", + "class DilatedRNN(BaseModel):\n", " \"\"\" DilatedRNN\n", "\n", " **Parameters:**
\n", @@ -398,24 +410,26 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'recurrent'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", - " EXOGENOUS_STAT = True \n", + " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int,\n", - " input_size: int = -1,\n", + " input_size: int,\n", " inference_input_size: int = -1,\n", " cell_type: str = 'LSTM',\n", " dilations: List[List[int]] = [[1, 2], [4, 8]],\n", - " encoder_hidden_size: int = 200,\n", + " encoder_hidden_size: int = 128,\n", " context_size: int = 10,\n", - " decoder_hidden_size: int = 200,\n", + " decoder_hidden_size: int = 128,\n", " decoder_layers: int = 2,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " loss = MAE(),\n", " valid_loss = None,\n", " max_steps: int = 1000,\n", @@ -425,6 +439,9 @@ " val_check_steps: int = 100,\n", " batch_size = 32,\n", " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'robust',\n", " random_seed: int = 1,\n", @@ -439,7 +456,10 @@ " super(DilatedRNN, self).__init__(\n", " h=h,\n", " input_size=input_size,\n", - " inference_input_size=inference_input_size,\n", + " futr_exog_list=futr_exog_list,\n", + " hist_exog_list=hist_exog_list,\n", + " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -449,13 +469,14 @@ " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", + " step_size=step_size,\n", " scaler_type=scaler_type,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", + " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " random_seed=random_seed,\n", " optimizer=optimizer,\n", " optimizer_kwargs=optimizer_kwargs,\n", " lr_scheduler=lr_scheduler,\n", @@ -477,14 +498,12 @@ " self.decoder_layers = decoder_layers\n", "\n", " # RNN input size (1 for target variable y)\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", + " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size\n", "\n", " # Instantiate model\n", " layers = []\n", " for grp_num in range(len(self.dilations)):\n", - " if grp_num == 0:\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", - " else:\n", + " if grp_num > 0:\n", " input_encoder = self.encoder_hidden_size\n", " layer = DRNN(input_encoder,\n", " self.encoder_hidden_size,\n", @@ -496,11 +515,11 @@ " self.rnn_stack = nn.Sequential(*layers)\n", "\n", " # Context adapter\n", - " self.context_adapter = nn.Linear(in_features=self.encoder_hidden_size + self.futr_exog_size * h,\n", - " out_features=self.context_size * h)\n", + " self.context_adapter = nn.Linear(in_features=self.input_size,\n", + " out_features=h)\n", "\n", " # Decoder MLP\n", - " self.mlp_decoder = MLP(in_features=self.context_size + 
self.futr_exog_size,\n", + " self.mlp_decoder = MLP(in_features=self.encoder_hidden_size + self.futr_exog_size,\n", " out_features=self.loss.outputsize_multiplier,\n", " hidden_size=self.decoder_hidden_size,\n", " num_layers=self.decoder_layers,\n", @@ -510,22 +529,23 @@ " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", - " futr_exog = windows_batch['futr_exog']\n", - " hist_exog = windows_batch['hist_exog']\n", - " stat_exog = windows_batch['stat_exog']\n", - "\n", - " # Concatenate y, historic and static inputs\n", - " # [B, C, seq_len, 1] -> [B, seq_len, C]\n", - " # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ]\n", + " encoder_input = windows_batch['insample_y'] # [B, L, 1]\n", + " futr_exog = windows_batch['futr_exog'] # [B, L + h, F]\n", + " hist_exog = windows_batch['hist_exog'] # [B, L, X]\n", + " stat_exog = windows_batch['stat_exog'] # [B, S]\n", + "\n", + " # Concatenate y, historic and static inputs \n", " batch_size, seq_len = encoder_input.shape[:2]\n", " if self.hist_exog_size > 0:\n", - " hist_exog = hist_exog.permute(0,2,1,3).squeeze(-1) # [B, X, seq_len, 1] -> [B, seq_len, X]\n", - " encoder_input = torch.cat((encoder_input, hist_exog), dim=2)\n", + " encoder_input = torch.cat((encoder_input, hist_exog), dim=2) # [B, L, 1] + [B, L, X] -> [B, L, 1 + X]\n", "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, L, S]\n", + " encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # [B, L, 1 + X] + [B, L, S] -> [B, L, 1 + X + S]\n", + "\n", + " if self.futr_exog_size > 0:\n", + " encoder_input = torch.cat((encoder_input, \n", + " futr_exog[:, :seq_len]), dim=2) # [B, L, 1 + X + S] + [B, L, F] -> [B, L, 1 + X + S + F]\n", "\n", " # DilatedRNN forward\n", " for layer_num in range(len(self.rnn_stack)):\n", @@ -535,25 +555,313 @@ " output += residual\n", " encoder_input = output\n", "\n", - " if self.futr_exog_size > 0:\n", - " futr_exog = futr_exog.permute(0,2,3,1)[:,:,1:,:] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F]\n", - " encoder_input = torch.cat(( encoder_input, futr_exog.reshape(batch_size, seq_len, -1)), dim=2)\n", - "\n", " # Context adapter\n", - " context = self.context_adapter(encoder_input)\n", - " context = context.reshape(batch_size, seq_len, self.h, self.context_size)\n", + " output = output.permute(0, 2, 1) # [B, L, C] -> [B, C, L]\n", + " context = self.context_adapter(output) # [B, C, L] -> [B, C, h]\n", "\n", " # Residual connection with futr_exog\n", " if self.futr_exog_size > 0:\n", - " context = torch.cat((context, futr_exog), dim=-1)\n", + " futr_exog_futr = futr_exog[:, seq_len:].permute(0, 2, 1) # [B, h, F] -> [B, F, h]\n", + " context = torch.cat((context, futr_exog_futr), \n", + " dim=1) # [B, C, h] + [B, F, h] = [B, C + F, h]\n", "\n", " # Final forecast\n", - " output = self.mlp_decoder(context)\n", - " output = self.loss.domain_map(output)\n", + " context = context.permute(0, 2, 1) # [B, C + F, h] -> [B, h, C + F]\n", + " output = self.mlp_decoder(context) # [B, h, C + F] -> [B, h, n_output]\n", " \n", " return output" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + 
"[source](https://github.com/Nixtla/neuralforecast/blob/main/neuralforecast/models/dilated_rnn.py#L289){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", + "\n", + "### DilatedRNN\n", + "\n", + "> DilatedRNN (h:int, input_size:int, inference_input_size:int=-1,\n", + "> cell_type:str='LSTM', dilations:List[List[int]]=[[1, 2], [4,\n", + "> 8]], encoder_hidden_size:int=200, context_size:int=10,\n", + "> decoder_hidden_size:int=200, decoder_layers:int=2,\n", + "> futr_exog_list=None, hist_exog_list=None,\n", + "> stat_exog_list=None, exclude_insample_y=False, loss=MAE(),\n", + "> valid_loss=None, max_steps:int=1000,\n", + "> learning_rate:float=0.001, num_lr_decays:int=3,\n", + "> early_stop_patience_steps:int=-1, val_check_steps:int=100,\n", + "> batch_size=32, valid_batch_size:Optional[int]=None,\n", + "> windows_batch_size=1024, inference_windows_batch_size=1024,\n", + "> start_padding_enabled=False, step_size:int=1,\n", + "> scaler_type:str='robust', random_seed:int=1,\n", + "> num_workers_loader:int=0, drop_last_loader:bool=False,\n", + "> optimizer=None, optimizer_kwargs=None, lr_scheduler=None,\n", + "> lr_scheduler_kwargs=None, **trainer_kwargs)\n", + "\n", + "*DilatedRNN\n", + "\n", + "**Parameters:**
\n", + "`h`: int, forecast horizon.
\n", + "`input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.
\n", + "`inference_input_size`: int, maximum sequence length for truncated inference. Default -1 uses all history.
\n", + "`cell_type`: str, type of RNN cell to use. Options: 'GRU', 'RNN', 'LSTM', 'ResLSTM', 'AttentiveLSTM'.
\n", + "`dilations`: int list, dilations betweem layers.
\n", + "`encoder_hidden_size`: int=200, units for the RNN's hidden state size.
\n", + "`context_size`: int=10, size of context vector for each timestamp on the forecasting window.
\n", + "`decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
\n", + "`decoder_layers`: int=2, number of layers for the MLP decoder.
\n", + "`futr_exog_list`: str list, future exogenous columns.
\n", + "`hist_exog_list`: str list, historic exogenous columns.
\n", + "`stat_exog_list`: str list, static exogenous columns.
\n", + "`loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
\n", + "`valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
\n", + "`max_steps`: int, maximum number of training steps.
\n", + "`learning_rate`: float, Learning rate between (0, 1).
\n", + "`num_lr_decays`: int, Number of learning rate decays, evenly distributed across max_steps.
\n", + "`early_stop_patience_steps`: int, Number of validation iterations before early stopping.
\n", + "`val_check_steps`: int, Number of training steps between every validation loss check.
\n", + "`batch_size`: int=32, number of different series in each batch.
\n", + "`valid_batch_size`: int=None, number of different series in each validation and test batch.
\n", + "`step_size`: int=1, step size between each window of temporal data.
\n", + "`scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", + "`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", + "`num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", + "`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", + "`alias`: str, optional, Custom name of the model.
\n", + "`optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", + "`optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", + "`lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", + "`lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", + "`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*" + ], + "text/plain": [ + "---\n", + "\n", + "[source](https://github.com/Nixtla/neuralforecast/blob/main/neuralforecast/models/dilated_rnn.py#L289){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n", + "\n", + "### DilatedRNN\n", + "\n", + "> DilatedRNN (h:int, input_size:int, inference_input_size:int=-1,\n", + "> cell_type:str='LSTM', dilations:List[List[int]]=[[1, 2], [4,\n", + "> 8]], encoder_hidden_size:int=200, context_size:int=10,\n", + "> decoder_hidden_size:int=200, decoder_layers:int=2,\n", + "> futr_exog_list=None, hist_exog_list=None,\n", + "> stat_exog_list=None, exclude_insample_y=False, loss=MAE(),\n", + "> valid_loss=None, max_steps:int=1000,\n", + "> learning_rate:float=0.001, num_lr_decays:int=3,\n", + "> early_stop_patience_steps:int=-1, val_check_steps:int=100,\n", + "> batch_size=32, valid_batch_size:Optional[int]=None,\n", + "> windows_batch_size=1024, inference_windows_batch_size=1024,\n", + "> start_padding_enabled=False, step_size:int=1,\n", + "> scaler_type:str='robust', random_seed:int=1,\n", + "> num_workers_loader:int=0, drop_last_loader:bool=False,\n", + "> optimizer=None, optimizer_kwargs=None, lr_scheduler=None,\n", + "> lr_scheduler_kwargs=None, **trainer_kwargs)\n", + "\n", + "*DilatedRNN\n", + "\n", + "**Parameters:**
\n", + "`h`: int, forecast horizon.
\n", + "`input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.
\n", + "`inference_input_size`: int, maximum sequence length for truncated inference. Default -1 uses all history.
\n", + "`cell_type`: str, type of RNN cell to use. Options: 'GRU', 'RNN', 'LSTM', 'ResLSTM', 'AttentiveLSTM'.
\n", + "`dilations`: int list, dilations betweem layers.
\n", + "`encoder_hidden_size`: int=200, units for the RNN's hidden state size.
\n", + "`context_size`: int=10, size of context vector for each timestamp on the forecasting window.
\n", + "`decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
\n", + "`decoder_layers`: int=2, number of layers for the MLP decoder.
\n", + "`futr_exog_list`: str list, future exogenous columns.
\n", + "`hist_exog_list`: str list, historic exogenous columns.
\n", + "`stat_exog_list`: str list, static exogenous columns.
\n", + "`loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
\n", + "`valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
\n", + "`max_steps`: int, maximum number of training steps.
\n", + "`learning_rate`: float, Learning rate between (0, 1).
\n", + "`num_lr_decays`: int, Number of learning rate decays, evenly distributed across max_steps.
\n", + "`early_stop_patience_steps`: int, Number of validation iterations before early stopping.
\n", + "`val_check_steps`: int, Number of training steps between every validation loss check.
\n", + "`batch_size`: int=32, number of different series in each batch.
\n", + "`valid_batch_size`: int=None, number of different series in each validation and test batch.
\n", + "`step_size`: int=1, step size between each window of temporal data.
\n", + "`scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", + "`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", + "`num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", + "`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", + "`alias`: str, optional, Custom name of the model.
\n", + "`optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", + "`optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", + "`lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", + "`lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", + "`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(DilatedRNN)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### DilatedRNN.fit\n", + "\n", + "> DilatedRNN.fit (dataset, val_size=0, test_size=0, random_seed=None,\n", + "> distributed_config=None)\n", + "\n", + "*Fit.\n", + "\n", + "The `fit` method, optimizes the neural network's weights using the\n", + "initialization parameters (`learning_rate`, `windows_batch_size`, ...)\n", + "and the `loss` function as defined during the initialization.\n", + "Within `fit` we use a PyTorch Lightning `Trainer` that\n", + "inherits the initialization's `self.trainer_kwargs`, to customize\n", + "its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", + "\n", + "The method is designed to be compatible with SKLearn-like classes\n", + "and in particular to be compatible with the StatsForecast library.\n", + "\n", + "By default the `model` is not saving training checkpoints to protect\n", + "disk memory, to get them change `enable_checkpointing=True` in `__init__`.\n", + "\n", + "**Parameters:**
\n", + "`dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + "`val_size`: int, validation size for temporal cross-validation.
\n", + "`random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + "`test_size`: int, test size for temporal cross-validation.
*" + ], + "text/plain": [ + "---\n", + "\n", + "### DilatedRNN.fit\n", + "\n", + "> DilatedRNN.fit (dataset, val_size=0, test_size=0, random_seed=None,\n", + "> distributed_config=None)\n", + "\n", + "*Fit.\n", + "\n", + "The `fit` method, optimizes the neural network's weights using the\n", + "initialization parameters (`learning_rate`, `windows_batch_size`, ...)\n", + "and the `loss` function as defined during the initialization.\n", + "Within `fit` we use a PyTorch Lightning `Trainer` that\n", + "inherits the initialization's `self.trainer_kwargs`, to customize\n", + "its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).\n", + "\n", + "The method is designed to be compatible with SKLearn-like classes\n", + "and in particular to be compatible with the StatsForecast library.\n", + "\n", + "By default the `model` is not saving training checkpoints to protect\n", + "disk memory, to get them change `enable_checkpointing=True` in `__init__`.\n", + "\n", + "**Parameters:**
\n", + "`dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + "`val_size`: int, validation size for temporal cross-validation.
\n", + "`random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + "`test_size`: int, test size for temporal cross-validation.
*" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(DilatedRNN.fit, name='DilatedRNN.fit')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### DilatedRNN.predict\n", + "\n", + "> DilatedRNN.predict (dataset, test_size=None, step_size=1,\n", + "> random_seed=None, **data_module_kwargs)\n", + "\n", + "*Predict.\n", + "\n", + "Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", + "\n", + "**Parameters:**
\n", + "`dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + "`test_size`: int=None, test size for temporal cross-validation.
\n", + "`step_size`: int=1, Step size between each window.
\n", + "`random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + "`**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).*" + ], + "text/plain": [ + "---\n", + "\n", + "### DilatedRNN.predict\n", + "\n", + "> DilatedRNN.predict (dataset, test_size=None, step_size=1,\n", + "> random_seed=None, **data_module_kwargs)\n", + "\n", + "*Predict.\n", + "\n", + "Neural network prediction with PL's `Trainer` execution of `predict_step`.\n", + "\n", + "**Parameters:**
\n", + "`dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
\n", + "`test_size`: int=None, test size for temporal cross-validation.
\n", + "`step_size`: int=1, Step size between each window.
\n", + "`random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
\n", + "`**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule).*" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(DilatedRNN.predict, name='DilatedRNN.predict')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DilatedRNN: checking forecast AirPassengers dataset\n" + ] + } + ], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(DilatedRNN, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -565,7 +873,124 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\ospra\\OneDrive\\Nixtla\\Repositories\\neuralforecast\\neuralforecast\\common\\_base_model.py:134: UserWarning: Input size too small. Automatically setting input size to 3 * horizon = 36\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c575af1dd4b545f1a017aa6edc64a115", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Sanity Checking: | | 0/? [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "#| eval: false\n", "import pandas as pd\n", diff --git a/nbs/models.dlinear.ipynb b/nbs/models.dlinear.ipynb index ea1a38a43..4191d5e96 100644 --- a/nbs/models.dlinear.ipynb +++ b/nbs/models.dlinear.ipynb @@ -58,7 +58,7 @@ "import torch\n", "import torch.nn as nn\n", "\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -70,8 +70,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -135,7 +138,7 @@ "outputs": [], "source": [ "#| export\n", - "class DLinear(BaseWindows):\n", + "class DLinear(BaseModel):\n", " \"\"\" DLinear\n", "\n", " *Parameters:*
\n", @@ -173,10 +176,11 @@ "\t- Zeng, Ailing, et al. \"Are transformers effective for time series forecasting?.\" Proceedings of the AAAI conference on artificial intelligence. Vol. 37. No. 9. 2023.\"\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -256,11 +260,7 @@ "\n", " def forward(self, windows_batch):\n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", - " #futr_exog = windows_batch['futr_exog']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1)\n", "\n", " # Parse inputs\n", " batch_size = len(insample_y)\n", @@ -272,7 +272,6 @@ " # Final\n", " forecast = trend_part + seasonal_part\n", " forecast = forecast.reshape(batch_size, self.h, self.loss.outputsize_multiplier)\n", - " forecast = self.loss.domain_map(forecast)\n", " return forecast" ] }, @@ -303,6 +302,21 @@ "show_doc(DLinear.predict, name='DLinear.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(DLinear, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -322,7 +336,7 @@ "import matplotlib.pyplot as plt\n", "\n", "from neuralforecast import NeuralForecast\n", - "from neuralforecast.models import DLinear\n", + "from neuralforecast import DLinear\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, augment_calendar_df\n", "\n", "AirPassengersPanel, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", diff --git a/nbs/models.fedformer.ipynb b/nbs/models.fedformer.ipynb index 2268c058d..5ef61687b 100644 --- a/nbs/models.fedformer.ipynb +++ b/nbs/models.fedformer.ipynb @@ -51,6 +51,20 @@ "![Figure 1. 
FEDformer Architecture.](imgs_models/fedformer.png)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "import logging\n", + "import warnings\n", + "from fastcore.test import test_eq\n", + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" + ] + }, { "cell_type": "code", "execution_count": null, @@ -67,7 +81,7 @@ "\n", "from neuralforecast.common._modules import DataEmbedding\n", "from neuralforecast.common._modules import SeriesDecomp\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -402,7 +416,7 @@ "outputs": [], "source": [ "#| export\n", - "class FEDformer(BaseWindows):\n", + "class FEDformer(BaseModel):\n", " \"\"\" FEDformer\n", "\n", " The FEDformer model tackles the challenge of finding reliable dependencies on intricate temporal patterns of long-horizon forecasting.\n", @@ -460,10 +474,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -626,13 +641,9 @@ " def forward(self, windows_batch):\n", " # Parse windows_batch\n", " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", " futr_exog = windows_batch['futr_exog']\n", "\n", " # Parse inputs\n", - " insample_y = insample_y.unsqueeze(-1) # [Ws,L,1]\n", " if self.futr_exog_size > 0:\n", " x_mark_enc = futr_exog[:,:self.input_size,:]\n", " x_mark_dec = futr_exog[:,-(self.label_len+self.h):,:]\n", @@ -659,11 +670,60 @@ " trend=trend_init)\n", " # final\n", " dec_out = trend_part + seasonal_part\n", - "\n", - " forecast = self.loss.domain_map(dec_out[:, -self.h:])\n", + " forecast = dec_out[:, -self.h:]\n", + " \n", " return forecast" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_doc(FEDformer)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_doc(FEDformer.fit, name='FEDformer.fit')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_doc(FEDformer.predict, name='FEDformer.predict')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(FEDformer, [\"airpassengers\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Usage Example" + ] + }, { "cell_type": "code", "execution_count": null, @@ -682,6 +742,7 @@ "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", + "\n", "model = FEDformer(h=12,\n", " 
input_size=24,\n", " modes=64,\n", diff --git a/nbs/models.gru.ipynb b/nbs/models.gru.ipynb index 7f0608a5f..0793c37be 100644 --- a/nbs/models.gru.ipynb +++ b/nbs/models.gru.ipynb @@ -69,7 +69,10 @@ "outputs": [], "source": [ "#| hide\n", - "from nbdev.showdoc import show_doc" + "import logging\n", + "from fastcore.test import test_eq\n", + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -84,9 +87,10 @@ "\n", "import torch\n", "import torch.nn as nn\n", + "import warnings\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_recurrent import BaseRecurrent\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import MLP" ] }, @@ -97,7 +101,7 @@ "outputs": [], "source": [ "#| export\n", - "class GRU(BaseRecurrent):\n", + "class GRU(BaseModel):\n", " \"\"\" GRU\n", "\n", " Multi Layer Recurrent Network with Gated Units (GRU), and\n", @@ -105,7 +109,7 @@ " using ADAM stochastic gradient descent. The network accepts static, historic \n", " and future exogenous data, flattens the inputs.\n", "\n", - " **Parameters:**
\n", + " **Parameters:**
\n", " `h`: int, forecast horizon.
\n", " `input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.
\n", " `inference_input_size`: int, maximum sequence length for truncated inference. Default -1 uses all history.
\n", @@ -114,7 +118,7 @@ " `encoder_activation`: Optional[str]=None, Deprecated. Activation function in GRU is frozen in PyTorch.
\n", " `encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within GRU units.
\n", " `encoder_dropout`: float=0., dropout regularization applied to GRU outputs.
\n", - " `context_size`: int=10, size of context vector for each timestamp on the forecasting window.
\n", + " `context_size`: deprecated.
\n", " `decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
\n", " `decoder_layers`: int=2, number of layers for the MLP decoder.
\n", " `futr_exog_list`: str list, future exogenous columns.
\n", @@ -142,10 +146,11 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'recurrent'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = True # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int,\n", @@ -156,12 +161,14 @@ " encoder_activation: Optional[str] = None,\n", " encoder_bias: bool = True,\n", " encoder_dropout: float = 0.,\n", - " context_size: int = 10,\n", - " decoder_hidden_size: int = 200,\n", + " context_size: Optional[int] = None,\n", + " decoder_hidden_size: int = 128,\n", " decoder_layers: int = 2,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", + " recurrent = False,\n", " loss = MAE(),\n", " valid_loss = None,\n", " max_steps: int = 1000,\n", @@ -171,6 +178,10 @@ " val_check_steps: int = 100,\n", " batch_size=32,\n", " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", + " step_size: int = 1,\n", " scaler_type: str='robust',\n", " random_seed=1,\n", " num_workers_loader=0,\n", @@ -181,10 +192,16 @@ " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", + " \n", + " self.RECURRENT = recurrent\n", + "\n", " super(GRU, self).__init__(\n", " h=h,\n", " input_size=input_size,\n", - " inference_input_size=inference_input_size,\n", + " futr_exog_list=futr_exog_list,\n", + " hist_exog_list=hist_exog_list,\n", + " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -194,13 +211,14 @@ " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", + " step_size=step_size,\n", " scaler_type=scaler_type,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", + " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " random_seed=random_seed,\n", " optimizer=optimizer,\n", " optimizer_kwargs=optimizer_kwargs,\n", " lr_scheduler=lr_scheduler,\n", @@ -224,75 +242,82 @@ " self.encoder_dropout = encoder_dropout\n", " \n", " # Context adapter\n", - " self.context_size = context_size\n", + " if context_size is not None:\n", + " warnings.warn(\"context_size is deprecated and will be removed in future versions.\")\n", "\n", " # MLP decoder\n", " self.decoder_hidden_size = decoder_hidden_size\n", " self.decoder_layers = decoder_layers\n", "\n", " # RNN input size (1 for target variable y)\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", + " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size\n", "\n", " # Instantiate model\n", + " self.rnn_state = None\n", + " self.maintain_state = False\n", " self.hist_encoder = nn.GRU(input_size=input_encoder,\n", - " hidden_size=self.encoder_hidden_size,\n", - " num_layers=self.encoder_n_layers,\n", - " bias=self.encoder_bias,\n", - " dropout=self.encoder_dropout,\n", - " batch_first=True)\n", - "\n", - " # Context adapter\n", - " 
self.context_adapter = nn.Linear(in_features=self.encoder_hidden_size + self.futr_exog_size * h,\n", - " out_features=self.context_size * h)\n", + " hidden_size=self.encoder_hidden_size,\n", + " num_layers=self.encoder_n_layers,\n", + " bias=self.encoder_bias,\n", + " dropout=self.encoder_dropout,\n", + " batch_first=True)\n", "\n", " # Decoder MLP\n", - " self.mlp_decoder = MLP(in_features=self.context_size + self.futr_exog_size,\n", - " out_features=self.loss.outputsize_multiplier,\n", - " hidden_size=self.decoder_hidden_size,\n", - " num_layers=self.decoder_layers,\n", - " activation='ReLU',\n", - " dropout=0.0)\n", + " if self.RECURRENT:\n", + " self.proj = nn.Linear(self.encoder_hidden_size, self.loss.outputsize_multiplier)\n", + " else:\n", + " self.mlp_decoder = MLP(in_features=self.encoder_hidden_size + self.futr_exog_size,\n", + " out_features=self.loss.outputsize_multiplier,\n", + " hidden_size=self.decoder_hidden_size,\n", + " num_layers=self.decoder_layers,\n", + " activation='ReLU',\n", + " dropout=0.0)\n", "\n", " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", - " futr_exog = windows_batch['futr_exog']\n", - " hist_exog = windows_batch['hist_exog']\n", - " stat_exog = windows_batch['stat_exog']\n", + " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", + " futr_exog = windows_batch['futr_exog'] # [B, seq_len, F]\n", + " hist_exog = windows_batch['hist_exog'] # [B, seq_len, X]\n", + " stat_exog = windows_batch['stat_exog'] # [B, S]\n", "\n", - " # Concatenate y, historic and static inputs\n", - " # [B, C, seq_len, 1] -> [B, seq_len, C]\n", - " # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ]\n", + " # Concatenate y, historic and static inputs \n", " batch_size, seq_len = encoder_input.shape[:2]\n", " if self.hist_exog_size > 0:\n", - " hist_exog = hist_exog.permute(0,2,1,3).squeeze(-1) # [B, X, seq_len, 1] -> [B, seq_len, X]\n", - " encoder_input = torch.cat((encoder_input, hist_exog), dim=2)\n", + " encoder_input = torch.cat((encoder_input, hist_exog), dim=2) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X]\n", "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", - "\n", - " # RNN forward\n", - " hidden_state, _ = self.hist_encoder(encoder_input) # [B, seq_len, rnn_hidden_state]\n", + " # print(encoder_input.shape)\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", + " encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S]\n", "\n", " if self.futr_exog_size > 0:\n", - " futr_exog = futr_exog.permute(0,2,3,1)[:,:,1:,:] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F]\n", - " hidden_state = torch.cat(( hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2)\n", + " encoder_input = torch.cat((encoder_input, \n", + " futr_exog[:, :seq_len]), dim=2) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F]\n", "\n", - " # Context adapter\n", - " context = self.context_adapter(hidden_state)\n", - " context = context.reshape(batch_size, seq_len, self.h, self.context_size)\n", + " if self.RECURRENT:\n", + " if self.maintain_state:\n", + " rnn_state = self.rnn_state\n", + " else:\n", + " rnn_state = None\n", + " \n", + " output, rnn_state = self.hist_encoder(encoder_input, \n", + " 
rnn_state) # [B, seq_len, rnn_hidden_state]\n", + " output = self.proj(output) # [B, seq_len, rnn_hidden_state] -> [B, seq_len, n_output]\n", + " if self.maintain_state:\n", + " self.rnn_state = rnn_state\n", + " else:\n", + " hidden_state, _ = self.hist_encoder(encoder_input, None) # [B, seq_len, rnn_hidden_state]\n", + " hidden_state = hidden_state[:, -self.h:] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state]\n", + " \n", + " if self.futr_exog_size > 0:\n", + " futr_exog_futr = futr_exog[:, -self.h:] # [B, h, F]\n", + " hidden_state = torch.cat((hidden_state, \n", + " futr_exog_futr), dim=-1) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F]\n", "\n", - " # Residual connection with futr_exog\n", - " if self.futr_exog_size > 0:\n", - " context = torch.cat((context, futr_exog), dim=-1)\n", + " output = self.mlp_decoder(hidden_state) # [B, h, rnn_hidden_state + F] -> [B, seq_len, n_output]\n", "\n", - " # Final forecast\n", - " output = self.mlp_decoder(context)\n", - " output = self.loss.domain_map(output)\n", - " \n", - " return output" + " return output[:, -self.h:]" ] }, { @@ -322,6 +347,21 @@ "show_doc(GRU.predict, name='GRU.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(GRU, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -343,17 +383,15 @@ "# from neuralforecast.models import GRU\n", "from neuralforecast.losses.pytorch import DistributionLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", "fcst = NeuralForecast(\n", - " models=[GRU(h=12,input_size=-1,\n", + " models=[GRU(h=12, input_size=24,\n", " loss=DistributionLoss(distribution='Normal', level=[80, 90]),\n", " scaler_type='robust',\n", " encoder_n_layers=2,\n", " encoder_hidden_size=128,\n", - " context_size=10,\n", " decoder_hidden_size=128,\n", " decoder_layers=2,\n", " max_steps=200,\n", diff --git a/nbs/models.informer.ipynb b/nbs/models.informer.ipynb index c8e30137c..3efdeb344 100644 --- a/nbs/models.informer.ipynb +++ b/nbs/models.informer.ipynb @@ -71,7 +71,7 @@ " TransDecoderLayer, TransDecoder,\n", " DataEmbedding, AttentionLayer,\n", ")\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -83,8 +83,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -259,7 +262,7 @@ "outputs": [], "source": [ "#| export\n", - "class Informer(BaseWindows):\n", + "class Informer(BaseModel):\n", " \"\"\" Informer\n", "\n", "\tThe Informer model tackles the vanilla Transformer computational complexity challenges for long-horizon forecasting. \n", @@ -317,10 +320,11 @@ "\t- [Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, Wancai Zhang. 
\"Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting\"](https://arxiv.org/abs/2012.07436)
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False\n", + " RECURRENT = False\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -463,17 +467,11 @@ " def forward(self, windows_batch):\n", " # Parse windows_batch\n", " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", - "\n", " futr_exog = windows_batch['futr_exog']\n", "\n", - " insample_y = insample_y.unsqueeze(-1) # [Ws,L,1]\n", - "\n", " if self.futr_exog_size > 0:\n", - " x_mark_enc = futr_exog[:,:self.input_size,:]\n", - " x_mark_dec = futr_exog[:,-(self.label_len+self.h):,:]\n", + " x_mark_enc = futr_exog[:, :self.input_size, :]\n", + " x_mark_dec = futr_exog[:, -(self.label_len+self.h):, :]\n", " else:\n", " x_mark_enc = None\n", " x_mark_dec = None\n", @@ -488,7 +486,7 @@ " dec_out = self.decoder(dec_out, enc_out, x_mask=None, \n", " cross_mask=None)\n", "\n", - " forecast = self.loss.domain_map(dec_out[:, -self.h:])\n", + " forecast = dec_out[:, -self.h:]\n", " return forecast" ] }, @@ -519,6 +517,21 @@ "show_doc(Informer.predict, name='Informer.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(Informer, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -555,7 +568,7 @@ " futr_exog_list=calendar_cols,\n", " scaler_type='robust',\n", " learning_rate=1e-3,\n", - " max_steps=5,\n", + " max_steps=200,\n", " val_check_steps=50,\n", " early_stop_patience_steps=2)\n", "\n", diff --git a/nbs/models.ipynb b/nbs/models.ipynb index 018525399..e3a3342a0 100644 --- a/nbs/models.ipynb +++ b/nbs/models.ipynb @@ -229,10 +229,10 @@ " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", - " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", + " \"encoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", - " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", + " \"decoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", @@ -314,7 +314,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoRNN.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoRNN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "model.fit(dataset=dataset)\n", @@ -372,10 +372,10 @@ " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", - " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", + " \"encoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", - " 
\"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", + " \"decoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", @@ -452,7 +452,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoLSTM.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoLSTM(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -511,10 +511,10 @@ " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", - " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", + " \"encoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", - " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", + " \"decoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", @@ -591,7 +591,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoGRU.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoGRU(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -650,9 +650,9 @@ " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", - " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", + " \"encoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", - " \"decoder_hidden_size\": tune.choice([64, 128]),\n", + " \"decoder_hidden_size\": tune.choice([32, 64]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", @@ -729,7 +729,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoTCN.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoTCN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -927,10 +927,10 @@ " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"cell_type\": tune.choice(['LSTM', 'GRU']),\n", - " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", + " \"encoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"dilations\": tune.choice([ [[1, 2], [4, 8]], [[1, 2, 4, 8]] ]),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", - " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", + " \"decoder_hidden_size\": tune.choice([16, 32, 64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", @@ -1007,7 +1007,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoDilatedRNN.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, 
input_size=-1, encoder_hidden_size=8)\n", "model = AutoDilatedRNN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -1290,7 +1290,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoMLP.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12, hidden_size=8)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoMLP(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -1425,7 +1425,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoNBEATS.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12,\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12,\n", " mlp_units=3*[[8, 8]])\n", "model = AutoNBEATS(h=12, config=config, num_samples=1, cpus=1)\n", "\n", @@ -1561,7 +1561,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoNBEATSx.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12,\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12,\n", " mlp_units=3*[[8, 8]])\n", "model = AutoNBEATSx(h=12, config=config, num_samples=1, cpus=1)\n", "\n", @@ -1703,7 +1703,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12, \n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12, \n", " mlp_units=3 * [[8, 8]])\n", "model = AutoNHITS(h=12, config=config, num_samples=1, cpus=1)\n", "\n", @@ -1841,7 +1841,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoDLinear.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoDLinear(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -1976,7 +1976,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoNLinear.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoNLinear(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -2119,7 +2119,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoTiDE.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoTiDE(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -2257,7 +2257,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoDeepNPTS.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoDeepNPTS(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", @@ -2403,7 +2403,7 @@ "source": [ "%%capture\n", "# Use your own config or AutoKAN.default_config\n", - "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", + "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoKAN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", diff --git a/nbs/models.itransformer.ipynb b/nbs/models.itransformer.ipynb index 5e134cfa0..b226d66dc 100644 --- a/nbs/models.itransformer.ipynb +++ b/nbs/models.itransformer.ipynb @@ -27,8 +27,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - 
"from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -69,9 +72,9 @@ "import numpy as np\n", "\n", "from math import sqrt\n", - "\n", + "from typing import Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.common._modules import TransEncoder, TransEncoderLayer, AttentionLayer" ] @@ -195,7 +198,7 @@ "source": [ "#| export\n", "\n", - "class iTransformer(BaseMultivariate):\n", + "class iTransformer(BaseModel):\n", "\n", " \"\"\" iTransformer\n", "\n", @@ -222,6 +225,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=128, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=128, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -240,10 +247,11 @@ " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = True\n", + " RECURRENT = False\n", "\n", " def __init__(self,\n", " h,\n", @@ -252,6 +260,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " hidden_size: int = 512,\n", " n_heads: int = 8,\n", " e_layers: int = 2,\n", @@ -268,6 +277,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 128,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -286,6 +299,7 @@ " stat_exog_list = None,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -294,6 +308,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -335,8 +353,8 @@ " norm_layer=torch.nn.LayerNorm(self.hidden_size)\n", " )\n", "\n", - " self.projector = nn.Linear(self.hidden_size, h, bias=True)\n", - " \n", + " self.projector = nn.Linear(self.hidden_size, h * self.loss.outputsize_multiplier, bias=True)\n", + "\n", " def forecast(self, x_enc):\n", " if self.use_norm:\n", " # Normalization from Non-stationary Transformer\n", @@ -363,8 +381,8 @@ "\n", " if self.use_norm:\n", " # De-Normalization from Non-stationary Transformer\n", - " dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h, 1))\n", - " dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h, 1))\n", + " dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h * self.loss.outputsize_multiplier, 1))\n", + " dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h * self.loss.outputsize_multiplier, 1))\n", "\n", " return dec_out\n", " \n", @@ -372,14 +390,11 @@ " insample_y = windows_batch['insample_y']\n", "\n", " y_pred = self.forecast(insample_y)\n", - " y_pred = y_pred[:, -self.h:, :]\n", - " y_pred = self.loss.domain_map(y_pred)\n", + " y_pred = y_pred.reshape(insample_y.shape[0],\n", + " self.h,\n", + " -1)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " if y_pred.ndim == 2:\n", - " return y_pred.unsqueeze(-1)\n", - " else:\n", - " return y_pred\n" + " return y_pred" ] }, { @@ -409,6 +424,21 @@ "show_doc(iTransformer.predict, name='iTransformer.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(iTransformer, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -448,7 
+478,8 @@ " loss=MSE(),\n", " valid_loss=MAE(),\n", " early_stop_patience_steps=3,\n", - " batch_size=32)\n", + " batch_size=32,\n", + " max_steps=100)\n", "\n", "fcst = NeuralForecast(models=[model], freq='M')\n", "fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", diff --git a/nbs/models.kan.ipynb b/nbs/models.kan.ipynb index ac7cc5e2b..003a8e3d0 100644 --- a/nbs/models.kan.ipynb +++ b/nbs/models.kan.ipynb @@ -61,8 +61,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -80,7 +83,7 @@ "import torch.nn.functional as F\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -318,7 +321,7 @@ "source": [ "#| export\n", "\n", - "class KAN(BaseWindows):\n", + "class KAN(BaseModel):\n", " \"\"\" KAN\n", "\n", " Simple Kolmogorov-Arnold Network (KAN).\n", @@ -372,10 +375,11 @@ " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True \n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -495,7 +499,7 @@ " \n", " def forward(self, windows_batch, update_grid=False):\n", "\n", - " insample_y = windows_batch['insample_y']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1)\n", " futr_exog = windows_batch['futr_exog']\n", " hist_exog = windows_batch['hist_exog']\n", " stat_exog = windows_batch['stat_exog']\n", @@ -520,7 +524,6 @@ "\n", " y_pred = y_pred.reshape(batch_size, self.h, \n", " self.loss.outputsize_multiplier)\n", - " y_pred = self.loss.domain_map(y_pred)\n", " return y_pred\n", " " ] @@ -552,6 +555,21 @@ "show_doc(KAN.predict, name='KAN.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(KAN, checks=[\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -574,7 +592,6 @@ "from neuralforecast.losses.pytorch import DistributionLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", - "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", diff --git a/nbs/models.lstm.ipynb b/nbs/models.lstm.ipynb index 3eb469306..954e53257 100644 --- a/nbs/models.lstm.ipynb +++ b/nbs/models.lstm.ipynb @@ -58,7 +58,10 @@ "outputs": [], "source": [ "#| hide\n", - "from nbdev.showdoc import show_doc" + "import logging\n", + "from fastcore.test import test_eq\n", + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -72,9 +75,10 @@ "\n", "import torch\n", "import torch.nn as nn\n", + "import warnings\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from 
neuralforecast.common._base_recurrent import BaseRecurrent\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import MLP" ] }, @@ -85,7 +89,7 @@ "outputs": [], "source": [ "#| export\n", - "class LSTM(BaseRecurrent):\n", + "class LSTM(BaseModel):\n", " \"\"\" LSTM\n", "\n", " LSTM encoder, with MLP decoder.\n", @@ -101,7 +105,7 @@ " `encoder_hidden_size`: int=200, units for the LSTM's hidden state size.
\n", " `encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within LSTM units.
\n", " `encoder_dropout`: float=0., dropout regularization applied to LSTM outputs.
\n", - " `context_size`: int=10, size of context vector for each timestamp on the forecasting window.
\n", + " `context_size`: deprecated.
\n", " `decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
\n", " `decoder_layers`: int=2, number of layers for the MLP decoder.
\n", " `futr_exog_list`: str list, future exogenous columns.
\n", @@ -129,25 +133,27 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'recurrent'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = True # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int,\n", - " input_size: int = -1,\n", - " inference_input_size: int = -1,\n", + " input_size: int,\n", " encoder_n_layers: int = 2,\n", - " encoder_hidden_size: int = 200,\n", + " encoder_hidden_size: int = 128,\n", " encoder_bias: bool = True,\n", " encoder_dropout: float = 0.,\n", - " context_size: int = 10,\n", - " decoder_hidden_size: int = 200,\n", + " context_size: Optional[int] = None,\n", + " decoder_hidden_size: int = 128,\n", " decoder_layers: int = 2,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", + " recurrent = False,\n", " loss = MAE(),\n", " valid_loss = None,\n", " max_steps: int = 1000,\n", @@ -157,6 +163,10 @@ " val_check_steps: int = 100,\n", " batch_size = 32,\n", " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", + " step_size: int = 1,\n", " scaler_type: str = 'robust',\n", " random_seed = 1,\n", " num_workers_loader = 0,\n", @@ -167,10 +177,16 @@ " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", + " \n", + " self.RECURRENT = recurrent\n", + " \n", " super(LSTM, self).__init__(\n", " h=h,\n", " input_size=input_size,\n", - " inference_input_size=inference_input_size,\n", + " futr_exog_list=futr_exog_list,\n", + " hist_exog_list=hist_exog_list,\n", + " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -180,13 +196,14 @@ " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", + " step_size=step_size,\n", " scaler_type=scaler_type,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", + " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " random_seed=random_seed,\n", " optimizer=optimizer,\n", " optimizer_kwargs=optimizer_kwargs,\n", " lr_scheduler=lr_scheduler,\n", @@ -202,75 +219,80 @@ " self.encoder_dropout = encoder_dropout\n", " \n", " # Context adapter\n", - " self.context_size = context_size\n", + " if context_size is not None:\n", + " warnings.warn(\"context_size is deprecated and will be removed in future versions.\")\n", "\n", " # MLP decoder\n", " self.decoder_hidden_size = decoder_hidden_size\n", " self.decoder_layers = decoder_layers\n", "\n", " # LSTM input size (1 for target variable y)\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", + " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size\n", "\n", " # Instantiate model\n", + " self.rnn_state = None\n", + " self.maintain_state = False\n", " self.hist_encoder = nn.LSTM(input_size=input_encoder,\n", " hidden_size=self.encoder_hidden_size,\n", " 
num_layers=self.encoder_n_layers,\n", " bias=self.encoder_bias,\n", " dropout=self.encoder_dropout,\n", - " batch_first=True)\n", - "\n", - " # Context adapter\n", - " self.context_adapter = nn.Linear(in_features=self.encoder_hidden_size + self.futr_exog_size * h,\n", - " out_features=self.context_size * h)\n", + " batch_first=True,\n", + " proj_size=self.loss.outputsize_multiplier if self.RECURRENT else 0)\n", "\n", " # Decoder MLP\n", - " self.mlp_decoder = MLP(in_features=self.context_size + self.futr_exog_size,\n", - " out_features=self.loss.outputsize_multiplier,\n", - " hidden_size=self.decoder_hidden_size,\n", - " num_layers=self.decoder_layers,\n", - " activation='ReLU',\n", - " dropout=0.0)\n", + " if not self.RECURRENT:\n", + " self.mlp_decoder = MLP(in_features=self.encoder_hidden_size + self.futr_exog_size,\n", + " out_features=self.loss.outputsize_multiplier,\n", + " hidden_size=self.decoder_hidden_size,\n", + " num_layers=self.decoder_layers,\n", + " activation='ReLU',\n", + " dropout=0.0)\n", "\n", " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", - " futr_exog = windows_batch['futr_exog']\n", - " hist_exog = windows_batch['hist_exog']\n", - " stat_exog = windows_batch['stat_exog']\n", + " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", + " futr_exog = windows_batch['futr_exog'] # [B, seq_len, F]\n", + " hist_exog = windows_batch['hist_exog'] # [B, seq_len, X]\n", + " stat_exog = windows_batch['stat_exog'] # [B, S]\n", "\n", - " # Concatenate y, historic and static inputs\n", - " # [B, C, seq_len, 1] -> [B, seq_len, C]\n", - " # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ]\n", + " # Concatenate y, historic and static inputs \n", " batch_size, seq_len = encoder_input.shape[:2]\n", " if self.hist_exog_size > 0:\n", - " hist_exog = hist_exog.permute(0,2,1,3).squeeze(-1) # [B, X, seq_len, 1] -> [B, seq_len, X]\n", - " encoder_input = torch.cat((encoder_input, hist_exog), dim=2)\n", + " encoder_input = torch.cat((encoder_input, hist_exog), dim=2) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X]\n", "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", - "\n", - " # RNN forward\n", - " hidden_state, _ = self.hist_encoder(encoder_input) # [B, seq_len, rnn_hidden_state]\n", + " # print(encoder_input.shape)\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", + " encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S]\n", "\n", " if self.futr_exog_size > 0:\n", - " futr_exog = futr_exog.permute(0,2,3,1)[:,:,1:,:] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F]\n", - " hidden_state = torch.cat(( hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2)\n", + " encoder_input = torch.cat((encoder_input, \n", + " futr_exog[:, :seq_len]), dim=2) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F]\n", "\n", - " # Context adapter\n", - " context = self.context_adapter(hidden_state)\n", - " context = context.reshape(batch_size, seq_len, self.h, self.context_size)\n", + " if self.RECURRENT:\n", + " if self.maintain_state:\n", + " rnn_state = self.rnn_state\n", + " else:\n", + " rnn_state = None\n", + " \n", + " output, rnn_state = self.hist_encoder(encoder_input, \n", + " rnn_state) # 
[B, seq_len, n_output]\n", + " if self.maintain_state:\n", + " self.rnn_state = rnn_state\n", + " else:\n", + " hidden_state, _ = self.hist_encoder(encoder_input, None) # [B, seq_len, rnn_hidden_state]\n", + " hidden_state = hidden_state[:, -self.h:] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state]\n", + " \n", + " if self.futr_exog_size > 0:\n", + " futr_exog_futr = futr_exog[:, -self.h:] # [B, h, F]\n", + " hidden_state = torch.cat((hidden_state, \n", + " futr_exog_futr), dim=-1) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F]\n", "\n", - " # Residual connection with futr_exog\n", - " if self.futr_exog_size > 0:\n", - " context = torch.cat((context, futr_exog), dim=-1)\n", + " output = self.mlp_decoder(hidden_state) # [B, h, rnn_hidden_state + F] -> [B, seq_len, n_output]\n", "\n", - " # Final forecast\n", - " output = self.mlp_decoder(context)\n", - " output = self.loss.domain_map(output)\n", - " \n", - " return output" + " return output[:, -self.h:]" ] }, { @@ -300,6 +322,21 @@ "show_doc(LSTM.predict, name='LSTM.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(LSTM, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -326,17 +363,18 @@ "Y_test_df = AirPassengersPanel[AirPassengersPanel.ds>=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", "nf = NeuralForecast(\n", - " models=[LSTM(h=12, input_size=-1,\n", - " loss=DistributionLoss(distribution='Normal', level=[80, 90]),\n", + " models=[LSTM(h=12, \n", + " input_size=24,\n", + " loss=DistributionLoss(distribution=\"Normal\", level=[80, 90]),\n", " scaler_type='robust',\n", " encoder_n_layers=2,\n", " encoder_hidden_size=128,\n", - " context_size=10,\n", " decoder_hidden_size=128,\n", " decoder_layers=2,\n", " max_steps=200,\n", " futr_exog_list=['y_[lag12]'],\n", " stat_exog_list=['airline1'],\n", + " recurrent=False,\n", " )\n", " ],\n", " freq='M'\n", @@ -344,19 +382,18 @@ "nf.fit(df=Y_train_df, static_df=AirPassengersStatic)\n", "Y_hat_df = nf.predict(futr_df=Y_test_df)\n", "\n", + "# Plots\n", "Y_hat_df = Y_hat_df.reset_index(drop=False).drop(columns=['unique_id','ds'])\n", "plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)\n", "plot_df = pd.concat([Y_train_df, plot_df])\n", "\n", "plot_df = plot_df[plot_df.unique_id=='Airline1'].drop('unique_id', axis=1)\n", "plt.plot(plot_df['ds'], plot_df['y'], c='black', label='True')\n", - "plt.plot(plot_df['ds'], plot_df['LSTM'], c='purple', label='mean')\n", "plt.plot(plot_df['ds'], plot_df['LSTM-median'], c='blue', label='median')\n", "plt.fill_between(x=plot_df['ds'][-12:], \n", - " y1=plot_df['LSTM-lo-90'][-12:].values, \n", + " y1=plot_df['LSTM-lo-90'][-12:].values,\n", " y2=plot_df['LSTM-hi-90'][-12:].values,\n", " alpha=0.4, label='level 90')\n", - "plt.legend()\n", "plt.grid()\n", "plt.plot()" ] diff --git a/nbs/models.mlp.ipynb b/nbs/models.mlp.ipynb index 46c09406f..848cd037c 100644 --- a/nbs/models.mlp.ipynb +++ b/nbs/models.mlp.ipynb @@ -49,8 +49,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import 
show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -67,7 +70,7 @@ "import torch.nn as nn\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -78,7 +81,7 @@ "outputs": [], "source": [ "#| export\n", - "class MLP(BaseWindows):\n", + "class MLP(BaseModel):\n", " \"\"\" MLP\n", "\n", " Simple Multi Layer Perceptron architecture (MLP). \n", @@ -122,10 +125,11 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", - " EXOGENOUS_STAT = True \n", + " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -211,7 +215,7 @@ " def forward(self, windows_batch):\n", "\n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1)\n", " futr_exog = windows_batch['futr_exog']\n", " hist_exog = windows_batch['hist_exog']\n", " stat_exog = windows_batch['stat_exog']\n", @@ -235,7 +239,6 @@ "\n", " y_pred = y_pred.reshape(batch_size, self.h, \n", " self.loss.outputsize_multiplier)\n", - " y_pred = self.loss.domain_map(y_pred)\n", " return y_pred" ] }, @@ -269,6 +272,22 @@ "show_doc(MLP.predict, name='MLP.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "a09d7a35", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(MLP, [\"airpassengers\"])" + ] + }, { "cell_type": "code", "execution_count": null, @@ -421,6 +440,7 @@ "fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", "forecasts = fcst.predict(futr_df=Y_test_df)\n", "\n", + "# Plot predictions\n", "Y_hat_df = forecasts.reset_index(drop=False).drop(columns=['unique_id','ds'])\n", "plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)\n", "plot_df = pd.concat([Y_train_df, plot_df])\n", diff --git a/nbs/models.mlpmultivariate.ipynb b/nbs/models.mlpmultivariate.ipynb index 71abdfb04..d06f3034b 100644 --- a/nbs/models.mlpmultivariate.ipynb +++ b/nbs/models.mlpmultivariate.ipynb @@ -49,8 +49,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -64,8 +67,9 @@ "import torch\n", "import torch.nn as nn\n", "\n", + "from typing import Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -76,7 +80,7 @@ "outputs": [], "source": [ "#| export\n", - "class MLPMultivariate(BaseMultivariate):\n", + "class MLPMultivariate(BaseModel):\n", " \"\"\" MLPMultivariate\n", "\n", " Simple Multi Layer Perceptron architecture (MLP) for multivariate forecasting. \n", @@ -102,6 +106,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -116,10 +124,11 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True \n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -128,6 +137,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " num_layers = 2,\n", " hidden_size = 1024,\n", " loss = MAE(),\n", @@ -138,6 +148,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 256,\n", + " inference_windows_batch_size = 256,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -157,6 +171,7 @@ " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -165,6 +180,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " num_workers_loader=num_workers_loader,\n", @@ -223,15 +242,9 @@ " x = torch.relu(layer(x))\n", " x = self.out(x)\n", " \n", - " x = x.reshape(batch_size, self.h, -1)\n", - " forecast = self.loss.domain_map(x)\n", + " forecast = x.reshape(batch_size, self.h, -1)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet.\n", - " if forecast.ndim == 2:\n", - " return forecast.unsqueeze(-1)\n", - " else:\n", - " return forecast" + " return forecast" ] }, { @@ -267,76 +280,17 @@ { "cell_type": "code", "execution_count": null, - "id": "1bf909e1", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "import logging\n", - "import warnings\n", - "\n", - "from neuralforecast import NeuralForecast\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MAE, MSE, RMSE, MAPE, SMAPE, MASE, relMSE, QuantileLoss, MQLoss, DistributionLoss,PMM, GMM, NBMM, HuberLoss, TukeyLoss, HuberQLoss, HuberMQLoss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f7ee8d15", + "id": "6c22db80", "metadata": {}, "outputs": [], "source": [ "#| hide\n", - "# Test losses\n", + "# Unit tests for models\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", - "\n", - "AirPassengersStatic_single = AirPassengersStatic[AirPassengersStatic[\"unique_id\"] == 'Airline1']\n", - "Y_train_df_single = Y_train_df[Y_train_df[\"unique_id\"] == 'Airline1']\n", - "Y_test_df_single = Y_test_df[Y_test_df[\"unique_id\"] == 'Airline1']\n", - "\n", - "losses = [MAE(), 
MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "valid_losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "\n", - "for loss, valid_loss in zip(losses, valid_losses):\n", - " try:\n", - " model = MLPMultivariate(h=12, \n", - " input_size=24,\n", - " n_series=2,\n", - " loss = loss,\n", - " valid_loss = valid_loss,\n", - " scaler_type='robust',\n", - " learning_rate=1e-3,\n", - " max_steps=2,\n", - " val_check_steps=10,\n", - " early_stop_patience_steps=2,\n", - " )\n", - "\n", - " fcst = NeuralForecast(models=[model], freq='M')\n", - " fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", - " forecasts = fcst.predict(futr_df=Y_test_df)\n", - " except Exception as e:\n", - " assert str(e) == f\"{loss} is not supported in a Multivariate model.\"\n", - "\n", - "\n", - "# Test n_series = 1\n", - "model = MLPMultivariate(h=12, \n", - " input_size=24,\n", - " n_series=1,\n", - " loss = MAE(),\n", - " scaler_type='robust',\n", - " learning_rate=1e-3,\n", - " max_steps=2,\n", - " val_check_steps=10,\n", - " early_stop_patience_steps=2,\n", - " )\n", - "fcst = NeuralForecast(models=[model], freq='M')\n", - "fcst.fit(df=Y_train_df_single, static_df=AirPassengersStatic_single, val_size=12)\n", - "forecasts = fcst.predict(futr_df=Y_test_df_single) " + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(MLPMultivariate, [\"airpassengers\"])" ] }, { @@ -374,6 +328,7 @@ " loss = MAE(),\n", " scaler_type='robust',\n", " learning_rate=1e-3,\n", + " stat_exog_list=['airline1'],\n", " max_steps=200,\n", " val_check_steps=10,\n", " early_stop_patience_steps=2)\n", @@ -385,6 +340,7 @@ "fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", "forecasts = fcst.predict(futr_df=Y_test_df)\n", "\n", + "# Plot predictions\n", "Y_hat_df = forecasts.reset_index(drop=False).drop(columns=['unique_id','ds'])\n", "plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)\n", "plot_df = pd.concat([Y_train_df, plot_df])\n", diff --git a/nbs/models.nbeats.ipynb b/nbs/models.nbeats.ipynb index 9504770d5..be1c8a93a 100644 --- a/nbs/models.nbeats.ipynb +++ b/nbs/models.nbeats.ipynb @@ -66,7 +66,7 @@ "import torch.nn as nn\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -77,9 +77,12 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", "from nbdev.showdoc import show_doc\n", "from neuralforecast.utils import generate_series\n", + "from 
neuralforecast.common._model_checks import check_model\n", "\n", "import matplotlib.pyplot as plt" ] @@ -231,7 +234,7 @@ "outputs": [], "source": [ "#| export\n", - "class NBEATS(BaseWindows):\n", + "class NBEATS(BaseModel):\n", " \"\"\" NBEATS\n", "\n", " The Neural Basis Expansion Analysis for Time Series (NBEATS), is a simple and yet\n", @@ -282,10 +285,11 @@ " \"N-BEATS: Neural basis expansion analysis for interpretable time series forecasting\".](https://arxiv.org/abs/1905.10437)\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", " \n", " def __init__(self,\n", " h,\n", @@ -420,8 +424,8 @@ " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", - " insample_mask = windows_batch['insample_mask']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1)\n", + " insample_mask = windows_batch['insample_mask'].squeeze(-1)\n", "\n", " # NBEATS' forward\n", " residuals = insample_y.flip(dims=(-1,)) # backcast init\n", @@ -435,10 +439,7 @@ " forecast = forecast + block_forecast\n", "\n", " if self.decompose_forecast:\n", - " block_forecasts.append(block_forecast)\n", - "\n", - " # Adapting output's domain\n", - " forecast = self.loss.domain_map(forecast) \n", + " block_forecasts.append(block_forecast) \n", "\n", " if self.decompose_forecast:\n", " # (n_batch, n_blocks, h, out_features)\n", @@ -480,6 +481,22 @@ "show_doc(NBEATS.predict, name='NBEATS.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "8de78f60", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(NBEATS, [\"airpassengers\"])" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/nbs/models.nbeatsx.ipynb b/nbs/models.nbeatsx.ipynb index 9952c3cf9..aaba3b760 100644 --- a/nbs/models.nbeatsx.ipynb +++ b/nbs/models.nbeatsx.ipynb @@ -62,7 +62,8 @@ "\n", "from fastcore.test import test_eq, test_fail\n", "from nbdev.showdoc import show_doc\n", - "from neuralforecast.utils import generate_series" + "from neuralforecast.utils import generate_series\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -80,7 +81,7 @@ "import torch.nn as nn\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -373,7 +374,7 @@ "outputs": [], "source": [ "#| export\n", - "class NBEATSx(BaseWindows):\n", + "class NBEATSx(BaseModel):\n", " \"\"\"NBEATSx\n", "\n", " The Neural Basis Expansion Analysis with Exogenous variables (NBEATSx) is a simple\n", @@ -427,10 +428,11 @@ " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = \"windows\"\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts 
recursively (True) or direct (False)\n", "\n", " def __init__(\n", " self,\n", @@ -612,8 +614,8 @@ "\n", " def forward(self, windows_batch):\n", " # Parse windows_batch\n", - " insample_y = windows_batch[\"insample_y\"]\n", - " insample_mask = windows_batch[\"insample_mask\"]\n", + " insample_y = windows_batch[\"insample_y\"].squeeze(-1)\n", + " insample_mask = windows_batch[\"insample_mask\"].squeeze(-1)\n", " futr_exog = windows_batch[\"futr_exog\"]\n", " hist_exog = windows_batch[\"hist_exog\"]\n", " stat_exog = windows_batch[\"stat_exog\"]\n", @@ -637,9 +639,6 @@ " if self.decompose_forecast:\n", " block_forecasts.append(block_forecast)\n", "\n", - " # Adapting output's domain\n", - " forecast = self.loss.domain_map(forecast)\n", - "\n", " if self.decompose_forecast:\n", " # (n_batch, n_blocks, h)\n", " block_forecasts = torch.stack(block_forecasts)\n", @@ -680,6 +679,22 @@ "show_doc(NBEATSx.predict, name='NBEATSx.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce8cba7d", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(NBEATSx, [\"airpassengers\"])" + ] + }, { "cell_type": "code", "execution_count": null, @@ -806,7 +821,7 @@ "# test seasonality/trend basis protection\n", "test_fail(NBEATSx.__init__, \n", " contains='Horizon `h=1` incompatible with `seasonality` or `trend` in stacks',\n", - " kwargs=dict(self=BaseWindows, h=1, input_size=4))" + " kwargs=dict(self=BaseModel, h=1, input_size=4))" ] }, { diff --git a/nbs/models.nhits.ipynb b/nbs/models.nhits.ipynb index e844f4660..98da310c1 100644 --- a/nbs/models.nhits.ipynb +++ b/nbs/models.nhits.ipynb @@ -67,7 +67,7 @@ "import torch.nn.functional as F\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_windows import BaseWindows" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -83,7 +83,8 @@ "import matplotlib.pyplot as plt\n", "from fastcore.test import test_eq\n", "from nbdev.showdoc import show_doc\n", - "from neuralforecast.utils import generate_series" + "from neuralforecast.utils import generate_series\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -261,7 +262,7 @@ "outputs": [], "source": [ "#| export\n", - "class NHITS(BaseWindows):\n", + "class NHITS(BaseModel):\n", " \"\"\" NHITS\n", "\n", " The Neural Hierarchical Interpolation for Time Series (NHITS), is an MLP-based deep\n", @@ -316,10 +317,11 @@ " Accepted at the Thirty-Seventh AAAI Conference on Artificial Intelligence.](https://arxiv.org/abs/2201.12886)\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self, \n", " h,\n", @@ -455,8 +457,8 @@ " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", - " insample_mask = windows_batch['insample_mask']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1).contiguous()\n", + " insample_mask = 
windows_batch['insample_mask'].squeeze(-1).contiguous()\n", " futr_exog = windows_batch['futr_exog']\n", " hist_exog = windows_batch['hist_exog']\n", " stat_exog = windows_batch['stat_exog']\n", @@ -476,9 +478,6 @@ " if self.decompose_forecast:\n", " block_forecasts.append(block_forecast)\n", " \n", - " # Adapting output's domain\n", - " forecast = self.loss.domain_map(forecast)\n", - "\n", " if self.decompose_forecast:\n", " # (n_batch, n_blocks, h, output_size)\n", " block_forecasts = torch.stack(block_forecasts)\n", @@ -516,6 +515,21 @@ "show_doc(NHITS.predict, name='NHITS.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(NHITS, [\"airpassengers\"])" + ] + }, { "cell_type": "code", "execution_count": null, @@ -611,7 +625,6 @@ "from neuralforecast.losses.pytorch import DistributionLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", - "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", diff --git a/nbs/models.nlinear.ipynb b/nbs/models.nlinear.ipynb index b55d42204..fc67b409a 100644 --- a/nbs/models.nlinear.ipynb +++ b/nbs/models.nlinear.ipynb @@ -53,7 +53,7 @@ "\n", "import torch.nn as nn\n", "\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -65,8 +65,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -76,7 +79,7 @@ "outputs": [], "source": [ "#| export\n", - "class NLinear(BaseWindows):\n", + "class NLinear(BaseModel):\n", " \"\"\" NLinear\n", "\n", " *Parameters:*
\n", @@ -113,10 +116,11 @@ "\t- Zeng, Ailing, et al. \"Are transformers effective for time series forecasting?.\" Proceedings of the AAAI conference on artificial intelligence. Vol. 37. No. 9. 2023.\"\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -188,11 +192,7 @@ "\n", " def forward(self, windows_batch):\n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", - " #futr_exog = windows_batch['futr_exog']\n", + " insample_y = windows_batch['insample_y'].squeeze(-1)\n", "\n", " # Parse inputs\n", " batch_size = len(insample_y)\n", @@ -204,7 +204,6 @@ " # Final\n", " forecast = self.linear(norm_insample_y) + last_value\n", " forecast = forecast.reshape(batch_size, self.h, self.loss.outputsize_multiplier)\n", - " forecast = self.loss.domain_map(forecast)\n", " return forecast" ] }, @@ -235,6 +234,21 @@ "show_doc(NLinear.predict, name='NLinear.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(NLinear, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -254,7 +268,7 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import NLinear\n", - "from neuralforecast.losses.pytorch import MQLoss, DistributionLoss\n", + "from neuralforecast.losses.pytorch import DistributionLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, augment_calendar_df\n", "\n", "AirPassengersPanel, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", @@ -264,8 +278,7 @@ "\n", "model = NLinear(h=12,\n", " input_size=24,\n", - " loss=MAE(),\n", - " #loss=DistributionLoss(distribution='StudentT', level=[80, 90], return_params=True),\n", + " loss=DistributionLoss(distribution='StudentT', level=[80, 90], return_params=True),\n", " scaler_type='robust',\n", " learning_rate=1e-3,\n", " max_steps=500,\n", diff --git a/nbs/models.patchtst.ipynb b/nbs/models.patchtst.ipynb index 31064cc24..bd6f2a35f 100644 --- a/nbs/models.patchtst.ipynb +++ b/nbs/models.patchtst.ipynb @@ -61,7 +61,7 @@ "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import RevIN\n", "\n", "from neuralforecast.losses.pytorch import MAE" @@ -74,8 +74,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -611,7 +614,7 @@ "outputs": [], "source": [ "#| export\n", - "class 
PatchTST(BaseWindows):\n", + "class PatchTST(BaseModel):\n", " \"\"\" PatchTST\n", "\n", " The PatchTST model is an efficient Transformer-based model for multivariate time series forecasting.\n", @@ -673,10 +676,11 @@ " -[Nie, Y., Nguyen, N. H., Sinthong, P., & Kalagnanam, J. (2022). \"A Time Series is Worth 64 Words: Long-term Forecasting with Transformers\"](https://arxiv.org/pdf/2211.14730.pdf)\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -789,21 +793,11 @@ " def forward(self, windows_batch): # x: [batch, input_size]\n", "\n", " # Parse windows_batch\n", - " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", - " #futr_exog = windows_batch['futr_exog']\n", - "\n", - " # Add dimension for channel\n", - " x = insample_y.unsqueeze(-1) # [Ws,L,1]\n", + " x = windows_batch['insample_y']\n", "\n", " x = x.permute(0,2,1) # x: [Batch, 1, input_size]\n", " x = self.model(x)\n", - " x = x.reshape(x.shape[0], self.h, -1) # x: [Batch, h, c_out]\n", - "\n", - " # Domain map\n", - " forecast = self.loss.domain_map(x)\n", + " forecast = x.reshape(x.shape[0], self.h, -1) # x: [Batch, h, c_out]\n", " \n", " return forecast" ] @@ -835,6 +829,21 @@ "show_doc(PatchTST.predict, name='PatchTST.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(PatchTST, [\"airpassengers\"])" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -872,7 +881,6 @@ " n_heads=4,\n", " scaler_type='robust',\n", " loss=DistributionLoss(distribution='StudentT', level=[80, 90]),\n", - " #loss=MAE(),\n", " learning_rate=1e-3,\n", " max_steps=500,\n", " val_check_steps=50,\n", diff --git a/nbs/models.rmok.ipynb b/nbs/models.rmok.ipynb index 017477c13..96dd6e195 100644 --- a/nbs/models.rmok.ipynb +++ b/nbs/models.rmok.ipynb @@ -37,8 +37,8 @@ "# Reversible Mixture of KAN - RMoK\n", "The Reversible Mixture of KAN (RMoK) is a KAN-based model for time series forecasting which uses a mixture-of-experts structure to assign variables to different KAN experts, such as WaveKAN, TaylorKAN and JacobiKAN.\n", "\n", - "**Reference**\n", - "- [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu.\"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\"](https://arxiv.org/abs/2408.11306)" + "**References**
\n", + "[Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu.\"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\"](https://arxiv.org/abs/2408.11306)
" ] }, { @@ -55,8 +55,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -73,8 +76,9 @@ "import torch.nn.functional as F\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate\n", - "from neuralforecast.common._modules import RevIN" + "from neuralforecast.common._base_model import BaseModel\n", + "from neuralforecast.common._modules import RevINMultivariate\n", + "from typing import Optional" ] }, { @@ -331,9 +335,11 @@ "source": [ "#| export\n", "\n", - "class RMoK(BaseMultivariate):\n", + "class RMoK(BaseModel):\n", " \"\"\" Reversible Mixture of KAN\n", - " **Parameters**
\n", + " \n", + " \n", + " **Parameters:**
\n", " `h`: int, Forecast horizon.
\n", " `input_size`: int, autorregresive inputs size, y=[1,2,3,4] input_size=2 -> y_[t-2:t]=[1,2].
\n", " `n_series`: int, number of time-series.
\n", @@ -353,6 +359,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -366,20 +376,21 @@ " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", - " Reference
\n", - " [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu.\"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\"](https://arxiv.org/abs/2408.11306)\n", + " **References**
\n", + " - [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu.\"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\". arXiv.](https://arxiv.org/abs/2408.11306)
\n", " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", " input_size,\n", - " n_series,\n", + " n_series: int,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", @@ -396,6 +407,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 1024,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -422,6 +437,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -445,35 +464,31 @@ " self.wavelet_function = wavelet_function\n", "\n", " self.experts = nn.ModuleList([\n", - " TaylorKANLayer(self.input_size, self.h, order=self.taylor_order, addbias=True),\n", - " JacobiKANLayer(self.input_size, self.h, degree=self.jacobi_degree),\n", - " WaveKANLayer(self.input_size, self.h, wavelet_type=self.wavelet_function),\n", - " nn.Linear(self.input_size, self.h),\n", + " TaylorKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, order=self.taylor_order, addbias=True),\n", + " JacobiKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, degree=self.jacobi_degree),\n", + " WaveKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, wavelet_type=self.wavelet_function),\n", + " nn.Linear(self.input_size, self.h * self.loss.outputsize_multiplier),\n", " ])\n", " \n", " self.num_experts = len(self.experts)\n", " self.gate = nn.Linear(self.input_size, self.num_experts)\n", " self.softmax = nn.Softmax(dim=-1)\n", - " self.rev = RevIN(self.n_series, affine=self.revin_affine)\n", + " self.rev = RevINMultivariate(self.n_series, affine=self.revin_affine)\n", "\n", " def forward(self, windows_batch):\n", " insample_y = windows_batch['insample_y']\n", " B, L, N = insample_y.shape\n", - " x = self.rev(insample_y, 'norm') if self.rev else insample_y\n", + " x = self.rev(insample_y, 'norm')\n", " x = self.dropout(x).transpose(1, 2).reshape(B * N, L)\n", "\n", " score = F.softmax(self.gate(x), dim=-1)\n", " expert_outputs = torch.stack([self.experts[i](x) for i in range(self.num_experts)], dim=-1)\n", "\n", - " y_pred = torch.einsum(\"BLE,BE->BL\", expert_outputs, score).reshape(B, N, -1).permute(0, 2, 1)\n", + " y_pred = torch.einsum(\"BLE, BE -> BL\", expert_outputs, score).reshape(B, N, self.h * self.loss.outputsize_multiplier).permute(0, 2, 1)\n", " y_pred = self.rev(y_pred, 'denorm')\n", - " y_pred = self.loss.domain_map(y_pred)\n", + " y_pred = y_pred.reshape(B, self.h, -1)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " if y_pred.ndim == 2:\n", - " return y_pred.unsqueeze(-1)\n", - " else:\n", - " return y_pred" + " 
return y_pred" ] }, { @@ -503,6 +518,21 @@ "show_doc(RMoK.predict, name='RMoK.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(RMoK, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -560,13 +590,6 @@ "ax.legend(prop={'size': 15})\n", "ax.grid()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/nbs/models.rnn.ipynb b/nbs/models.rnn.ipynb index f5e1a67b9..8a92fdfb2 100644 --- a/nbs/models.rnn.ipynb +++ b/nbs/models.rnn.ipynb @@ -61,8 +61,10 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "from fastcore.test import test_eq\n", "from nbdev.showdoc import show_doc\n", - "from neuralforecast.utils import generate_series" + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -76,9 +78,10 @@ "\n", "import torch\n", "import torch.nn as nn\n", + "import warnings\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_recurrent import BaseRecurrent\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import MLP" ] }, @@ -89,7 +92,7 @@ "outputs": [], "source": [ "#| export\n", - "class RNN(BaseRecurrent):\n", + "class RNN(BaseModel):\n", " \"\"\" RNN\n", "\n", " Multi Layer Elman RNN (RNN), with MLP decoder.\n", @@ -106,7 +109,7 @@ " `encoder_activation`: str=`tanh`, type of RNN activation from `tanh` or `relu`.
\n", " `encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within RNN units.
\n", " `encoder_dropout`: float=0., dropout regularization applied to RNN outputs.
\n", - " `context_size`: int=10, size of context vector for each timestamp on the forecasting window.
\n", + " `context_size`: deprecated.
\n", " `decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
\n", " `decoder_layers`: int=2, number of layers for the MLP decoder.
\n", " `futr_exog_list`: str list, future exogenous columns.
\n", @@ -135,26 +138,29 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'recurrent'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = True # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int,\n", " input_size: int = -1,\n", " inference_input_size: int = -1,\n", " encoder_n_layers: int = 2,\n", - " encoder_hidden_size: int = 200,\n", + " encoder_hidden_size: int = 128,\n", " encoder_activation: str = 'tanh',\n", " encoder_bias: bool = True,\n", " encoder_dropout: float = 0.,\n", - " context_size: int = 10,\n", - " decoder_hidden_size: int = 200,\n", + " context_size: Optional[int] = None,\n", + " decoder_hidden_size: int = 128,\n", " decoder_layers: int = 2,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", + " recurrent = False,\n", " loss = MAE(),\n", " valid_loss = None,\n", " max_steps: int = 1000,\n", @@ -164,6 +170,10 @@ " val_check_steps: int = 100,\n", " batch_size=32,\n", " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", + " step_size: int = 1,\n", " scaler_type: str='robust',\n", " random_seed=1,\n", " num_workers_loader=0,\n", @@ -174,10 +184,16 @@ " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", + " \n", + " self.RECURRENT = recurrent\n", + "\n", " super(RNN, self).__init__(\n", " h=h,\n", " input_size=input_size,\n", - " inference_input_size=inference_input_size,\n", + " futr_exog_list=futr_exog_list,\n", + " hist_exog_list=hist_exog_list,\n", + " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -187,13 +203,14 @@ " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", + " step_size=step_size,\n", " scaler_type=scaler_type,\n", - " futr_exog_list=futr_exog_list,\n", - " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list,\n", + " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " random_seed=random_seed,\n", " optimizer=optimizer,\n", " optimizer_kwargs=optimizer_kwargs,\n", " lr_scheduler=lr_scheduler,\n", @@ -208,7 +225,11 @@ " self.encoder_activation = encoder_activation\n", " self.encoder_bias = encoder_bias\n", " self.encoder_dropout = encoder_dropout\n", - " \n", + "\n", + " # Context adapter\n", + " if context_size is not None:\n", + " warnings.warn(\"context_size is deprecated and will be removed in future versions.\")\n", + "\n", " # Context adapter\n", " self.context_size = context_size\n", "\n", @@ -217,69 +238,74 @@ " self.decoder_layers = decoder_layers\n", "\n", " # RNN input size (1 for target variable y)\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", + " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size\n", "\n", " # Instantiate model\n", + " self.rnn_state = None\n", + " self.maintain_state = False\n", " self.hist_encoder = 
nn.RNN(input_size=input_encoder,\n", - " hidden_size=self.encoder_hidden_size,\n", - " num_layers=self.encoder_n_layers,\n", - " nonlinearity=self.encoder_activation,\n", - " bias=self.encoder_bias,\n", - " dropout=self.encoder_dropout,\n", - " batch_first=True)\n", - "\n", - " # Context adapter\n", - " self.context_adapter = nn.Linear(in_features=self.encoder_hidden_size + self.futr_exog_size * h,\n", - " out_features=self.context_size * h)\n", + " hidden_size=self.encoder_hidden_size,\n", + " num_layers=self.encoder_n_layers,\n", + " bias=self.encoder_bias,\n", + " dropout=self.encoder_dropout,\n", + " batch_first=True)\n", "\n", " # Decoder MLP\n", - " self.mlp_decoder = MLP(in_features=self.context_size + self.futr_exog_size,\n", - " out_features=self.loss.outputsize_multiplier,\n", - " hidden_size=self.decoder_hidden_size,\n", - " num_layers=self.decoder_layers,\n", - " activation='ReLU',\n", - " dropout=0.0)\n", + " if self.RECURRENT:\n", + " self.proj = nn.Linear(self.encoder_hidden_size, self.loss.outputsize_multiplier)\n", + " else:\n", + " self.mlp_decoder = MLP(in_features=self.encoder_hidden_size + self.futr_exog_size,\n", + " out_features=self.loss.outputsize_multiplier,\n", + " hidden_size=self.decoder_hidden_size,\n", + " num_layers=self.decoder_layers,\n", + " activation='ReLU',\n", + " dropout=0.0)\n", "\n", " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", - " futr_exog = windows_batch['futr_exog']\n", - " hist_exog = windows_batch['hist_exog']\n", - " stat_exog = windows_batch['stat_exog']\n", + " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", + " futr_exog = windows_batch['futr_exog'] # [B, seq_len, F]\n", + " hist_exog = windows_batch['hist_exog'] # [B, seq_len, X]\n", + " stat_exog = windows_batch['stat_exog'] # [B, S]\n", "\n", - " # Concatenate y, historic and static inputs\n", - " # [B, C, seq_len, 1] -> [B, seq_len, C]\n", - " # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ]\n", + " # Concatenate y, historic and static inputs \n", " batch_size, seq_len = encoder_input.shape[:2]\n", " if self.hist_exog_size > 0:\n", - " hist_exog = hist_exog.permute(0,2,1,3).squeeze(-1) # [B, X, seq_len, 1] -> [B, seq_len, X]\n", - " encoder_input = torch.cat((encoder_input, hist_exog), dim=2)\n", + " encoder_input = torch.cat((encoder_input, hist_exog), dim=2) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X]\n", "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", - "\n", - " # RNN forward\n", - " hidden_state, _ = self.hist_encoder(encoder_input) # [B, seq_len, rnn_hidden_state]\n", + " # print(encoder_input.shape)\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", + " encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S]\n", "\n", " if self.futr_exog_size > 0:\n", - " futr_exog = futr_exog.permute(0,2,3,1)[:,:,1:,:] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F]\n", - " hidden_state = torch.cat(( hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2)\n", + " encoder_input = torch.cat((encoder_input, \n", + " futr_exog[:, :seq_len]), dim=2) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F]\n", "\n", - " # Context adapter\n", - " context = 
self.context_adapter(hidden_state)\n", - " context = context.reshape(batch_size, seq_len, self.h, self.context_size)\n", + " if self.RECURRENT:\n", + " if self.maintain_state:\n", + " rnn_state = self.rnn_state\n", + " else:\n", + " rnn_state = None\n", + " \n", + " output, rnn_state = self.hist_encoder(encoder_input, \n", + " rnn_state) # [B, seq_len, rnn_hidden_state]\n", + " output = self.proj(output) # [B, seq_len, rnn_hidden_state] -> [B, seq_len, n_output]\n", + " if self.maintain_state:\n", + " self.rnn_state = rnn_state\n", + " else:\n", + " hidden_state, _ = self.hist_encoder(encoder_input, None) # [B, seq_len, rnn_hidden_state]\n", + " hidden_state = hidden_state[:, -self.h:] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state]\n", + " \n", + " if self.futr_exog_size > 0:\n", + " futr_exog_futr = futr_exog[:, -self.h:] # [B, h, F]\n", + " hidden_state = torch.cat((hidden_state, \n", + " futr_exog_futr), dim=-1) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F]\n", "\n", - " # Residual connection with futr_exog\n", - " if self.futr_exog_size > 0:\n", - " context = torch.cat((context, futr_exog), dim=-1)\n", + " output = self.mlp_decoder(hidden_state) # [B, h, rnn_hidden_state + F] -> [B, seq_len, n_output]\n", "\n", - " # Final forecast\n", - " output = self.mlp_decoder(context)\n", - " output = self.loss.domain_map(output)\n", - " \n", - " return output" + " return output[:, -self.h:]" ] }, { @@ -309,6 +335,21 @@ "show_doc(RNN.predict, name='RNN.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(RNN, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -328,26 +369,24 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import RNN\n", - "from neuralforecast.losses.pytorch import MQLoss, DistributionLoss\n", + "from neuralforecast.losses.pytorch import MQLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", "fcst = NeuralForecast(\n", " models=[RNN(h=12,\n", - " input_size=-1,\n", + " input_size=24,\n", " inference_input_size=24,\n", " loss=MQLoss(level=[80, 90]),\n", - " scaler_type='robust',\n", + " valid_loss=MQLoss(level=[80, 90]),\n", + " scaler_type='standard',\n", " encoder_n_layers=2,\n", " encoder_hidden_size=128,\n", - " context_size=10,\n", " decoder_hidden_size=128,\n", " decoder_layers=2,\n", - " max_steps=300,\n", + " max_steps=200,\n", " futr_exog_list=['y_[lag12]'],\n", - " #hist_exog_list=['y_[lag12]'],\n", " stat_exog_list=['airline1'],\n", " )\n", " ],\n", diff --git a/nbs/models.softs.ipynb b/nbs/models.softs.ipynb index 978f3c2c2..588bd8dcb 100644 --- a/nbs/models.softs.ipynb +++ b/nbs/models.softs.ipynb @@ -27,8 +27,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -57,8 +60,9 @@ "import torch.nn as nn\n", "import 
torch.nn.functional as F\n", "\n", + "from typing import Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import TransEncoder, TransEncoderLayer" ] }, @@ -134,7 +138,7 @@ "\n", " # stochastic pooling\n", " if self.training:\n", - " ratio = F.softmax(combined_mean, dim=1)\n", + " ratio = F.softmax(torch.nan_to_num(combined_mean), dim=1)\n", " ratio = ratio.permute(0, 2, 1)\n", " ratio = ratio.reshape(-1, channels)\n", " indices = torch.multinomial(ratio, 1)\n", @@ -169,7 +173,7 @@ "source": [ "#| export\n", "\n", - "class SOFTS(BaseMultivariate):\n", + "class SOFTS(BaseModel):\n", "\n", " \"\"\" SOFTS\n", " \n", @@ -194,6 +198,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -212,10 +220,11 @@ " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = True\n", + " RECURRENT = False\n", "\n", " def __init__(self,\n", " h,\n", @@ -224,6 +233,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " hidden_size: int = 512,\n", " d_core: int = 512,\n", " e_layers: int = 2,\n", @@ -238,6 +248,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 256,\n", + " inference_windows_batch_size = 256,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -256,6 +270,7 @@ " stat_exog_list = None,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -264,6 +279,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -299,7 +318,7 @@ " ]\n", " )\n", "\n", - " self.projection = nn.Linear(hidden_size, self.h, bias=True)\n", + " self.projection = nn.Linear(hidden_size, self.h * self.loss.outputsize_multiplier, bias=True)\n", "\n", " def forecast(self, x_enc):\n", " # Normalization from Non-stationary Transformer\n", @@ -316,22 +335,19 @@ "\n", " # De-Normalization from Non-stationary Transformer\n", " if self.use_norm:\n", - " dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h, 1))\n", - " dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h, 1))\n", + " dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h * self.loss.outputsize_multiplier, 1))\n", + " dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h * self.loss.outputsize_multiplier, 1))\n", " return dec_out\n", " \n", " def forward(self, windows_batch):\n", " insample_y = windows_batch['insample_y']\n", "\n", " y_pred = self.forecast(insample_y)\n", - " y_pred = y_pred[:, -self.h:, :]\n", - " y_pred = self.loss.domain_map(y_pred)\n", + " y_pred = y_pred.reshape(insample_y.shape[0],\n", + " self.h,\n", + " -1)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " if y_pred.ndim == 2:\n", - " return y_pred.unsqueeze(-1)\n", - " else:\n", - " return y_pred" + " return y_pred" ] }, { @@ -361,6 +377,21 @@ "show_doc(SOFTS.predict, name='SOFTS.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(SOFTS, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -381,9 +412,7 @@ "from neuralforecast import NeuralForecast\n", "from neuralforecast.models 
import SOFTS\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MSE\n", - "\n", - "\n", + "from neuralforecast.losses.pytorch import MASE\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", "\n", @@ -396,8 +425,7 @@ " d_ff=64,\n", " dropout=0.1,\n", " use_norm=True,\n", - " loss=MSE(),\n", - " valid_loss=MAE(),\n", + " loss=MASE(seasonality=4),\n", " early_stop_patience_steps=3,\n", " batch_size=32)\n", "\n", diff --git a/nbs/models.stemgnn.ipynb b/nbs/models.stemgnn.ipynb index b2222fc1c..1e97e9bca 100644 --- a/nbs/models.stemgnn.ipynb +++ b/nbs/models.stemgnn.ipynb @@ -53,8 +53,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -68,8 +71,9 @@ "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", + "from typing import Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate" + "from neuralforecast.common._base_model import BaseModel" ] }, { @@ -171,7 +175,7 @@ "outputs": [], "source": [ "#| export\n", - "class StemGNN(BaseMultivariate):\n", + "class StemGNN(BaseModel):\n", " \"\"\" StemGNN\n", "\n", " The Spectral Temporal Graph Neural Network (`StemGNN`) is a Graph-based multivariate\n", @@ -198,6 +202,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int, number of windows in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int, random_seed for pytorch initializer and numpy generators.
\n", @@ -212,10 +220,11 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False \n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", " \n", " def __init__(self,\n", " h,\n", @@ -224,6 +233,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " n_stacks = 2,\n", " multi_layer: int = 5,\n", " dropout_rate: float = 0.5,\n", @@ -236,6 +246,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 1024,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'robust',\n", " random_seed: int = 1,\n", @@ -254,7 +268,8 @@ " n_series=n_series,\n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", - " stat_exog_list=stat_exog_list, \n", + " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y, \n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -263,6 +278,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " num_workers_loader=num_workers_loader,\n", @@ -379,14 +398,8 @@ "\n", " forecast = forecast.permute(0, 2, 1).contiguous()\n", " forecast = forecast.reshape(batch_size, self.h, self.loss.outputsize_multiplier * self.n_series)\n", - " forecast = self.loss.domain_map(forecast)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet.\n", - " if forecast.ndim == 2:\n", - " return forecast.unsqueeze(-1)\n", - " else:\n", - " return forecast" + " return forecast" ] }, { @@ -423,73 +436,12 @@ "outputs": [], "source": [ "#| hide\n", - "import logging\n", - "import warnings\n", - "\n", - "from neuralforecast import NeuralForecast\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MAE, MSE, RMSE, MAPE, SMAPE, MASE, relMSE, QuantileLoss, MQLoss, DistributionLoss,PMM, GMM, NBMM, HuberLoss, TukeyLoss, HuberQLoss, HuberMQLoss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# Test losses\n", + "# Unit tests for models\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", - "\n", - "AirPassengersStatic_single = AirPassengersStatic[AirPassengersStatic[\"unique_id\"] == 'Airline1']\n", - "Y_train_df_single = Y_train_df[Y_train_df[\"unique_id\"] == 'Airline1']\n", - "Y_test_df_single = Y_test_df[Y_test_df[\"unique_id\"] == 'Airline1']\n", - "\n", - "losses = [MAE(), MSE(), RMSE(), MAPE(), 
SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "valid_losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "\n", - "for loss, valid_loss in zip(losses, valid_losses):\n", - " try:\n", - " model = StemGNN(h=12,\n", - " input_size=24,\n", - " n_series=2,\n", - " scaler_type='robust',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=10,\n", - " learning_rate=1e-3,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " batch_size=32\n", - " )\n", - "\n", - " fcst = NeuralForecast(models=[model], freq='M')\n", - " fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", - " forecasts = fcst.predict(futr_df=Y_test_df)\n", - " except Exception as e:\n", - " assert str(e) == f\"{loss} is not supported in a Multivariate model.\"\n", - "\n", - "\n", - "# Test n_series = 1\n", - "model = StemGNN(h=12,\n", - " input_size=24,\n", - " n_series=1,\n", - " scaler_type='robust',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=10,\n", - " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " batch_size=32\n", - " )\n", - "fcst = NeuralForecast(models=[model], freq='M')\n", - "fcst.fit(df=Y_train_df_single, static_df=AirPassengersStatic_single, val_size=12)\n", - "forecasts = fcst.predict(futr_df=Y_test_df_single) " + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(StemGNN, [\"airpassengers\"])" ] }, { @@ -527,13 +479,13 @@ "model = StemGNN(h=12,\n", " input_size=24,\n", " n_series=2,\n", - " scaler_type='robust',\n", - " max_steps=100,\n", + " scaler_type='standard',\n", + " max_steps=500,\n", " early_stop_patience_steps=-1,\n", " val_check_steps=10,\n", " learning_rate=1e-3,\n", " loss=MAE(),\n", - " valid_loss=None,\n", + " valid_loss=MAE(),\n", " batch_size=32\n", " )\n", "\n", diff --git a/nbs/models.tcn.ipynb b/nbs/models.tcn.ipynb index dee324513..61551f1f5 100644 --- a/nbs/models.tcn.ipynb +++ b/nbs/models.tcn.ipynb @@ -69,7 +69,7 @@ "import torch.nn as nn\n", "\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_recurrent import BaseRecurrent\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import MLP, TemporalConvolutionEncoder" ] }, @@ -80,10 +80,11 @@ "outputs": [], "source": [ "#| hide\n", - "from nbdev.showdoc import show_doc\n", - "\n", "import logging\n", - "import warnings" + "import warnings\n", + "from fastcore.test import test_eq\n", + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -93,7 +94,7 @@ 
"outputs": [], "source": [ "#| export\n", - "class TCN(BaseRecurrent):\n", + "class TCN(BaseModel):\n", " \"\"\" TCN\n", "\n", " Temporal Convolution Network (TCN), with MLP decoder.\n", @@ -134,21 +135,22 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'recurrent'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True \n", - " \n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False) \n", + "\n", " def __init__(self,\n", " h: int,\n", " input_size: int = -1,\n", " inference_input_size: int = -1,\n", " kernel_size: int = 2,\n", " dilations: List[int] = [1, 2, 4, 8, 16],\n", - " encoder_hidden_size: int = 200,\n", + " encoder_hidden_size: int = 128,\n", " encoder_activation: str = 'ReLU',\n", " context_size: int = 10,\n", - " decoder_hidden_size: int = 200,\n", + " decoder_hidden_size: int = 128,\n", " decoder_layers: int = 2,\n", " futr_exog_list = None,\n", " hist_exog_list = None,\n", @@ -162,6 +164,10 @@ " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 128,\n", + " inference_windows_batch_size = 1024,\n", + " start_padding_enabled = False,\n", + " step_size: int = 1, \n", " scaler_type: str ='robust',\n", " random_seed: int = 1,\n", " num_workers_loader = 0,\n", @@ -185,6 +191,10 @@ " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", + " step_size=step_size,\n", " scaler_type=scaler_type,\n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", @@ -215,7 +225,7 @@ " self.decoder_layers = decoder_layers\n", "\n", " # TCN input size (1 for target variable y)\n", - " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size\n", + " input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size\n", "\n", " \n", " #---------------------------------- Instantiate Model -----------------------------------#\n", @@ -228,11 +238,11 @@ " activation=self.encoder_activation)\n", "\n", " # Context adapter\n", - " self.context_adapter = nn.Linear(in_features=self.encoder_hidden_size + self.futr_exog_size * h,\n", - " out_features=self.context_size * h)\n", + " self.context_adapter = nn.Linear(in_features=self.input_size,\n", + " out_features=h)\n", "\n", " # Decoder MLP\n", - " self.mlp_decoder = MLP(in_features=self.context_size + self.futr_exog_size,\n", + " self.mlp_decoder = MLP(in_features=self.encoder_hidden_size + self.futr_exog_size,\n", " out_features=self.loss.outputsize_multiplier,\n", " hidden_size=self.decoder_hidden_size,\n", " num_layers=self.decoder_layers,\n", @@ -242,41 +252,41 @@ " def forward(self, windows_batch):\n", " \n", " # Parse windows_batch\n", - " encoder_input = windows_batch['insample_y'] # [B, seq_len, 1]\n", - " futr_exog = windows_batch['futr_exog']\n", - " hist_exog = windows_batch['hist_exog']\n", - " stat_exog = windows_batch['stat_exog']\n", + " encoder_input = windows_batch['insample_y'] # [B, L, 1]\n", + " futr_exog = windows_batch['futr_exog'] # [B, L + h, F]\n", + " hist_exog = windows_batch['hist_exog'] # [B, L, X]\n", + " stat_exog = windows_batch['stat_exog'] # [B, S]\n", "\n", - " # Concatenate y, historic and static inputs\n", - " # [B, C, seq_len, 1] -> [B, seq_len, C]\n", - " # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ]\n", - " batch_size, seq_len = encoder_input.shape[:2]\n", + " # Concatenate y, historic and static 
inputs \n", + " batch_size, input_size = encoder_input.shape[:2]\n", " if self.hist_exog_size > 0:\n", - " hist_exog = hist_exog.permute(0,2,1,3).squeeze(-1) # [B, X, seq_len, 1] -> [B, seq_len, X]\n", - " encoder_input = torch.cat((encoder_input, hist_exog), dim=2)\n", + " encoder_input = torch.cat((encoder_input, hist_exog), dim=2) # [B, L, 1] + [B, L, X] -> [B, L, 1 + X]\n", "\n", " if self.stat_exog_size > 0:\n", - " stat_exog = stat_exog.unsqueeze(1).repeat(1, seq_len, 1) # [B, S] -> [B, seq_len, S]\n", - " encoder_input = torch.cat((encoder_input, stat_exog), dim=2)\n", - "\n", - " # TCN forward\n", - " hidden_state = self.hist_encoder(encoder_input) # [B, seq_len, tcn_hidden_state]\n", + " # print(encoder_input.shape)\n", + " stat_exog = stat_exog.unsqueeze(1).repeat(1, input_size, 1) # [B, S] -> [B, L, S]\n", + " encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # [B, L, 1 + X] + [B, L, S] -> [B, L, 1 + X + S]\n", "\n", " if self.futr_exog_size > 0:\n", - " futr_exog = futr_exog.permute(0,2,3,1)[:,:,1:,:] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F]\n", - " hidden_state = torch.cat(( hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2)\n", + " encoder_input = torch.cat((encoder_input, \n", + " futr_exog[:, :input_size]), dim=2) # [B, L, 1 + X + S] + [B, L, F] -> [B, L, 1 + X + S + F]\n", + "\n", + " # TCN forward \n", + " hidden_state = self.hist_encoder(encoder_input) # [B, L, C]\n", "\n", " # Context adapter\n", - " context = self.context_adapter(hidden_state)\n", - " context = context.reshape(batch_size, seq_len, self.h, self.context_size)\n", + " hidden_state = hidden_state.permute(0, 2, 1) # [B, L, C] -> [B, C, L]\n", + " context = self.context_adapter(hidden_state) # [B, C, L] -> [B, C, h]\n", "\n", " # Residual connection with futr_exog\n", " if self.futr_exog_size > 0:\n", - " context = torch.cat((context, futr_exog), dim=-1)\n", + " futr_exog_futr = futr_exog[:, input_size:].swapaxes(1, 2) # [B, L + h, F] -> [B, F, h] \n", + " context = torch.cat((context, futr_exog_futr), dim=1) # [B, C, h] + [B, F, h] = [B, C + F, h]\n", + "\n", + " context = context.swapaxes(1, 2) # [B, C + F, h] -> [B, h, C + F]\n", "\n", " # Final forecast\n", - " output = self.mlp_decoder(context)\n", - " output = self.loss.domain_map(output)\n", + " output = self.mlp_decoder(context) # [B, h, C + F] -> [B, h, n_output]\n", " \n", " return output" ] @@ -308,13 +318,6 @@ "show_doc(TCN.predict, name='TCN.predict')" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage Example" - ] - }, { "cell_type": "code", "execution_count": null, @@ -322,8 +325,19 @@ "outputs": [], "source": [ "#| hide\n", + "# Unit tests for models\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", - "warnings.filterwarnings(\"ignore\")" + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TCN, [\"airpassengers\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage Example" ] }, { @@ -338,7 +352,7 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import TCN\n", - "from neuralforecast.losses.pytorch import GMM, MQLoss, DistributionLoss\n", + "from neuralforecast.losses.pytorch import DistributionLoss\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds [B, h, n_outputs]\n", "\n", - " # Map to 
output domain\n", - " forecast = self.loss.domain_map(x + x_skip)\n", + " forecast = x + x_skip\n", " \n", " return forecast\n" ] @@ -383,6 +386,21 @@ "show_doc(TiDE.predict, name='TiDE.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TiDE, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -402,7 +420,7 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import TiDE\n", - "from neuralforecast.losses.pytorch import GMM, DistributionLoss\n", + "from neuralforecast.losses.pytorch import GMM\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds 1:\n", + " raise Exception('TimeLLM only supports point loss functions (MAE, MSE, etc) as loss function.') \n", + " \n", + " if valid_loss is not None and not isinstance(valid_loss, losses.BasePointLoss):\n", + " raise Exception('TimeLLM only supports point loss functions (MAE, MSE, etc) as valid loss function.') \n", + "\n", + "\n", " # Architecture\n", " self.patch_len = patch_len\n", " self.stride = stride\n", @@ -523,13 +533,10 @@ " return lags\n", " \n", " def forward(self, windows_batch):\n", - " insample_y = windows_batch['insample_y']\n", - "\n", - " x = insample_y.unsqueeze(-1)\n", + " x = windows_batch['insample_y']\n", "\n", " y_pred = self.forecast(x)\n", " y_pred = y_pred[:, -self.h:, :]\n", - " y_pred = self.loss.domain_map(y_pred)\n", " \n", " return y_pred\n" ] @@ -575,11 +582,12 @@ "outputs": [], "source": [ "#| eval: false\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import TimeLLM\n", - "from neuralforecast.utils import AirPassengersPanel, augment_calendar_df\n", - "\n", - "AirPassengersPanel, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", + "from neuralforecast.utils import AirPassengersPanel\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", diff --git a/nbs/models.timemixer.ipynb b/nbs/models.timemixer.ipynb index 9bfdd9cc5..49801a6b8 100644 --- a/nbs/models.timemixer.ipynb +++ b/nbs/models.timemixer.ipynb @@ -17,8 +17,8 @@ "\n", "Seasonal and trend components exhibit significantly different characteristics in time series, and different scales of the time series reflect different properties, with seasonal characteristics being more pronounced at a fine-grained micro scale and trend characteristics being more pronounced at a coarse macro scale, it is therefore necessary to decouple seasonal and trend components at different scales. As such, TimeMixer is an MLP-based architecture with Past-Decomposable-Mixing (PDM) and Future-Multipredictor-Mixing (FMM) blocks to take full advantage of disentangled multiscale series in both past extraction and future prediction phases.\n", "\n", - "**Reference**\n", - "- [Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. 
Zhang, Jun Zhou.\"TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting\"](https://openreview.net/pdf?id=7oLshfEIC2)" + "**References**
\n", + "[Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, Jun Zhou.\"TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting\"](https://openreview.net/pdf?id=7oLshfEIC2)
" ] }, { @@ -41,10 +41,10 @@ "import torch\n", "import torch.nn as nn\n", "\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate\n", + "from neuralforecast.common._base_model import BaseModel\n", "from neuralforecast.common._modules import PositionalEmbedding, TokenEmbedding, TemporalEmbedding, SeriesDecomp, RevIN\n", - "\n", - "from neuralforecast.losses.pytorch import MAE" + "from neuralforecast.losses.pytorch import MAE\n", + "from typing import Optional" ] }, { @@ -54,8 +54,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -324,7 +327,7 @@ "source": [ "#| export\n", "\n", - "class TimeMixer(BaseMultivariate):\n", + "class TimeMixer(BaseModel):\n", " \"\"\" TimeMixer\n", " **Parameters**
\n", " `h`: int, Forecast horizon.
\n", @@ -354,6 +357,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -368,14 +375,15 @@ " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", - " [Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, Jun Zhou.\"TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting\"](https://openreview.net/pdf?id=7oLshfEIC2)\n", + " [Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, Jun Zhou.\"TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting\"](https://openreview.net/pdf?id=7oLshfEIC2)
\n", " \"\"\"\n", "\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -405,6 +413,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 256,\n", + " inference_windows_batch_size = 256,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -431,6 +443,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -522,6 +538,9 @@ " for i in range(self.down_sampling_layers + 1)\n", " ]\n", " )\n", + " \n", + " if self.loss.outputsize_multiplier > 1:\n", + " self.distr_output = nn.Linear(self.n_series, self.n_series * self.loss.outputsize_multiplier)\n", "\n", " def out_projection(self, dec_out, i, out_res):\n", " dec_out = self.projection_layer(dec_out)\n", @@ -678,13 +697,10 @@ "\n", " y_pred = self.forecast(insample_y, x_mark_enc, x_mark_dec)\n", " y_pred = y_pred[:, -self.h:, :]\n", - " y_pred = self.loss.domain_map(y_pred)\n", + " if self.loss.outputsize_multiplier > 1:\n", + " y_pred = self.distr_output(y_pred)\n", "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " if y_pred.ndim == 2:\n", - " return y_pred.unsqueeze(-1)\n", - " else:\n", - " return y_pred" + " return y_pred\n" ] }, { @@ -714,6 +730,21 @@ "show_doc(TimeMixer.predict, name='TimeMixer.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TimeMixer, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/nbs/models.timesnet.ipynb b/nbs/models.timesnet.ipynb index 37e5d46e4..00d65688f 100644 --- a/nbs/models.timesnet.ipynb +++ b/nbs/models.timesnet.ipynb @@ -54,7 +54,7 @@ "import torch.fft\n", "\n", "from neuralforecast.common._modules import DataEmbedding\n", - "from neuralforecast.common._base_windows import BaseWindows\n", + "from neuralforecast.common._base_model import BaseModel\n", "\n", "from neuralforecast.losses.pytorch import MAE" ] @@ -66,8 +66,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -200,7 +203,7 @@ "outputs": [], "source": [ "#| export\n", - "class TimesNet(BaseWindows):\n", + "class TimesNet(BaseModel):\n", " \"\"\" TimesNet\n", "\n", " The 
TimesNet univariate model tackles the challenge of modeling multiple intraperiod and interperiod temporal variations.\n", @@ -279,10 +282,11 @@ " Haixu Wu and Tengge Hu and Yong Liu and Hang Zhou and Jianmin Wang and Mingsheng Long. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. https://openreview.net/pdf?id=ju_Uqw384Oq\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False \n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -377,13 +381,9 @@ "\n", " # Parse windows_batch\n", " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", " futr_exog = windows_batch['futr_exog']\n", "\n", " # Parse inputs\n", - " insample_y = insample_y.unsqueeze(-1) # [Ws,L,1]\n", " if self.futr_exog_size > 0:\n", " x_mark_enc = futr_exog[:,:self.input_size,:]\n", " else:\n", @@ -398,7 +398,7 @@ " # porject back\n", " dec_out = self.projection(enc_out)\n", "\n", - " forecast = self.loss.domain_map(dec_out[:, -self.h:])\n", + " forecast = dec_out[:, -self.h:]\n", " return forecast" ] }, @@ -429,6 +429,21 @@ "show_doc(TimesNet.predict, name='TimesNet.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TimesNet, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -448,9 +463,7 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.losses.pytorch import DistributionLoss\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, augment_calendar_df\n", - "\n", - "AirPassengersPanel, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", + "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", @@ -460,10 +473,9 @@ " hidden_size = 16,\n", " conv_hidden_size = 32,\n", " loss=DistributionLoss(distribution='Normal', level=[80, 90]),\n", - " futr_exog_list=calendar_cols,\n", " scaler_type='standard',\n", " learning_rate=1e-3,\n", - " max_steps=5,\n", + " max_steps=100,\n", " val_check_steps=50,\n", " early_stop_patience_steps=2)\n", "\n", diff --git a/nbs/models.tsmixer.ipynb b/nbs/models.tsmixer.ipynb index 94a9e4125..4c01a42f3 100644 --- a/nbs/models.tsmixer.ipynb +++ b/nbs/models.tsmixer.ipynb @@ -44,8 +44,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -55,12 +58,13 @@ "outputs": [], "source": [ "#| export\n", - "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", + "from typing import 
Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate" + "from neuralforecast.common._base_model import BaseModel\n", + "from neuralforecast.common._modules import RevINMultivariate" ] }, { @@ -157,55 +161,6 @@ " return x" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1.2 Reversible InstanceNormalization\n", - "An Instance Normalization Layer that is reversible, based on [this reference implementation](https://github.com/google-research/google-research/blob/master/tsmixer/tsmixer_basic/models/rev_in.py).
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "class ReversibleInstanceNorm1d(nn.Module):\n", - " \"\"\" \n", - " ReversibleInstanceNorm1d\n", - " \"\"\" \n", - " def __init__(self, n_series, eps=1e-5):\n", - " super().__init__()\n", - " self.weight = nn.Parameter(torch.ones((1, 1, n_series)))\n", - " self.bias = nn.Parameter(torch.zeros((1, 1, n_series)))\n", - "\n", - " self.eps = eps\n", - "\n", - " def forward(self, x):\n", - " # Batch statistics\n", - " self.batch_mean = torch.mean(x, axis=1, keepdim=True).detach()\n", - " self.batch_std = torch.sqrt(torch.var(x, axis=1, keepdim=True, unbiased=False) + self.eps).detach()\n", - " \n", - " # Instance normalization\n", - " x = x - self.batch_mean\n", - " x = x / self.batch_std\n", - " x = x * self.weight\n", - " x = x + self.bias\n", - " \n", - " return x\n", - "\n", - " def reverse(self, x):\n", - " # Reverse the normalization\n", - " x = x - self.bias\n", - " x = x / self.weight \n", - " x = x * self.batch_std\n", - " x = x + self.batch_mean \n", - "\n", - " return x" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -220,7 +175,7 @@ "outputs": [], "source": [ "#| export\n", - "class TSMixer(BaseMultivariate):\n", + "class TSMixer(BaseModel):\n", " \"\"\" TSMixer\n", "\n", " Time-Series Mixer (`TSMixer`) is a MLP-based multivariate time-series forecasting model. `TSMixer` jointly learns temporal and cross-sectional representations of the time-series by repeatedly combining time- and feature information using stacked mixing layers. A mixing layer consists of a sequential time- and feature Multi Layer Perceptron (`MLP`).\n", @@ -244,6 +199,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -262,10 +221,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = False\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -274,6 +234,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " n_block = 2,\n", " ff_dim = 64,\n", " dropout = 0.9,\n", @@ -286,6 +247,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 256,\n", + " inference_windows_batch_size = 256,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -305,6 +270,7 @@ " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -313,6 +279,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -328,7 +298,7 @@ " # Reversible InstanceNormalization layer\n", " self.revin = revin\n", " if self.revin:\n", - " self.norm = ReversibleInstanceNorm1d(n_series = n_series)\n", + " self.norm = RevINMultivariate(num_features = n_series, affine=True)\n", "\n", " # Mixing layers\n", " mixing_layers = [MixingLayer(n_series=n_series, \n", @@ -349,23 +319,17 @@ "\n", " # TSMixer: InstanceNorm + Mixing layers + Dense output layer + ReverseInstanceNorm\n", " if self.revin:\n", - " x = self.norm(x)\n", + " x = self.norm(x, 'norm')\n", " x = self.mixing_layers(x)\n", " x = x.permute(0, 2, 1)\n", " x = self.out(x)\n", " x = x.permute(0, 2, 1)\n", " if self.revin:\n", - " x = self.norm.reverse(x)\n", + " x = self.norm(x, 'denorm')\n", "\n", " x = x.reshape(batch_size, self.h, self.loss.outputsize_multiplier * self.n_series)\n", - " forecast = self.loss.domain_map(x)\n", - "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet.\n", - " if forecast.ndim == 2:\n", - " return forecast.unsqueeze(-1)\n", - " else:\n", - " return forecast" + "\n", + " return x" ] }, { @@ -401,80 +365,12 @@ "metadata": {}, "outputs": [], "source": [ - "#| hide\n", - "import logging\n", - "import warnings\n", - "\n", - "from neuralforecast import NeuralForecast\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MAE, MSE, RMSE, MAPE, SMAPE, MASE, relMSE, QuantileLoss, MQLoss, DistributionLoss,PMM, GMM, NBMM, HuberLoss, TukeyLoss, HuberQLoss, HuberMQLoss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# Test losses\n", + "# Unit tests for 
models\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", - "\n", - "AirPassengersStatic_single = AirPassengersStatic[AirPassengersStatic[\"unique_id\"] == 'Airline1']\n", - "Y_train_df_single = Y_train_df[Y_train_df[\"unique_id\"] == 'Airline1']\n", - "Y_test_df_single = Y_test_df[Y_test_df[\"unique_id\"] == 'Airline1']\n", - "\n", - "losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "valid_losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "\n", - "for loss, valid_loss in zip(losses, valid_losses):\n", - " try:\n", - " model = TSMixer(h=12,\n", - " input_size=24,\n", - " n_series=2,\n", - " n_block=4,\n", - " ff_dim=4,\n", - " revin=True,\n", - " scaler_type='standard',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=5,\n", - " learning_rate=1e-3,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " batch_size=32\n", - " )\n", - "\n", - " fcst = NeuralForecast(models=[model], freq='M')\n", - " fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", - " forecasts = fcst.predict(futr_df=Y_test_df)\n", - " except Exception as e:\n", - " assert str(e) == f\"{loss} is not supported in a Multivariate model.\"\n", - "\n", - "\n", - "# Test n_series = 1\n", - "model = TSMixer(h=12,\n", - " input_size=24,\n", - " n_series=1,\n", - " n_block=4,\n", - " ff_dim=4,\n", - " revin=True,\n", - " scaler_type='standard',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=5,\n", - " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " batch_size=32\n", - " )\n", - "fcst = NeuralForecast(models=[model], freq='M')\n", - "fcst.fit(df=Y_train_df_single, static_df=AirPassengersStatic_single, val_size=12)\n", - "forecasts = fcst.predict(futr_df=Y_test_df_single)" + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TSMixer, [\"airpassengers\"])" ] }, { @@ -504,7 +400,7 @@ "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import TSMixer\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MAE\n", + "from neuralforecast.losses.pytorch import MAE, MQLoss\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", @@ -521,8 +417,7 @@ " early_stop_patience_steps=-1,\n", " 
val_check_steps=5,\n", " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", + " loss=MQLoss(),\n", " batch_size=32\n", " )\n", "\n", @@ -536,9 +431,13 @@ "plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)\n", "plot_df = pd.concat([Y_train_df, plot_df])\n", "\n", - "plot_df = plot_df[plot_df.unique_id=='Airline1'].drop('unique_id', axis=1)\n", + "plot_df = plot_df[plot_df.unique_id=='Airline2'].drop('unique_id', axis=1)\n", "plt.plot(plot_df['ds'], plot_df['y'], c='black', label='True')\n", - "plt.plot(plot_df['ds'], plot_df['TSMixer'], c='blue', label='Forecast')\n", + "plt.plot(plot_df['ds'], plot_df['TSMixer-median'], c='blue', label='median')\n", + "plt.fill_between(x=plot_df['ds'][-12:], \n", + " y1=plot_df['TSMixer-lo-90'][-12:].values,\n", + " y2=plot_df['TSMixer-hi-90'][-12:].values,\n", + " alpha=0.4, label='level 90')\n", "ax.set_title('AirPassengers Forecast', fontsize=22)\n", "ax.set_ylabel('Monthly Passengers', fontsize=20)\n", "ax.set_xlabel('Year', fontsize=20)\n", @@ -569,7 +468,7 @@ "Y_df = AirPassengersPanel[AirPassengersPanel['unique_id']=='Airline1']\n", "\n", "plt.plot(Y_df['ds'], Y_df['y'], c='black', label='True')\n", - "plt.plot(Y_hat_df['ds'], Y_hat_df['TSMixer'], c='blue', label='Forecast')\n", + "plt.plot(Y_hat_df['ds'], Y_hat_df['TSMixer-median'], c='blue', label='Forecast')\n", "ax.set_title('AirPassengers Forecast', fontsize=22)\n", "ax.set_ylabel('Monthly Passengers', fontsize=20)\n", "ax.set_xlabel('Year', fontsize=20)\n", diff --git a/nbs/models.tsmixerx.ipynb b/nbs/models.tsmixerx.ipynb index cb0ba72b6..691bdbc32 100644 --- a/nbs/models.tsmixerx.ipynb +++ b/nbs/models.tsmixerx.ipynb @@ -44,8 +44,11 @@ "outputs": [], "source": [ "#| hide\n", + "import logging\n", + "import warnings\n", "from fastcore.test import test_eq\n", - "from nbdev.showdoc import show_doc" + "from nbdev.showdoc import show_doc\n", + "from neuralforecast.common._model_checks import check_model" ] }, { @@ -59,8 +62,10 @@ "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", + "from typing import Optional\n", "from neuralforecast.losses.pytorch import MAE\n", - "from neuralforecast.common._base_multivariate import BaseMultivariate" + "from neuralforecast.common._base_model import BaseModel\n", + "from neuralforecast.common._modules import RevINMultivariate" ] }, { @@ -244,7 +249,7 @@ "outputs": [], "source": [ "#| export\n", - "class TSMixerx(BaseMultivariate):\n", + "class TSMixerx(BaseModel):\n", " \"\"\" TSMixerx\n", "\n", " Time-Series Mixer exogenous (`TSMixerx`) is a MLP-based multivariate time-series forecasting model, with capability for additional exogenous inputs. `TSMixerx` jointly learns temporal and cross-sectional representations of the time-series by repeatedly combining time- and feature information using stacked mixing layers. A mixing layer consists of a sequential time- and feature Multi Layer Perceptron (`MLP`).\n", @@ -268,6 +273,10 @@ " `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
\n", " `val_check_steps`: int=100, Number of training steps between every validation loss check.
\n", " `batch_size`: int=32, number of different series in each batch.
\n", + " `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
\n", + " `windows_batch_size`: int=256, number of windows to sample in each training batch, default uses all.
\n", + " `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
\n", + " `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
\n", " `step_size`: int=1, step size between each window of temporal data.
\n", " `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
\n", " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", @@ -286,10 +295,11 @@ "\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'multivariate'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = True\n", " EXOGENOUS_STAT = True\n", + " MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h,\n", @@ -298,6 +308,7 @@ " futr_exog_list = None,\n", " hist_exog_list = None,\n", " stat_exog_list = None,\n", + " exclude_insample_y = False,\n", " n_block = 2,\n", " ff_dim = 64,\n", " dropout = 0.0,\n", @@ -310,6 +321,10 @@ " early_stop_patience_steps: int =-1,\n", " val_check_steps: int = 100,\n", " batch_size: int = 32,\n", + " valid_batch_size: Optional[int] = None,\n", + " windows_batch_size = 256,\n", + " inference_windows_batch_size = 256,\n", + " start_padding_enabled = False,\n", " step_size: int = 1,\n", " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", @@ -329,6 +344,7 @@ " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", + " exclude_insample_y = exclude_insample_y,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " max_steps=max_steps,\n", @@ -337,6 +353,10 @@ " early_stop_patience_steps=early_stop_patience_steps,\n", " val_check_steps=val_check_steps,\n", " batch_size=batch_size,\n", + " valid_batch_size=valid_batch_size,\n", + " windows_batch_size=windows_batch_size,\n", + " inference_windows_batch_size=inference_windows_batch_size,\n", + " start_padding_enabled=start_padding_enabled,\n", " step_size=step_size,\n", " scaler_type=scaler_type,\n", " random_seed=random_seed,\n", @@ -351,7 +371,7 @@ " # Reversible InstanceNormalization layer\n", " self.revin = revin\n", " if self.revin:\n", - " self.norm = ReversibleInstanceNorm1d(n_series = n_series)\n", + " self.norm = RevINMultivariate(num_features= n_series, affine=True)\n", "\n", " # Forecast horizon\n", " self.h = h\n", @@ -417,19 +437,19 @@ "\n", " def forward(self, windows_batch):\n", " # Parse batch\n", - " x = windows_batch['insample_y'] # [batch_size (B), input_size (L), n_series (N)]\n", - " hist_exog = windows_batch['hist_exog'] # [B, hist_exog_size (X), L, N]\n", - " futr_exog = windows_batch['futr_exog'] # [B, futr_exog_size (F), L + h, N]\n", - " stat_exog = windows_batch['stat_exog'] # [N, stat_exog_size (S)]\n", + " x = windows_batch['insample_y'] # [batch_size (B), input_size (L), n_series (N)]\n", + " hist_exog = windows_batch['hist_exog'] # [B, hist_exog_size (X), L, N]\n", + " futr_exog = windows_batch['futr_exog'] # [B, futr_exog_size (F), L + h, N]\n", + " stat_exog = windows_batch['stat_exog'] # [N, stat_exog_size (S)]\n", " batch_size, input_size = x.shape[:2]\n", "\n", + " # Apply revin to x\n", + " if self.revin:\n", + " x = self.norm(x, mode=\"norm\") # [B, L, N] -> [B, L, N]\n", + "\n", " # Add channel dimension to x\n", " x = x.unsqueeze(1) # [B, L, N] -> [B, 1, L, N]\n", "\n", - " # Apply revin to x\n", - " if self.revin:\n", - " x = self.norm(x) # [B, 1, L, N] -> [B, 1, L, N]\n", - " \n", " # Concatenate x with historical exogenous\n", " if self.hist_exog_size > 0:\n", " x = torch.cat((x, hist_exog), dim=1) # [B, 1, L, N] + [B, X, L, N] -> [B, 1 + X, L, N]\n", @@ -476,26 +496,17 @@ " x = self.mixing_block(x) # [B, h, ff_dim] -> [B, h, ff_dim] \n", " \n", " # Fully connected output layer\n", - " x = self.out(x) # [B, h, ff_dim] -> [B, h, N * n_outputs]\n", + " forecast = self.out(x) # [B, h, ff_dim] 
-> [B, h, N * n_outputs]\n", " \n", " # Reverse Instance Normalization on output\n", " if self.revin:\n", - " x = x.reshape(batch_size, \n", - " self.h, \n", - " self.loss.outputsize_multiplier,\n", - " -1) # [B, h, N * n_outputs] -> [B, h, n_outputs, N]\n", - " x = self.norm.reverse(x)\n", - " x = x.reshape(batch_size, self.h, -1) # [B, h, n_outputs, N] -> [B, h, n_outputs * N]\n", - "\n", - " # Map to loss domain\n", - " forecast = self.loss.domain_map(x)\n", - "\n", - " # domain_map might have squeezed the last dimension in case n_series == 1\n", - " # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet.\n", - " if forecast.ndim == 2:\n", - " return forecast.unsqueeze(-1)\n", - " else:\n", - " return forecast" + " forecast = forecast.reshape(batch_size, \n", + " self.h * self.loss.outputsize_multiplier,\n", + " -1) # [B, h, N * n_outputs] -> [B, h * n_outputs, N]\n", + " forecast = self.norm(forecast, \"denorm\")\n", + " forecast = forecast.reshape(batch_size, self.h, -1) # [B, h * n_outputs, N] -> [B, h, n_outputs * N]\n", + "\n", + " return forecast" ] }, { @@ -531,113 +542,12 @@ "metadata": {}, "outputs": [], "source": [ - "#| hide\n", - "import logging\n", - "import warnings\n", - "import pandas as pd\n", - "\n", - "from neuralforecast import NeuralForecast\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, generate_series\n", - "from neuralforecast.losses.pytorch import MAE, MSE, RMSE, MAPE, SMAPE, MASE, relMSE, QuantileLoss, MQLoss, DistributionLoss,PMM, GMM, NBMM, HuberLoss, TukeyLoss, HuberQLoss, HuberMQLoss\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# Test losses\n", + "# Unit tests for models\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", - "\n", - "AirPassengersStatic_single = AirPassengersStatic[AirPassengersStatic[\"unique_id\"] == 'Airline1']\n", - "Y_train_df_single = Y_train_df[Y_train_df[\"unique_id\"] == 'Airline1']\n", - "Y_test_df_single = Y_test_df[Y_test_df[\"unique_id\"] == 'Airline1']\n", - "\n", - "losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "valid_losses = [MAE(), MSE(), RMSE(), MAPE(), SMAPE(), MASE(seasonality=12), relMSE(y_train=Y_train_df), QuantileLoss(q=0.5), MQLoss(), DistributionLoss(distribution='Bernoulli'), DistributionLoss(distribution='Normal'), DistributionLoss(distribution='Poisson'), DistributionLoss(distribution='StudentT'), DistributionLoss(distribution='NegativeBinomial'), DistributionLoss(distribution='Tweedie'), PMM(), GMM(), NBMM(), HuberLoss(), TukeyLoss(), HuberQLoss(q=0.5), HuberMQLoss()]\n", - "\n", - "for loss, valid_loss in zip(losses, valid_losses):\n", - " try:\n", - " model = TSMixerx(h=12,\n", - " input_size=24,\n", - " n_series=2,\n", - " stat_exog_list=['airline1'],\n", - " futr_exog_list=['trend'],\n", - " n_block=4,\n", - " 
ff_dim=4,\n", - " revin=True,\n", - " scaler_type='standard',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=5,\n", - " learning_rate=1e-3,\n", - " loss=loss,\n", - " valid_loss=valid_loss,\n", - " batch_size=32\n", - " )\n", - "\n", - " fcst = NeuralForecast(models=[model], freq='M')\n", - " fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n", - " forecasts = fcst.predict(futr_df=Y_test_df)\n", - " except Exception as e:\n", - " assert str(e) == f\"{loss} is not supported in a Multivariate model.\"\n", - "\n", - "\n", - "# Test n_series = 1\n", - "model = TSMixerx(h=12,\n", - " input_size=24,\n", - " n_series=1,\n", - " stat_exog_list=['airline1'],\n", - " futr_exog_list=['trend'],\n", - " n_block=4,\n", - " ff_dim=4,\n", - " revin=True,\n", - " scaler_type='standard',\n", - " max_steps=2,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=5,\n", - " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " batch_size=32\n", - " )\n", - "fcst = NeuralForecast(models=[model], freq='M')\n", - "fcst.fit(df=Y_train_df_single, static_df=AirPassengersStatic_single, val_size=12)\n", - "forecasts = fcst.predict(futr_df=Y_test_df_single) \n", - "\n", - "# Test n_series > 1024\n", - "# See issue: https://github.com/Nixtla/neuralforecast/issues/948\n", - "n_series = 1111\n", - "Y_df, S_df = generate_series(n_series=n_series, n_temporal_features=2, n_static_features=2)\n", - "\n", - "model = TSMixerx(\n", - " h=12,\n", - " input_size=24,\n", - " n_series=n_series,\n", - " stat_exog_list=['static_0', 'static_1'],\n", - " hist_exog_list=[\"temporal_0\", \"temporal_1\"],\n", - " n_block=4,\n", - " ff_dim=3,\n", - " revin=True,\n", - " scaler_type=\"standard\",\n", - " max_steps=5,\n", - " early_stop_patience_steps=-1,\n", - " val_check_steps=5,\n", - " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", - " batch_size=32,\n", - ")\n", - "\n", - "fcst = NeuralForecast(models=[model], freq=\"D\")\n", - "fcst.fit(df=Y_df, static_df=S_df, val_size=12)\n", - "forecasts = fcst.predict()" + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(TSMixerx, [\"airpassengers\"])" ] }, { @@ -667,7 +577,7 @@ "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import TSMixerx\n", "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", - "from neuralforecast.losses.pytorch import MAE\n", + "from neuralforecast.losses.pytorch import GMM\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", @@ -680,13 +590,12 @@ " n_block=4,\n", " ff_dim=4,\n", " revin=True,\n", - " scaler_type='standard',\n", + " scaler_type='robust',\n", " max_steps=500,\n", " early_stop_patience_steps=-1,\n", " val_check_steps=5,\n", " learning_rate=1e-3,\n", - " loss=MAE(),\n", - " valid_loss=MAE(),\n", + " loss = GMM(n_components=10, weighted=True),\n", " batch_size=32\n", " )\n", "\n", @@ -702,7 +611,11 @@ "\n", "plot_df = plot_df[plot_df.unique_id=='Airline1'].drop('unique_id', axis=1)\n", "plt.plot(plot_df['ds'], plot_df['y'], c='black', label='True')\n", - "plt.plot(plot_df['ds'], plot_df['TSMixerx'], c='blue', label='Forecast')\n", + "plt.plot(plot_df['ds'], plot_df['TSMixerx-median'], c='blue', label='median')\n", + "plt.fill_between(x=plot_df['ds'][-12:], \n", + " 
+    "                 y1=plot_df['TSMixerx-lo-90'][-12:].values,\n",
+    "                 y2=plot_df['TSMixerx-hi-90'][-12:].values,\n",
+    "                 alpha=0.4, label='level 90')\n",
    "ax.set_title('AirPassengers Forecast', fontsize=22)\n",
    "ax.set_ylabel('Monthly Passengers', fontsize=20)\n",
    "ax.set_xlabel('Year', fontsize=20)\n",
@@ -733,7 +646,7 @@
    "Y_df = AirPassengersPanel[AirPassengersPanel['unique_id']=='Airline1']\n",
    "\n",
    "plt.plot(Y_df['ds'], Y_df['y'], c='black', label='True')\n",
-    "plt.plot(Y_hat_df['ds'], Y_hat_df['TSMixerx'], c='blue', label='Forecast')\n",
+    "plt.plot(Y_hat_df['ds'], Y_hat_df['TSMixerx-median'], c='blue', label='Forecast')\n",
    "ax.set_title('AirPassengers Forecast', fontsize=22)\n",
    "ax.set_ylabel('Monthly Passengers', fontsize=20)\n",
    "ax.set_xlabel('Year', fontsize=20)\n",
diff --git a/nbs/models.vanillatransformer.ipynb b/nbs/models.vanillatransformer.ipynb
index b76cc9ba2..c28b2a4a6 100644
--- a/nbs/models.vanillatransformer.ipynb
+++ b/nbs/models.vanillatransformer.ipynb
@@ -67,7 +67,7 @@
    "    TransDecoderLayer, TransDecoder,\n",
    "    DataEmbedding, AttentionLayer,\n",
    ")\n",
-    "from neuralforecast.common._base_windows import BaseWindows\n",
+    "from neuralforecast.common._base_model import BaseModel\n",
    "\n",
    "from neuralforecast.losses.pytorch import MAE"
   ]
  },
@@ -79,8 +79,11 @@
   "outputs": [],
   "source": [
    "#| hide\n",
+    "import logging\n",
+    "import warnings\n",
    "from fastcore.test import test_eq\n",
-    "from nbdev.showdoc import show_doc"
+    "from nbdev.showdoc import show_doc\n",
+    "from neuralforecast.common._model_checks import check_model"
   ]
  },
  {
@@ -154,7 +157,7 @@
   "outputs": [],
   "source": [
    "#| export\n",
-    "class VanillaTransformer(BaseWindows):\n",
+    "class VanillaTransformer(BaseModel):\n",
    "    \"\"\" VanillaTransformer\n",
    "\n",
    "    Vanilla Transformer, following implementation of the Informer paper, used as baseline.\n",
@@ -209,10 +212,11 @@
    "\t- [Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, Wancai Zhang. \"Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting\"](https://arxiv.org/abs/2012.07436)\n",
\n", " \"\"\"\n", " # Class attributes\n", - " SAMPLING_TYPE = 'windows'\n", " EXOGENOUS_FUTR = True\n", " EXOGENOUS_HIST = False\n", " EXOGENOUS_STAT = False\n", + " MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False)\n", + " RECURRENT = False # If the model produces forecasts recursively (True) or direct (False)\n", "\n", " def __init__(self,\n", " h: int, \n", @@ -346,14 +350,8 @@ " def forward(self, windows_batch):\n", " # Parse windows_batch\n", " insample_y = windows_batch['insample_y']\n", - " #insample_mask = windows_batch['insample_mask']\n", - " #hist_exog = windows_batch['hist_exog']\n", - " #stat_exog = windows_batch['stat_exog']\n", - "\n", " futr_exog = windows_batch['futr_exog']\n", "\n", - " insample_y = insample_y.unsqueeze(-1) # [Ws,L,1]\n", - "\n", " if self.futr_exog_size > 0:\n", " x_mark_enc = futr_exog[:,:self.input_size,:]\n", " x_mark_dec = futr_exog[:,-(self.label_len+self.h):,:]\n", @@ -371,7 +369,7 @@ " dec_out = self.decoder(dec_out, enc_out, x_mask=None, \n", " cross_mask=None)\n", "\n", - " forecast = self.loss.domain_map(dec_out[:, -self.h:])\n", + " forecast = dec_out[:, -self.h:]\n", " return forecast" ] }, @@ -402,6 +400,21 @@ "show_doc(VanillaTransformer.predict, name='VanillaTransformer.predict')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Unit tests for models\n", + "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\")\n", + " check_model(VanillaTransformer, [\"airpassengers\"])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -421,9 +434,7 @@ "\n", "from neuralforecast import NeuralForecast\n", "from neuralforecast.models import VanillaTransformer\n", - "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic, augment_calendar_df\n", - "\n", - "AirPassengersPanel, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", + "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n", "\n", "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n", @@ -434,7 +445,6 @@ " conv_hidden_size=32,\n", " n_head=2,\n", " loss=MAE(),\n", - " futr_exog_list=calendar_cols,\n", " scaler_type='robust',\n", " learning_rate=1e-3,\n", " max_steps=500,\n", diff --git a/nbs/utils.ipynb b/nbs/utils.ipynb index 5b056c144..e8cb8c170 100644 --- a/nbs/utils.ipynb +++ b/nbs/utils.ipynb @@ -13,7 +13,16 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. 
+      "  %reload_ext autoreload\n"
+     ]
+    }
+   ],
   "source": [
    "#| hide\n",
    "%load_ext autoreload\n",
@@ -38,12 +47,11 @@
    "#| export\n",
    "import random\n",
    "from itertools import chain\n",
-    "from typing import List, Union\n",
+    "from typing import List, Union, Optional, Tuple\n",
    "from utilsforecast.compat import DFType\n",
    "\n",
    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import utilsforecast.processing as ufp"
+    "import pandas as pd"
   ]
  },
  {
@@ -161,7 +169,77 @@
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "---\n",
+       "\n",
+       "[source](https://github.com/Nixtla/neuralforecast/blob/main/neuralforecast/utils.py#L22){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
+       "\n",
+       "### generate_series\n",
+       "\n",
+       ">      generate_series (n_series:int, freq:str='D', min_length:int=50,\n",
+       ">                       max_length:int=500, n_temporal_features:int=0,\n",
+       ">                       n_static_features:int=0, equal_ends:bool=False,\n",
+       ">                       seed:int=0)\n",
+       "\n",
+       "*Generate Synthetic Panel Series.\n",
+       "\n",
+       "Generates `n_series` of frequency `freq` of different lengths in the interval [`min_length`, `max_length`].\n",
+       "If `n_temporal_features > 0`, then each serie gets temporal features with random values.\n",
+       "If `n_static_features > 0`, then a static dataframe is returned along the temporal dataframe.\n",
+       "If `equal_ends == True` then all series end at the same date.\n",
+       "\n",
+       "**Parameters:**<br>\n",
+       "`n_series`: int, number of series for synthetic panel.<br>\n",
+       "`min_length`: int, minimal length of synthetic panel's series.<br>\n",
+       "`max_length`: int, minimal length of synthetic panel's series.<br>\n",
+       "`n_temporal_features`: int, default=0, number of temporal exogenous variables for synthetic panel's series.<br>\n",
+       "`n_static_features`: int, default=0, number of static exogenous variables for synthetic panel's series.<br>\n",
+       "`equal_ends`: bool, if True, series finish in the same date stamp `ds`.<br>\n",
+       "`freq`: str, frequency of the data, [panda's available frequencies](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases).<br>\n",
+       "\n",
+       "**Returns:**<br>\n",
+       "`freq`: pandas.DataFrame, synthetic panel with columns [`unique_id`, `ds`, `y`] and exogenous.*"
+      ],
+      "text/plain": [
+       "---\n",
+       "\n",
+       "[source](https://github.com/Nixtla/neuralforecast/blob/main/neuralforecast/utils.py#L22){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
+       "\n",
+       "### generate_series\n",
+       "\n",
+       ">      generate_series (n_series:int, freq:str='D', min_length:int=50,\n",
+       ">                       max_length:int=500, n_temporal_features:int=0,\n",
+       ">                       n_static_features:int=0, equal_ends:bool=False,\n",
+       ">                       seed:int=0)\n",
+       "\n",
+       "*Generate Synthetic Panel Series.\n",
+       "\n",
+       "Generates `n_series` of frequency `freq` of different lengths in the interval [`min_length`, `max_length`].\n",
+       "If `n_temporal_features > 0`, then each serie gets temporal features with random values.\n",
+       "If `n_static_features > 0`, then a static dataframe is returned along the temporal dataframe.\n",
+       "If `equal_ends == True` then all series end at the same date.\n",
+       "\n",
+       "**Parameters:**<br>\n",
+       "`n_series`: int, number of series for synthetic panel.<br>\n",
+       "`min_length`: int, minimal length of synthetic panel's series.<br>\n",
+       "`max_length`: int, minimal length of synthetic panel's series.<br>\n",
+       "`n_temporal_features`: int, default=0, number of temporal exogenous variables for synthetic panel's series.<br>\n",
+       "`n_static_features`: int, default=0, number of static exogenous variables for synthetic panel's series.<br>\n",
+       "`equal_ends`: bool, if True, series finish in the same date stamp `ds`.<br>\n",
+       "`freq`: str, frequency of the data, [panda's available frequencies](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases).<br>\n",
+       "\n",
+       "**Returns:**<br>\n",
+       "`freq`: pandas.DataFrame, synthetic panel with columns [`unique_id`, `ds`, `y`] and exogenous.*"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
   "source": [
    "show_doc(generate_series, title_level=3)"
   ]
@@ -170,7 +248,111 @@
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\ospra\\AppData\\Local\\Temp\\ipykernel_16560\\470716697.py:2: FutureWarning: The default of observed=False is deprecated and will be changed to True in a future version of pandas. Pass observed=False to retain current behavior or observed=True to adopt the future default and silence this warning.\n",
+      "  synthetic_panel.groupby('unique_id').head(4)\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
unique_iddsy
002000-01-010.357595
102000-01-021.301382
202000-01-032.272442
302000-01-043.211827
22212000-01-015.399023
22312000-01-026.092818
22412000-01-030.476396
22512000-01-041.343744
\n", + "
" + ], + "text/plain": [ + " unique_id ds y\n", + "0 0 2000-01-01 0.357595\n", + "1 0 2000-01-02 1.301382\n", + "2 0 2000-01-03 2.272442\n", + "3 0 2000-01-04 3.211827\n", + "222 1 2000-01-01 5.399023\n", + "223 1 2000-01-02 6.092818\n", + "224 1 2000-01-03 0.476396\n", + "225 1 2000-01-04 1.343744" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "synthetic_panel = generate_series(n_series=2)\n", "synthetic_panel.groupby('unique_id').head(4)" @@ -180,7 +362,61 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
static_0static_1unique_id
00.7488050.5735440
10.2349660.2350571
\n", + "
" + ], + "text/plain": [ + " static_0 static_1 unique_id\n", + "0 0.748805 0.573544 0\n", + "1 0.234966 0.235057 1" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "temporal_df, static_df = generate_series(n_series=1000, n_static_features=2,\n", " n_temporal_features=4, equal_ends=False)\n", @@ -238,7 +474,131 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
unique_iddsy
01.01949-01-31112.0
11.01949-02-28118.0
21.01949-03-31132.0
31.01949-04-30129.0
41.01949-05-31121.0
51.01949-06-30135.0
61.01949-07-31148.0
71.01949-08-31148.0
81.01949-09-30136.0
91.01949-10-31119.0
101.01949-11-30104.0
111.01949-12-31118.0
\n", + "
" + ], + "text/plain": [ + " unique_id ds y\n", + "0 1.0 1949-01-31 112.0\n", + "1 1.0 1949-02-28 118.0\n", + "2 1.0 1949-03-31 132.0\n", + "3 1.0 1949-04-30 129.0\n", + "4 1.0 1949-05-31 121.0\n", + "5 1.0 1949-06-30 135.0\n", + "6 1.0 1949-07-31 148.0\n", + "7 1.0 1949-08-31 148.0\n", + "8 1.0 1949-09-30 136.0\n", + "9 1.0 1949-10-31 119.0\n", + "10 1.0 1949-11-30 104.0\n", + "11 1.0 1949-12-31 118.0" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "AirPassengersDF.head(12)" ] @@ -247,7 +607,18 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABmcAAAKHCAYAAAB0L5wRAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3Rc1dX38d+oF6tLVrPk3hvGBhsbsA02vRgIJUAoJgl5SCCm5iEkwQQChGAgL6TBY8AQCC2YjrEBY1ww7r13Vav3NtLc94/RjEaoa6qk72ctrdyZOfeeM+2a3D17b5NhGIYAAAAAAAAAAADgEX7eXgAAAAAAAAAAAEBfQnAGAAAAAAAAAADAgwjOAAAAAAAAAAAAeBDBGQAAAAAAAAAAAA8iOAMAAAAAAAAAAOBBBGcAAAAAAAAAAAA8iOAMAAAAAAAAAACABxGcAQAAAAAAAAAA8CCCMwAAAAAAAAAAAB5EcAYAAAC93jfffCOTySSTyaSFCxd6ezkAAAAAgD6O4AwAAAB6hGeeecYeYDGZTHrrrbe8vaRm6/nhX79+/ZSenq5LLrlEf/vb31RWVubt5QIdOnbsWLuf69b+5s2b5+1lowMLFy7UwoUL9eqrr3p7KQAAAGhEcAYAAAA9wssvv9zs9uLFi720ks6prKxURkaGPv30U/3qV7/SiBEj9MUXX3h7WQD6oEceeUSPPPIIwRkAAAAfEuDtBQAAAAAdWb9+vXbv3t3svq+++krHjh3ToEGDOtx/1qxZMgzDTauzWrp0abPb5eXl2rZtm1577TUVFBTo5MmTuvzyy7Vq1SpNnTrVrWsBXCEhIUEvvvhih+OSk5M9sBoAAACgdzEZ7v5/qQAAAICTfvazn+n//u//JEm33nqrXnnlFUnSH/7wBz3yyCNeW5fJZLJvt/Wf1YWFhbrwwgu1ceNGSdK0adP03XffeWR9QFcdO3ZMgwcPliQNHDhQx44d8+6C4BK2c9XMmTP1zTffeHcxAAAAkERZMwAAAPi4yspKvf3225KkwYMH669//av69esnSXrllVdksVi8ubwOxcXFacmSJfbb69ev14kTJ7y4IgAAAACAtxGcAQAAgE975513VF5eLkn6yU9+ooiICF111VWSpIyMDK1YsaLDY3zzzTf25uULFy5sdcygQYNkMpnsZdJqa2v1t7/9TbNmzVJycrL8/f07VUKtNaNHj9bw4cPtt3fu3Gnfrqmp0Ycffqi77rpL06dPV0JCggIDAxUREaHhw4frJz/5SaeeoySVlZVp0aJFmj17thITExUUFKTIyEgNHTpU06dP1z333KNly5aprq6u1f1zc3P1yCOPaMaMGYqPj1dgYKCio6M1YsQInX322XrooYf0zTffdBgQ27Ztm379619r4sSJio2NVXBwsFJSUnTxxRfr5ZdfVn19fbv7296rWbNm2V+j//f//p/OOOMMxcXFKTQ0VEOHDtXtt9+uI0eOdOq1qays1OOPP67JkycrKipKERERGjdunB566CHl5ORIkm655Rb73B1ljJSWlmrRokWaM2eOUlJSFBwcrNjYWE2ePFkPPvigsrKy2t2/tbk++OADXXnllRo4cKCCg4NbXcfq1as1f/58jR49WhEREQoKClJSUpLGjx+vK664Qn/729909OjRTr0m7lZbW6t//OMfuuCCC5q9RpMmTdIDDzzQ4Tpb+94ePHhQ9957r8aOHavo6Og2v9M1NTX617/+pUsuuURpaWkKCQlRVFSUxo0bp7vuuksHDhzo9PMoKCjQk08+qXPPPdf+PMLCwjR8+HBdffXVWrx4scrKylrd98CBA3rmmWd0xRVXaPjw4erXr5+CgoLUv39/nX322XrsscdUUFDQqXV05723vX42q1atst/n+EcvGgAAAC8wAAAAAB82Y8YMQ5IhyTh06JBhGIbx9ddf2++7+uqrOzzGypUr7eMffvjhVscMHDjQkGQMHDjQOHr0qDFu3Dj7Pra/gQMHNtvH8bGOTJ8+3T72jTfesN8/ePDgFvO09nf55Zcb5eXlbR5/06ZNRlJSUqeOtXHjxhb7f/bZZ0ZERESn9s/Pz291DTU1Ncb8+fMNk8nU7v5jx441Dh8+3OZzsY2bOXOmceTIEWP8+PFtHis8PNz48ssv233t9+7da39/W/tLSEgwvv32W+Pmm2+233f06NE2j/fOO+8YsbGx7T7HkJAQ49VXX23zGI5z7d+/37jqqqtaPY5tHQ0NDcbtt9/eqffn4osvbvf1aM/Ro0fb/Lx3xebNm9t9zSUZQUFBxl/+8pc2j/HD7+3rr79uhIaGtjjOD7/T33zzjZGamtru3P7+/sbjjz/e4fN4/vnnjfDw8A5f81tuuaXFvkuWLOnU+xUZGWl88sknba7Bmfe+M/tIMl555ZUOXwsAAAC4VoAAAAAAH7V//36tXbtWknTmmWdq6NChkqRZs2Zp0KBBOnbsmD788EMVFBQoPj7eJXPW1tbqyiuv1K5duzRt2jT96Ec/UlpamkpKSpplvHRVXl6efTs6Otq+XVVVpejoaJ1zzjmaNGmSBg4cqLCwMJWVlWnHjh16++23lZOTow8//FDz58/XO++80+LYVVVVmjdvnnJzcyVJkydP1hVXXKHU1FSFh4eruLhYe/fu1cqVK7V9+/YW+2dnZ+uaa65RRUWFJGtfiosvvlhJSUkKDg5WQUGBdu3apa+++qrNjIP6+npdcMEF9n4WiYmJuu6663TKKacoPDxcWVlZWrp0qb799lvt3r1bZ599trZu3aqEhIQ2X7OysjJdfPHF2rt3r8477zxdcsklSkpKUm5url577TVt2rRJlZWV+vGPf6x9+/YpNja2xTHy8/N
1zjnn2LNj0tPTNX/+fI0cOVIVFRVavny53nvvPV155ZWaOHFim2uxeemll3T77bfLMAwFBATokksu0TnnnKOkpCRVVlZq7dq1euONN1RdXa1bbrlFQUFB+vGPf9zuMRcsWKDPP/9cAwcO1E033aRRo0aprq5OGzZsUHBwsCTphRde0L/+9S9JUkREhH70ox9p8uTJSkhIUF1dnTIzM7Vp0yZ9+eWXHT4Hd9u1a5dmzpxp/zyNHDlSP/nJTzRs2DCVlpbqs88+04cffqi6ujrdf//9qq2t1UMPPdTuMdetW6c//elPMplMuvnmm3XWWWepX79+OnLkiAYMGGAf9/nnn+vyyy+X2WyWyWTSnDlzdP7552vAgAGqq6vTpk2b9Nprr6mkpES//e1vJUkPPvhgq3P+7//+r/785z/bb5955pm65JJLNHDgQFksFp04cUJr167VihUrWu05VVVVJZPJpIkTJ+rss8/WqFGj7J/RzMxMffnll1q2bJnKysp01VVXad26dTr11FNbHMeZ937p0qWSpCuuuEKSNHbsWD322GMtxrU2LwAAANzM29EhAAAAoC3333+//ZfdL730UrPHfv/739sfe/bZZ9s9TlcyZ2x/Tz75ZIfrcxzfnj179jQbe+LECftjn332mVFXV9fmvpWVlcYVV1xh33f16tUtxrz77rv2x++9995217J7924jLy+v2X1/+ctf7Ps///zz7e7//fffG9XV1S3u/9///V/7MX784x8bFRUVre7/wgsv2MfdcMMNrY5xfK0CAgKMd955p8WY+vp649JLL7WPe/rpp1s91k033WQfc84557S6rk8++cQICgpqNWPF0fbt243g4GBDkpGWlmZs27at1Tn37dtnDBgwwJBkREREGIWFhS3GOGbOSDLmzZvX6utqM3bsWEOSERsbaxw/frzNcTU1Ncb69evbfLwjzmbOWCwWY8KECfZj3Hzzza1+vt9//30jMDDQnsWyadOmFmMcv7eSjP79+xvbt29vc+7s7Gx7RlNUVJTx1VdftTnOtkZ/f39j7969LcZ88MEH9nnDw8ON999/v815CwsLjZUrV7a4f9euXcbBgwfb3M8wDOPLL780wsLCDEnGueee2+oYV7z3tucyc+bMdtcDAAAAzyE4AwAAAJ9kNpuNxMREQ7KWiCopKWn2+KFDh+wXHMeNG9fusboanLn88ss7tcbOBGeKioqMqVOn2sdNmzatU8d2VFpaai+t9NOf/rTF40888YT9+Lt37+7y8R1LJlVWVnZ5/5MnTxohISGGJGPKlClGfX19u+NvuOEG+4XxzMzMFo87vq6///3v2zzO/v377eNau7Cdm5trDwBERUUZJ0+ebPNYv/vd7zoMztiCZP7+/saWLVvafY4rVqxoN9DnGJxJTU1tt2SdYRj2oFBnyvg5wzE405m/H17s/+STT5p9L81mc5tzPfLII/ax11xzTYvHfxicWbp0abtrv/vuu+1jP/zww3bH7tu3z/D39zckGb/4xS+aPWaxWOwBEUnGW2+91e6xnOUYaG7t++CK957gDAAAgO/xEwAAAOCDPv74Y508eVKSNG/ePEVFRTV7fOjQoTrzzDMlWcsobdiwwWVz33XXXV3e54MPPmj29+9//1v333+/Ro0ape+//16SFBQUpGeeeabLx46MjNT48eMlSevXr2/xeHh4uH178+bNXT6+s/u//fbbqqmpkSTdd9998vf3b3f8TTfdJElqaGjQV1991eY4Pz8//frXv27z8REjRigtLU2StHv37haPf/rppzKbzZKkG264Qf3792/zWHfeeacCAtqu+lxSUqIPP/xQkjR37lxNmjSpzbGSNGfOHKWkpEiSvvjii3bHzp8/X/369Wt3jO092rlzp+rq6tod603//e9/7dv33Xdfu6/pggULFBYWJsn6fbe9V61JT0/X5Zdf3ubjhmHo9ddfl2Qto3bZZZe1u86RI0fq9NNPl9Ty/dmyZYv98zRp0iRde+217R7LWTNmzLBvt/f99vX3HgAAAF1DzxkAAAD4pMWLF9u3b7755lbH3HLLLVqzZo0k6eWXX7ZfbHWGv7+/pk+f3uX9bD0d2pKQkKBXX31VZ5xxRovHiouL9cYbb2jZsmXatWuXCgsLVVlZ2Wofi8zMzBb3zZkzRyaTSYZh6H/+53908OBBXXfddRozZkyn1n7eeefZg0ZXXnmlfvOb3+iqq67S4MGDO7X/t99+2+y5fPDBB+2Oz8rKsm/v2bOnzXEjR45UXFxcu8dKTU1VRkaGiouLWzy2ceNG+/bs2bPbPU7//v01ZswY7dixo9XH165dK4vFIsna96Oj5yjJHnBp7zlK0llnndXhsc477zy99dZb2rdvn84991zdfffdOu+88zoM6jgjISFBL774YrtjftjryTG4cP7557e7b2RkpKZPn64vv/xS1dXV2r59u6ZMmdLq2DPPPFMmk6nNY+3Zs0cFBQWSpKSkpE69P7Yg4tGjR1VTU6OQkBBJ0urVq+1j5s2b1+FxOrJmzRr95z//0YYNG3TkyBGVl5e3GYhq7fvtjfceAAAA7kdwBgAAAD4nOztby5YtkyQlJydr7ty5rY675pprdNddd6mqqkr/+c9/9Mwzz9h/id9dcXFx9ou0zggNDVVcXJzGjx+vCy+8UD/5yU8UHR3dYtyHH36o2267TYWFhZ06bllZWYv7Ro8erd/97nd69NFHVVlZqUcffVSPPvqo+vfvrzPPPFNnn322LrjgAo0cObLVY55//vm66aab9Nprr6mgoED333+/7r//fqWnp2vGjBmaOXOmLrroInuWyg8dO3bMvv0///M/nXoeNkVFRW0+9sML/60JDg6WJNXW1rZ4LDs72749dOjQDo81dOjQNoMzjs/x3Xff1bvvvtvh8Wzae46SmjW0b8uf//xnrVmzRpmZmVqzZo3WrFmjgIAAnXLKKTrrrLM0a9YsnXfeeS757NqEhYV1OTiRk5MjyRrASkpK6nD8yJEj7Y3sHd+vH+roNXJ8f1atWqVVq1Z1YrVNioqK7JlOGRkZ9vs7G+BsTUVFhX7yk590KlBk09r32xvvPQAAANyP4AwAAAB8zquvvqqGhgZJ1nJUbZXJioiI0BVXXKE33nhDZWVleu+99+wls7orNDS0W/u1luXSke+++04/+tGPVF9fL0maMGGC5syZo2HDhikmJkbBwcH2bIHf/e532r17tz1744f++Mc/6vTTT9eTTz6ptWvXSpLy8vL0/vvv6/3335dkLZ+0aNEiTZ06tcX+S5Ys0bnnnqtnn31W27ZtkySdOHFCJ06c0H/+8x+ZTCZdeOGFeuaZZ1oEeUpKSrr83G3aK9Pk5+dcFebKykr7dmeCdu2NceY5tleuS+rcZy49PV1bt27V448/rtdee02FhYWqr6/Xpk2btGnTJj377LOKjIzUr3/9az300EP2oJWnlZeXS2peKq89jtkftn1b09Fr5Mz7IzX/HDoGSJzJTrn22mv12WefSbK+HhdffLEmTZqklJQUhYWF2Uu+7dq1S7///e8lyX
7ec9RT3nsAAAB0DcEZAAAA+BTDMPTyyy/bbz/99NN6+umnO7Xv4sWLnQ7OeNIf/vAHe2Dmb3/7m+644442x/7pT3/q8HiXXHKJLrnkEp08eVKrV6/Wd999p1WrVmnLli0yDENr167VWWedpc8++0xz5sxpsf9NN92km266SSdOnLDvv3LlSu3Zs0eGYeizzz7T6tWrtXbtWnsPHKn5Bezi4uJWM4S8wTFAUFVV1eF4x2DODzk+x+eee67dXjjuEh8fr2eeeUZ/+ctftHnzZq1bt05r167V119/raKiIpWVlenRRx/V2rVrtWLFCqeDW90RERGhkpKSdl9LRxUVFc327S7H92fBggV69tlnu32syMhI+7bj+rpi7dq19sDM+PHjtXz58jYziQIDAzs8Xk947wEAANA1/BcbAAAAfMqqVat0+PDhbu377bff6uDBgy5ekXuYzWZ98803kqTJkye3G5iRmpdt6khiYqJ+9KMfadGiRdq0aZOOHTumH/3oR/Z577777nb3T09P1w033KAXXnhBu3fv1u7duzVz5kxJ1uyG3/72t83GO5acsjVS9wW2MlWSOvWZOnLkSJuPOT7HXbt2ObcwJ/n7++v000/XggUL9O677+rkyZN65513FBUVJUn6+uuvtXTpUq+sLTk5WZL1c5Kbm9vh+AMHDti3Hd+vrnLl++N4rI76BbVl+fLl9u3HH3+83RJvR48e7fRxffm9BwAAQNeQOQMAAACfsnjxYvv2FVdcoQkTJnS4z4YNG/T5559Lkl5++WU98cQTblufqxQUFNizZoYNG9bu2A0bNtibnXdHenq63nzzTa1atUr5+fnatWuXSkpKOp3hMmbMGL3//vtKSEiQxWJp1jBdkmbNmqVPPvlEkvT+++9rxowZ3V6rK5122mn65z//KUlauXKlPUDVmry8vHYDSzNnzpTJZJJhGPrkk09UV1enoKAgl6+5OwICAnT11VcrKyvLHnhbvXq1rrrqKo+vZdq0adq7d68k6YsvvtDNN9/c5tjy8nKtW7dOkrVs2cSJE7s97ymnnKLo6GiVlJRo9erVKigo6FTPotacffbZ9u0PPvhAf/jDH7p8DMfAVEffb1uGTXd09r23fXa7U34RAAAA7kHmDAAAAHxGaWmp/vvf/0qy/kL873//uxYuXNjh33PPPWc/xpIlS1rt2+BrHEtuHTp0qN2xDz/8sNPzBQYGKjU11X7bFhjqrNjYWHu5px/2ULnuuuvsfS7++c9/dvh8POXiiy+2l4x64403lJ+f3+bY559/vt3PTXx8vC6++GJJ1gvvixYtcu1iXWDw4MH27a6+v67iGABbtGhRu+v461//ai9/dtlll3WqvFdb/P39deONN0qSamtr9dBDD3X7WKeeeqrGjh0rSdq6davefvvtLh+js9/vdevWadmyZV1f5A909N7byr51ttwcAAAA3I/gDAAAAHzGm2++qerqaknSeeed124pIEcjRozQtGnTJEk5OTlO/RLdUyIjIzVixAhJ0ubNm/Xee++1GNPQ0KC77767w4u3/+///T+9++67zZqa/9Dq1au1Y8cOSdayTY5ZBY888oi++OILWSyWNvd/88037U3XJ02a1Oyx1NRU+6/2q6qqdP7552vr1q3trnnXrl36xS9+0e4YZyUmJurHP/6xJGvg77rrrmv14vSnn36qp556qsPjPfbYY/Yg1O9+9zv99a9/bTcTobS0VM8995y+/PLLbj4Dq5ycHN17773tlmYzm8168cUX7bdPOeUUp+bsrgsvvNCeAbNz5079/Oc/bxHMk6SPPvpIjz76qCRrYOWBBx5weu7f/va3io2NlSS9+OKL+s1vftPq3DbV1dV65ZVX9NZbbzW732Qy6bHHHrPfvu222/TBBx+0eZzi4mJ7iUKb0047zb79yCOPqKampsV+O3bs0NVXX93uZ8hV770teLNv3z77ORYAAADeRVkzAAAA+AzHkmY33XRTl/a96aabtH79evtxLr30UpeuzR0WLFhg7zVzzTXX6Nprr9XMmTMVExOjQ4cO6Y033tDevXs1btw4BQcHa/Pmza0eZ8uWLVqyZImioqJ0/vnn69RTT9WAAQMUEBCgvLw8rVy5Up988ok9+PLDnjErV67UwoUL1b9/f51//vk65ZRTlJycLJPJpJycHH3++efNAgw/3F+yBi62b9+uzz//XEeOHNGUKVN0wQUX6JxzzlFqaqpMJpMKCwu1a9cuffPNN9q7d6/8/f3tZcfc5emnn9aKFSuUk5Ojr7/+WmPGjNH8+fM1atQoVVRUaPny5Xr33XcVGxurU045RV999ZUktdpQfeLEifq///s/3XzzzbJYLFqwYIH+/ve/64orrtDo0aMVHh6u8vJyHT58WBs2bNCqVatUV1en119/3annUFtbq2eeeUbPPPOMJk+erLPOOktjxoxRdHS0KioqdPjwYf3nP/+x98wZMmSIrrvuOqfm7C6TyaQ33nhD06ZNU0VFhV555RV99913uummmzRkyBCVlZXp888/b9YX5ZFHHtGpp57q9NzJycl69913dfHFF6umpkZPPfWU3njjDV199dWaMGGCIiIiVFlZqePHj2vTpk366quvVFVVZQ8SOZo3b57uvfdeLVq0SJWVlbriiit05pln6pJLLtHAgQNlGIYyMjL03XffadmyZbr22ms1a9Ys+/5XXnml0tPTdeLECW3atEkjR47UT3/6Uw0bNkxVVVVatWqV3nrrLZnNZt18881asmRJq8/JVe/9nDlztGPHDlVWVurSSy/VTTfdpISEBJlMJknS+PHjm2XWAQAAwAMMAAAAwAds27bNkGRIMqKioozq6uou7V9UVGQEBwcbkoyAgAAjNzfX/tjKlSvtx3744Ydb3X/gwIGGJGPgwIGdntN2zO7+Z7XFYjHmz5/f7Dg//Bs/frxx5MgRY+bMmW3Odeutt7Z7DNtfYGCg8dhjj7XYf/bs2Z3aPzw83Hj55ZfbfD5ms9m4//77jcDAwE4dr63X2vb4zJkzO3wN23tdbPbs2WOkp6e3uY64uDjjm2++MW644Qb7fUVFRW0eb/ny5caAAQM69RyDg4ONzz//vMUxbr75ZvuYo0ePtvscjx071qm5JBnjxo0zDh061OHr1pajR492+P50xqZNm+zfqbb+goKCjD//+c9tHqMz39vWbNmyxRg1alSnXi9/f3/jpZdeavNYTz/9tBESEtLhcW699dZWX4P4+Ph2537yySfbfZ6ueu+zsrKMxMTENvd95ZVXOv36AgAAwDXInAEAAIBPcMyaufrqqxUSEtKl/WNiYnTppZfqvffeU319vZYsWeKSUknuZDKZtHjxYl188cV68cUXtWnTJpWVlSkuLk4jR47U1Vdfrdtuu63D1+Kf//ynbrnlFq1cuVJr1qzR/v37lZ+fr/r6ekVGRmr48OGaNWuWbrvtNg0fPrzF/p988onWrFmjlStXat26dTp06JAKCgpkGIaio6M1atQozZkzRz/96U+VkpLS5joCAgL01FNP6Ve/+pVefvllff311zp48
KCKiork5+enuLg4jRgxQlOnTtX555/frPG6O40ePVp79uzRX//6V7333ns6dOiQDMNQWlqaLr30Ut11111KTU3Vk08+aX8etv46rZk7d649Y+HTTz/Vpk2blJ+fr5qaGkVERGjQoEGaOHGizjnnHF166aWKjo52av0DBw7UiRMntHLlSq1cuVJbtmzRiRMnVF5erqCgICUlJWnSpEm66qqrdM011yggwPv/N2/y5Mnav3+/Fi9erA8//FA7duxQYWGhwsPDNXDgQM2dO1d33HFHs14prjJp0iTt3r1bS5cu1Ycffqj169fr5MmTqqysVL9+/ZSWlqbx48dr9uzZuvTSS9stn3jvvffq+uuv14svvqjly5fr4MGDKi4uVlBQkFJTU3XqqafqwgsvbNZrx/E12LFjhxYtWqRPPvlEx48fV0BAgFJSUjR79mz9/Oc/16mnntqiJJojV733KSkp2rJlixYtWqQvv/xSR48eVUVFRbsl1QAAAOBeJoP/GgMAAADQx1ksFiUlJSk/P18TJ07Utm3bvL0kAAAAAL1Yy0LKAAAAANDHvP3228rPz5ckzZ4928urAQAAANDbEZwBAAAA0KutX79eNTU1bT6+Zs0a/fKXv5Qk+fn56ec//7mnlgYAAACgj/J+MWIAAAAAcKMnn3xS3377rS688EJNmTLF3jcnKytLX375pZYtW2bvvfHAAw9o9OjR3lwuAAAAgD6AnjMAAAAAerV58+bpww8/bHeMyWTSvffeqz//+c/y86PAAAAAAAD3IjgDAAAAoFc7dOiQPvroI61YsUKHDx9WYWGhysrKFBERofT0dM2cOVM///nPNXbsWG8vFQAAAEAfQXAGAAAAAAAAAADAg+g54wSLxaLs7GxFRETIZDJ5ezkAAAAAAAAAAMCLDMNQeXm5UlJS2i2ZTHDGCdnZ2UpLS/P2MgAAAAAAAAAAgA/JyMjQgAED2nyc4IwTIiIiJElHjx5VbGysl1cDwJvMZrOWL1+u8847T4GBgd5eDgAv4nwAwIbzAQAbzgcAHHFOAHq3srIypaWl2eMHbSE44wRbKbOIiAhFRkZ6eTUAvMlsNissLEyRkZH8hxXQx3E+AGDD+QCADecDAI44JwB9Q0etUNoueAYAAAAAAAAAAACXIzgDAAAAAAAAAADgQQRnAAAAAAAAAAAAPIjgDAAAAAAAAAAAgAcRnAEAAAAAAAAAAPAggjMAAAAAAAAAAAAeFODtBfRFZrNZDQ0N3l5Gj+fv76/AwEBvLwMAAAAAAAAAgC4hOONBZWVlKigoUG1trbeX0msEBwcrPj5ekZGR3l4KAAAAAAAAAACdQnDGQ8rKypSVlaV+/fopPj5egYGBMplM3l5Wj2UYhsxms0pLS5WVlSVJBGgAAAAAAAAAAD0CwRkPKSgoUL9+/TRgwACCMi4SGhqqiIgIZWZmqqCggOAMAAAAAAAAAKBH8PP2AvoCs9ms2tpaRUVFEZhxMZPJpKioKNXW1spsNnt7OQAAAAAAAAAAdIjgjAc0NDRIEs3r3cT2utpeZwAAAAAAAAAAfBnBGQ8ia8Y9eF0BAAAAAAAAAD0JwRkAAAAAAAAAAAAPIjgDAAAAAAAAAADgQQRnAAAAAAAAAAAAPIjgDAAAAAAAAAAAgAcRnAEAAAAAAAAAAPAggjMAAAAAAAAAAAAeRHAGAAAAAAAAAAB4VFmNWQ0Ww9vL8BqCM/CKjRs3ymQyacaMGW2OeeSRR2QymfTYY495cGUAAAAAAAAAAHf6cFuWJj6yXD/65zpV1tZ7ezleQXAGXnHaaadp8uTJWrdunXbv3t3icYvFoldeeUX+/v669dZbvbBCAAAAAAAAAIA7vLMpQ4YhbT1Rokc+bnl9uC8gOAOvuf322yVJ//d//9fiseXLl+v48eO66KKLlJqa6umlAQAAAAAAAADc5ERRlX37nU2Z+nRHjhdX4x0B3l4ArC59fo3yy2u9vYxOSYgI1sd3nun0ca6//nrdd999ev311/Xkk08qODjY/pgtYPOzn/3M6XkAAAAAAAAAAL6hvsGi7JKaZvc9+P4OnZIerdToUC+tyvMIzviI/PJa5ZbVdDywFwkPD9cNN9ygf/zjH1q6dKmuu+46SVJeXp4++ugjpaSk6KKLLvLyKgEAAAAAAAAArpJTWqMGi9HsvrKaet399jb952fT5O9n8tLKPIvgjI9IiAjueJCPcOVaf/GLX+gf//iHXnrpJXtw5tVXX5XZbNb8+fPl7+/vsrkAAAAAAAAAAN7lWNLsutPStPpggbJKqrXhaJH+ueqwfjl7mBdX5zkEZ3yEK8qE9UQTJkzQtGnTtHLlSh0+fFhDhw7V4sWLZTKZdNttt3l7eQAAAAAAAAAAF3IMzoxNjdKVpw7QdS9+J4shPbPigKYPjdOk9BgvrtAz/Ly9AOAXv/iFDMPQ4sWLtWrVKh04cEBz587VoEGDvL00AAAAAAAAAIALZTgEZ9JiQnX64Fj9qjFbpsFi6NdvbVNFbb23lucxBGfgdddcc41iYmL06quv6h//+Ick6Wc/+5mXVwUAAAAAAAAAcDXHzJn02DBJ0l3nDtek9Gj743/4cJc3luZRBGfgdaGhobrpppuUk5Ojt99+WwkJCbr88su9vSwAAAAAAAAAgIvZMmdMJik1JlSSFODvp79eO0n9gq2dWN7fkqWPtmd7bY2eQHAGPuH222+3b99yyy0KDAz04moAAAAAAAAAAO5gy5xJigxRcIC//f70uDD98fKx9tsPLd3ZrARab0NwBj5h9OjRSklJkST99Kc/9fJqAAAAAAAAAACuVl5jVnGVWZKU1ljSzNEVk1J1+SkpjWPrdffb21TfYPHoGj2F4Ax8wrp165Sdna2ZM2dqxIgR3l4OAAAAAAAAAMDFMoqq7dvprQRnTCaTHp03TgMay51tOl6sv6087LH1eRLBGfiExx9/XJL0q1/9yssrAQAAAAAAAAC4wwmHMmWtBWckKTIkUM9de4r8TNbb/+/rg9p8vMgTy/MogjPwmnXr1um2227T1KlT9emnn2ry5Mm68sorvb0sAAAAAAAAAIAbZBY3BWfSYkPbHDdlUKzuOne4JKnBYujXb21TWY3Z7evzJIIz8JoDBw7o5Zdf1t69e3XppZfq/fffl58fH0kAAAAAAAAA6I06kzlj86vZwzRlYIwkKbO4Wn/6ZK9b1+ZpXAmH19xyyy0yDENlZWX66KOPlJ6e7u0lAQAAAAAAAADcxDE4k9ZBcCbA30/PXnuK+gUHSJI+2p4twzDcuj5PIjgDAAAAAAAAAADczhacCQn0U0K/4A7Hp8WGaVJ6tCSp2tyg4qreU9qsxwZnsrKydOONNyouLk5hYWE65ZRTtHnzZvvjhmFo4cKFSklJUWhoqGbNmqXdu3c3O0Ztba3uvPNOxcfHKzw8XJdddpkyMzM9/VQAAAAAAAAAAOjVLBZDmcXVkqS0mDCZTKZO7Zca3dSbJruk2i1r84YeGZwpLi7WjBkzFBgYqM8//1x79uzRokWLFB0dbR/z1FNP6ZlnntELL7ygjRs3Kikp
SXPnzlV5ebl9zIIFC7R06VK99dZbWrNmjSoqKnTJJZeooaHBC88KAAAAAAAAAIDeKa+8VnX1Fkkd95txlNJLgzMB3l5Ad/z5z39WWlqaXnnlFft9gwYNsm8bhqHnnntODz30kK688kpJ0pIlS5SYmKg333xTt99+u0pLS7V48WK9/vrrmjNnjiTp3//+t9LS0vTll1/q/PPPd/m6e1M9PF/C6woAAAAAAAAAvq0r/WYcEZzxIR999JHOP/98XX311Vq1apVSU1N1xx136Gc/+5kk6ejRo8rNzdV5551n3yc4OFgzZ87UunXrdPvtt2vz5s0ym83NxqSkpGjcuHFat25dq8GZ2tpa1dbW2m+XlZVJksxms8zmtmvdGYYhwzBUV1en4OCO6+iha2pra+2vcXvvA+BOts8en0EAnA8A2HA+AGDD+QCAI84J6KuO5TdVtUqJCu70dyCxX6B9O6Oo0ue/O51dX48Mzhw5ckT/+Mc/dM899+i3v/2tNmzYoLvuukvBwcG66aablJubK0lKTExstl9iYqKOHz8uScrNzVVQUJBiYmJajLHt/0NPPPGEHnnkkRb3r1y5UmFh7Uf64uPjFRgYKIvF0ulaeuiYYRgqKChQUVGRDh486O3lAFqxYoW3lwDAR3A+AGDD+QCADecDAI44J6Cv+TrDT7ZOK3lH9uizkt3t79CooEayhTK27DuqzyyH3bNAF6mqqup4kHpocMZisWjKlCl6/PHHJUmTJk3S7t279Y9//EM33XSTfdwPgyCGYXQYGGlvzIMPPqh77rnHfrusrExpaWmaPXu24uLi2j1ueXm5cnNzVV5ersjISAUGBhKkcYItS6a8vFxms1ljxoxRRESEt5eFPsxsNmvFihWaO3euAgMDO94BQK/F+QCADecDADacDwA44pyAvmrlezulzBxJ0hVzz9SIxM5dz62tt+jRrV9KkoywGF100VS3rdEVbBW3OtIjgzPJyckaM2ZMs/tGjx6t//73v5KkpKQkSdbsmOTkZPuYvLw8ezZNUlKS6urqVFxc3Cx7Ji8vT9OnT2913uDg4FbLkgUGBnZ4Io2NjVVAQIAKCgqUk5PTiWeJzggODtaAAQMUGRnp7aUAkjp3PgDQN3A+AGDD+QCADecDAI44J6CvySqtsW8PSohUYGDnwhOBgVJCRLDyy2uVU1rj89+bzq6vRwZnZsyYof379ze778CBAxo4cKAkafDgwUpKStKKFSs0adIkSVJdXZ1WrVqlP//5z5KkyZMnKzAwUCtWrNA111wjScrJydGuXbv01FNPuWXdkZGRioyMlNlsVkNDg1vm6Ev8/f19/osIAAAAAAAAAJBOFFnLfcX3C1J4cNdCEynRocovr1Veea3q6i0KCvBzxxI9qkcGZ+6++25Nnz5djz/+uK655hpt2LBBL774ol588UVJ1nJmCxYs0OOPP67hw4dr+PDhevzxxxUWFqbrr79ekhQVFaXbbrtN9957r+Li4hQbG6v77rtP48eP15w5c9y6fqLiAAAAAAAAAIC+osbcoJNltZKktNj2+7e3JjU6RNszJMOQTpbVdOsYvqZHBmdOO+00LV26VA8++KD++Mc/avDgwXruued0ww032Mc88MADqq6u1h133KHi4mJNnTpVy5cvb9aX5Nlnn1VAQICuueYaVVdX69xzz9Wrr74qf39/bzwtAAAAAAAAAAB6ncziKvt2ejcCK8lRofbtrJJqgjPedMkll+iSSy5p83GTyaSFCxdq4cKFbY4JCQnR888/r+eff94NKwQAAAAAAAAAABlF1fbttJiuB1ZSopuCM9kl1e2M7Dl6fmE2AAAAAAAAAADgs2z9ZqTuZc6kRofYtwnOAAAAAAAAAAAAdMAxONOdkmSOmTNZJTUuWZO3EZwBAAAAAAAAAABuk9EsOBPazsjWUdYMAAAAAAAAAACgC2yZMwF+JiVHdT04ExcepKAAaziD4AwAAAAAAAAAAEA7DMOwZ84MiAmVv5+py8cwmUxKbcyeyS6plmEYLl2jNxCcAQAAAAAAAAAAblFUWafKugZJ3es3Y5MSHSJJqqxrUFlNvUvW5k0EZwAAAAAAAAAAgFtkFDeVIXMqOBPVu/rOEJwBAAAAAAAAAABuYes3I0npTmXOEJwBAAAAAAAAAADoUIaLgjOpBGcAAAAAAAAAAAA65hicSYtxTeZMVkmNU2vyBQRnAAAAAAAAAACAW7iurFmIfZvMGQAAAAAAAAAAgDbYgjORIQGKCgvs9nHoOQMAAAAAAAAAANABc4NFOaXWEmTpcd3PmpGkkEB/xYUHSSI4AwAAAAAAAAAA0Kqckho1WAxJzvWbsUluLG2WW1aj+gaL08fzJoIzAAAAAAAAAADA5VzVb8YmJcpa2sxiSCfLa50+njcRnAEAAAAAAAAAAC7nGJxJc0Vwphf1nSE4AwAAAAAAAAAAXC6j2LXBmVSCMwAAAAAAAAAAAG1zeVkzh+BMFsEZAAAAAAAAAACA5jIagzMmU/Osl+5KiQ6xb+eU1Dh9PG8iOAMAAAAAAAAAAFzOFpxJiQpVUIDz4QjKmgEAAAAAAAAAALShrMas4iqzJGlAjPNZM5IU3y9Ygf4mSZQ1AwAAAAAAAAAAaCbDxf1mJMnPz6TkKGugh8wZAAAAAAAAAAAAB+4IzkhNfWfKaupVXmN22XE9jeAMAAAAAAAAAABwqYyipsyWNJcGZ5pKpOWU1rjsuJ5GcAYAAAAAAAAAALjUCYfMGVcGZ1IdgjM9ue8MwRkAAAAAAAAAAOBSJ9xW1qwpONOT+84QnAEAAAAAAAAAAC6VUWwNzoQG+iu+X5DLjktwBgAAAAAAAAAA4AcsFkOZjT1n0mJDZTKZXHbslKgQ+3Z2CT1nAAAAAAAAAAAAdLK8RnUNFkmuLWkmScn0nAEAAAAAAAAAAGjuRGFTv5k0Fwdn+gUHKCo0UBJlzQAAAAAAAAAAcKlDeRX628pDzS70o2fIKG4KmqTFuDY4IzX1ncktrVGDxXD58T2B4AwAAAAAAAAAwKd8d7hQl72wRn/5Yr/uf2+7t5eDLjpR1BRQc3VZM0lKjbb2nam3GMovr3X58T2B4AwAAAAAAAAAwGd8sz9Pt7yyQVV1DZKkHZmlMoyemR3RV2U4Bmfi3Jc5I0nZpT2ztBnBGQAAAAAAAACAT/hid65+9tom1dZb7PdVmxtUUmX24qrQVY7BmQExoe2M7J5mwZke2neG4AwAAAAAAAAAwOs+3JalO97YInODNUsmOKDp8nVWD70A31fZyprF9wtWWFCAy49PcAYAAAAAAAAAACe9szFDC97eZm/ufuWkVP1i5lD74wRneo4ac4PyGvvApMe6PmtGauo5I0nZJTVumcPdCM4AAAAAAAAAALzmte+O6YH/7pCtrcz1U9P19NUTmzWSzyomONNTZBY79JuJdX2/Gal55kxPDdy5Pp8IAAAAAAAAAIBO+Neqw3ri83322/NnDNbvLxktk8nUK0pX9UUnHPrNpLkpONM/IkT+fiY1WIwe+9kgcwYAAAAAAAAA4FGGYei5Lw80C8z
8cvZQe2BGat5IvqdmR/RFJwrdH5zx9zMpKdJa2ozgDAAAAAAAAAAAHTAMQ08u26fnvjxov+++80bo/vNH2QMzkpQYGSLbTYIzPceJoqb3yl1lzSQptTGzqrjKrKq6erfN4y4EZwAAAAAAAAAAHmGxGFr40W79a9UR+32/u3i0fnXO8BZjgwL8lBjRs7Mj+qKMYvdnzkhSSnSIfTu7pMZt87gLwRkAAAAAAAAAgEe8tPqIlnx33H77sXnj9NOzhrQ53nYBvqCiTjXmBrevD87LaOw5E+jfVHrMHZJ7eE8igjMAAAAAAAAAAI/4YFu2JMlkkp6+eqJunDaw3fGpMU2ZFz3xAnxfYxiGTjQGZwbEhMnfz9TBHt2XQnAGAAAAAAAAAID2WSyGjhVUSpIGxYXrR5MHdLhPqsMFePrO+L6iyjpV1VkznNxZ0kySUpuVNet5nw2CMwAAAAAAAAAAt8stq1F1Y2myIfHhndqnp1+A72tsWTOSlBYT2s5I56U0C9zRcwYAAAAAAAAAgBaONmbNSNKQhE4GZxwu8GcVE5zxdY7BmXQ3Z85Q1gwAAAAAAAAAgA4cya+wbw+O79epfXp6dkRfk+HB4ExkSKAiggMkSdmlBGcAAAAAAAAAAGjhcH43MmeaBWeq2hkJX5BR1BQkcXfPGakpeJdTWiOLxXD7fK5EcAYAAAAAAAAA4HbNypp1sudMREigIkKs2RFZPbB0VV/TrOeMR4Iz1p5EdfUWFVbWuX0+VyI4AwAAAAAAAABwuyMF1rJm/YIDlBAR3On9bNkzuaU1auhh2RF9jS04ExUaqKjQQLfP15P7zhCcAQAAAAAAAAC4VW19gzKLrRfPhySEy2QydXpfW3DG3GAov7zWLeuD88wNFuU09n5xd78ZG4IzAAAAAAAAAAC04XhhlYzGpJfBnSxpZpMa49h3pmddgO9LskuqZUtsSosNbX+wizTvSdSzPhsEZwAAAAAAAAAAbnUkv8K+PSS+X5f27ckX4PsST/ebkX6YOVPjkTldheAMAAAAAAAAAMCtjhRU2rcHJ3Qtc6Ynl67qSxyDM54raxZi3+5pnw2CMwAAAAAAAAAAtzqS3xScGeJMWbPinnUBvi85XuiQORPjmeBMYmSI/BrbF2WX9qzPBsEZAAAAAAAAAIBbHXXMnOlqcIbMmR5hb06ZfXtkUoRH5gz091NipDV7pqd9NgjOAAAAAAAAAADcytZzJikyROHBAV3aN6FfsAL9rekR9JzxXXtzyiVJMWGB6h8R7LF5k6OswZmCijrVmBs8Nq+zCM4AAAAAAAAAANymuLJOxVVmSdKQLvabkSQ/P5OSo6zZM5Q180355bUqqKiVJI1OjpTJZPLY3I49iXJKazw2r7MIzgAAAAAAAAAA3OaIEyXNbGylzcpr61VWY3bJuuA6+3KbSpqNTo706Nw9tewdwRkAAAAAAAAAgNvYSppJ0pCEft06RmpM0wV4smd8j2O/mVEe6jdj45g505PK3hGcAQAAAAAAAAC4jWPmTHfKmknNL8D3pOyIvmJfY78ZyfOZMz31s0FwBgAAAAAAAADgNkfzHYIz3SxrNqCHZkf0FXsaM2f8/Uwa1r972VHdlRIdYt/OKaHnDAAAAAAAAAAAOlJgLWsW5O+nATFh3TpGTy1d1RfU1Vt0uLF03dCEcIUE+nt0/mY9Z0p7zmeD4AwAAAAAAAAAwC0aLIaOFVZJkgbGhcnfz9St49Bzxncdzq+QucGQJI1K8mxJM0mKCg1UWJA1INSTAncEZwAAAAAAAAAAbpFdUq26eoskaXA3S5pJUnJUU+mqntRXpC/Y21jSTPJ8vxlJMplM9syq7JJqGYbh8TV0B8EZAAAAAAAAAIBb2MpdSdKQhO73IgkJ9Fd8v2BJPSs7oi/Yl1tu3x6dHOGVNdiCMzVmi4qrzF5ZQ1cRnAEAAAAAAAAAuMXRgkr79hAnMmckKbWx8Xteea09Gwfe5+3MGanpsyH1nMwqgjMAAAAAAAAAALc4ku8QnElwMjjT2HfGMKTc0hqnjgXX2ZtjzZyJDQ9S/4hgr6whJcqhJxHBGQAAAAAAAABAX9Ysc8aJsmaSlBrddAE+s6TKqWPBNfLLa1VQUStJGpUUIZPJ5JV1pDh8NsicAQAAAAAAAAD0aUcae85EhQYqJizQqWM1vwBP5owv2Jfr/ZJmEsEZAAAAAAAAAAAkSVV19cpuLD82JCHc6awKx8yZrOKecQG+t3PsNzMqKcJr60hp1nOmZwTuCM4AAAAAAAAAAFzuWEFT6bHB8c71m5F6ZnZEb2frNyN5N3MmKaopOEPPGQAAAAAAAABAn3WkoMK+PdTJfjOSNCCm5zV97+1smTP+fiYNT3T+Pe6u4AB/JUQES+o5gTuCMwAAAAAAAAAAlzuaX2nfHuKCzJmo0ECFBflL6jkX4HuzunqLDjf2FBqaEK7gAH+vrseWWZVXXqva+gavrqUzCM4AAAAAAAAAAFzuSEFTcGZwgvPBGZPJZO87k1VSLcMwnD4muu9wfoXMDdb3wJslzWxSHfrOnCyt9eJKOofgDAAAAAAAAADA5Y40ZlWYTNKgOOeDM1JTdkRtvUUFFXUuOSa6x1bSTJJGJXk/OJMS5dCTqNT3M6sIzgAAAAAAAAAAXMowDHvmTEpUqEICXVPyKtWh7wylzbxrX265fXt0coQXV2JlC9xJPeOzQXAGAAAAAAAAAOBSBRV1Kq+plyQNcUFJM5tUhwvwWT3gAnxv5pg54wtlzQjOAAAAAAAAAAD6NFtJM0kamtDPZcdN7WEX4HszW3AmNjxI/SOCvbyaHwbuary4ks4hOAMAAAAAAAAAcKmjjSXNJGlwvAszZxzKmmUWE5zxlvzyWnvPn9HJETKZTF5ekZQSHWLf7glZVQRnAAAAAAAAAAAudcQhOOPKsmY9rXRVb+VY0mxUkvdLmknWDJ7IkABJ0q6sUhmG4eUVtc/p4ExVVZWqqqrafPz555/XWWedpdGjR+uiiy7SJ5984uyUAAAAAAAAAHqxnNJqvb3xhIor67y9FHTTkXz3ZM4kRgTL38+apdETsiN6q325vtVvRpJMJpNOHxwrSSqqrNOhvIoO9vAup4IzH3/8sSIiIpSSkqLy8vIWj8+fP18LFizQunXrtH//fn3xxRe6/PLL9dRTTzkzLQAAAAAAAIBe6nhhpS54brV+89+d+uMne7y9HHTTkQLrhfGQQD+lRIV2MLrzAvz9lBRpLV9F5oz37M1pigeMSorw4kqaswVnJOn7o0VeXEnHnArOfPHFFzIMQ/PmzVNERPM3YM2aNXr11VclSWFhYZo0aZJCQkJkGIZ+97vfaffu3c5MDQAAAAAAAKCXKa8x66dLNqm02ixJ2njMty+uonXmBotOFFqrLQ2KC5efn2v7kdgavxdXmVVVV+/SY6NzbGXN/P1MGp7Yz8uraXL64Dj7dq8Ozqxfv14mk0mzZ89u8diLL74oSUpJSdHevXu1efNm7du3T2lpaWpoaNC//vWvbs+7cOFCmUymZn9JSUn2xw3D0MKFC5WSkqLQ0FDNmjWrRT
CotrZWd955p+Lj4xUeHq7LLrtMmZmZ3V4TAAAAAAAAgO6zWAzd/fY2HXQoRXSyrEYWi2/3jUBLmcXVqm9831zZb8amWeP3YrJnPK2u3qLD+dbv6dCEcAUH+Ht5RU3GpUQqLMi6ng1HC32674xTwZm8vDxJ0vDhw1s8tmzZMplMJt15550aMGCAJCktLU133nmnDMPQqlWrnJlaY8eOVU5Ojv1v586d9seeeuopPfPMM3rhhRe0ceNGJSUlae7cuc1Kry1YsEBLly7VW2+9pTVr1qiiokKXXHKJGhoanFoXAAAAAAAAgK5btGK/vtyb1+w+c4OhgopaL60I3XUkvynANiTe9VkVqTFNZdLoO+N5h/IqZG6wBj18pd+MTYC/nyYPjJEknSyr1fHGDC5f5FRwJj8/X5LUr1/zL9iePXtUUFAgSbrsssuaPTZlyhRJ0rFjx5yZWgEBAUpKSrL/JSQkSLJmzTz33HN66KGHdOWVV2rcuHFasmSJqqqq9Oabb0qSSktLtXjxYi1atEhz5szRpEmT9O9//1s7d+7Ul19+6dS6AAAAAAAAAHTNR9uz9beVhyVJfiZpbErTBd/s0hpvLQvddLSg0r49ON71mTOp0WH2bYIznrcvt8y+7WvBGUmaNqSptNkGHy5tFuDMzv7+1vSgoqLmT3D16tWSpISEBI0aNarZYzEx1qhVTY1zJ9WDBw8qJSVFwcHBmjp1qh5//HENGTJER48eVW5urs477zz72ODgYM2cOVPr1q3T7bffrs2bN8tsNjcbk5KSonHjxmndunU6//zzW52ztrZWtbVNkfqyMuuH0Gw2y2w2O/V8APRstnMA5wIAnA8A2HA+AGDD+QBo366sMj3w3nb77QcvHKmaugbtzrZee8sorNDYJNdf4PeWvnBOOHiyqYJRekywy59rYkSgfTuzsLJXv5a+aHdWiX17eEKYz73+p6Y1BYy+O5yvK05Jame063X29XAqOJOamqpDhw5p27ZtmjVrlv3+Tz/9VCaTSWeddVaLfUpLSyVJ8fHx3Z536tSpeu211zRixAidPHlSjz32mKZPn67du3crNzdXkpSYmNhsn8TERB0/flySlJubq6CgIHugyHGMbf/WPPHEE3rkkUda3L9y5UqFhYW1sgeAvmbFihXeXgIAH8H5AIAN5wMANpwPgJbK6qRFO/1VY7Y2jJ+aYFFC0W5tKjBJsv4w/Ov1W2Q57rt9I7qrN58TNh/wl2R9Tw9tXafsne2P76rcKsl2aXvDnsP6zHzQtROgXWv2+MlWlCtr9wZ95mMvf71FCjT5y2yYtGpvtj77LMOj81dVda6UmlPBmbPOOksHDx7UCy+8oBtvvFHx8fHauHGjli1bJkmtZqDs3btXkpSU1P1o1YUXXmjfHj9+vM444wwNHTpUS5Ys0bRp0yRJJpOp2T6GYbS474c6GvPggw/qnnvusd8uKytTWlqaZs+erbi4uDb3A9D7mc1mrVixQnPnzlVgYGDHOwDotTgfALDhfADAhvMB0LraeotuemWTSupKJEmT0qK0eP5pCg7wU/zRIv370CZJUkzKEF104UgvrtS1+sI54U+7VkmqVWx4oH502Xkdju+qqrp6PbH9a0mSKTxWF110usvnQNv+uOMbSXWKCQvUdZfP7fC6uze8k7dR3x8tVlGtSadMn62U6NCOd3IRW8WtjjgVnLnjjjv06quv6ujRoxoyZIhGjBihPXv2qL6+XrGxsbr22mtb7PP111/LZDLplFNOcWbqZsLDwzV+/HgdPHhQ8+bNk2TNjklOTraPycvLs2fTJCUlqa6uTsXFxc2yZ/Ly8jR9+vQ25wkODlZwcHCL+wMDA3vtiRRA13A+AGDD+QCADecDADacD4AmhmHooQ93aMuJEklSclSI/nXTFPULtV57S4+LsI89WV7XK787vfWcUF5jVl65tTXE0IR+bnmOUYGBigkLVHGVWTmltb3ydfRV+eW1KqyskySNSYlUUFCQl1fUuqlD4vX90WJJ0pbMMg1M8FxvnM5+Hv2cmeTUU0/VX/7yF5lMJlVUVGjLli2qqalRYGCgXnrpJUVERDQbX1paqk8//VSSNHfuXGembqa2tlZ79+5VcnKyBg8erKSkpGZpgXV1dVq1apU98DJ58mQFBgY2G5OTk6Ndu3a1G5wBAAAAAAAA4LxX1x3TO5syJUnBAX568SdT1D8ixP54YlTTD6SzS2n43pMcK2gq6TQ43n29gmyZELllNapvsLhtHjS3N6cpK2RUkucCHl01bXCsfXvD0SIvrqRtTmXOSNLdd9+tOXPm6L333rNnq/z4xz/WyJEtUw2/+eYbnXbaaZKkOXPmdHvO++67T5deeqnS09OVl5enxx57TGVlZbr55ptlMpm0YMECPf744xo+fLiGDx+uxx9/XGFhYbr++uslSVFRUbrtttt07733Ki4uTrGxsbrvvvs0fvx4p9YFAAAAAAAAoH2rD+br0U/22G//5eqJGj8gqtmY4AB/xfcLVkFFrXJKajy9RDjhSEGFfXtIQj+3zZMaHard2WVqsBg6WV6rVA+WrerL9uU2BWdGJ/tucGZSeowC/U0yNxj6/kgvDc5I1r4v48eP73Dc5Zdfrssvv9zp+TIzM/XjH/9YBQUFSkhI0LRp07R+/XoNHDhQkvTAAw+ourpad9xxh4qLizV16lQtX768WSbPs88+q4CAAF1zzTWqrq7Wueeeq1dffVX+/v5Orw8AAAAAAABAS8cKKvWrN7fKYlhv3zFrqC6bmNLq2JToEBVU1Cqv3JoZEeDvVBEgeMjh/Er79hAPZM5IUlZxNcEZD9mbU27fHp0c0c5I7woN8teEAdHafLxYRwoqlVde0yw7zxc4FZyZP3++JOnCCy/U1Vdf7ZIFdcZbb73V7uMmk0kLFy7UwoUL2xwTEhKi559/Xs8//7yLVwcAAAAAAADgh8przPrpa5tUWm2WJM0Z3V/3ndey+o5NclSIdmSWymKIzIge5GiBQ3AmwX3BmQExTZ+H7BJK33mKraxZgJ9Jw/q7LzPKFU4fHKvNx619ZzYeLdbFE5I72MOznAo3L1myREuWLFFkpO+mLwEAAAAAAADwvsc+2atDedaSV8P799Oz154iPz9Tm+MdMyO4+N5zHMm3vsd+Jik91n3BGcdgXRafD4+oq7fYv8NDE/opOMC3q1Cd7tB35vujhV5cSeucCs4kJCRIkhITE12yGAAAAAAAAAC9j2EY+mJPriQpPMhfL900RREhge3ukxJFcKanMQzDnjmTFhumoAD3laJLITjjcYfyKlTfWJNwlA+XNLOZMjBGtvjvhqO+13fGqW/HmDFjJEnHjx93yWIAAAAAAAAA9D6ZxdUqqbKWMzt9cKwGdaIXSXJ0U3+InNIat60NrnOyrFZVdQ2S3NtvRpJSY5r3nIH77csts2+PTvb9aloRIYEamxIlSdqXW67iyjovr6g5p4IzN954owzD0JIlS1y1HgAAAAAAAAC9zPbMEvv2hAHRndon2SFzJofMiB7BVtJMkoYkuLcfSVx4kIIbM3PIrPIMW
78ZSRqV5PuZM5I01aG02cZjvpU941Rw5tZbb9W5556rDz/8UI888ogMw3DVugAAAAAAAAD0EjszS+3bEwZEdWqfFIfMmWwyZ3qEI40lzSRpsJszZ0wmk73vTFZJNdemPWBfbrl9e0wPyJyRmved8bXSZgHO7Lx69Wrdd999ys/P1x//+Ee99dZbuvbaazVhwgTFxMTI37/9hkBnn322M9MDAAAAAAAA6AEcM2fGdzI40z8iRP5+JjVYDOWUkhnRExzJbwrODElwb3BGsvadOVJQqaq6BpVWmxUdFuT2OfsyW+ZMXHiQEiKCvbyaznEMznzfm4Izs2bNkslkst8+cOCAHn300U7tazKZVF9f78z0AAAAAAAAAHycxWJoV5b1om5yVIj6R4R0sIeVv59JiRHByi6tUU4JmTM9wZECh7Jm8e4taybJnjkjWfsaEZxxn7zyGhVUWHu2jEqOaBYX8GXRYUEalRShfbnl2p1dqvIasyJCAr29LElOljWTJMMwuv0HAAAAAAAAoHc7Wlipilrrj7THp3Yua8YmufHie2FlnWrMDS5fG1zraGNZs/AgfyVGuj+zIsUhOJNF3xm32pfTVNJsdFLPKGlmY8uesRjSpuPFXl5NE6cyZ1auXOmqdQAAAAAAAADohXY4lDSbmBbdpX2To5qybHJLazTIzX1M0H219Q3KKKqSJA1OCPdIZkVqTFNwJpvgjFvZSppJ0qge0m/GZurgOL323XFJ1r4zs0f29/KKrJwKzsycOdNV6wAAAAAAAADQC+3ILLVvdzVzxjEzIru0muCMD8soqpKlsVjSYA+UNJOalzXLKiY44077ch0yZ5IjvLiSrjttcIx9+/sjhV5cSXNOlzUDAAAAAAAAgLY4E5xxzJyh74xvO5xfad8e4qEgWuoPgndwH1vmTICfScP6eyb45ir9I0I0JMH6mdyRWarqOt8okUhwBgAAAAAAAIBb1DdYtDvbGpxJjw1TTHjXGrYnRzVdfM/h4rtPO+IYnEnwTHAmKSpEtuppZM64T129RYfyKiRJQxP6KTjA38sr6rqpjX1n6i2Gtp7wjb4zLgvOlJWV6eWXX9bPfvYzXXrppTr33HN1/PjxZmOys7O1Z88eHTlyxFXTAgAAAAAAAPBRh/IrVGO2SJLGD+ha1owkpUQ3Zc5kl5I548uOFlTYt4d4qKxZUICf+kcES5KyyKxym0N5FapvrFnX00qa2UwdHGffXn+0yIsraeJUzxmbv/3tb3rooYdUXm6tO2cYhkwmkyorK5uNW7VqlW644QaFhIQoMzNTsbGxrpgeAAAAAAAAgA/akdFU0mxiN4IzzTJnaPju0xwzZwZ7KHNGspY2O1lWq4KKWtWYGxQS2POyOnydraSZJI1KjvTiSrrv9MFNsYgNR32j74zTmTMLFy7UXXfdpbKyMgUFBWny5Mltjr322muVnJys2tpa/fe//3V2agAAAAAAAAA+bEdWiX17fGp0l/ePCw9SkL/1EmYOmTM+7WiBNTjTPyJY/YJdkhPQKSnRjqXv+Iy4w77cpuDM6B4anEmJDtWAGOtnZeuJEtXWe7/vjFPBma1bt+rRRx+VJN14443Kzc3Vhg0b2p7Mz09XX321DMPQihUrnJkaAAAAAAAAgI/bkWnNnDGZpHGpXb+o6+dnUlKUtbRZNpkzPqu0yqzCyjpJnus3Y5Ma0xScoe+Me+zNKbdvj07qmWXNpKbSZrX1Fvu5yZucCs48//zzMgxDZ5xxhl577TVFRXWcmnjGGWdIknbu3OnM1AAAAAAAAAB8WG19g70c0pD4cEWEBHbrOMmNwZmymnpV1ta7bH1wncOO/WYSPNNvxibVIXMmq6TKo3P3FbbMmbjwICU09vjpiaY2K23m/b4zTgVnVq1aJZPJpF/96led3mfQoEGSpKysLGemBgAAAAAAAODDDuRWyNxgbSI+YUB0t4/TvGwVmRG+6KhDv5kh8R7OnGkWnKGsmavlldeooMKaFTUqOUImk8nLK+q+qUOagjPrj3i/74xTwZmcnBxJ0siRIzu9T3CwNbJWW1vrzNQAAAAAAAAAfNj2zBL79oQBHVfcaYstc0aSsrn47pOONMucoaxZb7JsV659e3RSz+w3Y5MeG6bESGt8YvPxYtU3WLy6HqeCM0FBQZIks9nc6X1sAZ3o6GhnpgYAAAAAAADgw3Y69HRwKjhD5ozP25XV1DB+WIJne5I4ZlbRl8i1/rPhhB7+aLf99rQhcV5cjfNMJpO970xVXYN2ZZd1sId7ORWcGTBggCRp9+7dHYxssnz5cknSsGHDnJkaAAAAAAAAgA+zZc74+5k0Jrn7wZkUMmd8msViaFtGiSRrT5K02ND2d3CxyJBARYQESJKyCM64zMtrjurB93fKsFYm1A1T03Xu6P7eXZQLnN6s74x3S5s5FZw555xzZBiGXnnllU6NP3LkiBYvXiyTyaS5c+c6MzUAAAAAAAAAH1Vd16CDedZSV8P791NokH+3j5UcReaMLztSUKnSamtlpUnpMV7pSWLrO5NTWi2LxfD4/L3N3785pD9+ssd++2dnDdZj88b16H4zNlObBWeKvLgSJ4Mzv/rVrxQQEKC1a9dq4cKF7Y7dtGmTzjvvPFVUVCg4OFi33367M1MDAAAAAACgFymurNN3hwu1ZN0xLV5zVCVVdd5eEpywJ6dMDY0XyZ0paSY1b/ieU0rmjK/ZeqLYvj0pPdora7B9RswNhvIr6HXeXYZh6Jnl+/XUsv32++46Z5h+e9HoXhGYkaRh/fspNtzarmXD0SL7ecobApzZecSIEfr973+vhx9+WI8++qg+//xzXXXVVfbHly1bpo8//ljLly/XN998I8la1+3JJ59UcnKyUwsHAAAAAABAz2PNqCjX/tzGv5PW/80rb35B9eDJcj151QQvrRLO2tFY0kySxg+IdupYkaEBCgvyV1VdAz1FfNDWxpJmkveCM459Z7JKqpUYGdLOaLTGMAz96dO9+r81R+33PXDBSN0xq3e1JzGZTDp9UKyW7c5VWU299ueWa0xKpFfW4lRwRpJ+//vfy2w26/HHH9fGjRu1adMmexTt/vvvt48zDEMmk0l/+MMfdNdddzk7LQAAAAAAAHqIHZkl+ueqw9qbU65jhZX2HgbtWXu4wP0Lg9vszCy1b090MnPGZDIpOSpEh/MrlV1SY7/OCN+w5bg1c8bPJE10MhDXXakxDsGZ4mqdmh7jlXX0VBaLoT98tEv/Xn/Cft/Dl47RrTMGe3FV7jN1iDU4I0nfHy30WnDGqbJmNn/84x+1fv16XXnllQoNDZVhGM3+AgMDdeGFF2r16tV6+OGHXTElAAAAAAAAegDDMHTXf7bqs525OlrQemAmOixQUwfH6uYzBmpgXJgkKaOoWuU1Zg+vFq6yI8sanAn0N2lkUoTTx7NlRlSbG+z9TeB9FbX1OnCyXJI0MilS4cFO5wJ0yw8zZ9B5DRZDD/x3hz0wYzJJT145vtcGZiTpdB/pO+Oyb8uUKVP03nvvqb6+Xnv27FFeXp4aGhoUFxensWPHKjQ0tOOD
AAAAAAAAoFc5VlilY4VVkqSgAD+NTIzQyKQI+/+OSopQQkSwPRPiwfd36nih9SLh/txyTRkU2+ax4Zsqaut1OL9CkjQ6OVLBAf5OHzM5qqlMVXZJjaLDgpw+Jpy3I6NEtpYdp3qppJnUvC8Rpe86z9xg0d1vb9MnO3IkSf5+Ji26eqLmTUr18srca1RSpCJCAlReU68NR4u8lo3n8lBmQECAJkygHigAAAAAAACkNYeaypPdM3eEfjFzaLvjxyQ3ZVnszSkjONMD7coqtWdIjU91rqSZTXJU08X3nNJqr5UhQnPN+814r5TYAIeyZscbg8FoX219g3715lat2HNSkjXL7f9dN0kXju/9veL9/ax9Z77al6fCyjodzq/QsP7OZ/h1lUvKmgEAAAAAAACtWXMw37595rD4DsePSm666L43t9wta4J77cgssW9PcLLfjE1KtEPmTGmNS44J5209UWzfnuTFzJn+EcGKDguUJO3OLpXRmcZWfZhhGPqff2+xB2aCAvz04k+m9InAjI1jabPvvVTajOAMAAAAAAAA3KLBYmjd4UJJ1r4yY5I7znZw7E+yN6fMbWuD++zILLVvT3BRg/hmmTOUrfIJhmFoy4kSSVJUaKCGxId7bS0mk8mepVVQUaccAnjt+v5okb7elydJCg301yu3nKbZo/p7eVWe5Ric2XK8xCtrcKqs2fz587u8j8lkUkhIiKKiojR8+HBNmzZNo0ePdmYZAAAAAAAA8EE7s0pVXlMvSZoxNF5+fh3X9I8MCdSAmFBlFldrf265LBajU/vBd+zMsgZnQgL9NLx/P5cc0zFzhgvvvuFEUZWKKuskWbNmvNGzw9GEAVFafdBaRnFHZolSoumB3pZvDzRlNC68bIxmdCKrsbcZnRwpk0kyDOlQnneyNJ0Kzrz66qsu+dJNmTJFzzzzjGbMmOH0sQAAAAAAAOAb1jr0mzlzeOcv/o1OjlRmcbWq6hp0oqhKg7z4i3x0TUlVnb3nx9iUKAX4u6Zwj2PmDA3ffcMWh5Jmp3qx34yNY5bWjsxSXTCu75To6ipbEEtSn8uYsQkJ9FdaTJhOFFXpcH6lDMPweIDRqbNjenq60tPTFR8fL8Mw7H9BQUFKTExUYmKigoKC7PdLUnx8vAYMGKDIyEj7/Rs3btTMmTP1xhtvuORJAQAAAAAAwPvWOFwA7Ey/GZvRDuXP9uVS2qwnsWXNSLKXmXKF8OAARYZYf2dO5oxv2NpY0kzybr8ZG8f+Ro6l9dBcUWWddmVbX59RSRHqHxHSwR6917DGzL6K2nrllnn+vOJUcObYsWNaunSpIiIiFBQUpLvvvltbt25VZWWlsrOzlZ2drcrKSm3dulULFixQYGCg+vXrp6VLl6q4uFgZGRn685//rIiICFksFv30pz9VRkaGq54bAAAAAAAAvKS6rkGbj1t/WZ8eG6a02LBO7zvaoe/MnhzvlJtB9zTvN+O64Iwke5mq3NIaWSw0fPc2W3DGZJImpkV7dS2SlBQZovh+wZKsZc1syQJobu2hAtlemrNHJHh3MV42zKHs4sGTFR6f36ngzMmTJ3XRRRcpNzdXK1eu1KJFizRx4kT5+TUd1s/PTxMnTtQzzzyjlStXKjc3VxdddJFycnKUmpqq+++/X998841CQ0NVV1enF154weknBQAAAAAAAO/acKxIdQ0WSepyPwPHzJm9OWTO9CQ7Mkvs245lplwhOcr6C/+6BosKG3udwDuq6xrs383h/fspMiTQyyuy9jqf2BgQLKup14miKi+vyDetPtjUb+asLpSb7I2GJTQFZw7l9bDgzKJFi5Sbm6t77rlHZ5xxRofjzzjjDN1zzz3Ky8vTX/7yF/v9kyZN0vz582UYhlasWOHMkgAAAAAAAOADmvWb6WJwJj02TGFB/pIoa9bT7GzMnOkXHKAhLu4VlOzQ4D2nlL4z3rQzq1T1jdlLk9K832/GZrxDttZ2Spu1YBiGvd9McICfThsU6+UVedewRIfgTH4PC858+OGHMplMOv/88zu9zwUXXCBJ+vTTT5vdf+GFF0qylkoDAAAAAABAz2brN2MySdOHxnVpXz8/k0Y2ljbLKKpWeY3Z5euD6+WX1yq7sR/MuNRI+fm5trl2SlRTb4zsEvrOeNOWE8X27VMHRntvIT8w0SFba6dDFhesDudX2Hs2nT44ViGB/l5ekXc5ljU71NPKmmVmZkqSgoODO72PbaxtX5uUlBRJUlUV6WYAAAAAAAA9WUFFrfY0ljwalxKlmPCgLh9jVFJTabP9ufSd6Ql2ZpXYt11d0kySkqPInPEVWx2CM5PSfSdzZlxqU+bMDjJnWvj2QFNG49nD+3a/GUmKDAlU/whrvKLHZc6EhVkbuW3atKnT+2zcuLHZvja1tbWSpJgY3/kyAwAAAAAAoOvWHS60b3e134zNmOQI+zZ9Z3qG7RlNF8PHO1wkd5Xk6KbMGduv/+F5hmFoy4kSSVJEcECzvh3elhARbM+w2pVVqobG0muwWuNYbrKP95uxGd5Y2qyosk6FFbUendup4MzkyZNlGIaeeOIJFRYWdji+oKBATz75pEwmk6ZMmdLssf3790uS+vfv78ySAAAAAAAA4GVrD3a/34zN6OSmzJm9ZM70CDuzmoIzE92QOZPikDmTXULmjLdklVQrv9x6EfuU9GiXl69zli1rq7KuQUcLPJ8N4atq6xv0XWPgPL5fsEYlRXSwR9/gGFw8lOfZz4tTwZk77rhDkrVE2bRp0/Tpp5/KMFpGIw3D0CeffKIzzjhDGRkZkqRf/vKXzcYsW7as1aANAAAAAAAAeg7DMOy/zg4O8NOUQd2rkjIyicyZnsQwDHsZqajQQKXFhnawR9clRZE54wu2NmbNSNKktGivraMt4wc0ZW05ZnP1dVuOl6ja3CBJOnt4vEwm3wqqeUuzvjMeLm0W4MzOl112mX7+85/rxRdf1JEjR3TZZZcpLi5Op5xyij0DJi8vT9u2bWuWWXP77bfrkksusd/Ozc3VBx98IMMwdOGFFzqzJAAAAAAAAHjR8cIqZTVmNZw2qPsNpyNCrBf4M4qqtT+3XBaL4XO/0EeTnNIaFTSWBJowIMotF35DAv0VFx6kwso65ZA54zVbHPvNDPS9FhWOWVs7s0p11eQB3luMD1l9MN++fdYISprZDOvf9EMAT2fOOBWckaR//vOfGjhwoB599FHV1NSooKBAX331VbMxtmya4OBgPfzww/rf//3fZo9HRkZq7969kqTU1FRnlwQAAAAAAAAvWe3Q06C7/WZsRiVFKqOoWlV1DTpRVKVB8eHOLg9u4th8fcIA1/ebsUmODlFhZZ1OlteqwWLIn4CdxzlmzpzihvJ1znLsd7Qjs8R7C/Exqw+67tzcmzTLnOlJZc1sHnzwQR05ckRPPPGE5syZo8TERAUFBSkoKEiJiYk699xz9fjjj+vIkSMtAjOSFBYWpoEDB2rgwIEKCHA6XgQAAAAAAAAvcUW/GZtmfWcobebTHC+Cj0+Ndts8yY19ZxoshvLKKW3mabX1DdqTbf0uDokPV0x
4kJdX1FJUWKAGxoVJknZnl8ncYPHyiryvqLJOu7KtAdTRyZHqHxHSwR59R3y/IEWFBkrqgZkzNklJSfrNb36j3/zmN646JAAAAAAAAHqQBouhdYetwZnosECNSYnsYI/2jUl26DuTW64Lxyc7dTy4z86spsyZiWnuy5xJjW7qZZNdUmMP1sAzdmWVqa4x2DEp3fdKmtlMGBCt44VVqq236ODJCqfPRT3d2kMFsrWKP2s4WTOOTCaThvfvp03Hi5VTWqPyGrMiQgI9MrdLMmcAAAAAAACAXVmlKquplyTNGBrvdMmpUUlkzvQEhmHYy5rF9wtWUqT7fpWfHNV07JxS+s542lbHfjPp0d5bSAcmOJQ225lV4r2F+Ihm/WYIzrTgWNrscH6lx+YlOAMAAAAAAACXWOPCfjOSlB4bprAgf0nSvlyCM77qRFGVSqvNkqz9Zkwm9/WBSXbInMkpoayZpzn2mznVpzNnmoIz2x36IfVFhmHY+80EB/jptEGxXl6R7/FW3xmXN3gpKytTeXm5GhoaOhybnp7u6ukBAAAAAADgJWtc2G9Gkvz8TBqZFKGtJ0qUUVTt0XIz6LwdDhe/HS+Ku0OKQ+ZMVgmZM55my5wJC/LXiMR+HYz2nrGpUTKZJMOQdvbx4Mzh/ArllFoDmacPjlVIoL+XV+R7hvbk4MyKFSv097//XatXr1ZxcXHHO8hay62+vt4V0wMAAAAAAMDLqusatPm49bpQWmyo0hsbcjtrdHKk/df6+3PLNYVfffscx34z7g7ONMucoayZR+WW1ii78SL/hAFRCvD33aJM/YIDNDShnw7lVWhfbplq6xsUHNA3gxLfHmgKmp89PMGLK/Fdw5sFZ8o9Nq/T36C77rpLF1xwgT766CMVFRXJMIxO/wEAAAAAAKB32HisyN4o/MxhrrsAODopwr5N3xnftD2jxL49PjXarXMlRgTL1srIlg0Az3DsN+PLJc1sbIFCc4OhfTmeu+Dua5r1mxlBv5nWpESFKrQxo6jHZM68+eabeuGFFyRJISEhmjdvniZPnqzY2Fj5+flu5BQAAAAAAACutfaQa0ua2YxOjrRv783tuxdYfZXFYmhXY+ZMSlSIEiKC3TpfgL+f+keEKLesRtn0nPGorQ5BuEk9ITiTGqX3t2RJknZklWpiWrR3F+QFtfUNWn+kSJKUEBGskYkRHezRN/n5mTS0f7h2ZZXpRFGVaswNHin/5lRw5l//+pckKS0tTV9//bWGDh3qkkUBAAAAAACgZ7E1nDaZpDOGxrnsuCPJnPFpRwoqVFln7T093s0lzWySo63BmYKK2j5drsrTthxvypyZlB7tvYV00vgB0fbtHRkl0rSBXluLt2w5XqJqs/X7edaweJlMJi+vyHcN7x+hXVllshjS0YLKZj8McBen0lt27Nghk8mkhx9+mMAMAAAAAABAH1VYUas9jYGTsSmRig0PctmxI0IClRZr7TOyP7dcFgul8n3JjkzHfjPRHpkzJaqp78zJ0lqPzNnX1dVb7L2F0mPDFN/PvRlSrjA2JVL+jTXwHPsi9SWUNOu8Yc36znimtJlTwRmz2SxJmjRpkksWAwAAAAAAgJ5n3eFC+/YMF5Y0sxmdZP0Fc1Vdg04UVbn8+Oi+5sEZD2XORIXYt7NLqz0yZ1+3L7dMtfXWnlKn9oCsGUkKCfTXiMYyXgdOlqu6McOrL7FlNEruOTf3JkMTelhwZtCgQZKkigrPNckBAAAAAACAb3HsN3PWsASXH3+UY98ZSpv5lH25Te/H2BRPlTVrypzJITjjEc1Lmvl+vxmbiY0BQ4sh7c7uW9kzhRW12tX4nEcnR6p/REgHe/RtwxN7WHDmyiuvlCR99dVXLlkMAAAAAAAAehbDMOy/zg4K8NOUQa6/cDsm2aHvTG65y4+P7jucXylJigsPcmk5u/akOGbOlNR4ZM6+bmtGiX27J/SbsXHsg+SY5dUXrD1cKKOxCuTZw8ma6cjA2DAF+lvL4DkTnDlRWKWFH+3q1FingjP33nuv0tPT9dxzz2nfvn3OHAoAAAAAAAA90PHCKmWVWLMXThsUo5BA1zdnH5VE5owvKq02K7/c2vNlqEO/Bncjc8bztpywZs4EB/h5pFG6q0xIjbZv78gs8do6vGH1AYd+M8Ndn9HY2wT4+2lQXLgk6WhBpeobLN06zqoDeXpvc1anxjoVnImKitKyZcuUmJioGTNm6O9//7uKi4s73hEAAAAAAAC9wppD7u9pkB4bpvAga9DHsYwWvMvx1+XDPBicccycySFzxu3yy2uVUWQNgk0YEKVAf6cuKXvUyKQIBTWud0dW38mcMQzDfm4OdlNGY29kK21W12Dpdn+zbx36/HQkoFszNBoyZIgkqaqqSsXFxbrzzjt11113KT4+XmFhYe3uazKZdPjwYWemBwAAAAAAgJe5u9+MJPn5mTQyKUJbTpQoo6ha5TVmRYQEumUudN5hx+BMgueCM/H9ghXob5K5wVB2KcEZd9vmUNLs1B7Ub0ayllocnRyh7ZmlOpJf2WfOHYfzK5TT+N04fXCsWzIaeyPH89ihvAoN6eJ5zdxg0XeHCzs93qngzLFjx5rdNgxDhmEoLy+vw31NJpMzUwMAAAAAAMDLGiyG1jVeiIoOC9SYFPeVOxqVHKktJ0okSftzyzVlUKzb5kLnHM5vCs54sqyZn59JiZEhyiyupqyZB9hKmkk9q9+MzYQB0dre2G9mZ1appg/t/f1Xvj3QFDQ/m5JmneZ4HjuUX6Hzurj/towSVdTWd3q8U8GZm2++2ZndAQAAAAAA0IPtyipVabVZkjR9aJz8/dz3Y1zHPhd7c8oIzvgAb5U1k6SUqFBlFlerpMqs6roGhQaRGeAuW5sFZ3pW5owkjR8QZd/emdk3gjOrDzr0mxnR+5+vqziexw6drGhnZOsc+/x0hlPBmVdeecWZ3QEAAAAAANCDeaLfjM3opAj79p6ccrfOhc451Jg5Exbk36wPjCckRzfNl11araEeLKvWl9Q3WLQ9w5p1khodqsRIz77PrjDBITizI7P3952prW/Q+iNFkqSEiGCNTIzoYA/YDE3oJ5NJMoym81tXrD7U+X4zktRzujcBAAAAAADApzj2mznTzcGZkQ7BmX25ZW6dCx2rMTcoo7FhtvWCpmdbGCRHhdq3c0roO+Mu+0+Wq9rcIEk6pQeWNJOsfURCG3uu7Mgq8e5iPGDz8WL7e3bW8Hjai3RBSKC/0mLCJFkzAw3D6PS+pVVmbW/szzSsf3in9iE4AwAAAAAAgC6rrmvQpmPWckdpsaEaGNe5i1HdFRESqLRY6wX5/bnlslg6f9EMrnessFK2t2Bognvf+9ak/CBzBu6xtbHPkySd2gNLmklSgL+fxjb2w8ooqlZxZZ2XV+Reaw42Bc3PGk5Js66ylTarqmtQdmnnA7/rDhfYz4lndLJ0nkuDMzU1NVq7dq3++9//6vXXX1dZGb9iAAAAAAAA6I02HitSXYNFkvuzZmxGJ1kvsFbVNehEY9YGvMOb/WYkMmc8ZUuzfjPR3luIkyYMiL
Zv78jq3aXNVh/0XLnJ3mi4Y9+ZvM6XNvvW4XWfPjSuU/u4JDiTkZGhm2++WdHR0Tr77LN1zTXX6JZbblFmZmazcYsXL9bpp5+uuXPndiklCAAAAAAAAL5lrQf7zdiMTo60b+/N4UfB3uT94ExT5kwOmTNus60xcybIIfukJ3LsO7Mzs8R7C3Gzwopa7cq2Bp9GJ0eqf0TP6xHkbUO7EZwxDEPfHsiXZP2uTBkY26n9nA7ObNiwQZMmTdK///1v1dXVyTCMNgMvl112mXbs2KGvv/5ay5cvd3ZqAAAAAAAAeMmm402/qJ/eyRIuzhqd3NR3Zm9uuUfmROsO51fat4cmeD44kxrdlDnTldJD6LziyjodKbC+z2NTIxUc4O/lFXXfeIfgzI7M3ps5s/ZwoWyX5s+mpFm3DGsWnOncvzPHCquUVWINEp82OEahQZ37rjgVnCktLdXll1+uoqIiJSUl6e9//7t27tzZ5viEhARdeOGFkqRPP/3UmakBAAAAAADgJYZh6EBjcCQ1OlSx4UEemZfMGd9h+0W5v5/J7f2GWhMdFqiQQOulzZwSMmfcYWuGQ0mztJ7Zb8ZmcFy4IoIDJPXu4MzqxuwNSTpreIIXV9JzDetG5syag9173Z0Kzjz//PM6efKk4uPj9d133+kXv/iFxo4d2+4+tpJmGzZscGZqAAAAAAAAeEluWY3Ka+slSSMSPZc1kRYTpvDGXyQTnPGeBouhI/nWi5YD48IUFODSttadYjKZlNLYdyaHzBm3eGdjU8uKKYN6dnDGz8+kcanW7JncshrllfW+z4xhGPZ+M8EBfj3+PfOWyJBAJUYGS5IO5lV0qj2LY7+ZrvRgc+rM+fHHH8tkMumee+5Renp6p/axBW8OHz7szNQAAAAAAADwkv0OJcVGJEW0M9K1/PxMGtk4X2ZxtcpqzB6bG02yiqtVW2+RJA3zQkkzm+Roaz+Nitp6PgsudvBkuZbtzpUk9Y8I1jmj+nt5Rc6bkOZbpc0O51eoqq7eZcf77nChchuDTtOGxCkksOeWofM2W/ZMSZVZhZV17Y41N1j03eFCSVJceJDGJHe+N5NTwZmDBw9Kks4+++xO7xMdHS1JKivj1w0AAAAAAAA90YGTDsGZ/p4LzkjSKIcLX/vpO+MVh/ObSv04Ns/2tOSopr4zOSW9LxPCm/7xTdMP639+9pBecaF/Qmq0fXtHlveCMxaLofvf3a5zF63Sj/7xncwNFpcc99/fH7dv/2jyAJccs68a7vDvWkelzbZllKiiMZP0zOHx8vMzdXoep4Iz1dXWeo7h4Z2vK1lRYX0yISEhzkwNAAAAAAAAL9mf23SxaqQHM2ek5n1n9lHazCscL1Z6M3MmJarp+mI2fWdcJqOoSh9uz5Zk7e3z49M7VzHJ100Y0JQ5szOzxCtrMAxDv/9wl97dbC0ZtyenTJ/tzHH6uHllNVq++6QkKb5fsM4fm+T0MfuyoV3oO+NMnx+ngjMJCdbJMjIyOr3P5s2bJUnJycnOTA0AAAAAAAAvsWXOmEzNmyd7wpjkpmDQnhwyZ7yhWXDGm5kz0U2ZM9mlBGdc5V/fHlaDxdpn49bpgxUeHODlFbnGgJhQxYQFSrKWNetMLxFXMgxDf/p0r974/kSz+19ec9Tptby9MUP1je/ZtacN8EofqN7EMejcUXDGsd/MWcM7329GcjI4c/rpp0uSPv/8806Nb2ho0IsvviiTyaQzzzzTmakBAAAAAEAPkFlcpbc3nlB+ea23lwIXsVgMHcyzBkUGxYV7vNzRyCSHzJlcMme8wXfKmjVlzlDWzDXyymr0ziZrVkd4kL9unj7QyytyHZPJpPEDoiVJhZV1yi717GfmuS8P6v/WHG1ci5QQYW06vz2zVFtOFHf7uA0WQ//ZcMJ+3N6S6eRNwzqZOVNaZdaOxiyskYkRSozsWrUwp4IzP/7xj2UYhl5++WVt3bq13bEWi0W/+MUvtGfPHknSjTfe6MzUAAAAAADAxxmGoZtf3qDf/Hen5j67Sp+7oHQLvC+juEo1ZmuPhBGJnr8w3y84QOmxYZKsPWcsFs/++r2vMwxDhxqDM0mRIernxayKFDJnXG7xmqOqq7d+v288Y6Ciw4K8vCLXmpDqndJm/1p1WH/96qD99pNXjtdvLhhlv724MWjTHV/vy7MHmmaP7K8BMWHdXygkSfH9ghTdmGXVXnBm3eEC2f4J6mrWjORkcOaqq67S9OnTVVtbq3PPPVd/+9vflJeXZ3/cZDLp5MmTev311zVlyhS9/PLLMplMuuCCCzRr1ixnpgYAAAAAAD5ud3aZDudXSpJKqsz6nze26N53tqu8xuzllcEZ+3ObSomNSPRsvxmbUY19bqrqGnSiqMora+irCivrVFJl/Q57s6SZROaMq5VU1enf661N5YMC/HTbmYO9vCLXc+w7sz2z1CNzvv7dMT3x+T777YcvHaNrT0vXpROTFd/Pmj2zbFeuMrp5LrO9Z5J04zSyZlzBZDLZS5vlltW0+d8tjiXNzvR0cEaSPvjgA40aNUolJSW66667lJycLJPJJEk69dRTlZKSoltuuUXbt2+XYRgaN26c3njjDWenBQAAAAAAPu7rfXkt7vvvlkxd8NxqbTha5IUVwRVs/WYk7wVnRic3lTbbm0NpM0/ylX4zkhQREqiIxsydHDJnnPbqumOqrGuQJF07JU39I7pWoqknmNBY1kySdnogOPPupgz9/sPd9tv3nz9St86wBr2CA/x10xnWsnEWQ1qy7liXj3+isErfHrQ2pE+NDtXMEf2dXzQkdVzazDAMfXvA+toH+ftp6uC4Ls/hdHAmPj5emzZt0i9/+UsFBwfLMAz7X21trX07ICBAP//5z7Vu3TpFR0c7Oy0AAAAAAPBxjsGZ/71wlL38UVZJta598Ts9+fk+1dY3eGt56KYDJ5suUo1M8lZwpmlegjOe5Sv9ZmySo60BhJzSGo83eO9NKmrr9craY5Ikfz+Tfn72EO8uyE2SokLUv7HXy47MErd+Zj7Zka3f/HeH/fYvZw/VL2cPazbm+qnpCgqwXqJ/e2OGKmrruzTHmxtOyPYUrp+aLn8/k3OLhl1HwZljhVXKKrEGhU8bHKPQoK73X3NJUciwsDA9//zzWrhwob744gtt2rRJeXl5amhoUFxcnCZNmqQLL7xQKSkprpgOAAAAAAD4uMKKWm13aJL7i5lDdfH4ZN37znZtOFYkw5D+ueqwVh3I11+vO8VrGRjoOlvmTKC/SYPiwr2yhmaZMw5l1uB+jhcphyZ45/13lBwVqgMnK1Rbb1FRZZ3iGstEoWv+8/0JlVZbSzfNOyVVabG9t2/JhAFR+nJvnspq6nW8sEqD4l3/Of5yz0kteGubvR/JrTMG6b7zRrYYF98vWFeckqq3N2WovLZe72zM0PxOlpOrrW/QO5syJFnPx9dMSXPZ+vGD4Ex+y+DM6saMJUk6a3hCt+ZwaceuuLg4XX/99br++utdeVgAAAAAANDDfLM/3/5r3tmjrGVW0mLD9J+fT9NLq49o0fL9MjcY2ptTpkueX
6PfXDBKt04fJD9+9evTzA0We+bE4Phw+y++PS0tJkzhQf6qrGsgc8bDfKmsmSSlRDv0nSmtITjTDTXmBr20+ogkyWSS/mdW78yasRmfGq0v91ozO3dklbo8OLPmYIHueHOL6hsjM9edlqY/XDLG3grkh+afOVhvNwZZXl13TDdPH9SpDJhlu3JVVFknSTp/bJISIvjsu1Kz4MzJlsGZbw809Zs5qxv9ZiQXlDUDAAAAAAD4IceSZueObqqB7+9n0i9mDtUHv5yhEYnWCx919RY9+ske/eTl7+kb4eOOFVTK3GC94OjNbCc/P5O9pFpmcbXK2mjWDNc73BiciQwJUIIPBEKSo0Lt29klfff88Y9vDuv6l9bru8OFXd73vc2ZyiuvlSRdMDZJw/r37kzGCWlR9u0tx4tdeuyNx4r0s9c2qa7eIkm6/JQU/emK8W0GZiRreUjbxf0TRVX6cu/JTs31xvoT9u0bpw10YtVoTUpUqMIaS5X9MHPG3GDR+iPW71p8vyCNTopssX9nuD04U1tbq6+++kpvv/22NmzY4O7pAAAAAACAl5kbLPYmuVGhgZqUFt1izNiUKH30qzM1f0ZT+Za1hwp1/rPfak82mRC+av/JphJiI71cis6xtNl+Spt5RGVtvbJLayRZ+820d8HZU5KjmmfO9EXZJdX687J9Wne4UDe9/L3e35LZ6X3rGyz656rD9tt3zBrWzuje4dT0GNkSU2wX2F3hUF655r+yUdVmay+188Yk6umrJ3YqC8axlNniNUc7HL8/t1wbjhVJsmZ4TB0c281Voy1+fiYNTbD+iCSjqEo15qYeedsySuz9gWYMi+921q9TwZnjx4/rgQce0AMPPKCSkpIWj69fv15Dhw7Veeedp+uvv15nnHGGTjvtNJ04caLlwQAAAAAAQK+w6VixyhsvWswckaAA/9YvP4QE+usPl47RGz+dqqRI6wXWspp6PfvlAY+tFV1zwKG0y4gk7wZnRjkEZ/ZR2swjjuRX2reHJXi/pJkkpUQ7ZM700cy73Q4BbXODoXve2a7/99XBTjW7/3hHtjKLra/b2SMSNH5AVAd79HxRoYEam2J9nvtyy+2lwZz1f6uP2v/tO3tEgp6/fpIC2/j374dmDk+w93DacLRIu7JK2x3/xvfH7ds3TE33iUBpb2QrbWYxmp//Vh9wvt+M5GRwZunSpXr66af19ddfKzo6utlj5eXlmjdvnnJycmQYhv1v8+bNuvjii1VfX+/M1HZPPPGETCaTFixYYL/PMAwtXLhQKSkpCg0N1axZs7R79+5m+9XW1urOO+9UfHy8wsPDddlllykzs/NRZQAAAAAA0LqV+5tKmp0zqn87I61mDIvXFwvOVlRooCRp8/HiTl1UhOcdyPWdzJkxyU3z7ybbyiMO5Te9/77Qb0aSUh2CM4fzWvaF6Av257b8/D+z4oAeeG+HzA2WNvezWAz9fWVT1syvZvf+rBmbM4bG2be/d0H2jGEYWn3Q2oMkKMBP/7jhVAUH+Hd6fz8/k26d0bnsmcraer2/JUuSFBrorytPHdDNVaMjzfrOOJQ2+/ag8/1mJCeDMytWrJDJZNK8efNaPPbiiy8qL8/6H2N33XWXPvzwQ91xxx2SpD179mjJkiXOTC1J2rhxo1588UVNmDCh2f1PPfWUnnnmGb3wwgvauHGjkpKSNHfuXJWXN/0DsmDBAi1dulRvvfWW1qxZo4qKCl1yySVqaGj44TQAAAAAAKALbP1m/EzWzJnOiAoL1OSBMZKkoso6HSmo7GAPeMOBxrJmwQF+SosN8+paRidH2ksT7ezgV+ZwjUMOwQ9fCc6kx4YpNjxIkvT90SI1WPpeYHevQ9D0hqnp9u13N2dq/qsbVd5GT6YVe0/qYON7etqgGJ3eh0pjnTGkKTjznQuCM8cLq5TV2PPotEExCg8O6PIxrjp1gKLDrD9S+Hh7tk6WtV6m76Pt2faSWpdNTLH/sAGu1yw40/hdKamq047MEknWHykkRoa0tmunOBWcOXLkiCRp8uTJLR575513ZDKZdMUVV+i5557TpZdeqhdeeEFXX321DMPQe++958zUqqio0A033KCXXnpJMTEx9vsNw9Bzzz2nhx56SFdeeaXGjRunJUuWqKqqSm+++aYkqbS0VIsXL9aiRYs0Z84cTZo0Sf/+97+1c+dOffnll06tCwAAAACAvuxEYZX9Asap6TGKabxo2hm24IxkzZ6Bb6kxN+hYoTVoNjyxX6f6KLhTWFCA/cLZgZPlzfoBwD0O5zUFTYf6SFkzPz+Tpg2xBhXKa+q1O7vvBepsZf2CAvz0yGVj9cL1kxQUYL3su/pgga7+53fK+UHJN8Mw9LeVh+y37+hDWTOSdNrgWPs5bN1h54Mzaw41ZVKcOax7Za5Cg/x1/enW4Fq9xdBr3x1rMcYwDP17fVNJsxunDezWXOic5sEZaxB03eFC2WLAzmTNSFLXQ3gObJkxiYmJze4vKyvTli1bJEm33nprs8euu+46vfvuu9q+fbszU+uXv/ylLr74Ys2ZM0ePPfaY/f6jR48qNzdX5513nv2+4OBgzZw5U+vWrdPtt9+uzZs3y2w2NxuTkpKicePGad26dTr//PNbnbO2tla1tbXNnqckmc1mmc2tR6AB9A22cwDnAgCcDwDYcD5AX7ViT459e+bwuC59B04Z0FSmauPRQl0xMcmla/OW3nI+2JddZr8gNTwh3Ceez9jkCB04WSFzg6HdmcWa0Af6ZXjTwcaLk0EBfkqKCPSJz4AknT4wWp/tzJUkrTmYp9GJ4V5eUftceU6oMTfoaGOm4fD+4TIsDTp/dIKW3DJZ//PGNpVUm7Uvt1zz/rZWL914qkY3lgNcc6hQOzKtgawxyRGaMTjaZ95PTwj2k8alRGp7ZqkO5VUou6hCCRHB3T7e6gNN5TynDer+a/nj01L14rdHVG8x9Ob3J3T7mYMUGtRUHm1bRom9jOP41EiNSgzrU++bp6VEBCrQ3yRzg6GDJ8tlNpu1yqF06/QhMa2+/p19T5wKztjKhP2wFNjatWvV0NCggIAAzZo1q9ljaWlpkqSioqJuz/vWW29py5Yt2rhxY4vHcnOtJ+IfBowSExN1/Phx+5igoKBmGTe2Mbb9W/PEE0/okUceaXH/ypUrFRbm3VReAL5hxYoV3l4CAB/B+QCADecD9DXv7vGTrVCHf94+ffbZvk7vW9cg+Zn8ZTFM+nZPpj4LOt7xTj1ITz8fbMw3SbJeJGwoytRnn2V4d0GS/Eqa1vTW8nXKTOp7Ja08pcEiHS3wl2RSXFCDvlj2ubeXZFdXLdkuc378/X6llu316no6yxXnhIwKyWJYn3u4uVSfffaZ/bE7Rkr/2uuvwlqTTpbV6up/rdP8ERaNijb0/O6mc/XUiBJ9/rnvvJ+ekmA0vQb/Wvq1To3v3vnDYkir91u/G2EBho5tW6MTTuQlTIz10+YCPxVXmfWnN5ZremLTut441LTmcSHFzd5vuEdckL9yq006kl+hjz/9TCt2Wt/rAJOhwn0b9NnBlvtUVVV16thO
BWeioqJUVFSk7OzsZvd/8803kqSJEycqPLz1SHVISPdqsWVkZOjXv/61li9f3u4xTKbmqbWGYbS474c6GvPggw/qnnvusd8uKytTWlqaZs+erbi4uDb3A9D7mc1mrVixQnPnzlVgILU+gb6M8wEAG84H6Isqa+t134aVkgwlRQbrpz+a2+H/F/+h17PXa0dmmU5Wm3TGrDmKCet8WTRf1VvOB7uXH5AOHZMkXTpzimZ1sp+QOyWdKNF/X9pgvRGbrosuGuvdBfViR/IrZfl+rSRp0pAkXXTRRC+vqIlhGHrp0CrlV9TpRFWg5p4/W4H+TnVzcCtXnhPe25Il7dwtSTp38ihdNGNQs8cvq6jV7W9s0/bMUtU2mPTS/gDdODVNh8pOSJKGxIfpNzfM8HqZQm+IPFSoL5dsliTVRA3URReN6dZxdmSWqmr995Kks0cm6ZKLnftupGWV6sp/Wo+3qTxSj94yXSaTSSVVZj2wcZUkiyJDAvTg9ec2y6qBe3xetl3Ldp9Ug2FS+LDTVLR+qyTp9MFxmnfplFb3sVXc6ohTwZlx48bp22+/1dKlS3X55ZdLsmbR2PrNzJ49u8U+WVlZklpmtnTW5s2blZeX16zPTUNDg7799lu98MIL2r9/vyRrdkxycrJ9TF5enn3OpKQk1dXVqbi4uFn2TF5enqZPn97m3MHBwQoObpneFhgY2KP/4wqA63A+AGDD+QCADecD9CUbDhTK3GD9he+5oxMVFNT1wMppg+K0I9N6UWNndoXOHd296we+qKefDw7lN/0SeExqjE88lwlpsfIzWX+5vju73CfW1FsdK25qTj4sMdLnXuszhsbro+3Zqqxr0L68Kp2aHtPxTl7minOC4/dybCvfy6SYQL318zP067e2avmek6q3GHr1uxP2x/9n1jCFBPf8IHh3TBuaYC9ZteFYcbffi/XHSuzbZ41IcPo9PXVQvE4bFKONx4p1OL9S646WaNbI/vpwR4Zq6y2SpKsmD1BkePcb0aPzRiRGaNnuk5Kk19c3ZYyePbJ/m+91Zz8DToWQr7jiChmGoddff12/+c1v9Mknn+j666+3lw+75pprWuyzadMmSVJ6enq35jz33HO1c+dObdu2zf43ZcoU3XDDDdq2bZuGDBmipKSkZmmBdXV1WrVqlT3wMnnyZAUGBjYbk5OTo127drUbnAEAAAAAAG1b6VCH/ZxR/bt1jMkDmy6objpe7PSa4Dr7c63l7fsFByglyjcuCoYG+Wt4f2sPjQMny1VjbuhgD3TXobwK+7Zjk2xfccbQpqo237mgwXtPsS+36Rf6o5IjWh0TGuSvf9w4Wbf+IKsmNTpU8yalunN5Pi00yF+npEVLko4WVCqntLpbx1l7qMC+fdYw12QU3nbmYPv24jVHZRjWHjQ2N0wd6JJ50LFhiU3fqzWO7/XweKeP7VRw5vbbb9fo0aNlGIaefvppXX755XrvvfckSZdeeqmmTGmZ1rN06VKZTKYWvWg6KyIiQuPGjWv2Fx4erri4OI0bN04mk0kLFizQ448/rqVLl2rXrl265ZZbFBYWpuuvv16StRzbbbfdpnvvvVdfffWVtm7dqhtvvFHjx4/XnDlzuv16AAAAAADQVxmGoZX78iVJwQF+mj60exctpjgEZzYTnPEZFbX1yiqxXrgckdivy+Xq3Gn8gChJUr3F0L7GABJc77BjcCbBB4MzQ/pecMYwDO3NsX7m4/sFK75f2w3t/f1MevjSsfrDJWNk+/rede4wny7/5gnOfm6q6xq06Zj136q02FClx7mmL/ncMUlKiw2VJK0+WKAl647pSEGlJGnakFifDJD2Vq2d7+L7BWl0UqTTx3bq2xccHKyvvvpKV155pQICAmQYhgIDA/WTn/xEr7/+eovx3377rfbs2SNJmjt3rjNTt+uBBx7QggULdMcdd2jKlCnKysrS8uXLFRHRFOV69tlnNW/ePF1zzTWaMWOGwsLC9PHHH8vfnzp9AAAAAAB01Z6cMuWWWcsenTE0rtt18PtHhtgvSG3PKFFdYwkXeNfBk01Bj5FJrf8631vGp0bZt3dmlnhvIb3c4XxrcMZkkoYktN5j2psGxoUpuTGja9PxItXW9/4sqvyKWhVV1kmSRreRNfND888crM/uOktv/HSqrpmS5s7l9QhnOPyQoDvBmY3HilTXYP136sxhzmdS2Pj7mXTL9KbsmT9+sse+feM0smY8aUhCuH74e4Qzh8XLzwV9mpzqOSNZ+7e89957qq2tVVFRkeLi4tqsKZuWlqaVK1dKkk477TRnp7b75ptvmt02mUxauHChFi5c2OY+ISEhev755/X888+7bB0AAAAAAPRVK/c5X9LMZnJ6jDKKqlVbb9Hu7FJN6gG9I3q7Aw7BGVsZMV9hy5yRpJ1ZpV5cSe9lGIYO51t/tT8gJlQhgb7342aTyaQzhsbp/S1ZqjFbtD2jVKcPjvX2stxqX45D0DSx89/L0cnO/+K/t5iUHq2gAD/V1Vv03ZGuB2ccS5rNcGFwRpKumTJAz644oIraelms7dwU3y9Y541Jcuk8aF9IoL/SY8N0vLCpv9NZw11Tvs5leWvBwcFKTk5ut9nf4MGDNXPmTM2cOdOn0l8BAAAAAIBzvnIIzswe6WRwZlDTBVVKm/mG/blNJa18LXNmTHKk/Bt/wbwjk+CMO+SW1aiitl6Sb5Y0s+lrpc32O5TxG0XApVtCAv01ufEHAJnF1cooqupgj+ZsPUhMJnW7nGdbIkICW2Q3XXdamoIC+nYpOm/44XnPFf1mJBcGZwAAAAAAQN9UWFGrbRklkqz9SNJinau5T98Z3+OYOTOiC7/Q94SQQH8Nb+y/cDCvQjXm3l/OytMOOfab8eFeF2cMbQrOrDtc0M7I3mFvbpl9e5SPBU17EsfPTVeyZ4oq67Q72/oejE2JVGx420kL3XXrjEGyVc8ymaTrTqcUnTc4nvdGJkaof2SIS47rdHCmqqpKVVVtRxSff/55nXXWWRo9erQuuugiffLJJ85OCQAAAAAAfMiqA/kyGkuuzHaypJlkvfgfEWytxL7peLEM28HhNbbgTGx4kOL7uf4CpLNsfWcaLIb25pR1MBpdddghODPUhzNnBsSE2XtWbT1R0usDdbayZv5+Jp8Omvm6ZsGZLmRcubOkmU1abJhunzlUkvSzs4ZoQIxzP35A9zh+v1yVNSM5GZz5+OOPFRERoZSUFJWXl7d4fP78+VqwYIHWrVun/fv364svvtDll1+up556yplpAQAAAACAD/nasd+MkyXNJOuFxlPSoyVJ+eW1yiyudvqY6L7iyjrllddKsmZG+WKp+gn0nXGrQ/k9I3NGaiptVtdg0ZZenHlnbrDYM5qGxIf7ZB+gnmLigGiFNr5+3x0u7PQPAhyDM2e6KTgjSQ+cP1IH/3ShHrxwlNvmQPvOG5ukIfHhiu8XrJ+cMdBlx3UqOPPFF1/IMAzNmzdPERHNU+fWrFmjV199VZIUFhamSZMmKSQkRIZh6He/+512797tzNQAAAAAAMAHmBssWnUgX5IUGRKgyQ4
lyZwxZWBT35lNx4tcckx0jy+XNLMZl+oQnKHvjMv1lLJmUvO+H91p8N5THC2oVF2DRZLv9YHqaYIC/DRlkPXfrtyyGh0r7LjvjGEYWn2wwL7/aQ690lzNZDIp0N/PJwPjfUVUaKC+vGemNvz2XA2MC3fZcZ0Kzqxfv14mk0mzZ89u8diLL74oSUpJSdHevXu1efNm7du3T2lpaWpoaNC//vUvZ6YGAAAAAAA+YPPxYpXXWBuFzxzZXwH+rmlv6xjk2XSs9/76vSfoCcGZ0cmR8m9szEDmjOsdzq+UJMX3C1J0mO+VtXPU3RJVPY1j+b7RyZFeXEnv0NXPzYmiKmWVWLM6TxsUQ+ZSH+DnZ5Kfn2sDZE79F1NenjVtefjw4S0eW7ZsmUwmk+68804NGDBAkpSWlqY777xThmFo1apVzkwNAAAAAAB8wErHkmajElx23FPSo+1NkDf34tJEPcF+h+CMr/5CPyTQ3x44OphXoeq63t1rxJNKq83KbyxrN8SH+83YJEaGaEi89Zft2zJKVFlb7+UVucf+3Kbv5Sgf/V72JLZyeFLnMq5sWTOS+/rNoPdzKjiTn29NW+7Xr/mJec+ePSoosH5AL7vssmaPTZkyRZJ07NgxZ6YGAAAAAAA+wNZvxmSSZo5wvt+MTb/gAPuvwfefLFdZjdllx0bXHDjZVNJqRH/fvQg8PtX6eWmwGNrjkFUA5/SkkmY20xqzIOothjb10uDuPsfgDJkzThufGqV+wQGSOtd3xlP9ZtC7ORWc8fe3pmsVFTWv/bp69WpJUkJCgkaNat6oKCbGmpZcU1PjzNQAAAAAAMDLMoqqdLDxwu2ktGjFhru23JGttJlhSNtOlLj02OgcwzDsZc0SI4MVFRbo5RW1bfyAaPv2Lkqbucxhx+BMD8ickX6QBdFLS5vtawxARoQEKCUqxMur6fkC/P10WmPfmYKK2mZByR9qsBha9//Zu+/wqOqsD+DfO5OZ9N57IyFAQihBpIM0ARUrtrXrrn1XXdvuvru6rmXXtbvr6q5iQeyoqIDSWwDpSYCE9N57z2Tmvn/cyc1ESsr0yffzPD7OTG45MJNL8jv3nKP/XHm7qjAhzPuc2xKdj1HJmfDwcADAsWPHBrz+ww8/QBAEzJkz54x9mpulfxwDAphRJCIiIiIiIrJn2wa0NDNd1UyfAXNnHPTud1tX29qNpg6paslW5830SQnvXyDNKGNyxlTya+2wcmaYLarsTXOHBhXN0o3v40K8OCjeRGbG969Xn+9zc6KiGc2dGv0+/vK8K6LhMio5M2fOHIiiiDfffFNuY3bw4EFs2rQJALB06dIz9jl16hQAICQkxJhTExEREREREZGVDUzOBJv8+GkxfvLjw8UN59mSzGXAvBkbT84khXjCSb9IysoZ0zGsIIi3k+RMoKczEoOlWDPLmhyuLWJ2VX/bPludA2WPZsQPreLKcN7M7AQWINDIGZWcuffee6FQKFBYWIi4uDikpaVh3rx56O3tha+vL6699toz9tm2bRsEQcCkSZOMOTURERERERERWVFHT698Z3GotwvGhZp+gTDM2wUhXlK7nmMlTejV6kx+Djo/w6HjiTa+COyiUsrVPbk1rejoccxB8JaWp6+ccVMr7ap9Vl9rM50IHCx0rOTuwHkztv19aU/GhXrBy0WaO7O/oB463dnnznDeDJmKUcmZKVOm4MUXX4QgCGhra8ORI0fQ1dUFlUqF//73v/D0HHhxaG5uxg8//AAAWLx4sTGnJiIiIiIiIiIrSs+rR0+vlCyZPzbILG11BEHAVP0MgPYe7YAFSbKM3Or+qglbr5wBgIkRUmsznQicqmwZZGsaTJdGi9KGDgBAfKCHXbXPGmoVhD0akJwJ8bJiJI5FqRAwXZ/Ua+zQDKgc7NPZo8WhIqnNZoSvK6L83CwaIzkWJ2MP8NBDD2HRokX48ssvUVVVhdDQUFx//fUYO3bsGdvu2LED06ZNAwAsWrTI2FMTERERERERkZVsyzHvvJk+adG++CGjEgBwuLgRyeEcvGxJhouT9jBvJDncGzhYCkCaOzM12m+QPeh8iurb0Vc8YA/vv6Hpsf4QBEAUHW/uDNuamc+MOH9sPlkNAEjPr8e40IHJr0PFDejRV3HOHhNgVwlLsj1GJ2cAICUlBSkpKYNut3LlSqxcudIUpyQiIiIiIiIiKxFFEdv182bUTgrMGuM/yB4jlxZtOHemEbfMjDHbuWggnU5Erj45E+nnCndnkywjmVVf5QwAZHLujNEGzJsJdLdiJMPn665GUogXTlW24GRlC5o6euDjprZ2WEbT6US53WCUnxs87OD70p7MHDOw4uqO2bEDvr7HYN7MLLY0IyMZ1daMiIiIiIiIiEafU5WtqGzuAiDdZeymNt/iYFKoJ1xVSgBScoYsp7ypE+09WgD20dIMkKoIVErpTvbMMiZnjGWYnLG3yhkAmKlvbSaKwAEHmTtT2tiBDv33ZRKrZkwuMcgTfu5SEu9AYT20v5g7syePyRkyHSZniIiIiIiIiGhYtluopRkAqJQKTIr0ASAlCyqbO816PuqXW9Pf0izRTpIzzk5Kuc1Tfm0bOnp6rRyRfcuvbZcf22NyZkac482dOVVpOG/GPr4v7YlCIeDCOKlis7WrFycr+lvINbT34IT++YQwLzmJQzRSJr+1paioCHV1dejs7IQoiufddu7cuaY+PRERERERERGZ2c6cWvmxuZMzAJAW4yvPjDhc3IhLJrqa/ZwE5FT1V03YS3IGAFLCvZFV3gKdCJysaEFaDOfOjFRf5YxSISDKz77amgHABXF+UAiATnSc5IzhvJmkX8xDIdOYEeePDZlVAIB9BXVI0bdLTM/vr5qZzaoZMgGTJGdycnLw3HPPYf369WhpaRl8BwCCIKC3l3cvEBEREREREdmTXq0OGeVNAKQ5JJF+bmY/55RoX/nxoaJGXDIxzOznJOB0tf1VzgBASrgPPkEpACCjrJnJmRHS6kQU1ErJmWh/N6id7K8Bj5eLCsnh3sgoa0ZOdSvq27rh7+Fs7bCM0jdvBmDljLnMiO+vuErPr8ev58YD4LwZMj2jr6rffPMNpkyZgjVr1qC5uRmiKA75PyIiIiIiIiKyL3m1bejS6AAAEyN8LHLOKVG+EKQxIpw7Y0F9i8BKhYA4OxoGnxLuLT/OKufcmZEqb+xEd6/0vT4m0P5amvUxbG22v8D+585k678vXVQKRPvbz/elPYkP9ECgp5TEO1jYAI1WB1EUsVufnFErFZjGpC+ZgFHJmdLSUvzqV79CZ2cnwsLC8Oqrr+Kdd94BIFXGbN26FV9++SWeeOIJhIVJd7XMnj0bW7ZswbZt24yPnoiIiIiIiIgsKqO0f7F7osEiuDl5u6qQGCTdIX6ysoVzRCxAqxORp6+aiPF3g4tKaeWIhi4xxAMqpZTNy2ByZsTya/vb2tnjvJk+hlUQ+wrqzrOl7evo6UVRvTQHaGywJ5QKwcoROSZBEOSkXnuPFpnlzShp6EB5kzTzLC3GF65q+7kmku0yKjnz+uuvo6OjA56enj
hw4AAefPBBzJgxQ/76ggULcOWVV+K5555Dbm4urrvuOuzduxfvvvsu5s2bZ3TwRERERERERGRZfS3NAMtVzgD9rc20OhHHSpvOvzEZrbi+HT36qomxdtY6ydlJiaQQaRZHfm0b2ruZzBuJvnkzgFRJYK+mxfjBSZ/EsPe5M6er29DXjKjvM07mMSCpl1+PPXlsaUamZ1RyZsuWLRAEAffee69cGXMurq6uWLNmDSZPnoxPP/0UX331lTGnJiIiIiIiIiIryCjrr0RIDrfc4mCawdyZw0VsbWZuhvNmEoLsKzkDAMn6qi5RBE5UDG0+Mg1kmJyx58oZd2cnTNQPdM+vbUd1S5eVIxq57Mr+z7K9JU3tzcB2ePXYa5Ccmc3kDJmIUcmZoqIiAMDMmTPl1wShv5yut3fgnQkKhQIPPvggRFHEe++9Z8ypiYiIiIiIiMjCunu1OKVfHIwLdIeni8pi506LMUjOlDA5Y245Vf0L8/a4CNy3GA8AmWxtNiJ5Bm3N4u04OQMMrILYX2C/1TN982YAICnU/r4v7Um0vxtCvV0AAAeLGrA3T/rceLk4yclfImMZlZxpb5d6HEZGRsqvubm5yY+bm8/8x2/ChAkAgOPHjxtzaiIiIiIiIiKysNNVbdBopZ46qRZsaQYAUX5uCPBQAwCOFDdCpxMtev7RxrByJjHY/haBUwwWTzPLmqwXiJ0SRVGunAn1doGHs5OVIzLOjLj+Sgd7bm2WXdVfOcO2ZuZlOHemS6NDc6cGADAzPoCzfshkjErOeHtL/9B1dfWXA/r792ei8/Pzz9inpUW6iNTV2fcALiIiIiIiIhpcd6/W2iGQCR03WOROsfCdw4IgYKq+tVlLVy9yDVoukenl6JMzaqUCMf5ug2xtexKDPaFWSsterJwZvvr2Hnkx2p7nzfSZGu0LlVI/d8ZOK2dEUZQrZ4K9nOHnrrZyRI7PsOKqz+wEtjQj0zEqOTN27FgAQEFBgfyap6cnoqOjAQA//fTTGfts2bIFAODj42PMqYmIiIiIiMiGiaKIBz85igl//hHv7y20djhkIpkG82ZSIy3f1iUt2k9+fLiYrc3MpbtXi6I6qVtKXKA7nJRGLR9ZhdpJIbd9KqhrR1t37yB7kCFHmTfTx1WtxOQoKblbXN+B8qZOK0c0fNUt3WjqkBJmrJqxjLMmZzhvhkzIqH9dZ8yYAQDYv3//gNcvueQSiKKIF198Edu2bZNf//LLL/Hqq69CEATMmjXLmFMTERERERGRDStr7MT64xXo1Yl45odTAxb1yX71Vc4oBGB8qOWTM1Oi++fOHCpusPj5R4vCunb06tvG2eO8mT591V2iCJxw4OqZLo0WVc2mHXKfZfD3Ze/zZvoYDni3x9ZmpwxbmnHejEVE+Loh0s9Vfh7u44poO6wkJNtlVHJm+fLlEEUR69atg1bbX6r+6KOPws3NDW1tbVi8eDECAwPh5eWFa6+9Fp2dnVAoFHj00UeNDp6IiIiIiIhsk+HCuVYn4pEvjrHFmZ3r7NHKrcQSgz3hqlZaPIbkcC+onaSlDFbOmE9OlX3Pm+kzYO6MgyZnOnp6sfTVXZj5wla8t8c0VYrF9e14bUuu/NzSLQzNxbAKwh6TM9mV/d+XSXacNLU3hkm92WMCIAicN0OmY1RyZv78+fjLX/6C2267DeXl5fLrUVFR+OKLL+Dt7Q1RFFFfX4+2tjaIoghnZ2f897//xYUXXmh08ERERERERGSbDhYNXDg/Xd2GVw0W+8j+nKxsgVZfTTExwjqLtc5OSqTqz11c34Ha1m6rxOHoTlf3LwKPtefkTITjJ2d259ahuL4DOhF45oeT2J5dY9TxujRa3LPmCFr1beBWTAyVv+fs3eQoHzjrk7v7C+ohiqJV4ujV6nCwqBFdw7xfIduwcoZtzSxm0bhg+fGSCcHn2ZJo+JyM2VkQBPzlL38569eWLVuGvLw8fPHFFzhx4gR6e3uRkJCAVatWITw83JjTEhERERERkY07rE/OKBUClIKAHq0Ob+/Mx+LxwZgS5TvI3mSLMvQtzQAgJcLHanFMifaVk3+HixtxcXKI1WJxVKer++eN2HNbs8RgT6idFOjp1TlsciY9r05+LIrAg58cxdf3zRrxnJinvzuBk5VSEiAu0B1/v2qiw1QKODspMTXaF+n59Shv6kRpQyeirNCi6sl1mfjicBlCXJVYfrEWKpVqSPv1VbQ5KQTEBzpGqzl7sHh8MP5x9UQoBAEXJQVZOxxyMGad6Obn54ff/OY3eP311/Hvf/8bDz30EBMzREREREREDq65Q4Mc/Z33E8K88NtFCQAAnQj8/ovj6NKwvZk9MpwbZM076dOi/eTHhzl3xiz6KmdcVUqE+7gOsrXtUikVGBcqVRgU1LajtUtj5YhMb+8v2nO1dvfirg8Poblj+H/Wrw6X4ZOfSwEALioF3rpxKjycjbqv2+bMNGxtVlB3ni3N41RlC744XAYAqOoU8Mb2/CHt19OrQ56+reSYIA+5vSOZnyAIWJUWiaunRjhMopJsx7C/k6urq/HYY48hJSUFXl5ecHd3R0JCAn7961/j1KlT5oiRiIiIiIiI7Mjhkv4F87RoP/xmbhxSI30ASAuk//wxx0qRkTEy9JUHKqVg1WqKqdH9lVecO2N6HT29KGnoAAAkBntAobDvxciU8P72TycqWs6zpf2paemSF+zHh3rJc0gK69rxwKdH0avVDflY2VUt+OM3mfLzZy9PseuqqXO50GB+yM+Flr9+vLFtYHvPd/cW4+QQPpf5tW3o1beV5LwZIscxrOTM/v37MWHCBLz00ks4efIk2tra0NnZiYKCArz77ruYNGkS1q5da65YiYiIiIiIyA4YzpuZFuMLJ6UCL10zUb7T9929hfi5kBUP9qStuxf5tdIicFKIF5ydlFaLxc9djbgAdwBAVnnLqK7E2pZdjQ/Si9Cunw9iCnk1begbxZFox/Nm+kwM95EfG1Z/OYJ0g6qZBUmB+O/NafBzVwMAdp2uxQsbs4d0nLbuXtz78RF0aaRkznXTInHV1AjTB2wDUiK85X+LDlm48i67qgUbMqsAAH0FGFqdiCfXZcjzvM63b5+kUM6bIXIUQ07OtLS04Oqrr0ZDQwNEUYQoivD390dwsDQISRRFaDQa3HHHHaygISIiIiIiGsUOFfUveE2NkaocxgR54vdLEgFIcxEe/fI4OnpMt6BM5pVV3iwv2E+0geHgfdUzPVrHnSUymLyaVtzxwSH8Zf0JLH99t8mqiLL1cy0A+5430yc5vP/z6miflb0G82ZmxQcg0s8Nb904BU76aqf/7SnEl/oWWuciiiIe/yoDBbXtAKQKnKcum2C+oK3M2Ukpt2Usru9ATUuXxc79xtY8+fEjixIQ7CpdVI+XNeP99KLz7ptd6Vjfl0QkGXJy5r333kNFRQUEQcDll1+OvLw81NbWorKyEpWVlXjggQcAAD09PXjppZfMFjARERERERHZri6NFsdLpQXQaH83BHm6yF+7Y3Yc0vSL6sX1Hfj7EO/qJuvLKGuSH9tCciYtp
r+12aGi0dnaLD2/Xk6YFdd34Jr/pOPln3KgGUYrK0NanYj39xbime9Oyq8lOEDlTEJw/3wOR0rOiKIoV844OykwRX9tnR7nj6dX9idX/rAuE0dKzv098uG+YvyQUQkA8HRxwlu/mgIXlfUq4ywhLaZ/btUhC7VGzKlqxQ+Z0t9zgIczbpkRhevi+qv+XvopB2WNHefc3zBpOi6ElTNEjmLIyZkNGzYAAC688EJ89dVXiIuLk78WFBSE1157DbfddhtEUZS3JSIiIiIiotElq7wZPfrFYcPB7QCgVAh48ZpUuKikX0U/2FeM9HzLD2Sm4cswaAeVYtAmylqmGny2Dlu4NZGt6EuC9tGJwOvb8nDVW+lyC7qhOlbahMve3IOnvjuJVn2LtBAvlwHzfeyVSqnAeH0bqMK6drR0aawckWmUNHSgvKkTgJSsNEyo3Dg9Gr+6MAqAVF32m48Oo6r5zAqRY6VN+NsP/cm4F69ORbS/u5kjt75pVkjuvr61f9bM3fPi4KJSIs4LuOECqX1cR48Wf/omC6J49vZmfW3NfNxUCPZyNn/ARGQRQ07OZGVlQRAE3HfffRCEsw+D++1vfwsAqK6uRn19/Vm3ISIiIiIiIsf1y3kzvxQb4I7HL06Snz/2ZQbaTDgvg8yjr+LA2UmBxGAPK0cDxAe6w9dNBUC68103yLwGR3RcX82kUgp4cGEClPpWVhllzVjx+m58tK/onAu9fZo7NPjD15m44t97ccJgKPmqtAhs+O0ceDg7mS1+S0oxaG2W5SDVM3vz+tfdZsYHnPH1v1w6ARfGSUnM2tZu/PqjQwPmMzW29+C+j49Ao5U+I3fNicXFySFmjto2TI0yrJwxf3I3p6oVG7L6q2ZunB4tf+33ixPkZMuOnFqsP15xxv4N7T2obukGACSFeJ5zXZaI7M+QkzMNDdLFKikp6ZzbjBs3Tn7c2Dg6y4qJiIiIiIhGM8MqBsPWMYZumRGD6bHS18oaO/HcBs4ttWVNHT0orpfa7UwI84KTcshLCWYjCIL8+Wrq0Ay7UsTetXX3yn/mpBAvPLw4EevumYm4AKnqoUujw/99ewK3vX/wrDM1RFHEl4fLcNFLO7D2QIncHi0pxBNf3j0D/7g6VR4s7whSIhwvOWNYdTgz3v+Mr6uUCvz7xqmI8HUFICXtnvgqA6IoQqcT8dDnx/orb6J98djF517vczTebio5yXyiogXtZr5B4PVtufL32N3z4uCq7q9y8nRR4a8rk+Xnf/3uJBrbewbs31c1A0jf70TkOIb8E1VPj3RhcHFxOec2KpXqjO2JiIiIiIhodNDpRLl/v6+bCvGBZ2+Po1AIePHqVLjpF6jWHijBrtO1FouThsdwTsfECB/rBfILA1oTWWhuhK3ILGuWF3tTI731//fBDw/OwU0X9t+VvyOnFktf3YVNWVXya6erW3Ht2/vx+y+Oo16/COyuVuJPK8bh+wdmnzOpas8MK2cMW/TZK51OxD79vBlPZ6cBfz5Dfu5q/PfmNPla+82xCry9qwBv7czHjhzpmuvvrsabN0yBygaSrpbU9znX6kQcK20y23lOV7digzxrRj2gaqbP0gkhuHiCVLVU396DZ39xw0J2pcG8mVD7nwNFRP1G15WXiIiIiIiIzCa/tg1NHdI8h7QYv/O2Xonyd8Mflvd3X3j8qww0dzrGLAhHY7iYPTHi7IvA1mCYRDhYNLrmzvS1NAMGJsxc1Uo8c3kyVt82DYGeUqukxg4N7l5zGI9+cRzPbzyF5a/txs8Gf18rUkKx5ZF5uHNOnE1URZlDQpAHnJ2kP5sjVM7kVLfKibXpcX7nfd/GhXrh5VWT5Od/35SNl37KAQAIAvDadZMR4n3uG7EdlWFy15zXj9e39lfN/GZu/ICqGUNPr5wATxepjeCXh8uwN6+/MsqwcmYsK2eIHIpj/qtLREREREREFmc4byZtCIPEb5wehdljpFkJlc1d+Nv3JwfZg6whY0AiwHaSM8lh3vKCu6WGetsKw/ck9SzVTAvGBuHH383F0gnB8mtfHC7D2zsL0KufzxPt74b3b5uGf904BaHeruYO2aqclAqMD5MWtYvqO+w+EZyef/55M790cXIIHlqUCAAQRaBvRNPvFiZidsLg+zuitGiDuTNmun7kVrfiB33VjL+7GjdeGHXObYO9XPDEsv7Wcn/4OlOeEZRTJVXOCAJsYuYXEZnOsCe7/elPf4KPj4/R2wmCgHfffXe4pyciIiIiIiIbdaho8HkzhgRBwN+vnoiLX9mF1u5efHG4DJekhmFeYqA5w6RhytRXzrirlYgLsJ2FQbWTAqmRPvi5sAElDR2obulCsNfoqAA4Xiq9J25qJcYEnf098XNX4z+/moovD5fh6e9Ook0/V0PtpMC98+Nx97x4uKjOfhe/I5oY7o2jJU0AgBPlzZg5xn6TEukGVRWzhvjneOCiMciuasFGfYu7uYmBeOCiMWaJzx5E+LoixMsFVS1dOFLSiF6tzuSVY69vy+uvmpkXBzf1+Zdhr58WhW+PVuDnogYU13fg1S25eHTpWORUS8mZGH/3QY9BRPZl2N/R33777Xm/3le2Pth2AJicISIiIiIiciAHi6XkjLOTAsnhQ2u9Eu7jiv+7ZDwe+yoDAPBBehGTMzaktrUbFc3SQPnkcG8oFOduVWcN02J88XOh9Lk7VNSIFRNDrRyR+dW1dcuD3JPDvaE8z3siCAKuSYvEhXH+ePHHHCgVAh5cmIDYgLPPg3JkKRE+AIoBAD8XNdhtcqZXq8MB/Wc+wEM95EoKhULAS6tS4eOmQpdGhz9fMt7mvp8tSRAEpMX44vuMSnT0aHGqshUpJqwMzK1uxfcZFQCkqplfXXjmrJlfUigEPHdlCpa/ths9Wh3+u7sAyeFe6NLoAABJIZw3Q+RohpUSFkXRZP8RERERERGR46hu6UJpg7RgnBrpA2enod+Rf/XUCITqZx7szq1FS5d9txxyJJnlTfLj1Egfq8VxLqNx7szAlmZDW0yO9HPD69dPxivXThqViRkAmBHvLz/ek1t3ni1tW0Z5s1wFNSM+4LyzvX7JTe2E56+ciFeunQRfd7W5QrQb0wyuH4eKTXv9eMOgaubXcwevmukzJsgD9y2QKpq0OhGPfZkhfy2J82aIHM6QK2cKCwvNGQcRERERERHZMcOe/YaDlodCoRCwdEII3k8vgkYrYtupGlw+OdzUIdIIZJT1D09PCbedeTN9pkT5QhCkORqmXly1VX0tzQDbTJjZqnAfV8QHuiO/th1HS5vQ2qWBp4vK2mEN24CWZgYJJxq+NIN/qw4VNeK2WbEmOW5eTSu+01fN+LmrcdOMwatmDN0zPx7fZ1Qgt6YNHT1a+fWkUFbOEDmaISdnoqOHdyEhIiIiIiKi0ePgMOfN/NLylFC8n14EANiQWcnkjI0wTM5MNGHLH1PxdlVhbLAnsqtacbKiBW3dvfBwduyZDMcHVM74WC0OezQnIRD5te3Q6kTsy6/Hkgkh1g5p2Pbm1cuPZ8bbZ2s2W5EU4gUPZye0dffiYFEDRFEc
ViXSuYy0aqaP2kmBF65KwdX/2QfD5kNsa0bkeEw76YqIiIiIiIhGpb6qBUGQqhmGa2q0LwI9nQEAO07Xym17yHpEUZSTM96uKkT5uVk5orPra02kE4GjJY2DbG3fDN8TXzcVInxdrRyRfZmT0J/M2G2Hrc26NFoc1n/GI3xdEeVvm9+T9kKpEDA5ygcAUNPaLbfmNEZeTRvWHzeomhnCrJmzmRrth19N79/XTa1EpC/fbyJHw+QMERERERERGaWtuxcnK1oAAGODPeHtOvxWQUqFgIv1d7H39OqwPbvGpDHS8FU2d6GurRuAVDVjijvKzeGXrYkcWVljJxraewBILc1s9T2xVRfG+UOllP7OdufWWvz8xfXt2JhZiaaOnhHtf7i4ET290nD4WayaMYlpJp5b9ea2XLna5a45cXA3opLvsYvHItxHSsDOTQiEQsHvdyJH49i1vkRERERERGR2R0saodMvRqUNc96MoWUpIfhofzEAYGNWJS5NDTNFeDRCtt7SrE+aGYd62xrDlmYT2dJs2NydnTAlyhcHChtQVN+BkvoOs1ef1LR04buMSqw/XoHjpU0AgPGhXlh//yw4KYd3z/Reg3kzM8dw3owpDEjuFjfgqqkRIz5Wfm1/1Yyvmwo3D3PWzC95uqiw/v5Z2JFTiwVJQUYdi4hsE5MzREREREREZJSDBtUK00Ywb6bPBTF+8HNXo6G9B9uza9HZo4WrWmmKEGkEMsub5Mcp4T5Wi2Mw4T6uCPN2QUVzF46WNEGj1UE1zEVve2GYMEu14YSZLZubGIgDhVISb3deLW70N/2M5eYODTZmSQmZfQX1A+aGAMDJyhZ8c6wCVw8zEZCe3z9vZkY8kzOmMCnSB04KAb06ccC/ZSPx5rY8+UaFX8+NN6pqpo+/h7NRCSMism2O+dMKERERERERWcwhg1YwaUYkZ5yUCiydEAwA6NRosfM0W5tZk71UzgD9n7uOHi1OVbZYORrz6au8AFg5M1ID5s6cNt3cmc4eLb47XoE7PziEtGc344l1mUjPH5iYGRPkIT9+dctpuUXZULR0aZChr5xKDPZAkKeLqUIf1dzUTpgQLl3f8mra0Ng+spZz+bVt+PZYOQDTVM0Q0ejA5AwRERERERGNmEarw9GSJgBAmLeL3B9/pJYlh8qPN2RWGXUsGjnDwfMBHs4I9bbtheBpBq2JjL373VZpdSIyy6X3JNzHFYGezlaOyD5NCPOGr5s0Fys9vw692qEnSM6mqrkLD312DFP/thkPfHIUW05VQ6Ptz8jE+LvhwYvGYMvDc7Hl4XlycqissROfHSwZ8nkOFDTIVRkzOW/GpKZF918/DheP7Prx9s58+f25a65xs2aIaPRgcoaIiIiIiIhG7FRlCzo1WgDGVc30mRHvD29XaeF066lqdOmPTZZV0tCB5k4NAKlqxtYHzw+YO2OCod62KL+2DR090veDrVcy2TKlQsCsMVJyo6WrFxnlzYPscX5//jYLXx8tl98bAAjydMYds2Px7X2zsP338/HwkrEYE+QJAPj9krHydm9sy0Nnz9Cucen5BvNm2NLMpAznzhwcwdyqxvYefHNMmjXj6eKEm2fEmCo0InJwTM4QERERERHRiA2cN+N7ni2HRqVUYMl4qbVZe48Wu3NN13aIhs6wpVlKuO0nAhKDPeHpIt2pfrCoEeIvh3w4AMOWZqmRPlaLwxGYqrVZXVs3tmVL7RddVUpcNy0Sa++ajn1PLsT/XTIeqZE+ZyQ2UyN95GtcTWs3PtpfNKRzpedJ82YUAjA9jskZU5oabZjcHX7lzKcHS+UWdavSIuHBqhkiGiImZ4iIiIiIiGjETDVvxtCylBD58casSpMck4Yn06CaIDXS9pMzSoWAKVFScrCurRvF9R1Wjsj0juvnjQCsnDHW7IRA+fHu3NoRH2f9sQr06ntZ3TIzBi9cNREz4wOgVJy/0uyRJWPRl7P59458tHZpzrt9bWs3cqpbAUjJ0r7qQjKNQE9nxAa4AwAyypqGVbGp1YlYs78YACAIwE0XctYMEQ0dkzNEREREREQ0IqIoypUzns5OSAz2NMlxZ40JgKf+zuPNJ6uHNTSbTMOwSiMl3MdqcQzHwLkzjtfarK+aSRDso5rJloX7uCI+UFqMP1rahJZBkiPnsu5omfz4qinhQ95vbIgnVqaGAQCaOjR4d0/hebffV1AvP545hvNmzCFNP3dGoxUHVA4OZsupapQ3dQIA5icGIkaf5CEiGgqjkjP79+83VRxERERERERkZ4rrO1DX1g0AmBLtO+jd4kPl7KTEIn3bn9auXuzNZ2szS9LpRGTpK2fCvF3sZvC8YeXWSId626ruXi1OVbYAAOIDPeDpwsoJY83RV89odSL25dcPsvWZcqpakVUuvScTI7yRMMzk9O8WJcrXzP/tLkRje885t03P678GzopncsYcphlcP4aT3P0gvUh+fMvMGBNGRESjgVHJmZkzZ2LChAl46aWXUFNTY6qYiIiIiIiIyA4YLmCZYt6MoWXJBq3NMtnazJIK6trRrh9SnmJH7bNSI3ygUkqL3Y5WOXOqshUardQ+iy3NTGNuosHcmRG0NvvqiGHVTMSw948JcMeqtEgAQFt3L/6zK/+c2/YlqNVKBaZGm/ZaS5I0g3/DDg3x+pFb3Yp0fWIvNsAdcw3a5RERDYXRbc2ys7Px2GOPITIyEldeeSW+++476HQsOSciIiIiInJ0hoOTTTVvps/cxEC4q5UAgJ9OVkOj5e+ZlpIxYLaJj9XiGC5XtRLJ+nZf+bXtqNdXdTkCw/ck1Y7eE1s2PdZfTubtyR1edV6vVoevj5YDAFRKAZfqW5QN14MLx0DtJC3NfZBehJqWrjO2KW3oQGmD1DZrSrQPXPXXRTKt2AB3+LurAUiVdzr9LKHz+WBfkfz4pgujoTBR9SgRjR5GJWdee+01TJo0CaIoQqPR4Ntvv8Xll1+OiIgIPPnkkzh9+rSp4iQiIiIiIiIbc6hYurtYpRRMvmDsolJiQVIQAGkmw4ECx6qEsGWG8xbsrUpjmoO2Njte2v+epEb6WC8QB+Lu7IQpUVK1RFF9B0rqO4a87568OtS2Ssm/BWOD4Kdf1B+uUG9XeYB8l0aHN7fnnbFNej5bmlmCIAhy9UxLVy9ya9rOu31LlwbrjkgJOje1ElenDb96iojIqOTMAw88gMOHD+PYsWN44IEH4O/vD1EUUVVVhX/84x8YN24cZs+ejdWrV6O9vd1UMRMREREREZGV1bd1I79W+j0vOdzbLHdzL08JlR9vyGJrM0sxrNKwt8HzaQYtnw45UnJG/56olALGhQ5vtgmd29zE/jZUu/OG3tqsb1EeAK6aatyi/D3z4+Gmv35+8nMJShsGJon25vXPw5k5xt+oc9H5pUUPfe7Ml4fK0KFv/3jllHB4cQ4UEY2A0W3NAGDixIl47bXXUF5eji+//BIrVqyAQqGAKIrYt28f7rzzToSGhuLOO+/E3r17TXFKIiIiIiIisiLDqoRpJm5p1mf+2EC4qKRfW3/MqoJ2CG1myDi9Wh1
OVEhDzqP93eDjNrKKAGsxnMfhKHNn2rp7kV8r3cWfFOIFZye2tTKVOQkGc2dOD621WUuXBj+eqAIA+LqpsGBskFExBHg44/ZZsQAAjVbEa1tz5a+JoijPNHFXK+2qzaA9GurcGZ1OxIcGLc1umRFjxqiIyJGZJDnTR6VSyXNnSktL8fzzz2Ps2LEQRRFtbW1YvXo15s6di3HjxuHFF19EdXW1KU9PREREREREFmJYlWCuAdVuaid54bO+vQc/FzrGYrsty61pQ3evNN/H3qpmAMDfwxlxge4AgKzyZnTq72y3Z5llzRD1ecnUSPt7T2zZhDBv+LpJFQ978+vQO4TZVhsyKuXvkctSw+SZMca4a24cvFycAADrjpQhT99SK7emDXX62UnT4/yhUpp0GY9+YUKYt3xDwMGic1fe7cqtRZG+Dd7MeH8kBLOajYhGxmxX9ZCQEDz++OM4efIk9u7dizvvvBMeHh4QRRE5OTl44oknEBkZicsvvxybNm0yVxhERERERERkBoZVCWlmSs4AwDKD1mYb2drM7AxbmtnbvJk+0/StiTRaUW4HZs+OD3hPfKwWhyNSKgTMGiNVz7R29eK4wbylczFlS7M+3q4q/GZePABAJwKvbJFmOO/N66/mmRnPlmbmpnZSYJJ+plN5UycqmjrPut0H6UXy41tmxpg/MCJyWBZJuff09KC7uxtarRaCIACQSjN7e3vx3XffYcWKFZg8eTL2799viXCIiIiIiIjICJ09WmSVS4uYcYHu8PdwNtu5LkoKku9M35RVBR1bm5lVhsHitL0mAgxbEx12gLkzhgmzVDt9T2zZ3ASDuTO55587U1LfgZ/1iekxQR4mrS67bVYMAjykNoI/ZFQiq7xZbmkGADPjA861K5mQYZvOs82tKqprx47T0uck3McVi8YFWyw2InI8ZkvOlJSU4JlnnkF8fDwuuugirFmzBh0dHVAoFLjkkkvw2Wef4U9/+hMiIiIgiiKOHz+O+fPn48CBA+YKiYiIiIiIiEzgeFkTNFopSTIt2jzzZvp4ODvJi6c1rd04UmL/i+22rC85IwjAhDAvK0czMoaLq44wd+Z4qfSeuKmVGBPkYeVoHM9sg7kze3LPP3dm3dEy+fFVUyLkG5BNwU3thPsWjJGfv/hjDvYXSMkZP3c1kkLYOssS0gyTM2e5fny0v1huM/irC6OhVJjuM0BEo49JkzNdXV1Yu3YtFi9ejLi4ODz11FMoLCyEKIqIjY3F3/72N5SUlGD9+vW45ppr8Ne//hWFhYVYs2YNAgIC0NPTgz//+c+mDImIiIiIiIhMzHDByrBKwVyWp4TIjzdkVpn9fKNVd68W2VUtAIC4AHd4uqisHNHIRPu7IUBfzXW4uBFaO662qmvrRrm+tVJyuDcXgs0gzMcV8fo5RUdLm9DSpTnrdqIoyi3NBAG4fHKYyWO5YXoUwrxdAAA7T9eitasXADAjzh8KvvcWMSXKB31/1Yd+MXemvbsXnx8qBQA4Oylw3bRIS4dHRA7GJMmZAwcO4O6770ZoaChuuukmbNu2DTqdDmq1Gtdeey02b96MvLw8/OEPf0BoaOiAfRUKBW644Qa8/PLLAIDDhw+bIiQiIiIiIiIyE8NWL4ZVCuaycFwwVEpptWxjViVbm5lJTlWrXBFlz+2zBEHANH3SsLWrF6erW60c0cgNbGlmnzOA7MEcfXWeVidin0ErMUMHixpR0iANgZ89JgCh3q4mj8PZSYkHFyac8frMMZw3YymeLiqMDZGqBrOrWgYk6745Vi4nzFZOCoOvu9oqMRKR4zAqOfPiiy9i/PjxmDlzJv773/+iubkZoihi/PjxeOWVV1BeXo5PPvkECxcuHPRY06ZNAwA0NrJEnYiIiIiIyFZpdaI8xyPAwxnR/m5mP6e3qwqz9UO7K5u7HGLIuy06UNBfEZVi54mAqdH9FV1na01kL/pamgFAqn5QOZne3MT+1mbnmjuz7sjAlmbmctXUCMQGuA94bRbnzVhUX3JXJwJHS5oASJVTH6QXydvcPCPG8oERkcMxKjnz+OOPIycnB6Iows3NDbfffjvS09ORmZmJ3/72t/DzG/odVE5OTsaEQkRERERERBZwurpVvnM4LdrXpDMXzmdZcn8Xhk1ZbG229kAJ/r4pGz8XNhhVSaTVifjxRBWufXsfnt1wSn59oh1XzgC/nDtjvzeBHh9QOeNjtTgc3fRYf7k6b/dZ5s50abT4IaMSAOCuVmLJBPMNgVcpFfjdov7qmTBvF4skwanf2ebO7Cuox+nqNunr0b5IDrfvBDYR2QajMyJpaWm48847cf3118PDY+SD6eLj46HT6YwNh4iIiIiIiMzI0vNm+iweHwzl1wK0OhEbsirxxLIkiyWGbM3O07X4w9eZAIC3duQjzNsFl6aG4dLUMEwI8xrS30tLlwafHyzFB/uKUNrQOeBr8YHuSLHzhcfxYV5wVSnRqdHabeWMKIrIKJMqZ3zdVIjwNX0bLZK4OztharQv9hc0oLi+AyX1HYgySIj8dLIard1SUnp5Sijc1Oa9wfjSiWH45mg5tufU4o45caP2Wmct0wz+bTuov358mF4sv3bLzBhLh0REDsqof02OHz+OlJQUU8VCRERERERENs6wCsES82b6+LqrMTPeH7tz61Da0IkTFS2j9s7lX1YOVTR34e1dBXh7VwHiAt2xMjUcl00KO6M1EgAU1bXj/fQifHGoFO092gFfiwt0x20zY3DV1AionUwyotZqVEoFJkf5ID2/HhXNXShv6kS4j/WSG99nVOLD0woEjG/ArIShVV2UNXaiob0HgNTSjAv05jUnIRD79a39dufV4kb/aPlrXx02aGk21XwtzfooFAL+e3Ma2nu08HJhpxlLC/V2RbiPK8qbOnGstAnF9e346aR03Q3ydMbFySFWjpCIHIVRP20xMUNERERERDR6iKIo30XsqlJifJiXRc9v2NpsQ2alRc9tK0RRxM6cGgCA2kmBBWMD4aToX7QvqG3HK1tOY8E/d+DSN/bgf7sLUNXchfS8Otz5wUEseGkH3k8vGpCYmZMQgNW3TcOWh+bhphkxZq8KsJSztSaytO5eLZ5cl4GHvsjE0XoF7vjwCLLKmwffEQNbmtl7mzl7MCfBYO7M6f7WZtUtXfIcmnAfV1xgoaS0k1IBb1cVk3JW0lc906XR4cl1mejrHnnj9GiolPadvCYi2+EYP3ERERERERGR2RXXd6CyuQuANHDd0gtUSyYE40/fSItkG7Oq8OjSsaNu4fJ0dRsq9O/B9Fg/rL7tAjS092BDZiXWH6/Az4X9SYjM8mZkljfjbz+cOuM4LioFrpwSgdtmxiAh2NNi8VuSYWuiQ0WNWDkp3KLnr2jqxD1rDuN4WX8ypkujw68/PIRv75+NQE/n8+6fYbBfasTorBKzpAlh3vB1U6GxQ4O9+XXo1ergpFTg22Pl8sL8VVPCoVCMrmvOaJUW44dvjlUAANLz6wEAKqWA66dHWjMsInIwQ0rOlJSUmOXkUVFRZjkuERERERERmd6+gnr58Yx4f4
ufP8DDGdNj/bGvoB6Fde3IqW5FUohlq3esbYe+agYAFowNAgD4uavxqwuj8asLo1HR1InvMyqw/ngFsspbztg/1NsFN8+IwfUXRMLHTW2xuK1hcpQvFAKgE/vnRlhKel4d7v/kqNyWzNlJAR+VFtWdAiqau3DPmsP4+K7pcHZSnvMYx0ub5MesnDE/pULArDEB+D6jEq1dvThe1owpUT746nC5vM0VU8zf0oxsw9nadi5PCUWQp4sVoiEiRzWk5ExsbKzJTywIAnp7e01+XCIiIiIiIjKPvruHAeskZwBgWUqInCTalFU1CpMztfLj+WMDz/h6mI8rfj03Hr+eG4/82jasP1aB7Tk18HRxwnXTonBxcsioacnj4eyEcaFeOFHRgpzqVjR3auDtqjLrOUVRxDu7CvD3TdlytUWknyvevC4Vxw/swb9y3VHd0o1DxY348zcn8MJVKWet/tLqRGTq25+F+7gOWmVDpjE3IRDfZ0gtE3fn1sLZSYGc6lYAUrXg2eY4kWNKCPKAl4sTWrr61y5vmRljvYCIyCEN6ScyURTN8h8RERERERHZB1EUsU+fnHFXK5ESbp02S4vH9w9T33qq5jxbOp7WLo1cARLl5zboQnF8oAceWpyI9ffPxsd3XohLU8NGTWKmT9/d76IIHClpNOu52rp7cd/aI3h+Y39iZl5iIL67fzbGh3rBWw28dcMkODtJ78Fnh0rxQXrRWY+VX9uGDv1coIlsaWYxsw3nzuTW4asjZfLzq1g1M6ooFAKmRve3RkwJ98bkSB/rBUREDmlIlTOrV682dxxERERERERkw/Jr21DX1g0AmBbrZ7VF/lBvV0wIk6ohMsubUd3ShWCv0dFmZm9ePXr1q/4LxgaOunk7I5EW44v39QmQQ0UNcis4U8uracPdaw4jr6ZNfu3Bi8bgt4sSoVQI0Gg0AKQF3n9cPRG//fQYAOCZH04hIdgTs8YEDDieYUuzVC4IW0yYjyviA92RX9uOY6VNKKiV3k+1kwIrJoZaOTqytBnx/tiur1a8ZWYMr7lEZHJDSs7ccsst5o6DiIiIiIiIbNg+w5ZmcdZpadZn4bhgnKiQ5qlsPVWDG6aPjnmmO0/3VwrNN1OSwdGkRffPjThYZJ7KmU1Zlfj9Fxlo65baH3m6OOGVVZOwyKDKy9DKSeE4VdmK/+zMh1Yn4t6Pj2D9/bMQ7d9fCXW8rEl+zMoZy5qTEIj82nZodSIaO6Sk2uLxwWZviUe258bp0Sisa4efuxpXTg63djhE5IBGVz0zERERERERjUjfnBcAmBkfcJ4tzW/RuP7ExNZT1VaMxHJEUcT2bOkObrWTAhdaOUFmL0K8XRDp5wpAqkbp7tWa7Ni9Wh1e2JiNu9cckRMzY4M98d39s8+ZmOnz6NKxuChJ+hw3d2pw5weH0Nqlkb+eUSbNmxEEWK2F4Gg1N/HM69vVbGk2Krk7O+H5Kyfi0aVJUChYNUNEpsfkDBEREREREZ2XTidif4E068TLxQnjw7ysGk9ymDeCvaQB6Xvy6tDZY7oFd1uVU92KqpYuAMCFcf5wVSutHJH9mKavnunu1SGrvNlkx33xpxz8Z2e+/Pyy1DB8fd9MxAxhaLxSIeC16yZhTJAHACC3pg0PfXYMOp2I7l4tTlVKlWHxgR7wdGHFhiVNj/WHStm/EB/g4Yw5CdZNSBMRkWNicoaIiIiIiIjOK6e6FQ3tPQCAC2L9obTyHcQKhYCLkqTKhO5eHfbk1Vk1HkvYoZ97AEjzZmjoLojtb2229VTNebYcup5eHdYeKAEgJVr+fMl4vHbdJLiph9Q9HgDg6aLCf29Og5eLtM+WUzV4efNpnKpshUYrzRZiSzPLc3d2GjAI/vJJYXCy0owtIiJybEP/qWEQx48fx+7du1FQUIDW1lZotee/c0kQBLz77rumOj0RERERERGZyYB5M/G20U5r0bggfPKztDi+9VQ1Fg/SRsrebc/mvJmRWjguGAohEzoR2JBZiUeXjjV6sPfe/Dq0dkmtzC5LDcPts2NHdJzYAHf868YpuOW9n6ETgTe35w2YN5Ma4WNUnDQyy1NCsb+gAU4KAddOi7R2OERE5KCMTs7k5OTg9ttvx/79+4e8jyiKRiVn3nrrLbz11lsoKioCAEyYMAF//vOfsWzZMvn4Tz/9NN555x00NjZi+vTp+Ne//oUJEybIx+ju7sbvf/97fPLJJ+js7MTChQvx73//GxER7CNKRERERGSs09WtyCxrxsXJIXB3Ntk9YWQlhvNmZtjIrJNZYwLgolKgS6PD1uwa6HSiw84EaO3S4HCxNMw+2t8NsUNom0X9Aj2dcUGsH/YXNKCovgPZVa0YF2pca75NmVXy44uTQ4w61pyEQPxxxXg88/1JAMDu3P5KsNRIH6OOTSNzwwVR8HZVIdzHFQnBntYOh4iIHJRRdZnl5eWYO3cu9u/fD1EUIYoi3N3dERERgaioqHP+Fx0djaioqBGfNyIiAi+88AIOHTqEQ4cO4aKLLsLKlStx4sQJAMA//vEPvPzyy3jzzTdx8OBBhISEYPHixWhtbZWP8bvf/Q5ff/01Pv30U+zZswdtbW245JJLBq34ISIiIiKi82vp0mDV2/vwyBfHcdmbe5BX02btkMgIWp2IA/rkjK+bCkkhtrFQ6aJSYvYYaQ5EbWs3Mk04S8TW7M2rQ69OanO1gFUzI7I8JVR+vDGz0qhjabQ6/HhSSs64qZWYl2h8m7nbZ8XgmqkDbxZVKQWMC7WN77fRxkmpwMpJ4UiL8Rt8YyIiohEyKjnz7LPPorZW6nt75513Ijs7Gy0tLSguLkZhYeGg/43UpZdeiuXLlyMxMRGJiYl49tln4eHhISeJXn31Vfzxj3/ElVdeieTkZHzwwQfo6OjA2rVrAQDNzc1499138dJLL2HRokWYPHky1qxZg8zMTGzZssWYvxIiIiIiolFvZ04tmjo0AID82nasfHOP0YuhZD2nKlvQom/fdGGcv01Vpywa19/KbMupaitGYl7bs/vnzczjvJkRWTqhv7plY1bVebYc3IGCBvkad1FSEFxUSqOOB0it3/92RTKmRPnIryWFeMHZyfhjExERkW0yqr/Apk2bIAgCbr75ZrzzzjumimlYtFotvvjiC7S3t2PGjBkoLCxEVVUVlixZIm/j7OyMefPmIT09Hb/5zW9w+PBhaDSaAduEhYUhOTkZ6enpWLp06VnP1d3dje7ubvl5S0sLAECj0UCj0ZjpT0hE9qDvGsBrARHxekAEbD4xcOGzvUeLez4+grtmx+DhRWNGzWBlR7ke7Mntn3VyQYyPTf155ozpv6t988lqPLggzorRmIcoithxWnoPnJ0USIv0sqn3wF74uSoxNcoHh0uakFvThpPljUgI8hjRsb7PKJcfLxkXOKT3YyjXAwWAN69LxXX/+xklDZ1YmRrC95rIQTnKzwhEdHZD/d42KjlTUVEBALj55puNOcyIZGZmYsaMGejq6oKHhwe+/vprjB8/Hunp6QCA4OCBwyCDg4NRXFwMAKiqqoJarYavr+8Z21RVn
fsOmueffx5PP/30Ga9v374dbm5uxv6RiMgBbN682dohEJGN4PWARiutCGw5oQQgwEUpYoKviMN1UjLmv3uKsCOjALcm6uChsm6clmTv14P1pxToa7rQU5qFDfVZ1g3oF6LclShpF5Bd1Yo1X2+An7O1IzKt8nagukX61T3OoxfbNv9o5YjsV5RCwGFIlShvfrMbSyPEYR9DJwLfH5OucSqFiK7CI9hQMvT9h3I9eDABaOwGAhpOYMOGE8OOkYjsh73/jEBEZ9fR0TGk7YxKzvj6+qKmpgY+Pj7GHGZExo4di2PHjqGpqQlfffUVbrnlFuzcuVP+uiAMLLUXRfGM135psG2efPJJPPzww/LzlpYWREZGYsGCBfD3t42hmERkHRqNBps3b8bixYuhUo2i1SYiOgOvBzTaHShsQMf+QwCAi8aF4NVVE/Hh/hK8sOk0enUiclsUeDPXDW9el4qJEd5Wjta8HOF60KvV4Q9HtgPQIsBDjduuWjzo71WWVuCaj9e25QMAhLBkLJ8+8vmmtujtXYUAcgEAV88aj+UXOtafz5ImNXXi65d2AwAKeryxfPnMYR/jQGED2vTXuAVJwbji0klD2s8RrgdEZDq8JhA5tr6OW4MxKjmTlpaGDRs24PTp05g8ebIxhxo2tVqNMWPGyHEcPHgQr732Gh5//HEAUnVMaGj/wL+amhq5miYkJAQ9PT1obGwcUD1TU1ODmTPP/cOZs7MznJ3PvA1LpVLxQkpEAHg9IKJ+vB7QaLUzt15+vHhCCNRqNe6cOwapUX649+MjqG3tRmVzF67/30H8deUEXHeB4y802/P1IKuyEe3dWgDAjPgAqNVqK0d0piXJoXJyZtvpetw6O97KEZnWrrz+76mF40Ls9rNkC6IDVZgU6YNjpU3Irm5DWXMPYgPch3WMzaf65/+smBg27PfDnq8HRGR6vCYQOaahfl8b1ez5wQcfhCiKVps3Y0gURXR3dyM2NhYhISEDygJ7enqwc+dOOfEydepUqFSqAdtUVlYiKyvrvMkZIiIiIiI6v62npNkYCgGYnxgkvz4txg8/PDAbadHSzVE9Wh2eWJeJJ77KQJdGa5VYaXD7CvoTAzPibLNbwPhQL4R5uwAA9ufXo62718oRmU5LlwaHixsBALEB7ogZZiKBzrQ8JUR+vDGrclj76nQiNmZJrdDVTgpclBQ0yB5ERERE52ZUcmbx4sV47LHHsH37dtxzzz0WG2L1hz/8Abt370ZRUREyMzPxxz/+ETt27MCNN94IQRDwu9/9Ds899xy+/vprZGVl4dZbb4WbmxtuuOEGAIC3tzfuuOMOPPLII9i6dSuOHj2KX/3qV0hJScGiRYss8mcgIiIiInI0+bVtKKhrBwCkRfvB131glUWQlwvW3nUhbp0ZI7/26cFSrHp7H8qbOi0ZKg3RvnyD5Ey8bSZnBEHAwnFSl4QerQ57cmsH2cN+7Mmtg1YnzUWZlxho5Wgcw7Lk/g4bGzPPPXP2bI6UNKKmtRsAMDchEJ4uvNudiIiIRm5Ibc0+/PDDc35t/PjxmDlzJt555x189913uPrqq5GUlAQ3N7dBj3vzzTcPPVID1dXVuOmmm1BZWQlvb29MnDgRmzZtwuLFiwEAjz32GDo7O3HvvfeisbER06dPx08//QRPT0/5GK+88gqcnJywatUqdHZ2YuHChXj//fehVCpHFBMRERER0Wi39VS1/HjR+LPfUa52UuCpyyYgNdIbT67LRJdGh4yyZlz6xh6su2cmKwNsSE+vDoeKpKqNEC8XxPgP/juetSwcF4SP9hcDALacqsHFBgvw9mxHTo38eP5YJmdMIdLPDcnhXsgqb0FmeTNKGzoQ6Te0z/YGg2SOYQUOERER0UgMKTlz6623DmnoY2VlJd54440hnVgQhBEnZ959991Bj/3UU0/hqaeeOuc2Li4ueOONN4YcLxERERERnd+WU/0LyX2VDOdyxeQIjA32wt1rDqOkoQMN7T14a0c+/n71RHOHSUN0vKwJnZq+eTP+Q/qd0FoujPOHm1qJjh4ttmfXQKsToVTYbrxDIYoiduRIVUAuKgUutNG2cvZoWXIossqlQb0bsyrx67mDzykSRRGb9G3QVEph0GscERER0WCG3NZMFEWT/0dERERERI6hqaNnwGyM+ECPQfcZH+aF9ffPgptaql7fdKIKGq3OrHHS0NlDS7M+Liol5iQEAADq23twrLTRyhEZ72Rli9xCa0acP1xU7PJgKsuS+6teNgyxtdnxsmZUNHcBAGaNCYC3K1uaERERkXGGVDlTWFho7jiIiIiIiMiO7ciplWdjLBzGkGwfNzUWjQvG+uMVaO7UYE9eHRaM5ZBtWzAgOWMHVRuLxgXjxxNSa70tp2owNdrPyhEZp69qBgDm83vCpOICPZAU4onsqlYcK21CRVMnwnxcz7vPxsxK+fFyB2mbR0RERNY1pORMdHS0ueMgIiIiIiI7tmXAvJnhtftZMTEU649XAAB+yKhkcsYGdGm0OFwiVZ9E+LoOeSaHNS1ICoIgAKIozT96/OIka4dklJ0GyRl+T5jesuRQZFe1AgA2ZVXh9tmx59xWFEVs0Lc0UyoELB7mNY6IiIjobIbc1oyIiIiIiOhsenp18kKyt6sKadG+w9p/XmIgPJyl+8Z+PFGFnl62NrO2oyVN8vtgD1UzABDg4YzJkT4AgNPVbSht6LBuQEZo7tTIybG4AHdE+dt+cszeLE/pb222MavyPFsCJypaUNrQCQCYGe8PX3e1WWMjIiKi0cGo5MxFF12EhQsXori4eMj7VFRUyPsREREREZH9O1jUgNbuXgDA/LGBcFIO79cMF5VSvhO9tasXu3NrB9mDzG1fgf3MmzFkOKTdsJrL3uzJrZPbBM4bG2jlaBxTQrAnxgRJs7EOFTeiuqXrnNtuMGhpdrHBvBoiIiIiYxiVnNmxYwd27NiB9vb2Ie/T2dkp70dERERERPbPcBHccHF8OFak9M9w+CHj/Hexk/nty6+TH9tTcmaRgyRnduTUyI/Z0sx8lusTLaIoVe2djSiK2JglfU0hAEvGMzlDREREpsG2ZkRERERENGKiKGLrKWkh2UkhYF7iyO7yn5MYAE8XqbXZ5pPV6NJoTRYjDU9njxbHSpsAALEB7gj1Pv+gdFuSGOyBCF8p3gMFDWjp0lg5IiC/tg1fHi5DQ3vPkLYXRRE7TkvVY64qJS6I9TNneKPaMoOk8MbMsydncqpbUVgn3ZB6QawfAj2dLRIbEREROT6LJ2f6qmxcXFwsfWoiIiIiIjKxvJo2lOhne1wQ6wdvV9WIjuPspJTvSG/t7sWu02xtZi2Hihug0UottS60k3kzfQRBkKtnenWiVT9HpQ0dePjzY1j88k78/ovjuOilHfj05xLo9O3KzuVERQtqW7sBSFVLLiqlJcIdlZJCPBEb4A4AOFBYj7q27jO22WCQtFlukMwhIiIiMpbFkzMbN24EAERERFj61EREREREZGKbTdDSrM8lEw1am2WytZm17Mu3z3kzfQxbm/VVdVlSTUsX/u+bLFz0
0g6sO1KOvlxMU4cGT6zLxNX/ScfJipZz7r/TIKG0gPNmzEoQBHmGjE4EfjpxZiu8jfprkSAASyewpRkRERGZjtNwNr799tvP+vqf/vQn+Pj4nHff7u5u5Ofn4+DBgxAEAfPmzRvOqYmIiIiIyAYZLn4vGmfcbIxZYwLg7apCc6cGW/StzVg1YHn7CvqTMxfG2V9LrQti/eDp7ITW7l5sy65Br1YHJ6X570ts6ujBWzvz8UF6Ebo0Ovl1b1cVJkf5YEeOlHQ5UtKES97YjVtnxuLhJYnwcB74a7nhvJn5nDdjdsuTQ/HWjnwAwMasStwwPUr+Wm51K3Jr2gAAU6N8EezFDiBERERkOsNKzrz//vsQBGHAa6Io4ttvvx3S/qIo3TLk5+eHJ598cjinJiIiIiIiG1Pf1o0jJY0AgDFBHoj2dzfqeGonBZZOCMbnh8rQ3qPFjpxa+a52soy27l5klDUDkN7TIE/7W4xWOykwNzEQP2RWorlTg8PFjZhuxvZsbd29WL2nEO/sKkBrd6/8uptaiTtnx+LOuXHwclEhPa8Of/o2CwW17dCJwHt7C/FDZgX+75LxWJESCkEQ0NwhxQsAcYHuiPRzM1vcJEkO90KEryvKGjuRnl+PxvYe+LqrAQAbs/pbmi1jSzMiIiIysWElZ6KiogYkZ4qLiyEIAkJDQ6FSnbu3tCAIcHFxQWhoKGbOnIl77rkHYWFhI4+aiIiIiIisbntOLfT3Xw1oJWWMFRPD8PmhMgDA9xkVTM5Y2MHCBmj1fbhm2mFLsz4LxwXJrfG2ZteYJTnTpdHi4wMl+Pf2PNS398ivq50UuOnCaNwzPx4BHv3D42eOCcDG387B/3YX4vWtueju1aG6pRv3rz2KzxJK8deVyThR0Sy3QVvAqhmLEAQBy1NC8c6uAmh1IjafqsaqtEgAA5MzvBYRERGRqQ0rOVNUVDTguUIhlYb/9NNPGD9+vMmCIiIiIiIi27fVYN6MsS3N+syM94evmwqNHRpsPVWDzh4tXNVsbWYphi3NZpix2sTcFowNgkKQ5ohsOVWNPywfZ9LjHy9twt1rDqOyuUt+TakQsCotAg9clIAwH9ez7ufspMR9C8bgstQwPLX+BLZmSy3MdufWYekruxDh27/ffM6bsZhlySF4Z1cBAGnGzKq0SBTWteNUpTQbaFKkD8LP8Z4SERERjZRRjXfnzp2LuXPnwt3duPYFRERERERkX7p7tdilH1zu567G5ChfkxxXpVTId6h3arTYnmP5ge6j2b78/uSMOVuBmZuvuxpp0dK8nILadhTUtpn0+A9/fmxAYuay1DBseXgenr9y4jkTM4Yi/dzwv1vS8M5NU+VF/x6tDgV17QAAV5USF8Ta37wfezUp0gdh3lILvz15dWju1GBjVqX89eUprJohIiIi0zMqObNjxw5s374d0dHRpoqHiIiIiIjswP6CBrT3aAFId/grFcIgewzdipT+FsjfZ1SY7Lh0fs2dGpyokObNJIV4wk8/d8NeLTSo5tp6ynRJvuL6duTXSkmUuEB3bHhwDl6/fjJiA4Z306IgCFgyIQSbH56Lu+fFw8nge2hmvD+cnVgxZimCIGCpPims0YrYeqoaGzMN5s0kc94MERERmZ5RyRkiIiIiIhqdBrY0M828mT4XxvnBX58Y2JZdg3aDIetkPj8XNsjzTmbY8byZPgsNPpdbDD6vxuqrGAOAq6ZEYHyYl1HHc1M74YllSdj42zmYPzYQ0f5ueGBhgrFh0jAtT+lPwLy7pxCZ5VKiMjncC5F+btYKi4iIiBzYsGbODEVLSwtaW1uh1WoH3TYqKsrUpyciIiIiIjMTRVGuRFArFZibaNrZGE761mYfHyhBl0aHbdk1uDQ1bPAdySiGLc1mxgdYMRLTiA90R4y/G4rqO3CouBHNHRp4u6mMPu5Og+TMPBN+9hOCPfH+bReY7Hg0PFOjfBHk6Yya1m6cqGiRX2fVDBEREZmLSSpnNm/ejCuuuAIBAQHw9fVFVFQUYmNjz/tfXFycKU5NRERERHZMpxOh0eqsHQYNU3ZVK8qbOgEA0+P84OFs8nu+sGJi/4IoW5tZRnp+HQBAIcAh5p0IgiBXdWl1oknmF/X06pCuT2IFeKgxPtS4qhmyHQqFIM+7MrTsLK8RERERmYLRyZkHH3wQF198MdavX4+GhgaIojjk/4iIiIho9PrpRBVmvrAN05/binwTD+sm8zJnS7M+02P9EeDhDADYnlOLNrY2M6uG9h5kV7UCACaEecPb1fgKE1uwaHz/53PzSeNbmx0qbkCHftbS3IRAKEw4a4ms75dVMkkhnogL9LBSNEREROTojLrFbe3atXjzzTcBAC4uLrj88ssxdepU+Pn5QaHgOBsiIiIiOlNbdy+e+e4kPjtUKr/2QXoR/roy2YpR0XBsNhiubjh03ZSUCgHLU0Lw4b5i9PTqsPVUNVZOCjfLuUY7jVaH7dn976kjzJvpkxbtC183FRo7NNiRU4PuXi2cnZQjPp5hSzNTt/Mj67sgVpp3Vd/eA2DgHBoiIiIiUzMqOfP2228DACIjI7Ft2zbEx8ebJCgiIiIickyHixvw0GfHUdLQMeD1jVlVeOrSCbwL3Q7UtHbheGkTAOmu8ghf8w3KXpESig/3FQMAvs+oZHJmmNq7e5FX04aqli7UtXWjvq0H9W3dqGvrkZ63S88bOzQD9psR5zjJGSelAhclBeOrI2Vo79EiPb8eC8aOPKG467TU+k0QgDkJ9j+XhwZSKgRcMjEUH+wrhiAwOUNERETmZVRyJiMjA4Ig4C9/+QsTM0RERER0ThqtDq9tycW/d+RBp+9u66ZWIsTbBQW17aht7cbhkkZMi7H/OReOzrDCwlwtzfqkxfjJA7p35tSipUsDLxfHaLdlShqtDoV17cipakVOVSuyq1pxurr1jCToUHg4O2GaA8ybMbR4vJScAaTWZiNNztS0dOFUpTQoPiXcG/76tnvkWB5ePBauaickh3thTBBbmhEREZH5GJWc0WikO6wmT55skmCIiIiIyPHk1bTioc+OI7O8WX5tarQvXl6ViiMljXjos+MAgA2ZlUzO2IEtFmhp1kdqbRaK99OL0KPVYcvJalw5JcKs57QHx0ubsDe/Tk7GFNS2o0erG/Zx3NRKBHg4w99DDX93ZwR6OuOKyeHwcDbq10SbMzcxAM5OCnT36rD5ZDX+tjJ5RFV6u3Lr+o+ZwJZmjsrbTYUnliVZOwwiIiIaBYz6qTsmJganTp1CWxsHuBIRERHRQKIo4sN9xXhuwyl090oLx04KAb9blIC758XDSamAr7saKqUAjVbEpqwq/N+K8WxtZsO6NFrs0S9QB3g4IzXCx+znvGSilJwBgB8yKkd9cuZgUQOu+c++QbdzUyuREOyJpGBPRPq5IsDDWU7E9P3fTe1YSZhzcVM7YU5CALacqkFtazeOlzVhcpTvsI9jOG9m3lgmZ4iIiIjIOEb9NH7llVfi2WefxdatWzFnzhxTxUREREREdq66pQu//+I4dhvcaR4f6I5Xr52MlAhv+TUvFxX
mJARiW3YNKpu7RrxoSpaRnl+HTo0WAHBRUqBFEmlTonwR4uWCqpYu7MqtRXOHBt5uo7e12ac/lw54rlQIiAtwx9gQTySFeCIx2BNJIV6I8HVlotPA4vHBctXX5pPVw77OaHUi9uRKyRlPZydMivQxdYhERERENMoojNn5kUceQVRUFF599VVkZ2ebKiYiIiIismM/najC0ld3DUjM3DIjGt8/MGdAYqbPsuQQ+fHGrCqLxEgjY9jSzNzzZvoo9K3NAECjFfHTydH7Genu1cp/fg9nJ2x4cA5O/nUpNj88D2/eMAX3X5SAJRNCEOXvxsTML1yUFAxB/1ey+WT1sPfPLG9GY4fU1nvWmAColEb9Kk1EREREZFxyxtvbG5s2bUJwcDBmzZqFf//732hsbDRVbERERERkZ7KrWnDPx0fQpF/EDPJ0xge3X4CnVybDVa086z6LxwfDSb+QvCGzEqIoWixeGjqtTsQW/aK22kmB2QkBFjv3Jamh8uMfMistdl5bs/t0HVq7egEAS8YHY3yYF5ydzv59RQMFejpjir5aJremDYV17cPaf5dBS7O5iWxpRkRERETGM6qtWVxcHACgo6MDjY2NeOCBB/Dggw8iICAAbm5u591XEATk5+cbc3oiIiIisjF/35gNrU5KriweH4x/XDURvu7q8+7j46bGjHh/7M6tQ1ljJ7LKW85aYUPWtTu3FjWt3QCkYeiWnFcyOdIH4T6uKG/qxJ7cOjR19MDH7fyfK0dkmJhaMTH0PFvS2SwZH4zDxdLNhJtPVuHXc+OHvO/OAckZyyUmiYiIiMhxGfUbVVFR0YDnoihCFEXU1NScfQcDgsAyeyIiIiJHkp5Xh+050gJmuI8r3rh+MlxUQ7urf3lKqNwGbUNWJZMzNuiLQ2Xy41VpERY9tyAIWJ4Sgv/uLkSvTsSPJ6pw7bQoi8ZgbV0ardyOy9PFyaKVS45i8fhgPL9Rase9+WT1kJMzzR0aHC2Rkjrxge6I8D3/jYhERERERENhVHLmlltuMVUcRERERGTHdDpRXvQEgIcXJw45MQNId7T/8etM6ERgY2YlHls6ljfz2JDG9h45MRDgocaCpCCLx3DJxDD8d3chAOD7jMpRl5zZeboWbd19Lc1C2M5sBOICPRAf6I782nYcLm5EXVs3AjycB91vb34d9AWBmJdo+c8+ERERETkmo5Izq1evNlUcRERERGTHvs+sRGZ5MwBgXKgXLp8cPqz9/T2cMT3WH/sK6lFU34HsqlaMC/UyR6g0At8eK0ePVgcAuGJyuFWGoU+M8EaEryvKGjuRnl+PhvYe+A3SMs+R/JDR39LMcAYPDc+SCSF4a0c+dCKw7VQNVk2LHHSfnTn9Lc3mjeW8GSIiIiIyDcv/VkVEREREDqW7V4sXf+yvmnlyWRKUiuFXvSxPCZEfbxzFQ99t0ecGLc2uSRt8MdscBEGQ56xodSK+z6iwShzW0KXRYsspqXLJ21WFWfFsaTZSi8cHy49/0leDnY8oitiVKyVnnJ0UmB7rZ7bYiIiIiGh0YXKGiIiIiIyyZn8JShs6AQBzEgIwN3Fkd5YvnRCCvk5mG7KqTBUeGSmrvBknK1sAAJMifZAY7Gm1WC6f1F+RZTgDx9HtyKlBR48WALB0QjDUTvw1bqQmRfgg0FNqZbYnrxad+r/Xc8mtaUNlcxcAYHqc/7DaNRIRERERnY/Jf6qvrq7G1q1b8cUXX+CLL77A1q1bUV09+B1JRERERGR/mjs1eHNbrvz88YuTRnysIC8XTIuW7krPq2lDbnWr0fGR8b44VCo/XmWlqpk+40K9kBLuDQDILG/GKX3SyNF9Z9jSbGKYFSOxfwqFgEXjpLkxXRoddufWnnf7Xaf7vz43gRVLRERERGQ6JknOiKKIt99+GykpKQgLC8OSJUtw3XXX4brrrsOSJUsQFhaGlJQUvPPOOxBF0RSnJCIiIiIb8J+d+Wjs0ACQZpEk6xfOR2qZQWuzDZmsnrG2Lo0W3xyT2oe5qBQ2MetkVVqE/Hg0VM909PRi26kaAICvmwoz4v2tHJH9WzK+/zozWGuznQbJmfmcN0NEREREJmR0cqaxsRFz5szBvffei5MnT0IUxbP+d/LkSdxzzz2YO3cumpqaTBA6EREREVlTRVMn3ttTCABQKxV4ZEmi0ce8ONlg7kwW585Y25ZT1WjulJJvy5JD4eWisnJEwGWp4XJbr6+PlqGnV2fliMxre3YtOjVS662Lk0OgUrKlmbFmxPvDTS21J9uWXQOt7uw3EHb2aHGgsAEAEObtgvhAD4vFSERERESOz6if7EVRxMqVK5Geng5RFOHn54d77rkH77//PjZt2oSNGzfi/fffx7333gt/f3+Iooj09HSsXLnSVPETERERkZW8vPk0uvUL47fMjEaEr5vRxwz1dsXkKB8AQHZVKwpq24w+Jo3c5waVKdcYVKxYk7ebCksnSEm8xg4Ntp5y7BbK32dUyI9XpLClmSm4qJSYp5+N1dDeg8PFjWfdbn9hvZz8mzc2EELfUCwiIiIiIhMwKjmzdu1a7NmzB4Ig4MYbb0RBQQH+9a9/4eabb8aSJUuwdOlS3HzzzXjzzTdRUFCAm266CaIoYs+ePfjkk09M9WcgIiIiIgvLrmrBV0ekhXsvFyfct2CMyY69PLm/ddbGLLY2s5aKpk55HkeknysujLWddlqGrc0+N5iJ42jau3uxLVtqaebvrsaFcX5WjshxLB4fLD/+6cTZrzM7c/pbmvUlc4iIiIiITMXo5AwAzJs3Dx999BE8PT3Pua2Hhwc++OADzJs3D6IoYs2aNcacmoiIiIis6IWN2egbJXj/RWPg46Y22bHZ2sw2fHW4TH6Pr54SCYXCdqoGZsYHINzHFYA0E6S6pcvKEZnH1uwauTrt4uQQOLGlmclclBQEpf4zvflU9Vlno+7SJyeVCgEzxwRYND4iIiIicnxG/XR/5MgRCIKA+++/f8j7PPDAAwCAo0ePGnNqIiIiIrKS9Lw67NDfUR7u44qbZ8SY9PiRfm5ICfcGAGSVt6CkvsOkx6fB6XQivjgsVUYJAnDV1HArRzSQUiHgqqlS9YxOhFzF5Wh+MGxpNjH0PFvScPm4qXFBjFSJVFzfgdyagS0USxs6UFDbDgCYEuVjE/OWiIiIiMixGJWcaWiQhiPGxsYOeZ++bfv2JSIiIiL7odOJeH5jtvz8kSWJcFEpTX6eZSmsnrGmn4saUNIgJcVmjwkwyTwhU7tman9rsy8OlZ218sGetXZpsF2fBA3wcMZ0G2or5ygMW5ttPjlwdlFf1QwAzE1gSzMiIiIiMj2jkjPe3tIdjRUVFYNs2a9vWy8vL2NOTURERERW8F1GBTLLmwEA40K9cPkk81RULDOYO7OBc2csznCOyzVpkVaM5Nwi/dwwI05KWBTWtePQOYa626utp2rkYfTLU0LkFlxkOuebOzNg3sxYJmeIiIiIyPSMSs4kJycDAFavXj3kfd57770B+xIRERGRfeju1eLFH3Pk539YnmS2OSSxAe4YFyrdzHO8tAnlTZ1mOQ
+dqbVLgw2ZUrWSl4sTlhgsYNuaVdMMq2dKz7Ol/fk+o79ibEUKW5qZQ6SfW/91pqxZnl2k0eqQnl8PAPBzVyM5zNtqMRIRERGR4zIqOXP11VdDFEV8/fXXeOqpp87bSkAURTz11FP4+uuvIQgCrrnmGmNOTUREREQWtmZ/CcoapSTJnIQAzDFzq5/lyf2tzTaxesZifsioRJdGqthYOSncLG3rTOXiCaHwdHYCICUz2rt7rRyRabR0abDrtFS5EeTpjDT9bBQyvbO1NjtS3Ig2/WdpTkKA2ZLQRERERDS6GZWcueuuu5CUlARRFPHMM89g4sSJeOmll7Bnzx7k5uYiLy8Pe/bswUsvvYTU1FQ888wzAICkpCTcddddJvkDEBEREZH5NXdq8Ma2XADSgPjHL04y+zkHzJ3J5NwZSzFsabbKRlua9XFVK3FJahgAoKNHix8c5HOy+UQ1erR9Lc1C2dLMjJacJTnDeTNEREREZAlOxuysUqmwceNGXHTRRSgsLMTJkyfx2GOPnXN7URQRFxeHjRs3wsnJqFMTERERkQW9tSMfTR0aAMAVk8KRHG7+Nj9jgjyREOSB3Jo2HCpuRFVzF0K8Xcx+3tEsr6YVR0qaAABJIZ5IDrf9OZGr0iLwyc8lAKTWZraeUBoKwyTTJRPZ0sycJoR5IczbBRXNXUjPr0NrlwY7T/cnZ+YkBlgxOiIiIiJyZEZVzgBAdHQ0MjIy8Mgjj8Db2xuiKJ71P29vb/z+97/HsWPHEBUVZYrYiYiIyE519PTi75uy8fJPOdDqzt0WlWzD/oJ6/G93AQBArVTg4SWJFjv3MoNZGz+eYGszc/viUJn8+Jq0SAiC7VdsTIr0QUKQBwDgYFEjCmrbrBxRv41ZVfg0X4GTlS1D3qe5Q4Pd+sqNEC8XTInyNVd4BEAQBLm1mUYrYt2RcmSVS+/XhDAvBHkyIUxERERE5mGS8hV3d3e8+OKLePbZZ3H48GFkZWWhoaEBAODn54fk5GRMnToVarXaFKcjIiIiO9al0eKuDw9hb540bDnCz80h7nR3VKUNHbhnzWH06pNov5kXhwhfN4udf3lKCF7fKrVT25BZiVtmxljs3KONRqvDV0fKAQAqpYDLJ4VZOaKhEQQBq9Ii8eyGUwCALw+X4TELtN0bzBeHSvHolxkAFFj1zs/42+XJuGYI17ofT1ZBo5W+31ZMDOW8EwtYPD4EH+wrBgC8vPm0/PrcRLY0IyIiIiLzMWlvMbVajRkzZmDGjBmmPCwRERE5iJ5eHe79+IicmAGAb46WMzljo9q7e3HXh4fQqG9nNjcxEL9bZLmqGQAYG+yJ2AB3FNa142BRA2pbuxHo6WzRGEaLnTm1qGvrBgAsGhcMfw/7+Xu+fHI4/r4pG706EV8dKcPDixPhpDS6ScCI7TpdiyfXZcrPu3t1ePTLDBwrbcKfLx0PZyflOff9IaO/pdkKtjSziOlxfvB0cUJrVy+aOzXy6/OYnCEiIiIiM7LebyxEREQ0qvRqdXjos2PYll0z4PV9BfWoaemyUlR0LjqdiIc/P4bsqlYAQFyAO964frLFB5MLgoBlySFSTCLw00m2NjOXzw+Vyo/tLWEa6OmMBUlBAIDqlm7szq2zWiwnKpoHVJuFufW3bvz4QAmufXs/Kps7z7pvY3sP9uZJsYf7uGJypI/Z4yVApVTgIv3np4+7WsmWckRERERkVkzOEBERkdnpdCIe/ypTHnLt7NS/ECaKwPcGd4qTbXhtay5+PFENAPB0dsJ/b0mDt6vKKrEsN5g7szGTyRlzqG3tlhOnwV7OmJNgf0PQDRNKhokmSypr7MBtqw+ivUcLAFg8LgiPTtTi+SsmQO0k/ep1rLQJl76xB/vy68/Y/6eTVXJSZ3lKiF3M/HEUfXNn+swcEyC/Z0RERERE5jDktma7du0y+cnnzp1r8mMSERGRbRFFEX9ZfwJfHZEGjauUAv5z01RE+rrKi8Hrj1fg9tmx1gzT5omiCFGEReZPbMysxGv6OS+CALx+w2TEB3qY/bznMiHMC5F+riht6MS+gno0tPfAz52zDE3pm6PlclLgyikRVm0JNlLzxwYiwMMZdW3d2HKq2uKfk+YODW5dfRA1rVJruClRPnj5mhRs21yBq6eEIzncF3evOYzypk7UtfXgV+8ewBMXJ+HOObFyEsYwUX3JRPuY+eMo5iUGQqUU5Hk/nDdDREREROY25OTM/PnzTXrnliAI6O3tNdnxiIiIyPaIoogXNmbjo/3SoGWlQsAb10/GgrFS1cy4UC+cqmzBsdImlNR3IMrfcoPm7cnp6lbc9O4B1LdJi80BHs7w95D+H+Chhr+HM/zd1QjwdEaAuzMCPNUI8XIZ0c9uJyta8PDnx+XnTy5Lkt8vaxEEAcuTQ/H2rgJodSK+PFyKX8+Nt2pM1qLR6qAUBJMm6URRHFBpcs3UCJMd25JUSgWumhKOt3cVQKMV8c3Rcoslfbt7tbjro0PIq2kDAMQGuON/t0yDi6r/fUqJ8Mb3D8zGg58exe7cOmh1Ip7dcArHyprwj6smokujRbq+mibC1xUTI7wtEjtJPF1UmDUmADtyagEA85mcISIiIiIzG3Jypo8oioNvRERERATg9a15eHtXAQCpAuOf10zExcn9LaouSw3DqcoWAMB3GRW4b8EYq8Rp697eWYDqFulu/JrWbvnO/PNJCPLAI0sSsXTC0Fsj1bd1464PD6FTI7VkumJyOO6aEzfywE1o1bRI+bP0QXoxbp8Va5fVHcbIr23D1W+lw0WlxDMrk7HoF22YRup4WTNy9UmFaTG+iLNilZSxrkmLkD8nnx8qxW2zYszeGkynE/HI58fxc2EDACDAQ40PbrsAfu5qaDSaAdv6uqvx/m0X4OXNOfjX9nwAwA8ZlThd1YqLkoKg1VcvrZgYypZmVvCnFeOgEATMHhOASD/eLEBERERE5jXs5IyrqytWrlyJxYsXQ6EYXb8QExER0dD9d1cBXtlyWn7+7OUpuGLywDvyL00Nxd83ZQMA1h9jcuZsdDoRO09L7d+cFAL8PdSob+uRW1CdS25NG+5ecwQp4d54dOlYzEkIOO9ib0+vDvd8fATlTdKg8tRIHzx/ZYrNLBDHB3pgwdhAbM+pRXlTJzadqBp1bZ8+OVCCxg4NAA3u/PAQrr8gEn9aMR7uzsP+kV5WUt+Bv31/Un5+jcHcFns0JsgTk6N8cLSkCdlVrcgqb0GKmStQ/r4pW25H5qpS4t1bpp23ClCpEPDo0iSkRvjgkc+Po7W7F7k1bXKCDAAuHWWfbVsxJsgT7906zdphEBEREdEoMeTf5Dw9PdHa2orOzk589tln2LFjB2644QbcdNNNSE1NNWeMREREZGc+2l+MZzeckp//acU43DA96oztInzdMDXaF4eLG5FT3YqcqlaMDfG0ZKg270RFC+raegAAC5KC8N+b0yCKIlo6e1Hb1o36tm7UtfWgvl36f11bN06UN+N4WTMAILO8GTe/9zOmx/rh0aVjkRbjd8Y5+uYC9d35H+TpjHdumgoXldJyf9Ahu
GN2HLbrWw69t6dw1CVnfi5qGPD8k59LsS+/Hi9fOwlTonyHdazuXi3e2VmAN7fnobtXBwDwdHHCipTQQfa0favSInG0pAkA8MXhUrMmZ97fWyhX6igE4M0bJiM10mdI+y6ZEIJv7/fAbz46PCAxE+3vhglhXuYIl4iIiIiIbMiQS1+qq6vxySefYPny5VAqlaiqqsIrr7yCKVOmIDU1Ff/85z9RUVFhzliJiIjIDnx1uAz/902W/PzhxYm48zytsS5L7V9gX3+83Kyx2aMdOTXy4/ljpRkIgiDA202FMUEemB7njxUTQ3HzjBg8vDgRz12Rgm/um4XVt07DuND+Bd4DhQ24+j/7cPv7B3GionnAOdbsL8YnP5cAANROCrx901QEe7lY4E83PLPG+CNJn7w7UtKEoyWNVo7Ictq6e5FVLr1vfu5quOoTZ0X1HbjmP/vw8ubT0Gh1QzrW3rw6LHt1N17afFpOzIR4ueDtX001qgrHVlwyMRQuKunXnG+OlqNL36bP1DZlVeFpg6qjZy5PxsJxw2s1FxfogW/um4UVE/uTYpdPCreZijUiIiIiIjKfISdnXFxccO211+L7779HeXk5XnnlFUyePBmiKCIzMxOPP/44oqOjsXjxYnz00Udob283Z9xERERkgzZkVuLRL/uHyd89Lx4PXHT+VmXLU0LRN9v8u+OVnG/3C9sHJGeChrSPIAhYkBSEHx6YjTdvmIy4AHf5a9uya7Di9T24b+0R5Ne2IT2/Dk9/17/A/MKVKZg8zCoMSxEEAbfP6h/w/u6eQitGY1mHihrQ18luRUooNv52DibpKzS0OhGvb83F1W+lI7+27ZzHqGnpwoOfHMWN/zuAgjrpZ3WlQsCds2Ox5ZF5mDkmwNx/DIvwdFFhuX62VUtXL346WW3ycxwubsRvPz2KvsvVfQviceP06BEdy93ZCW9ePxlvXD8Zj1+chHvmx5swUiIiIiIislUjGhoTGBiI3/72tzh06BBOnDiBxx9/HBEREdBqtdi6dStuvfVWBAcH46abbsKPP/7IRRYiIqJRYFt2NR785Ki8gHzLjGg8fvHYQe8AD/R0xsx4aVG4pKFDbsdFQFNHD46VNgEAEoM9EO7jOqz9FQoBl0wMw08PzcU/rpqIMO/+apgfMiqx+OWduOuDQ/L8ml/PjcOVUyLOdTibcNmkMPi7qwEAG7Oq5Bk5jq6v5RwATI/zQ0yAO768ewYeXpwIpT67ebysGSte342P9hcP+Pm7V6vD6r2FuOilnVh/vL/SfWq0L75/YDb+dMl4eDhAxYwhw9k5XxwqNemxC2rbcOcHB+Wqoysnh+P3S8YadUxBEHBpahjumR9vc+0EiYiIiIjIPEaUnDE0btw4PP/88yguLsa2bdtw6623wtPTEx0dHfj444+xfPlyhIeH4/HHHzdFvERERGSD0vPqcPeaI/Ii/zVTI/CXSycMuTXPgNZmx9gmtc+u3Do52TXUqpmzcVIqsGpaJLb9fj7+fMl4ObmhE4H2Hqnl07zEQDx+cZLRMZubi0qJGy+UKhS0OhEfphdZNyALOWCQnLlAPzfISanAgwsTsO6emXJ1VJdGh//7Jgu3vX8QNa1dOFrSiJX/2ounvzuJtu5eAICvmwr/uGoivvjNjAGt7xzJ9Fg/RPm5AQD25NWhtKHDJMcVRRH3rz2Kxg4NAKnV3gtXTWQbMiIiIiIiGjajkzOG5s+fj/feew9VVVVYu3Ytli1bJs+neeONN0x5KiIiIrIRh4sbcOeHh9Cjv4v8komheOGqiVAohr5YuTQ5BGql9GPJ9xkV0OpYdQsAO7INWpolBhp9PBeVErfPjsWuxxbg90sS4ekiVUvEB7rj9esnyxUYtu6mC6Plz8van0vQrk86OKrOHi0yypoAALEB7gj6xTyg1Egf/PDgHNx0YX9brR05tVj4z5248q10nKhokV+/bloktj0yH6umRQ7re9TeKBQCVqVJVWCiCHxgoiTenrw6nKyU/j7HBHngrV9NhdrJpL9SERERERHRKGGW3yQEQYBCoYAgCLyLjIiIyIFllTfj1vcOokNffbFoXBBeuXbSsBf5vV1VmKcfdl/T2o0DhfUmj9Xe6HQidp6uBQC4q5VI01dLmIK7sxPuvygBex67CKtvm4bvHpgNb1eVyY5vboGezrhsklRt1drVi6+OlFk5IvM6WtIIjVZKWE6PPfvnwFWtxDOXJ2P1bdMQ6OkMAGjt7pVnoowL9cJX98zEC1dNhK++csrRXX9BlJw4+exgqVw5ZAzDOUcPLUqEl4v9fN8QEREREZFtMWlyZufOnbjzzjsRHByM66+/Hhs3boRGo0FoaCgefPBBU56KiIiIrOx0dStuevcAWvULnrPHBODNG6ZApRzZjxeGrc2+O87WZlkVzahv7wEAzBoTYJa7873dVFgwNghuavubN3L7rFj58eq9RdA5cLXVgJZm50jO9FkwNgg//m4uLp4QAkBK7P3fJePx3f2zMDXa16xx2hp/D2dcOTkcgJSo+vygcbNn8mpasSNHSpiG+7hi6YRgo2MkIiIiIqLRy+jfxE+dOoWPPvoIH3/8McrKpLsWRVGEm5sbrrjiCtx8881YuHAhFAqW+xMRETmKorp23Pi/A/LchWkxvnjn5qlGDbJeNC4YbmolOnq02JBZhacvSx7V7YK2Z9fKj42ZN+Ooxod5YWa8P9Lz61FY145t2TVYNN4xF8t/NkjOTI/zH3R7P3c13vrVFOTXtiHQ08WuqqJM7fbZsfhUn5RZnV6IW2bGjLh93+q9RfLjW2fGwGmEiWgiIiIiIiJghJUzNTU1eO2115CWlobk5GT8/e9/R2lpKQRBwEUXXYQPPvgA1dXV+Oijj7B48WImZoiIiBxIWWMHbvzfAdS2dgMAJkZ4491bpxldfeGqVmKxfnG9uVOD3bm1g+zh2HacNpg3M9b4eTOOyLB6xrDdlCPp7tXiSEkjAKlaI9zHdUj7CYKAMUGeozoxAwCJwZ6YkxAAACht6MTmk9UjOk5je4/cPs9drcS1F0SaLEYiIiIiIhqdhpw16erqwqeffooVK1YgIiICDz/8MI4cOQJRFDFhwgT8/e9/R0lJCTZv3oybbroJ7u7u5oybiIiIrKCmpQu/+t8BlDd1AgDGBnvig9suMNncBcPWZutHcWuzhvYeHCttAiD9HYcNcUF+tLkoKQixAdLPnPsK6nGiotnKEZleZlkzunt1AIDpcaabOzSa3D67P4n33giTeGt/LkGXRnofrkmL5KwZIiIiIiIy2pBvcQ0KCkJ7ezsAqW1ZSEgIrr/+etx0002YNGmSueIjIiIiG9HQ3oMb/3cARfUdAIC4AHesuXO6SYeLz0kIhLerCs2dGmw+WY3OHi1c1SNvlWavdufWyoPcWTVzbgqFgNtmxeDP354AILWd+uc1qVaOyrQM581MH2TeDJ3dvIRAxAe6I7+2HT8XNSCzrBkpEd5D3r+nV4cP9xUBAAQBuG1WjHkCJSIiIiKiUWXIyZm2tjYIggAXFxdcdtllWLJkCZRKJTIyMpCRkTGi
k998880j2o+IiIgsq7lTg5vePYDcmjYAUnulNXdOR6Cns0nPo3ZSYFlyCD49WIqOHi22nKrGpQbVNKNF39BxgPNmBnPVlAj888cctHT1Yv2xCjx28VgEebpYOyyTMUzOXBA7+LwZOpNCIeD22bH449dZAID39hbilWsnDXn/DZmVqG6R2jguGheMaH92CCAiIiIiIuMNuzl8V1cXPv/8c3z++edGnVgQBCZniIiI7EB7dy9uf/8gTlS0AACCvZyx9q7pZmu1dVlqmDzAe/3xilGXnNHpROw8LSVnPJydkBbja+WIbJu7sxOunx6Ft3cWoEerw5r9JXh4caK1wzKJXq0Oh4uk5EyQpzNi/N2sHJH9unJyBF78MQdNHRp8d7wCTyxLQrDX4Ek8URTx3t7+Vmh3GLRIIyIiIiIiMsaQZ84A0i8npvyPiIiIbJsoirh/7REcLpYGkvu5q/HxndPNeuf49Dh/BOkrcnbm1KK5U2O2c9mijPJmNLT3AABmjfGHSjmsH9dGpVtmxECpEAAAH+8vRpdGa+WITONERQvae6Q/ywWxfhAEwcoR2S9XtRI3To8CAPTqRLlN2WAOFTcio0yaZTQhzIut5YiIiIiIyGSGXDmzfft2c8ZBRERENuhERQu261tsebk44aM7LsCYIE+znlOpELBiYihW7y1Cj1aHH09UYVVapFnPaUt25NTIjxewpdmQhPm4YllyCL7PqER9ew++PVaOa6dFWTsso/1sOG8mji3NjHXzjBi8s6sAGq2Ijw+U4P4FCYPOtHp398CqGSbIiIiIiIjIVIacnJk3b5454yAiIiIbtDGrUn786MVJmBA29CHaxrgsNQyr9xYBAL47XjHKkjP982bmjQ20YiT25Y7Zsfg+Q/q8vrunEKvSIu1+If1AYb38mBUbxgv2csElE8Pw9dFyNHVosO5oGW6cHn3O7UsbOvDTySoAUlu5SyaOrhaLRERERERkXuyTQURERGcliiI2ZEoLk4IAXDwhxGLnnhTpgyg/ab7G3rw61LZ2W+zc1lTf1o3jZU0AgKQQT4R6m2eujyOaHOWLKVE+AIDT1W3Ym1d//h1snE4nypUzvm4qjAn0sHJEjuH2Wf0zY97bUwid7tytllfvLULfl2+eEQ21E391IiIiIiIi0+FvGERERHRWOdWtKKxrBwBcEOOHQP0cGEsQBAGXpoYCAHQisCGzcpA9HMPu3Dr0jeWbz5Zmw3bH7Dj58bt7CqwYifGyq1rR0tULQJo3o1DYdxWQrUiJ8MYFMVIVUn5tO3bl1p51u9YuDT4/VAoAcHZS4IbzVNgQERERERGNBJMzREREdFZ9VTMAsDwl1OLnvyw1XH68/niFxc9vDYbzZuazpdmwLZ0QjHAfqdpoe04t8mrarBzRyP1s0NLsgljOmzGl22f3V8+8u6fwrNt8fqgMbd1ScuzKKeHwc1dbJDYiIiIiIho9mJwhIiKis9pkMG/m4mTLtTTrMzbEE2ODPQEAh4sbUdbYYfEYLEmrE7HztHQXv6ezE6ZG+1o5IvvjpFTglpn9FQ6r95594d0eHNC3NAM4b8bUFo8Pltsm7s6tw+nq1gFf1+pEvJ/e/9kxbIVGRERERERkKkzOEBER0RnyalpxulqqOkiL9kWwl4tV4rhsUv8A7u+OO3Zrs4yyJjR2aAAAs8YEQKXkj2kjce20KLiplQCAr46UoblTY+WIhk8U++fNeLo4YVyol5UjcixKhYBbZ8bIz9/7RfXM5pNVKG3oBADMTQxEgj5JTEREREREZEr8rZ+IiIjOsNGgpZk1qmb6XDKxv52ao7c225HTP/tiQRJbmo2Ut6sKV0+NAAB0aXT4IcP+knr5te2ob+8BAEyL8YOS82ZMbtW0SHg6OwEA1h0tR31bt/w1w1Znd8xm1QwREREREZkHkzNERER0hg1Z/cmZZVaYN9Mn2t8dqZE+AIBTlS04WNRw/h3smOG8mXmJQVaMxP5dMzVSfrzuSJkVIxmZAwPmzbClmTl4ODvh2mnS56SnV4e1B0oASBVsB4saAQAJQR6YmxBgtRiJiIiIiMixMTlDREREAxTVteNUZQsAIDXSRx6wbi3XpvUvtP9hXSa6e7VWjMY86tu6kVHeDABICvFEiLd12sg5iuRwLyQGewAADhU3oqiu3coRDc/PBvNmmJwxn1tmxqCvKOnD/cXo7tUOaHF2++xYCAKrloiIiIiIyDyYnCEiIqIBNhpUzSy3YkuzPqvSIpAS7g0AyK1pw392FFg5ItPblVsLUZQeL0hi1YyxBEHAlVMi5Of2VD0jiiIOFEjJGVeVUv7sk+lF+rlh6QTpGlfb2o139xTie30bPF83Fa6YHG7N8IiIiIiIyMExOUNEREQDbMzqn9GxLNl6Lc36OCkVeOGqFHnuxr+25yGvptXKUZnW9uz+eTPzEzlvxhSumBwuV0WsO1oOnU60bkBDVNrQiaqWLgDA1GhfqJT8cd2cDGfK/GNTDnr1n5Mbp0fDRaW0VlhERERERDQK8Lc9IiIikpU2dCCjTGqvlRzuhSh/NytHJJkQ5o0750iLqD1aHZ5cl2k3i+2D0epE7MqVkjOezk6YEu1r5YgcQ7CXC2YnSImussZO/GyheUU1rV349lg5Hv8yA/d9fATF9cNrqbbfYN7MdLY0M7up0b5IjRhYnaRSCrh5RrSVIiIiIiIiotHCydoBEBERke3YZNDSzBaqZgz9bmEiNmZWoaShAweLGvHJwRLcON3+F1CPlzWhqUMDAJiTGMBKCRO6ako4dp2WEl9fHS7DhXH+Jj9HS5cGBwoasDevDun5dThd3Tbg66erW7H+/tlwVQ+tCoPzZixLEATcPjsWv/30mPzapRPDEOTFuU9ERERERGRedvnb//PPP49p06bB09MTQUFBuPzyy5GTkzNgG1EU8dRTTyEsr1ZxEAAAWGlJREFULAyurq6YP38+Tpw4MWCb7u5uPPDAAwgICIC7uzsuu+wylJXZT09yIiIiU9swoKWZ9efNGHJVK/HcFSny8xc2ZKNa3/7Jnu3IrpEfz0/kvBlTWjI+BB7O0r1IGzIr0dmjNfqYXRot0vPq8OKP2bj8X3sx6emfcNeHh/B+etEZiRlAmpP07IaTQz5+X3JG7aRAaqSP0fHS4JanhCLUuz8Zc7tBqzMiIiIiIiJzscvkzM6dO3Hfffdh//792Lx5M3p7e7FkyRK0t/e3jfjHP/6Bl19+GW+++SYOHjyIkJAQLF68GK2t/T3qf/e73+Hrr7/Gp59+ij179qCtrQ2XXHIJtFrjf3EnIiKyN5XNnTha0gQASArxRFygh3UDOovZCQG4Sj/ovbW7F3/59sQge9i+Haf7583MG8t5M6bkqlZiRYpUAdbeo8WPJ6oG2eP8Vu8tROrTP+GG/x3Av7bn41hpEwy76ykEIDXSB/fOj8dr102Ci0r6UXvN/hJsPlk96PErmztR0tABAJgU6cOZJxaiUirwwlUTEePvhvsWxCM53HvwnYiIiIiIiIxkl23NNm3aNOD56tWrERQUhMOHD2Pu3LkQRRGvvvoq/vj
HP+LKK68EAHzwwQcIDg7G2rVr8Zvf/AbNzc1499138dFHH2HRokUAgDVr1iAyMhJbtmzB0qVLzzhvd3c3uru75ectLS0AAI1GA41GY64/LhHZgb5rwGi+FoiiiJ5e3ZC3VzspIAiCGSOi4frheLn8eMn4IJv9PD++dAy251SjoV2DTSeqsOF4ORaPt52Kk+FcD+rauuUZP+NCPOHnqrTZv3d7dVlqMD47VAoA+PJwKVYkj+yzcqKiBU9/d2YFzJhAd8yI98fMOD9cEOMLL1eV/LXmjrH48/pTAIDHvjyO7+6bgeDztMtKz+1P1KVF+fCzYEEzY32w+XezAZj233L+fEBEfXg9ICJDvCYQObahfm8Loija/TTdvLw8JCQkIDMzE8nJySgoKEB8fDyOHDmCyZMny9utXLkSPj4++OCDD7Bt2zYsXLgQDQ0N8PXtH7ybmpqKyy+/HE8//fQZ53nqqafO+vratWvh5mYbA5OJiKyhTQO8eUKJys6hJ1tCXEXcO14Lb7UZA6NheT1LifxW6T18MrUXITb8T9uhWgEf5UlVBd4qEU9O0sLVDm85+blWwMf6P8eicB0ujRp6gpOGRicCfzuqRH23AAEinpqihY/z8I4hisC/TiqQ2yJVwqT46jDJX0SCt3jea5goAu/mKJDZKO2X6K3DPeN0UJzjUvlZvgLpNdK2947TYqyP3f+YTkRERERENOp0dHTghhtuQHNzM7y8vM65nR0uYwwkiiIefvhhzJ49G8nJyQCAqiqpZUVwcPCAbYODg1FcXCxvo1arByRm+rbp2/+XnnzySTz88MPy85aWFkRGRmLBggXw9zf9gFkish8ajQabN2/G4sWLoVKpBt/Bwfzlu5Oo7BzezK6qTgHr6wLw0W1pcOIAdKurae1Gwf6dAIC4AHfcfvUsK0d0fstEEcUfHcGu3Ho0awRkCDF4evl4a4cFYHjXg58+zwAg/dxx+8XTMS3G97zb08jku+ThzR0FECGg1X8cbpg7vJkiu3LrkLv/CAAgys8Vnz4wC2qnoV23ZszvwaX/2oea1m6cblag2icJd8yKOeu2r722F0A7nBQCfnP1Yrip7f5H9VFvtP98QET9eD0gIkO8JhA5tr6OW4Ox+9/47r//fmRkZGDPnj1nfO2X7XJEURy0hc75tnF2doaz85m3WqpUKl5IiQjA6Lwe5Ne24bNDUjssF5UCk4YwwPp0dRsa2ntwqLgJr24vwJPLxpk5ShrMtpxy9NXSrpgYahef42evmIglr+xCp0aLtT+X4copkUiL8bNqTKIooqa1G6VtwOnaTjg5nbuUWRSBvfn1AABPFydcEBfARKWZXDMtCm/uKAAAfHu8EvddlDDktopanYgXf8qVnz92cRLcXYdeehPso8Ir107Cr949AFEEXtqci9kJQWfMNalt7UZBnTQ/MSXCG97urkM+B9m+0fjzARGdHa8HRGSI1wQixzTU72u7Ts488MADWL9+PXbt2oWIiAj59ZCQEABSdUxoaKj8ek1NjVxNExISgp6eHjQ2Ng6onqmpqcHMmTMt9CcgIrJ/L27KgVY/Efve+WPw4MKEQfc5UtKIVf/Zh16diLd3FiAt2g+LxwcPuh+Zz8as/qrRZcmh59nSdkT6ueH3S8fime+lOSBPrMvEDw/OhrOTZYaot3ZpcLq6FTlVbcipakF2VStOV7eisUMDwAn/zNw/5GPNTQhkYsaMov3dMS3GFweLGpFb04bM8mZMjPAZ0r5fHy1HdlUrACA1whsrUob//TFrTAB+PScOb+8qgEYr4sFPj+L7B2YPqIw5WNQgP74g1rpJRiIiIiIiIjI/u1wFEEUR999/P9atW4dt27YhNnZga4rY2FiEhIRg8+bN8ms9PT3YuXOnnHiZOnUqVCrVgG0qKyuRlZXF5AwR0RAdLm7AphPSon6gpzPunDO0VkFTonzxh+X91TKPfH4MJfUdZomRBlff1o39BVIFR4y/G8aFelo5oqG7dWYMUiOkCoS8mja8tSPfLOdp7tDg22Pl+PumbNz+/kHMemEbUp76CVe9tQ9/+DoTH+wrxoHCBn1iZviWJoeYOGL6pSun9N/Is+5I+ZD26dJo8dJPOfLzJ5aNG3LFzS89smQsksOlXsMFte145vtTA77+c2F/cubCWLbLJSIiIiIicnR2WTlz3333Ye3atfj222/h6ekpz4jx9vaGq6srBEHA7373Ozz33HNISEhAQkICnnvuObi5ueGGG26Qt73jjjvwyCOPwN/fH35+fvj973+PlJQULFq0yJp/PCIiuyCKIp7fkC0/f2hR4rDmI9w2KwaHihuwIbMKLV29uHftYXx590y4qCxT9WAvRFHEvoJ6RPq6IdLPzSzn+OlkNfTFT1iWEjrixWdrUCoEPH/lRFz65h5odSL+vT0fl0wMxZgg0yWYujRarPzXHhQNIYEY5OmMhCAPiG21iImOgkIx+H0wicGeuHSifVQr2bMVE0Px1PoT6O7V4dtj5fjD8nGDzo1ZvbcIlc1dAICFSUGYET/ypInaSYHXrpuMS17fg06NFp/8XIJ5iYG4WJ+Y60uQCgIwlbOHiIiIiIiIHJ5dJmfeeustAMD8+fMHvL569WrceuutAIDHHnsMnZ2duPfee9HY2Ijp06fjp59+gqdn/2LNK6+8AicnJ6xatQqdnZ1YuHAh3n//fSiVXBgkIhrMTyercai4EQAQH+iOVWkRg+wxkCAI+PtVE3GqshWFde3IKm/BM9+fxLNXpJgjXLskiiL+9E0WPj5QAheVAu/fdgEujDP9HfUbMivlx8vtpKWZofFhXvj13Di8tSMfPVodnlyXic9+PQMKhWmSTB8fKDkjMePp7ITEEE8kBnsiKcQTY0M8MTbYE77uamg0GmzYsAHLl49n/2gb4uWiwpIJIfjueAUaOzTYnlODpRPOXbHU2N6Df+/IAwAoBODxZUlGxxAf6IG/XDoeT6zLBAA8sS4DkyJ94KJSIKdaap02PtQLXi783BARERERETk6u0zOiH0Ti89DEAQ89dRTeOqpp865jYuLC9544w288cYbJoyOiMjx9Wp1+Pum/qqZxy9OGtG8DE8XFf594xRc/q+96O7V4eMDJZgW44fLJ4ebMly79e8d+fj4QAkAoEujwx3vH8THd12ISZE+JjtHU0cP9umH0kf4usptl+zNbxcmYGNmJYrqO3CwqBGfHCzBjdOjjT5ue3cv/r09T37+2nWTkBbjhzBvF7uqMCLJlVPC8d3xCgDAuiNl503OvLk9D61dvQCAa6ZGIjHYNNVY106LxM7TtdiYVYWmDg0e/vwYbpkZg74fb6ezpRkREREREdGoYJczZ4iIyLo+O1SKgtp2AMC0GF8sHh884mONC/XCM5cny8+fXJeJXP0d5KPZuiNlePHHnAGvtfdocfO7B3CyosVk59l8shq9+p5my5JD7Dbh4KJS4jmDqqsXNmSjprXL6OOu3luI+vYeAMClqWFYOSkc4T6udvv3NNrNGROAQE9nAMC27Bo06t/bXypt6MCH+4oAAC4qBR5anGiyGARBwP
NXpiDU2wUAkJ5fj6fWn5C/fkGsn8nORURERERERLaLyRkiIhqW9u5evLI5V37+5PKRD8jusyotUm6L1qnR4u41h9He3WvUMe3Zntw6PPZlhvz8oUWJmKFvZ9bS1Yub3j2AvJo2k5xrY1aV/HhZiv21NDM0c0wArpkqfY5au3vxgsFMpJFo7tDg7V0FAKTZNg8tSjA6RrIuJ6UCl08KAwBotCK+y6g463Yv/pgDjVZKWt4xOxYh+kSKqfi4qfHyqknou3T2zbUBmJwhIiIiIiIaLZicISKiYfnv7gLUtXUDAJanhGBKlGkGV/91ZTKSQqS2Qfm17XhyXeaQ2lg6mlOVLbh7zWG5muVXF0bhwYVj8L9b0jAlygcAUN/egxv/tx8lQxhQfz4tXRrszq0FAIR6u2BShI9Rx7MFTyxLgrerNK9j3dFyHNAPWR+Jt3fly22trp4SgbhAD5PESNZ11dT++VhfHS474+uZZc1Yr2995ueuxm/mxZsljhnx/rjnF8dODPaAn7vaLOcjIiIiIiIi28LkDBERDVltazfe0VcSOCkEPLrU+AHZfVxUSrz1q6nwcJbGoa0/XoE1+nkro0VFUyduW30QbfqqoUXjgvH0ZckQBAHuzk5YfdsFmBAmzYSpbunGDf/bj8rmzhGfb+upark64OLkECgU9t+qy9/DGY8uHSs///O3J6DR6oZ9nNrWbqzeWwQAUCsVeJBVMw4jKcRL/j46XtaMvJr+NoqiKOK5Dafk5w9eNAZeLiqzxfLQ4kSkRnjLz1k1Q0RERERENHowOUNEREP22tbT6OjRAgBumB6F2AB3kx4/NsAdL149UX7+zHcnkVHWZNJz2KrmTg1uXf0zqlqk9kaTIn3wxvWToTRImHi7qvDRHdORECRVcJQ1duLG/x5AbWv3iM65IdOgpVmyfbc0M3T9BVGYqF/wzqluxQfpRcM+xr+256FT0/9ZD/dxNWWIZGVXTjGonjlSLj/ecboW+/TVVtH+brhherRZ41ApFXjtuskI93GFi0qBG818PiIiIiIiIrIdTM4QEdGQ5Ne24ZOfSwEA7molHlxonkqCZSmhuH1WLACgR6vDPWuOoKnj7EO7HUV3rxZ3f3QYp6ulOTLR/m5495Y0uKqVZ2zr567GmjunI9rfDQBQUNeOm949MOy/o7buXuw8LbU0C/R0xtRo07SnswVKhYBnVibL8zxe3ZKL6pau8+9koLypE2v1VVuuKiXuWzDGHGGSFa2cFAYnfeLz6yPl0OpEaHXigDlFjy4dC7WT+X9Ujglwx9ZH5iHjL0sxLtTL7OcjIiIiIiIi28DkDBERDcmLm3Kg1c9B+c28eAR4OJvtXE8sS5Lnq5Q3deKRz4877PwZnU7E419myHfr+7mr8cFtF8D/PH+/wV4u+PjO6QjTDynPrmrFLe/9jNYuzZDPuz27Bj29UruviyeEDKjQcQSpkT64bloUACkR9ewPpwbZo98bW3PRo2+FduusGAR6mu+zTtYR4OGM+WMDAQBVLV3Yl1+PdUfKkFMttThLjfDGihTLVZO5qJQWSQQRERERERGR7eBvgURENKjDxQ3YdEJqgRXo6Yw758Sa9XxqJwXevGEKfN2kWQ9bs2uwLbvGrOe0lhd/ysE3x6Th4y4qBd69JQ0xQ2gXF+Hrho/vulBOkh0va8Yd7x9CR0/vWbdv6dLgcHEDPj5QjL98m4V//NhfIbAsJcQEfxLb89jSsfJnaP3xCqTn1w26T2FdO77QD4n3dHHCb+bGmTVGsh7D1mYfHyjGy5tPy8+fXD4OguBYCUsiIiIiIiKyLU7WDoCIiGybNCC7fyH/4cWJcFOb/5+PMB9XPHtFCu79+AgA4J8/ncaCsUEOMbS+z0f7i/HWjnwAgEIAXr9uMiZHDb29WGyAOz6+czque2cfGjs0+LmoAb/56DAevzgJeTVtyK5qxenqVuRUtaK8qfOsx/B3V+OCGMccQu7rrsbjFyfhiXWZAIA/f3sCG387Byrlue9NeWXzablC7Ndz4uDjprZIrGR5C8cFwcvFCS1dvdiY1T9/aWFSEC6M87diZERERERERDQasHKGiIjO66eT1Thc3AgAGBPkgWumRgyyh+ksSw6RB7ufqmzBD5mVFju3uW0+WY2/fJslP3/qsglYMmH4FSxjQzzx4e3T4eksJcx259bhkjf24HefHcN/duZjW3bNORMzYd4ueOqyCXA6T7LC3q1Ki8SkSB8AQF5NG97bU3jObU9VtmD9camKyd9djdtmm7dCjKzL2UmJS1PDBrymEIDHlyVZKSIiIiIiIiIaTRx3NYaIiIym0erw9439VTOPX5xk0YV8QRDwyJKx8vNXNp9Gr34WiD3bfLIaD3xyBPoCDfxmXhxunhEz4uOlRHhj9W3T4KpSnvXrns5OmBrtixumR+GvKyfg019fiKP/txjpTy48Y3Ha0SgUAv52eTL6OlS9tjUXlc1nT1a99FN/W6t75sfDw5kFxo7uql8km6+ZGonEYE8rRUNERERERESjCVcdiIhsWHOHBm7OyvO2YTKnzw6WoqCuHQBwQYwfFo0LsngMcxMCcEGsH34ubEBBXTvWHS3HqrRIi8dhClqdiFc2n8ab2/+/vfuOrqJa+zj+O+kJaSSBFCAJvfdepFwpIkWkSQfBq4ANVFCvDb3Xcq0oKChXqiAiSJMiHWnSW5DeQguhJwTS5/0j5EDeQAjhlJTvZ62z1pmZPXs/M+dkE+bJ3vuIeV/H6iF6vc3D/6V+nXA/TRtUT2NWHZGvh7PKB3mpQpCXygd5K8THrUCvn1GlmI/61A/TtL9O6kZiiv6zaL++7VUrQ5mdkVe0Yv95SVKQt5v6NAizR6iwsZolfFWmqKeORF+Xm7ODhrcqZ++QAAAAAAAFBMkZAMilZm8/rdd+3a0qxbw1458N5O3mbLO245NSNH1zpEavuHOB7Ap2ecBvMpk0ok15dRu/SZL09YrDeqJGiFyd7j5KJLe6Epeol3/ZpT8PXTDva1ctWJ91q2axdXTqhPtpysB6Fqkrv3mtdXkt3ntOl+IStWjPOfWse1FNygaYj3++7KD5/UuPlpXbPUYhIX8xmUwa26umflx3XE/WLKYgHzd7hwQAAAAAKCCY1gwAcqHklFR9cethccSZGI34dbcMw7BJuzO3RKrF52v079//Vmx8siSpXdXgB1qo3tLqhvupWbkikqQzV29q5pZTdoslJyLOXFOHsevNiRlHB5PeeryixvasmeeSTHmVj4ez3rhjLZF350coITlFkrTx6EVtOHJJkhTm76FudWy3rhLsr0KQtz7rVl2NygTcvzAAAAAAABZCcgYAcqFVB6J17lq8efuPfef1YxYLmT+s1FRDC3afVauv/tQbv+3N0HaH6iH6uEtVq7WdXa/dsfbMmFVHdCMx2Y7RZN+v206py7iNOn0lbZ0T/0Iu+mlQff2zaakCPdWYPXSpVVx1wtKSjMcuxunH9cdlGIY+/+P2qJlhLcvabRpBAAAAAABQcDCtGQDkQj9tjsy075MlB1SjhK/qhPtZrB3DMLTqQLQ++
+OgDkTFZjj2aIWierV1eVUK8bZYew+janEfta0SpCURUbp4PUFTNp7UkOal7R3WPSUkp+iDhX9r+h2fZY0SvhrXp5aCfdztGFnB5eBg0gdPVFH7MeuUakhjVh6Rl6uTdkRelSSVC/RUx+rF7BskAAAAAAAoEPjTUADIZSIv3TBPf1XM113PNSslSUpONfTCjJ26dD3BIu1sOnpJXcZt1KAp2zIkZuqX9NOcIQ3144C6uSYxk+6VVuWUPthk/NqjiolPsm9A93Du2k099f1fGRIzveuH6pfnGpCYsbNKId7q3yhcknQzKUXvzN9nPvZKq/JytND6PwAAAAAAAFkhOQMAucz0LSfN73vVD9WI1uVVv2TaaJmomHgN+2WXUlJzvv7M/nMx6vvjZvWc8Jd5xIAkVSvuo2mD6mnmsw1UO8xyo3MsqWygl56smTay4drNJP1vnfWmesupTUcvqcOY9dp16qokycXJQZ92raYPn6zK+jK5xPBW5RTg6ZphX7XiPmpTOdBOEQEAAAAAgIKG5AwA5CIJySn6ddtpSZKzo0lP1S0hJ0cHjelVU0W80h4mrzt8Ud+sPJyj+mdtPaUnvt2gdYcvmveVLeqp8X1qa/7zjfVI2SK5fh2UYY+Wk9Ot0Q0/rjtmsZFED+tGYrK+XX1EfX7crIvXEyWljXyaM7iRutcpYefocCdvN2e91a5Chn2vtS6f67/7AAAAAAAg/2DNGQAPJTXV0JRNJ3To/PVslTeZpKZli6hN5UAehN7Fkr1RuhyX9mD/sSrB5r/uL+rlpm961FTv//2lVEP6ZtVh1Q4rrKblimSr3oTkFI1a8Ld+3nJ7mq0Sfu4a3rKcnqhRLE9N5RTq76Gn6pbQ9M2RiktM0fi1R/VWu0p2i+fM1ZuauumEft4cqZj4ZPP+R8oG6JseNVW4kIvdYsO9dapRTAt3n9OqA9FqWTFQj5QNsHdIAAAAAACgACE5A+ChTN98Uu8v/PuBzpmxOVKvtCqnlx4ta6Wo8q7pm29Pada7fmiGYw1L++u1NuX16dKDMgxp2C+7tOilJvddw+Ts1ZsaMn2Hdt+aZkuS+jYI09vtK+bZabZe/EdZzd5+WgnJqZq66aQGNSmlIB83m7VvGIZ2RF7RxPUntHRfVKZp5p5vUZr1S3I5k8mk7/vW1oFzsSof5EWyGAAAAAAA2BTTmgHIsdj4JI1ekbPptb5cfkj/W3fMwhHlbQeiYrT1xBVJaVONpa8zc6fBTUvr0QpFJUmX4xL1/PQdSkpJvWedG49eVIcx682JGVcnB33erbr+3alKnk3MSFKQj5v6NQyTJCUkp2rMqpx9Dx9UYnKq5u08o07fblCXcZu0aO85c2LGxdFBXWoV16KXmmhEmwokZvIAZ0cHVS3uIxcnfh0CAAAAAAC2xcgZADk24c9junRrCq42lQM1rGW5+56z6kC0PvvjoCTpP4v2y93FUb3rh1k1zrxi+l+3pxzrXT/0rn/J7+Bg0hfdq6vdN+t15upN7Yi8qk+WHNA77TNO62UYhiasO6ZPlhxQ+qCO4oXdNb5PbVUp5mPV67CVIc3LaMatqc1+2XpKzzUtrVB/D6u0dTkuUTM2n9TUTScVHZtxjZsATxf1aRCm3vXDzOsCAQAAAAAAAFkhOQMgR87HxGvCuuOS0hau/9fjFRXmX+i+51UM9lZKqqEvlx+SJL09L0Luzo7qXKu4VePN7eISkjV35xlJSrsfte99P3w9XDSuTy11HbdJiSmp+nH9cdUJK6y2VYMlSdcTkvX67D1atPec+Zym5Yro66dq5Kv1T/wKuWjQI6X0zcrDSk41NHrlIX3ZvYZF2zAMQ2NWHdG3q48oITnjCKVKwd4a2KSkOlQPztOjkAAAAAAAAGB7zOMBIEdGrzikm0kpkqTe9cOylZhJ9+I/yui5ZqUkSYYhvfbrbi25I5FQEM3bdUbXE9IWk3+iRoi83ZyzLF+tuK/eaV/RvD1y9h4dvxinoxeuq9O3GzIkZl78RxlNGlA3XyVm0j3zSEn5uKfdq7k7z+jw+ViL1r/m4AV9ufyQOTFjMqWNEvvl2QZa9FITda1dnMQMAAAAAAAAHhjJGQAP7PD5WP2y9ZQkycvVSS/+o8wDnW8ymfTGYxXMa4akGtJLM3dq9YFoi8eaFxiGoZ8yTGmWvWne+jQIU8fqIZKk2IRkDZy8VU+M3aAj0dclpX02E/rV0aut8+/C9N5uzhrcrLSktERf+ogsS0hKSdW/F/1t3u5VP1RrX2uh7/vWUf1S/iwgDwAAAAAAgBwjOQPggf136UHzOiaDm5eWv+eDr7NhMpk0qkNldb01fVdSiqHBP23XxqMXLRlqnrDz1FXtPxcjSape3EdVi2dvTRiTyaSPO1dV6SJpo5aOX4wzj74pH+ilBS82UatKgdYJOhfp3+j2Wi9LIqK09/Q1i9Q7ddNJHbsQJ0mqHVZYH3aqYrU1bQAAAAAAAFCwkJwB8EA2H7ukFfvPS5KCvN00sHHJHNfl4GDSf7tUU7tqaWulJCSn6pkp27T95BWLxJpX/PTXSfP73g2yN2omXSFXJ43rU1vuzren1upQPURzn2+kkgHZn2ouL/NwcdILLW6P3vr0jwMyDOOh6rwcl6ivV9wehfNu+0qMlAEAAAAAAIDFkJwBkG2GYejjJQfM26+0Kid3l4dbb8PRwaTRT9VQy4pFJUk3ElM0YNIWRZyxzOiH3O5KXKJ+35O2Poy3m5M6VAt54DrKBXpp4oC6almxqD7uXFXf9KghDxcnS4eaq/WoV0LFfN0lSesOX9T0zZH3OSNrXy0/pJj4tFFIXWoVV/USvg8bIgAAAAAAAGBGcgZAti2JiNKuU1clSeUCPdXl1pRkD8vZ0UFje9VSkzIBkqTY+GT1/XGzDll4cffcaM6O00q8tdh8l9rFc5zsaljaX//rX1c964UWyBEerk6Oer9jZfP2fxb9rSPROfv+HIyK1fTNaaOZPFwcNfKx8haJEQAAAAAAAEhHcgZAtiQmp+rTpbdHzbzZtqJFF5l3c3bUD/1qq05YYUnSlRtJ6vO/zTpxMc5ibeQ2qalGhhEeves/2JRmyKhlpUD1aRAqSYpPStWLP+9SQnLKA9VhGIb+s+hv85pKQ5uXVqC3m6VDBQAAAAAAQAFHcgZAtvy8JVInLt2QJDUo5afm5YtYvA0PFydNfLquqhbzkSRFxyao/6QtD/yAPa/YePSSjt9KPjUs5a8yRT3tHFHe99bjlVT21n3cfy5Gny49+EDnr9wfrXWHL0qSivm665lHSlk8RgAAAAAAAIDkDID7io1P0tcrD5u332xb0WpTZ3m7OWvqwHoqH+glSTp56Ybm7zprlbbs7ae/Tprf92nAqBlLcHdx1Dc9a8rFMe2ftx/XH9efhy5k69zE5FR9uHi/eftfj1eUm/PDrakEAAAAAAAA3A3JGQD39cOfx3Q5LlGS1KF6iNUXRy9cyEUfda5q3p7w5zGlps8zlU+cj4nX8v3nJUlFvFzVunKgnSPKPyoGe+uNthXM26/M2q2L1xPue97UTSfMI5nqhfvp
+      "image/png": "<base64-encoded PNG of the ARIMA forecast plot omitted>",
+      "text/plain": [
+       "<Figure size 2000x700 with 1 Axes>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
     "#We are going to plot the ARIMA predictions, and the prediction intervals.\n",
     "fig, ax = plt.subplots(1, 1, figsize = (20, 7))\n",
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
static_0static_1static_2unique_id
00.2688440.8759460.0476050
10.9951510.3760250.4975791
20.1366130.0609340.3192902
30.0844190.9189990.8200503
40.7743600.6850720.1131914
\n", + "
" + ], + "text/plain": [ + " static_0 static_1 static_2 unique_id\n", + "0 0.268844 0.875946 0.047605 0\n", + "1 0.995151 0.376025 0.497579 1\n", + "2 0.136613 0.060934 0.319290 2\n", + "3 0.084419 0.918999 0.820050 3\n", + "4 0.774360 0.685072 0.113191 4" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "static_df" ] @@ -311,7 +763,121 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
unique_iddsytrendy_[lag12]
140Airline11960-09-30508.0140463.0
141Airline11960-10-31461.0141407.0
142Airline11960-11-30390.0142362.0
143Airline11960-12-31432.0143405.0
284Airline21960-09-30808.0284763.0
285Airline21960-10-31761.0285707.0
286Airline21960-11-30690.0286662.0
287Airline21960-12-31732.0287705.0
\n", + "
" + ], + "text/plain": [ + " unique_id ds y trend y_[lag12]\n", + "140 Airline1 1960-09-30 508.0 140 463.0\n", + "141 Airline1 1960-10-31 461.0 141 407.0\n", + "142 Airline1 1960-11-30 390.0 142 362.0\n", + "143 Airline1 1960-12-31 432.0 143 405.0\n", + "284 Airline2 1960-09-30 808.0 284 763.0\n", + "285 Airline2 1960-10-31 761.0 285 707.0\n", + "286 Airline2 1960-11-30 690.0 286 662.0\n", + "287 Airline2 1960-12-31 732.0 287 705.0" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "#| export\n", "\n", @@ -348,7 +914,18 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABmcAAAKHCAYAAAB0L5wRAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3wU1frH8c+mB0gCBEgIEHrvgiCo9CIdRBEVFbHda7tYr10s14JXxCv6U5QOglgQqVKkI9J76KETQmghhPT5/THskpjObnaS8H2/XnnN2d2Zc55tg86z5zw2wzAMRERERERERERERERExC08rA5ARERERERERERERETkRqLkjIiIiIiIiIiIiIiIiBspOSMiIiIiIiIiIiIiIuJGSs6IiIiIiIiIiIiIiIi4kZIzIiIiIiIiIiIiIiIibqTkjIiIiIiIiIiIiIiIiBspOSMiIiIiIiIiIiIiIuJGSs6IiIiIiIiIiIiIiIi4kZIzIiIiIiIiIiIiIiIibqTkjIiIiIgUWsuXL8dms2Gz2RgxYoTV4YhIETdx4kTHOWXixIlWhyMiIiIiNzAlZ0RERESkQI0aNcpxMdRmszFjxgyrQ8oQz9//SpUqRXh4OL179+bLL78kNjbW6nBFcnX48OEcP9eBgYHUrFmTgQMHMnnyZBISEqwOucjL7rX29vYmODiY6tWr07ZtW5566inGjRtHdHS0JXGOHj2aESNGMHr0aEvGFxEREZGsKTkjIiIiIgVq/PjxGW6PGzfOokjy5vLlyxw7dox58+bx9NNPU6dOHX7//XerwxJxyqVLlzh06BC//PILDz30EI0bN2bTpk1Wh1UspaSkcO7cOQ4fPsyff/7JV199xaOPPkrlypW588472blzp1vjGT16NO+8846SMyIiIiKFjJfVAYiIiIhI8bVu3Tp27dqV4b6lS5dy+PBhqlWrluvxHTp0wDCMAorONGvWrAy3L126xNatW5k8eTIxMTGcPn2afv36sWLFClq3bl2gsYi4Qvny5Rk7dmyG+y5cuMBff/3F1KlTiYuL48CBA3Tr1o0NGzZQo0YNiyItPrI6j1y4cIGIiAjWrVvHli1bSE5OZtasWcybN4///Oc/vPjiixZFKyIiIiKFgZIzIiIiIlJg0s+Sefjhh5kwYQKGYTBhwgTeeecdCyO7pn///pnue+CBB3jttdfo0aMHGzZsIDExkeHDh/Pnn3+6P0CRfCpRokSWn+uhQ4fyyiuv0LFjRyIjIzl37hyvv/4606dPd3+QxUxWr3d627dv54033mDOnDkkJSXx0ksv4enpyXPPPeeeAEVERESk0NGyZiIiIiJSIC5fvswPP/wAQPXq1fn8888pVaoUABMmTCAtLc3K8HIVHBzMpEmTHLfXrVvH0aNHLYxIxHlVq1blq6++ctz+7bffSExMtDCiG0OTJk347bffePvttx33vfjii1paTkREROQGpuSMiIiIiBSImTNncunSJcCciRIQEMDAgQMBOHbsGIsXL861j+XLlzuKbI8YMSLLfapVq4bNZnMsk5aYmMiXX35Jhw4dqFixIp6ennlaQi0r9evXp3bt2o7bO3bscLQTEhKYPXs2zz77LG3btqV8+fJ4e3sTEBBA7dq1eeCBB/L0HAFiY2P59NNP6dixIyEhIfj4+DgKuLdt25bnn3+ehQsXkpSUlOXxUVFRvPPOO9x6662UK1cOb29vSpcuTZ06dWjXrh2vv/46y5cvzzUhtnXrVv71r3/RtGlTypYti6+vL2FhYfTq1Yvx48eTkpKS4/H296pDhw6O1+h///sfbdq0ITg4GH9/f2rWrMkTTzzBoUOH8vTaXL58mQ8++IAWLVoQFBREQEAAjRo14vXXX+fUqVOAOSPEPvbhw4dz7O/ixYt8+umndOnShbCwMHx9fSlbtiwtWrTg1Vdf5cSJEzken9VYv/76K3feeSdVq1bF19c3yzhWrVrFsGHDqF+/PgEBAfj4+BAaGkrjxo0ZMGAAX375JZGRkXl6TZzVpUsX/P39AYiPj+fAgQOOxy5dusSMGTN44oknuPnmmylbtqzj89SgQQMee+wx1q9fn+sYWb1Oy5YtY9CgQYSHh+Pr60uFChXo2bNnpiXBcpKQkMA333xD7969qVKlCn5+fgQFBdGoUSOeffZZ9u3bl78Xw81GjBhB3759AUhLS8v2vAawb98+Ro0axYABA6hduzalSpXCx8eHChUq0K5dO95//31iYmKyPd5+bjxy5AgAR44ccbwn6f/+HoNhGKxZs4a33nqLrl27UrlyZfz8/PD396dy5cr07duX8ePHZ3s+EhEREZE8MkRERERECsCtt95qAAZgHDhwwDAMw/jjjz8c991999259rFs2TLH/m+//XaW+1StWtUAjKpVqxqRkZFGo0aNHMfY/6pWrZrhmPSP5aZt27aOfadNm+a4v3r16pnGyeqvX79+xqVLl7Ltf+PGjUZoaGie+tqwYUOm4+fPn28EBATk6fgzZ85kGUNCQoIxbNgww2az5Xh8w4YNjYMHD2b7XOz7tW/f3jh06JDRuHHjbPsqWbKksWTJkhxf+4iICMf7m9Vf+fLljZUrVxoPPfSQ477IyMhs+5s5c6ZRtmzZHJ+jn5+fMXHixGz7SD/W3r17jYEDB2bZjz2O1NRU44knnsjT+9OrV68cX4+cREZGZvt5z0pYWJhj/zVr1hiGYRiJiYmGn59fnmJ94oknjOTk5Dy9TocOHTKeeeaZHPv7xz/+kWvMy5cvNypVqpRjP56ensYHH3yQbR8TJkxw7DthwoRcx8xJfs4j6W3dujXDsadOncq0z6RJk/L0PgQGB
hpz587Ncpycvjvp//5+bn344YfzdFy9evWMffv25eu5i4iIiMg1qjkjIiIiIi63d+9e1qxZA8Btt91GzZo1AejQoQPVqlXj8OHDzJ49m5iYGMqVK+eSMRMTE7nzzjvZuXMnt9xyC3fddRdVqlThwoULGWa85Fd0dLSjXbp0aUc7Pj6e0qVL06lTJ5o3b07VqlUpUaIEsbGxbN++nR9++IFTp04xe/Zshg0bxsyZMzP1HR8fT//+/YmKigKgRYsWDBgwgEqVKlGyZEnOnz9PREQEy5YtY9u2bZmOP3nyJIMGDSIuLg6A9u3b06tXL0JDQ/H19SUmJoadO3eydOnSbGcUpKSkcMcdd7B8+XIAQkJCGDx4MM2aNaNkyZKcOHGCWbNmsXLlSnbt2kW7du3YsmUL5cuXz/Y1i42NpVevXkRERNCtWzd69+5NaGgoUVFRTJ48mY0bN3L58mXuvfde9uzZQ9myZTP1cebMGTp16uSYHRMeHs6wYcOoW7cucXFxLFq0iJ9++ok777yTpk2bZhuL3bfffssTTzyBYRh4eXnRu3dvOnXqRGhoKJcvX2bNmjVMmzaNK1euMHToUHx8fLj33ntz7HP48OEsWLCAqlWr8uCDD1KvXj2SkpJYv349vr6+AIwZM4ZvvvkGgICAAO666y5atGhB+fLlSUpK4vjx42zcuJElS5bk+hxcJSUlhXPnzjlu2z/XaWlpJCQkEBISQufOnWnatClhYWH4+/tz/vx5Nm7cyMyZMzl//jzffPMNgYGBjBw5Mtfx3njjDb7//nuqVavGAw88QP369UlJSeGPP/5gypQppKam8vXXX9O2bVseeOCBLPtYsGAB/fr1Izk5GZvNRpcuXejevTuVK1cmKSmJjRs3MnnyZC5cuMBrr70GwKuvvur8i1UAmjZtSuPGjR3npeXLlzN48OAM+8THx2Oz2WjatCnt2rWjXr16ju/J8ePHWbJkCQsXLiQ2NpaBAweydu1abrrppgx9jB07lvj4eB5//HHOnDlD+fLlGTt2bKZ46tWrl2lsHx8fbrvtNlq3bk2tWrUIDAwkMTGRAwcO8Msvv7B9+3b27NlDjx492Lx5M4GBga58iURERERuDFZnh0RERESk+HnppZccv67+9ttvMzz25ptvOh777LPPcuwnPzNn7H8fffRRrvGl3z8nu3fvzrDv0aNHHY/Nnz/fSEpKyvbYy5cvGwMGDHAcu2rVqkz7/Pjjj47HX3jhhRxj2bVrlxEdHZ3hvk8++cRx/BdffJHj8X/99Zdx5cqVTPe/8sorjj7uvfdeIy4uLsvjx4wZ49jv/vvvz3Kf9K+Vl5eXMXPmzEz7pKSkGH369HHs99///jfLvh588EHHPp06dcoyrrlz5xo+Pj5ZzlhJb9u2bYavr68BGFWqVDG2bt2a5Zh79uwxKleubABGQECAcfbs2Uz7pJ8RAhj9+/fP8nW1a9iwoQEYZcuWNY4cOZLtfgkJCca6deuyfTw3+Zk5s2DBAse+JUqUMBISEgzDMN+b+fPnG6mpqdkeGxMT45hN5unpaRw+fDjL/f7+Og0aNMgxTno//PCDY59GjRpl2dfJkycdM56CgoKMpUuXZrtfkyZNHLFFRERk2qcwzJwxDMN4/PHHHcc+99xzmR7fuXOnsX///hz7WLJkiVGiRAkDMDp37pztfulnF+bFihUrjHPnzmX7eFpamvHhhx864n/vvffy1K+IiIiIZKSaMyIiIiLiUikpKUyePBkAPz8/7r777gyPP/TQQ472uHHjXDp2v379+Pe//+2Svs6fP8/DDz/suH3LLbdQpUoVx+0ePXrg7e2d7fElSpRg4sSJlCxZEoBJkyZl2id9rY9hw4blGE+DBg0yzVbJz/GtWrXCz88vw33R0dGMHj0agJYtWzJlyhRHvH/31FNPcf/99wMwY8aMXGuzvPrqq5neewBPT0/++9//Om4vWLAg0z6nT59m+vTpAAQFBTF9+vQs4+rVqxcvv/xyjnGAWecjMTERT09PZs+ene1Mm7p16zJhwgTArL3y7bff5thvpUqVmDJlSqbXNT37e9S5c2fCw8Oz3c/X15fWrVvn9lScduzYMZ566inH7b59+zpm+Xh6etKjRw88PLL/38Tg4GDH9zs1NZWpU6fmOmbt2rWZNGmSY5z0Bg0aRNu2bQHYuXMnx48fz7TPJ5984pjpM3nyZDp16pTlOBUrVmTmzJl4enqSmprK559/nmtsVklfByv97Dy7hg0bUqtWrRz76Ny5My+88AIAS5cuzfU7mVft2rWjTJky2T5us9l45ZVXuP3224Gsz20iIiIikjslZ0RERETEpebMmcPp06cB6N+/P0FBQRker1mzJrfddhtgXozNS3HxvHr22Wfzfcyvv/6a4W/q1Km89NJL1KtXj7/++gsAHx8fRo0ale++AwMDady4MQDr1q3L9Hj6hMOmTZvy3b+zx//www8kJCQA8OKLL+Lp6Znj/g8++CBgXpRfunRptvt5eHjwr3/9K9vH69Sp40h07dq1K9Pj8+bNIzk5GYD777+fChUqZNvXM888g5dX9qs1X7hwgdmzZwPQtWtXmjdvnu2+AF26dCEsLAyA33//Pcd9hw0bRqlSpXLcx/4e7dixw20F1OPj4zN9ridNmsRTTz1Fo0aNOHToEGAuZ/b+++/nu/+aNWsSGhoKZP25/rsnn3wyxwRW586dHe2/fx4Mw2DKlCmAmTzr27dvjmPVrVuXVq1aAbm/f1ZKn/xIv8Rcft16662Odl7eC1eyj33gwAHOnj3r1rFFREREigPVnBERERERl0o/Gyb9LJn0hg4dyurVqwEYP36842KqMzw9PR2/wM+PAQMG5Ph4+fLlmThxIm3atMn02Pnz55k2bRoLFy5k586dnD17lsuXL2MYRqZ9s5oR0KVLF2w2G4Zh8M9//pP9+/czePBgGjRokKfYu3Xr5kga3Xnnnfz73/9m4MCBVK9ePU/Hr1y5MsNz+fXXX3PcP/0v83fv3p3tfnXr1iU4ODjHvipVqsSxY8c4f/58psc2bNjgaHfs2DHHfipUqECDBg3Yvn17lo+vWbOGtLQ0wKz5kttzBBwJl5yeI+CYOZCTbt26MWPGDPbs2UPnzp157rnn6NatW65JHWecOXMm18919erVmTFjhqMeVHonT55kypQpLF26lN27d3P+/Hni4+Oz7Cerz/Xf5fa9rFSpkqP998/D7t27iYmJASA0NDRP7589yRgZGUlCQkKOiSGrZHWOyMrq1auZPn0669ev59ChQ1y6dMmRuPy7vLwXeZWSksIvv/zCr7/+ytatWzl58iSXLl1yfJeyGju377yIiIiIZKTkjIiIiIi4zMmTJ1m4cCFgLjHUtWvXLPcbNGgQzz77LPHx8UyfPp1Ro0ZRokQJp8YODg52yUVYf39/goODady4MT169OCBBx5wFExPb/bs2TzyyCN5/sV4bGxspvvq16/PG2+8wXvvvcfly5d57733eO+996hQoQK33XYb7dq144477qBu3bpZ9tm9e3cefPBBJk+eTExMDC+9
9BIvvfQS4eHh3HrrrbRv356ePXtmWI4tvcOHDzva//znP/P0POxy+rV/uXLlcj3evsRVYmJipsdOnjzpaGeVPPi7mjVrZpucSf8cf/zxR3788cdc+7PLbUZD5cqVc+3j448/ZvXq1Rw/fpzVq1ezevVqvLy8aNasGbfffjsdOnSgW7duBZ5AKFmyJOXLl6d58+b06dOHwYMH4+/vn2m/b775hueffz7bZMzfZfW5/rvcPg/plzuzz+SyS//+rVixghUrVuQpLrtz5845ZkIVJumTUFklNeLi4njggQfylIyyy8t7kRd79+7lzjvvzDU5WRBji4iIiNxIlJwREREREZeZOHEiqampgLkcVXbLZAUEBDBgwACmTZtGbGwsP/30k2PJrOuV1YXmvMjrL9jT+/PPP7nrrrtISUkBoEmTJnTp0oVatWpRpkwZfH19sdlsALzxxhvs2rUr21+cv/vuu7Rq1YqPPvqINWvWAGYNil9++YVffvkFMJcP+vTTT7OsSTJp0iQ6d+7MZ599xtatWwE4evQoR48eZfr06dhsNnr06MGoUaMyJXkuXLiQ7+dul9MSXTnVLMmLy5cvO9p5SdrltI8zzzG7GQp2efnMhYeHs2XLFj744AMmT57M2bNnSUlJYePGjWzcuJHPPvuMwMBA/vWvf/H6669nWZclv6pWrZohqZFXP/74I//4xz8ct9u0aUP79u2pXr06QUFBGWJ7/PHHOXPmjOP7nhNnPg/OvH+Q8+fUSpGRkY7232tJAdxzzz3Mnz8fMBNrvXr1onnz5oSFhVGiRAnHUn47d+7kzTffBMjTe5Gbixcv0qlTJ0eCNCwsjF69elG/fn1CQkLw8/NzvJ8zZszghx9+cNnYIiIiIjcaJWdERERExCUMw2D8+PGO2//9738zFH7Pybhx45xOzrjTW2+95UjMfPnllzz55JPZ7vuf//wn1/569+5N7969OX36NKtWreLPP/9kxYoVbN68GcMwWLNmDbfffjvz58+nS5cumY5/8MEHefDBBzl69Kjj+GXLlrF7924Mw2D+/PmsWrWKNWvWOGrgABmW1jp//nyWM4SskL6WTl5mcKRP5vxd+uc4evToHGvhFJRy5coxatQoPvnkEzZt2sTatWtZs2YNf/zxB+fOnSM2Npb33nuPNWvWsHjxYqeTW9frtddeA8xlwWbNmkWfPn2y3fexxx5zS0zp37/hw4fz2WefuWXcgvbnn3862n9Puq5Zs8aRmGncuDGLFi1y1Pj5O29vb5fGNWbMGEdi5v7772f8+PH4+Phkua89mSwiIiIi18ea/+oXERERkWJnxYoVHDx48LqOXblyJfv373dxRAUjOTmZ5cuXA9CiRYscEzNAvmYwhISEcNddd/Hpp5+yceNGDh8+zF133eUY97nnnsvx+PDwcO6//37GjBnDrl272LVrF+3btwfg0qVLjovvdumX5fp7IXYrpV+GKi+fKXuB+6ykf447d+50LjAneXp60qpVK4YPH86PP/7I6dOnmTlzJkFBQQD88ccfzJo1y5LYIiMjOXDgAAD9+/fPMTETGxvrVBH7/ChM75+rbN26NcP3rUOHDhkeX7RokaP9wQcfZJuYgYwzcFzBPraXlxdffPFFtomZghhbRERE5EajmTMiIiIi4hLjxo1ztAcMGECTJk1yPWb9+vUsWLAAgPHjx/Phhx8WWHyuEhMT45g1U6tWrRz3Xb9+vaOY+fUIDw/n+++/Z8WKFZw5c4adO3dy4cKFPM9wadCgAb/88gvly5cnLS2NVatWZXi8Q4cOzJ07F4BffvmFW2+99bpjdaWbb76Zr7/+GoBly5Y5ElRZiY6OzjGx1L59e2w2G4ZhMHfuXJKSknK84OxOXl5e3H333Zw4ccKReFu1ahUDBw50eyxRUVGOdm6f64ULF2a7TJ+rNWvWjNKlS3PhwgVWrVpFTExMnmoaFWZvvfWWo92vXz9CQkIyPJ6f98I+wyYn9plYeVnC0T52cHAwZcqUyXa/hIQEli1blmt/IiIiIpI9JWdERERExGkXL17k559/BszZAV999VWOv/a227dvnyM5M2nSJN5///1s69QUFumX3LLPNMjO22+/7fR43t7eVKpUiTNnzgA4EkN5VbZsWQIDA7lw4UKmGiqDBw/m9ddfJzExka+//pp//vOfuV4MdodevXrh7e1NcnIy06ZNY8SIEVnW5QD44osvcqx3Ua5cOXr16sXcuXOJiori008/5dVXXy2o0K9L9erVHe38vr+uktfPdVJSUp6W6nMVT09PhgwZwpgxY0hMTOT111/nm2++cdv4rjZixAjmzJkDmEmTESNGZNrn7+9FvXr1suxr7dq1LFy4MNcx7UvD5bT839/Hjo6OJjY2lsDAwCz3Gz16tNtmT4mIiIgUV1rWTERERESc9v3333PlyhUAunXrlqfEDECdOnW45ZZbADh16lSefgVutcDAQOrUqQPApk2b+OmnnzLtk5qaynPPPZfrhdP//e9//PjjjzkWLV+1ahXbt28HzCWe0s8aeOedd/j9999znMXw/fffO4qqN2/ePMNjlSpVcszYiI+Pp3v37mzZsiXHmHfu3JmhaHxBCAkJ4d577wXMxN/gwYOzvLA8b948Ro4cmWt/77//vqOY/RtvvMHnn3+e4yyCixcvMnr0aJYsWXKdz8B06tQpXnjhhRyXZktOTmbs2LGO282aNXNqzOtVr149x0X82bNnZ6iJYnflyhWGDBni+Dy6y2uvvUbZsmUBGDt2LP/+978zJRrTu3LlChMmTGDGjBnuCjFXO3bsoF+/frzzzjuO+0aPHp3l+33zzTc72u+88w4JCQmZ9tm+fTt33313nmbD2JN/Z8+e5ejRoznuax/bMAxef/31LPeZPn16htk/IiIiInJ9NHNGRERERJyWfkmzBx98MF/HPvjgg6xbt87RT061LgqL4cOHO2rNDBo0iHvuuYf27dtTpkwZDhw4wLRp04iIiKBRo0b4+vqyadOmLPvZvHkzkyZNIigoiO7du3PTTTdRuXJlvLy8iI6OZtmyZcydO9eRfPl7zZhly5YxYsQIKlSoQPfu3WnWrBkVK1bEZrNx6tQpFixYkCHB8PfjwUxcbNu2jQULFnDo0CFatmzJHXfcQadOnahUqRI2m42zZ8+yc+dOli9fTkREBJ6eno5lxwrKf//7XxYvXsypU6f4448/aNCgAcOGDaNevXrExcWxaNEifvzxR8qWLUuzZs1YunQpcG0Jp/SaNm3Kd999x0MPPURaWhrDhw/nq6++YsCAAdSvX5+SJUty6dIlDh48yPr161mxYgVJSUlMmTLFqeeQmJjIqFGjGDVqFC1atOD222+nQYMGlC5dmri4OA4ePMj06dMdNXNq1KjB4MGDnRrzevn4+PDkk08ycuRIUlJSaN++PUOHDqVVq1aULFmS3bt3M2nSJI4dO0bnzp3Zu3cvx48fd0tsFStW5Mcff6RXr14kJCQwcuRIpk2bxt13302TJk0ICAjg8uXLHDlyhI0bN7J06VLi4+N577333BIfwK+//pr
hdlxcHBcuXGDPnj38+eefbN682fGYr68vH374Ic8880yWfd15552Eh4dz9OhRNm7cSN26dXn00UepVasW8fHxrFixghkzZpCcnMxDDz3EpEmTcoytS5cu/Pbbb46+//GPfxAWFub4rtSqVcsxY+7pp59m/PjxpKSkMGbMGDZv3sxdd91FpUqVOH36NLNnz2bp0qWUKlWKvn37OmZMioiIiMh1MEREREREnLB161YDMAAjKCjIuHLlSr6OP3funOHr62sAhpeXlxEVFeV4bNmyZY6+33777SyPr1q1qgEYVatWzfOY9j6v9z+H09LSjGHDhmXo5+9/jRs3Ng4dOmS0b98+27EefvjhHPuw/3l7exvvv/9+puM7duyYp+NLlixpjB8/Ptvnk5ycbLz00kuGt7d3nvrL7rW2P96+fftcX8OcXhe73bt3G+Hh4dnGERwcbCxfvty4//77HfedO3cu2/4WLVpkVK5cOU/P0dfX11iwYEGmPh566CHHPpGRkTk+x8OHD+dpLMBo1KiRceDAgVxft+xERkbm+v7kJjEx0bjjjjtyjLN9+/ZGTExMrt+7/LxOEyZMcOw7YcKEbPfbvHmzUa9evTy9np6ensa333573WPlRV7f2/Tf4wEDBhi7du3Kte+NGzca5cqVy/H5ffTRR3k6R8bFxeX4uv39uHHjxhleXl7Z7l+2bFlj4cKFxttvv+24b9myZU69liIiIiI3Is2cERERERGnpJ81c/fdd+Pn55ev48uUKUOfPn346aefSElJYdKkSbz88suuDtOlbDYb48aNo1evXowdO5aNGzcSGxtLcHAwdevW5e677+aRRx7J9bX4+uuvGTp0KMuWLWP16tXs3buXM2fOkJKSQmBgILVr16ZDhw488sgj1K5dO9Pxc+fOZfXq1Sxbtoy1a9dy4MABYmJiMAyD0qVLU69ePbp06cKjjz5KWFhYtnF4eXkxcuRIx6/m//jjD/bv38+5c+fw8PAgODiYOnXq0Lp1a7p37067du2cfg3zon79+uzevZvPP/+cn376iQMHDmAYBlWqVKFPnz48++yzVKpUiY8++sjxPLKrkQHQtWtXx2yVefPmsXHjRs6cOUNCQgIBAQFUq1aNpk2b0qlTJ/r06UPp0qWdir9q1aocPXqUZcuWsWzZMjZv3szRo0e5dOkSPj4+hIaG0rx5cwYOHMigQYPw8rL2f898fHyYN28eEydOZNKkSWzbto0rV65Qvnx5GjVqxH333ceQIUOynJ3kDs2bN2fXrl3MmjWL2bNns27dOk6fPs3ly5cpVaoUVapUoXHjxnTs2JE+ffrkeXlFV/Py8iIgIIDAwEAqVqxI8+bNadGiBX379s22dtLftWjRgu3bt/Ppp58yd+5cjhw5gpeXF2FhYXTs2JHHH3+cm266ieXLl+faV8mSJVm3bh2jRo1i/vz57N+/n0uXLmW7HOKwYcNo1qwZo0aNYsWKFZw+fZqAgADCw8Pp06ePY+ZNVkvfiYiIiEje2QwjD4vUioiIiIiIFEJpaWmEhoZy5swZmjZtytatW60OSUREREREJFfW/ORJRERERETEBX744QfOnDkDQMeOHS2ORkREREREJG+UnBERERERkUJp3bp1JCQkZPv46tWreeqppwDw8PDg8ccfd1doIiIiIiIiTlHNGRERERERKZQ++ugjVq5cSY8ePWjZsqWjbs6JEydYsmQJCxcuxL5K88svv0z9+vWtDFdERERERCTPVHNGREREREQKpf79+zN79uwc97HZbLzwwgt8/PHHlhWqFxERERERyS8lZ0REREREpFA6cOAAv/32G4sXL+bgwYOcPXuW2NhYAgICCA8Pp3379jz++OM0bNjQ6lBFRERERETyRckZERERERERERERERERN1LNGSekpaVx8uRJAgICsNlsVocjIiIiIiIiIiIiIiIWMgyDS5cuERYWluPSy0rOOOHkyZNUqVLF6jBERERERERERERERKQQOXbsGJUrV872cSVnnBAQEABAZGQkZcuWtTgaEbFScnIyixYtolu3bnh7e1sdjohYSOcDEbHT+UBE7HQ+EJH0dE4QKd5iY2OpUqWKI3+QHSVnnGBfyiwgIIDAwECLoxERKyUnJ1OiRAkCAwP1H1YiNzidD0TETucDEbHT+UBE0tM5QeTGkFsplOwXPBMRERERERERERERERGXU3JGRERERERERERERETEjZScERERERERERERERERcSMlZ0RERERERERERERERNxIyRkRERERERERERERERE3UnJGRERERERERERERETEjbysDuBGlJycTGpqqtVhSDHi6emJt7e31WGIiIiIiIiIiIiISB4oOeNGsbGxxMTEkJiYaHUoUgz5+vpSrlw5AgMDrQ5FRERERERERERERHKg5IybxMbGcuLECUqVKkW5cuXw9vbGZrNZHZYUA4ZhkJyczMWLFzlx4gSAEjQiIiIiIiIiIiIihZiSM24SExNDqVKlqFy5spIy4nL+/v4EBARw/PhxYmJilJwRERERERERERERKcQ8rA7gRpCcnExiYiJBQUFKzEiBsdlsBAUFkZiYSHJystXhiIiIiIiIiIiIiEg2lJxxg9TUVAAVbJcCZ/+M2T9zIiIiIiIiIiIiIlL4KDnjRpo1IwVNnzERERERERERERGRwk/JGRERERERERERERERETdSckZERERERERERERERMSNlJwRERERERERERERERFxIyVnpMAsX74cm83GhQsXrA7FIS8xTZw4kdKlS7stJhERERERERERERG5sSg5IwWmbdu2nDp1iqCgIKtDcSiMMYmIiIiIiIiIiIjIjcXL6gCk+PLx8SE0NNTqMDIojDGJiIiIiIiIiIiIyI1FM2ckW9WqVWP06NEZ7mvWrBkjRowAwGaz8d133zFgwABKlChB7dq1+e233xz7ZrWE2MSJEwkPD6dEiRIMGDCATz/9NMMSYkOHDqV///4Zxhw+fDgdOnRw3DYMg5EjR1KjRg38/f1p2rQpP/30U56eU15iOnv2bJ76EhERERERERERERG5HkrOiFPeeecdBg0axPbt2+nZsyf3338/586dy3Lfv/76i2HDhvHkk0+ydetWOnbsyPvvv5/vMd944w0mTJjA//3f/7Fr1y6ee+45hgwZwooVK/Ldl6tiEhEREREREREREZE8unwWpg6Edf8HhmF1NJbQsmbilKFDh3LvvfcC8MEHH/DFF1+wfv167rjjjkz7fv7553Tv3p1XXnkFgDp16rB27VoWLlyY5/EuX77MqFGj+OOPP2jTpg0ANWrUYPXq1XzzzTe0b98+X/G7IiYRERERERERERERyYddv8CBJeZfzH7o+Ql4eFodlVtp5ow4pUmTJo52yZIlCQgIIDo6Ost9IyIiHAkVu7/fzs3u3btJSEiga9eulCpVyvE3efJkDh48mO/4XRGTiIiIiIiIiIiIiOTDic3X2hvHwQ9DICneungsoJkzki0PDw+Mv00pS05OznDb29s7w22bzUZaWlqW/f29r+sZ0973vHnzqFSpUo
b9fH19c+3/emISERERERERERERERc6eTU502IobJ0Oe+fD5L5w7w9QMtjS0NxFM2ckW+XLl+fUqVOO27GxsURGRl53fw0aNGDdunUZ7vv77b+PCbB169YMffj6+nL06FFq1aqV4a9KlSoFEpOIiIiIiIiIiIiIuEjiJTiz12x3fB0e+g38SsPxDTCuK5w7ZGl47qLkjGSrU6dOTJkyhVWrVrFz504eeughPD2vf92/Z599loULFzJy5Ej27dvHmDFjMtV26dSpExs3bmTy5Mns37+ft99+m507dzoeDwgI4MUXX+S5555j0qRJHDx4kC1btvDll18yadKkAolJRERERERERERERFzk5FbAgMDKUKoChN8CjyyGoHA4dxC+6wonNlkdZYFTckay9eqrr9KuXTt69+5Nz5496d+/PzVr1rzu/m655Ra+++47vvjiC5o1a8aiRYt44403MuzTvXt33nzzTV5++WVuvvlmLl26xIMPPphhn/fee4+33nqLDz/8kPr169O9e3fmzJlD9erVCyQmEREREREREREREXER+5JmlZpfu698HXh0CYQ2gfgYmNgb9v1uTXxuYjNUdOO6xcbGEhQURExMDMHB2a+Dl5CQQGRkJNWrV8fPz8+NERZ+EydOZPjw4Vy4cMHqUIoFfdask5yczPz58+nZs2emWkwicmPR+UBE7HQ+EBE7nQ9EJD2dE+SGN/Mh2P0rdBkBtz2X8bHES+bjB5eCzRN6jzLr0hQh9rzBxYsXCQwMzHY/zZwRERERERERERERERH3OHF15kzYTZkf8w2A+36AZkPASIU5/4I//gPFcI6JkjNSrPzjH/+gVKlSWf794x//sDo8ERERERERERERkRvX5Ri4eNRshzXLeh9Pb+g3Btr/27y9ciTMfgpSk90Sort4WR2A3NiGDh3K0KFDXdbfu+++y4svvpjlYzlNIRMRERERERERERGRAmafNRNcG/yCst/PZoOOr0FgJZj7HGydBmVrQLusr/0WRUVy5sylS5cYPnw4VatWxd/fn7Zt27JhwwbH44ZhMGLECMLCwvD396dDhw7s2rUrQx+JiYk888wzlCtXjpIlS9K3b1+OHz/u7qciLlahQgVq1aqV5V+FChWsDk9ERERERERERETkxnXyanKmUou87d/iIej6rtmOXFkwMVmkSCZnHn30URYvXsyUKVPYsWMH3bp1o0uXLpw4cQKAkSNHMmrUKMaMGcOGDRsIDQ2la9euXLp0ydHH8OHDmTVrFjNmzGD16tXExcXRu3dvUlNTrXpaIiIiIiIiIiIiIiLF14lN5rZSFvVmslO1rbk9vbNY1Z4pcsuaXblyhZ9//pnZs2fTrl07AEaMGMGvv/7K//3f//Hee+8xevRoXn/9de68804AJk2aREhICN9//z1PPPEEFy9eZNy4cUyZMoUuXboAMHXqVKpUqcKSJUvo3r17lmMnJiaSmJjouB0bGwtAcnIyycnZr3eXnJyMYRikpaWRlpbmktdBJCtpaWkYhkFycjKenp5Wh3NDsZ8DcjoXiMiNQecDEbHT+UBE7HQ+EJH0dE6QG5Zh4HViMzYgJaQJRl6/A2Vq4WXzwBZ/luTzxyCgYoGG6ay8freLXHImJSWF1NRU/Pz8Mtzv7+/P6tWriYyMJCoqim7dujke8/X1pX379qxdu5YnnniCTZs2kZycnGGfsLAwGjVqxNq1a7NNznz44Ye88847me5ftmwZJUqUyDZmLy8vQkNDiYuLIykpKb9PWSTPkpKSuHLlCitXriQlJcXqcG5IixcvtjoEESkkdD4QETudD0TETucDEUlP5wS50fgnxdAtPoY0PFmw5Thp26LzfGwn31ACEk6yce5EooOaFmCUzouPj8/TfkUuORMQEECbNm147733qF+/PiEhIUyfPp2//vqL2rVrExUVBUBISEiG40JCQjhy5AgAUVFR+Pj4UKZMmUz72I/Pyquvvsrzzz/vuB0bG0uVKlXo2LEjwcHB2R6XkJDAsWPHKFWqVKakkogrJSQk4O/vT7t27fRZc7Pk5GQWL15M165d8fb2tjocEbGQzgciYqfzgYjY6XwgIunpnCA3KlvEb7ALbKENuaN3/3wd65k4C3bPolW4P2m39iyYAF3EvuJWbopccgZgypQpDBs2jEqVKuHp6clNN93Efffdx+bNmx372Gy2DMcYhpHpvr/LbR9fX198fX0z3e/t7Z3jiTQ1NRWbzYaHhwceHkWyzI8UER4eHthstlw/k1Jw9NqLiJ3OByJip/OBiNjpfCAi6emcIDecqK0A2Cq1yP9nP6wp7J6F55ndeBby701en1uRzBTUrFmTFStWEBcXx7Fjx1i/fj3JyclUr16d0NBQgEwzYKKjox2zaUJDQ0lKSuL8+fPZ7iMiIiIiIiIiIiIiIi5ycou5rXRT/o8NaWxuT+90XTwWK5LJGbuSJUtSsWJFzp8/z++//06/fv0cCZr0azYmJSWxYsUK2rZtC0CLFmZmLv0+p06dYufOnY59RERERERERERERETEBdLS4ORWsx12HcmZ0Ebm9uwBSMpbTZfCrkgua/b7779jGAZ169blwIEDvPTSS9StW5eHH34Ym83G8OHD+eCDD6hduza1a9fmgw8+oESJEtx3330ABAUF8cgjj/DCCy8QHBxM2bJlefHFF2ncuDFdunSx+NmJiIiIiIiIiIiIiBQjZ/dD0iXwLgHl6+X/+FIhUKIcxMdAdARUbuH6GN2sSCZnLl68yKuvvsrx48cpW7YsAwcO5D//+Y9jLbeXX36ZK1eu8OSTT3L+/Hlat27NokWLCAgIcPTx2Wef4eXlxaBBg7hy5QqdO3dm4sSJeHp6WvW0RERERERERERERESKnxObzG3FpuB5HWkJmw1CG8OhZXB6R7FIzhTJZc0GDRrEwYMHSUxM5NSpU4wZM4agoCDH4zabjREjRnDq1CkSEhJYsWIFjRo1ytCHn58fX3zxBWfPniU+Pp45c+ZQpUoVdz8Vueqvv/7CZrNhs9n48MMPs92vQ4cO2Gw2Dh8+nK/+ly9fjs1mY+jQoRnunzhxouPzYoWYmBi+++47Hn/8cZo1a4aXlxc2m40ZM2ZYEo+IiIiIiIiIiIiIy53YbG6vZ0kzO/vSZlHFo+5MkZw5I8XPlClTMrRfffVVC6Nxn9WrV/PYY49ZHYaIiIiIiIiIiIhIwTl5NTlTyYnkTEhjc3u6eCRniuTMGSlekpOT+eGHH7DZbISGhhIREcHmzZuz3Hfy5MlERERQqVIll4w9YMAAIiIiePrpp13SX36FhITw5JNPMmHCBHbu3MkDDzxgSRwiIiIiIiIiIiIiBSIlCaJ2mG1nkjOhV5MzUTshLc35uCymmTNiuQULFhATE0P79u1p37497777LlOmTOGmmzJ/UcPDw106dlBQUIYl8dytTZs2tGnTxnHbw0P5UhERERERERERESlGTu+E1CTwLwNlql9/P+Vqg6cPJF2CC0egrBN9FQK6EiyWsy9pNmTIEIYMGQLA9OnTSU1NzbRvdjVnbDYb1apVIykpi
XfffZd69erh6+tL//79cxw7u5ozQ4cOxWazsXz5clauXEmnTp0ICAggMDCQXr16sXv37mz7nDNnDt27dyc4OBg/Pz/q1KnDm2++SVxcXO4vhoiIiIiIiIiIiEhxYl/SLKw52GzX34+nN5SvZ7aLwdJmSs6IpS5evMjcuXPx9fXlrrvuonbt2rRq1YrTp0+zePHifPWVlpZG//79GTlyJDVr1qRfv35UrFjRqfjmzJlDp06dOHfuHN27d6dixYrMnz+fdu3aERUVlWn/F154gb59+7Jy5UoaNWpEr169SEpK4v3336dDhw5cvnzZqXhEREREREREREREipQTW8xtpRbO9+VY2myH831ZTMuaFQKGYXAlOfMskcLK39sTmzMZznRmzpxJQkICAwcOpHTp0oA5g2b9+vVMnTqVO+64I899HTt2DF9fX/bu3euymjSjR49m6tSp3HvvvQCkpqZyzz338PPPP/PVV1/x7rvvZnguo0aNonnz5vzyyy9Uq1YNMGvqPP3004wdO5YRI0bwySefuCQ2ERERERERERERkULPMXPGiXozdunrzhRxSs4UAleSU2nw1u9Wh5Fnu9/tTgkf13x00i9pZjd48GCef/55Zs2aRVxcHKVKlcpzfx9++KHLEjMA9913nyMxA+Dp6clrr73Gzz//zMqVKzPs+8EHHwDmkmz2xAyAt7c3n3/+Ob/99hvfffcdH3/8sWrLiIiIiIiIiIiISPGXGAdn9pjtSi5IzoQ0Mreni/7MGV0hFsscPnyY1atXU7ZsWXr27Om4v3z58nTv3p34+HhmzZqV5/5sNht9+vRxaYzdunXLdF+dOnUAOHXqlOO+6Ohotm3bRv369albt26mY/z8/GjZsiUXLlxg//79Lo1RREREREREREREpFA6tQ2MNAgIg4BQ5/sLvZqcuXAUrlxwvj8LaeZMIeDv7cnud7tbHUae+Xt7uqSfqVOnYhgGgwYNwsfHJ8NjQ4YMYd68eUyZMoUHHnggT/1VqFABX19fl8RmV7ly5Uz32WfyJCYmOu47cuQIABEREbku+RYTE5NlAkdERERERERERESkWLEvaeaKWTMA/mUgsDLEHofTu6Dara7p1wJKzhQCNpvNZcuEFSVTp04FYOnSpdx2220ZHrMnPpYuXcqpU6eoWLFirv35+fm5PMa81tZJTTVrBlWsWDHL2TbpBQcHOx2XiIiIiIiIiIiISKF3wsXJGTDrzsQeh9M7lZwRya/169ezd+9eAPbv35/tUl9paWl8//33vPDCC+4ML9/sM2xCQ0OZOHGitcGIiIiIiIiIiIiIFAYnNpnbMFcmZxrBvgUQVbTrzqjmjFhiypQpALz00ksYhpHl36JFi4BrM2wKs8qVK1O3bl22b99OZGSk1eGIiIiIiIiIiIiIWOvyWbhgloMgrLnr+g25Wnfm9E7X9WkBJWfE7VJSUvjhhx8AuPfee7Pdr1OnTlSoUIGtW7eyc2fh/6K98cYbpKamMnDgwCzjPXjwIOPHj7cgMhERERERERERERE3O7nF3AbXAv/Srus3tLG5Pb0bUlNc16+bKTkjbrdgwQLOnDlD3bp1ad48+4ypp6cnd911F1A0Zs8MGTKEl19+mS1bttCsWTNuvvlmBg0axB133EH9+vWpVasW//vf/zIdd8sttzj+5s2bB8Cbb77puO/JJ59091MRERERERERERERcc7Jq/VmXLmkGUCZ6uBdElIT4ewB1/btRkrOiNvZlzQbPHhwrvvaZ9ZMmzaNtLS0Ao3LFT7++GOWLl1K3759OX78OL/++itbtmyhRIkSvPTSS1nOnPnrr78cfzExMQAcOHDAcd/u3bvd/TREREREREREREREnGOvN1PJxckZDw8IaWi2i/DSZl5WByA3npkzZ+Z539tuuw3DMBy3ly9fnuV+6ffJSocOHbLcZ+jQoQwdOjTT/RMnTmTixInZ9pfTeJ06daJTp045xpPXvkRERERERERERG5oqcng6W11FJJfhgEnCmjmDEBoIzi+HqJ2QOO7XN+/G2jmjIiIiIiIiIiIiIgULhdPwMwH4T+hsHu21dFIfsWegMvRYPOEik1c339II3MbtcP1fbuJkjMiIiIiIiIiIiIiUjikJsOa/8GYm82kTFoK7JlndVSSX/ZZMyENwNvf9f2HXk34aFkzEREREREREREREREnHF4D816AMxHm7aAqcPFYkZ4dccOy15spiCXNwEz6YIO40xB3BkqVL5hxCpBmzoiIiIiIiIiIiIiIdeKiYdY/YGJPMzFTIhj6fQkPLzAfP7MXkhOsjVHy5+TVmTOVCig541MSytYw26eLZvJOyRkRERERERERERERcb+0VFj/LXzRErZNB2zQ4mF4eiM0HwJBlcG/LBipEL3b6mglr9LS4ORWs12pRcGNE9rY3BbRmVVKzoiIiIiIiIiIiIiIe53YBN92gvkvQuJFs4bIo0ugz2goUdbcx2a7Vky+iF6AvyGdOwiJseDlD+XrF9w4oY3MbVTRrDujmjMiIiIiIiIiIiIi4h6pybDg37BxPGCAbxB0fhNaDgMPz8z7hzaGQ8uVnClK7PVmKjYBzwJMQYRcnTlzWskZEREREREREREREZHsbZkCG8eZ7Sb3QNf3ICAk+/1DNXOmyDlxtd5MWAHVm7Gzz5yx1yTy9ivY8VxMy5qJiIiIiIiIiIiIiHscWmFub38R7hybc2IGriVnTu80a5lI4XfyanKmIOvNAARWAv8yZk2iM3sKdqwCoOSMiIiIiIiIiIiIiBQ8w4Cjf5rtmh3zdkxwLfDyg6Q4OB9ZcLGJa6Qmw6ntZrtSAc+csdkg5OrsmSK4tJmSMyIiIiIiIiIiIiJS8M4dgrjT4OGd91kVnl5QoYHZjtpecLGJa5zeBamJ4BcEZWsU/HihV+vORCk5IyIiIiIiIiIiIiKS2dF15rbSTeDtn/fjHBfgVXem0DuZrt6MzVbw49lnzhTBz4aSMyIiIiIiIiIiIiJS8I6uNbfhbfJ3XMWrdWdOaeZMoXfCXm+mgJc0s7Mn7k7vMJfNK0KUnJFC4a+//sJms2Gz2fjwww+z3a9Dhw7YbDYOHz6cr/6XL1+OzWZj6NChGe6fOHEiNpuNESNG5D9oF9i0aRMjRozg9ttvJywsDF9fX6pUqcKQIUPYvl3/2IiIiIiIiIiISDFy5Gq9mapt83dc6NXkTBGcHXHDObnF3Ia5KTlTvi54eEHCRbh43D1juoiSM1IoTJkyJct2cZaSkkLLli1555132LNnD82bN6dv3774+voybdo0WrZsyU8//WR1mCIiIiIiIiIiIs67dBrOHQRsUKV1/o6t0MA8Li4K4qILIjpxhaTLEL3bbLtr5oyXL5Sra7ZPF626M0rOiOWSk5P54YcfsNlshIaGEhERwebNm7Pcd/LkyURERFCpUiWXjD1gwAAiIiJ4+umnXdJffrVu3Zq5c+dy+vRp5s2bx48//si+fft4/fXXSU5OZtiwYcTExFgSm4iIiIiIiIiIiMscvTprJqQh+JfO37G+pSC4ptnW7JnC69h6MNIg
IAwCw9w3bmjRrDuj5IxYbsGCBcTExNCuXTsef/xxIPvZM+Hh4dSrVw9vb2+XjB0UFES9evUoV66cS/rLDy8vL9atW0evXr3w8Lj2VfTw8OC9996jXr16XLp0iXnz5rk9NhEREREREREREZeyJ2fyW2/GzrG0mUoBFFr7F5vbmp3cO6697oySMyL5Y0/EDBkyhCFDhgAwffp0UlNTM+2bXc0Zm81GtWrVSEpK4t1336VevXr4+vrSv3//HMfOrubM0KFDsdlsLF++nJUrV9KpUycCAgIIDAykV69e7N69O9s+58yZQ/fu3QkODsbPz486derw5ptvEhcXl/uLke75NG5snlROnjyZ5+NEREREREREREQKpSNrzW3V603OFM0L8DeUA1eTM7W7unfckKszZ7SsmUjeXbx4kblz5+Lr68tdd91F7dq1adWqFadPn2bx4sX56istLY3+/fszcuRIatasSb9+/ahYsaJT8c2ZM4dOnTpx7tw5unfvTsWKFZk/fz7t2rUjKioq0/4vvPACffv2ZeXKlTRq1IhevXqRlJTE+++/T4cOHbh8+XKexz506BAAoaGhTj0HERERERERERERSyXEXrtwHt72+vpwzJxRcqZQOn8YYvaBzRNqdHDv2PbE3blISLzk3rGd4GV1AAIYBiTHWx1F3nmXAJvNJV3NnDmThIQEBg4cSOnSpQFzBs369euZOnUqd9xxR577OnbsGL6+vuzdu9dlNWlGjx7N1KlTuffeewFITU3lnnvu4eeff+arr77i3XffzfBcRo0aRfPmzfnll1+oVq0aYNbUefrppxk7diwjRozgk08+yXXc1atXs2nTJnx8fPL1GoiIiIiIiIiIiBQ69lokZapB4HX+mNp+AT5mv1l43qeky8ITF7AvaRZ+S/5rCjmrZDkIqAiXTsHp3RDe2r3jXyclZwqD5Hj4wI0Fkpz12kmXnfzSL2lmN3jwYJ5//nlmzZpFXFwcpUqVynN/H374ocsSMwD33XefIzED4OnpyWuvvcbPP//MypUrM+z7wQcfAOaSbPbEDIC3tzeff/45v/32G9999x0ff/xxhhozfxcbG8uwYcMAeO6555ye/SMiIiIiIiIiImKpo1eXNLveWTMAASFQKgTiTpsX4Kvc7JrYxDXsyZlaXawZP6TR1eTMjiKTnNGyZmKZw4cPs3r1asqWLUvPnj0d95cvX57u3bsTHx/PrFmz8tyfzWajT58+Lo2xW7dume6rU6cOAKdOnXLcFx0dzbZt26hfvz5169bNdIyfnx8tW7bkwoUL7N+/P9vxUlNTue+++9i/fz+tWrXKMDNHRERERERERESkSDryp7m93nozdo66M9ud60dcKzkBIq/+kL125uupbhF6te5MVNGpO6OZM4WBdwlzNkpR4V3CJd1MnToVwzAYNGgQPj4+GR4bMmQI8+bNY8qUKTzwwAN56q9ChQr4+vq6JDa7ypUrZ7rPPpMnMTHRcd+RI0cAiIiIwJbLkm8xMTFZJnAAHn/8cebNm0fdunWZN29eptdFRERERERERESkSElJhBObzLYzM2fATM4cWKK6M4XNkdWQcgUCwiCkoTUxhFxNzpxWckbyw2a7IddInDp1KgBLly7ltttuy/CYPfGxdOlSTp06laelvfz8/FweY26JFrvU1FQAKlasmOVsm/SCg4OzvP+ll15i/PjxVKlShcWLF1OuXLn8BSsiIiIiIiIiIlLYnNgMqYlQsjwE13SuL82cKZz2LzG3tbu4rFZ5voU2Mbend0FaKnh4WhNHPig5I5ZYv349e/fuBWD//v3ZLvWVlpbG999/zwsvvODO8PLNPsMmNDSUiRMn5vv4Dz/8kP/+979UqFCBxYsXU6VKFRdHKCIiIiIiIiIiYgFHvZk2zl+4D21qbk/vgtQU8NTl7UJh/yJza9WSZmAm/rz8zfru5yKhXC3rYskj1ZwRS0yZMgUwZ4sYhpHl36JF5pfaPsOmMKtcuTJ169Zl+/btREZG5uvYsWPH8tprr1G6dGl+//33bJc8ExERERERERERKXIc9WacXNIMoGx18C4JKQlw7qDz/Ynzzh403wsPL6je3ro4PDyhQn2zfbpoLHun5Iy4XUpKCj/88AMA9957b7b7derUiQoVKrB161Z27iz8awW+8cYbpKamMnDgwCzjPXjwIOPHj89w308//cQ///lPSpUqxfz582nWrJmbohURERERERERESlgaalw7C+zHd7G+f48PK/VNFHdmcLhwNUlzcLbgF+gtbGEXq07E1X4ryWDljUTCyxYsIAzZ85Qt25dmjdvnu1+np6e3HXXXXz11VdMnTqVjz76yI1R5t+QIUPYsWMHI0eOpFmzZjRv3pzq1asTGxvLkSNH2LNnD02bNmXYsGEAREdHc//995OWlkb16tX55ptv+OabbzL1279/f/r37+/mZyMiIiIiIiIiIuKk07sgMRZ8Sl0r2O6sik3g+Ho4tQ0a3+WaPuX67V9sbmt3tTYOuFZ3pogk7pScEbezL2k2ePDgXPe99957+eqrr5g2bRoffPBBQYfmtI8//pju3bszZswY/vzzT7Zt20aZMmWoXLkyL730UobnHB8fT1JSEgA7duxgx46sTxrVqlVTckZERERERERERIqeo1eXNKvSynX1YUIbm9sicgG+WEu+AodXmW0r683Y2ROApzVzRiRLM2fOzPO+t912G4ZhOG4vX748y/3S75OVDh06ZLnP0KFDGTp0aKb7J06cyMSJE7PtL6fxOnXqRKdOnXKMB8ykS25xi4iIiIiIiIiIFFlH1prbcBfUm7FLn5wxDLDZXNe35M/h1Wb9n8DKUL6e1dFcW/Iu9gTEn4MSZa2NJxeqOSMiIiIiIiIiIiIirmUY12bOVHVBvRm7Cg3A5gnxMXApynX9Sv7tX2Rua3ctHEkyv0AoXdVsF4GZVUrOiIiIiIiIiIiIiIhrnTsEcafBwxsqtXBdv97+UK6O2Y7a7rp+JX8MI2NyprCoeLXuzJE11saRB0UyOZOSksIbb7xB9erV8ff3p0aNGrz77rukpaU59jEMgxEjRhAWFoa/vz8dOnRg165dGfpJTEzkmWeeoVy5cpQsWZK+ffty/Phxdz8dERERERERERERSW/vAvj5Ubh4wupI5HrZZ81UuslMqLiSY2kzJWcsc/YgnD9sJt+qt7c6mmvq9zO3W6dDunxBYVQkkzMff/wxX3/9NWPGjCEiIoKRI0fyySef8MUXXzj2GTlyJKNGjWLMmDFs2LCB0NBQunbtyqVLlxz7DB8+nFmzZjFjxgxWr15NXFwcvXv3JjU11YqnJSIiIiIiIiIicmNLTYHFb8H0wbDjR9gyxeqI5HoduZqcCXfhkmZ26evOiDUOLDa3VduCbylrY0mvfm/wDYKLRyFyhdXR5KhIJmf+/PNP+vXrR69evahWrRp33XUX3bp1Y+PGjYA5a2b06NG8/vrr3HnnnTRq1IhJkyYRHx/P999/D8DFixcZN24cn376KV26dKF58+ZMnTqVHTt2sGTJEiufnoiIiIiIiIiIyI3
n0mmY3A/WfH7tvtM7rYtHnHN0rbmt2tb1fSs5Yz3HkmbdrI3j77z9ocndZnvLVGtjyYWX1QFcj9tuu42vv/6affv2UadOHbZt28bq1asZPXo0AJGRkURFRdGt27UPhq+vL+3bt2ft2rU88cQTbNq0ieTk5Az7hIWF0ahRI9auXUv37t0zjZuYmEhiYqLjdmxsLADJyckkJydnG29ycjKGYZCWlpZh6TURV0tLS8MwDJKTk/H09LQ6nBuK/RyQ07lARG4MOh+IiJ3OByJip/OBSO5sx9bh+csj2OJOY/iUIq3p/Xhu+Abj9C5Sitl354Y4J8SdxvvcIQxspFRsAa5+ruXq4w1w7hDJcefAN8C1/UvOki7jdXgNNiC5ekfXv7/OajwY7w3fYUTMISX2DPiXduvwef1uF8nkzL///W8uXrxIvXr18PT0JDU1lf/85z/ce++9AERFRQEQEhKS4biQkBCOHDni2MfHx4cyZcpk2sd+/N99+OGHvPPOO5nuX7ZsGSVKlMg2Xi8vL0JDQ4mLiyMpKSnvT1Qkn5KSkrhy5QorV64kJSXF6nBuSIsXL7Y6BBEpJHQ+EBE7nQ9ExE7nA5EsGAY1zvxOwxMzsJFGrF8lNlR/lqQrJekBcC6S3+fMItXT1+pIXa44nxMqnl9PKyDWrzLL/yiYwuzdvMvin3yOdbO/41ypugUyhmQt5OIWbklN5LJPOZb8tR9sB6wOKSPDoIN/OEFXjhIx8x0iy3d16/Dx8fF52q9IJmd++OEHpk6dyvfff0/Dhg3ZunUrw4cPJywsjIceesixn81my3CcYRiZ7vu7nPZ59dVXef755x23Y2NjqVKlCh07diQ4ODjbPhMSEjh27BglS5bE39/Fxa9E0rly5Qr+/v60b98eX9/i9x8thVlycjKLFy+ma9eueHt7Wx2OiFhI5wMRsdP5QETsdD4QyUbiJTzn/guPE78BkNbwTvx7jqKdj1m/woh8F9vlaO64qSpGpZusjNSlboRzgsei1XAYSjXsRs87ehbIGJ6XpsKBRbStHkDazQUzhmTNY+FyAPwa9aFnj17WBpMNjwonYNFrNE7eSv2en7l1bPuKW7kpksmZl156iVdeeYXBgwcD0LhxY44cOcKHH37IQw89RGhoKGDOjqlYsaLjuOjoaMdsmtDQUJKSkjh//nyG2TPR0dG0bZv1Ooi+vr5ZXvD29vbO9URqs9lITU3Fw6NIlvmRIiI1NRWbzYavr2+x/ce9sMvL+UBEbgw6H4iInc4HImKn84FIOtER8MMDcHY/eHjDHR/icfOjeKT/0XRoIzj4B15n90C11tbFWkCK9Tnh2DoAPKvfimdBPcewpnBgEZ5ndhXcGJKZYcBBs2a7Z907Cu9r3+xeWDoC2+kdeMfshopN3TZ0Xr/XRTJTEB8fnynJ4enp6ajnUr16dUJDQzNMDUxKSmLFihWOxEuLFi3w9vbOsM+pU6fYuXNntsmZ6+Xt7Y2vry8XL17EMAyX9i1iZxgGFy9eVGJGRERERERERAq3HT/Bt53MxExgJXh4AbR6DP6+mk2FBub29C73xyjXLyEWTu802+Guvc6aQWgTcxu1veDGkMxi9sGFo+DpA9Vvtzqa7JUoC/V6m+0tU62NJRtFcuZMnz59+M9//kN4eDgNGzZky5YtjBo1imHDhgHmLJXhw4fzwQcfULt2bWrXrs0HH3xAiRIluO+++wAICgrikUce4YUXXiA4OJiyZcvy4osv0rhxY7p06eLymMuVK8eJEyc4fvw4QUFBeHt757rEmkheGIZBcnIyFy9eJC4ujkqVKlkdkoiIiIiIiIhIZqnJ8PtrsH6sebtGBxg4DkqWy3r/kEbmVsmZouXYejDSoEw1CKyY6+7XLbSxuY2OMD9bnvqxslvsvzrZodpt4FPS2lhy03wI7PoFts+Eru+Bt5/VEWVQJJMzX3zxBW+++SZPPvkk0dHRhIWF8cQTT/DWW2859nn55Ze5cuUKTz75JOfPn6d169YsWrSIgIAAxz6fffYZXl5eDBo0iCtXrtC5c2cmTpyIp6eny2MODAwEICYmhhMnTri8fxFfX18qVark+KyJiIiIiIiIiBQq68deS8y0ewk6vAoeOVyHC2lobk/vNJdS0g+di4aja81tQc6aAShdFXwDITHWnM1h/7xIwdq/yNzW6mptHHlRowMEVYGLx2DPXGh8l9URZVAkkzMBAQGMHj2a0aNHZ7uPzWZjxIgRjBgxItt9/Pz8+OKLL/jiiy9cH2QWAgMDCQwMJDk5mdTUVLeMKTcGT09PLWUmIiIiIiIiIoXbjh/Nbdf34NZnc9+/fF2weULCBYg9CUFaLaRIOPKnua3apmDH8fAwZ1cdXQtRO5SccYfEODhyNflWu5u1seSFhyc0uw9WfAxbpig5I8W82JeIiIiIiIiIiMjfXTgKJ7cANmg6OG/HePlCuTpwJgKidys5UxSkJMKJTWa7oGfOAFRsYiZnTm3P++dKrl/kCkhLNpesC65pdTR50+x+MzlzaDmcPwJlqlodkYOH1QGIiIiIiIiIiIhIMRcx19yGt4FSFfJ+XPqlzaTwO7EZUhOhZHn3XLy3152J2l7wY8m1ejO1uxWdZQbLVIXq7c321u+tjeVvlJwRERERERERERGRghXxm7lt0Dd/xzmSM7tcG48UDEe9mTbuuXjvSM7sMOsSScExjGvJmaJQbya9mx40t1unQVrhKTei5IyIiIiIiIiIiIgUnEun4eg6s12/T/6OVXKmaHHUm3HDkmYA5euBh5dZl+jicfeMeaM6swdij4OXH1S7zepo8qdeb/ALgovHzKXZCgklZ0RERERERERERKTg7JkLGFCpBQRVzt+x9uRMzD6znokUXmmpcOwvsx3exj1jevlC+fpmW0ubFaz9i8xttdvAp4S1seSXtx80HmS2N0+xNpZ0lJwRERERERERERGRgmNf0iy/s2YAAiuZv3hPSzETNFJ4nd4FibHgUwpCGrlv3PRLm0nBSV9vpii66QFzu2cuxJ+zNparlJwRERERERERERGRghF/DiJXme36+aw3A2bdEvuFfi1tVrgdvbqkWZVW4OnlvnGVnCl4CbHX3t9aXayN5XpVbGp+VlKTYMePVkcDKDkjIiIiIiIiIiIiBWXvfDBSzQRLcM3r60N1Z4qGI2vNbbib6s3YVWxibk9pWbMCE7nCnL1Wtub1f48Lg+YPmtsthWNpMyVnREREREREREREpGBEzDG31zNrxk7JmcLPMK7NrKjqpnozdvaZVRePwpXz7h37RmGvN1O7q7VxOKvxXeDpa86yOrnV6miUnBEREREREREREZECkBALB/8w29dTb8ZOy5oVfucOQdxp8PCGSi3cO7Z/aSgdbrajdrp37BvBsfWwZ57ZLurJmRJloX5vs10IZs8oOSMiIiIiIiIiIiKut3+RWd8huBZUqH/9/ZSvZ27jouByjGtiE9faM9fcht8C3v7uHz/06tJmqjvjOpfPwuynYVxXiD8LZapB1dusjsp5zYeY2x0/QvIVS0NRckZERERERERERERcL+
I3c1u/L9hs19+PbykoU91sa/ZM4bTjJ3PbcIA14zuSM6o747S0NNg0Eca0uDa7pPkQeHQpePtZGppLVO8AQeGQcBEi5loaipelo4uIiIiIiIiIyI0lNcWcARF7Ei4eN7exJyE2XRvggVlQvq61scr1S4qH/YvNtjNLmtmFNITzkWZypkZ75/sT14nZbyZFPLygQX9rYghtbG41c8Y5p7bB3OfhxEbzdkgj6PWpOSOquPDwgOb3w/IPzeRTk7stC0XJGRERERERERERKVgnt8DCV+H8ETMxY6TlfsyWKdDt/YKPTQrGwaWQHG/+Qj2sufP9hTQyl87SzJnCxz5rpmYnKBlsTQz25MyZPZCSCF6+1sRRVCVchD/+Axu+Nc/PPqWg4+vQ6nHwLIYphGb3wfKPIHIFnD9sLtlmgWL4yoqIiIiIiIiISKGy+G04+ue12x5eEBAGQZUgMOzqX2Vze/YALH0H9i9RcqYoi5hjbuv3cW5JM7uQhub2tAq+FyqGATuvJmca3WVdHEGVwa80JFwwEzQVm1oXS1FiGGbtld9fh8vR5n2NBkK3/0BgRWtjK0ilw6FGBzi0DLZ+Dx1fsyQMJWdERERERERERKTgnNlr/jrZ5nF1qbL6ULK8ubRMVq6chz/egzMRcOEYlK7i3njFeSlJsHeh2W7Q1zV92pMzZ/ZAWip4eLqmX3HOqW1mQtXLD+r1tC4Om81MyESugKPrlJzJi7MHYc6/4PAq83ZwLej5X6jZ0dq43KX5EDM5s2UatP+3JeeUbP4VFBERERERERERcYH135rbOj3MXyoHhGSfmAHwLwOVW5ntA4sLPDwpAJErIPEilAq59l46q0x18C4BKQlw7pBr+hTn7fjR3Na5A3wDrI2ldldza5+1JdlLS4Vpd5uJGS8/6PQm/HPtjZOYAajX25xtFXscjqy1JAQlZ0REREREREREpGAkxMK26Wa71WN5P85+kXW/kjNF0u7Z5rZe75wTcfnh4QEV6pttLW1WOKSlwa5ZZruxhUua2dXvY26PrIHLMdbGUtjt+x3OHTSTE0/9Be1evPHq9Hj7Qe1uZjtyhSUhKDkjIiIiIiIiIiIFY/sPkBQHwbXNWTN5ZU/OHFphFveWoiM1BfbON9uuWtLMzlF3Zpdr+5Xrc/RPiD0BvoFQq6vV0ZhF3Ss2NQva75lndTSF219fm9sWQ83X7UZV/XZzG7nKkuGVnBEREREREREREdczjGtLmrV6LH9F4UObQKlQSL5s2XIzcp2OroX4s+bydFVvdW3fIY3MrZIzhcPOn8xt/T7mLITCwD57JuI3a+MozE7vvlYH7OZHrY7GWtXbmdsTGyExzu3DKzkjIiIiIiIiIiKuF7kSYvaCTyloem/+jrXZoFYXs31gietjk4Kz++pF8bq9wNPbtX07Zs5oWTPLpSbDrl/NdqOBloaSQf1+5vbQCrhywdJQCq3135jber2hdBVrY7FamWoQFA5pKXBsnduHV3JGRERERERERERcb/1Yc9t0MPgF5v94R92ZRa6LSQpWWhrsmWu2Xb2kGUCFBub2wlGznpFY59ByuHIOSpaH6u2tjuaa8nWgfD1ISzbrqkhG8edg2w9mu/U/rI2lsLBwaTMlZ0RERERERERExLUuHLtWd+Tmx66vjxodwOYJMfvg/GFXRSYF6cRGuHQKfALyV2Mor0qUhcBKZjs6wvX9S97tuLqkWYP+4OllaSiZ1L+aGNTSZpltngwpVyC0MVRta3U0hUO1q8mZw0rOiIiIiIiIiIhIUbdpglmUu9rtUKHe9fXhXxrCbzHb+xe7LDQpQLtnm9s63cHLt2DG0NJm1ku+cm2GVOO7rY0lK/a6MweWQNJla2MpTFJTYMN3Zrv1P/JXB6w4s8+cObkFEi66dWglZ0RERERERERExHWSE2DTRLPd6nHn+lLdmaLDMCBijtkuiCXN7OxLm53eVXBjSM72/Q5JcWatjiqtrI4ms9DGZi2RlAQldtPbOx8uHoMSwdDoLqujKTyCKkPZGuYPCo786dahlZwRERERERERERHX2f0rxJ81l5+q29O5vmp3M7eHVphJHym8orbDhSPg5X8tqVYQQhqZWyVnrLPz6pJmje4snLMvbDYtbZaVv74xty0eBm8/a2MpbCxa2kzJGRERERERERERcZ31Y81ty2HO16IIaQgBYWaNhCNrnI9NCs7uqxfBa3cBn5IFN45jWbNd5mwdca+Ei7BvkdluXIhnXzToZ273/a7ELsCp7XBktVnH6+ZHrI6m8KneztxGrnTrsErOiIiIiIiIiIiIa5zYZP55+sBNDznfn81mXuwHLU9U2NlnKNQvwCXNAMrVBg9vSLoEF44W7FiSWcRcSE2EcnWvzWIqjMJuMhO7SXFwaLnV0Vhv/dVZMw36QWCYtbEURtVuM7dROyD+nNuGVXJGRERERERERERcY/3VYtMNB0Cp8q7p07602f5FrulPXO/MXojZZyZN6nQv2LE8vaF8PbOtpc3cz76kWeO7CueSZnYeHlC/j9m+0Zc2u3wWtv9otm/5p7WxFFYBoWbCEcOtszSdTs7Ex8cTHx+f7eNffPEFt99+O/Xr16dnz57MnTvX2SFFRERERERERKSwuXwWdv5stls97rp+q7cHDy84dxDOHnRdv+I69iXNanYEv6CCHy/90mbiPnFnzPpPAI0GWhtLXjS4OotrzzxITbY2FittmmDOdgprDpVvtjqawqv61bozke6rO+NUcmbOnDkEBAQQFhbGpUuXMj0+bNgwhg8fztq1a9m7dy+///47/fr1Y+TIkc4MKyIiIiIiIiIihc2WyeYFwIrNoFIL1/XrFwjhbcz2gSWu61dcJ2K2ubXPVCho9uRMtJIzbrX7VzBSzYv8wTWtjiZ34W2gRDlIuACHV1sdjTVSk2HDOLPd+h+Fe7aT1apdTc4cLiLJmd9//x3DMOjfvz8BAQEZHlu9ejUTJ04EoESJEjRv3hw/Pz8Mw+CNN95g1y6dPEVEREREREREioW01GsXAFs97voLgLW7mlvVnSl8zkWadRpsnlC3l3vG1MwZa+y4uqRZo7usjSOvPDyh3tXP5I26tFnEHLh0EkpWMJeblOzZkzPRu81ZYm7gVHJm3bp12Gw2OnbsmOmxsWPHAhAWFkZERASbNm1iz549VKlShdTUVL755htnhhYRERERERERkcJi30K4eAz8y0KjO13fv73uzOFVkHzF9f3L9bPPZgpvAyWD3TOmPTlz9oA+D+5y4RgcWwfYCuY7XlDsS5tFzDWTyDeav742ty2HgZevtbEUdiWDIaSR2XZ29kwel+B0KjkTHR0NQO3atTM9tnDhQmw2G8888wyVK1cGoEqVKjzzzDMYhsGKFSucGVpERERERERERAqL9d+a25seBG9/1/dfvh4EVoaUhBt3eaLCyv5+1OjgvjFLhUCJYDDS4Mwe9417I7PXk6p6KwSGWRtLflRrZ9ZBuhwNx9ZbHY17ndgMx/4CD29o+bDV0RQNrljaLCEWJvTM065OJWfOnDGn95QqVSrD/bt37yYmJgaAv
n37ZnisZcuWABw+fNiZoUVEREREREREpDA4sw8OLQNs5q+zC4LNlm5ps0UFM4bkn2FcS85Uu81949psWtrM3XZeXdKscRFZ0szOywfq9DDbN9rSZuvNla1oOAACQq2NpaiofjU5E+lEcmbrNEi+nKddnUrOeHp6AnDu3LkM969aZQZfvnx56tWrl+GxMmXKAJCQkODM0CIiIiIiIiIiUhhs+M7c1u0BZaoW3DjpkzOGUXDjSN6d2QPxMeDlD5VauHds+/JDSs4UvDP7zLpCHl7QoJ/V0eSfY2mzOTfOuSMu+tpsp1v+YW0sRUnVtoANzu6H2FP5Pz4tDf7KezkXp5IzlSpVAmDr1q0Z7p83bx42m43bb7890zEXL14EoFy5cs4MLSIiIiIiIiIiVku8BFu/N9utHivYsaq3N5fnOX84z+v5SwGzz5oJb23OUHAnx8yZne4d90ZknzVTszOUKGttLNejZifwLmnWxTq52epo3GPjBEhNgso3uz9xWpT5l4GKTc329SxtdmAxnI8E38A87e5Ucub222/HMAzGjBnjWMZsw4YNLFy4EIDu3btnOiYiIgKA0FBNpRIRERERERERKdK2/wBJlyC4FlTvULBj+Za6+qtmzAtgYr3IlebWnUua2aVf1uxGmQ1hBcOAHUV0STM7b/9rM+8i5lgbizukJMHGcWa7tWbN5JtjabOV+T923f+Z26b35ml3p5IzTz75JB4eHkRGRlKjRg1atmxJ+/btSUlJoUyZMtxzzz2Zjvnjjz+w2Ww0a9bMmaFFRERERERERMRKhgHrvzXbNz8GHk5dZsqb2t3MrerOWC8tDY6sMdvV2rl//PL1wOYB8WfNJZykYJzcAucOmkvX1c1bkfNCyb602e7fin8yb/dsiDsNpUKhft/c95eM7Oez/M6cid5j1l+zecBND+XpEKf+1bzpppv45JNPsNlsxMXFsXnzZhISEvD29ubbb78lICAgw/4XL15k3rx5AHTt2tWZoUVERERERERExEpH1pg1R7xLQrO8/UrYafbkzOE1kJS3gstSQM7sMRMj3iUgrLn7x/f2h7I1zbaWNis49rolde8wZ68VVbW7gaevmWiK3m11NAXrr6uzN25+1P3LDRYHVduAzdNcQvPC0bwft36sua3bE8qE5+kQr/xHl9Fzzz1Hly5d+Omnn4iKiqJixYrce++91K1bN9O+y5cv5+abbwagS5cuzg4tIiIiIiIiIiJW2TPf3DbsD35B7hmzXG0oHW5eMItcZV4wFmvYf1VexYJ6M3YhDc3C3ad3Qa3O1sRQnKUkwc5fzHajIrqkmZ1vgPkZ2TvfXNrMvixecXN8I5zYBJ4+0GKo1dEUTb4BUOkmOL7B/Hem+f25H3PlPGybbrbzsZSc08kZgMaNG9O4ceNc9+vXrx/9+vVzxZAiIiIiIiIiImKlQ8vMrTsvitts5i/gN3xn1p1RcsY69uSMvT6DFUIawe5fzeSMuN6St+HSSSgRfK1mS1FWv4+ZnNn9G3R4xepoXM8wYOV/zXaju6BUeWvjKcqq3W4mZw7nMTmzZSokx0OFhmYNrkuX8jSMU8uaDRs2jGHDhvHjjz86042IiIiIiIiIiBQll6KuLg1kg+od3Dt2rasXifcvKv61IwqrtDRzaTkwL2JaxT77QckZ14uYA+u+Mtt9x4CXr7XxuELdHuDhBdG74OxB6+JIS4U1n8PH1eGP913X7+7ZsG+B+RxvfdZ1/d6I7EnnyFW5/zuTlnptSbPWT5g/Isgjp5IzkyZNYtKkSQQGBjrTjYiIiIiIiIiIFCWHlpvbik2hZLB7x65+u1k74sJRiNnv3rHFFL0brpwz6w1ZUW/Gzp6cObMHUpOti6O4ORcJvz5ltts8DfV6WhuPq/iXgepXi71H/GZNDGf2wrhusPgt8zu08hPYv8T5fuPPwfyXzPZtz0OF+s73eSOrcgt4eEPscTgfmfO+exeY/x75l4HGd+drGKeSM+XLm1OjQkJCnOlGRERERERERIqrswdh0Rt5+/WpFB0Hry5pVrOj+8f2KQnVbjXb+xe5f3yBw6vNbfgt4OltXRylw8EnANKS4ewB6+IoTlIS4cehkHgRKreCLiOsjsi16vc1t7vdnJxJTYHVn8HXt8OJjeAbCNXbm4/NfspMrjhj0RtwORrK1YV2Lzof743OpwRUvtlsR67Ked+/vja3LYaax+WDU8mZBg0aAHDkyBFnuhERERERERGR4urXJ2HtFzCpN3zTDrbNMItMS9FlGNdmztSwIDkDZt0ZUHLGKvZ6M9VuszYOm01Lm7naojfg1FZzFsDdE6xNvhWEer0AG5zcDBeOuWfM6AgY1xWWjIDURPP89eQ6uHcGlKsDcVEwd/j1/4Dh4B+wdRpgg75fFI8l6AoDx9JmK7Pf5/Qu83xo84SWj+R7CKeSM0OGDMEwDCZNmuRMNyIiIiIiIiJSHB1dB8fWmevfe/lD1HaY9QSMbmwu5XL5rNURyvWIjjAvJnr5mzMnrGCvO3NkLSTGWRPDjSotDY4UgnozdiHmj8c5vdPaOIqDXbOu1c4YMBaCKlsbT0EoVQGqtjXbEXMKdqzUFFj1qfnDhJObwTcI+v8f3DcTgiqZsywGfGP+G7l7Nmyfmf8xEuNgzr/MdqvHIby1a5/Djcx+fjucw8xf+6yZ+r2hdJV8D+FUcubhhx+mc+fOzJ49m3feeQdD05NFRERERERExG7N5+a26b3w/G7o/BaUCjUv7P/xPnzWwLyodGavtXFK/hz8w9xWbWvdL7SDa0KZ6uZyVpErrInhRhW9C66cB59SENbM6mg0c8ZVzh6E2c+Y7VuHQ51uloZToOxLmxVkcub0LviuMyx9F1KToM4d8NQ6aHZfxoLxlW6C9q+Y7fkvmrVL8mPZf8xjgqqY/8aK61S+2axvFnc66/pm8eeuJdRa//O6hvByIjxWrVrFiy++yJkzZ3j33XeZMWMG99xzD02aNKFMmTJ4enrmeHy7du2cGV5ERERERERECqvoPbB3PmCDW/8FJcrC7S9Am2fMX2ev+xJObYNNE82/Wl2hzZPmMlnpL1xJ4XPIwnozdjYb1O5q/sp//+KrSxWJWxSWejN2IY3M7Y2enLkUBSc2Qc3O4O2Xv2OTE+DHhyDpEoS3gU5vFkyMhUX93rDw33D0T/N1Cwh1Xd+pybB6NKz42Ewe+5WGHiOhyaDs/2277TnY/zsc3wCz/gkPzQGPPMypOLYB1v2f2e4zGnxLuehJCGB+j8Jbm8uaHV4J5etkfHzzJEhJgNAm1z2L1KnkTIcOHbCl+1Dt27eP9957L0/H2mw2UlJSrmvcatWqZVnn5sknn+TLL7/EMAzeeecdxo4dy/nz52ndujVffvklDRs2dOybmJjIiy++yPTp07ly5QqdO3fmq6++onLlYjhdT0RERERERMTd1v7P3NbrBeVqX7vfywea3mNeqDqyFtZ9BXvmwYHF5l9Ycxjyi5nMkcInJREOX13Syqp6M3a1u11LzhiGknruEllI6s3YVahvbmNPmL9kv1HPHT8/ai6/ZJ9B
0eiuvF3gB/j9VYjaASWC4a7x4OnUJePCL6gyVGkNx/4ya7Xc/oJr+k2Mg8l9zSQZQN2e0Puz3JM/nl7m8mZf3w5HVps/Xmj7TM7HpCTCb08Dhjk7tVYXlzwF+Ztq7czkTORKuPnRa/enpsD678x2639c978/Ti1rBmAYxnX/Xa8NGzZw6tQpx9/ixYsBuPvuuwEYOXIko0aNYsyYMWzYsIHQ0FC6du3KpUuXHH0MHz6cWbNmMWPGDFavXk1cXBy9e/cmNTXVuRdERERERERE5EZ38cS1pT5uHZ71PjYbVLsVBk+DZzZBqyfAuySc3AIbx7ktVMmnY39ByhUoWeHaclJWqXYbePlB7HGI3m1tLDeKDPVmCsmKOH5BUDrcbJ/cYm0sVok/d+19uXgMfnkMvu14LZGWkx0/wcbxgA3uHAuBYQUaaqHRYqi53TTJ/Fy7wtZpZmLGLwju/A4Gf5/3WTnBNeGOD8z20ndznwm2ahSc2QMlykH3D5yLW7JX3V53ZnXGz8meuea/PSXKQaOB1929U2nQZcuWOXP4dStfvnyG2x999BE1a9akffv2GIbB6NGjef3117nzzjsBmDRpEiEhIXz//fc88cQTXLx4kXHjxjFlyhS6dDGzilOnTqVKlSosWbKE7t27u/05iYiIiIiIiBQb674yl3OpeitUuTn3/YNrQs+RULEJzH7KTOzc/qJmQhRGB9MtaWb1++PtDzU6wL6F5uwrq5NFN4LTOyHhAvgEQMWmVkdzTY2O5hJD26ZDrc5WR+N+h5aBkQbl6kDTwbDqMzi1FSb1hjo9oOs7UL5u5uNi9l8rJn/7CzfW7IuGA2DhK3DhCBz6w/nnbhiw/luz3elNaHJ3/vu46SHYu8A8p/3yODz2R9Z1vaIjYNWnZrvnJzfubDF3CLsJvEtA/Fk4E3Ht35m/vjG3LR/O/zKC6TiVnGnfvr0zh7tEUlISU6dO5fnnn8dms3Ho0CGioqLo1u1a0SpfX1/at2/P2rVreeKJJ9i0aRPJyckZ9gkLC6NRo0asXbs22+RMYmIiiYmJjtuxsbEAJCcnk5ycXEDPUESKAvs5QOcCEdH5QETsdD6QG9aVC3htmoANSLnlaYz8fAdq98TLyw9bzD5Sjm7ACGteYGG6U3E6H3ge/AMPIKVqu/y9twXEVrsHXvsWYuz+jZS2z1kdTrHncXA5nkBaldakphlmErYQsDUdgtfmSRi7Z5PS5X1zea5CzNXnBM99i/EAUmt1Je2WZ6HxvXis+i8emydi27cAY/8i0po/QNrtL0OpCleDuILXzIewJcWRFt6W1NtehELwnXYfLzwa34PnhrGkbRhPalXnrnPbIlfgdXY/hk8pUhoMvP7XsscovI5vwHZ6J6lL3iWt84iMj6el4vnrU3ikJZNW+w5S6/S+wd43d7PhWaU1HoeWkXpwOWll60DUdryPrsXw8CKl6YNZvv55/W4X+QUEf/31Vy5cuMDQoUMBiIqKAiAkJCTDfiEhIY46NVFRUfj4+FCmTJlM+9iPz8qHH37IO++8k+n+ZcuWUaJECWeehogUE/ZlFkVEdD4QETudD+RGUzvqNxokXeaiXxWW702CffPzdXyLUk2pfOEvjsz9hJ2VhxRQlNYo6ucD75RL9Di1DYClkSkkHM/fe1sQfJI9uQMbttM7WDZrEld8y+d+kFy3Vod+oSIQcSWYA/Otf//Ta+9fjdJXDrP3hxEcDOlhdTh54pJzgpFG993z8QPWxZQixvG+tKdUvTo0ODmTihc34bl5IsbWGewP6cXBCnfQ6PhUqp3dRYJXIMsDB5O4cJHzsRQxAVeq0Qlg7wL+mD2NBO8yuR2SrVaHRlMRiAy8hR1L87CcXA5CQ4bQOvJzPNZ9yZ8xgZwNqOd4rEb0Qhqf3ESyhz9/+NxBwoIFTo0luauVUJ6GQPRfP7H+TGWaH/mWcOBEYEs2rd4CZF5OMT4+Pk99F/nkzLhx4+jRowdhYRnXQ7T9bWqtYRiZ7vu73PZ59dVXef755x23Y2NjqVKlCh07diQ4uHBn5EWkYCUnJ7N48WK6du2Kt7e31eGIiIV0PhARO50P5IaUkoDXGLOwcslur9Kzca98d2Hb7wUz76NG/BbC75gMHkX+0kWxOR/Ydv+KbYeBUb4enfrdb3U4Dsal6diOrKFzpSukteppdTjFV1oqXp+ZRcrrdn+EOmE3WRxQRrbQM7DgBRomrKduj/9Zv+xeDlx6TojagffWixjeJWk18NkslsF6hJSjf+Kx5C28Tm2h/qmfqXdhGbYr5zCw4TVoIp2rF5L6QRZImzQbj+N/0SU4irTbrvO8dvEYXlu3AlBl4LtUKVfHyah6kjY3Bo9t07g1ejIp/VaCXyBcOILX2H8AYOv+Pp1uKl4/YCisbCdCYeJMQhMP0rNdC7zGPApAaP8R9KzUMstj7Ctu5cZl/4UTGxvLTz/9xJ9//klUVBTx8fGMHz+eqlWrOvY5efIkFy5cwM/Pjxo1ajg95pEjR1iyZAm//PKL477QULPIUlRUFBUrVnTcHx0d7ZhNExoaSlJSEufPn88weyY6Opq2bdtmO56vry++vpnX+fP29i7S/3ElIq6j84GI2Ol8ICJ2Oh/IDWXbFLh8BoKq4NV0EHhex2e/bjcoUQ7b5TN4H1kFdbrlfkwRUeTPB0dWAmCr2alwPY/6feHIGjz3zsfz1mesjqb4OrkLEi6CbyBelVuAZyFLnDa7B5a+je3cQbxPrIMikHBwyTnh8HIAbNXb4e1fKut9araD6n/Arl9g6TvYLhw1j2n/b7zq3IA1etK7eRgc/wvPrVPxbP8SeHjmv4+tk82aP9Xb413RRbWven4MR1Zju3AE7yVvQP+vYMELkBwP1W7H6+Zh4OHhmrEkZ1VagE8AtoSLeC96BVKTIOwmvKrekm0SOK/fa5e8g19++SXh4eE89thjjB8/nnnz5rF8+XIuX76cYb8VK1bQqFEjGjVqxLlz55wed8KECVSoUIFeva79Eqd69eqEhoZmmBaYlJTEihUrHImXFi1a4O3tnWGfU6dOsXPnzhyTMyIiIiIiIiKSjbRUWPuF2W7z1PUlZsA8rtFAs739B9fEJs4zDDi43GzX6GhpKJnUu3pd6OifEHfG2liKs8OrzW14m8KXmAHwDYDGV4uwb5xgbSzudGCJua2VS5LFwwMa3wVPb4Se/4XOb0H7lws+vsKuQT/wKw0Xj8GBpfk/PjkBNk0y260ec11cvgFw51iwecC27+HnR+HQcvDygz6fKzHjTp5eUPVqziBijrlt/Q+XzM5z+l0cMWIEzz77LLGxsfj4+NCiRYts973nnnuoWLEiiYmJ/Pzzz06Nm5aWxoQJE3jooYfw8rr2D4LNZmP48OF88MEHzJo1i507dzJ06FBKlCjBfffdB0BQUBCPPPIIL7zwAkuXLmXLli0MGTKExo0b06VLF6fiEhEREREREbkhRfwG5w6Bfxm46UHn+mp6j7ndMw8SLzkfmzjv3CG4eBQ8vKHarVZHk1HpKlCxGWDA3sJVB6VYsSdnqt1mbRw5afmwuY2
Yc2Mk6hIuwrG/zHatPF7T9PI1kwi3v3B9s0SKG29/aGZeM2bTxPwfv2sWXDkHgZWhjotrHYXfArcON9s7fzK3HV+D4JquHUdyl34mXqkQaDjAJd06lZzZsmUL7733HgBDhgwhKiqK9evXZz+Yhwd33303hmE4XfBqyZIlHD16lGHDhmV67OWXX2b48OE8+eSTtGzZkhMnTrBo0SICAgIc+3z22Wf079+fQYMGceutt1KiRAnmzJmDp6dOSiIiIiIiIiL5YhiwerTZbvU4+JR0rr+wmyC4NqRcufYrVbHWwT/Mbfgtzr+/BaF+H3O7Z661cRRXaalwZK3Zrn67tbHkpGJT8/yRlmzONijuIldCWgoE14Ky1a2OpuhqMdTc7lsIsSfzd+z6seb25mEFM6Osw6sQ2thsV2wGtzzl+jEkd+nPey2HgZePS7p1KjnzxRdfYBgGbdq0YfLkyQQFBeV6TJs2bQDYsWOHM0PTrVs3DMOgTp3MBZZsNhsjRozg1KlTJCQkOJZTS8/Pz48vvviCs2fPEh8fz5w5c6hSpYpTMYmIiIiIiIjckCJXwqmt4OVvJmecZbNBk6uzZ7bNcL4/cd6h5ea2Rgcro8iePTlzaDkk5K0Qs+RD1HZINOvNENrE6mhyZp89s2kipKVZGkqB23/1x+95nTUjWStfF6reCkYqbJ6S9+OOb4KTm8HTB256qGBi8/KBe6aZy2jdPbFwLil4IwhpDGVrmrODWzzssm6dSs6sWLECm83G008/nedjqlWrBsCJEyecGVpERERERERECos1o81t8yFQspxr+mwyyNxGroSLuoZgqdQU830AqFnI6s3Yla9rzrZKTYL9i6yOpviJXGVuq7Yt/EthNRpoJpHOHYLDK62OpuAYxrUaKbW6WhtLcWC/4L55sjlTLC/ss2Ya3um6f/uyUqYq9PhYs6Os5OEBj/0BT22AgBDXdevMwadOnQKgbt26eT7G19cXgMTERGeGFhEREREREZHC4NQ2c8krmye0zfuPN3NVpiqEtwWMa2vtizVObILEWLNodsVmVkeTPS1tVnAc9WYK8ZJmdj4lryV3N06wNpaCdGYPxB43C8QXtjpQRVGDvuBf1nxN9+ehHMflGNj1i9l2xYxRKfz8S0Op8i7t0qnkjI+PubZacnJyno+xJ3RKly7tzNAiIiIiIiIiUhis+Z+5bTgAylRzbd/2C6zbfnBtv5I/h5aZ2xrtC/esifq9ze3+xZCcYG0sxUlqChz902xXu83aWPLKPgtiz1yIi7Y2loJyYIm5rXqrWdRenOPlC83uM9ub8pDU2zzJnKkXdhNUblGwsUmx5VRypnLlygDs2rUrz8csWmROLa1Vq5YzQ4uIiIiIiIiI1c4fvvbL4Vv/5fr+G/Y31/KP3gVRztWuFScctCdnCumSZnZhN0FgJUiKu1YjR5wXte3qzKmga4XJC7vQRlD5ZkhLgS1TrY6mYNiTM7W1pJnL2JN6+xfBxePZ75eaAhvGm23NmhEnOJWc6dSpE4ZhMGFC3qYIHjp0iHHjxmGz2ejaVScOERERERERkSJt7Rgw0qBmZ6hYAEXC/ctAnTvM9nbNnrFEQiwc32C2C2u9GTubDer1Mtt75lgbS3FiX9Ks6q2Fe+bU3zlqiEyCtDRrY3G1xDg4stZs1+pibSzFSbla5tJ9RppZeyY7+xaay5+VCDZnjYpcJ6eSM08//TReXl6sWbOGESNG5Ljvxo0b6datG3Fxcfj6+vLEE084M7SIiIiIiIiIWOlyzLVfpBfErBm7JveY2x0/5b1Is7jO4dVgpELZGq5ftq4g2OvO7F1g/rpdnOeoN1NEljSzazgAfIPMGX72pfmKi8OrzSW1SodDsFYncqmW9qTe5OzPIevHmtubHgRvP/fEJcWSU8mZOnXq8Oabb2IYBu+99x6tW7dm5MiRjscXLlzIxx9/TOfOnWndujWRkZHYbDY++ugjKlas6HTwIiIiIiIiImKR9WMh5QqENYfq7QpunNrdzBk0l05B5MqCG0eydqiILGlmF97WLOodf/ZanRS5fqkpcKSI1Zux8ykBTa8md/NSQ6QosS9pVqurOWNMXKdeHyhRzvw3Z//vmR8/sxciV4DNA1oOc398Uqw4lZwBePPNN3njjTew2Wxs2LCBV199FdvVk8JLL73Ea6+9xvLlyzEMA4C33nqLZ5991tlhRURERERERMQqSZev/XL41uEFe3HQywca3mm2tbSZ+x38w9wW9iXN7Dy9oG4Ps71nrrWxFAentkHSJfArDSFFpN5MevalzfbMh0tR1sbiKoYBBxabbS1p5npePtD8frO9MYuk3obvzG3dnubMJREnOJ2cAXj33XdZt24dd955J/7+/hiGkeHP29ubHj16sGrVKt5++21XDCkiIiIiIiIiVtk2A66cN5e6si8jVZDsS5vt/s1MDIl7XDgGZw+YvxCvdrvV0eRdvd7mds8880K2XL/Dq8xt1VvBwyWXEd0rpAFUaW0uzbdlitXRuMa5Q+ZSbR7eUL0IfS+LkpseMrcHlsD5I9fuT4iFrd+b7ZsfdX9cUux4uaqjli1b8tNPP5GSksLu3buJjo4mNTWV4OBgGjZsiL+/v6uGEhEREREREREr7Zplbls87J4C4VVaQZnqcD7S/AV8k7sLfky5tqRZpRbgX9rSUPKlZkfwLgkXj8GprebSe3J97MmZorakWXotHoZjf8GmyXDb8+45ZxUk+5JmVduAb4C1sRRXwTWhentz+bLNk6Hzm+b923+ApDgIrg01OlgaohQPLk95e3l50aRJE7p06UL37t1p2bKlEjMiIiIiIiIixcXlGDiyxmw36OueMW22a7Nnts9wz5gCB4tYvRk7b3+ofXW5p4g51sZSlKUmw9F1Zrsoz9Bo2N9clu3i0WvL9BVl+7WkmVu0vLok3pYp5nfBMGD9t+Z9rR5XrR9xiSI4H1FERERERERELLN3PhhpULEplKnmvnGbDDK3B/+AS6fdN+6NKi3N/NU4FJ16M+nVu7rcXoTqzly3U9vMWQJ+paFCQ6ujuX7e/tD0XrOdVQ2RoiT5ChxebbZrdbU2luKubi8oWR7iTsPeBRC5EmL2gk8paDrY6uikmFByRkRERERERETybvdv5tYdtWbSC64JlW82E0M7f3bv2DeiqO0Qf9a8EFn5Zqujyb863cyaHDF74cw+q6MpmiJXmttqtxXNejPp2WdB7FsIsSetjcUZR9ZAyhUICIMK9a2Opnjz8oHmQ8z2pgmwfqzZbjoY/AKti0uKFadqzgwbNizfx9hsNvz8/AgKCqJ27drccsst1K+vk4mIiIiIiEixk3zFLFxcoYGW/ygurlyAQ8vNdv1+7h+/yT1wfIO5tFmbJ90//o3EXm+m2u3g6W1tLNfDLwhqtDfrc+yZA+VfsDqiosc+Q6NaEV7SzK58XQhvC0fXwpap0P5lqyO6PgeWmttanfXvqjvc9BCs/sycsWm7mqC8+TFrY5JixankzMSJE7G54ETQsmVLRo0axa233up0XyIiIiIiIlJIzHsBtk6Dlo9Az/
8W/V9eC+z7HdKSoXw9KF/H/eM3vBMWvmIutxS9ByrUc38MNwp7bY6iuKSZXb3eZnImYi7cXoyTM4Zhbl15sT7p8rV6M9Vuc12/Vmr5sJmc2TTJ/Dx4eFodUf4dWGJua2tJM7coWx1qdjLPh0YaVG+nf3fEpZz6L+Pw8HDCw8MpV64chmE4/nx8fAgJCSEkJAQfHx/H/QDlypWjcuXKBAYGOu7fsGED7du3Z9q0aS55UiIiIiIiImKxKxdgx09me+M4mPWEWVBXirYI+5Jmfa0Zv2Qw1O5mtrf/YE0MN4Kk+GsX5msU5eRML8AGJzfDxRNWR1NwfnsGPqkJ22e6pr+LJ2BCD0i+DKVCzdmPxUH9vuBfFmKPX0tyFCXnj0DMPrB5QvX2Vkdz42jx8LV2q8eti0OKJaeSM4cPH2bWrFkEBATg4+PDc889x5YtW7h8+TInT57k5MmTXL58mS1btjB8+HC8vb0pVaoUs2bN4vz58xw7doyPP/6YgIAA0tLSePTRRzl27JirnpuIiIiIiIhYZfevkJpoFtP18IIdM2Hmg5CcYHVkcr2SLl9bUsfd9WbSa3KPud3xo1m0Xlzv6FpITYLASlCuttXRXL9SFaBKa7O9Z561sRSUswdhyxSzPtAvj8G8FyEl8fr7O74Rvu1ozk4rEQx3Tyw+sx69/aDZfWZ74wTr4jixCc/JvQk/uyJ/x9kTSlVagX9pl4cl2ajbA6reai7vV6eH1dFIMePU2fX06dP07NmTqKgoli1bxqeffkrTpk3xSHfS9vDwoGnTpowaNYply5YRFRVFz549OXXqFJUqVeKll15i+fLl+Pv7k5SUxJgxY5x+UiIiIiIiImKxrdPNbdtn4J5p4OkLe+fD93dDYpy1scn12b/YLERdphqENrYujjp3gG8QXDxmJhHE9Q5erTdTo2PRr2thTyTaZ30VNxvHm9uAMHO74VuY0BMuHs9/X9t/NI+NO23OlnnsD6jaxnWxFgYthprb/b9D7En3j3/pNEy/D49j62h+dBweG77L+7GOejNdCiY2yZqnNzw8H4bOBU+nKoSIZOJUcubTTz8lKiqK559/njZtcj9Zt2nThueff57o6Gg++eQTx/3Nmzdn2LBhGIbB4sWLnQlJRERERERErHbuEBxbZxbPbTwI6t4BQ34Gn1IQuRIm94P4c1ZHKfkVMcfc1u9r7QV7bz9o2M9sb5thXRyFQdRO84K6qxOeh5ab26Jcb8aufm9ze2Rt8TvvJF8xi9sD9BkN980EvyA4sRG+aXctyZabtDRY+i788qg547FOD3hkkZmILW7K1Ybwtmb9EHcvjZiaDD8OhbgoDN9AADwXvQJ/fZP7sSlJEHl1po2SMyLFhlPJmdmzZ2Oz2ejevXuej7njjjsAmDcv43TSHj3MaWGHDx92JiQRERERERGxmv2CeY2OEFjRbFe/HR78DfzLmBcOJ/Y2f0EsRUNKIuz73Ww36GdtLABNBpvb3bPNC9Q3otQUmDLAvKD+WQNY9Ob1zZZIL+4MrPkfnN5p3q7RwekwLVemGoQ0BiMV9i6wOhrX2jULEi5AULh5wb5Od3hiJYQ2MZc5mzIAVn6S8/J/iXHwwxBY9al5+7bnYPA08A1wy1OwRLN7ze22GXC1RrZbLH7LnO3nG0jKw4vYX6GXef+Cl2Hd1zkfe2wdJMVByQrm+ysixYJTyZnjx81/9H19ffN8jH1f+7F2YWHm9Mv4+HhnQhIRERERERErpaXBtqtLmjW9N+NjlVvA0PlmgenoXTDhDrhw1P0xSv4dXAZJl8ylk8JusjoaCG9jXpBOjDWXy7sRHV4Fl6PNdsJFWPs/GN0EfhoGxzflvZ+0VNi3yLxAP6oeLH7TvL/qrVCynOvjtoJ9abM9c62Nw9XsS2K1HAoenma7TDV4ZDHc9CBgwB/vw/TBcOV85uMvHIXx3WHvPPD0gQHfQJcR1/oqrhr0Ay8/OLMHTm5xz5g7foJ1X5ntAV9DcC12hw0ite2/zPsW/hv+/Cr74+31Zmp1Lj41gETEueRMiRIlANi4cWOej9mwYUOGY+0SE81iZWXKlHEmJBEREREREbHS0T/NC34+AVCvV+bHQxrAsAVQOtxc/mz8HXBmn/vjlPyx1+uo36dwXBj08ICm95jtG3Vps92zze1ND8Lg6WaxaiMVdv4M33WCcd1g16/mDJusnIuEpe/BZ43MWlARcyAtBSq1gN6fmUtkFRf2pc0OLC0+Na9OboUTm8DDG5o/mPExbz/o+wX0HWMmIfb/bi5zdnLrtX2O/AljO5qzpEpWMBPnTQe78xlYxy8I6l39TLjj/BG1E2Y/bbZvf/Hav402G2kd3oDbXzBv//4q/Pll1n3stydntKSZSHHi1H9RtWjRAsMw+PDDDzl79myu+8fExPDRRx9hs9lo2bJlhsf27t0LQIUKFZwJSURERERERKxknzXTsD/4lMh6n7I1YNjvUK4uxJ6ACT3g1Da3hSj5lJp8bXZKg77WxpKefWmzA0tvvCXy0lKv1QBqOADq9TSLVT+x0pyx5uENx/6CHx+CL5qbF3wTYs0l4LbPNJcV/F8zWPVfuHQS/MvCLU/CP9eaReBbDgPfUpY+RZeq0ADKVDfrqdhnIBR1G8eZ2wb9oFT5rPe56YFrtWMuHDUTdpsnw5ZpMKkPxMdAaGN4fBlUudltoRcK9pmdO34067kUlCsXzFlpKVegZmfo+FrGx2026PSmmbQB+P01WDsm4z6xJ83ZptigZqeCi1VE3M6p5MyTTz4JmEuU3XLLLcybNw8ji7UaDcNg7ty5tGnThmPHjgHw1FNPZdhn4cKFWSZtREREREREpIhIijd/qQ+ZlzT7u8AweHgBVGxmXiCc2Nv8JbcUPodXm0silShnLidWWJSrBZVbmbNFdvxodTTudWSN+b3xL2vOmLGr2NRcMum5nebFXv+y5kX531+DUQ3g07rwy2PmkmjYzIvFd0+EF/bAHR9CSEOrnlHBstmuzZ4pDkubXblgLpMFcPMjOe9bsSk8vhzq9DCTU789A7OfhLRkqN/XTJQHVS7oiAufGh3MJTavnIMDiwtmjLQ0+OVxOB9pzhYd+F3WS8bZbNDpDWj3snl70euw9otrj9sTipVaQImyBROriFjCqeRM3759efzxxzEMg0OHDtG3b19CQkLo1q0bQ4YMYciQIXTr1o2QkBD69evHoUOHAHjiiSfo3bu3o5+oqCh+/fVXDMOgR48ezj0jERERERERscaeeWZdktJV83YRv2QwPPQbhLc1a4dMvRPOHiz4OCV/7Eua1etV+GpROAp7T7c2DnezL2lWrxd4emd+PCAUOr8Jz+2C3qPNWWpJl8zaNEHh0OE1GL4DHvjFnHnjlfdawkVW/auzvvb9bs4gKsq2zYDkeChfP2/nWv8yMPh76PwW2K5eCmz/b7h7EviULNhYCytPL2hyt9ne+n3BjLFypLmknJcf3DM158SKzWbOqmn/b/P2ojdgzedm+4CWNBMprryc7eDrr7+matWqvPfeeyQkJBATE8PSpUsz7GOfTePr68vbb
7/NK6+8kuHxwMBAIiIiAKhUqZKzIYmIiIiIiIgV7BfImw7Oe10SvyAY8jNMHQhH18KyD+CucQUXo+RPWipEXJ1pUJiWNLNrOAAWvGLWzYjaYS7RVNylX9Kswf+zd9/RUZVbA4d/M+m9QwIJLUASeu+9iCBNVBQUQcV+Vez1u/beUKwoIoKACoKiAtJ77xBCSaghhfTeZs73x5uAXASSTE/2s1bWnGTOOe9OMhmGs2fvPfrq+7p6Qqe7oMNENQ8KTSVD7WFukLXV76QSU9mnVdVJhwm2jqh6NA12fqe2O9+jLupXhl6vZps0uw5Ki2pfG7N/03a8qlA5uhwKMsxblXJ0Oax9R20P/1hVMF1LRYIGHax7B1b8V7WVjF+r7m822HzxCSHsgln+NX7++edJSEjg7bffZtCgQdStWxdXV1dcXV2pW7cuAwcO5K233iIhIeGyxAyAp6cnDRs2pGHDhjg7m5wvEkIIIYQQQghhbTlJkLBGbVd1qLSrJwx9V20fXAgph8wbm6i+M9shP1Ul0Rr1sXU0l/MIgKjyDhx7a0n1zJltkJeifieNK/k70euhUU9o1Kt2JmZAfd9dJqvtbV+rJIcjOrkR0o6Aixe0ubXqx4e2lsRMhbotILSNavF2cKH5zpuRoNoHokHnydBufNWO7/889Hteba9+HYqz1XNdvfbmi1EIYRfM9i9yaGgozz77LH///Tfnzp2jsLCQwsJCzp07x4oVK3juuecICwsz13JCCCGEEEIIIezJgZ9BM0JENwhsUvXjw9qUVwFoqnpG2IeKlmbNh4Kzq21juZILg71/BkOZbWOxhoq5TtHD7fd3Yq863AkunpByQM3tcUQ7vlW3bcaCu69tY6kJKhIn5mptVpIP8+9QLQTDu8CQt6t3nn7PqfaDFSIH2F9bSSGEyWrp2yWEEEIIIYQQQpiNpl2sWqiYAVId/V9Q8xDi/oDE3eaJTVSfpv2jfZYdtjSr0HQgeIVA/nmIX3Xt/R2Z0XgxYdZilG1jcUQeARerTbZ9ZZ01i/PgxHrY8CHMvQ2mdbo4M6iqcpPV8yOolmbCdK1uBr0znNsN54+Ydi5NgyWPQeoh8KoDY38wLYHa71kY9Cp414WOd5kWmxDCLklyRgghhBBCCCGEaZL2wfnD4OR27RkYVxMSdfHC6eo3zBKaMMG53ZB9RrVPihxg62iuzMkFWpcP9t5Xw1ubnd0BuUng5gtN+tk6GsfU9X51G/cnZJ0277k1DdKOq2T1H4/DV73gnQiYNQJWvQZHl0L6Mfj1PkjcVfXz754NxjJVkVEb5itZg3cINC2f5WLq88e2r+HAL6Bzglu+B18zdBDqNQWeOgqNe5t+LiGE3TH7gJecnBxyc3MxGAzX3LdBgwbmXl4IIYQQQgghhLVVXNCKvgE8/E07V99n1cWt+FVwajM07GFyeKKaKqpmmg0GFw/bxnItbcfB1i8g7i8ozFQVEjVRRcVF1FBwdrNtLI6qToxKbCWshe3fwHWvm3Y+Q5lqNRa/WiXPCjMu38c3XM15Ce8M8Wvg+AqYfzvcu6byF/ANZbDre7XdebJpMYtLtRunEmf7f4YB/1e99mFnd8HfL6rtIW+qGU9CCHENZknOrFixgi+++IINGzaQmZlZqWN0Oh1lZbWgF6wQQgghhBBC1GSGUpVMgYuzP0wR2BjaT4BdM1X1zKQ/Qacz/byiajQNYivaZ9lxS7MKoa2hTkvVTujQYuhUA1sAadrF5Iy0NDNN1wdUcmb3LDXbw9Wr+ufa8CGs/cecLCc3Nbg9vBNEdFEJGd96F+9vPwFmDIbzcfDT7eo5rjLJz2PLIecseATK79/cml8P7v6Qk6ha0EX2r9rxpUWw+AFV1dTyRvX4EkKISjC5rdmjjz7K9ddfz++//05GRgaaplX6QwghhBBCCCGEgzu2AgrSVU98c7W+6vO0usB5ahMkrDHPOUXVpMZCRrz6PTS7ztbRXJtOd3HeUU1tbZa4S12cd/WGyIG2jsaxNbsOAhqpoe37f67+ebLOwMaP1Xavx+He1fD8WbhnuaqeaDHq0sQMgLsvjJunqrsSd6kZJZW5RrZjhrptfwe4uFc/ZnE5ZzdodZPars7zx5o3Ie2o+nfwho/kDQVCiEozqXJm7ty5fPbZZwC4u7szevRoOnbsSGBgIHq9jLMRQgghhBBCiBqv4kJW61vAyUyds/3qQ6e7YduXqnqmSX+52GVtFVUzTQeCm49tY6ms1rfAiv/CmW2QHg9BkbaOyLxiF6vb5tfLxXlT6Z2gy32w/AU1J6TjpOo9x6z4PygrhIY9YeDLlT9HYBO4ZRbMvhH2/wR1WqjZIleSkaBaPULNrAqzB23Hwc4Zqp1jcW7ln/fObIct6toow6eCZ6DFQhRC1DwmvXL++uuvAYiIiGD16tVERtawFz5CCCGEEEIIIa6sIAOOLlPb5mhp9k+9n1AthxJ3qTWihpr3/OLqKubNxIywbRxV4ROqKkqOr4B982HAi7aOyHykpZn5tb8DVr8J5w/DiXVqDk1VnNwIhxaBTg/Xv1P15E6TvjD0XfjrKVj5ipqF03zIv++7c6a6bTpIJXaE+YV3gqCmkH5cJafb337tY0oLYfGDoBnVv4HRwywfpxCiRjGpvGX//v3odDpefvllScwIIYQQQgghRG1z6FcwlKh5H6GtzHtu7zrQ9X61vfoNMBrNe35xZenxanaL3tnxkmJtb1O3++bXrMdM0l7IOg0unuoCvTCdux+0G6+2t31dtWMNZbD0WbXdcRKEtaleDJ0nQ8e7AA0W3AOpcZfvU1oEe+ao7U73VG8dcW063T+ePyrZ2mz1GyqZ4xMG179tudiEEDWWScmZ0tJSANq3b2+WYIQQQgghhBBCOJC95RewzF01U6HHo+DmCykHL7Z0EpZXUaHRuI+ai+FIom9Qj5ns03B6s62jMZ9Di9Vt8yHg6mnTUGqUigTwkaWqdVhl7f5ePS+5+0P/l6q/vk4HQ99TbdFKcmHebaoi8Z9iF0NhBviGX7myRphHm/LkzMkNKhl6Nae3wpbP1faITxzvuVIIYRdMSs40atQIgLy8PHPEIoQQQgghhBDCUaQdg8SdoHNSsz4swTMQuv9Hba95S71bXVje4fJ5MzEjbRtHdbh4QMvRantvNQZ72yNpaWY5wc3KK5E02P5t5Y4pyFAVEwD9XwSvINNicHaFsT+AfwPIPAG/TAJD6cX7d8xQtx0nqVk5wnL8I6BRb7W9/6cr71dSAIsfAjRod7skzYQQ1WZScmbMmDEArFq1yizBCCGEEEIIIYRwEBVtX5oOUi3ILKXbg+odyenH4MDPlltHKFln4NweQKeqUBxR2/JWVbGL1UVUR5d8QF20d/aApoNtHU3N0/UBdbtnNhRX4s3Ha96Cwkyo0wI63W2eGLyC4bZ54OKl5t8sf0F9PWk/nN2uWgx2uNM8a4mrq2h1t3eeSoz+m9WvQ0Y8+NSDIW9ZLzYhaqCiUgPalf7WagGTkjNPPvkkDRo0YOrUqcTF/UtfTCGEEEIIIYQQNY/RCPvK31XczkItzSq4
+0LPKWp77TtQVmLZ9Wq7w0vUbcMelk26WVKDbhDQCEryIO4PW0djuoqqmWaDwM3btrHURJED1SD44pxrzxpJOQQ7yytZhr4LTs7miyO0FYyZrra3T4edMy+uFTMCfOqaby1xZTEj1GynjHg4u/Py+09thq1fqu2Rn4KHv1XDE6ImOZyUQ5tX/ubO77aTU1R67QNqIJOSM35+fixbtoy6devSs2dPvvjiCzIzM80VmxBCCCGEEEIIe3RyA+ScVQO1m1thYHyX+8CrDmSdUu9uF5bjyC3NKuh0F+cgVXawt73StIvzllqMtmUkNZdeD13KZ89s+1oln/+NpsHSZ0Ezqr+Pxn3MH0vM8IszbP566mISvNM95l9L/Ds3n4vPf/vmXnpfSf7FdmbtJ0AzqWQTwhSrDqdQYjCy4VgaY7/aQlJ2oa1DsjqTkjNNmjRh6NChZGdnk5mZySOPPEJISAihoaE0adLkqh+RkZHm+h6EEEIIIYQQQljTvvnqtuUYcHG3/HquntDnKbW9/n0orX3/eb+MoUxdKDSX9HjYNl0NuQZ1kdiRtblV3SashZxzNg3FJKmxkH4cnNxkroUltRsHrj6qfWLC6n/fJ/Y3lZh2dofr3rBcLH2eUs+txjIoK4TgKGjUy3Lricu1vU3dHlwIpUUXv77qNdVi0Lc+DHnTNrEJUYPsO5t9YTsuOZcxX2zmSHKuDSOyPpPqL0+ePHnJ55qmoWkaqamp1zxWp9OZsrQQQgghhBBCCFsozrvYZqmthVua/VPHSbDpU1Wxs3MmdH/IemvbG0MZfNMPkg9CSBSEd4aILuo2OEpVAlxLUQ6cWA/xqyF+FWSevHhfRDfwC7dU9NYR2Bga9IDTm2H/z9Briq0jqp6Kv7Wmg9Q7+oVluPlA+ztg25eqeqbpoEvvLymAv8srWno+BgENLReLTgejPldttZL2Qdf71NeE9TTuoxIwOYlwdBm0HA0nN8K2r9T9I6epylEhhEn2n80C4ONb2/LZ6uPEn8/n5q828/WEjvSIDLZtcFZiUnJm4sSJ5opDCCGEEEIIIYQjOLwESvMhMFIlBKzF2Q36PgNLHoUNH6rh2LV1/sbh39WQeIDzceqjot2bmy/U71ierOkC4R3BI0C1akrao5Ixx1erIePGsovn1DurpEzTAdB2vPW/J0toN04lZ/bNUxfUbXWBOycJ/YaPGBj7G7qwNOh8d+VjqUjOtBhlufiE0uVedfH92N+QdhyCm168b/OnkH0GfMMvzsCyJFdPmPgHnNkOTQdafj1xKb0TtBkLGz9Wzx9NB8FvD6v7Ok6S34kQZpCcXURKTjF6HQxpGUr/qDrc+8NOdpzMZNJ3O/hgbFtGtq1n6zAtzqTkzMyZM80VhxBCCCGEEEIIR7B7lrptO876F7vbjVcXyzJPwPavofeT1l3fHmgabJ6mtrv/Bxr2gLM74MwOOLdbDTVPWKM+KgQ1g4J0KMy49FyBTdQw9KYDVdukmlaZ0WIU/PW0Sl4l7YV67a27fs459XjdNQsnQzHeAH89Aed2wbAP1AX4q0ktT7w5uULU9daIuHYLilSt444ug+3TYdh76utZp9XvEeC616/9ezMXd19oNuja+wnLaDtO/d6PrVBvCsg8CX4RMPh1W0cmRI2wr7xqpnldHzxdnfF0hdn3dOWJn/fy14FkHp23h+TsQu7t3aRGd+AyKTkjhBBCCCGEEKIWSY2D01tA56RaAFmbkwv0ex4W3QebPoHO96oLmLXJ6S0qCePsrt7B7x0C0Teo+wxlkHroYrLm7A7VGin9mLrf1Qea9IXIAeojsLHNvg2rcPeD6OFwcAHsnWe95Ex2orqou3sWGEoAMEZ0I744gKbnl6Pb+yMk7Yexs1RC4EoqqmYiB0gLJWvper9Kzuz9EQa8pJ5f/v4/KCuChr2g5Y22jlBYS0gU1Ougnm8PLlRfGzmt9v2bI4SFVLQ0axN+8d83dxcnPhvXgTd8D/PdphO89Vcc57KK+L/hLXDS18wEjSRnhBBCCCGEEEJUzq7v1W3UUPANs00MrW+GDR9A2lHYOQN6PW6bOGylomqm7TiVmPknJ2cIa6s+Ok9WX8tPVxcXXb0hvJNKcNUmbcep5MyBX9QQd2dXy62VfbY8KfPDhaQMDXtC32cxhHcndulSGg++F+dF90HKAZjeH2788mJy7X9JSzPra9JfzW1KO6ISNHVbQexi0Olh6Lsy+6W2aTdePX8CdLobIvvbNh4hapD9Z7MBaBPuf8nX9Xod/x3Rgnr+7rzx52G+33yS5Owipt7WDncXJxtEalmVmBJYeUVFRWzatImFCxcye/ZscnJyzHl6IYQQQgghhBC2UloI++aq7Y532S4OvRP0ekJtb/lcxVVbpB2DI3+p7e4PV+4YryBoNhgadq99iRmAJv3AO1S1dDu+wjJrZJ+FP56AT9vDjm9VYqZhLzUz5K6/VLVS+UV9rVEfeGCDmgdUnA3zx8PKV1TV0z+lHVNVUHpnlQwV1qHTqeoZgG1fw7Ln1HbHuyC0le3iErbR6ibwCoHg5jD4NVtHI0SNoWka+85kAdD2f5IzFSb3bsJn49vj6qRn2aFkbv92G5n5JdYL0krMkpw5c+YMEydOxN/fnz59+jB27FgmTZrE2bNnL9lvxowZdOnShcGDB6NpmjmWFkIIIYQQQghhDYcWQ1E2+DdQbZZsqfXN4NcA8s/Dnjm2jcWatnymbqOGQXAz28biKJycoc0tanvvXPOeu7RQJWU+aaequAwl0Kh3eVLmT2jc+9+P860Hk/6Erg+qzzd+DHNuhLzzF/epqJpp0g88Aswbt7i6treBm5+abZVyENz9VYszUft4BsJj++D+9TVvJpcQNnQyvYCcojJcnfVEhV75b2t4m3rMvqcLvu7O7DqVyU1fbSYpu2a9Kcfk5Mz27dtp3749c+bMoaSkBE3Trph4GTlyJPv372f16tX8/fffJq2bmJjIHXfcQVBQEJ6enrRr145du3ZduF/TNF555RXq1auHh4cH/fr149ChQ5eco7i4mEceeYTg4GC8vLwYOXLkZQklIYQQQgghhBDArpnqtsNE0Ju1CUPVOblAz0fV9qZPwVBq23isIe+8mpsC0OMR28biaNqOV7dHl0HGCfOdd82bKiljLIXGfWDSXzDpjysnZf7J2RWGvgM3fwcuXnBiPXzdG05vU/fHLla3LUabL15ROa5e0GHCxc8HvKQu0ovaydULXDxsHYUQNUrFvJkWYb64Ol/9NWXXJkEsfLAH9f09SDifzxdr4q0QofWY9Io6OzubUaNGkZGRQWhoKF988QUHDhy44v4hISEMHarKcf/8889qr5uZmUnPnj1xcXFh6dKlxMbG8uGHH+Lv739hn/fee4+PPvqIzz77jB07dhAaGsrgwYPJzc29sM+UKVNYtGgR8+fPZ+PGjeTl5TF8+HAMBkO1YxNCCCGEEEIARiOUFNg6CmEuKbFwZptqsdR+wrX3t4b2d6h2M9mn4cACW0djeTu+AUMx1O8IDbrbOhrHUrcFRA4EYxmse88858xJgu3fqO2
bZsDEJdCoZ9XP0+omuG+NapuUmwTfD4MVL0PyAdA5XXkejbCsrveDux+Ed7ZtG0chhKiB9p1R82bahvtVav9mdX14flg0APsTsy0Wly2YlJyZNm0aKSkpBAcHs2XLFh544AFatmx51WMqWppt37692uu+++67REREMHPmTLp06UKjRo0YOHAgkZGRgKqamTp1Ki+++CJjxoyhVatWzJo1i4KCAubOVWXM2dnZzJgxgw8//JBBgwZdqP45cOAAK1eurHZsQgghhBBCCOCPx+CterDgbkiNs3U0wlQVVTNRw8Cnrm1jqeDicXHuysaPVEKwpiopULNMQFXNyFDyqhvworrdPx/OHzX9fBs+hLIiiOimEiymCImCe1dDyzEqgbRpqvp64z5SsWEr/g3g8VjVfs7J2dbRCCFEjVJROdM2wr/Sx8SE+QJwJDkHg7HmjEsx6V+YJUuWoNPpeOKJJ2jQoEGljqlI3sTHV78E6ffff2fIkCHccsstrFu3jvr16/PQQw9x7733AnDixAmSk5O57rrrLhzj5uZG37592bx5M/fffz+7du2itLT0kn3q1atHq1at2Lx5M0OGDLls3eLiYoqLiy98npOTA0BpaSmlpbWgjF4IcUUVzwHyXCCEkOcDIYC0ozjvno0ODQ4uRDv4K1qLURh6PQUh0baOzmpqzPNBaQHO++ajA8ra3YlmT99Pu4k4b/gIXdpRyg79hhY93NYRWYR+9xycCtLR/BtS1vR6sKffgaOo0wan5kPRH12KcfUbGMbMqP65ss/gvOt79TfR9zm0srJrHnLN5wO9O4z6Gn39TuhX/hedsYyy6BH29fdW2+jdQEP+3oRF1JjXCEJUUZnByMFzqvqlRah3pf8G6vu64uGip7DUyLHkbCJDvCwZpskq+32ZlJw5duwYAH369Kn0MRWtxyoSG9WRkJDAl19+yRNPPMELL7zA9u3befTRR3Fzc+POO+8kOTkZgLp1L31HV926dTl16hQAycnJuLq6EhAQcNk+Fcf/r7fffptXX331sq+vWbMGT0/Pan8/QoiaY8WKFbYOQQhhJ+T5QNRm7U9NpwEa571jKHX2pl7WDnSxi9HF/sY5/84cCR1Nrke4rcO0Gkd/PmiQvp72xTnku9Zh5eE8iPvL1iFdItq/H1Epv5P31yusi9fVvKoSzcjAwx/gDRz06k3CMtPmt9ZmPvpe9GcZ+sO/sW5BZ3I8K/cm0//V7tS3NDSWkurTki2HcuBQ5f8mrv18EI5/s5cIyo0j4Zw/WpJ9/b0JIczL0V8jCFFViflQVOqMu5PG4e3rOFKFl2113Jw4Vapj3tL1dAi27+qZgoLKtXc2KTlTWFgIgJdX5TNVeXl5ALi7u1d7XaPRSKdOnXjrrbcAaN++PYcOHeLLL7/kzjvvvLCf7n9elGuadtnX/tfV9nn++ed54oknLnyek5NDREQE/fv3JygoqLrfjhCiBigtLWXFihUMHjwYFxcXW4cjhLAheT4QtV7WaZz3bgEg4OZP0Op3oDTlEE4bP0Aft4T6Wdupn7UdY/RIDL2fgjotbByw5dSU5wOnmZ8A4N7zAYb1sMPKlIKuaJ+txL/wJDfEeKI16W/riMxKd+QvnPemoLn7ET3+DaJdvW0dkkPTFu1EF7uIvsaNGIbNqfoJ0o/jvHcTAIE3fciw+p0qdVh1ng+iqh6dEMJB1JTXCEJU1c87z8L+WNo1CGT4DZ2rdOzm0lhO7TyLR1hThg1uZqEIzaOyhSkmJWdCQkJITEzkzJkztG3btlLH7Nq1C4CwsLBqrxsWFkaLFpf+Jy4mJoaFCxcCEBoaCqjqmH+uk5qaeqGaJjQ0lJKSEjIzMy+pnklNTaVHjx7/uq6bmxtubm6Xfd3FxUWeSIUQgDwfCCEukucDUWtt/xI0AzTph3Ojrupr4e3gtjmQckgN445djD7ud/Rxv0PMCOj7LIS2tmnYluTQzwfJB+DcLtC74NTxTpzs8fvwC4WOk2DrFzhv/gSirrvmIQ5l2xcA6Drdg4tXwDV2Ftc04EU4/Bv6Y8vQp+yD8MolVy7Y9KF6jmt+Pc6Nuld5eYd+PhBCmJ08J4ja5mCSKtxo1yCwyo/9VuH+/LTzLHEpeXb/d1PZ+PSmLNKlSxcAli5dWqn9DQYD06dPR6fT0atXr2qv27NnT44cOXLJ144ePUrDhg0BaNy4MaGhoZeUBpaUlLBu3boLiZeOHTvi4uJyyT5JSUkcPHjwiskZIYQQQgghxFXkpsDuH9R27ycvv79uSxg7Cx7cAi1vBHRweAl81QsW3gsG6btud3bOVLcxw8E7xLaxXE33/4DeBU5thNNbbR2N+ZzZAWe2qu+t6/22jqZmCG4Gbcer7dVvVO3YlFg4sEBt93/RvHEJIYQQtcC+M1kAtA33q/KxLcJ8ADicVP1xKfbGpOTMuHHj0DSN7777jj179lx1X6PRyAMPPEBsbCwAd9xxR7XXffzxx9m6dStvvfUWx48fZ+7cuUyfPp2HH34YUO3MpkyZwltvvcWiRYs4ePAgkyZNwtPTk/Hj1YswPz8/7rnnHp588klWrVrFnj17uOOOO2jdujWDBg2qdmxCCCGEEELUWlu/AEMxhHeGRr2vvF/dFnDL9/DQFmg5BtDBgZ9h9yxrRSoqozgP9v+stjveZdtYrsWvPrQbp7Y3fGTbWMxpyzR12+ZW8Am1bSw1Sd9nVMIrYQ2c3Fj549a8CWjQYjSEtbFUdEIIIUSNVFRq4EhKLgBtIvyrfHxUqC8AKTnFpOcVmzM0mzEpOXPTTTfRo0cPiouLGThwIJ9//jmpqakX7tfpdKSkpDB79mw6derEd999h06n4/rrr6dfv37VXrdz584sWrSIefPm0apVK15//XWmTp3K7bfffmGfZ555hilTpvDQQw/RqVMnEhMT+fvvv/Hx8bmwz8cff8zo0aMZO3YsPXv2xNPTkyVLluDk5FTt2IQQQgghhKiVCrNgxwy13fvJyg1lrxMDt8yEoe+qz9e9ByX5FgtRVNHBhVCSC4FNoHEfW0dzbT2ngE4Px5ardmyOLiNBVZYB9PiPbWOpaQIaQofyebWr3wCtEkOFz+2BuD/UY6z/C5aNTwghhKiBDp3LwWDUCPZ2pZ5f1efRe7s50zDIE4DDSbnmDs8mTErOACxevJjo6GiysrJ49NFHCQsLQ1f+H7EOHTpQr149Jk2axL59+9A0jVatWvHjjz+aHPjw4cM5cOAARUVFHD58mHvvvfeS+3U6Ha+88gpJSUkUFRWxbt06WrVqdck+7u7uTJs2jfT0dAoKCliyZAkREREmxyaEEEIIIUSts+MbdSG/TktoNqRqx3a8C/wbQl4KbP3SMvGJqttV3tKs46TKJdtsLSiyvF0esPFj28ZiDlu/BM0ITQepRKYwrz5Pg7M7nN4C8auuvX9FC7Q2t0JIlGVjE0IIIWqg/WezAGgb7n8hf1BVLcJU9UxNaW1mcnImODiYnTt38vDDD+Pm5oamaRc+iouLL2w7Oztz3333sXnzZvz9/c0QuhBCCCGEEMIulORfTKr0ehz0Vf
xvhrMrDHhJbW/6BAoyzBufqLpze1WlgJMrtLv9mrvbjV5PqNtDiyA93raxmKIgA/bMUds9HrFtLDWVbxh0nqy2r1U9c2oLHF8JemfVEk0IIYQQVVYxb6ZNuH+1zxFTw5IzzuY4iaenJ9OmTeOVV15h+fLl7Ny5k9TUVAwGA0FBQbRv356hQ4dSr149cywnhBBCCCGEsCe7f4CCdAhodLFyoapa3awSMykHVdXDda+bNURRRbu+V7cxI8Ar2KahVEloK2h+PRxdBpumwshpto4Iss5A2hGI6ApuPtfeH2DnDCgtgNDW0LivZeOrzXo9Djtnlrcs+xNihl++j6ZdrJppf4dq8yeEEEKIKtt/NhuANhF+1T5HRXImVpIzlwsKCmL8+PGMHz/enKcVQgghhBBC2KuyEtj0qdruOQWcqvlfDL0eBr4Mc2+B7dOh6wNqyLuwvuJcOPCL2u54l21jqY7eT6rkzN550Pc52zyOMk9B7G8QuxgSd6mvuXhBqxuh/Z0Q0eXKreLKimHbdLXd41HHaCnnqLyCoduDsOEDWPMmRA0F/f/MoE1YC6c2qiqyPk/bJEwhhBDC0WUXlpKQpmZLtjWpcka90eV4ah7FZQbcnB17drzJbc2EEEIIIYQQtdj++ZB7DrxDoZ2Jb9JqNhga9ICyIlj3jnniE1V3YAGU5EFQM2jUy9bRVF1EF2jUG4ylsOUz662beUpVf03vD5+0gRX/V56Y0YFPGJTmq1Zl310Hn3dRSc2885efZ//PkJ8KvvWrX4kmKq/Hf8DND1JjVTu8f/pn1Uyne8Av3PrxCSGEEDXAwURVNRMR6EGgl2u1z1Pf3wNfd2fKjBrHU/PMFZ7NWDw5U1xczKpVq/jpp5/Yvn27pZcTQgghhBBCWIvRcHHweo9HwNnNtPPpdDDoFbW9Zw6cP2ra+UT17JqpbjtOctyqjd7ls2d2fQ/5aZZbJ/MkbJwK0/uVJ2T+C+d2g06vEkTDPoAnj8ATh+GupdB2PLh4QtpRlbz5KBrm3w5Hl4OhDIxG2Fzeiq3bg+DkYrnYheIRAD3L5/qseUv9HiocXQaJO9XvrNfjtolPCCGEqAH2mmHeDIBOp/vH3JlcE6OyPZPamp06dYrPP/8cgBdeeAF/f/9L7t+6dSs333wzSUlJF77WoUMHFi5cSIMGDUxZWgghhBBCCGFrsYshI0Fd3Ow4yTznbNAVoobBkb9gzRsw9gfznFdUTuJuSNoHTm6mV0LZUpP+UK+9miWy7SsY8JJ5z59xAhZOVhfuK+j00LAntBwNMSPBu86lxzTsoT6GvgsHF8Ke2aqyJu4P9eFTT1UqpR0BVx/ocKd5YxZX1vUB2PolZMTDvnnQYYJKlK1+U93f5T7wqWvbGIUQQggHtv9sFgBtw6s/b6ZCTJgv205kcLgGzJ0xqXJm0aJFfPDBB6xevfqyxExubi6jR48mKSkJTdMufOzatYsbbriBsrKyfz+pEEIIIYQQwv5pGmwor5rp+iC4eZvv3AP+D9CpmR0V8zqEdVRUzbQYBZ6Bto3FFDod9Cqvntk2HYrM/J/3v19SiRmdHhr3gRs+UhUyk/6AzpMvT8z8k7svdLoL7l0ND26Bbg+BR6BqD3jgZ7VPx4ngbvrFC1FJbj4XK2PWvavm/hz+DVIOgJsv9HzMtvEJIYQQDm7/WdXWzNTKGYAW5ZUzsedqeXJmxYoV6HQ6Ro8efdl906dPJzU1FYBHH32U3377jYceegiA2NhYZs2aZcrSQgghhBBCCFs6tkJduHT1hi73mvfcdVtA29vU9spXzXtucWVFOXBgodrudJdtYzGH6OEQ3ByKs2HnDPOdNyMB4v5U2/evh4lLoPM9V0/IXEndFnD92/BkHNzyPTQdBPU7QY9HzRevqJzOk9XsrOwzsHOmanEG0P1hx05UCiGEEDaWmlNEUnYReh20rm/6m09a1Ctva5acg6ZpJp/PlkxKziQkJADQsWPHy+77+eef0el03HjjjUydOpURI0bw2Wefccstt6BpGgsWLDBlaSGEEEIIUROUFECx4w9yrHU0DTZ8oLY73WWZC5f9ngcnVzixDuLXmP/84nIHflZD64OjoEF3W0djOr3+YvXMls/V8405bPsa0FQiJbS1ec7p7AYtb4Q7FsK9q6SFli24eECfp9T23y+quUAeAaqySQghhBDVtq+8aqZpHW+83EyasnLhPE56HVkFpSTnFJl8PlsyKTlTURlTt+6lLxxzcnLYvXs3AHfddek7rm67Tb0Dbt++faYsLYQQQgghHFl6PPz1NLzfFKZ1tOzAbmF+pzbDmW0qedL9P5ZZI6AhdLpHba98RSWEhOVoGuz8Xm13uku1BasJWt8M/g0h/zzsNkP3hqJs2DNHbctF+5qnw0TwawDG8jbsPaeoNnRCCCGEqLaKeTPmaGkG4O7iRGSIF4DDz50xKVWVm5sLgMFguOTrmzZtwmAw4OzsTL9+/S65LyIiAoCMjAxTlhZCCCGEEI5G0+DkRtj6BRxZCpRfbC/Nh40fw5A3bRqeqIINH6rb9neAT6jl1un9pBqanrRXzZ9pOdpya9UkhlLITYacc5CTCIWZUJz7j4+c/7nNVS3N8lPB2f1iS7mawMlFzRL5Ywps+gQ63gUu7tU/3+4foCQPQmIgcoDZwhR2wtkV+j0Lvz0MXnXM37JRCCGEqIUqKmfahptvnl5MmC9HU/KIPZfDgGjHrTg2KTnj5+dHRkYG586du+Tra9euBaBt27Z4eXn967Hu7ia8IBZCCCGEEI6jrAQO/araCiXvv/j1ZtdBRFdY/Trs+FZVYPiG2S5OUTnn9kD8KtA5WX4uhneIelyse0c9TqKHg5PprRAcXlEOpMaqxEt24sUkTE75dl4KaMbqnbvDRNXKqSZpNx7Wv69+PnvnqNki1WEoK29pBnR7sOZUF4lLtbtd3Ya2Add/v54hhBBCiMrRNO1C5UzbCH+znbdFmC+/7T3H4aRcs53TFkz6n02rVq1Yv349ixYtYtSoUYCqoqmYN9O/f//LjklMTAQub4UmhBBCCCFqmPx02PUdbP8W8pLV15w91Lvyuz0EIc1VNc2xFXBmq5phcsOHto1ZXNuGj9Rt65shsLHl1+v+MOz4BtKPqwvrHSdZfk17VpQNn3W5+Dd1JXoXlez0rQ+eQeDmC24+//Phq1o2VXzu7gd+Edb5PqzJ2U1Vz/z1FGycCu3vVBUSVRW3RA2L9wyCNmPNHqawEzqdqgoUQgghhMlOZxSQVVCKq5Oe6FDztQqNCVPnqtVtzW688UbWrVvH7NmzqVu3Lr1792b27NmcOnUKnU7H2LGXv2DduXMnAA0aNDBlaSGEEEIIYa+yTqsL+PvmQVn5gEbvUNUeptPdlw6P1+lg4P/B9zfArlmqEiOgoW3iFtd2/ggcXqK2ez1unTXdfaHP07DsOVj7DrS5VQ3urq02T1OJGTc/qNsSfOuBX32VhPGtrz73rQ9eIaA3acRozdJ+Aqz/QCVX9s2DjhOrfo4tX6jbTvfU7segEEIIIUQlVbQ0iwnzwdXZfK9NK5IzJ
9LzKSgpw9PVMavrTfqJ3H///cTExKBpGh988AGjRo1iwYIFAIwYMYJOnTpddsyiRYvQ6XSXzaIRQgghhBA1QPZZ+HYw7JqpEjNhbeHG6TDlAPR56tLETIVGvaBJPzCWwvr3rB6yqII1bwKaai9WJ8Z663a6W1V05CbB9unWW9fe5Kao9oAAo7+Au5fCzTNg8GvQ9X6IGQ71O4BPXUnM/C8Xd+hZ3oZv40eqRVlVnN0JZ7eDk2v126IJIYQQQtQy+89kAdAm3N+s5w3xcSPExw1Ng7hkx21tZtIrdjc3N1atWsWYMWNwdnZG0zRcXFyYMGECs2fPvmz/9evXExsbC8DgwYNNWVoIIYQQQtibohz4cax6V39INEz6C+5bB21vvXYLoQH/p273zoO045aPVVTd8VUQ+xvo9NDveeuu7ewG/V9Q2xs+gsIs665vL9a/D6UFUL8TRN9g62gcT8e7wDMYMk/CgV+qdmxFUqzVzSr5JYQQQgghrml/eeWMOefNVKgJrc1MfjtVaGgoCxYsICcnh8TERHJycpg1axY+Pj6X7RsREcGaNWtYvXo1nTt3NnVpIYQQQghhLwyl8MtESD0E3nXh9gXQqGflB2aHd4LmQ0EzwNq3LRurqLqyYlj6jNrucj+EtrJ+DG1uhZAYKMqCTZ9Yf31byzihKtIABr0iw+irw9UTevxHbW/4AIyGyh2XdUYlJgG6P2SZ2IQQQgjxr3KKSll1OIWi0kr+uy3sRpnByIHE8uRMuJ/Zzx8TpvIPtTo5U8HNzY2wsDBcXa/8rsjGjRvTt29f+vbti07+MyGEEEIIUTNomhq0Hb8aXDxh/E/gX42h4hWVEQcXQsoh88YoTLPlM0g/rhJv/a1cNVNB7wQD/6u2t36pZhvVJmveAmMZRA6Exr1tHY3j6jwZPALU4/nQosods326Shw36g2hrS0bnxBCCCEA0DSNxXsSGfDBOu6ZtZOZm07aOiRRRcfP51FYasDL1YkmId5mP3+L8sqZ2HOSnBFCCCGEELXV5k9h1/eADm6aAfXaV+88YW2gxWhAUxeihX3IOg3r3lfb170B7uZ/11ulRQ2Fhr2grBCW2ShJZAvJBy624Rr0sm1jcXRuPtCtvPpl/QdgNF59/+I82DVLbXd/2LKxCSGEEAKA46m5jPtmK1N+2ktaXjEAu05l2jgqUVX7z6iqmdbhfjjpzV+oUZGciUvOxWjUzH5+azA5OVNQUEBBQcEV7582bRq9e/cmJiaGYcOG8ccff5i6pBBCCCGEsBeHFsOK8mqG69+B6GGmna//C2qmSdwfkLjb5PCEGSx7XiVDGvaC1rfYNhadDm74APTO6jFy9G/bxmMtq14DNGh1E4S1tXU0jq/LfeDmB+cPQ9ySq++7dy4UZ0NgJDQbYp34hBBCiFqqoKSMd5bGcf3UDWxNyMDNWc/odvUAx25dVVvtO5sFQNtwf4ucv3GwF67OegpKDJzOuHJ+wp6ZlJxZsmQJPj4+1KtXj9zc3Mvuv/vuu5kyZQqbN2/myJEjLF++nFGjRvHee++ZsqwQQgghhLAHZ3bAovvVdpf7odsDpp8zJErNFgFY86bp5xOmObZCJUH0ziopYg+tievEQLcH1fbSZ6C0yLbxWNrJTXDsb/U76P+iraOpGTz8oWv5c9f691Vrxn9jNMK2L9V2twdBL40nhBBCCEvQNI3lh5IZ/NF6vloXT5lRY1BMHVY+0ZfXRqtZh4lZhWTml9g4UlEVFcmZNhZKzjg76Ymq69hzZ0x6dbl8+XI0TWP06NH4+Phcct/GjRv5/vvvAfD09KR9+/a4u7ujaRovvfQShw5JH3EhhBBCCIeVcQLm3QZlRdB8KFz/tvnO3fdZdSH6+Eo4tcV85xVVU1oEfz2ttrs9qJIi9qLvs+ATBpknYNMnto7GcjQNVr2qtjvcCUGRto2nJun2ILh6q5ZxR5f9+z5Hl0FGgmrl13acdeMTQgghaonT6QXc/f0O7p+9i8SsQur7e/DNnZ34dmJnIgI98XV3oUGgJ+C4F+Bro6JSA3FJqpijTbjl2iLHhKmcRKyDPjZMSs5s3boVnU5H//79L7tv+vTpANSrV4/Dhw+za9cu4uLiiIiIwGAw8PXXX5uytBBCCCGEsJXCTJg7FgrSILQN3PStGtZuLoGNof0Etb369Su/q11Y1qZPVPLDJ0wlQ+yJmw8MKa+s2viRShbWREeXwZlt4Oxhf78DR+cZCJ0nq+117/3788zWL9Rtx0ngZv4htkIIIURtVlRq4NNVxxj88TrWHDmPi5OOh/tHsvKJvgxuUfeSfS8MfnfQC/C10eGkHMqMGkFeroQHeFhsnYrHhqMm7kxKzqSmpgLQrFmzy+5btmwZOp2ORx55hPDwcAAiIiJ45JFH0DSNdevWmbK0EEIIIYSwhbIS+GkCpB0F33AY/7NlLlr2eRqc3ODUJkhYY/7zi6vLOKGSHqCSIG4+V9/fFlqOgcZ9VfXWsudsHY35GQ2wsrxqptuD4BNq23hqou7/UYmvc7vh+KpL70vaDyc3gM5JzagRQgghhNmk5hYx7JMNfLTiKMVlRno2DWLpY314ekg0Hq6Xv+mrRb3y5Mw5x7wAXxvtP5sNqKoZnQVbI8dcSM5cPnLFEZiUnDl//jwA3t6X/oc8NjaWtLQ0AEaOHHnJfZ06dQLg5MmTpiwthBBCCCGsTdNgyaPqgqWrD4z/CXzDLLOWX33odLfaXv2GVM9Y27LnVdKjcV+VBLFHOh0M+wD0LqrC5MhSW0dkXvt/VgPr3f2h52O2jqZm8g65+Dyz/n+qZyqqZlqOBr9wq4cmhBBC1GRzt50mIS2fYG83Ph3Xnjn3dKVpnSu/4auiOuKQJGccxr4zWYDl5s1UiC5/bCRmFZJdUGrRtSzBpOSMk5PKZGZkZFzy9Q0bNgAQEhJCdHT0JfcFBAQAUFRUwwd3CiGEEELUNOveg33z1DvJx34Poa0su17vJ8DFExJ3XXkmhDC/I0vh6FKV9Bj2gUqC2KuQ5tDjP2p76TNQUmDbeMylrBjWvKW2ez2uBtgLy+j5qKrSO7MNTqxXX8tNhgML1Ha3h20XmxBCCFFD7TyZCcBjA5sysm29a1ZWtKyvLsAfP59HUanB4vEJ0+07mwVA2wjLzZsB8PNwob6/apvmiG3vTErO1K9fH4C9e/de8vU///wTnU5H7969LzsmO1uVNAUHB5uytBBCCCEcXUk+FDtm6XGto2mw/RtYW36x+IYPoekgy6/rXQe63q+2V78BRqPl16ztSgtVkgNU0iOkuW3jqYw+T6sWe1mnYePHto7mUsYyPErSqn7czpmQfVrN+5GWWpblEwodJ6rt9e+r2x3fgrEUIrpCeEfbxSaEEELUQGUGI7tPq+RM58aBlTom1NedAE8XDEaNYyl5lgxPmEFuUSkJafmA5Stn4GLbO0ecO2NScqZ3795omsZnn312oY3Zjh07WLZMvbNxyJAhlx1z+PBhAEJDpWeyEEIIUSsZjepC//vN4IseUJhl64jE1ZQWwm//gb+eUp/3eBQ63WW99Xs8Cm6+
kHIQYhdbb93aasNHKsnhG66SHo7A1QuuL08cbpoK6fE2DeeC1DicZwzkukNP4DT3Jji9tXLHFedeTBL0ew5cPS0Xo1B6PqYqxU5ugPjVsPM79fVuD9k2LiGEEKIGOnQuh4ISA34eLjSvU7m5hjqd7sIF+EPnsi0ZnjCDA4nZaBrU9/cg2NvN4utdnDtTy5IzDz30EHq9nhMnTtCkSRM6depE3759KSsrIyAggFtvvfWyY1avXo1Op6Ndu3amLC2EEEIIR5SRALNGqAv9pfnqneGbpto6KnElGQkwYzDsnQM6PQz8Lwx61boxeAaqod2g2jwZyqy7fm2SHn/x7/H6t1XSw1HEjITIgWAogb+etu2MIk1TlRfT+6JLPQSA/sQ6+G4IzBoJpzZf/fgtn0NBGgQ1hXZ3WCFggV84tL9dbf88CQrSwa8BRA+3aVhCCCFETbTjpBqP0alhAHp95dvnVsydccTWVbXNvjMqgWbplmYVWoSpJN/hZMd7bJiUnOnQoQPvv/8+Op2OvLw8du/eTVFRES4uLnzzzTf4+Fya/czOzubPP/8EYPDgwaYsLYQQQghHYjTCtq/hy55waiO4eEGHO9V9W7+E7LO2jU9c7shS+LofJB8Az2CYsBh6Pwl6k14+Vk+3B8EjANKPwYGfrb9+baBpsPRZldyIHAgxI2wdUdXodDDsfXByhfhVcHiJbeLIT4f54+HPJ6GsCGOTAaxr/jKG9neC3hlOrIOZQ1WS+uSmfzk+DTZPU9sDXgInZ+vGX5v1elzN0youfzdu1/vl5y+EEEJYwPYTKjlT2ZZmFVrWUxf6Y8853gX42mZ/+bwZa7Q0A2gRph4bR5PzKDU4Vitsk19tPv744wwaNIgFCxaQnJxMWFgY48aNIyoq6rJ9165dS+fOnQEYNMgKfcqFEEIIYXvp8aot1unyd4s36g0jp0FAI0hPUMma1W/CjV/aNEy7pmnq3fTZZ8HN538+fC9uu5dvu/pU/6KioQzWvAkbP1Kfh3eBsbPAt575vp+qcveFnlNg5cuqeiZ6uPqaMJ+4P+D4CpXcGPa+SnY4mqBI1Z5q/fuw7HloOtC61T8Ja+HX+yEvWf0cB72KoeM9ZC1dhnHYIzj1fVq1jdszRw2eP7FePR/2ew4a9VLn2PAhlORBWDuIGWW92IX6N6ntbbD3R3D1hg4TbB2REEIIUeNomsbOU+XzZhpVLTnzz7kiRqNWpaobYV37z5ZXzlgpORMe4IG3mzN5xWUknM8nKrRy7fLsgVneCtS6dWtat259zf1GjRrFqFHynwwhhBCiVjAaVLXMqtegrFBd7Br8GnS862L1xeDX4NsBsG8edH8IQq/9eqJWivsT/n6xasfUaw8tRkPL0eqiY2XknYeFd6uLxgBdH4DBr4Oza9XWtoQu98HOGWoeyrLnYfTnto7I+koK4IeR6rbzPdB2nHnmkeSnqZ8pqORGUKTp57SVXk/Avp9Uy8T1H8Cgly2/ZlmJSmhu+gTQILg53DQDwtpAaenF/fwbwIipqgJt40ewe7aacfL9BmjYS81y2vGt2nfQK7apUqvt+r+g2jm2vgXcrdOGQwghhKhN4s/nkZFfgruLntb1q/ZvbZNgL9yc9eSXGDiVUUDjYAdqwVuLnM8tJjGrEJ0OWodb5/WUXq8jOtSHnacyOZyU41DJGXnFL4QQQgjzSzsOM4fB8udVYqZxX3hws7qg/M8LjuEdoeUYQIMV/7VZuHavos1Rk/4qSdHmNoi6Qb3rPqwdBEaCVx1w9rh4zLk9qtLkk7bwdV/Y+DFknLjyGqe3wdd9VGLGxUtdXB76rn0kZkAlIW78GtCpGTi2altlS7G/wdkdkHoI/nwCPm4BK1+FnKSqn8togOOr4JdJ8FEMZJ9RMzZ6PWH2sK3K1ROGvqO2N0+D80ctu17acTWXadNUQFPJ5/vWqcTMlfhHwPCP4bG90HmyqrI5tREW3qPayjXuC5H9LRu3+Hd+4XD3MvVvlRBCCCHMbvsJVTXTLsIfV+eqXZZ2dtITXX7RXVqb2a/N8WkARNX1wdvNei1iY8IuVlY5EmmiK4QQQgjzMRpg6xew+g0oK1Ltta57HTpOunKbpIH/VRfa41eri8VNB1o1ZLt3Zgec2Qp6Fxj9JfiGXX1/QynkpcLRZRC7GE5uhKS96mPlKxDaRlXTtBitKiQ0TVU4/f0iGMvUu/7HzoY60Zb+zqquYQ9V2bFpKix5TLVc86lr66isZ88cdRs5ENKPQ9YpVYGxeRq0GgPdHoJ67a5+jsxTqm3T3rkqIVMhrK1KGJijEsfWooZBsyFwbDksfVrNSzJ3mzZNUz/Hv56B0nw1E2nktKrN6vELhxs+VAmxjR/D7lnqvNao9hFCCCGEsIEdJ9W8mS5VbGlWoUU9X/adzSY2KZsb2lzj/0XCJtYeOQ9Av6g6Vl23ou1dbG1Pzpw8eZK0tDQKCwvRNO2q+/bp08fcywshhBDCVs4fhd8eUu/sB4gcACM+Ve8Sv5rAxtDlXpXUWfEyNOkHeieLh+swtpRXzbQZe+3EDICTC/jVV+/87nyPalUW94dK1JzYAMn71ceq11QbOe+6cHylOrbljeoCs5sdl4H3f0El8VIOwO+PwPifHHM+SlWlx6vqCnQw8lPwCYMjf8GWL9Q8p/0/qY+GPVWSJmroxb+j0iL1GNgzGxLWAeWv0d391eOq/YSrV3o4Gp1OVc8krFUfhxap5JW5FOXAkkfVeUFVsI2ZXv25TH714YYPoO8zat5MYBPzxSqEEEIIYUe2n1DJmc6Nq5mcKa+OkMoZ+2Q0aqw7WpGcCbHq2rW6cubIkSO89dZb/P777+TkVO4HoNPpKCsrM8fyQgghhLAlowG2fAar3wRDsRpQP+RNdcG3shfN+zwNe35UF9z3/wztxlk2ZkeRkXCxfVf3/1TvHN4hapZFp7vUbJG4P+DQYtW+LPkAcAD0znDdG2rGjL0nOpzd1IXw6f1UZcSumdDpbltHZXl7f1S3TQeqigtQVRoxIyBxt0puHloEpzapj4DGqmVW1mk48DMUZl48V+O+0OFOiB4OLu7W/16sIbAJ9H4C1r4Ny55T37NXkOnn1TRYdL9KjOmdof+LqprLHAll7zqAdd9hKIQQQghhLeeyCknMKsRJr6NDg4BqnaOiOuKQJGfs0v7EbDLyS/Bxc6Zjw+r9jqsrqq4Peh2k5ZWQmltEHR/H+H+OycmZxYsXc/vtt1NUVHTNShkhhBBC1DCpcapaJnGX+rzpIBjxycWLx5XlGagupK58WbVEazkaXDyueViNt/VL0Izq51q3henn8wpWLeY6ToL8dJWoObMNOkyEBl1NP7+11G2hWj8tfwGWv6guvDvyEPtrMZSpNmQA7e+4/P76HeCmb2HQq7B9Ouz6HjJPqFZ1FXzrQ7vbof3tENDIGlHbXs8pcPBXSDsCfzym2vWZmnzcM1slZpxcYeISaNDNLKEKIYQQQtR0FS3NWtbzxauas0iiQ33R6SA1t5jzucWE+LiZM0RhojVxqQD0bh6Mi5N1R917uDrRKNiLhPP5HE7
KdZjkjEk/pTNnznDHHXdQWFhIvXr1mDp1KtOnTwdUZcyqVatYsGABzz33HPXqqTL/Xr16sXLlSlavXm169EIIIYSwDUMZbPgIvu6tEjNufjDqC7h9QdUTMxW6PgB+EZBzFrZ9Zd54HVFBxsUZIz0eMf/5vYKg40QY/YVjJWYqdH1QtZMqLYBf71OPyZoqfjXkJoFHoJqnciV+9WHwq/BELAz7ACK6QotRcPtCmHIABrxYexIzoKqCxkxXFS6Hl8C++aadLyMBlj6ntgf8nyRmhBBCCCGq4EJLs2rOmwHwcnOmcZAX4Hjtq2qDtRUtzZrbphrcEdvemZSc+fTTTykoKMDHx4dt27bx6KOP0r179wv39+/fnzFjxvDWW29x7NgxbrvtNjZt2sSMGTPo27evycELIYQQwgZSYmHGIFj1KhhK1ODth7eqd+Sb8q50F3cY8JLa3vCRquyozXZ+pxIPdVuryhBxKb0eRn+pEoOJO2HjR7aOyHL2/KBu29yq2rpdi6uXmuN0z98w9gdoNqj2znGq107NKQL462nIPFW98xgNsOgBKM2Hhr2g+8NmC1EIIYQQojaoqJwxJTkDEOOgg99ruvS8YvafzQKgr5XnzVRwxLkzJiVnVq5ciU6n46GHHrpQGXMlHh4ezJkzh/bt2zN//nwWLlxoytJCCCGEsDZDKax/H6b3hXN7wN0PRn+lBrJXdxD2/2o9Vg2pL85Ra9VWZcWw7Wu13eMR+58DYyv+EWqQOsDady6216tJ8tPgyFK13WGCbWNxVD2nqCqiklyVYDEaqn6OjR+rFoCuPnDjl7U32SWEEEIIUQ2Z+SUcTckDoHMj02aRVFRHyNwZ+7L+2Hk0Tf1+6vrapqVYi9qWnDl58iQAPXr0uPA13T8uHpSVXdpeQq/X8+ijj6JpGt99950pSwshhBDCmpIPwrcD1TwYQwk0HwoPbYN248ybONDrYfDranvHt6qNUG20/2fITwWfetBqjK2jsW+tb4GWY0AzwK/3Q0mBrSMyr33zwVgG9dpD3Za2jsYx6Z3gxq/B1RtOb4Ytn1Xt+HN7Ye3banvY++DfwOwhCiGEEELUZDtPZQIQGeJFkLdpc2JaVlTOnMs2OS5hPmviVEuz/tG2qZqBi5Uz8efzKCqtxhuybMCk5Ex+fj4AERERF77m6el5YTs7+/I/kpYt1X8q9+3bZ8rSQgghhLCWvfNgej9I2gfu/jDmGxg3D3zDLLNeZH+IHAjGUlj1mmXWsGdGI2yepra7PQhOLraNx97pdHDDh+ATBunHYMV/bR2R+WiaGkAP0F6qZkwS2BiuL0+wrHodkg9U7rjSQjXTyFim5ve0vc1yMQohhBBC1FDmamkG0KI8OZOQlk9BSQ2eO+lADEaN9cfK581E2WbeDEBdXzcCvVwxanA0JddmcVSFSckZPz8/AIqKii58LSgo6MJ2fHz8Zcfk5KiyorS0NFOWFkIIIYQ1FGTA0mdUoiTqBnh4O7QZa/k2W4NfA3RwaBGc3WnZtezN8ZWQdkS1T+o40dbROAbPQBj9hdre8Q0cW2nbeMwlcRecjwNnd2h9s62jcXztJ0DUMPV89uv9UFp07WNWvqr+Hr1DYfhUaTEohBBCCFEN20+YLzlTx8edYG83NA2OJDvGBfiabu+ZLLIKSvF1d6Z9hL/N4tDpdMSE+QCO09rMpORMVFQUAAkJF1uO+Pj40LBhQwD+/vvvy45ZuVL9Z9nf39+UpYUQQghhDZunqfkvdVvBrXPAp6511g1tBe3Gq+0V/1UVBLXFlvKqmY4T1VwfUTmRA6DL/Wr7t4dVYtHRVVTNtBgljwVz0OlgxKfgGQyph2DNG1ffP34NbPtSbY/6XCUBhRBCCCFElRSUlHEwUXVX6tLYPK+nKqpnZO6MfVh3JBWA3s1DcHYyKd1gspjQirkzjpG4M+mn1b17dwC2bt16ydeHDx+Opmm8//77rF69+sLXFyxYwNSpU9HpdPTs2dOUpYUQQghhaXmpsO0rtd3/RTUPxpr6v6gqBk5tujgQvaY7txdOrAedE3R9wNbROJ5Br0Bwc8hLhj+mOHZSryQfDixU29LSzHy8Q2BU+cyZzZ/BiQ3/vl9hJix+SG13ngzNBlknPiGEEEKIGmbv6SzKjBqhvu6EB3iY5ZwX5s44SHVETbfmSPm8GRu2NKvQ4sJMIsd4bJh0lWXYsGFomsavv/6KwXBxyM7TTz+Np6cneXl5DB48mJCQEHx9fbn11lspLCxEr9fz9NNPmxy8EEIIISxo48dQWgD1O0LUUOuv71cfupVfHF35MhhqQT/hikHlrcaAf8TV9xWXc/WEMdNB7wyxv8H+n2wdUfXF/g4luRDQCBrKm5rMKmoodJgIaLD4QSj6l2Gyfz4FuecgqGl5m0UhhBBCCFEd2yvmzTQORGemFrEtwhzrAnxNlppbxIHyyqi+zUNsHA3ElD82DifnoDnAm/VMSs7069ePl19+mbvuuovExMQLX2/QoAG//PILfn5+aJpGeno6eXl5aJqGm5sb33zzDd26dTM5eCGEEEJYSHYi7Jihtge8ZLs5C72mgGcQpB2FPT/YJgZryToDB39V293/Y9tYHFm99tDvObW97HlVAeGIKlqatb/D+lVrtcGQt1TiK/sM/PXMpfcdWAAHF6gKthung6uXTUIUQgghhKgJdpQnZ7o0CjDbOSuqI+KSczAY7f8CfE22/qiaK9+6vh8hPm42jgYiQ7xxcdKRW1TG2cxCW4dzTc6mHKzT6Xj55Zf/9b6hQ4dy/PhxfvnlFw4dOkRZWRnNmjVj7Nix1K9f35RlhRBCCGFp698HQ7F6x36T/raLw90P+j4LS5+B5S9BSDQ07GG7eCxp21egGaBRb6jXztbROLaeU1RLsPOHYc3bMOw9W0dUNenxqp2fTg9tx9s6mprJzVslXmZeD/vnQ9T10PJGyD4Lfz6h9un7DIR3tG2cQgghhBAOrNRgZPepLEBVzphLoyAvPFycKCw1cCItj6Z1fMx2blE1a8rnzfSLsn3VDICrs56mdXw4nJRDbFIOEYGetg7pqiz6NrzAwEDuv/9+Pv30U7744gsef/xxScwIIYQQ9i7jxMV37duyaqZCp7uhST8ozYc5N8PJTbaNxxKKsmHXLLXd41HbxlITOLnA0HfU9o5vISXWtvFU1Z456jZyoGrvJyyjQVfoVZ6I+eNxyDmn5swUZat2jr2ftG18QgghhHCItkTiyg6dy6Gw1ICfhwvNzZhAcdLriAnzubCGsI0yg5ENR9W8mX52MG+mQrsIfwBWH061bSCVUOXkTEpKCs888wytW7fG19cXLy8vmjVrxn333cfhw4ctEeNlXnnlFXQ63SUfoaGhF+7XNI1XXnmFevXq4eHhQb9+/Th06NAl5yguLuaRRx4hODgYLy8vRo4cydmzZ60SvxBCCGHX1r0HxjJ1YdgeqlScXGDcfFXBU5oPP95S8xI0u2ap+SIh0dBUBo+bRZN+EDNCVSMtexas9R97QxmcP6pmxhxdDkZj1Y/fN09td5hg/vjEpfo+C2FtVfu7bw
bAiXXg7KGqapxcbB2dEEIIUWsVlRp4cdEB2rz694W2WMLx7Cz/3XVqGIBeb943/V0Y/J4kyRlb2XMmi5yiMvw9XS4kROzBje3VG9z+2H+O/GL7nl1bpeTM1q1badmyJR9++CGxsbHk5eVRWFhIQkICM2bMoF27dsydO9dSsV6iZcuWJCUlXfg4cODAhfvee+89PvroIz777DN27NhBaGgogwcPJjc398I+U6ZMYdGiRcyfP5+NGzeSl5fH8OHDMRgMVolfCCGEsEvnj6gWPwADXrRtLP/k4gHj5kHkgPIEzc1wcqOtozKPshLY+qXa7v4fmS9iTte9AU5ucGI9HF5i3nMbStXfS+xvsPZd+OUu+KI7vBkKn3eGnyfA3LGwYBKUFlX+vPGrIDdJzVpqPtS8MYvLObuqRIyzu/q5Awx5A4Kb2jYuIYQQohY7k1HAzV9t5sdtp8ktKuPvQ8m2DklU0/YTKjljzpZmFVqE+QEQK5UzNrO2vKVZn2YhOJk5+WaKzo0CaBzsRX6JgT8PJNk6nKuq9P/+c3JyuPnmm8nIyEDTNDRNIygoiLp16wKqWqW0tJR77rnHKhU0zs7OhIaGXvgICQm5EMfUqVN58cUXGTNmDK1atWLWrFkUFBRcSBxlZ2czY8YMPvzwQwYNGkT79u2ZM2cOBw4cYOXKlRaPXQghhLBba98GzQhRN6i2PvbExQNum6sqekoLVAXNiQ22jsp0hxZB7jnwqgNtxto6mpoloBH0fExtL38RSk0cCJlyCBbcDZ93gzfD4PMu8POdsPYtOPQrpMaCsRRcPCGsHehdVPJm9mgoqOQ7PitaCra5TSUOhOXViVaJPIBmQ6DTPbaNRwghhKjF1sSlMnzaRg4m5lzorhyXnHv1g4Rd0jSNnacyAejcyALJmYrKmXM50v7ORtbEVbQ0s495MxV0Oh23dAoH4JedZ2wczdU5V3bH7777jnPnzqHT6Rg1ahQffPABTZo0ASA1NZU333yTadOmUVJSwocffsi3335rsaABjh07Rr169XBzc6Nr16689dZbNGnShBMnTpCcnMx11113YV83Nzf69u3L5s2buf/++9m1axelpaWX7FOvXj1atWrF5s2bGTJkyL+uWVxcTHFx8YXPc3JUZra0tJTS0lILfadCCEdQ8RxQq58LUg+jP7m+0rsbI7qqi5fCfqQcxOXQIjR0lPV5Fuzy8ewMN8/C6ZeJ6BNWof14C4Zb56I16m3rwC6o0vOBpuG86VN0gKHTZIya3k5/7g6s68M475mDLvs0hg1TMfZ+qnrnyTyJ86yR6ArSLnxJc/VCC46C4Ci04OZoIdHqc79w0OnRndyA04KJ6E5vQZtxHWW3/QT+Da68Rv55nI8sRQeUtr5NHgvW1H4ShHeHwMZQZr7WB/L6QAhRQZ4PhLg6g1Fj2pp4Pl+bAEDbcD8m92rEI/P3cTgpp8b97dSG54TjqXlk5Jfg7qInuo6n2b/XyCB39DpIzy8hMSOPur7uZj2/uLqUnCJik1QStUdjf7t7LI9qE8qHfx9lx8lMjpzLokmIl1XXr+zPo9LJmb/++guAbt26sXDhQnT/GA5cp04dPvnkE/Ly8pg5c+aFfS2la9eu/PDDDzRv3pyUlBTeeOMNevTowaFDh0hOVqWOFRU9FerWrcupU6cASE5OxtXVlYCAgMv2qTj+37z99tu8+uqrl319zZo1eHp6mvptCSFqgBUrVtg6BJsIzDtCj+Pv4qRV/oKWHh0H648jIWSI7QfOCwC6xH9MGJDo35VdO08CJ20b0FXofcbRxfc8dXP2w9xb2R75BGk+LWwd1iUq83wQknOQHqkHKdO78ndGfUot/BqqtqoXNJrOuV+gbfiINel1KHQNrtLxLmX59D76Gj7FaWR5NORw2M3ketSn0CXo4vNXJpBZCkcPAgcvHOvT+Fm6x3+IR/oxDF/3Z2vkk2R7NvrXdSJTltLKWEamZxPW7zwBnKjW9ytMEW+Rs9bW1wdCiMvJ84EQl8srhR+O6TmSrRr89K5rZHT9dAoT0tHhRFpeCfMX/4VvDSwqrsnPCZtTdIAT4R5lrPx7mUXWqOPuRHKhjh+WrKFlgFTPWNPWVPX7jfDU2LZ+la3D+VfRfnoOZep5b8EGRjas4ixQExUUFFRqv0onZw4ePIhOp+Phhx++JDHzT4899hgzZ84kJSWF9PR0goKCKnv6Khk69GL/7datW9O9e3ciIyOZNWsW3bp1A7gsRk3Trhh3Zfd5/vnneeKJJy58npOTQ0REBP3797fY9yqEcAylpaWsWLGCwYMH4+JSywYIZ8Tj/P1j6LQyjGHtILDJtY/JP4/+5AZaJ86lZYgew5D3wKkGvtJ2ILrEXTjv2YOm01P3tqkMC3KAeQtl12NcMAnn+JX0ODkVw9i5aI37WDcGQwnkJqHLOQe559DlnMOYdZbkE4cJCwtDf435MbrM3eq2w50MHnKrNSKunbShGOfswfn0FgZp6zEMq0KFt6EEp3lj0RcnofnUw+uuP+jkE1a19XNGoP10G+6psfQ98S6Gm75Ha9L/f2LUcJ7+JgA+fR9mWIdhVVtD2KVa/fpACHEJeT4Q4t/tPZPFoz/tJym7CA8XPa+Pasmothdfa30Rv5GT6QVEtO5Kz8iac+2tNjwnrF5wAEhiSIemDBtomf9frszfz5L9yXiHRzGsbyWuRQizWTp/H5DCqC6RDBtgn9cPXBql8tC8vezPceezIX1wdrLefNeKjlvXUunkTEaG6pMdHR19xX1iYmIubGdmZlotYeHl5UXr1q05duwYo0ePBlR1TFjYxSfz1NTUC9U0oaGhlJSUkJmZeUn1TGpqKj169LjiOm5ubri5uV32dRcXlxr7RCqEqJpa93yQnw4/jYPCTKjfEf3EP8C1EpWEmgZbv4C/X0K/dw76zBMwdjZ41ZwX2w5n/TsA6NqOxyU05ho72wkXFxg3F36agO7Ycpx/Hg/j5kNk/2sfW1V5qbBvPmSfgexEyEmEnHOQn3rZrk5ABKhKisrQOeHU42GcatNzhy0MfQ+m90Ufuxh9l3uhUa9rH6Np8MejcGojuPqgu/0XXAKv0pbsSoIawt3L4Kc70J1Yj/NP42DEp9D+9ov7nNkBaUfA2QPntreox7eoMWrd6wMhxBXJ84EQiqZpzNl6itf+iKXUoNEk2Isv7+hIVKjPJfvFhPlyMr2A4+cL6BcdaqNoLacmPyfsPJUFQLfIYIt9j63D/VmyP5m4lLwa+3O0R6UGI5uOpwMwICbUbn/2g1uFEewdy/m8EjYmZDG4Rd1rH2Qmlf2ZVDo5U1JSgk6nw939yv37/rloSUlJZU9tsuLiYg4fPkzv3r1p3LgxoaGhrFixgvbt21+IZd26dbz77rsAdOzYERcXF1asWMHYsWrwblJSEgcPHuS9996zWtxCCOHQSotg/njISFAzFMbNr1xiBlQboO4PQ3BzNVz71Cb4ph+M+wnq2ldrKptL2g8r/g/qtIT+L4Cbt/nXOLkREtao4eV9nzH/+S3J2Q1una2Gsh9dBvNug3HzIHKA+dYoy
IDvhqjH+r9xcgPfemrOiG89DN6hHD6ZSkyLGJz0Ttc+f1jbylWcCdOEtYGOk2Dnd7D0Obh/HVzr97P+fdg3F3ROMPZ7CG1V/fXd/eD2hfDbw3DgZ/jtIZXg6/OUek7cM1vt13K02lcIIYQQooYqKCnjhV8PsHjvOQCGtgrlvZvb4ON++cXM6FBflh5MJi4519phChOcyyokMasQJ72ODg0Crn1ANbUIU6+bY89VrkpBmMfuU5nkFpcR6OVKm3B/W4dzRS5OesZ0CGf6+gR+3nnGqsmZyqp0csaePPXUU4wYMYIGDRqQmprKG2+8QU5ODhMnTkSn0zFlyhTeeustmjVrRrNmzXjrrbfw9PRk/PjxAPj5+XHPPffw5JNPEhQURGBgIE899RStW7dm0KBBNv7uhBDCARiN6sLima3g5gfjfwHvOlU/T7PBMHklzL0VMk/AjMFw0wyIut78MTsaoxG2fQUrX1atsxLWQtwSGPkZNOlrvnU0DVa/obY7ToSAhuY7t7U4u8HYH+DniXB0Kcy9DcbPN0+CxlAGC+5SiRnfcGgz9pJEDL71wTPokrlJxtJS4v/6i6iuw6Qaxt70fwkOLoSUA7Dre+h8z5X33f8zrFFtxrjhA2hqhteIzq5w49fgVx82fgxr3lDVWNe9Dgd/Vfu0n2D6OkIIIYQQdio1p4gJM7ZzJCUXJ72O54dGc0+vxlccMxAdpipp4pLl4rsj2XFSdWBqWc8XLzfLXX6OKX98nEwvIK+4DG8LriUuWnPkPAB9mgXjpLfvGcJjO6nkzOq4VFJzi6jjc+XCE1uwXqM1Mzp79izjxo0jKiqKMWPG4OrqytatW2nYUF1QeuaZZ5gyZQoPPfQQnTp1IjExkb///hsfn4ulkR9//DGjR49m7Nix9OzZE09PT5YsWYKTUyXe4SqEELXdmjfVBU69s6paqHPllpfXFBIF966GRr2hJE9VPmz6RCUNaqu8VJh7Cyx/XiVmIgeCXwPIOg0/jIQ/noBiM71z7PgqOL0FnN2h91PmOactVCRoooaBoRjmjYdTm00/7/IXVGLMxQvG/wSDXoYu90LUUFXx4hV8SWJG2DmvIJWgAVj9uqqK+jcnN6kKF4Aej0Knu80Xg14Pg16BYR+ATg+7Z8HXfaAkV1VQNbxyi10hhBBCCEf35bp4jqTkEuLjxrx7uzG5d5Orzn+OCfUF4GhKHmUG6w70FtW3/YR6nd25UaBF1wnydiPUV11sP5wkCTxrWXtEtffuH12NN+laWdM6PnRo4I/BqLFod6Ktw7lMldOJL730Ev7+/ibvp9PpmDFjRlWXB2D+/PlXvV+n0/HKK6/wyiuvXHEfd3d3pk2bxrRp06oVgxBC1Fq7Z8OGD9T2iE/NU8XhGQgTFsHSZ1TLoRX/hdTDMHwquNjXuxos7ujfqiop/7xKmAx5EzrdoxJXK1+BHd/CzhlwbAWM/NS0+Sqapi5QA3SeDL5VHHJub5xd4ZZZ8NPtcOxv+HEsTPwN6nes3vl2fQ/bv1bbY742raWVsB+d7oZdMyE1Fta+DcPev/T+tOPqMWQogZiRMOhVy8TR5V5VfbXgHsg8qb7W/g5J9gkhhBCixtI0jeUHkwF4c3QrujS+9oX78AAPPF2dKCgxcDI9n6Z1fK55jLC9isoZSydnQFXnJOcUEXsuxyrr1XZJ2YXEJeei00HvZiG2DqdSxnaKYPfpLH7aeYb7+lw9IWxtVU7O/Pbbb1e9v+Kbu9Z+QLWTM0IIIWwkYS38MUVt93n60mHWpnJygeEfQ50WsPRZ2DcP0uPh1jngY399Qc2utEglX7Z9qT6v0xJungF1YtTnbj5ww4fQYhT89h/IOgWzR0OHiXDdG+DuW/U14/6EpL2qKqTX42b6RmzM2VVV0Px4C5zcALPHwKQ/q55YObUZ/iyvJOr/IsSMMH+swjacnOH6d1QV2o5v1Ryaui3Vffnp8OPNUJgJ9TvBmOmq0sVSom+AiUtg7liVDGo73nJrCSGEEELY2L6z2ZzLLsLT1Yk+zSt3UVev1xEV6sOe01kcTsqV5IwDyMwv4WhKHgCdG1lu3kyFFvV8WRWXKnNnrGRdeUuzdhH+BHq52jiayhneth6v/RFLwvl8dp/OpGND+0niVel/m5qmme1DCCGEg0mNg5/uBGMZtLpZXbC2hC73wh0L1UDss9vhmwGQfMAya9mL1MPw7cCLiZmuD6hWbxWJmX9q3Ace3Axd7lOf754FX3SH4yurtqbRcHGeRrcHVXuumsLFA8bNh/DOUJSlklhpxyt/fNZp+OkOMJZCi9EqESlqliZ9VVWMZlTJYE1TCdL549T8K/8G6jHk4mH5WCI6w6O74T87HL96TQghhBDiKpYeTAJUKyR3l8qPFYgub20mc2ccw85TmQBEhngR5O1m8fVahKnHR6y0NbOKNeUtzfo1t/+WZhW83Zy5obX6v9ZPO87YOJpLVbpy5sSJE5aMQwghhD3LS1WVCMXZ0KA7jP7Csq13IvvD5NVq/kz6MZh9I9y7BvwjLLemLWiaalG2/EUoKwLPYBj9JTS/7urHuXmrVkwtRqm5GJknYc5NapD4kDdVYgvAaISCNMhJhJxz6iP7rLrNSFBtndz8oMd/LP6tWp2bN9z+C8waoZJ7P4yEu5ZCQMOrH1ecB/PGQUE6hLZRvw87KnkWZnTdG6r93ckNELsYYn+HM9vU38/tC8DbiiX6HgHqQwghhBCihvpnS7PrW4ZW6diKoe9xSWaauyksypotzUBVzgAcSc6l1GDExckhR6w7hJIyI5uOpwPQP9oxWppVGNs5gl92neWP/Un8d0RLvN2q3FDMIiodRcOG17iYIYQQomYqKYC5t0L2aQiMhNvmquHrlhbcFCavhFnD1cX1+ePg7uXg6mX5ta0hPx1+/w8c+Ut9HjlQJQKq0sKtUS9VRbPqddj2FeyZrSpoAhqrhExukmqVdDW9H6+5F4U9AmDCYpg5DNKOwA+jVILmStUJRiMsuh9SDoJXCIybB66eVg1ZWFFAQ+j5GKx7F369T/2t6F1UK8WQKFtHJ4QQQghRo8Ql53IyvQBXZ32Vh4hfrJyR5Iwj2H7CusmZiABPfNycyS0uI/583oXHizC/nacyyCsuI9jblVb1/GwdTpV0ahhAk2AvEtLy+Wt/EmM728ebfyWVKIQQ4sqMBvj1Xji3GzwCVSWCpxV7c3r4w23z1IXy5AOw+EFVbeLISvJh0yfweReVmHFyhSFvq3fqV2e2jqsXDH0H7voLApuohMzpzWomjaEE0IF3KNTroOamdH0ABr8GN82Ayaugx2Nm/xbtilcw3LkYAhqpdlWzR0N+2r/vu+4diPtD/U5u/RH8wq0YqLCJnlPAN/xiEnPkp6p1oBBCCCGEMKul5VUzfZqFVPkd61GhqnImMauQnKJSs8cmzKegpIyDidkAdGlsnWsHer2OmIrWZjJ3xqIq5s30aR6CXu9YHSZ0Oh23dFIJmZ922k9rM/uo3xFCCHG57LOw5DFo0A16PWnZodT/qzhPtfvZ+6OqxHByVRUzQZHWi6GCf4R6
J/v3wyH2N1j3HvR71vpxmKo4Tw0f3zxNtRoDCI6Cm76FsDamn79hD3hgk0ou6J3Atz741gOfMHByMf38jsy3Htz5O8wcCufjVJu8iUtU8q/CoUWqggJg+FRo0NUWkQprc/WE4R/BL3dB7yeg3XhbRySEEEIIUSNVtDQb2qpqLc0A/DxcqO/vQWJWIUeSc61WkSGqbu/pLMqMGqG+7oQHWGF+Y7kW9XzZfjKD2HM5jOlgtWVrnQvzZqIcZ97MP93UoT4f/H2EXacyOZ6aR9M63rYOSZIzQghhlzQNfn8E4ler5EjiHhgzXc3RsJTiPDi6TM1eOLYSygrL79CpdlsNu1tu7Wtp0A2Gf6zagK19C+pEq3krjqA4F7Z/A1s+U3NMQLUd6/M0tBlr3sSJq6c6p7hcQEO48zeVoEner2YoTVik/qaS9sGiB9V+3f8D7W+3bazCupoPgRcSZbaQEEIIIYSFJJzP40hKLs56HYNiqtEtAFU9k5hVSFxSjiRn7Nj2inkzjQPRWfH1dYvyyplDUjljMYlZhRxNyUOvgz7Ngm0dTrXU8XWnf1QIKw+n8svOMzw/LMbWIUlbMyGEsEv75qnEjJOb+jjyJ3w3BLJOm3ed4lw4sADm3w7vR8LCe+DwEpWYCWgMvR6HBzdB65vNu251dJgA3R5S24seUG3O7FlxLmz4EKa2gVWvqsRMYBOV6PrPTpUAqO0VLdYW3EzNoHH3h7Pb1RyjrNMwb7x6zEcOhEGv2jpKYQuSmBFCCCGEsJiKlmbdI4Pw86ze/4Giy1ubHZa5M3ZtR3lypksj6841bVGvvK1ZUg6ao7dCt1Nry6tm2jcIwN/T1cbRVN/Y8tZmC3cnUmow2jgaqZwRQgj7k5cKy55X2/2eg0a9Yf54NaR8en/V4suUKpbSQjj8R3mFzAowFF+8L7AJtBgNLUdDaBv7u2A5+HVIPQwJa2DeOLh3DXiH2DqqSxXlwPbpqlKmMFN9LTAS+j4DrW4GJ/mn16ZCW8Edv8IPI+HEevisi0rMBDWFm7+T348QQgghhBBmtvxQRUuzsGqfI7q8MiIuSSoj7FWpwcjuU1mAqpyxpmZ1vXHW68guLOVcdhH1/a3XUq22WBOn5s30j7KzazBV1D+6DsHebqTlFbMmLpXrWla91aI5yRUIIYTpjEYorsILJHc/+7vob0+WPgNFWSo50uMRVV1xX3kyInk/zBoBIz6peuulkgLYNRM2ToX81ItfD4xUyZgWoyG0tX3/bpyc4ZaZ8M1AyIiHnyeoWSLONn7XRlkxnN4Kx1fA7tnq9wfqgn+fZ6DVTXLR356Ed4TxP8Ocm1Rixs0Pxs2/dAaNEEIIIYQQwmRnMwvYfzYbnQ4Gt6heSzOAmPLKmSPJuRiNmsMNI68N9p/NprDUgJ+HC83r+Fh1bTdnJ5rW8SYuOZfYczmSnDGjwhIDX6w9fqFyxlHnzVRwcdJzU4f6fL0+gZ93npXkjBDCweWlwuwxkFKFFlOhbWDUZxDW1nJxOaq4P9Vgcp2T+hlVtL3yC4e7l6l2Xod/h98egtRYGPyaGv5+NSX5sPM72PTpxaSMXwS0vU0lZOq2tO+EzP/yCFAX0r8dBKe3wF9PwohPrfs9aBqkH4fjqyB+FZzcCKUFF+8PalZeKXPTtX8/wjYa9YTxP8HmT6HXE6rlmRBCCCGEEMKslpW3NOvcKJAQH7dqn6dxsBeuTnrySwyczSykQZCnuUIUZvLn/iQA+jQPsUnyrEU9X+KSczl0LtukRKC4aNXhFF7+/RBnM9VM4hFt69GyvIWcI7ulUwRfr09gzZFUUnOKqOPrbrNYJDkjhKi+kgKYd1vVEjOgqj+m94feT6ih6M7Vf4FWoxRmwZ9Pqu2ej16evHL1gltmwbp3YN27qm1W2lG46VtVjfS/SvJhxwx18TlflZ/i3wB6PwVtx9m+2sQUIc3h5hkwdyzs/gHqtIRuD1h2zcIsOLGuPCGzBrL/Z/6PVx2IHABRQyFmhCRlHEGTvupDCCGEEELYBU3TyC4sJSm7CICYMMe/CFjbVSRnhrYy7d3pzk56mtX15tC5HOKScyQ5Y2cMRo0/9p8DYFTbejaJoWU9P37dnUjsOWl9Z6qzmQW8uiSWFbEpAIT5ufPyiBYMaRmKzpHe3HsFTet407FhALtOZbJwdyIP9ou0WSySnBFCVI/RCIvuh8RdqpLh7uVqgPy1FGbA0mfVvJP176tKkVGfQ/0OFg/Z7q34L+Qmlc8nefbf99Hrof8LEBIFix+CY3/Dt4Nh/Hw1LwbKkzLfqkqZgjT1Nf+GKhHW9raaM4S+2WBVOfT3S7D8eZWwiRxg/nWO/q0eq4k7QfvHsDgnV2jQTQ2RbzoQ6rZyrAokIYQQQgghrCy7sJTEzEKSsgtJyi4iObuIc9mFJJdvJ2UXUVhquLD/zEmd6R/t2C10arPUnCJ2nVZzOIeYoXVQdKhveXIm1+atiMSltp1IJzW3GD8PF/o0t81MkhblydxYmUtUbSVlRr7ZkMC01ccoKjXirNdxT+/GPDqgGV5uNSuNcGunCHadyuSXnWd4oG8TmyWdatZPVQhhPSv/q9prObnCbXNVsqAyfEJh7CzVuuvPp1Rrrm8HQa8pKiFRW6toTqyH3bPU9shp4HKN/qitblLJsPnjIe0IfDMAbvwaUg+rSpmCdLVfQCOVlGlza81JyvxT9/9ASizsmwu/TIJ710CQGd/xcHwVzB8HxjL1eVAzlYiJHKjaYrl6mW8tIYQQQggharAFu87y9IJ9aNq193V11lNSZuSP/UmSnHFgy2NT0DRoG+FPPTPMAIkunzsTlywX3+3Nkn2qamZoq1BcnfU2iaEiOXM2s5DswlL8PGrgNRAL2nw8jf/77SDx5/MB6No4kNdHt6J5XevOD7KWYW3CeGXJIRLS8tl5KpPOjQJtEodJyZmtW7fSrVs3c8UihHAUO2bA5mlqe9Tn0LBH1c/R8kZo1BuWPgMHF8KGD1UVzegvoH5H88Zr70oK4PdH1Xanu9VF/8qo30ElI+aPh3O7VYuvCgGNy5MyY2tmUqaCTgfDP4b0Y3B2h2qzN3nlv7d5q6rEXfDTBJWYaTEKrntDtYUTQgghhBBCVInRqPHZ6mNoGgR4ulA/wINQXw/C/NwJ83cnzM+dUF8P6vm7U9fXnd2nMxn/zTbWHU2V4e8ObNlBNYPE1JZmFaLDypMzSblmOZ8wj5IyI38dUO3rRtqopRmAn6cL4QEenM0sJPZcDt0jg2wWiyNJzSnijT8P83t5gi3Y25UXb4hhdLv6NaKF2ZV4uzkzvE0YP+88y887zjhmcqZHjx7ExMRw9913M2HCBOrUkXczCFHjHVsJfz2ttvu/qC7+V5dXMNz8nRpK/+cTcD5OVdH0eBT6PQ8uthvIZVVr34bME+BTDwa9WrVjfcPgrr/gt//AwQWqtVmfZ6D1LeBUS4ojXdzh1h/hm/5qBs/sMWrQu1dw9c+ZHg8/3gKl+dC4L4z5pvZWdQkhhBBCCGGiDcfTOJlegI+bMxufHXDN9jidGgbi5epEWl4Jh87l0Dr
cDG++ElaVmV/C1oQMwIzJmVBVGXEiPZ/CEgMerjLn0x6sP3qe7MJS6vi40bWJbRMibcP9OZtZyKbjaZKcqYQl+87x/K8HyCsuQ6+DCd0a8sR1UbWm6mhspwh+3nmWPw8k8fLIlnjboHWbyXVmcXFxPPPMM0RERDBmzBiWLFmC0Wi89oFCCMeTfAB+mQiaAdqOV5UZ5tBiJDy8XSUUNCNsmgpf94YzO8xzfnuWuBu2fKa2h38E7tUYeOniATd9C4/shod3QLtxtScxU8GnLoybp+YfJe6EGYMhI6F658pNhtk3qtZwYW3h1jmSmBFCCCGEEMIEP2w+CcDNncIrNbfA1VlPr2bqzVZrjqRaMjRhISsOp2AwasSE+dIwyDztoEN83Aj2dkXT4GiKVM/Yi4qKixvahOFk4yq361rWBWBpedWWuLLcolKeW7ifvOIy2kb489vDvXh1VKtak5gB6NgwgCbBXhSUGFh35LxNYjApOfPJJ5/Qrl07NE2jtLSU3377jdGjRxMeHs7zzz/P0aNHzRWnEMLWcpJg7q1QkqfakY34xLzDzz0DVYLhtrngXVdVQHx3HWz9ynxr2BtDKfz+iEpItboJooZW/1w6nZq1UtuSMv8U1hbuWaFaj2UkwLeD4eyuqp2jKBvm3AxZp1RruNsXVC9hJoQQQgghhADgdHoBq8sTLBO6Naz0cf2iVHeWtZKccUjLDqo2V9e3NE/VTIWK6hmZO2MfCkrKWBGbAti2pVmF/tF1cHHSEX8+n2OSwLuqBbvOkl9ioGkdb359sEetrFDU6XT0aR4CwJaENJvEYFJy5pFHHmHXrl3s3buXRx55hKCgIDRNIzk5mffee4+YmBh69erFzJkzyc/PN1fMQghrK85T80xyEiG4Odw6G5xdLbNW9A3w0FY1wF4zwvIX4PRWy6xla5s+gZSD4BEI179r62hqhuBmcM9KlagpSINZw+HIssodW1oE82+HlAPgFQITfgVvadcphBBCCCGEKeZsO4WmQe9mwTQJ8a70cf2i1AWzPWeyyMwvsVR4wgJyi0rZeExd6Bza2tzJmfK5M8ly4d0erDycSmGpgQaBnrSL8Ld1OPi6u9Crqaq6q0gQissZjRqzyisaJ/VoZPOKJ1uqaH+3OT7dJuub3NYMoE2bNnzyySckJiayYMECbrjhBvR6PZqmsWXLFiZPnkxYWBiTJ09m06ZN5lhSCGEtRgMsuBuS94NnMIz/WbWOsiTPQBgzHVqPVS3UFk6GwizLrmlt54/CuvKEzPXvgHeIbeOpSXzqwqS/oOkgKC2A+eNg58yrH2M0wKL74OQGcPWBOxaq+T1CCCGEEEKIaissMfDTjjMATOzeqErHhvl5EB3qg6bB+mO2aTcjqmd1XColBiNNQrxoVqfyCbnKiA4rr5xJkuSMPfh9r2ppNqJtmN0Mjx/aKgyApZKcuaK1R1PVHDB3Z8Z0qG/rcGyqW+MgdDpIOJ9PSk6R1dc3S3KmgouLy4W5M2fOnOHtt98mKioKTdPIy8tj5syZ9OnTh5iYGN5//31SUlLMubwQwtw0DZY9B8eWg7M7jJsPgY2tt/4NH0JAI8g+A39MUfHUBEajamdmKIGmg6HNWFtHVPO4eavHa7s7VAXWH1Ng9Rv//hjSNFj6DMT+BnoXuO1HVXkjhBBCCCGEMMmSfefILiwlPMCD/tFVr0q/2NpMkjOO5J8tzcx9wf5i5UwOWk25RuCgsgtKWXdUtR0c2dZ+LvAPalEXJ72O2KQcTqcX2DocuzRz00kAbuscgadrLW6PD/h5utCynkr6bk2wfvWMWZMz/xQaGsqzzz5LbGwsmzZtYvLkyXh7e6NpGkeOHOG5554jIiKC0aNHs2xZJVvOCCGsa+uXsH262r7xa4jobN313X3hphmgd4ZDi2DPHOuubyk7Z8CZreDqDcM/Mu/sHnGRkwuM+gz6Pqc+X/8+LH5Izfr5p/UfwI5vAZ2q2GrS1+qhCiGEEEIIUdNomsasLScBuKNbw2q1zalobbbu6HmMRrkQ7wgKSwwXkmkVFQzm1LSON3odZBaUkppbbPbzi8pbdiiJUoNGVF0fosqTZvYg0MuVro0DARWjuNTx1Fw2HEtDr4M7q1jRWFN1b6Jam22xQWsziyVn/qmkpITi4mIMBsOFjLmmaZSVlbFkyRJuuOEG2rdvz9atNXSuhBCOKO5PNe8FYPBr0HK0beII7wT9X1TbS5+BtGO2icMURiMk7YeNH8P3w1U1EsDAl9XwemE5Oh30fx5GTgOdE+ybCz/eAkXlwyN3fQ9r3lDbQ9+FVmNsFqoQQgghhBA1ye7TWRw6l4Obs55bO0VU6xwdGwbg4+ZMRn4J+xOzzRyhsIR1R89TWGogPMCDVvV9zX5+dxenC7OLDiflmP38ovJ+36damo1sV8/GkVxuaCs160ham13u+/JZM4Ni6hIR6GnbYOyELefOWKxu6fTp08yaNYvvv/+ekydPAioh4+TkxNChQ5kwYQIHDhxg1qxZnDlzhn379tGvXz/WrVtH165dLRWWMKf0ePjrKTU7ozJ0OjUDYtAr4OFvyciEqdKOwa/3ARp0vAt6PGrbeHpOgYQ1cGI9LLgLJq8CZzfbxnQteakQvwbiV6nb/NRL74+6ATpPtk1stVGHO8EnDH6eqB5L3w9TP/8/Hlf3934Sut5v2xiFEEIIIYSoQX4or5oZ2bYeAV6u1TqHi5OeXs2CWXowmbVHUu1i4Li4umUHVaWCJVqaVYgO9eF4ah5xybkXWt8J60rNLbpQZTCyrf0lZ4a0DOW/vx9iz+kskrILCfPzsHVIdiG7sJSFuxIBmNSzkW2DsSOdGwXipNdxOqOAxKxC6vtb7/Fi1sqZoqIi5s6dy+DBg2nSpAmvvPIKJ06cQNM0GjduzBtvvMHp06f5/fffueWWW3jttdc4ceIEc+bMITg4mJKSEv773/+aMyRhCZqm2kt91RviV0PO2cp9ZJ+BXTPhi+5w9G9bfxfiSkoL4ZdJUJIHDXvCsPdt33ZLr4cbp4NHICQfgJWv2jaef1NWTHBuLPrVr8FXveCDZmrA/P6fVGLGxQuaXw9D34dHdqu5JnqrFC+KCs0Gw6Q/wCtEPY6WPKbm0bS/Awb8n62jE0IIIYQQosY4n1vMXwfURfqJPRqZdK7+5Rff18jcGbtXXGZg1WH1xsTryysXLCEmTFXkxEnljM38uT8JowbtG/jbZfVFHV93OjQIAGC5VM9c8MvOMxSWGogO9bnQykuAj7sLrev7AdZvbWaWyplt27Yxc+ZMfvrpJ3Jy1BOjpmm4ubkxevRoJk+ezMCBA//1WL1ez/jx4zEajdx5553s2rXLHCEJSynMUoO1Dy1SnzfqrS5qOlfiXTC5KapNVkY8zL0F2o6H698CjwBLRiyqaukzkHIQPIPVvBcnF1tHpPiGwegvYN5tsPVziOyvLrbbiqZB+nGVoDy+CueTG+lZmg/H/7FPaBtoOhAiB0JEF/uv9qkN6neAe1bAjzer31/zoTD8E9snIIUQQgghhKhB5m8/TalBo30Df1qVX/Cqrr
7lc2f2n80iPa+YIG/5f5W92hyfTm5xGXV83C5cGLeE6PL5JnHJuRZbQ1zdhZZmdlg1U2Foq1B2ncpk2aFkJvVsbOtwbM5g1C60NJvUo5HFKtscVffIIPaeyWJLfDo3dwy32romJWfef/99Zs6cyZEjRwCVkAFo2bIlkydPZsKECQQGBlbqXJ07q0HjmZmZpoQkLOnUFvj1XlUBo3dWc0B6PgZ6p8qfo3EfWPMmbPlczX6IXw0jPoGo6y0Xt6i8fT/B7h8AHdz0rUqI2JOoodDlPtg+HRY9AA9uBp+61lu/MEu1VotfBcdXQ/bpC3fpgCJnP1xjhqBvNhia9ANvKa+2S4GN4d7VcHorNOkPThbr8CmEEEIIIUStU2ow8uM29X+liWYYNl3X150WYb7EJuWw/th5bmxvvYtmomqWHVAVCkNahqLXW+7Cb3R55Uz8+TxKyoy4OktXCms6nV7AntNZ6HVwQxs7u270D0NahvLGn4fZfiJDErvAqsMpnM0sxN/ThVHt6ts6HLvTvUkQX66NZ2tCOpqmWS15ZdIVqWeffRadToemaXh5eXHrrbcyefJkunXrVvVAnOXimN0ylMH692D9+6oFUEBjVVER3rHq53L1hCFvQsxI+O0h9c71ebdCm9vg+rfBs3LJvBov4wRs+AACI6HlaAhsYvk1zx9RVVEAfZ9RlSn2aPDrcHITpB6CxQ/A7Qst1x7MaIRzu+H4KpWQObsTNMPF+51coUE3iBxIacM+LN91imE3DEfvYifVRuLK3P2g+RBbRyGEEEIIIUSNsyI2heScIoK9XRna2jytrfpHhxCblMOaOEnO2Ksyg5G/Y1VyxpItzQDq+bnj4+5MblEZCWl5RIf6WnQ9cakl+1XVTPfIIOr4uNs4miuLCPSkVX1fDibm8HdsCuO6NLB1SDZVUTUzrksDPFyr8Eb7WqJTowBcnHQkZhVyOqOAhkFeVlnX5IxIp06dmDx5MuPGjcPb27va54mMjMRoNJoajjC3zJOw8F44u1193nacmkHi5mPaeRt0hQc2wpq3YMtnsH++GtI9fCpEDzM1asd2ciP8NAEKM9Tnq15V7bFajoYWoyEo0vxrluSrQemlBaq6qe+z5l/DXFzc4ebvYHo/VXm19XPo8Yj51zEaVQu1Y8sv/XpQs/JWZQOgUS9wLX+yLi0F3RnzxyGEEEIIIYQQDmTWPy4Aujmb5wJgv6g6fL4mnvXHzmMwajhZsCpDVM/2kxlkFpQS4OlC18aWfeOtTqcjOtSHHScziUvKleSMlf2+1/5bmlUY2iqMg4k5LDuYXKuTM3HJOWyOT8dJr+OObg1tHY5d8nR1pl2EPztOZrIlPt1qyRmT3m6+b98+tm3bxr333mtSYkbYqQML4KveKjHj5quqZW78yvTETAUXD7judbj7bwhuDnkpMH+cSgYVZJhnDUez63v4YZRKzIS2Ua2xdHpI3g+rXoNpHdSw+fUfQHq8+db962k4fxi86sCYb6vWqs4W6kSreUUAK1+Fc3vMv8bWz1VixslNVXoNnwpTDsAjO2Hou6rqwtU6T9RCCCGEEEII4QiOJOey7UQGTnod47ua70Jo+wh/fN2dySooZe+ZLLOdV5jPsvKh64Nb1MXZyfJtxioSMoeTcyy+lrjoSHIuR1JycXHScX1L+21pVqGiimtzfBrZhaU2jsZ2KpLmQ1rWpb6/h22DsWPdmwQBsCUh3WprmvRs2bp1a3PFIexJca6a57HwHijOgYjyKpfWN1tmvYjOcP8G6DlFJSIO/Ayfd1VVEbWFoQyWPgtLHgNjGbQcA3cvhzt/g6eOqbk8TfqDzgmSD8Dq11Wi5suesO59SDtW/bX3/Ah7f1Q/+5tnWHeGiyk63gUxI8BYCgvugeI88507+aBKhgEMew9unQ2d7gL/2vsuCyGEEEIIIYS4lh+2nATguhZ1CfMz3wVAZyc9vZuHALDuSKrZzivMw2jULiRnhrayzgX76DD1xuG4pFyrrCeU3/clAtC3eR38PO2/pXtkiDfN63pTatBYdTjF1uHYRGZ+CYv2qN/bpB6NbRyNfesWWZ6ciVdzZ6xBJmaJSyUfhK/7wL556mJ93+dg0l8QYOGSNxd3GPwq3LMSQqIhPxV+HAuHFlt2XXtQmAVzb4FtX6nP+7+o2na5eqrPvYKh4yS4c3F5ouZT1VJL5wQpB2HNG/BZJ5g3vupJmtTD8OeTarvfC6qlmaPQ6dTPwrc+ZMTD0mfMc96yYvj1PjCUQPProcNE85xXCCGEEEIIIWqwnKLSCxcA7+zeyOzn7x9VB4A1R86b/dzCNHvPZpGaW4yPmzM9mgZZZc2Kypk4qZyxGk3TWLIvCYCR7ey/pVmF61uq6pmKBGJtM3/HGYpKjbSs50vnRgG2DseudWgQgKuzntTcYuLP51tlzUrNnDl9+rRFFm/QQN6FfhmjAc7tVe2SgiLByYpZ6H3zYckUKCsE33C46Vto2N166wOEd4T718Oi++HQIlhwF5TkQfs7rBuHtaQdV3NN0o+Bi6dqG9di1JX39wqCjhPVR0EGxP2hElgJa+DIn3B0marw6PsceIdcfe3iPDVnpqxQJXt6P2nWb80qPANhzDcwa7iq/mnYw/THyuo3IPUQeAbDyGkqCSSEEEIIIYQQ4qoW7jpLQYmB5nW96dbE/DNH+pZXzhxIzOZ8bjEhPm5mX0NUz8ZjaQD0bh5stjlD1xIVqipnUnKKycwvIcDL1Srr1mZ7z2RxOqMADxcnBsXUsXU4lXZ9qzA+XX2cdUfPk19chpebySPYHUaZwcjs8orGST0aoZNrXFfl7uJExwYBbElIZ0tCOk3rVH+My887KzeXulKPxsaNzV/ypNPpKCsrM/t5HVp+Giy4G06sU5/rnSGoKYREQUiMuq0TA4GR4GzGf3TKimHZc7DzO/V55ECVmPG07AC3K3J2U/Nt3Hxgr44tWgAAxMBJREFU9w/w28Oq1Vq3B20Tj6XEr4FfJkJRtkqGjZsLYW0rf7xnIHS4U32cPwIrXoajS2HHt7DvJ+g1Bbo9dLEC5580TVXMpB0BnzCV4NA7aCFdo57Q52lY9y78/qiaj9RiZPXOdXIjbJ6mtkd+Ct6O82JDCCGEEEIIIWzFaNSYveUUABO6W+YCYIiPG63r+3EgMZt1R89zc8dws68hqmdr+XyG7pHBVlvT282ZBoGenM4oIC45l+6R1qnYqc1+33cOgOta1sXT1XESHDFhPjQM8uRUegFrj5znhjb2PyvHXFbEpnAuu4ggL1dGtHWcaidb6h4ZxJaEdLbGpzOhW/U6SZ1Kz+f1P2IrtW+lrsZqmmaRD/EPZ3aodmIn1oGzO7j6qNkj5+Mg9jdY946qIvmiG7wVBp91gZ8mwNp3ISOh+utmnYbvhpQnZnTQ73m4/RfbJWYq6J1Uy6ru/1GfL3sO1r6jkgo1wfZvYM5NKjET3hnuXV21xMz/ComC8fNh4h8Q1g5Kcsvn0nSEvXNVRdY/7ZkN++er1mg3f6dapzmyvs9Bu9tBM6gE57GVVT9HUbaatYQG7SdA9A1mD1MII
YQQQgghaqKNx9NISMvHx82ZMe3rW2ydflGqematzJ2xG8VlBnadygSguwUqpq4murx6RlqbWZ7BqPHH/vKWZg52kV+n03F9q/LWZodqV2uzmZtPAjC+awPcXaxT1eboKhK9WxPSMRqrdx161uZTlb6EXak058yZM6sViKgETVOVDsueV4PNg5qp4eMh0ZCTCKlxKkFz/rCqjkiNUxfe046oj8O/w9q31YXk7g9Dg+6Vb8N0fCUsnAyFmeARAGO+hWaDLPv9VoVOB9e9Ae7+aq7K2rehKAeGvOm4raYMpbD0Wdg5Q33e5jYY8YmauWMOjXvDvWvg4EJY9Spkn4HFD8KWL+C611T7suSD8NfTav8BL6lWYI5Or1ctyEoLVDu8n26HOxZCo16VP8fS59TPy78hXP+25WIVQgghhBBCiBrmh/KqmZs6hlu0ZVC/qDpMW32c9UfPU2Yw4uzkoB0gapC9p7MoLjMS7O1GZEj1WwBVR3SoD3/HphCXlGvVdWujbQnpnM8txs/Dhd7NrtFG3w5d3zKUr9clsPpwCkWlhlqRqDh0LpvtJzJw1uu4vauFZ4nXIG3D/fFwcSI9v4SjqbkX5ltVVl5xGb9UsqUZVDI5M3GiDMS2iJJ8WPIYHPhFfd5iFIz6XLXzAvALVx//TJhoGuScu5isOb4K4lep2SNx5VUT3R+GFqOv3PrMaIT176lKFDR1zNgfIMAO/1B1Ouj7tPqZLHsWtn4OxTkqoaF3sCfSzFOqRdvJDYAOBr0CPR8zf6JJr4c2t0DMCNj+Naz/EFIOwOwbVcu6rFNQVgTNroOeU8y7ti3pneDG6VBSAMeWw9xb4c7f1Ryja4n9DfbNBZ0exky/+DcohBBCCCGEEOKqzmQUsCouBYAJ3S17XaFdhD/+ni5kFZSy90wWnRrZuOuHYGtCBgDdmgRafZ5FdJi6aCqVM5ZX0dJsWOtQXJ0dLynaNtyfMD93krKL2HgsjUEt6to6JIv7ftNJAIa2DiPUz0xvCq8FXJ31dGoUwIZjaWyJT69ycmbhrrPkFpfRKNiTyqRoHO+vqaZIOwbfDFSJGb0zDHkbbpl17YvCOh341Yemg1QSZsKv8NA26DhJtUNL2gu/3guftIENH6qh8f9UkAFzb1FVKGjQ8S64e7l9Jmb+qdsDMPpLdfF8z2zVuqqsxNZRVU5ZMaz/AD7vqhIzrt4wbp6aCWPJFy4u7ir58+ge6PqgepzFr4L04+BbH0Z/5bhzZq7E2RXGzoLGfaAkD+bcCMkHrn5MbjIsmaK2e06BBt0sHaUQQgghhBBC1Bhztqn2Lb2bBVu8csJJr6NP+bv210hrM7tQMW+mWxPrz3ypaGt2JCUXQzXbD4lrKy4z8NcB1dLMUeeW6PU6hrRUrc2WHqz5rc3S84r5rTyhNqlHI9sG44AqWpttiU+v0nFGo8as8lZyt3dtUKljatiVWQdxaDFM76eqX7xD1ZyQ7g9V/0J9nWhVSfJ4LPR/CbzrQm4SrHoNPmoBfzyhkkGJu9Rcm+MrVSJn9JcwYqr5WmpZWrvxKoGld4HYxTB/nKqSsGfxa+DLHmr+S1khNOoN962FqKHWi8ErCIa+Aw9vVxVV/g3Vz9Grhg7Lc/GA2+ZBeBc1R+aH0XD+6L/vq2mqmqkwA0LbqJlLQgghhBBCCCEqpajUwM871HuD7+zeyCpr9o+umDtz3irriSsrKjWw63T5vJlI619jaBjkhbuLnqJSI6fS862+fm2x/mgaOUVl1PFxo2tjx72WVDF3ZuXhFEoNRhtHY1nztp+mpMxI23A/OjTwt3U4Dqd7ebJ524mMKs2dWX/s/IX5ayPbVm7+miRnrMlQCstfhF8mqnf1N+wF96+Hht3Nc36vINUCbMoBVRUR2lolBHbOgM86wYzr1EyNgMYweaVKdjiaFiNh/E/g7KGSTHNuUhfg7U1OEvxyF8werSpVvOuqmT4Tl0BwM9vEFBSpqkqm7IeIzraJwVrcvOH2X1TCpSANfhgFmScv32/nDPU4cnKDMd9cuRWgEEIIIYQQQojLLD+UTGZBKfX9PRgQXccqa/ZpFoJOB4fO5ZCaU2SVNcW/23smi5IyIyE+bjQJ9rL6+k56HVF1VfVMXLLMnbGUipZmw9vUw0nvoDOggc6NAgn2diW7sPRCxVdNVGowMnurmgM2qWcjq7cbrAla1/fD282Z7MJSYpMq3zZxZnkrubGdIyo9f81sU9r27dvHhg0bSEhIIDc3F4PBcNX9dTodM2bMMNfy9i8nCRbcBae3qM97PgYD/gtOFhiU5+wG7cZB29vg5EbY8jkcXQbGMoi6AUZ/AR7+5l/XWpoOhDsXw49j4fRmmDUCbl8I3nYwkMxQpua8rHkbSnJVG7Yu90H/F8Ddz9bR1S4e/jBhMXw/DM7HwayRcPcy8C0vwU07DstfUtuDX1UVaEIIIYQQQgghKm1defXKiLbWu2gb5O1Gm3B/9p3JYu3R84ztFGGVdcXl/tnSzFYXgKNDfdl3Npu45FyGtQ6zSQw1WVJ2IStj1Uypke0cs6VZBSe9jsEtQpm3/TRLDybTu5kdXEe0gGUHk0nJKSbY203+JqrJ2UlPl8aBrI5LZUt8Oq3qX/uabvz5PNYdPY9OBxO7NwLKKreWaaHCkSNHuPvuu9m6dWulj9E0rXYkZ4xGOLtdtTE78DMUpIObr0qOxIyw/Po6HTTurT7S4yHzhBoIXxMypg26waQlMHsMJO2DbwaoOS6hrUw/d2EmLH0Wzu6EoKbqon1INIREQXCUqsr4N6e3wp9PQspB9Xl4Z7jhQwhra3pMonq8glSCZuZQ9fj/YRTctVQlyn69V1WWNe4LXe63daRCCCGEEEII4VA0TWPj8TQA+jQLtura/ZqHqOTMkVRJzthQxTyG7jaYN1MhOqy8cqYK724XlVNqMPLI3D0UlhpoE+5H23DHf9Px0FYqOfP3oRReH9XKppVARqPGvrNZxIT54u7iZJZzFpUa+Hilau1/e9cGuDmb57y1UfcmQSo5k5DOvX2aXHP/H8pnzQyMrkuDIE9ycir3nGRSciYxMZE+ffqQlpaGpqn+a97e3gQEBKCvaYPGK8tohDPb1EyU2N8h99zF++q0hFtnq/ZS1hYUaZt1LSmsLdy9HObeAhkJqm3bTd9A9A3VP2fSPvhpAmSp8j8y4uHY8kv38WtQnrCJgpAYCGwCe+bA3jnqfo8AGPQqtJ8AtfXvwJ74hsHE3+G7oZB2VLWaa9wXzu1WSZrRX8rvSQghhBBCCCGq6GhKHqm5xbi76OnYKMCqa/ePrsMnq46x4VgapQYjLk7yfzprK/p/9u47PKo6a+D4d2Yy6T0hvTdaQu+9I9gBUVEQxYqLsupaVvddXbuube0dRBArKkpHeu+ElkJ6771NZu77xyRRlJKQSSaTnM/z5NEkt5wwMzeT37nnHJ2eI+klAAwLczdbHN19pK1ZW/nvhjgOphbjZGPF2zf37xTtsYaFeeBsa0VBRS2HUosZEmqe525aYRWPfn+MvUlFDAp2Y+Xdw7AywXXsnd8SScqvpJuTDXeM
[... base64 PNG payload truncated ...]",
2tWvXNr8/duxYrnObN282FwO6du2abyxXF16uLvL4+/ub34eHh+fbhyUZhqHY2FgdOnRIe/bsMb9y9tXZtWtXvtd37NhR9vb21zwXGhpqft+tW7fr9pHz2cbHx+vixYvXbVerVq1cS8H9r6efflrSlQLajZZjAwAAAHDz2HMGAAAAQIGFhITIZDJd97y7u7sSEhJUtWrV67bx8PAwv09MTMx1buvWreb39913X4Hjunq2TNOmTVW5cmUdO3ZMI0aM0IwZM/Tkk0+qefPmatCggRwcHArcb0EsWbJE3377rdauXZtnPFc7d+5cvv0U9DO7mc/26p+v1rBhw3xjadSokfn9nj178m0LAAAA4OZRnAEAAABQYNdapuxqNjY2N2yX00aSsrKycp2Li4u7pbhSUlLM7+3t7bVo0SJ17txZ+/fv15YtW7RlyxZJkrOzs5o3b67evXurW7dusrW1vaX7SVdmyjzzzDP6/vvvC9Q+NTU13/MF/cxu9bO9mo+PT76x+Pr6mt9fuHAh37YAAAAAbh7FGQAAAAB3jKsLCqtXr5aXl1eBrvvfYsO9996riIgILVq0SIsWLdKaNWt09OhRpaamavny5Vq+fLnGjRunpUuX3rBQcT1TpkwxF2bq1KmjESNGqHHjxipfvrxcXFzMhZ8+ffpo+vTpMgzjlu5TGPKb/QQAAACg8FGcAQAAAHDHuLoY4+DgoJo1a95yX7a2turQoYM6dOggSYqJidGyZcv0zTffaNu2bdq2bZsGDRqk+fPn31L/kydPliRVqVJF69evl7Oz8zXbxcfH31L/henMmTMFPu/p6VnY4QAAAAAljs2NmwAAAACAddStW9f8fsWKFRbt29/fX08//bQ2bNigevXqSZIWL16cZ7mxgs4q2bt3ryTpiSeeuG5hxjAMbd++/TaiLhw5y7wV5PztFMgKglk8AAAAKIkozgAAAAC4YzRt2tQ8U2PixIlKSEiw+D3s7e3VvHlzSVJmZqYuXryY67yTk5MkKS0tLd9+MjMzJeXe7+Z/LVy4UNHR0bcRbeGIiIjQjh07rnt+ypQpkq7MPmrRokWhxpLzeUs3/swBAACAuwXFGQAAAAB3DCcnJ7388suSpNjYWHXv3l3JycnXbZ+YmKgJEybkOvbXX3/pyJEj170mPT1da9askSS5ubmpbNmyuc77+/tLko4ePZpvrCEhIZKkRYsWXXPpsqNHj2ro0KH59lGUnn322Wt+tj/99JOWLl0qSerQoYP58ygsV/d/o88cAAAAuFuw5wwAAACAO8qoUaP0xx9/6I8//tCyZct07733avDgwbrvvvvk4eGhxMREHTx4UKtXr9aCBQvk5OSk559/3nz9H3/8oXfeeUcPPPCAHnvsMYWGhqps2bJKTU3VoUOHNHHiRPNSYwMHDpSdXe5/FoWFhWnVqlXasmWLPvzwQ7Vt21aurq6SJGdnZ5UvX16S1KdPH73yyis6ffq0wsLCNGrUKNWoUUOXL1/Wn3/+qc8//1xpaWmqV6/eHbe0WYMGDbR161Y1aNBAr776qmrVqqVLly5pzpw5+u677yRJpUqV0qefflrosYSFhZnfjxw5Um+88Yb8/f3Ny50FBQXl+TMCAAAAijt+wwUAAABwR7G1tdWiRYs0ePBg/fDDD4qKitLrr79+3fY+Pj55jmVnZ2vNmjXmGTLX0rFjR33wwQd5jg8ZMkTffvutLly4oNGjR2v06NHmc82bN9fq1aslScOHD9fKlSu1YsUKHThwQE8//XSufpydnfXDDz9oyZIld1xx5rHHHtNjjz2msWPHqn///nnOly5dWgsXLlRQUFChxxIcHKyuXbtq9uzZWrFiRZ69ho4fP26VOAAAAABrYlkzAAAAAHccZ2dnTZs2TVu3btWQIUNUo0YNubu7y87OTh4eHqpTp44GDBigOXPmaP/+/bmuHTVqlJYuXaqRI0eqSZMmCggIkJOTk5ycnBQUFKRu3bppyZIlmjt3bq79TnKUL19emzdv1oABAxQcHHzNNtKVvWuWLFmiL7/8Ug0aNJCLi4ucnZ0VHByswYMHa/v27erSpUuhfD6WMGbMGC1fvlyPPfaYfH195eDgoKCgIA0dOlR79+4178tjDT/++KM+/vhjNWrUSO7u7rKx4Z+qAAAAuLuZDMMwijoIAAAAAEDhy1kq7K233tKYMWMK7T79+vXTtGnTFBgYqBMnThTKPVavXq2WLVtKklatWqUWLVoUyn0AAACAwsCyZgAAAACAQpGRkaE9e/aYf65WrZrs7e1vub+4uDjFxcVJurLcGQAAAFBcUZwBAAAAABSK6Oho1apVy/zz7e4f880332js2LEWiAwAAAAoWizkCwAAAAAAAAAAYEXsOQMAAAAAJYS19pwBAAAAkD9mzgAAAAAAAAAAAFgRe84AAAAAQAnBwgkAAADAnYGZMwAAAAAAAAAAAFZEcQYAAAAAAAAAAMCKKM4AAAAAAAAAAABYEcUZAAAAAAAAAAAAK6I4AwAAAAAAAAAAYEUUZwAAAAAAAAAAAKyI4gwAAAAAAAAAAIAVUZwBAAAAAAAAAACwov8HkbMfDkLdqD0AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "fig, ax = plt.subplots(1, 1, figsize = (20, 7))\n", "plot_df = AirPassengersPanel.set_index('ds')\n", @@ -365,7 +942,18 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABmcAAAKHCAYAAAB0L5wRAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3gU5doG8Ht2s9n03js19I4UpXeOKKIgIgpiwWPHXo4axA87niMqNpqKooCAXUADSicQWiiBkF4I6X2z2Z3vj2Unu2Q3bVsC9++6uM6bmXdm3t1sxu+bZ5/nEURRFEFERERERERERERERER2IXP0AoiIiIiIiIiIiIiIiK4lDM4QERERERERERERERHZEYMzREREREREREREREREdsTgDBERERERERERERERkR0xOENERERERERERERERGRHDM4QERERERERERERERHZEYMzREREREREREREREREdsTgDBERERERERERERERkR0xOENERERERERERERERGRHDM4QEREREZFNjR49GoIgQBAERy+FiIiIiIioTWBwhoiIiOgapX9Ybu6fh4cHOnbsiOnTp2PNmjVQqVSOXrJdnT17Fm+99RYmTZqETp06wdvbG87OzggKCsKAAQOwcOFCbN68GbW1tY5eqs3V1tYiMDBQ+mx069bN0UtCXFyc2c+uk5MT/P39MXDgQDz88MM4cOCAo5dL1Czz5883+ZmWyWTw8vJCREQEevfujdmzZ+Ott97CoUOHHLLOnTt3Ii4uDnFxcUhLS3PIGoiIiIjaO0EURdHRiyAiIiIi+2tpFkOnTp2wceNG9OvXzzYLaiPS0tLw/PPPY8OGDdBqtU3O9/f3x/PPP49HH30USqXSDiu0vw0bNmDWrFlG23bv3o3rr7++WcePHj0au3btAgBY6//9iIuLw+LFi5s9f86cOfjiiy/g6upqlesT2cL8+fOxdu3aFh3Tq1cvPPnkk7jnnntstKqGDP/+4uPjMXr0aLtdm4iIiOhq4eToBRARERGR423evNnoZ1EUUVJSgmPHjuGbb77BpUuXkJKSgnHjxuHUqVMIDg520EptKz4+HjNnzkRhYSEAQC6XY9SoURg1ahQiIiLg6emJgoICpKSk4I8//sDJkydRWFiIZ555Bp07d8b06dMd+wJsZOXKlSa3NTc4s3PnTiuvyNjtt9+O2bNnSz/X1dUhOzsbv/zyC7Zv3w4A+Oabb1BVVdXgs07UVj366KMYO3as9LNKpUJJSQmys7Nx8OBB/PPPP6iqqsLJkyexYMECrF+/HuvWrUNAQIADV01EREREzcXgDBERERE1GlR45ZVXMHr0aJw4cQJFRUVYtmwZ3nrrLfstzk6OHDmCqVOnoqamBgAwefJk/O9//0PXrl1Nzn/33Xdx6NAhvPLKK/j999/tuVS7yszMlAIco0ePRnp6OlJTU/H999/jf//7Hzw9PR28QqBbt24mP8OPP/441q5di3vuuQeiKGLLli34448/MGnSJPsvkqiFBgwY0Oi9uaSkBJ988gkWL16MmpoabNu2DdOmTcNff/3FDDEiIiKidoA9Z4iIiIioUX5+fliyZIn0s62zIByhoqICt9xyixSYueeee/DLL7+YDczoDR48GL/99hs++eSTq/Zh6OrVq6XybvPnz8fdd98NAKisrMR3333nyKU1y7x584xKsn3//fcOXA2R9fj4+OD555/Hvn374O3tDQDYv38/nn/+eQevjIiIiIiag8EZIiIiImpS9+7dpXFZWVmT848ePYpHHnkEPXv2hI+PD1xcXBAVFYUZM2Zg3bp1Znu53HzzzVID7Oeee67Ra2zYsEGa27VrV1RUVLTsRRn45JNPkJGRAUDXv2HFihWQyZr/fyovXLiw0WyMCxcu4JlnnkH//v3h5+cHpVKJsLAwTJkyBZ988glqa2tNHvfoo49Kr/H2229vdA379u2DQqGAIAgIDAxETk5Os9dvjiiKWL16NQDA3d0dt956K+bNmyf1KzJV7syU0aNHS6/DlDVr1kj716xZA0CXyfTggw+ia9eu8PT0NNrXUjfddJM0Pn78uNG+5ORkLFu2DLfccgu6dOkCDw8PODs7IygoCCNHjsTrr7+OgoKCZl3nn3/+wYIFC9C9e3d4enrC2dkZISEh6N27N2655RZ89NFHSE1NNXv8L7/8gjvuuAOdO3eGu7s7lEolwsPD0a9fP8yePRurVq1Cbm5uo2uoqanBp59+ihtvvBGRkZFwcXGBt7c3evXqhcceewzJycmNHh8XFyf9LvSB2MOHD+Oee+5Bx44d4eLiAn9/f4wZM8YocNeU3bt344477kBERARcXFwQHh6Of/3rX9iyZQsAXa8n/XXnz5/f5Pn+/vtvPPDAA+jevbt0j4mMjMStt96KTZs2NdrbyNS1cnJy8Morr6B///7w9/c3uY6ysjK89957GDNmDIKDg+Hs7AwvLy906tQJw4cPx5NPPonff//d7N+zrfTr10/6OwWATz/9FNnZ2SbnlpeXY/369Vi4cCEGDx4MPz8/KBQK+Pj4oEePHrj//vtx8OBBs9fSfz4M+z2NGTNGej/1/2JiYhocm5WVhY8//hizZ89Gjx494OnpCYVCgYCAAAwZMgQvvPACMjMzW/9GEBEREbU3IhERERFdkwBI/5qye/duae7EiRPNzqurqxMfe+wxURAEo/Nf+a9fv35ienp6g+MLCgrE8PBwEYAoCIL4xx9/mLxOamqq6O3tLQIQnZ2dxcOHDzf/hV9Bq9VK1wQgfvfdd60+lylvvvmmqFAoGn0/OnToIB49erTBsTU1NWK/fv2keZ9//rnJaxQXF4vR0dHSvJ9++skqa9++fbt0zrvuukvaPnLkSGl7UlJSk+cZNWpUo5+11atXS/tXr14tvvXWW6JcLm/wPq1evVo65tVXX5W2v/rqq41ef9u2bdLcLl26SNvXrl3b6O9F/8/Ly0v8+eefzZ5fo9GICxcubNa5/vWvfzU4vqqqSpw2bVqzjn/44YfNrmPnzp1Gn2VT/+Ryubh06VKz5zB8X+Pj48U333zT5O9C/2/atGlibW1to+//s88+2+g94c477xTPnTsn/Txv3jyz5youLm7WezVy5Ejx0qVLJs+RmppqdK1t27aJfn5+Dc5huI6EhAQxJCSkWb+jQ4cONfp+NGbevHkmP+/N0b9/f+nYN954o8F+lUoluri4NOs1LFy
4UFSr1Q3OYfj5aOxfdHS00XHx8fFN/ncB0N3Pv/jiixa9biIiIqL2ij1niIiIiKhJn3zyiTSeMGGC2Xn33XeflN3g5OSE2bNnY8yYMXBzc8OpU6ewatUqZGdn4+jRoxg+fDiOHDmCoKAg6Xh/f3+sW7cO48aNg0ajwd13341jx44hODhYmlNXV4c5c+agtLQUAPDWW29hwIABrX5tSUlJ0rfMPT09MWPGjFaf60qLFy9GXFyc9PP06dMxefJk+Pj4ICUlBWvXrkVycjJSU1MxYsQIHDhwwChLSalUYv369Rg4cCAqKyvx+OOPY/jw4ejRo4fRdR544AGkp6cD0PVZufHGG62yfsPMmHnz5knj+fPn4++//5bmvPfee1a5HqArO/bbb7/Bw8MDd999N6677jo4Ozvj9OnTCAkJadU58/PzpbGPj480rqqqgiAI6Nu3L0aOHIlu3brBz88PgO5b/jt27MDvv/+OsrIy3Hrrrdi7d6/Jz9qHH36ITz/9FIDuM3Tbbbdh4MCBCAwMRG1tLbKyspCQkIAdO3aYXN9LL72En376CQAQGBiI22+/HT179oS/vz9qamqQmpqKgwcPIj4+3uxr/O2333DzzTdDrVZDEASMHz8ekyZNQkREBGpra5GQkIAvv/wSJSUlePHFFwEAL7zwQqPv2xdffIF169YhMDAQ8+fPR58+fSCTybB//3588cUXqK6uxk8//YQ333wTL7/8sslzvP7663j77bcBAIIgYMaMGZg8eTI8PDyQnJyMVatWYd26dairq2t0LYAuc+X666/HqVOnAAAxMTHSe6VUKpGWloZvv/0WR48exd9//43x48dj//79cHFxMXvO8+fP47bbbkN5eTluvfVWjB8/Hn5+fsjKypIyvaqqqjB9+nTk5eUBAAYOHIhbbrkF4eHhcHd3R3FxMU6fPo34+HgcO3asyddhK3PnzkViYiIAXfnJK8ubabVa1NTUIDg4GOPGjUPfvn0RFhYGV1dXFBcXIyEhAd9//z2Ki4vx6aefwsvLS/rd6c2ePRv9+vXD+vXrpbKGS5YsQa9evYzmubm5Gf1cU1MDURQRGxuLMWPGoEePHggICICTkxPy8vLw999/Y8uWLaitrcX999+P4OBgq93HiIiIiNosR0eHiIiIiMgxYPBt5StptVqxuLhY3Llzp3jrrbdK83r06CFWVlaaPN+mTZukeT4+PuKBAwcazCkrKxPHjBkjzZs+fbrJcxl+O3vixImiVquV9r3wwgtGWQiG+1rj448/ls43btw4i85l6ODBg6JMJhMBiEqlUvzxxx8bzFGpVOKcOXOk6/fv39/k6zHMLOndu7dYXV0t7fv000+Njq+pqbHK+gsLC0WlUikCECMjI0WNRiPtKy8vF93d3UUAYmBgYJOZEy3JnAEgdu3a1WRmlaGWZM7cfvvt0twFCxZI20+ePCmeO3eu0WN37Nghurm5Nfr56NmzpwhA9PPza3TdNTU14v79+4221dXVSVlgnTt3FouLi80eX1paKh45cqTB9pycHCnzw9vbW/zzzz9NHp+TkyP26dNHyqA5ffp0gzlXZkaMGjXK5Jr27NkjOjk5iQBEf39/k5+7s2fPis7OziIAUaFQiFu3bm0wp7KyUpwwYYLZjBVDs2fPluY8+eSTJj93Wq1WfO6556R5L730UoM5hpkzAER3d3dxx44dJq8piqK4YcMGae5TTz1ldp4oimJSUpKYn5/f6JzGWJI5s3fvXulYX1/fBvvr6urEX3/91ehv+UoFBQXi8OHDpc9IWlqayXlXZlg1JS0tzWR2oKHExEQxKChIBHQZbpbe24mIiIjaOgZniIiIiK5Rhg8nm/oXFhYmPvbYY2JpaanZ8w0aNEia/+2335qdV1BQIAYEBEhzTZXFqqurMyqd9dZbb4miKIp//vmnFPAIDQ01W7aoJV566SWjUj7WcttttzVaYkhPpVKJ3bp1k+b++uuvJufdeeed0px///vfoijqHgTrAwfu7u7i2bNnrbb+Dz74QLreCy+80GD/XXfdJe3fuHFjo+dqSXBGEASTAYgrNTc489VXXxmVU/r999+bPPeVXn75Zen4rKysBvv1QayZM2e2+Ny5ubnSuZ955pkWHy+Korho0SLpHKYCIIbOnDkjlSl78MEHG+w3fF99fX0b/RszDCz+888/DfY/8sgjjX6G9C5duiT6+vo2Gpw5duyYtP+WW25p9DWKoijecMMNUrDqysDRlcGZ999/v9FzvfHGG43er6zJkuBMTk6O0esyVZasOc6fPy+d4/XXXzc5p6XBmeZauXKldN7du3db7bxEREREbVHzu5wSERER0TVLoVDA3d0dGo3G5P6MjAwkJCQAADp06NBo83p/f388+OCD0s8//PBDgzlyuRzr1q2TSkz95z//wc8//4y5c+dCq9VCJpPh66+/RkBAgCUvCwBQWFgojQ1LXlmitrYWP//8MwDAw8MDjz76qNm5zs7OePrpp6WfN23aZHLeihUr0LlzZ2m8bt06zJ49G1VVVQB0pbW6du1qlfUDxiXN7r777gb7DcucGc611A033ID+/fu36JgzZ85gy5Yt0r9NmzZh+fLlmDx5Mu666y6pOfy0adMwadKkFq/p+uuvl8b79+9vsN/d3R0AcOLEiRY3gzcs/3TkyJEWr00URXz11VcAgNjYWNx0002Nzo+NjcV1110HAPjjjz8anXv33Xc3+jc2btw4aZyUlNRg/9atWwHo/p4fe+wxs+cJCAjA3LlzG13L2rVrpfFzzz3X6FwAuOuuuwAApaWlOHDggNl5rq6uuO+++xo9l/73CwCHDx9u8tqO4uvra/RzUVFRq87TqVMnqYSgqc+7LTX1t0ZERER0NWHPGSIiIiLC5s2bG2yrqqpCWloatm7dioMHD+KNN97AunXrsGPHDnTp0sVoruFDtIkTJ0q9GsyZPHkyXn/99QbHGoqIiMDq1aulPhrTpk2T9j3//PMYO3Zss19fczW17uY6evQoampqAOgeNho+3DVl8uTJ0tjc++Hp6Ylvv/0W119/PWpra40eZs+ZMwfz58+3fOGXJSQkSL0zhgwZgm7dujWYM3bsWERFRSEjIwPbtm1DdnY2wsPDLb72iBEjWnzMd999J/W/MGfWrFlYvXq1yX27d+/Gt99+i4MHD+LChQsoLy+HWq02OTcrK6vBtokTJ2L9+vU4c+YMxo0bh0WLFmHixInw8PBocu1eXl4YOnQo9u/fjz///BM33XQTHn74YYwePRpKpbLJ40+dOoWCggIAQEhICLZs2dLkMXK5HACQmpqKmpoasz1Zhg8f3uh5DH/fxcXFRvsuXryIzMxMAEC3bt2a7Bc0ZswYLF++3Ox+fY8jQRCQmZmJ3NzcRs+n7yMF6N6jkSNHmpzXv3//Jn9P48ePhyAIEEUR//73v3Hu3DnMnj27Qe8nR9MHIZuSk5ODr776Cn/++SdOnTqF4uJiKch7JVOfd0scPXoUX3/9Nfbt24dz586hrKwMKpXKLtcmIiIiamsYnCEiIiIiTJ8+3ey+F198Ee
+//z6efPJJZGRk4JZbbkFiYiIUCoU0x/BBaXOyN2JjY6VxTk6O2Xk33XQTHn30UaOHtsOGDcPixYsbPf+2bdvMPmwEdA/T9RkL/v7+0vYrHzC3VkvfD31j8crKykbfj0GDBuGNN97AU089JW3r1KkTVqxYYdmCr2CYCWOYIWNIEATcdddd+L//+z9oNBqsWbMGL730ksXXjoiIsPgccrkcXl5eiI6OxtChQ3HXXXeZDDRUVFTgrrvualZAQ6+srKzBtrfeegu7d+9GVlYWdu/ejd27d8PJyQn9+vXDiBEjMHr0aEycONFsEOSjjz7CuHHjUFJSgp9++gk//fQTlEolBg0ahBEjRmDs2LEYM2YMnJwa/r9vaWlp0njXrl3YtWtXs18LoMuuCAsLM7mvqcw0w+CRPhipZ/g57tSpU5PraGqO/nWKooiZM2c2eT5DjWWQNOfz1r17d/znP//BkiVLUFlZiSVLlmDJkiUICgrCDTfcgJEjR2Ly5MlG9zVHuPL+pc88NPTpp5/iySefbPT+aMjU57016urq8PDDD+Pzzz9vdhDJWtcmIiIiaqsYnCEiIiKiJi1atAhbtmzB33//jaSkJGzcuBF33HGHtL+8vFwaN5UlAsDom+qGx5py5QPPGTNmmHxIbeiBBx5Aenq62f2pqamIiYkBYPzt//Pnzzd63uZq6fsB6N6TysrKFr8fkydPhpeXV8sXaUZ1dTW+/fZbALqSa42VqJs/fz7+7//+DwCwatUqvPjiixZnH7m6urb4mFdffRVxcXEtPu7222/Hr7/+CkD3e/rXv/6F/v37IywsDG5ubtLn7OTJk3j55ZcBwGRpv6ioKCQmJmLp0qX48ssvUVhYiLq6OiQkJCAhIQHvv/8+vLy88Pjjj+Oll15qkBEzYMAAHD16FEuWLMF3332HiooKqFQq7NmzB3v27MGbb76J4OBgPP/883jssccgk9VXpy4pKWnx6zbUWBk2w+u0VGVlpTQ2LN1mTlNzLHmdjb3G5n7eXnvtNVx33XV48803sWfPHgBAfn4+fvjhB6k04/XXX4/33nsPQ4YMafVaLZGamiqNfX19G9wnN2zYYFRSctiwYRg1ahQ6dOgAb29vo8/lAw88gEuXLpktZdlSjz/+OD777DMAujKZkydPxnXXXYeIiAi4u7tLwf78/HwsXLgQgOm/NSIiIqKrCYMzRERERNQsU6ZMkUoLbd++3Sg44+npKY0NH8qaU1FRYfLYK508edKoHwsAvPLKK5g6darVSgoZltE6ePAg6urqmgz+NKWl74fhvMbej9zcXNxzzz1G21asWIFbbrnFqP+HJTZu3IjS0lIAuofahplFjblw4QJ27tyJMWPGWGUdtrZnzx4pMNO7d29s27bNbOktwywxcwICArBs2TK88847OHz4MPbu3Ys9e/bgr7/+QlFREcrKyrBkyRLs2bMH27dvbxD4iI6OxhdffIGPPvoIBw4cwL59+7B7927s3LkTFRUVuHjxIhYtWoRjx44ZlWczDHQ+8cQTeP/991vzdlidYVCyOVkaTf2deHh4oKSkBD4+PlbLcGupG2+8ETfeeCMuXryIf/75B/v27cOuXbtw5MgRiKKIPXv2YMSIEfj1118xfvx4u69v37590thUgOjFF18EoMss27x5s1GpyCvdf//9VltXZmYmPvnkEwC6YHh8fHyD0ph6pnoXEREREV2tWv9VKCIiIiK6phg+pDfs5wAAoaGh0jg5ObnJcxnOMVdSqaqqCrfffrtULunWW28FoMvsmD17doMySobS0tIgiqLZf/qsGQDo2bOnlD1TXl4ufQveEi19P3JycqSAlbn3Q6vVYu7cubh06RIAXQaRIAjQarW46667pL4jljIsaWbPY+1t27Zt0njp0qWN9kQxzEhoilwux3XXXYcnnngCGzZswMWLF/H999/D29sbAPDXX3+Z7PGkp1QqMXLkSDz33HP46aefcOnSJXzyySdSgGjNmjVGTekNy3KdPHmy2eu0NcPPcUpKSpPzL1y40Oh+/essKSlpcP+xt+DgYNx222147733kJCQgLS0NNx2220AALVajUWLFjlkXevWrZPGVwZJU1NTpczA6dOnNxqYKSsra7QUXEvt2LEDWq0WgK5fmLnAjH6dRERERNcKBmeIiIiIqFkMH/5fWapr6NCh0njbtm1N9hT4/fffTR5r6PHHH8epU6cAAHfffTc2btyIWbNmAQBOnDhh1HfFEoIg4IknnpB+fv311802qG6ufv36Sf1F9uzZ02RWQHPejzfeeAN//fUXAGD06NHYsGGDlFWUm5uL+fPnW7RmQFfWTZ8d5evri1dffbVZ//SloTZt2mRxmS17ycvLk8adO3dudK4+w6Y1nJycMHPmTKOya//880+zj3dxccHChQvx0EMPmTy+X79+8PHxkbZbK0hnqeDgYERGRgIATp8+bfR+mxIfH9/o/tGjR0tjawRQrSkqKgrffPMNAgMDAeiCZPb+O9i0aROOHj0KQPeZueuuu4z2t+Tz/vvvv0vBFHMMM7+aut/b62+NiIiIqL1hcIaIiIiImuW3336TxleWFIuKisLgwYMB6L75/P3335s9T3FxsVTiRhAEKSPG0Pfff48vvvgCANClSxd89NFHAIDPPvtMynr5+OOPsXXr1ta/IAP//ve/ER0dDUAX+HnooYeafDhp6LPPPsMff/wh/ezs7Cx9M72iogIffvih2WPVajXeffdd6Wf9N/AN7d27V3q4HxAQgHXr1kEmk+H//u//cN111wEAfvnlF/zvf/9r9ppNWbVqlfSg9Y477kBcXFyz/k2fPh2Arin8N998Y9Ea7MUwwNhYr6G9e/caBc9aq0OHDtK4rq7OasfL5XLMnTsXAKBSqfDSSy9ZsErruvnmmwHosr4++OADs/MKCgrw1VdfNXquefPmSeM333yzzQSh9BQKhVH/qtb8jlvr6NGjuPfee6Wf//3vfxtl7wHN/7zX1tZKfaQaY1hOr6ngc3OvnZKSgrVr1zZ5bSIiIqKrBYMzRERERNSk999/X/q2vkwmw+zZsxvMeeGFF6Txv//9bxw6dKjBnIqKCsyaNUsqzTV9+nR0797daE5aWhoeeOABALogx/r166UHgd7e3vjmm2+knjD33nuvVUocubu7Y9OmTVK2y6pVq3DjjTfi3LlzjR6XkJCAqVOnYuHChaiurjba9+yzz0rfLn/11Vfxyy+/NDherVbj3nvvxenTpwHoGsNPmjTJaE5JSQnmzJkjPexdtWqVVDJKoVDg22+/lfrUPPfcc9K351tKo9EYPRg1fBjeFMO57aW0mT6YCACLFy82WSbv+PHjmDlzZqOZAbm5uXjqqacaLd2lVqulZuiALttFLzExEYsXL0Zubq7Z4ysqKox+N4bHA7peIn5+fgB0gcLnnnsOarXa7Pmqq6uxevVqrF+/3uwca3jkkUekcmzvvvsufvzxxwZzqqqqMGfOnCYzTQYNGiTdd3JycjBp0qQmS2Dt378fzzzzTOsWb+CDDz7Ahg0bUFtba3bOP//8g+PHj
wPQlWALCAiw+LpNKSkpwVtvvYVhw4ZJfaKGDx+OpUuXNpjbrVs36T66detWo/40etXV1Zg7d670OhpjGCw8cuRIo3MN/9beeecdFBYWNpiTkZGBm266qVn9iYiIiIiuFpZ1OiUiIiKiq8KWLVsabKuurkZaWhq2bt2KAwcOSNufeuop9OrVq8H8W265BfPnz8eaNWtQXFyM4cOHY86cORg9ejTc3Nxw6tQprFq1CllZWQB0jaH1GTR6dXV1uOOOO6QHjW+++SYGDBhgNGfYsGFYvHgxXnrpJRQWFuLOO+/EX3/91aDBeksNHDgQv/76K2bOnInCwkL89ttv2LZtG0aNGoUxY8YgIiIC7u7uKCwsxPnz57Ft2zacOHHC7PkGDRqEV155BXFxcVCpVJg2bRqmT5+OKVOmwNvbGykpKfjyyy9x5swZAICnpye+/vprCIJgdJ77778f6enpAIDHHnusQa+Ijh074pNPPsGdd94JlUqFO+64AwkJCQ1KzzXlt99+Q05ODgAgNjZWyshpjvHjxyMsLAw5OTk4cuQIjh492iCA0NbMmDEDUVFRyMjIQEJCAmJjY3Hfffehc+fOqKqqwq5du7B+/Xqo1WrMmzfP7Df6VSoVli1bhmXLlmHgwIEYMWIEevToAR8fH1RUVCAlJQXffvut1FOlY8eORsHN0tJSxMXF4bXXXsPw4cMxfPhwxMbGwsvLCyUlJTh9+jS++eYbqTTU0KFDMXbsWKM1hIaGYsOGDfjXv/6FmpoavP3221i3bh1mzpyJPn36wNPTE5WVlUhPT0dCQgL+/PNPVFVVYcmSJTZ6d3ViY2Pxyiuv4OWXX4Zarcb06dMxY8YMTJ48GZ6enjh79ixWr16NtLQ0zJo1S8q4M/e3/PnnnyM5ORlHjhzBkSNHEBsbi5tvvhkjRoxASEgINBoN8vPzceLECfz5559IS0tDp06d8M4771j0Oo4cOYK1a9fC29sbkyZNwoABAxAREQEnJyfk5+cjPj4eP//8s5Rt9+KLL1p0PcPr6kvWAbqsltLSUmRlZeHQoUP4+++/jbJWJk+ejK+//loKMhtydnbGQw89hLfffht1dXUYNWoU5s+fj+uuuw7u7u44deoU1q5di8zMTIwbNw5nz56V7tWmjBw5Es7OzqitrZXe3759+0KpVAIAXF1dMWrUKAC6e/aQIUNw4MABZGRkoFu3bnjggQfQvXt3aDQa7N+/H1999RUqKyul/4YQERERXRNEIiIiIromAWjRP4VCIb766quiVqs1e866ujrx0UcfFQVBaPRcffv2FdPS0hoc//zzz0tzpkyZYvZaGo1GHDt2rDT3tddes9r7kpqaKs6aNUuUyWTNel+CgoLE999/X1SpVCbPt3TpUlGhUDR6jpiYGDExMbHBsZ988onRe1ZTU2N23fPmzZPmLliwoMWve/r06dLxS5cubfHxzzzzjHT8I488YrRv1KhR0j5TVq9eLe1fvXp1s6736quvSse8+uqrLV6vKIpiQkKCGBAQYPb3IpfLxTfffFOMj483e620tLRm/w316tVLPH/+vNHxu3btavbxI0eOFPPz882+niNHjojdunVr1rnkcrn4+eefN/q+xsfHN/r+Nfa+GHrmmWcavSfMnj1bPH36tPTzY489ZvZcFRUV4vz585u8x+j/jRo1qsE5UlNTpf3z5s1r9DWKoijec889zb5Hvv76602erzGGf8fN/de7d+9m/d2oVCpx8uTJTb5fBQUFYnR0tAhAjI6ONnu+//znP2bPc+VxqampYocOHRq99iOPPCJeuHChRb8bIiIiovaMmTNEREREZJJSqYSPjw+6d+8ufcta3+/FHLlcjg8++AALFizAZ599hp07dyIrKwu1tbUIDAzEwIEDMXPmTNxxxx0Nvh2/Y8cOvP322wCAkJAQrF27tkEWiZ5MJsNXX32Fvn37oqCgAIsXL8a4ceMwfPhwi193TEwMvvvuO5w9exabN29GfHw8zp07h4KCAtTU1MDb2xtRUVEYNGgQpk6diqlTp0qlm0x54YUXMGvWLKxYsQI7duxAeno6Kisr4e/vj759++Lmm2/GggULpG+c6yUlJWHRokUAADc3N6xfv77BHEMffvgh9u3bh+TkZKxatQqTJk3CrFmzmvWaL168iJ9//hmA7r3V9zBpiXnz5knfoF+3bh3eeecdk9/gb0sGDhyI48eP47333sPPP/+M9PR0ODk5ISwsDGPGjMEDDzyAAQMGYOfOnWbPER0djYyMDMTHxyM+Ph5HjhxBRkYGysvL4ezsjJCQEPTv3x+33norZs2aJZXk0xs5ciSSk5Ol448fP46srCxUVlbCxcUF4eHhUkmvK7OmrtS/f38kJSVh8+bN2Lp1K/bv34+LFy+isrISHh4eiIyMRO/evTFmzBhMmzYNISEh1ngbm/T2229j2rRp+PDDD7F7924UFBRIn//77rsPt956q1F2nr5Emynu7u5YvXo1nn32WaxZswY7d+5EamoqiouL4ezsjMDAQMTGxmL48OGYMmVKizLAzPnkk08wf/58xMfHY/fu3Th79iwuXbqEuro6eHl5oUuXLhg9ejTuvfdedOnSxeLrmSIIAtzc3ODl5QU/Pz/07NkTAwYMwLhx4zBo0KBmncPZ2Rm//PIL1qxZg7Vr1+LYsWOorq5GYGAgevXqhTlz5mDu3LnNzkJcsmQJ+vbti9WrV+Po0aMoKCgwW/otJiYGiYmJ+O9//4sffvhB6j0TEhKC4cOH495778Xo0aORlpbWrGsTERERXQ0EUWykgDIRERERERGRjS1fvhyPPfYYAGDz5s2YPn26YxdERERERGRjDM4QERERERGRw6jVainrR6FQIDs7G4GBgY5eFhERERGRTVnWNZWIiIiIiIjIjIKCAiQlJZndX1NTgwULFkhzbrvtNgZmiIiIiOiawMwZIiIiIiIisomjR4+if//+GDRoEMaNG4fY2Fh4eXmhvLwcx48fx/r165GbmwtA12vmxIkTCAsLc/CqiYiIiIhsz6npKUREREREREStl5CQgISEBLP7O3TogK1btzIwQ0RERETXDGbOEBERERERkU3U1tbi999/xx9//IF9+/YhPz8fhYWFAICAgAD069cPN910E+bNmwdnZ2cHr5aIiIiIyH4YnCEiIiIiIiIiIiIiIrIjljWzgFarRU5ODjw9PSEIgqOXQ0REREREREREREREDiSKIsrLyxEWFgaZTGZ2HoMzFsjJyUFkZKSjl0FERERERERERERERG1IZmYmIiIizO5ncMYCnp6eAIDU1FT4+fk5eDVE5EhqtRrbtm3DxIkToVAoHL0cInIg3g+ISI/3AyLS4/2AiAzxnkB0dSsrK0NkZKQUPzCHwRkL6EuZeXp6wsvLy8GrISJHUqvVcHNzg5eXF/8PK6JrHO8HRKTH+wER6fF+QESGeE8gujY01QrFfMEzIiIiIiIiIiIiIiIisjoGZ4iIiIiIiIiIiIiIiOyIwRkiIiIiIiIiIiIiIiI7YnCGiIiIiIiIiIiIiIjIjhicISIiIiIi
IiIiIiIisiMGZ4iIiIiIiIiIiIiIiOzIydELuBap1WpoNBpHL4OoAblcDoVC4ehlEBEREREREREREV3VGJyxo7KyMhQUFEClUjl6KURmKZVKBAQEwMvLy9FLISIiIiIiIiIiIroqMThjJ2VlZcjOzoaHhwcCAgKgUCggCIKjl0UkEUURarUapaWlyM7OBgAGaIiIiIiIiIiIiIhsgMEZOykoKICHhwciIiIYlKE2y9XVFZ6ensjKykJBQQGDM0REREREREREREQ2IHP0Aq4FarUaKpUK3t7eDMxQmycIAry9vaFSqaBWqx29HCIiIiIiIiIiIqKrDoMzdqDRaACAjdap3dB/VvWfXSIiIiIiIiIiIiKyHgZn7IhZM9Re8LNKREREREREREREZDsMzhAREREREREREREREdkRgzNERERERERERERERER2xOAMERERERERERERERGRHTE4Q0REREREREREREREZEcMzhAREREREREREREREdkRgzNERERERERERERERER2xOAMERERERERERERERHZVWmVGlqt6OhlOAyDM+QQhw4dgiAIuP76683OWbx4MQRBwOuvv27HlRERERERERERERGRLW1OzEK/Jdtw80d7UFxZ6+jlOASDM+QQgwcPxsCBA7F3714kJSU12K/VarF69WrI5XLcc889DlghEREREREREREREdnCd4cyIYrAiexSLPz6MFR1Gkcvye4YnCGHWbhwIQDgiy++aLBv27ZtSE9Px9SpUxEeHm7vpRERERERERERERGRjaRcqpTGB1OL8PymExDFa6vEmZOjF0A605bvxqVylaOX0SyBnkr89OgNFp9nzpw5ePrpp/HVV1/hzTffhFKplPbpAzb333+/xdchIiIiIiIiIiIiorahtFrd4Fn45sRsRPu74YnxXR20KvtjcKaNuFSuQl5ZjaOXYVfu7u648847sWLFCmzevBmzZ88GAOTn5+PHH39EWFgYpk6d6uBVEhEREREREREREZG1pFyqkMZdgz1wLr8Cogj8d8c5RPu74Zb+EQ5cnf0wONNGBHoqm57URlhzrQ8++CBWrFiBzz//XArOrFmzBmq1GgsWLIBcLrfatYiIiIiIiIiIiIjIsc7n1wdnZg+OgkYr4v9+PQ0AeHbjcYR5u2JIR39HLc9uGJxpI6xRJqw96tOnD4YOHYr4+HikpKSgU6dOWLlyJQRBwL333uvo5RERERERERERERGRFRlmznQK8sDILgFIK6zEugMZUGtEPPDVYfzw0HB0CvRw4CptT+boBRA9+OCDEEURK1euxK5du5CcnIwJEyYgJibG0UsjIiIiIiIiIiIiIitKMcic6RzkAUEQsPimnhjVNRCArifNgjWHUFRZ66gl2gWDM+Rws2bNgq+vL9asWYMVK1YAAO6//34Hr4qIiIiIiIiIiIiIrC3lUiUAwFUhR6iXCwDASS7Dh3P6o1uIJwAgvbAKD3yZgBq1xmHrtDUGZ8jhXF1dcffddyM3NxffffcdAgMDcfPNNzt6WURERERERERERERkRao6DdILdcGZTkHukMkEaZ+niwKr5g9G0OWe5wnpxXhm43FotaJD1mprDM5Qm7Bw4UJpPH/+fCgUCgeuhoiIiIiIiIiIiIisLb2wCvpYi6meMmE+rlg1fzBcFXIAwE/HcrBse7I9l2g3DM5Qm9C9e3eEhYUBAO677z4Hr4aIiIiIiIiIiIiIrO28Yb8ZE8EZAOgV7o3ld/SHcDmp5sP48/g+IdMey7MrBmeoTdi7dy9ycnIwatQodO3a1dHLISIiIiIiIiIiIiIrSzEIznQKMh2cAYDxPYLxyo09pJ9f/OEE9p4vsOna7I3BGWoTli5dCgB45JFHHLwSIiIiIiIiIiIiIrKF85cMMmcaCc4AwD3Xd8D84TEAgDqtiJe3nrTl0uyOwRlymL179+Lee+/FkCFD8Msvv2DgwIGYMWOGo5dFRERERERERERERDaQcjk4IxOAaH+3Jue/fGMPdAvxvHxsJSpVdTZdnz212+BMdnY25s6dC39/f7i5uaFfv344fPiwtF8URcTFxSEsLAyurq4YPXo0kpKSjM6hUqnw6KOPIiAgAO7u7rjpppuQlZVl75dyzUpOTsaqVatw+vRpTJs2DT/88ANksnb7kSQiIiIiIiIiIiIiM7RaESn5lQCAKD83KJ3kTR4jlwnoGeYt/ZxeWGWz9dlbu3wSXlxcjOuvvx4KhQK//fYbTp06hffeew8+Pj7SnLfffhvLli3Dhx9+iEOHDiEkJAQTJkxAeXm5NOeJJ57A5s2bsX79euzevRsVFRW48cYbodFoHPCqrj3z58+HKIooKyvDjz/+iKioKEcviYiIiIiIiIiIiIhsILesBtVq3bP3pkqaGYoxyLBJL6y0+rocxcnRC2iNt956C5GRkVi9erW0LSYmRhqLooj//ve/eOmll6QyWWvXrkVwcDC++eYbLFy4EKWlpVi5ciW++uorjB8/HgDw9ddfIzIyEjt27MCkSZPs+pqIiIiIiIiIiIiIiK5W5/Pr+810Cmx+cCY6wF0ap11FmTPtMjjz448/YtKkSZg5cyZ27dqF8PBwPPTQQ7j//vsBAKmpqcjLy8PEiROlY5RKJUaNGoW9e/di4cKFOHz4MNRqtdGcsLAw9OrVC3v37jUZnFGpVFCpVNLPZWVlAAC1Wg21Wm12vWq1GqIoQqvVQqvVWvz6iWxNq9VCFEWo1WrI5U2nFxKke0Bj9wIiujbwfkBEerwfEJEe7wdEZIj3BLpWJeeVSuMYf9dm/w1EeDtL47SC8jb/t9Pc9bXL4MyFCxewYsUKPPnkk3jxxRdx8OBBPPbYY1Aqlbj77ruRl5cHAAgODjY6Ljg4GOnp6QCAvLw8ODs7w9fXt8Ec/fFXeuONN7B48eIG2+Pj4+HmZr55kZOTE0JCQlBRUYHa2toWvVYiR6itrUV1dTX+/vtv1NVdPU227GH79u2OXgIRtRG8HxCRHu8HRKTH+wERGeI9ga41Oy/IoO+0cvHcMfyad6xZx1XVAfpQxpHkTPz6a7ptFmglVVXNy+5pl8EZrVaLQYMGYenSpQCA/v37IykpCStWrMDdd98tzRMEweg4URQbbLtSY3NeeOEFPPnkk9LPZWVliIyMxJgxY+Dv72/2nDU1NcjMzISHhwdcXFyafH1EjlZTUwNXV1eMHDmSn9lmUqvV2L59OyZMmACFQuHo5RCRA/F+QER6vB8QkR7vB0RkiPcEulatW3kIQDEAYO5NE+Dt2vzP/9tJ8SiuUqNCcMPUqSNttELr0Ffcakq7DM6EhoaiR48eRtu6d++OTZs2AQBCQkIA6LJjQkNDpTn5+flSNk1ISAhqa2tRXFxslD2Tn5+P4cOHm7yuUqmEUqlssF2hUDR6I9VoNBAEATKZDDKZrJmvkshxZDIZBEFo8rNNDfE9IyI93g+ISI/3AyLS4/2AiAzxnkDXmgsFuoySAA8lArzMV6IyJdrfHcVVJcgtrYEGMrgo2m4rhub+XbfLSMH111+Ps2fPGm1LTk5GdHQ0AKB
Dhw4ICQkxSg2sra3Frl27pMDLwIEDoVAojObk5ubi5MmTZoMzRERERERERERERETUMqVVahRU6Pq5dw5yb/HxMf71wZzMouaVDWvr2mXmzKJFizB8+HAsXboUs2bNwsGDB/HZZ5/hs88+A6ArZ/bEE09g6dKl6NKlC7p06YKlS5fCzc0Nc+bMAQB4e3vj3nvvxVNPPQV/f3/4+fnh6aefRu/evTF+/HhHvjwiIiIiIiIiIiIioqvG+UsV0rhToEeLj4/2rw/opBVWoUuwp1XW5UjtMjgzePBgbN68GS+88AJee+01dOjQAf/9739x5513SnOeffZZVFdX46GHHkJxcTGGDBmCbdu2wdOz/pf2/vvvw8nJCbNmzUJ1dTXGjRuHNWvWQC5vuylRRERERERERERERETtSUp+fXCmc1DLgzMxAfWZM+mFlVZZk6O1y+AMANx444248cYbze4XBAFxcXGIi4szO8fFxQXLly/H8uXLbbBCIiIiIiIiIiIiIiJKsWrmzNURnGmXPWeI2oK4uDgIgoA1a9Y4eilEREREREREREREbdZ5SzNnDIIz6YVXR88ZBmfIIdLS0iAIAkaPHu3opRARERERERERERGRDekzZ9yc5Qj1dmnx8b5uCni66AqBMXOGiIiIiIiIiIiIiIioETVqDTKKdNkunQI9IAhCi88hCIKUPZNdXI3aOq1V1+gIDM4QEREREREREREREZFNpBdWQSvqxp0C3Ruf3IhofzcAgFYEsorbf2kzBmfI7uLi4tChQwcAwK5duyAIgvRv/vz5AC5HQmNiUFtbi9deew3dunWDUqnE9OnTpfNUVFTgtddeQ+/eveHm5gYvLy+MGjUKW7ZsaXBNwzJq1dXVeP755xEdHQ2lUonOnTvjrbfegiiKJte7a9cujB49Gh4eHvD398ctt9yCM2fOWPttISIiIiIiIiIiIrrqWNpvRk8fnAGujr4zTo5eAF17+vXrh1tvvRWbNm1CcHAwJk+eLO274YYbpLFWq8X06dPx999/Y9SoUejTpw/8/f0BABcvXsTYsWNx6tQphIeHY8KECaiqqsK+fftwyy234I033sDzzz/f4Nq1tbWYOHEikpKScN1116F79+7YtWsXnn/+eZSXl+P11183mr9161bceuut0Gg0GD58OKKionDw4EEMGTIE06ZNs9E7RERERERERERERHR10PebAXRlzVor2r8+6+Zq6DvD4AzZ3fTp09GvXz9s2rQJ3bp1w5o1a0zOy8zMhFKpxNmzZxEeHm6075577sGpU6fw7LPP4vXXX4dCoQAAXLhwARMnTsR//vMfTJ06FX369DE6bt++fRgxYgSSk5MREBAAAEhISMCwYcPw/vvv4/nnn4eHh+4GUV5ejvvuuw8ajQbffPMN7rjjDgBAXV0d7rvvPqxdu9aabwsRERERERERERHRVcdamTMxBsGZqyFzhmXNqE174403GgRmjh49it9++w3Dhw/Hm2++KQVmAKBjx4547733oNFo8MUXXzQ4n0wmwxdffCEFZgBg0KBBmDJlCqqqqpCQkCBt37BhAwoKCjBhwgQpMAMATk5OeP/996UgDhERERERERERERGZps+ckcsEo+yXlooxKGvGzBmynk9HARX5jl5F83gEAQt32fwygiCYLB22fft2AMDNN98MQRAa7NeXRjt06FCDfTExMejatWuD7fptubm50rbdu3cDAGbNmtVgvq+vLyZOnIgffvihOS+FiIiIiIiIiIiI6Jqj1YpScCbazw3OTq3PFwn0VMJVIUe1WoOMqyBzhsGZtqIiHyjPcfQq2pSgoCAolcoG29PS0gAAzz33HJ577jmzxxcUFDTYFhERYXKuPgtGpVJJ23JydL+PqKgok8eY205EREREREREREREQE5pNWrUWgBARwv6zQC6L/NH+7vhTF45MourUKfRwknefouDMTjTVngEOXoFzWentbq4uJjcrtFoAAAjRoxAx44dzR5vWLpMz1SmjTmiKLb4GCIiIiIiIiIiIiLSsVa/Gb0Yf3ecySuHWiMit7QGkX5uTR/URjE401bYoUzY1UKf/XLbbbfhscces9l1wsLCAADp6ekm92dkZNjs2kRERERERERERETtXcql+t4wnQJb329GLzrAuO9Mew7OtN+cH2rXnJ2dAQB1dXUtPnb8+PEAgC1btlhzSQ3oe9ds2LChwb6SkhJs27bNptcnIiIiIiIiIiIias9skTmjl9bO+84wOEMOERAQAIVCgZSUFKlMWXMNHToU48aNQ3x8PBYtWoSKigqj/VqtFtu2bcPu3bstWuPMmTPh5+eHbdu24fvvv5e2azQaPPXUUw2uS0RERERERERERET1Ui7VP0O1tOcMAET712fKpBdUNjKz7WNwhhzC2dkZkydPRl5eHvr27Yu7774b9913H1avXt2s49etW4c+ffrgv//9L6KjozFu3DjMnj0bI0aMQEhICCZNmoSEhASL1ujl5YXPPvsMMpkMt99+O2644QbMmTMHsbGx2LhxI+68806Lzk9ERERERERERER0NUu5nDkT6KmEt6vC4vMxc4bICr744gvcddddKCwsxDfffIOVK1di167m9d4JDg7G/v37sWzZMnTp0gWHDh3Cli1bkJWVhf79++Ojjz7C3LlzLV7jrbfeiu3bt2PEiBFITEzEb7/9hh49emDfvn3o3LmzxecnIiIiIiIiIiIiuhoVV9aisLIWANDZClkzABDi5QJnJ11YI72wfWfOODl6AXTtCgoKwpdffmlynyiKTR7v6uqKRYsWYdGiRU3OjYmJafSccXFxiIuLM7lv7NixGDt2bIuOISIiIiIiIiIiIrqWGZY06xTk3sjM5pPJBET7ueFcfgXSi6qg1YqQyQSrnNvemDlDRERERERERERERERWZRicsVbmDABEXy5tVlunRV5ZjdXOa28MzhARERERERERERERkVWdzzfMnLFecCbG300ap7Xj0mYMzhARERERERERERERkVWlXKoPnHS2YnAmOqC+RFp6YZXVzmtvDM4QEREREREREREREZFV6TNn3J3lCPFysdp5mTlDRERERERERERERER0hRq1BpnFuqyWTkEeEATBaueO8TfInClg5gwRERERERERERERERHSCishirpxp0DrlTQDgFBvFyjkgnSd9orBGSIiIiIiIiIiIiJqc1IuVeCLfy4gu6Ta0UuhFtKXNAMs7zeTXZGNtUlrkVGWAQBwkssQ6asrbZZeWAVRHwVqZ5wcvQAiIiIiIiIiIiIiIkNbj2bjuU3HUaPWYsfpi1j/wDBHL4laICW/PqOlU6B7IzMbtzNzJ1745wVUqCvwY8qP2HTTJgBAlL8bLhRUolqtwaVyFYKs2NPGXhicISIiIiIiIiIiIqI2oU6jxZu/ncEXu1OlbYkZJdBqRchk1utbQrZ1/pJlmTNaUYtPj3+Kj49+LG1LLk5GlboKbgq3y31nLgEA0ouq2mVwhmXNiIiIiIiIiIiIiMjhiiprMW/1QaPADACo6rTIK6tx0KqoNVIulzWTywRE+b
Usc6aitgJPxD9hFJjRyyzPBABE+7tJ29IK2mffGQZniIiIiIiIiIiIiMihknJKMW35buw5XwgAUMgFdAvxlPa31wfw1yKtVsSFAl1wJtrfDc5OzQ9DpJamYs6vcxCfGQ8AkAkydPfrLu1PL0sHgMuZM5e3FVZZY9l2x+AMERERERERERERETnM1qPZuHXFXmSXVAMAAjyU+Pb+oZg7NFqak1rI4Ex7kV1SjRq1FgDQKbD5Jc3iM+Ix55c5SC3VZU55OXthxbgVWNB7gTQnozwDwBWZM+30s8GeM0RERERERERERERkd3UaLd747QxWGpQx6xfpg0/mDkSIt4v0gB9g5kx70tJ+M1pRi0+PfYqPj9WXMevi2wX/G/0/RHpF4lThKWm7PnMmwtcNMgHQiu03c4bBGSIiIiIiIiIiIiKyq6LKWjzyzRHsTSmUts0eHInFN/eE0kkOAIgJMMyOaJ8P4K9F+n4zQNOZMxW1FXhh9wvYmblT2jYpZhJeG/4a3BS633+0V30GVUaZLnPG2UmGcF9XZBZVI62wEqIoQhAE670IO2BZM7rqrFmzBoIgSP98fHwazBEEATExMXZfm97PP/+MF198EePHj4e3tzcEQcDkyZPNzq+qqsKWLVtw7733ok+fPvDy8oK7uzv69u2L1157DRUVFSaPmz59utF7MX/+fBu9IiIiIiIiIiIiouY5ma3rL6MPzCjkAl6f3gtvzOgtBWYAIMzbVepXwsyZ9iOlmZkzF0ov4I5f7pACMzJBhkUDF+Gdke9IgRkAcFe4w9/FH0B9WTOgvu9MeU0diqvUVnwF9sHMGbpq9e3bF/369YObm1vTk+1s7ty5KC0tbfb8b775Bvfffz8AoGfPnpg8eTLKysqwd+9evPrqq/j222+xa9cuBAUFGR03duxY+Pj4IC8vD3/88YdVXwMREREREREREVFL7UspxPzVB6Gq05UsC/RUYsWdAzAoxq/BXJlMQJSfG87nVyC9qAparQiZrH1lR1yLUvLrA2kdA91NzkkqTMK9f9yLSrVurpezF94Z+Q6Ghw83OT/KKwqFNYUoqC5ApboS7gp3RPu74Z9zuv1phZXwc3e27guxMQZn6Ko1ffp0xMXFOXoZJt16663o3r07Bg8ejPLyckybNq3R+c7Ozvj3v/+NRYsWoUuXLtL23Nxc/Otf/0JiYiKeeOIJfPPNN0bHPfbYYwCAnTt3MjhDREREREREREQO9/72ZCkw0z9K118m2MvF7PwYf3ecz69AbZ0WuWU1CPdxtddSqZX0PWeCvZTwclGYnPPpsU+lwEwX3y7435j/IdIz0uw5ozyjkJifCADILM9EN79uUuYMAKQXVmJAlK+1XoJdMDhD5AArV66Uxjt37mxy/t1334277767wfbQ0FB89NFHGD58OH744QfU1tbC2bl9RYiJiIiIiIiIiOjaoNWKOJmjqyYT6u2C9Q8MNSpjZkoHw74zBZUMzrRxRZW1KKqsBdB4v5lThacAAB4KD3w95WujMmamGPadSS9LRze/bog2CM6kFbS/nkTsOUMOcejQIQiCgOuvv97snMWLF0MQBLz++ut2WZMoivj2228xe/ZsdO3aFe7u7vD09MR1112Hjz/+GFqt1uRxFRUVePrppxEZGQlXV1f06NEDH3zwgdSEyta9bfr27QsAUKlUKCwsbGI2ERERERERERGRY2QWV6GqVgMA6BXu3WRgBoDRA/hU9p1p85rTb6ZUVYqLVRcBAF19uzYZmAGASK/6rJqMMl3fmRj/+uPSC9vfZ4OZM+QQgwcPxsCBA7F3714kJSWhZ8+eRvu1Wi1Wr14NuVyOe+65xy5rUqlUmDNnDnx9fdGjRw8MGDAABQUF2LdvHx5++GEcPHgQa9asMTqmpqYG48aNw8GDBxEYGIgbb7wRFRUVeOaZZ5CSkmKXdV+4cAEAoFAo4OfXsDYnERERERERERFRW3A6t1wadw/xbNYxHQKMS1dR25aSXx+cMZc5k1ycLI27+nZt1nmjPeszZzLKdcGZSD83CAIgikBaITNniJpt4cKFAIAvvviiwb5t27YhPT0dU6dORXh4uF3W4+TkhE2bNiEvLw+7d+/G+vXrsWPHDqSlpWHQoEFYu3Yt/v77b6Nj3n33XRw8eBDDhg3D+fPnsWHDBvz22284dOgQvvrqK7us+3//+x8AYPLkyVAqlXa5JhERERERERERUUudzi2Txt1DvZp1TEyAYeZM+3sAf605n9905oxhcCbWL7ZZ543yipLG+swZF4UcoZf7FbXHwB0zZ9qI23++HQXVBY5eRrMEuAbguxu/s/g8c+bMwdNPP42vvvoKb775plFgQR+wuf/++y2+TnM5OTlhxowZDbYHBgbijTfewIQJE7B161aMHDlS2vfpp58CAJYtWwYvr/r/oPTp0wePPvooXnvtNZuu+ddff8XKlSuhUCiwZMkSm16LiIiIiIiIiIjIEmfy6oMz3ZoZnAn1coGzkwy1dVqktcMH8Ncaw7Jm1syccVe4I8A1AAXVBUgvS5e2R/u7I6e0BsVVapRWqeHtpmjlyu2PwZk2oqC6APlV+Y5ehl25u7vjzjvvxIoVK7B582bMnj0bAJCfn48ff/wRYWFhmDp1qt3XdfToUSlzp6qqCqIoorxcl3J57tw5aV5GRgaysrIQERGBoUOHNjjPzJkzbRqcOX36NObOnQtRFPHOO+9IvWeIiIiIiIiIiIjaojN5umdsrgo5ovya7jMCADKZgGg/N5zLr0BGYRU0WhFymWDLZZIFzl8OzngonRDsZbrKT3KRLjgjQEBnn87NPneUZxQKqgtQWFOISnUl3BXuiAlww74Luj7c6UWV6OPmY9kLsCMGZ9qIANcARy+h2ay51gcffBArVqzA559/LgVn1qxZA7VajQULFkAub7opmLXU1tZi/vz5+Pbbb83O0QdpACAnJwcAEBkZaXJuVFSUye3WkJWVhcmTJ6O4uBhPPvkkHn/8cZtdi4iIiIiIiIiIyFIVqjqkX+4LEhvi2aIAS0yAO87lV6BWo0VuaTUifJsX2CH7qlFrkFVcDQDoFOgOQWj4O9ZoNThfch6ArlSZm6L5v8sorygcyT8CQFfarLt/d0T715e9SyusQp8IHwtegX0xONNGWKNMWHvUp08fDB06FPHx8UhJSUGnTp2wcuVKCIKAe++9165rWbZsGb799lv06tUL77zzDgYMGABfX18oFAokJycjNjYWoig2OM7UTcaWCgoKMGHCBGRkZOCee+7Bu+++a9frExERERERERERtdTZvPovPXcP9WzRsR0M+s6kFVQxONNGXbhUCf3jU3MlzTLKM1CjqQHQ/JJmetFe0dI4vTwd3f27I8a//rOQ0c7K3skcvQCiBx98EKIoYuXKldi1axeSk5MxYcIExMTE2HUdmzdvBgB8++23mDx5MoKCgqBQ6GoUXrhwocH80NBQALryZqaY226J8vJyTJkyBWfOnMGMGTPw+eef2z04RERERERERERE1FJG/WZCmtdvRi/a4AE8+860XUb9ZoKa7jfTxbdLi
84f6VlfwSizLBMAGmTOtCcMzpDDzZo1C76+vlizZg1WrFgBALj//vvtvo7i4mIApsuUff/99w22RUdHIywsDFlZWThw4ECD/Rs3brTq+lQqFW6++WYkJCRg0qRJ+Pbbb+1a9o2IiIiIiIiIiKi1TufWB2e6h7YsONPB8AF8AYMzbdW5fIPgjJnMmbNFZ6VxrG9si85vlDlTlq7bZhC4S29ngTsGZ8jhXF1dcffddyM3NxffffcdAgMDcfPNN9t9HV276tLoPvnkE6PtGzduxJdffmnymIULFwIAnnrqKaN+NCdPnsTy5cuttjaNRoM77rgD8fHxGDFiBH744Qc4Oztb7fxERERERERERES2dCa3/tlZbEjLyprFGJY1a2cP4K8lxzJLpHHPMNMBuHPF56RxS8uaRXnW9/jOKNdVLXJzdkKgpxJA+8ucYc8ZahMWLlyI//3vfwCA+fPnS+XE7OnZZ5/F77//jueffx4bNmxA165dce7cOSQkJODpp5822dvlmWeewU8//YQ9e/agU6dOGD16NCoqKvDXX3/h/vvvx4cffmgyiLJkyRL88ssvAICyMt23Bvbv34+hQ4dKczZv3iyVTvvwww+lsmsBAQF46KGHTL6Gd999FwEBAZa9EURERERERERERFYkiiLOXO45E+7jCm/Xlj37C/FygdJJBlWdFqnMnGmTtFoRRy8HZwI8nBHh62pynr6smbvCHWEeYS26hpvCDQGuASioLpAyZwAgxt8Nl8pVuFSuQqWqDu7K9hH2aB+rpKte9+7dERYWhpycHNx3330OWcPIkSOxe/duvPTSS0hMTERycjJ69+6NTZs2YcCAASaDM66urvjzzz8RFxeH77//Hlu3bkWHDh2wdOlSzJo1Cx9++CH8/f0bHJeSktKgFFppaanRNpVKJY31JdeA+t44psTFxTE4Q0REREREREREbUpWcTUqVHUAgO6hLcuaAQCZTEC0vxuSL1Ygs6gaGq0IuYx9mNuS1MJKlFarAQD9In1N9skuqy1DTmUOAF3WjExoeWGvKM8oFFQXoKimCBW1FfBw9kC0vzsOpemen6YXVqGHmaydtobBGWoT9u7di5ycHIwaNUoqL2ZLoiia3D506FD8+eefLTrGy8sLy5Ytw7Jly4y2f/fddwCAfv36NThmzZo1WLNmTbPXGxcXh7i4uGbPJyIiIiIiIiIiaisM+810C2ndg/MYf3ckX6xArUaLnJJqRPq5NX0Q2U1iRok07h/lY3KOJSXN9KK9onEk/wgAXWmzHv49EHNF3xkGZ4haYOnSpQCARx55xGrn3LJlC9LS0uDm5oaPP/7Yaue90tGjR9GnTx/IZPWR3hMnTuDZZ58FAMyZM8dm127KBx98gCNHjiAvL89hayAiIiIiIiIiomubvqQZAHRrReYM0LDvDIMzbUtiRn3lH3PBGX1JM6D1wZkoL4O+M2W64Ey0v+Fno/30nWFwhhxm7969WLlyJU6ePImDBw9i4MCBmDFjhtXOf+zYMRw7dgze3t42Dc7Mnj0bZWVl6N27N3x9fZGWloaEhARoNBo8+OCDGDFihM2u3ZS//voLW7duddj1iYiIiIiIiIiIDDNnuoe2PnNGL62wCiO6WLwssiJ95oxMAPpE+Jicc7borDRudXDG0yA4U54BwPizkV7YfnoSMThDDpOcnIxVq1bB09MT06ZNw4cffmiUfaL39NNPo6CgoFnnXLNmDebPn4/58+dbebXmPfroo1i/fj2OHj2K4uJiuLm5Yfjw4bj33nsxb948u63DlC1btjj0+kRERERERERERPrMGaWTzOhBekvEBNRnyqQVtJ8H8NeCqto6nMnTBeC6BnvCQ2k67GBY1qyLb+uia9Fe0dI4vSwdABBlUNYsjcEZoqY1N4iyceNGpKenN+ucLenjYi0PP/wwHn74Ybtfl4iIiIiIiIiIqK2rqq2THpjHhnhCLmvYKL45OhiWNWNwpk05nlUK7eV23f2jfE3O0Wg1OFeiC85EeETAXdG6IF2kZ6Q0zijTZc54uyrg5+6MospapLejsmYN0xSI2pi0tDSIotisf0RERERERERERNR2nM0rh/6xXfeQ1jdqD/Z0gdJJ9zi7PWVHXAv0Jc0A8/1msiqyUF1XDQCI9Ytt9bXcFG4IdA0EUF/WDACiL2fP5JbWoEatafX57YnBGSIiIiIiIiIiIiKyCX1JMwDoFurZ6vPIZIJUEi2zqBoaLb+o3VYczSyWxgPMBGeSi5OlcWv7zehFeen6zhTVFKGitgKAcd+ZjKL2kT3D4AwRERERERERERER2cTp3DJp3M2CzBmgvu9MrUaLnJJqi85F1iGKIo5czpzxdHFCxwAPk/POFp2VxpYGZ4z6zpTr2mFE+7e/nkQMztgRy25Re8HPKhERERERERERWcOZ3PrMme4WZM4AxtkRLG3WNuSU1uBSuQoA0C/SBzIzPYUMM2difVtf1gww7juTWZYJwPiz0V76zjA4Ywcyme5t1mjaR607Iv1nVf/ZJSIiIiIiIiIiailRFHE6T5c5E+rtAh83Z4vOFxNgEJxpJ9kRV7vEjPqSZv0jfczO0wdnXJ1cEe4ZbtE1jTJnykxkzrSTwB2fvNqBQqGAXC5HdTVT7ah9qK6uhlwuh0KhcPRSiIiIiIiIiIioncouqUZ5TR0AoFuIZVkzgHF2RGpB+8iOuNolXi5pBgD9o3xNzqmorUB2RTYAoItvF8gEy8ISUZ5R0jijPAMAM2fIDEEQ4ObmhtLSUmbPUJun0WhQWloKNzc3CILpNEQiIiIiIiIiIqKmGJc0s6zfDFDfcwYA0ttJdsTVzjBzpp+ZzJlzJeeksaUlzQDjsmYZZbrgjI+bAl4uTgCA9KL28dlwcvQCrhVBQUFIS0tDeno6/Pz8oFQq+eCb2hRRFKFSqVBUVAStVougoCBHL4mIiIiIiIiIiNqxM5dLmgFANysEZ4I9XeCikKFGrUUqgzMOp6rT4GSO7nfcIcAdvu6my9YlF9X3m+nq29Xi67op3BDkGoT86nwpc0YQBMQEuON4Vimyi6tRW6eFs1Pbzk1hcMZOnJ2dERERgYKCAuTm5jp6OURmubu7IyQkBM7OltUAJSIiIiIiIiKia9tpw8wZK5Q1k8kExPi740xeOTKLqlCn0cJJ3rYfwF/NTueWo7ZOC6DxfjNni89KY2sEZwAgyisK+dX5KKopQnltOTydPRHtrwvOaEUgq7gKHQM9rHItW2Fwxo7c3NwQFRWFuro61NXVOXo5RA04OTnByYm3BSIiIiIiIiIistzpy5kzzk4ydAhwb2J28+iDM2qNiJySGkQZNIIn+zIsadY/ysfsvOTi+syZLr5drHLtaK9oJFxMAKArbdYzoCdi/A3L3jE4QybwATgRERERERERERFdzaprNUgr0JUe6xrsYbUMl2iDvjNphZUMzjhQYkaJNO4f5WtyjlbU4lyxrudMuEc4PJ0tz6ACrug7U64LzkT71wcA09pB2TvmfBERERERERERERGRVSVfLIdW1I27hVjeb0avQzt7AH81S8zUZc64
KGSINVO2Lrs8G1V1VQCslzUD6DJn9NLL0gGgQeZMW8fgDBERERERERERERFZ1ZnLJc0AoHuo9YIzMQbl0VILGJxxlIIKFTKLqgEAfcJ9oDCTGWVY0izWN9Zq14/yipLGGWUZum0GwZkL7eCzweAMEREREREREREREVnV6dxyadzdTFZFa8QYZM60h+yIq9VRo5JmPmbnGQZnuvp2tdr1ryxrBgCBHkr4uTsDAI6kF6O2Tmu169kCgzNERERERERERETUZtRptHh160kM/r8d+OlYjqOXQ610Orc+c6ZbKzJntKIW/z38X4z5fgw2Jm+Utgd7KeGqkAOA1NOG7E9f0gwA+kX6mJ13tvisNLZmcMbVyRVBbkEA6jNnBEHAqK6BAIAKVR0OpRVZ7Xq20C6DM3FxcRAEwehfSEiItF8URcTFxSEsLAyurq4YPXo0kpKSjM6hUqnw6KOPIiAgAO7u7rjpppuQlZVl75dCREREREREREREl2m0Ip7acAxr96XjUrkKH+9McfSSqBVEUcSZPF3mTLBXfTZDS45femApVp5ciYLqAnx6/FNpnyAIiL5cviqjqAp1mradHXG1SjTKnPE1O0+fOePq5GqU7WIN+r4zxapilNXqgoFjugVJ+/86k2/V61lbuwzOAEDPnj2Rm5sr/Ttx4oS07+2338ayZcvw4Ycf4tChQwgJCcGECRNQXl6fSvfEE09g8+bNWL9+PXbv3o2KigrceOON0Gg0jng5RERERERERERE1zSNVsQzG49h69H6bJn0wkqIoujAVVFr5JXVoLRaDQDoFtKyrBlRFPH2obfx3dnv6s9XmYeauhrpZ31pszqtiJySmgbnINvSaEUcyywBAIR6uyDE28XkvCp1FTLLMwEAnX06Qy6TW3UdUZ71fWcyy3TXGdUlEHKZAACIZ3DGNpycnBASEiL9CwzUpSuJooj//ve/eOmllzBjxgz06tULa9euRVVVFb755hsAQGlpKVauXIn33nsP48ePR//+/fH111/jxIkT2LFjhyNfFhERERERERER0TVHqxXxwg/H8cORbKPtVbUaXKpQOWhV1FrGJc2a329GFEW8m/Auvj79dYN92RX1n42YgPq+M6mFLG1mb+fyy1FZq0tycES/Gb0or/rgTHpZOgDA202BgZczeS4UVLbp0ndOjl5Aa507dw5hYWFQKpUYMmQIli5dio4dOyI1NRV5eXmYOHGiNFepVGLUqFHYu3cvFi5ciMOHD0OtVhvNCQsLQ69evbB3715MmjTJ5DVVKhVUqvr/GJSV6W4yarUaarXaRq+UiNoD/T2A9wIi4v2AiPR4PyAiPd4PiBqn1Yp45afT+D5B13JALhPQNcgDpy+XxbpwsQy+Ltb9xr0jXQv3hKSsEmncNdC9Wa9VFEUsP7YcX576EgAgQEA3v244XXQaAJBanIood93D+ChfpXRcysUyDO/gY73FU5MSUgulcZ9wL7O/39MFp6VxJ69OVv/Mh7uFS+PUklTp/CO7+OPg5X4z20/lYv6waKtetynNfZ3tMjgzZMgQfPnll+jatSsuXryI119/HcOHD0dSUhLy8vIAAMHBwUbHBAcHIz1dFz3Ly8uDs7MzfH19G8zRH2/KG2+8gcWLFzfYHh8fDzc3N0tfFhFdBbZv3+7oJRBRG8H7ARHp8X5ARHq8HxA1JIrAxlQZdl/UFfgRIOKuThqUqktwGrqAzE879+Ni4NVX2uxqvifEJ8ugL9qUfy4Rv2YnNjpfFEXsqNmBXapd0rabXW+GrEaG09A94N9+cDsqj+uyIHSJObpH27sOn4J/0UmrvwYy76fz9b/f6sxT+PXXUybn/Vn1pzQuPFuIX1N+teo6LmouSuP9Z/YjIjMCACCvAvSfj417TiOoOMnE0bZTVVXVrHntMjgzZcoUady7d28MGzYMnTp1wtq1azF06FAAusZQhkRRbLDtSk3NeeGFF/Dkk09KP5eVlSEyMhJjxoyBv79/a14KEV0l1Go1tm/fjgkTJkChUDh6OUTkQLwfEJEe7wdEpMf7AZFpoiji/347i90XMwAAMgF459Y+uKlvKP48k4/NaUcBAL4RXTB1XGcHrtS6roV7wgfn9wCohEIuYN4tk6GQN95d49MTn2LXifrAzIuDX8RtXW5DYn4iftjxAwDAM8oTUwdNBQBcLKvB8qS/dZO9gjB16gCbvA4ybfkHut+vk0zAfbdOgovCdGbbxu0bgUu68V2T74KXc8v6DzWluq4ay79fDgDQeGkwdZLu8yGKIr5K+wc5pTW4UCHHqHHj4a60XyhEX3GrKe0yOHMld3d39O7dG+fOncP06dMB6LJjQkNDpTn5+flSNk1ISAhqa2tRXFxslD2Tn5+P4cOHm72OUqmEUqlssF2hUFy1N1IiahneD4hIj/cDItLj/YCI9Hg/IKoniiKW/noaa/fpAjOCALw7sy9mDNB9871TUP1D3OxS1VX5t3O13hNq1BqkXu7z0SXIE24uDZ+nGvrs+Gf49MSn0s8vXPcC7uh+BwCgg28HaXtOZY70foX7OcHNWY6qWg0yiqqvyvexrSqtVuP8Jd3vt0eYFzzdXEzOE0UR50rOAQBC3UPh72795AaFQoFgt2BcrLqIzIpMo8/B2O5B+Hp/BtQaEQfSSzGpZ4jVr9/Yupqj8ZBlO6FSqXD69GmEhoaiQ4cOCAkJMUoLrK2txa5du6TAy8CBA6FQKIzm5Obm4uTJk40GZ4iIiIiIiIiIiMgyoijird/P4vN/UqVtb93aRwrMAECkX30LgXQ2fG9XzudXQHu5Cl23UM9G5646uQrLE5dLPz8z6BnM6T5H+jnANQAuct3D/8zyTGm7IAiI9nfXbS+qQp1Ga63lUxOOG/QT6h/pY3ZeTmUOKtW6v92uvl1ttp5oL10/mRJVCUpVpdL2sd2CpHH8mXybXd8S7TI48/TTT2PXrl1ITU3FgQMHcNttt6GsrAzz5s2DIAh44oknsHTpUmzevBknT57E/Pnz4ebmhjlzdH/Y3t7euPfee/HUU0/hzz//RGJiIubOnYvevXtj/PjxDn51REREREREREREV69l25Pxya4U6ec3ZvTGrEGRRnNcFHIEe+kyLjKKmte/gdqGU7n1JZ26h5gvY7U2aS3eP/y+9POTA5/E3T3vNpojCAIiPHVBu+yKbGi0GmlfjL8ugFenFZFdUm2VtVPTEjNKpHH/KF+z884WnZXGtgzORHrW3zsMA3jDOgZA6aQLf8SfzYcotr2+Ve0yOJOVlYU77rgDsbGxmDFjBpydnbF//35ER+uiZM8++yyeeOIJPPTQQxg0aBCys7Oxbds2eHrWR2rff/99TJ8+HbNmzcL1118PNzc3/PTTT5DLTdfHIyIiIiIiIiIiIsv8b8c5LP/rvPTz69N74Y7rokzOjfbTZUYUVNSiQlVnl/WR5c7klkvj7qGmgzPrTq/DuwnvSj8/PuBx3NPrHpNz9cEZtVaN/Kr6DIiYAHdprC+jRraXmFEsjftH+Zidl1ycLI27+tk+cwYA0svSpbGrsxzDO+lKqV0sUyEpp3l
9YOypXfacWb9+faP7BUFAXFwc4uLizM5xcXHB8uXLsXz5crNziIiIiIiIiIiIyDq+2p+O93fUP7BdfFNPzB0abXZ+lL8bDqYVAQAyCqvQI8y6zcTJNs7k1T8EN1XW7NcLv+LNg29KPz/c72Hc1/s+s+e7MjMi1EPXZ7yDf31wJq2gEoi1aNnUDKIo4mhmCQDAz90ZUQblB69kFJyxYeZMlFd9cDejLMNo39huQYg/ewmArrRZr3Bvm62jNdpl5gwRERERERERERG1H6IoYkV8fcbMf/7VHfOGxzR6TLTBg9+MImZGtAeiKOL05bJmAR5KBHgoG8xZdXKVNF7YZyEe7Ptgo+c0V7Yq2r/+85FWyNJ39pBeWIXiKjUAoF+kDwRBMDtXH5xRypWI8jSdHWcNhufOKDcOzowx6Dvz19m213eGwRkiIiIiIiIiIiKyqcyiauSU1gAAhnfyx30jOjZ5TJTBw/d0PnxvF/LLVdLD++4msmZKVaXSQ/tuft3wcL+HmzynueBMB4OyZmmFDN7ZQ2KmQUmzSB+z86rUVVIWS2efznCS2a6Al+Hn48rMmQhfN3QN9gAAHM0sQWGFymbraA0GZ4iIiIiIiIiIiMim9qcWSuNhHf2bdUy0Qdmq9CIGZ9oDfdYMYLrfzOGLhyFC15h9cMjgRjMv9MwFZwI9lXBz1vUPT2PPGbtIzCiRxv2jfM3OSylJkX7PtixpBgAuTi4IcQ8BAKSXpzfYr8+eEUVgV/Ilm66lpRicISIiIiIiIiIiIps6mFokja/r4NesYwzLmmUyONMunM4tl8bdQhpmzhy+eFgaDwwe2KxzhrmHQSboHmNnVWRJ2wVBkAJ4WcXVUGu0rVozNZ8+OCMIQJ9I8/1bzhaflca2Ds4A9aXNSlWlKFWVGu0bG2tQ2uxM2yptxuAMERERERERERER2dSBy5kzzk4y9G2kHJIhHzcFPF105ZBY1qx9OJPXeOZMwsUEaTwwqHnBGYVcgVD3UADGmTMA0CFAF8Cr04rILq5u8Xqp+aprNVJmVJcgD3i5KMzO1ZeuA4BYv1ibry3Ky6DvzBWlzQZG+8Lr8n3k7+RLqGtDQTwGZ4iIiIiIiIiIiMhmckqqkVmke3DeP9IHLgp5s44TBAFRl7NnskuYGdEenLmcOeMkE9Ap0MNoX3ltOc4UnQEAdPHtAh8Xn2afN8IjQjqHYWZEjEHpu1T2nbGpkzmlqNPqSpX1jzRf0gwwDs508eli03UBQLRntDS+srSZk1yGkV0DAQBlNXU4nF6MtsLi4ExVVRWqqsxHrpcvX44RI0age/fumDp1Kn7++WdLL0lERERERERERETtxAGDfjNDmtlvRi/aXxec0WhF5JQwM6ItU9VpkHKpAgDQOcgDzk7Gj54T8xOhFXUBtkHBg1p07gjPCGlsmD0TE2DQl4h9Z2wqMaM+qNE/ysfsPFEUkVykC84EuQW1KAjXWoaZM5llmQ32jzEobRZ/tu30nbEoOPPTTz/B09MTYWFhKC8vb7B/wYIFeOKJJ7B3716cPXsWf/zxB26++Wa8/fbbllyWiIiIiIiIiIiI2okDF+r7zQxtZr8ZvSg/g4fvLG3Wpp3Pr5AyK5oqadbS4EykZ6Q0NgrOGGTOpPHzYVP6fjMA0D/KfOZMXmUeytW6WEGsr+1LmgH1PWeAhpkzADA6NhCCoBvHt6G+MxYFZ/744w+Ioojp06fD09O4wdPu3buxZs0aAICbmxv69+8PFxcXiKKI//znP0hKSrLk0kRERERERERERNQOHEjVBWcUcqHRh7qm6DNnACC9iA/f2zJ9STMA6Bbi2WD/4bzD0nhgcPP6zeiZDc4E1H8+Upk5Y1P64IyH0gmdgzzMzjMsadbVt6utlwUAiPSKhABd9OXKnjMA4O+hRN8IHwDA2YvlyG4jWXgWBWf2798PQRAwZsyYBvs+++wzAEBYWBhOnz6Nw4cP48yZM4iMjIRGo8Gnn35qyaWJiIiIiIiIiIiojcsvq5EemveN8IGrc/P6zehF+9U/fM9gT5E2Td8sHgC6XZE5U6WuQlKh7sv6Hb07wt+1ZeXtzAVnAj2UcL/8mUrn58NmckurkVdWAwDoG+kNuUwwO/ds8VlpbK/gjFKuRIh7CAAgo7xhcAYAxnarL232VxvJnrEoOJOfr3sRXbo0bOrz+++/QxAEPProo4iI0NUEjIyMxKOPPgpRFLFr1y5LLk1ERERERERERERt3P7U+pJmQzq2rKQZAEQZZs6wbFWbdiavPnOme6hx5szR/KPQiBoALS9pBpgPzgiCgOjLpc0yi6uh1mhbfG5qmmFJs36RPo3ONcycifWzT1kzoL60WamqFKWq0gb7DYMzbaW0mUXBmUuXdM1zPDyM05hOnTqFgoICAMBNN91ktG/QIN0fX1pamiWXJiIiIiIiIiIiojbuwIVCaTykQ8uyJQAg1NsVCvnlckUsa9amncnTZc74uzsj0ENptM+o30xIy4MzHs4e8FXqSuJllWcZ7esQoAvOaLQisorbRrmqq83RzBJp3D+y8dKE+uCMQqZAtFe0LZdlJMrLoO9MWcO+Mz3DvBDkqftc7k0pQI1aY7e1mWNRcEYu16WMFRUVGW3/559/AACBgYHo1q2b0T5fX90vr6amxpJLExERERERERERURt38HLmjFwmYEB0y/rN6I+L9NVlz2QUVUEURauuj6wjv7wGBRW1AIBuoZ4QBOOyV4cvtr7fjJ4+eya/Kh8qjUrabth3Jo2lzWwiMaNYGveL8jE7r6auRgqMdPbpDCeZk62XJjEMBJkqbSYIAsbE6rJnatRa7EspbDDH3iwKzoSHhwMAjh49arT9l19+gSAIGDFiRINjSkt1KUUBAQGWXJqIiIiIiIiIiIjasMIKFc7lVwAAeoV7w0PZuge1kZf7zlTVaqQAALUtZ3INSpqFGPebqamrwYmCEwB0paeC3ILQGuGeumfRIkRkl2dL2/VlzQAgrYDBGWtTa7Q4nqV7ph/l54aAK7KiDKWUpEAr6krL2avfjJ5h6buMMtN9Z8a0sb4zFgVnRowYAVEU8eGHH0plzA4dOoTff/8dADBp0qQGx5w+fRoAEBISYsmliYiIiIiIiIiIqA07aNBvZmiHlveb0Ys26DuTUcSH723R6dwyadwt1Dg4c/zScai1agCtK2mmZ67vjL6sGcDgjC3sTSmEqk4XcOnfSNYMAJwtPiuN7R2cMcycMVXWDABu6BIglUn860y+wzPxLArOPPTQQ5DJZEhNTUXHjh0xaNAgjBo1CnV1dfD19cXtt9/e4Ji//voLgiCgX79+llyaiIiIiIiIiIiI2rADBsGZIR1bH5yJ8qsPzqQXsu9MW2TYk6R7qKfRPqN+M8HWD87EGGbO8PNhVRsPZ+H+Lw1+fzGN/x0fv3RcGsf6xdpsXaZEeEZAgC7wYvj5MOShdJJ6X2WXVEuZfY5iUXBmwIABeOeddyAIAioqKnDkyBHU1NRAoVDg888/h6en8R9iaWkpfvnlFwDAhAkTLLk0ERERERERERERtWH7L+h6OsiEph/qNs
awbBWDM22PRiti3+XftZeLE7pdUdbM1sGZAA9nqWQee85Yh1qjRdyPSXh6wzHUXs6aGRzji5kDIxo97kDuAQCAQqZA74DeNl+nIaVciVD3UADmM2eAtlXazOKOPIsWLcL48eOxceNG5OXlITQ0FHfccQdiYxtGxnbu3InBgwcDAMaPH2/ppYmIiIiIiIiI6CojiiK0oq4RPLVfJVW1OHtR14ekR5gXvFwULTpe9znQQi6TX1HWjMGZtuZUThlKqnRly4Z3CjD6263V1ErZFOEe4Qj1CG31dcwFZwRBQLS/G5JyypBVXA21RguF3KKchGvapXIVHv7miFFZwrlDo/DKjT3h7GT+fc0sz0RWRRYAoF9QP7gp3MzOtZVIr0jkVOagrLYMJTUl8HHxaTBnbLcgLPn5FABdcObBUZ3svMp6FgdnAKB3797o3bvpSNjNN9+Mm2++2RqXJCIiIiIiIiKidkoURRRW1iKtoBKpBZVIK9T9b2pBFdILKyEAeGdmX0zt3foHueRYB1OLoG/noC8jZEpJTQnSy9ORUZaBtLI0ZJRlIL0sHRnlGdBoNXhhyAuYGlP/PDGdmRFtzu7zBdL4+i4BRvtOFJyASqMCAAwMHmjRdQJdA+Eid0GNpqZB2aqYAHck5ZRBoxWRWVSFjoEeFl3rWnUsswQPfn0YuaU1AABnuQxLpvfE7YOjmjxWnzUDAENDh9psjY2J9oyW1pFenm4yONMhwB0dAtyRWlCJw+nFKK1Sw9utZcFja7EoOLNgwQIAwJQpUzBz5kyrLIiIiIiIiIiIiK4uuaXV2JCQhfP5FVIgprymrtFjVuxMYXCmHTPqN9NBV9KssLoQm89vRkpJihSMKastM3cKAMBnxz/DjC4zEOylxMUyFTKKqm26bmq5PYbBmU7GgbiEPOuUNAN0GTIRnhE4X3Ie2RXZ0IpayARdJkeHK0rfMTjTchsSMvHSlpNSGbNgLyU+mTsQ/aN8m3X8/tz90nhI6BCbrLEpUV71QaSMsgz0Dexrct6Y2CCkFqRCoxXx97lLmNY3zF5LNGJRcGbt2rUAgNtvv90qiyEiIiIiIiIioqvPwq8O43hWaZPz5DIBMgFQa0Sczi1DjVoDF4XcDiskazuQqutBIgjAdZeDMy/ufhF7c/Y2eaxMkEEuyKHWqpFdkY3C6kJE+7njYpkKBRUqVKrq4K60SkEgslCNWoNDabpAXJi3CzoEuBvtN+o3E2JZcAaAFJxRa9XIr8pHiHsIABiVvkstqMQYi6907VBrtHj951NYu6++T8ugaF98PHcAgjxdmnUOrajFwdyDAAAPhQd6+ve0yVqbEuVpEJwpzzA7b2y3IKzakwoAiD+T3z6DM4GBgbh06RKCg4OttR4iIiIiIiIiIrqKnM+vMArMCAIQ7uMqlZaJ8b/8vwHuiPB1xUubT+D7hCzUaUWczC61qJE8OUZZjRqncnQZMbHBnvBxc0ZBdQH25eyT5ggQEOIegiivKER7Ruv+10v3v5Eekfgg8QOsSVoDQFcaK9LPFwcvBwEyiqrQPdSrwXXJ/o6kF0N1OdPi+s4BEIT6fjNqrRrHLh0DAAS7BSPCo/Fm8s1heI7M8kwpOGMYFEpj6btmu1SuwsPrjkh/W0Dz+stcKbk4GcWqYgDA4JDBcJI5Jnga7RUtjdPL0s3Ou66DH9yd5ais1WBn8iVotKJD+pxZ9C716NEDu3btQnp6Ovr162elJRERERERERER0dXij6Q8abxofFc8OLojlE7ms2H6Rfri+wRdU+mjmSUMzrRDh9OKoZX6zeh+f39l/AURuo1zu8/F4wMeh4uT+W/l9w6o7299/NJxRPtPkX5OL2Rwpq0w7DdzwxX9Zk4VnkJ1na4M3cDggUaBm9aK9IyUxpnlmRgcMhiAcXDmTF65xde5FhzNLMGDXx1GXlnL+8tcaX9OfUkzR/WbAXSZVTJBBq2oxbnic2bnOTvJcEOXAPyRdBFFlbU4llWCAc0s32ZNzQ9/mTB37lyIoiiVNyMiIiIiIiIiIjJkGJyZMSC80cAMAPSP8pHGiRklNloV2dL+yyXNAGBIR10Pkj8z/pS23dTppkYDMwDQJ7CPND5ecNyobFVGETMj2grDfjPDGus3Y4WSZkDD4Iyev4cSEb6uAHRBhxq1xirXu1oVVKhw18oDUmAm2EuJ7xYObVVgBjDuN+PI4Iyz3BmxvrEAgPMl51FSU2J27thuQdJ455l8Wy/NJIuCM/fccw/GjRuHrVu3YvHixRBF0VrrIiIiIiIiIiKidi67pFoqadYr3AuRfm5NHAF0DfaEm7MugJOYUWzT9ZFtHLhQXyLpug5+KFWVSv0owj3C0c2vW5PnCHEPQZCb7uHpyYKTiPBVSvvSC6usvGJqjdIqNU5k6/6+Y4M9G/QnMeo3E2zb4AwADOmgCw7V1mlxLLPEKte7Wv16IhflNXUAgAFRPvjp0RvQv5WZI7WaWhzJPwIACHINQgfvDlZbZ2sYBgIP5x82O29El0BpnOigz4tFZc3++ecfPP3007h06RJee+01rF+/Hrfffjv69OkDX19fyOWNfxNi5MiRllyeiIiIiIiIiIjasD9O1mfNTO4Z0qxj5DIBfSN8sO9CIXJKa5BXWoMQ7+Y1pSbHq1TVSQ/sOwd5IMBDiZ9StqFO1D0IHhc1rtnlrfoG9sX29O2oVFdCVNR/sz2jiMGZtmDfhUKpfN31nY1LmtVp65CYnwgA8HfxR4xXjFWuGe4RLpWtahCc6eiHTUd0JREPpBZJWVvU0K8ncqXx0hm9GwTWWuLYpWNS+bqhYUOtUr7OEoOCB+GrU18B0GVvjYsaZ3JeqLcLAjycUVBRi5PZpRBF0e5rtyg4M3r0aKMFJycnY8mSJc06VhAE1NXVWXJ5IiIiIiIiIiJqw343KGk2uVfzgjOArrTZvgu60lhHM4sx2TvU6msj2zicXgzN5Sf2+n4zO9J3SPvHR49v9rn6BPTB9vTtAIC0ilPwVHqiXFXHzJk2Yo9RvxnjQMjZorOoVOvKzw0KGWS1h94KuQIhbiHIqcxpEJwZ2qF+DQdTi648lC67VK6S3p+OAe6IDfa06HxtpaSZ3sDggRAgQISIwxfNZ84IgoAeYd74O/kSiqvUyC2tQZiPqx1XamFZMwAQRbHV/4iIiIiIiIiI6OpUUKHCobTLDwAD3dE5qPkPAA3L67DvTPty4Ip+M1XqKuzJ2QNAl0HRN7Bvs89l2HfmRMEJRF3uO5NdUo06jdZKK6bW2pOiC844yQRc1+GKfjM2KGmmpy9tVl5bjlJVaf12P1eEXs6yO5xeDDU/IyZtO5UnZTxN6R1iceDsQO4BaTwkdIhF57IGb6U3uvh2AQCcLT6L8tpys3N7hXlJ45PZpWbn2YpFmTPx8fHWWgcREREREREREV1Fdpy6CP13c5tb0kyvX6SPNGZwpn0x7DcztIMf9ubshkqjAgCMjRoLmdD874p39+8OJ8EJdWIdjl86jmj/m5CUUwaNVkROSY0UrCH7yympxoVLusyYfpE+8FAaP2ZOyLNdcCbCMwIH8nQBg
azyLHgrvQHoMiGGdPDDlqM5qFZrcDyrFAOjW9dH5Wr224n6jMYpvSzLSiyvLcfJgpMAgI7eHaU+UY42KHgQkouToRW1SMxPxMgI0+1VeoV7S+OknDJMbOF/qyxlUXBm1KhR1loHERERERERERFdRVpb0gwAAj2ViPRzRWZRNY5nl0Ct0UIht7gADNlYda0Gx7JKAAAdAtwR5OWCHccMSppFNb+kGQC4Ormiq19XnCo8hZSSFPQzaGuSXlTJ4IwDGZY0u7LfjEarkRqx+yp90cmnk1WvHeEZIY0zyzPRM6Cn9POQjv7YcjQHgC6Li8EZY8WVtVLJyCg/N/Q0yBxpjYS8BGhEDYC2UdJMb1DIIHxz5hsAujWaC84Yvv6kHPtnzvC/akREREREREREZFVlNWrp4W2Ytwt6G3w7ubn6R+oeqtaotTibZ74sDbUdiRnFUGvq+82oNWrsytwFAPB09sTgkMEtPmefAF1pMxEinFyzpO3sO+NYxv1mjIMz50rOSaWkBgYPtHqTdX1ZMwAN+s7o+xwBxllcpLP91EWpJ9SUXpaXNGtr/Wb0BgYPlMaGJfauFOXnBk8XXf7Kyewym6/rSgzOEBERERERERGRVcWfyZce0k9q5QPA/lE+0jgxo9haSyMb2m/QhH1IRz8cyDuACnUFAGB0xGgo5IoWn9Ow70yVcEEaZxQxOOMooihiT4ou+8LNWY6+ET5G+41KmoVYt6QZ0HhwpkOAOwI9lbp1pBWxN9EVfj2ZK42n9LaspBlQ329GJshs8rtuLT8XP3Ty1mVsnSo8hUp1pcl5giCgR6gueyavrAYFFSq7rRGwYnCmrKwMq1atwv33349p06Zh3LhxSE9PN5qTk5ODU6dO4cKFC2bOQkRERERERERE7d3vJw1KmrWyhn//qPpyROw70z4cTC2Uxtd18MeO9PqSZuOix7XqnH0D+0rj3Jqz0ji90PTDVrK9c/kVuFSue4g9pIMfnJ2MHzEfvnhYGhtmMFhLY8EZfd8ZAKis1SApx/7ZEG1VaZVxRmPfiJZnNBrKr8pHSmkKAKBXQC94OntavEZr0geLNKIGR/OPmp13Zd8Ze7JKcOajjz5CVFQU7r//fqxatQq//PILdu7cicpK45vkrl270KtXL/Tq1QtFRUwrIyIiIiIiIiK62lTXarDz7CUAgL+7MwbF+DVxhGndQz3hfLnPzNHMEmstj2xEVaeRgmgRvq4I8XJGfGY8AF3vmOFhw1t13kjPSPgofQAA50tPQSHXbWdZM8fZfc58vxlRFKXgjKezJ7r4dLH69T2dPaXPxJXBGeCK0mYGAcNr3Y7TF6WMxim9Qy0uaabPmgHaVkkzveaWNusV7ri+MxYHZ+Li4vDYY4+hrKwMzs7OGDjQfDT09ttvR2hoKFQqFTZt2mTppYmIiIiIiIiIqI35+9wlVKt1DaIn9gyGXNa6B4BKJzl6Xn5odqGgEsWVtVZbI1nfscxSqOp0JaSGdPBHYn4iimp0X86+IfwGuDq5tuq8giCgd0BvAECxqhih/rqgTEZRFURRtMLKqaUa6zeTUpKCYpWuDOHAoIGQy+Q2WYM+eya/Kh8qjXEpqiEd/aUx+87U+82gpNnU3q3LaDTUVvvN6A0Kri+zZlhq70o9wwwyZ+zcd8ai4ExiYiKWLFkCAJg7dy7y8vJw8OBB8xeTyTBz5kyIoojt27dbcmkiIiIiIiIiImqD/jAoaTaplSXN9PpH1pc2O5pVYtG5yLYOXKjPUBjS0Q9/Zvwp/TwuqnUlzfQM+854++oeMFfValDIgJ3dqTVaHLjcWyjAwxmxwcalrAwzFGzZgyTCMwIAIEJEdkW20b4uQR7wc3cGABxMK4JGyyBeeY0afyfrgmrBXkqje2triKIoBWdc5C5G5QfbikC3QER7RQMAThaeRHVdtcl5HQPc4aLQhUlOtqfMmeXLl0MURQwbNgxffvklvL2brlM3bNgwAMCJEycsuTQREREREREREbUxtXVa7Dh9EQDgqXTC8E4BTRzRuP5RPtKYfWfaNv0DewAYEuOHHRm6fjNOMieMjBhp0bkNgzMyl/oe1yxtZn/Hs0pQoaoDAAzvFNCgNJZRcCbYdsEZw74zWeVZRvsEQcB1l8spltfU4Uwe+878dSYftRpdZtvkniGQtTKjUS+1LBX5VfkAdOXDnOXOFq/RFvSfwTptHY5fOm5yjpNchm4huizN9MIqlNWo7bY+i4Izu3btgiAIeOSRR5p9TExMDAAgOzu78YlERERERERERNSu7L9QiLIa3YPbcd2DGjQKbynj4EyxReci21FrtDicrvv9hHq7oFxMRV6lLoNqaOhQixuF9w7oDQG6h8kVwgVpe0ZRpblDyEZ2n6vPkLrBRL8Zffkod4U7Yv1ibbaOCI8IaWyy70xHg74zLG2G307UZzRO6R1q8fn257TtkmZ6rek7cyrHfsE8i/4LmZurSyOMjW3+H5pSqQQAqFSqJmYSEREREREREVF78nuS9UqaAUC4jysCPXXPko5mlkDL8kRt0vGsUqnP0JAOfvgz03olzQBdA/iO3h0BAAW1qYCg+2Y7M2fsz7DfzPVX9JtJK0tDYY0ueNM/qD+cZE42W4dh5ozJ4EwHg74zqYUN9l9LKlV1iD+ry3IJ8HDG4Bi/Jo5omlG/mbC2G5wZHDJYGjfWd6aXYd+Z9hKccXbWpSup1c1P9dEHdHx8fCy5NBERERERERERtSEarYhtSbqSZkonGUbFBlp8TkEQ0D/SB4CuPNGFggqLz0nWZ/jw+7oOftiRritpJkDAmMgxVrmGvrSZVtRA7qKryJPB4IxdVarqkJipy5CK8XdDuI+r0X57lTQDmg7OdAvxhLerAgBwMLXomg7s7jx7Cao6XUmzST1DILewpFmdtk4KdPgqfdHVt6vFa7SVEPcQhHuEAwCOXzoOlcZ0wkhPw+BMtv36zlgUnImI0KWPJSUlNfuYbdu2AQA6d+5syaWJiIiIiIiIiKgNOZJRjIIK3YOvUV0D4eZsnW/N94+qb1x9hH1n2iTDslFhgWVIK0sDAAwIHgB/V38zR7WMUd8Z1wwAQHoRgzP2dDCtCGqNLshxfeeG/aQMMxMGhdg2OBPoFgilXJdVZyo4I5MJUoZIcZUa5/Kv3cDurydzpfFUK5Q0O1V4CuXqcgDAdaHXQSZYVr7S1vSBwlptLU5cOmFyTtcQDzhdDlqdzGknwZmxY8dCFEWsXr26WfMvXLiAlStXQhAETJgwwZJLExERERERERFRG/LHyfqSZpN7WV7STM+470yJ1c5L1lFn0G8mwEOJs+V7pX3jo8Zb7TqGwRk3T13mDMua2deec/UlzUz2m7mcOePq5Ioe/j1suhaZIJP6zmSXZ0MrahvMGWrYd+YaLW1WXatB/BldSTNfNwWGdLBySbM23G9GzzBQaK7vjNJJji7But5Y5/MrUF2rscvaLArOPPLII3BycsKePXsQFxfX6NyEhARMnDgRFRUVUCqVWLhwoSWXJiIiIiIiIiKiNkIURanfjJNMwLhuwVY7d58Ib+ir8CRmFFvt
vGQdp3LLUKGqA6Brwv5nhnX7zeh18u4ENyc3AIDMRZc5U1ChQuXla5Pt7UnRBTgEARjWyTgjKqsiC/lVuiBA38C+UMgUNl+PvrRZrbZWurYho74zBtld15JdyZdQdTnQMKlnCJzklme5HMg9II3bRXAmuOngDAD0CvMCAGhF4EyeffrOWPTb6Nq1K15++WWIooglS5ZgyJAhePvtt6X9v//+O9566y2MGzcOQ4YMQWpqKgRBwJtvvonQUMtTqIiIiIiIiIiIyPGScsqQVVwNQPfQ1tvNeg9m3Zyd0C1E99As+WI5H8a3MYYPvbtHqHG66DQAoKd/T4R6WO/5n1wmR++A3gCAOlkxBCdd6aHMYmbP2ENBhQqnc3UPrHuFecPHzdlov1FJMxv3m9GL8IyQxqZKm/UI84KnUlde8UBqIUTx2us785tBSTNrZDRW11UjMT8RABDuEW70O2irwj3CEeKue+3H8o9BrVGbnNcr3KDvTE47CM4AwMsvv4z//Oc/EAQBhw4dwgsvvABB0H2d4ZlnnsGLL76InTt3Sh/+V155BY899pillyUiIiIiIiIiojbijyTblDTT05c204rA8Sz79QOgpiWk1wdnVMrj0nh8tPVKmukZljaTu+oexrO0mX3sTakvC2aq38zRS0elsa37zejpM2cAIKs8q8F+uUzAoBhdz6qCilqkXKq0y7raClWdBn+e1mUUebk4YXinhr+3lkq8mAi1VhfcaA9ZMwAgCIIUMKzR1CCpMMnkvJ6XM2cAIMlOfWes0q3ntddew/79+zFjxgy4urpCFEWjfwqFAlOmTME///yDV1991RqXJCIiIiIiIiKiNuL3y/1mBAGY0MN6Jc30+kX6SOPETJY2a0tOZuu+Ye7uLMexon+k7dYsaaanz5wBALmrrrRZBoMzdtFYvxkAOF2oy5iSCTKb95vRaypzBgCGdKwvbXYw9doqbbb7XIFUcnBCjxA4O1keCjDqNxPWPoIzQPNKm3UP9cLlnBPpvtYauaXVeOSbw82a69Tqq1xh0KBB2LhxI+rq6nDq1Cnk5+dDo9HA398fPXv2hKurq7UuRUREREREREREbcT5/Aqcy68AAAyK9kWQp4vVr9E/ylcaJ2aUWP381DqlVWpkl+jK2XUJFXE0/ygAXX+YDt4drH693oH1wRnZ5eBMetG1lQ3hCKIoYvd5XXDG2UkmZaPoqTVqnC85DwCI8YqBq5N9ngMbZs6YC85c18FPGh9ILcScIVE2X1db8euJ+ozGqb2tk9FoGJwZEjLEKue0h4HBA6VxwsUE3Nf7vgZz3JVO6BDgjguXKnE2rxxqjRaKVvTo+f1kHnaeLWh6IqwYnJFO6OSEPn36ND2RiIiIiIiIiIjaPcOSZpN6Wr+kGQB0DHCHl4sTymrqkJhRAlEUpbL65DhJufWlfzz9z0Ks1rU1GBdt/awZAAhwDUC4RziyK7Ihd8kGoGFZMzvIKKqSgnCDon3hopAb7b9QekEqddXNr5vd1hXuEQ4BAkSIZoMzvcO94eYsR1WtBgcuFF0z947aOi22n9Ldmz2UTrihi+UlzYprinGm6AwA3e/Z18W3iSPajmivaAS4BqCgugCJFxNRp62Dk6xhaKRXmDcuXKpErUaLcxcr0MOg1Flz/WYQFGuKVcqaERERERERERHRtckewRmZTEC/KH3vCBWyiqttch1qmVMGTbPLZInSeHyU9fvN6On7zggyNWQuecgoYnDG1vRZM4DpfjOni05LY3sGZ5zlzlKjd3PBGYVchoHRuntHXlnNNfN52ZtSgLIaXUmz8d2DoHSSN3FE0w7mHYQIXQC2vfSb0TPsO1NVVyUFma7UK9yyvjP55TU4lN788nkMzhARERERERERUatkl1TjeJbuAVbPMC9E+rnZ7Fr9jfrOlNjsOtR8SfrgjKwK6VXHAABh7mE2fUDfN7CvNJa7ZiC7uBp1Gq3NrkfAnvON95sxfNBtz+AMUF/arKy2DKUq0w/ThxiWNrtwbfSdMczemNwr1CrnNOo3086CM8AVfWfyTPed6RnmLY2Tclred+aPpIsQxebPt6is2YIFC1p8jCAIcHFxgbe3N7p06YKhQ4eie/fuliyDiIiIiIiIiIgc4I+TBg8AbZQ1o9c/ykcaJ2YU46a+YTa9HjVN/81ypddZaEQNAF1JM1uWjeoTUN9OQe6SgZriYcgpqUGUv+0Cg9cyrVbE3pRCAICXixN6hXs3mHO6sD5zpruffZ/zRnpG4mDeQQBAVnkWvJUN1zeko7803p9aiFmDIxvMuZrUabTYdrmkmZuzHKNjA61y3gO5BwAACpkC/YP6W+Wc9jQoxCA4czEB83vNbzCnp0EZs5PZLc+c+e1EbovmWxScWbNmjVVutoMGDcKyZctw/fXXW3wuIiIiIiIiIiKyj9+TDL+dbdvgTD/DzJmMEptei5pWo9Yg5VIlAMDL/wxqLm+3ZUkzQJeZ4SxzRq22FnJXXSmrjKIqBmds5FRuGUqqdP1khnXyh1xm/CxYK2pxtvgsACDEPQQ+Lj52XV+EZ4Q0zqzIRM+Ang3m9InwhtJJBlWd9prInDmQWoTiy7+zMd2CGvQIao2s8iypdFzfwL5wU7S/v7eO3h3h5+KHopoiHLl4BBqtBnKZ8Xvj4+aMcB9XZJdU41RuGbRaETJZ8+IfhRUq7L+gC2RG+rnCdKE9YxaVNYuKikJUVBQCAgIgiqL0z9nZGcHBwQgODoazs7O0HQACAgIQEREBLy8vafuhQ4cwatQorFu3zpLlEBERERERERGRnRRUqHAoTfegs2OgOzoHedj0ej5uzugY6A5A1+tEVaex6fWocWfyyqHRioBQi1rnUwAAfxd/o7JjtqCQK9DdX5edIVMWAPJKpBdV2vSa17LdTZQ0yyrPQqVa9/7bu6QZUF/WTL8WU5ROcgy43LMqu6QaWcVXd9+ZXw2yN6ZaqaSZPmsGaJ8lzQBdRa+BwQMBAOXqciQXJ5ucp+87U1WrQWph8+8t205dhPZySbOJPZr3ZQWLgjNpaWnYvHkzPD094ezsjEWLFiExMRGVlZXIyclBTk4OKisrkZiYiCeeeAIKhQIeHh7YvHkziouLkZmZibfeeguenp7QarW47777kJnZnJgSERERERERERE50o5T9bX1J/cMsWkpK73+kboHrLUabav6AZD16EuaOXkkQwvdt/THRo1t8E10W+gTaFjaLBMZhVf3w3ZHMuw3c72J4MzpIseVNAOuyJwpN/9ceUjHa6PvjEYr4o/LGY0uCpnVS5oBwNCw9hmcASAFZwBdaTNTerWy74xhUGxij+BmHWNRcObixYuYOnUq8vLyEB8fj/feew99+/aFTFZ/WplMhr59+2LZsmWIj49HXl4epk6ditzcXISHh+OZZ57Bzp074erqitraWnz44YeWLImIiIiIiIiIiOxgx+mL0tjWJc30jPvOlNjlmmSa/qGlk8cpaZutS5rpGQVnXDOQzuCMTdSoNVJ2XJi3CzoEuDeYc6bojDR2dOZMo8GZDvV9Zw6mXr3BmUNpRSioqAUAjO4
aBHelRV1NAOhK1x3I0wVnPBQe6OnfsHRcezEouL7vzOGLh03O6Rle33cmqZl9Z4ora6XeTBG+ruhh0LumMRYFZ9577z3k5eXhySefxLBhw5qcP2zYMDz55JPIz8/HO++8I23v378/FixYAFEUsX37dkuWRERERERERERENiaKIo5mlgAAfNwU6G2iSbgtGAZn9Ncnx9AHZ2SX+744yZyMGm7bUt+A+tJpctdMpBcxOGMLR9KLUaPWAgCGdw4wmR3n6MwZL2cveCt195/GgjP9o3zgLNc9Cj+QWmiXtTmCYUP6Kb2tEzRPLk5GUY0uoDUoZBCcZJYHfByli28X6fNy+OJhaEVtgzmGmTMnc5oXnNl++qKuzCOAKb2an0lqUXBm69atEAQBkyZNavYxkydPBgD88ssvRtunTJkCQFcqjYiIiIiIiIiI2q6c0hrp29m9w73tUtIMAGKDPeF6ubl1YkaxXa5JDdVptDiTWwbIaiB31pW9ivWNhbPc2S7XD3EPQYCrrsSW3DUTGYXlUr9rsg5RFPHxzhTp5xFdGpY0A4AzhbrMGW+lN0Lc7ZNBd6VID132zMXKi6jV1Jqc46KQo1+kDwAgrbAKF8tq7LU8u1FrtPjtpK6kmbNchrHdgqxy3i9OfCGNh4U2naDRlskEGQYEDQAAlKhKkFKS0mBOkJcLAjyUAHRB6ObcW36//L4DwJTeze/zY1FwJitL12RJqVQ2+xj9XP2xemFhYQCAqipGuomIiIiIiIiI2rITWSXSuG+Ej92u6ySXoXeE7lvNWcXVyC+/+h6wtgcXCiqhqtNC7pINCLoHl70Cetnt+oIgoE+ArrSZIK9BNS6isNL0Q3lqnT9P52P35X4zEb6umNSzYeDlUtUlFNboslC6+XWzW5D2SvrSZiJEZFVkmZ1n2Hdm/wXHZs+cz6/Ay1tOYvupi01PbqYtidnIL1cBAEbFBsLTRWHxOY9cPII/0v4AAPj9P3v3HR1HeTVw+LdNvffuXiX33m3cDbbBBFNCAgkJEBIIgS8khCSQQgk91ACBhEAoAWOKsY1773KRbMu2bEm2erF6l3bn+2Ok2V2ra9V9n3N8zuzuOzOvpN2R/N6597r4sXzQcoeP2d1sS5s12XemrrRZYXkN6YUVzR6vuLKG3Ym5AIR6uzC2Db8THQrOuLm5AXDkSONfRGMOHz5st2+9qir1jePr6+vIlIQQQgghhBBCCCFEJzuRZi31Uh8s6Sp2pc2k70y3OFVX6kfvYl0I7+o+FLZ9Z/TSd6ZDVddaeHK9tVzZ75aNwKUuY81Wd5c0qxfhGaFtp5U0HZyZPMAanDnYjX1n9p3P44bX9/LBgYvc+2EsSbmlDh+z1myxy3S6Z/ZAh49pUSz87fDftMe/GPcLPJ08HT5ud5sQMkHbPpLVRHDGprRZfQnHpmxNyKbGrAapl8SEoNe3PkjpUHBmwoQJKIrC008/zeXLLUcb8/LyeOaZZ9DpdEycaF+D8uzZswAEBXVMupUQQgghhBBCCCGE6Bxx3ZQ5AzAu0npj7zHpO9MtTqWri5UGV5vgTED3BWcMrpdIlb4zHeb9fSkk55UBakBjaUzj5crO5J/Rtof7De+SuTWmPnMGmu87M6GfL8a6hfOD3ZQ5syY2jTv+dYiSqloAzBaFFzefc/i46+IytZ/Z9EH+TOzv18IeLfvq/FecvnwagKG+Q1k1eJXDx+wJhvsOx8PkAaiZM42VLYsO89K2T6U333dmfby1pNmyNpQ0AweDM/fddx+gliibOnUq3377baNfjKIorFu3jmnTppGaqn5Afv7zn9uN2bhxY6NBGyGEEEIIIYQQQvReF3JLeWdXEil1i0ai91MUhbi6zJkgT2dCvF1a3CetJI33T73P+YLzDp/fNnNG+s50j9OZdcEZl3QAXI2uDPR2/E79toj2j0ZXt7RpkMyZDpNXWsUrWxMB0Ongj9eNbLJcWU8MzjSXOePmZNQy/S7klpFbVwKsKyiKwt+3JPLwZye0LIt66+IyOdlCAKA5ZovCq9sStccPzB/S7mPVK6sp45Vjr2iPfzPpNxj0DbOneiOD3sC4oHEA5Ffmk1yc3GBMTLg1c+ZkM5kzpVW17DynljQL8nRmQlTbqoI5FJxZsWIFd999N4qikJSUxIoVKwgODmbRokXcfvvt3H777SxatIjg4GBWrlxJUlISAPfccw/XXXeddpysrCy+/PJLFEVh6dKljkxJCCGEEEIIIYQQPUSt2cIP3z3Ek+sTWPjSTp7ekEBp3d3CovdKuVxOSaX6cxzdipJmiqLwy+2/5Pkjz3PjNzfy5IEnKapq/0JksJcL4T6uAMSlFVFrtrT7WKLtFEXhVEYxOkMZeie1NNRwv+EY9cYunYebyY1+noMA0Dtnc+FyXpeev696YdM5Latj9YRIu0XqK9UHZ1wMLvT36t8V02uUbVmz5jJnAKYM8Ne2D3VRabMas4VHPo/jpS3WDJnbp0bx+2utpeCe++5su4+/Pj6TC7nWTKepA/1b2KNl78S9Q16F+pmaHzWfyaGTHT5mTzIxxKbvTCOlzSJ8XfFyUa9p9WUcG7PtTA7VtervoLaWNAMHgzMA//jHP3jyySdxdnZGURTy8vLYunUrH3/8MR9//DFbt24lLy8PRVFwcnLiqaee4o033rA7hpeXFwkJCSQnJ3P99dc7OiUhhBBCCCGEEEL0APuTLmuNdGvMCm/tTGLe8zv4PDYNi6Vh5Q3RO9iWNBvdipJmp/NPc65AXZS0KBY+OfsJ1669lk/OfEKtpX3BurF12TPl1WbOZTver0G0XnphBUUVNejrsmag6/vN1BsfPAYAnU7hfOGZFkaLlpzOKObTw5cA8HA28n+LhzU5tqS6RAuEDPUd2q1ZFUFuQTjpnYBWBGcG2vad6fzSZiWVNfz434f5LNaa0fPo0uH8ZWUMP5jWTws07zyXy4F2lFqzXJk1c43jWTNpJWn85/R/ADDpTTw88WGHj9nTTAy2Cc5kNwzO6HQ6ouv6zmQXVzWZZbUhPlPbXtJE+b/mOBycAXj00UdJSkri6aefZsGCBQQHB+Pk5ISTkxPBwcHMnz+fp556iqSkJH7729822N/NzY1+/frRr18/jMaujbILIYQQQgghhBCic3x5LKPBc7klVfzfZye44c19UpKql6ovaQZoJYKa823Stw2eK6oq4smDT7J63WoOZx1u8xzGRfpo28dS5X3UleqbYxtcrIvgXd1vpt744LHadmZV+zMPhJoR9Zd1p6mPm//imsEEejo3Of5svvX73Z0lzQD0Or2WPZNWkoZFaTqbbmI/X+qTGzo7cyazqIKb/rGf3YlqBoqTUc9rt43jnjmD0Ol0OBsN/GrhUG38sxvPNNoypDnfncrSAtTjo3yYMdjxrJkXY1+kxlIDwA9G/sCubFxfMcJ/BK5GNTAWmx3b6Pc9Jtym70wj2TPl1bXsOKuWNPN3d2JyO/r8dEhwBiAkJITf/OY3bNq0iYyMDCoqKqioqCAjI4PNmzfz29/+ltDQtjXEEUIIIYQQQgghRO9UWWPmu1Nqk1
xPZyObfzWbRSODtddPpBZywxv7eOh/x8kpruyuaYp2iLcJzoxupuQRgNliZmPyRgCMeiNrVqzh2oHXaq8nFiTy4+9+zEM7HiKjtGEwrynjbOr6H7tU2Or9hOPqgzN6V2vmTIx/TLfMZXTgaG27Qp9EebWUTWyv705ls78ucyPKz40fzejf7Hi7fjP+3RucAWvfmWpLNTnlOU2O83QxaaXazmSVUFBW3SnzOZ1RzA2v7+NMVgkAPm4m/vuTKVw3Osxu3A3jwhkSpDanP3qpkK0JTc/9Soqi8Mo2ax+vB+YPabI/UGsdzjrM5oubAfB38eeno37q0PF6KpPepPWdySnPabRXUX3mDFive7Z2ns2losYMwKLoEIyGtodaOiw4I4QQQgghhBBCCFFv25kcrb/MkpgQhgR78vYPJ/LhXVMYGuyhjfviaDrznt/BmzsuUFVr7q7pilYyWxRO1t1BHO7jir9H03fWg1ouJrdCvbN4VvgshvoO5ZlZz/DB0g8Y6T9SG7f54mZWfLmC1469RnlNy43do8O8MBnURUjJwOpap+t+/gYXdTHTw+RBlFdUt8yln1c/jLir83FN5eLlsm6ZR0/R1qyLelW1Zp5an6A9/t2yETgbmy9TlpBvHT/Cb0QzI7uGbXZHy31nrBkOh1I6Pntm17lcVr+1n6y6Gw+i/Nz44mfTmdRIZoVBr7MrH/fcd2cxt7Ls55aEHBIy1aDBmAhv5gwNBNr/PjBbzPzt0N+0xw+MfwAPJ49m9ujdWiptZps5czK9YebM+pNZ2vayUW0vaQYSnBFCCCGEEEIIIUQn+PKY9a76lWPDte2ZQwJY/8Asnlg+Umu2W1Zt5m8bz7DopV1sPp3d5XMVrXc+p5TyajWINiaybSXNlg1cpm2PDRrLx9d+zJ+n/xk/F3XBsspcxVtxb7HiyxVsSN7Q7AKji8nAyLq7mi/kllFUUdOur0e03amMYnTGYvQmdVE42j8ava57lhj1Oj0hzmpZKL2xlOOZyd0yj+6mKAoPfXqcSU9u5Z1dSW3u6fXenhQu5atB0WkD/VkcHdzCHtbMGYPOwBBfx/ucOKq+rBnQaBaErSkDrKW/9p7P69B5/O9wKj/692Ht5oSxkT58cd90BgY2HeRYNDKYsXWlGs9ml/D1ifQmx9ZTFIVXttr0mqnLmnn64NPM+XQObx5/s809vdaeX8vZArVc3Qi/EawctLJN+/c2E0OswZn9mfsbvD4gwANXkxqkvDJzprLGzLYE9e8VHzcTUwda31Ol1aV8du6zVs2hw6+cxcXFpKenc+nSpRb/CSGEEEIIIYQQou8pKq/R6rAHejozbZB9DXyjQc+dMwaw49fz+P6UKK3+/8XL5fz0P0dYE9v8wproPnFphdr26AifZsdWmavYcnELAG5GN+ZEzLF7Xa/Tc8OQG1h3wzruGHkHRp0arMsuz+aRXY/wYcKHzR7ftu/MidTCJseJjpNfVk1mUSV6F+tntLv6zdQb4mM9/9Hs4903kW6UmFPKF8fSySut4sn1Cdz578NNNjC/Uk5JJa9vV0tj6XXwx+UjWyyNVW2uJqkwCYAB3gNwNjSfQdcV2pI5M3WQP051Jai2JuS0O9PkShtPZvHImjgt82VxdDAf/3QqAS1kGOp0Oh5ZYs2eeXHzOaprm+6bA7DjbC7xddkc0WFeXDM8iJzyHD468xEFVQW8ceIN7vruLjJLM5s9Tr2S6hJePfaq9viRSY9g0DefPdXbxQTE4OnkCcCe9D1an516Br2OEaHq65fyy+1uAth1LpeyuhsVFo0MxmRT0mx3+m5eOPJCq+bQIcGZzZs3c8MNNxAQEICvry9RUVEMGDCg2X8DBw7siFPz9NNPo9PpePDBB7XnFEXhiSeeICwsDFdXV+bOncupU6fs9quqquL+++8nICAAd3d3VqxYQVqa/PEnhBBCCCGEEEI4auOpTKrN6sLS8tFhGPSNL/T5uTvx5A2jWHf/LLsyM//el9IV0xTtENeGfjN70vZQUqP2W1jQb4HWfPlKnk6e/N+k/+OLlV8wM3ym9vx/E/7b7KLpuCgfbfuolDbrEvVNsQ2uNsEZ/+4NzowPHqNtny082Y0z6T5XlvbbdS6XpX/fxc5zuS3u+8J357Qsj5snRTEi1KuFPSCxMJFaRd2nJ5Q0A/vgzMXii82O9XA2ajcNpBdWcDqzYT+R9vjwgPW8P5rRnze+PwFXp9YFOKYPCmDWkAAAUvMr+ORw04kNiqLwd5usmfuvUbNm4nPj7cYdzTnKjd/cqPWQac47ce+QX6mWeFvYb6FdVklfZdKbmBU+C1CDU0ezjzYYE2Pze+60TfbMBpuSZktHhdrts/XS1lbPweHgzAMPPMCSJUv4+uuvyc/PR1GUVv9z1OHDh3n77bcZPXq03fPPPvssL774Iq+99hqHDx8mJCSEhQsXUlJSoo158MEHWbt2LZ988gl79uyhtLSU6667DrNZ6tsKIYQQQgghhBCO+PKYtbH7yrFhzYxUjQzz4pO7p2qLgiczirhc2rq7vkXXirOpux8T0Xxw5ttkm5JmA5Y1M1I1wHsAby54k0khkwBIL01v9g748VG+2vaRFAnOdIX60j4Gm8yZmICY7poOAAsHTUJR1ADwpbJTHZYF0Zsct8kcqy/DlFdazR3vHeKp9QlNZmGcTC/if7HqZ8zT2cjDi4a26nxnLp/Rtof7DW/nrDtWhGcEbkY3AA5lHcJsaX6Nd8FIa+m2LadzHD5/YXk1+5MuAxDp58ofrxvZ5I0JTXlksfV7+crW85RXN16WbHdinvYzHx7iyaK6ryUuL04bUx8ML6ku4aEdD/Hn/X+morai0eNdKr7EBwkfAOCkd+LhiQ+3ad692bzIedr2jtQdDV6PDrMGK+uD01W1ZrbUlWD1dDEyY1CANqbKXMWutF2tPr9DwZmPPvqI1157DUVRcHZ25pZbbuG5557j3Xff5V//+lez/9577z1HTk1paSnf//73eeedd/D1tf4yVhSFl19+mccee4xVq1YRExPD+++/T3l5OR999BEARUVFvPvuu7zwwgssWLCAcePG8eGHHxIfH8+WLVscmpcQQgghhBBCCHE1yyqq5ECyukDV39+N0S0s4NfT6XTMHqoucCgK7OngPgDCcdW1FhLqFucHBrjj5WJqcmxJdQk7U3cC4Ofix5TQKa0+z4ywGdr23oy9TY6L8HUl1NsFgNiLBdSYmy8DJBynBmcUrayZr7Mvoe6hze/UycK9/HFV1L5WtcZ0jqdntbBH33PsUiGgliXb9KvZzB0WqL329q4kvvePfaTkldntoygKf/7mNPWxrAfmD2mx/Fa9hPwEbXuEf8/InDHpTcwIV68dhVWFHM893uz4BSOCtO0tCY73OtuSkKOVM1sSHdJiabjGjIrw5tq6LIy80ir+tTelwZgre83cf80Q9HVBoPg8a+bMJ9d+wqJ+i7THn537jNu+vY3EAuu+9Z4/8rzWn+aO6DsI9whvMKavmhE+QyupuT11e4PgbnSY9W+Yk3U3J+w7f5mSumyzhSODcTJaQyz7M
cTMXjLQKV9Xl7H87smZtLmvWqd2+UNNPr9cw6MYvJOyYrF75bV2nNnO5z8LY3bfk5lUrFkBpDlPVFZ+6cPdOzri+1/Qy9P07GZ7L2ZMId973frIu+UdJMl9WAtuGlb2b/oGvp25LmPs0BiMuKY/m55co2lUrFpG41lPWvN54lX601+thf/3sOtdbw4TemTTBejrfv3VOeStJvRtzZCw1fQIUh++yXE7+QVZhl9NhZJ2aRq8kFYED4AAKcAspljkIIIYQQ4uFRpuDM1q1b2bp1Kzk5xt/VmpeXp4wTQgghxL3vzWUn+GnbBWX96XahfDusITaW5uVyviFNArAyN/yK8ufBS+QVGn8R9UFw4Wo2py4bslLq+zubNDOpLL469BUzDs9Q1geEDeDbjt9ib1k+8+sZ0hNHS0M5t3XR60jPT7/tfmZmKl5/9EZpoS82nEVdirJV95rkvGQOJB4AQFfoTiPfOjjZWFbyrO5dKpWqSNP3n479RJ4mT1lvGuxGxxpeAFzOyGf+vlijjnv2ShbLjlwCwNnWkqfbVTXhrI2TnJfMyZSTAIS7huNjLxlUpRXmGkav0F6AoWzg9UyY4iRkJyglFm3MbRhfb3y5zVEIIYQQQjw8pKyZEEIIIe7oZHwGiw/GAWCmgml9a/NWj5rl2vfC3cGaXvUMpbwy8tSsOn65mBEPltXHbi5pdm9kzVzKusTsU7OV9RcavsB7Ld/D0qz8ggV2lnb0rdYXgAJtASsvrLzjvu3DPWke4gZAdHIOfx26VG7zqigbLm5AhyHIpM6sR6caps1OehDV86xHp0BDdv7VvKssiFxQZPtrXW8E8b7fcp7sAk2xx/x8/Rn01zIGJzxSFWfbig+Q7YzfqSxL1kzZTWwwEQszC8DQQyY5r/heVT8e+1HJGHy85uN42nmW6xyFEEIIIcTDocKDM9ezbGxsKr4cgBBCCCFKZtGBG3eXv92zFiNaBlfIeUe0DFKW/9gTg17/4PQRuRu9Xs/fx270w+hZ797oN7Ps3DJleXy98YyrNw6Vqvwb0w+uPlhZXnxmMTr97TNi/lu2asa/50pUtupe9M/FG03tNZn1lawPcXfPN3weM5XhT5xfT/5KRkGGsq2WnxN9GxgCnqk5hczaEXXXYx2OTWNDxBUAvBytGVVBn3//JSXNTMvf0Z9B4YMAyNPkMevErLvuH5UepQSHHa0cebLOk+U+RyGEEEII8XCo8ODMunWG2tn+/v4VfWohhBBClEBuoYaVRwxZK7aW5gxuUnH/dzcIcKFOFUMfkRPxGRyNS6+wc1em04lZXLhquJGlWbAbvs62lTwj0Og0rDxvuDBprjJnSPUhxYwwnRDnEFr4tgAMfUR2xe+6476Ng1zpXNOQXZKYmc/cPRcrYorlIiE7gSNJRwDQ5ntTxT6Eqp4OlTyr+0NVl6r0Du0NQFZhFr+f+r3I9le6hGNxLfPvl+1RpGQX3PY4er2eT/85ray/2DkMW6vyKeV4N2qdmj2X9wDgbO1MPY96FT6HB9HT9Z7G1sLw+br4zGLis+PvuO93R79TAsNj6ozB2dq5QuYohBBCCCEefBYl2XnMmDG3ffydd97BxcXlrmMLCgq4cOECBw4cQKVS0b59+5KcWgghhBAVbO2JRLKulf3pXd8Xxwrsd6FSqRjZIphJS48DhuyZhoGuFXb+yrLq2I0Sbr3r3xtZMzvjd5KUlwRAe//2FV7OZ2iNoexN2AvAojOLaOvf9o77vv5odTadvoJeDz9svcDQZoH3ZZ+W9RfXK8uazHp0rO5VIZlKD4qJDSayNnotap2a+ZHzGV5juPK6DXK3Z2izAObtjSWnUMsPWy/wbq9atxxjx7lk9kalAhDsbsfgJpXT/P3IlSNkq7MBaO3XGnOzig8QPYg8bD14ouYT/HLiFzQ6DT8c/YEP23x4y34nk0+yMWajMmZ4jeEVPVUhhBBCCPEAK1Fw5vfff7/lD0O9Xs/KlXeuAf7ffQHc3Nx48803S3JqIYQQQlSwRftvlDQb0jSwws/fu74fH66NJCNPzerjCbzTqxZu9lYVPo+Kotfrlf46ZiroXvfeCM4sPbtUWR4QPqDCz9/evz0+9j4k5iSy49IOLmVdwt/x9llc1X0ceaxBFZYfiSc9V80v26N49aY+I/eLdRfXKcvqzPp0kJJmJeLn4Mfg6oOZHzmfPE0ePx//mXdavKNsf6FjGH8dukS+Wscfe2IY0yaEKi43stR0Oj2frT+jrL/StTqW5pXTqlNKmpWf0XVGs/jMYjILM1l1YRVP1n6Saq7Viuwz4/AMZXl8vfHYWdpV9DSFEEIIIcQDrER/ZQQGBhb5AsOdrb6+vrdsu/krKCiI6tWr06FDB95++22OHz9OSEhIuTwhIYQQQpTduStZHIxJAyDc24FGgS4VPgdbqxul1Aq1OhYfiKvwOVSkY5cyiEvNA6B1NQ88HKwreUZwJecK2+MNF4e97bxp7de6wudgYWbB4HBD7xk9ev48++dd93+5cziW5oabiX7dGU1aTmG5z9GUYjJjiEiJAECbVwVrvGgR6l7Js7r/jKs7TilbtfTsUhKyE5RtXk42jG5l+FukUKtjxr9ni4xddzKRE/GGXjW1fJ3oVYmB0uvvPzOVWaW8/x5kTlZOPFX3KcDw2fLtkW+LbN+bsFfJ2vN38GdAWMUHp4UQQgghxIOtRMGZixcvEh0drXxdt2HDhiKP//crKiqKiIgINm3axLRp0/Dz8zP5ExFCCCGE6dwcCBnaNLDSSio93jxIWZ63NwaN9vYN4R8ERUqa1bs3fldaeWGl0muhX1i/Siup1C+sHxZmhoTv5eeWk6/Jv+O+ge52DL2W6ZVbqGXBTRlg94N/ov9RltWZ9Wld1QMbSyllVVLutu6MqDUCAI1ew4LTC4psn9C+Kk42htfUX4cucT4py7CvVscXG25kzbzerTpmZpXz+ReXFUd0huFvrnoe9XCxcamUeTzIhtUYhpetITNtc9xmjl09BhgyGWccupE182zDZ7E0v/9KJAohhBBCiHtbmfLz27VrR7t27bC3tzfVfIQQQghRyQo0WpYevgSAlbkZ/RpWqbS5BHvY0z7c0CsiPj2Pr/89V2lzKU86nZ7V10qaWZqreLS2TyXPCHR6HcvOLQNAhYp+1fpV2lw8bD3oGtQVgPSCdD4/+Pld9x/XNpTr8cQ5uy9SqLl/gnr/XLwRnNFk1pOSZmUwvMZwrMwMpRD/OvsXOeocZZuznSXj21cFQKeHz9cbsmf+OnSJqGTDfs2C3XgkvGJ7LN1MSpqVP1sLW8bXH6+szzg8A71ez6bYTZxMOQlAuGs4PUJ6VNYUhRBCCCHEA6xMwZmtW7eyZcsWgoKCit9ZCCGEEPeFjRFXSMtVA9Ctjg+uldzn5cXOYZhfu3P9uy3n2XI6qVLnUx4OXEzlSmYBAO3DPXG2q/w7tPcl7CM+Ox6AVn6t8HOo3GyecXXHYW1uKPW2+Mxi1kStueO+ge52dK3lDUBSVoES+LrXnUs7x/n08wBocoPQa1wkOFMG7rbu9KraC4BsdTbLzy
0vsv3J1sF4OhpeU/+cSmRfVAozNt0IAE/qVr3SsgYBdlzaoSxLcKb89AvrR6CjIdvuQOIBdsTvKFLi7IWGL2CmqpyeQ0IIIYQQ4sEmv2UKIYQQoohF+28uaRZQiTMxaBToyuRuN5q6v/znUS6l5VbijExv1U3Bg971742SZkvPLVWW+4f1r8SZGFRzrcbbzd9W1qfumcqF9At33H9s21Bl+ded0ej1+nKdnymsi16nLGsy61PDx7FIo3pRciNqjlCW50XOQ6vTKut2Vha80PFGA/in5hwkIcNQMq9TDS+aBLtV3ET/I1edy4HEA4Ch31O4a3ilzeVBZ2lmyXMNn1PWJ22fRFRGFAANvRpKYEwIIYQQQpQbkwdnMjMziY+PJzY2ttgvIYQQQtxbYlNy2Xk+GYAgd7t7phH5uLahSiZEeq6aZxccoUCjLWbU/UGj1bH2RCIANpZmdK7pXckzgtT8VDbFbgLAzcaNDgEdKnlGBv3C+vFYtccAyNPk8crWV8hV3z5Q1yTIlXr+zgCcupzJ3qjUcp9fUlY+608l8sk/pxk6cw99v9/F4dg0o8bq9XqlpJler0KTWVeyZkygmms1Wvm1AiA+O54tcVuKbB/SNJBANzsAsgs0AKhU8Nqj1Smt1PxUtsVt49sj3zJh8wR+yPqBfYn7SnSMfQn7KNQVAtDWv22lZvA8DB4NfpQabjUAipS/e7HRi/K9F0IIIYQQ5cYkwZmNGzfSr18/PDw8cHV1JTAwkJCQkLt+hYaGFn/gO/j4449p2rQpjo6OeHl58dhjj3HmzJki++j1et5//338/PywtbXlkUce4dSpU0X2KSgo4Pnnn8fDwwN7e3v69OnDpUuXSj0vIYQQ4n7358EbWTNDmgZUWiPs/1KpVHw2qL5yEfVYXDofrYms5FmZxu4LKaTmGC7Cdqrhjb21RSXPCFZdWIVGZ7hQ3bdq33uqEfZbzd8izDUMgKiMKKbumXrbrBiVSsVTbUKU9V93Rpt0HgUaLYdj0/h1ZzTPLThM6+mbafbhJsb/cYgft15gb1Qqx+LSGfP7AaKuZhd7vIiUCOKyDO8/bU5V9FpHOkpwxiRG1hqpLP8R8UeRbVYWZrzSpWhWSt/6ftT0dTLq2GqtmlPJp1gQuYA3drxBj2U9aL+4Pc9tfo6Zx2eyL3Efl7WXeWX7K0SmGP+ZtSP+ppJmVSRzo7yZqcx4oeELRR5rU6UNjb0bV9KMhBBCCCHEw6DMwZkXXniBbt268ffff5Oamoperzf6q7S2bdvGs88+y969e9m4cSMajYauXbuSk3PjLqdPP/2UL7/8ku+++44DBw7g4+NDly5dyMrKUvZ56aWXWL58OYsWLWLnzp1kZ2fTq1cvtNoH405cIYQQoiQ0Wh1LDhkuDpubqRjYyL+SZ1SUs60lPzzeCCsLw68vc/bEsOrY/dFL5G7+PnZzSTPfSpyJgV6vL1LSrF9Yv0qcza1sLWz5sv2X2FvaA7A2ei1Lzi657b496vri62wDwKbTV4wKktxNfHoeU1edou/3u6jz3nr6/7CbaasjWH08gfj0vNuOSc9VM+b3A0oA7k7WRq9VljWZ9XG2taRhgEuZ5isMWvm1oqpzVQAOJx3mxNUTRbb3qe9HDR9HACzMVLzS5e5ZM8l5yXxx8AtGrB1BiwUtGLpmKB/v/5g1UWuUANt/5WnyeG7TcyTmJBY7X71ez/ZL2wGwMrOiuW/zYseIsvtvMObFRi9W4myEEEIIIcTDoEy3Zi5YsIDvvvsOABsbGx577DEaN26Mm5sbZmbl187mn3/+KbI+e/ZsvLy8OHToEO3atUOv1/P111/z9ttv07+/oUb6nDlz8Pb2ZsGCBYwfP56MjAx+/fVX/vjjDzp37gzAvHnzCAgI4N9//+XRRx8tt/kLIYQQ96ItZ64qTek71fDCy8mmkmd0qzpVnJnapzZvLjNcXH1j6XFq+jpRzcuhkmdWOgUaLetPGi7WOlhb8Ej1ys+UOJJ0hOgMQ5ZJY+/GhDiHFDOi4gU7B/NBqw94ddurAEzfP53a7rWp7VG7yH6W5maMahXM9HWn0eth9q6LTHusTqnOmZxdQJ9vd5JyhyCLraU59fydaRDoQsMAV8K9HZg4/zCnE7O4mJLL03MPMm9sc2wszW8Zq9PrbippZo46qzbt63piYS7tIU1BpVIxotYI3t/zPmDInvm0/afKdjMzFb+MbMKP2y7QpaY3ge52dzxWdmE2w9cMJyEn4bbbrc2tqeVei3oe9ajrWZdqTtV4fu3zxGnjSMpL4rlNzzGn+xwluHg7Z9POciX3CgBNfZpiZ3nn+QjTUalUfN7+c3469hPNfJopZc6EEEIIIYQoL2UKzvz8888ABAQEsHnzZqpWrWqSSZVURkYGAG5uhqad0dHRJCYm0rVrV2Ufa2tr2rdvz+7duxk/fjyHDh1CrVYX2cfPz486deqwe/fu2wZnCgoKKCgoUNYzMzMBUKvVqNXqcnluQoj7w/XPgIf1syAzT83zi45x8nKm0WNq+Try9ZD6uNtblePMREks3BejLA9q7Ffi13OeJo83d73JkaQjRo8Jdgrmkzaf4GPvY/SYAQ182HchmRXHEsgp1DJh3kH+Gt8cO6vKLwcGJfs82ByZRNa1Phddanpijg61Wleu8yvOX2f+UpYfC33snv1c61ClA8OqD2PhmYWodWpe2foKC7ovwMmqaDmqgQ19+WbTOXILtfx1KI4XOoTiYlfyMm1TVpwoEpgJ9bCnfoAzDfydaRDgTLiXwy3BlJ8fb8DAn/dxNbuQgzFpvPbnUb4cVPeWHhaHkw6TlJsEgDY7DHR2tAtzv2e/9/ejrgFdmXF4BmkFaWyI2cDz6c8X+dzxcbRkai/Dxfi7fd+/OPBFkcBMgEMAdT3qUse9DvU86hHmElakDKBareYJ+yeYq51LfE48Z9LO8NrW1/iy3ZdYmN3+M2tr7FZlubVva3kdVCBnC2cmN54MPLy/04ny87D/vSCEKEo+E4R4sBn73lbpy1BfzNXVlczMTH755RfGjBlT2sOUiV6vp2/fvqSlpbFjh6E28+7du2ndujXx8fH4+fkp+z799NPExMSwfv16FixYwJNPPlkk2ALQtWtXQkJClMDTzd5//32mTp16y+MLFizAzk7uaBNCPLwWnDdj39WS3+EdYK/nudpabG69kVxUsPQCeP+wOXpUuFjpea+RlpK2m1mXt45dBbtKfG5PM0/GOozF3uzOd5L/V4EWvjxhTmKeYZJNPXQ8Xk3H/da3ec5ZMw6nGN4742toqeVa+rKvppCny+PTzE9Ro8ZGZcNkp8lYqu6dfjP/pdFr+DX7V+K0hlJS1S2q87j945ipin4eLY02Y3ui4bFegVq6VCnZ9/lYiorfzho+qOws9Eyqp8XV2rixcdnwzSlzCnWGF+ejVXT0CCwagFuVu4p9hYaG8XnxQ9BmNuDDJlrs791v/X1pU94mthRsAaCNdRu62XYr0fgodRS/5fwGgBVWTHCcgKe5p1Fjr2qvMjN7Jnl6Q/m75lbN6WXb67bN5mdmzSRWG
wvAK46v4GbuVqJ5CiGEEEIIISpXbm4uw4cPJyMjAyenO/ezLNMtptcjQA0bNizLYcrkueee4/jx4+zcufOWbf/9Y0ev19/2DyBj93nzzTd55ZVXlPXMzEwCAgLo0KED7u7upZi9EOJBoVar2bhxI126dMHS8uG6mrbjXDL79hwGDI2V/V2KL4WVlFVIdoGGuBwVK5K9+GVEI6wtpHxPZfphaxR6zgPwRKuq9OpUrUTjjycfZ8/GPQBYmFng71B8v5qUvBSy1Flc1V1lleUqfur4U4nK99RvkUP/n/aSU6jlQLIZfVvXYUiTyumTk5WvISYll+iUHKKSsjh+JoqAwADMzO4eeYzMvATocLG15MWhnbGs5DJWS84tQX3A8Ptd37C+9G3St1LnY4xmOc0Y/s9w0gvSOaM5Q3JIMqNrjS6yT+3UXLp8vRO9Hg6k2fHJk22V3kXFSc9V879vdwGGrJkPHqtH3zv0BspV5xKbFat8peanghd0dM1ly5lkALZqoFDlRjXPG6X4Tl88DYBeZ4EmuxaNAl0Z1LdZCb8TojjN85qzc+VO1Do1R3VHmd5lutGfOXmaPH5a+5Oy/lLjlxhafWix467/fjC823Bqp9Zm4paJaHQa9hXuo3Wd1jxR44ki+6cXpDNl2RTAkFn4RK8nbndYIcR96GH+e0EIcSv5TBDiwXa94lZxyhScCQ4OJjIykuzssjVXLa3nn3+ev//+m+3bt+Pvf+NijI+PoURBYmIivr43/nhOSkrC29tb2aewsJC0tDRcXV2L7NOqVavbns/a2hpr61tvk7S0tJQPUiEE8PB9HmQXaHj370hl/f3etRnePLDYcWevZDHopz1k5KnZE5XK60tP8t3wRpiXNFVDmIROp+evI/EAqFQwtHlQiV7HhdpCPtj3ATq9IRvg2QbPMrbu2GLHXcq6xMh1I7mad5WTKSd5fefrfN/p+yIlge6mup8L0wfU4/mFhjJqH6w5TYNAN+pUcTZ67iWRV6jlYkoO0cmGr4vX/03JITn7v31IzCAx3uhjd6/ri52NkakY5WjFhRXK8qDqg+6Lz7MAlwCmt53OhH8noEfP98e+p4F3A5r6NFX2qebtTJea3myIuMKVrAI2nk7msYZVjDr+9PURXL328+1Uw4ue9T25mH2R2MxYYrJiiMk0fMVmxnI17+odj2N1U/LDwTTD139psmuCzppONb3vi+/9/cbH0odeob1Yfn452epsVses5vGajxs19qujX3Ep+xIAjbwa8XjtWzO07sbS0pKW/i2Z2moqb+9823DMw18R6BxIp8BOyn774/Yrn6Xt/dvL60CIB9DD9veCEOLu5DNBiAeTse/rMt2e2b9/fwA2bdpUlsOUmF6v57nnnmPZsmVs3ryZkJCijWpDQkLw8fFh48aNymOFhYVs27ZNCbw0btwYS0vLIvskJCRw8uTJOwZnhBBCFPXJutPEpxtKtLSq6s6wZgFGjQv3duS30U2xvdYYe93JRN5ZcZIyVNp8oCVl5VOoKb8+JLsuJBOXavg5tg3zxN+1ZKU6fz7+M1EZUQDUcq/F6NqjjRrn7+jPT11+wtHKEYA9CXt4a+dbaHVao8/du74fo1oGAVCo0TFh/iEyck1bt1mv1/Pqn8eoOeUfus/YwcT5h/ls/RmWHLrEwZi02wRmSsbKwoyR157D3aTkpVCgLSh2v9I6lXKKyFRDsLWOex2qu1Uvt3OZWusqrRlffzwAWr2WSdsnkZyXXGSfsW1DleVZO6OM+rzZeiaJpYcNF+QdrS2oUnUDLRa2oP/f/Xlp60t8degrlp1bxqErh+4amDGK3gx1amsAOlT3KtuxxB2NqDVCWZ4XMc+oz5tjV48xL2IeANbm1kxtNbVEgZmb9anah2fqPwOAHj1vbH+DU8mnlO3bL21Xltv5tyvVOYQQQgghhBD3hzJlzrz66qv88ccffP311wwdOpQaNWqYal539eyzz7JgwQJWrlyJo6MjiYmJADg7O2Nra4tKpeKll17io48+IiwsjLCwMD766CPs7OwYPny4su9TTz3Fq6++iru7O25ubrz22mvUrVuXzp07V8jzEEKI+9neqBT+2GtoIG9rac70/vWKLR15s8ZBrvz4RCPGzjmIRqdn4f5YPByseLXr/XNBuLzp9Xq+2niW77acx8PBmp9HNKZhoGvxA0to0YE4ZXlYU+MCbNdFpkTy64lfAbBQWfBBqw/u2OT6dsJdw/m+0/c8veFp8rX5/HPxH5ytnXm7+dtGv57e6lmTo5cyOBaXTlxqHq/9dYyZIxqX6PV4N8sOxysX6G/H09GaEHd7QjzsCfawJ8DFmgsnD9GmdWssLIr/XgS62eFqb3XXfeacmsPXh77G3sqeL9p/QXPf5iV+HsVZdnaZsjwgfIDJj1/enqn3DEeTjrI3YS/JeclM2j6JmV1mKq/HpsGu1K3izIn4DE7GZ7IvOpUWoXcuS5uVr+atZSeU9UHtMlkSteiO+7vZuBHkFESgY6DhX6dAfOx9MFfdKG2n1er4cN1pDl5MBcDbyZrPBtbHxtKc4T+dRVtgh6+zDTV9Hcv67RB3EOYaRkvfluxJ2MOl7EtsjdtKp6BOd9y/QFvAlF1T0GMI5j3b4FmCnYPLNIeJ9ScSmxnL2ui15GvzeW7zc8zvMR9vO292XTb07XKwdKChd+WVjhZCCCGEEEKUvzIFZ5ydnfnnn3/o06cPrVu3Ztq0aQwbNqxImbDy8OOPPwLwyCOPFHl89uzZjB49GoBJkyaRl5fHxIkTSUtLo3nz5mzYsAFHxxt/7H711VdYWFgwePBg8vLy6NSpE7///jvm5tKZWggh7iavUMvkpceV9UndqhPoXrJsC4BHqnvxxeD6vLjoKADfbj6Pm70VT7YOufvAh4BOp+eD1RH8vvsiAElZBQz7ZS8zhjbk0do+JjtPSnYBG04ZbnJwt7eiU01vo8eqdWqm7J6CVm+483xcvXGlyrZo6NWQLx75ghc2v4BWr2XxmcW42bgxscFEo8ZbW5jz/fCG9PxmJxl5ajZGXGHm9ijGt69a4rn8V2pOIf9bE6Gs967vR7iXAyGe9gS7G4IxDtZFf51Sq9WsjYF6/s5lLlGg1+v54dgP/HTM0OsioyCDZ/59hqmtptKnap8yHftmuepc1kSvAcDWwpbuId1NduyKYm5mzvS20xm8ajBJeUkcSDzA90e/58VGLwKGXoRj24Yonze/7oy+a3Bm+rrTXM7IB6BFNXt2ps1QtnUM6EgN9xoEOQYpgZjrGWDFmT2sFoN+2kNEQiYJ+fDF6jzGtgmlsMAQAHykupfJAovi9kbWHsmeBEOPrLkRc+8anPn52I3MwDrudYpk3pSWSqViWutpJOYkcjjpMMl5yTy76VleafwKGQUZALT0a4mlmZQ4EUIIIYQQ4kFWprJmoaGhdO/enYyMDNLS0nj++efx9PTEx8eH0NDQu35VrVr6CyZ6vf62X9cDM2D4o+f9998nISGB/Px8tm3bRp06dYocx8bGhm+//ZaUlBRyc3NZtWoVAQElu2NYCCEeRl9uPENM
Si4ATYJcGdUyuNTH6tugCu/3rqWsT10VwYojxvfqeBBptDomLT2uBGauy1freGbeIWbvijbZuZYfiUetNdwRPrCxv9FN0gF+P/k7p1MNjcyruVRjXN1xpZ5HO/92TGs9TVn/8diPLIhcYPR4f1c7vh7SQFn/dP0ZDsfepqlHCX28NpK0a2XSetb15dthDXm+Uxi96vlRp4rzLYEZU9Lr9Xx64FMlMHOdRqfh7Z1v8+OxH01WCnBDzAZy1DkAdA/pjr2lvUmOW9Hcbd35/JHPlWyVWSdmsTt+t7K9R11ffJxsAPg38goXk3Nue5w9F1KYvy8WADsrc6pX30NCTgIAzX2b83WHr5lQfwI9QntQ26O20YEZAHtrC34b3VSZx5HYdF5bckzZ3rGGlDQrb639WlPV2fC3yOGkw0XKit0sIiWC307+BoCFmQUftC5ZZuDdWJlbMaPDDIKcDCUNz6ef59VtryrbpaSZEEIIIYQQD74yBWcuXrzIxYsXSUpKAgwXEXQ6HUlJScq2u30JIYS4/xyOTePXnYbggJWFGZ8MrIeZWdnu8h7dOoQXOlZT1l9bcowtZ5LKdMz7VaFGxwuLjvDXIcNd9GYqmN6/Lo818ANArzcEsD5YFYFWV7YL83q9oZzcdUNKUNLsQvoFfjz247U5mjGt9TQszct2l3fvqr2Z1HSSsj59/3TWRa8zenyHGl4828FwwVWr0/PioiNk5pe+/8yeCyksOXSj38iUm4KI5U2r0/L+nveZFzlPeez1Jq8zpPoQZf2Hoz8wZfcU1Lqy99hZenapsjwg7P4raXazhl4Nebnxy8r6WzvfIiUvBQBLczNGtQoGDO+l2wU6cws1RTIDRz9iycroxQBYmVkxpcWUMme2+Djb8NvopthbGYJIeWpD9pmVhRmtq905m0eYhkql4olaTyjrcyPm3rKPWqdmyq4bmYFP13uaMNcwk87DxcaFHzr9gIu1CwB5mjxlW5sqbUx6LiGEEEIIIcS9p0y3fo0aNcpU8xBCCHEfKNBomfTXca7HBF7uHE5VTweTHPvlLuEk5xSyYF8sGp2eCfMOMX9scxoHuZnk+PeDvEItz8w7xLazhsbiluYqvhnakO51fRnSNIAANzu+3XwegN92RXM5PY+vhzbAxrJ05TgPxqRx4aohc6BZiBuhRv4stTotU3bdCAqMqj2KOh51ihllnBG1RpCan8qsE7PQo+etnW/hbOVMqyqtjBr/Uudw9lxI4XCsof/MuytO8vWQBiW+mF6g0fL2ihv9RiZ1r4H3tUyH8qbWqnlz55usv7geMAS/3m/5Pv3C+qHX6/F38OeLQ18AsOL8Cq7kXOHLR77Ewap078Xzaec5evUoYMiAqutR1yTPozKNqDWCPQl72BW/i5T8FN7d9S7fd/oelUrF8GaBfLPpHHlqLX8evMQrXarjbHcjsPjFhrPEpl7LDAx25nDu98oF+vH1xxPoFGiSOdbyc+K74Y14as4B5TO1Rag7dlbll40lbugV2otvDn9DWkEaGy5u4OXGL+Njf6Nk5G8nfuNM2hnA0BtrbJ2x5TKPQKdAZnSYwdgNY5XP1DrudfCw9SiX8wkhhBBCCCHuHWX662/27NmmmocQQoj7wHebz3M+KRuAulWcGdfWdL1hVCoV0/rWIT23kLUnEslX63hy9gGWPNOK6j4PfnPsrHw1T/1+kP3XGoXbWJrx84gmtA/3BAzfn1e7Vsff1Za3lp9Eq9Pzz6lEhv2yl1kjm+DuYF3icy7aH6csD2tmfNbM/Mj5HE82ZBYEOwUzsb5xvWGM9ULDF0jLT2PpuaVodBpe2voSs7rOop5nvWLHWpqbMWNoQ3rM2EFWgYaVRy/TLsyTAY39SzSHn7ZGEXUtcNUgwIXHm5nmgnxx8jX5vLL1FXbE7wDAQmXB9HbTeTT4UcDwOhhdZzQ+Dj68veNtCnWF7EnYw8h/RvJDpx+KXFw21rLzy5TlgeEDH4h+J2YqM/7X+n8M/HsgKfkp7IjfwfzI+TxR6wmc7SwZ1MSfuXtiyFNrWbA/lgmPGDKuDsWk8du1bBprCzPaNz7HzIiTAIQ6h/Jk7SdNOs8ONbyY2qc27640lNXqXc/XpMcXd2ZjYcPg6oP5+fjPaPQaFpxewCuNXwEMAcufjhvKCZqrzPmg9Qdlzgy8m0bejfhf6/8xecdkALqFdCu3cwkhhBBCCCHuHWUqayaEEOLhcTI+gx+2XgAMGR2fDaqHhblp/xsxN1Px1ZAGtKlmuGM4M1/DyN/2EXftLvYHVVpOIY/P2qcEZhysLZg7prkSmLnZkKaBRcohHYlNp/+Pu4m6ml2ic2bkqVlz4jIATjYWdK9j3EXh2MxYvj3yLQAqVHzQ+gNsLEybUaJSqXi3xbt0DuwMGEr9TNw0kQvpF4waH+Bmx4f9b2R/vLvyJNF36C1yO1FXs/l+iyFDydxMxcf965a5dJ8xctQ5TPh3ghKYsTa3ZkbHGUpg5mbdgrsx69FZOFs7A3Au7RyPr3mcM6lnSnTOQm0hqy6sAgwlu3qF9irjs7h3eNh68GGbD5X1Lw99SWRKJABPtg7hegxqzu6LqLU68tVaJv11jOttfJ7u4M78cz8r46e0nFIuF+hHtAxm7phmzBjagIElDCKKshlaYyiWZoaf6V9n/yJXnWvIDNw9BY1OA8Do2qOp7V673OfSI7QHsx+dzf9a/48naj5R/AAhhBBCCCHEfU+CM0IIIYql1uqY9NdxpcfJxEeqUcPHqVzOZW1hzk8jGlPf33DR+UpmAaN+209uoaZczlfZkjLzGTJzD8cvZQDgamfJgnHNaRZy53Ju7cM9+fOZlng7GbJlYlJyGfDjbg5eC+4UJyNPzdzdF8lX6wDo17CKUaXRdHod7+1+j3xtPgDDagyjoVdDo85ZUuZm5kxvN51mPs0Mcy7IYPzG8WQUZBg1vk99PwZdu9CdW6jlhYVHKNToih2n1+t5e/lJCrWGfce2DaGmb/m81m+WUZDBuA3jOHjlIAB2Fnb82PnHuzYFb+jVkHnd5+HvYHieSXlJjPpnFLvidxl1zhx1Dn+d/Yv0gnQAOgd1VoI9D4rWVVozqpahDK9ap2bS9knkqnMJ8bCnc01vABIz81l7IoFvN59TyvzV93cmVrWAHLVhfUDYABp7Ny63ebYL96RvgyoPRNbS/cTD1oOeoT0ByCrMYsX5FcyLnMeJZENJw2CnYCY0mFBh82ni04S+1fpibla6UpVCCCGEEEKI+4vJi1pfuXKFkydPkppquEDk5uZGnTp18Pb2NvWphBBCVJCZ26OISMgEoLq3I892qFau53OwtmD2k80Y+NNuoq7mEJWcw4xN53ize81yPW9Fi0vN5Ylf9xGTYsgM8nK0Zt7Y5oR7F1/GrbafM8sntmbM7wc4nZhFWq6a4bP28dXgBvSs50tOgYbo5BwupuRwMTmH6ORcLqbkEJ2cQ2pOYZFjDTWyZNdfZ/9SggdVHKrwYqMXS/iMS8ba3JoZHWYwZv0YIlMjuZJ7ha8OfcX7rd43avz7fWpzKCaNqOQcTsRn8MWGM7z
Z4+6voaWH49kTZWge7+9qy4udTNsA/HaS85IZt2Ec59MN2TpOVk781Pkn6noW3/sl2DmY+T3n8/ym5zmefJwcdQ7PbnqWKS2n0D+sP3maPGIzY4nNiiUmM4aYzBhiMw3LKfkpRY41MHxguTy/yvZioxfZn7ifyNRILmZe5JMDnzC11VSeahPCxogrAHy+4QyX0w1BR0tzFYPaZfLJkU0AuNm48XLjlytt/qJ8jag1ghXnVwDw28nflGClChXTWk/D2rzkJSOFEEIIIYQQwhgmCc7o9XpmzpzJd999R0RExG33qVWrFs8//zzjxo2TuwKFEOI+cu5KFjP+PQeAmQo+G1QPK4vyT7x0s7di1sgmdPt6B4VaHbN2RPNYgyoVksVQEc4nZfPErH0kZhouCPu72jJ/bHOC3O2NPoafiy1/PtOSifMOs/N8MoUaHc8uOMzUVdYkZRUYdYxmwW5GfU8TshP44uAXyvr7rd7HztLO6LmWloOVA992/Ja+K/uSo85h6bml9K7a26gsBntrC74Z1pB+P+xCrdXz8/YoWlfzoN1tysUBpOYU8uGaG7/HTHusTrk3Z7+cfZlxG8YRmxULgLuNOzO7ziTcNdzoY7jZuDHr0Vm8ueNNNsVuQqvX8t7u9/juyHdczbtq1DGquVSjiXeTUj2He52luSWftvuUwasHk6fJY9m5ZbTya0XXkK7UqeLEyfhM4lLzlP3Ht/dnztlXlfXXm77+wGUUiRvCXcNp4duCvQl7uZJ7RXn88ZqP08CrQeVNTAghhBBCCPHAK/PVtbS0NNq2bcvEiROJiIhAr9ff9isiIoIJEybQrl070tPTTTB1IYQQ5U2r0/P6X8eVEk9Pt6tKPX+XCjt/qKeDkqWj1el5c9kJdNdKq93PNp++wpCf9yiBmaqe9ix5pmWJAjPXOdlYMvvJpkV6VdwtMOPtZE2LUDeGNQvgnZ41+fGJRsWeQ6/XM3XPVHI1hgyfAWEDaOHbosRzLS1ve29eaPiCsv7Bng9Qa9VGja1TxZnJ3Woo66/8eYzk7Nt/fz5aG0laruG4Pev50qG6VxlmXbwDiQcYuW6kEpjxtfdlTvc5JQrMXGdrYcsX7b8o0qviboEZD1sPGnk1ol+1frzc+GV+6vzTA33zTLBzMG81f0tZn7p7Kgk5CYxtE1pkvxo+jmic/yExJxGAlr4t6RnSs0LnKireyFoji6xXcajC8w2fr6TZCCGEEEIIIR4WZbodVK/X07dvX3bv3g2Au7s7gwcPpnnz5vj4+KDX67ly5Qr79+/nzz//JDk5md27d9O3b1+2bdtmkicghBAPKq1Oz9YzSdT0dcLPxbZS5vDrziiOxqUDEOppz0udy7/E038980gofx+L58LVHI7GpTN/fywjWgRV+DxMIS2nkKmrTrHi6GXlsVq+Tsx9qhkeDrcvnaPX69l1eRdBjkEEOAXcdh9LczM+G1iPEA97Zmw6h5ONBcHu9oR42BPsce1fd3uCPexKlQmy4vwKdl029DHxsvPi1SavFjPC9IZUH8LqqNWcSD5BVEYUs0/N5ul6Txs1dkzrELafS2b72askZxfw+pJj/Da6aZFgxJ4LKfx16BIAjtYWvNerVrk8D4Dswmy+PPQlS84uUR4Ldgrml66/4GPvU+rjmpuZM7nZZPwd/ZlxeAY25jYEOgUS5BREoOO1f6+t21uWPBB4v+tbtS+743ez7uI6stRZTN4+mZ87/4qPkw2JmfmYm6l49lFb3jkwHzCU1Xu3xbsPdNBKGLSu0ppQ51CiMqIAmNpqaoVkBgohhBBCCCEebmUKzixYsICdO3eiUqkYPnw4P/zwA46Ot9bJHzlyJNOnT+fZZ5/ljz/+YOfOnSxcuJBhw4aV5fRCCPFA+3htJLN2RmNvZc7Cp1tUWMaKRqtjQ8QVZu+K5sDFNABUKvh0QD2jmsabmrWFOR/1q8uQmXsB+HTdabrW8sbbyabC51IWa08kMGXlSZKzb/R7aR/uyTfDGuJsa3nHcb+c+IVvj3yLlZkVP3X5iaY+TW+7n0ql4tkO1Zj4SFWTXEzW6XXsuLSDeZHz2JuwV3n8vZbv4WhVfE8cUzM3M2dKyykMXT0UrV7Lz8d+pltwNwKdiu+XY2am4otB9ek+YzvJ2YVsOXOV2bsuMqZNCAAFGi1vLz+h7D+pew28yun1tePSDqbumVqkfFIjr0Z88cgXeNh6mOQcj9d8nOE1hktQ4T9UKhXvtnyX48nHic+O5+jVo8yO+IVvhg3j283nGNDYj/kXXkenN2QKjq83/o4BUfFgMVOZMb3tdGYcmUGnwE40921e2VMSQgghhBBCPATKVNZswYIFALRv354//vjjtoGZ6xwcHJgzZw7t27dHr9czb968spxaCCEeaAkZeczdEwNATqGW0bMPcOFqdrmeMyNXzc/bLtD+s61MnH9YCcyAIfOgSbBbuZ7/bpqHujO4iaFsV1aBhg9W3b6/2b0oKSufZ/44xMT5h5XAjJONBZ8Pqs/vTza9a2AmoyCD307+BkChrpDnNz9PZErkXc9X1gvyOeoc5kfOp8+KPjy3+bkigZk+VfvQzr9dmY5fFjXcajCi1gjA8P2Ytncaer1xZe48Ha35YnADZX36utOcjM8A4MetF4hKzgGgQYALjzcrPuBTUhkFGby9820mbpqoBGZsLWx5q/lbzO4222SBmeskMHN7jlaOfNLuE8xVhkDzzOMzMbON4o+nmpNjvZ1TKacAqOpcldG1R1fiTEVFq+lek586/8Sg8EGVPRUhhBBCCCHEQ6JMwZnDhw+jUql47rnnjB7z/POG+s1Hjhwpy6mFEOKB9vO2KKXPCxgalY/8dT8JGXl3GVU6F65m8+6Kk7T4eBMfrztNfPqNc4R5OTC9f13e7lHT5OctqTe718TN3gqANScS2Hz6SjEjKpder2fpoUt0+XI7/5xKVB7vWsubf19pz8DG/sVeQJ8fOZ8cdY6ynqPO4Zl/nyEmM8bk872UdYlPD3xK5yWdmb5/epFzBDgG8EazN5jaaqrJz1tSE+pPwNfeF4C9CXtZE73G6LHtwz0Zey1bplCr44VFRzgZn8EPWy4AYG6m4uP+dTEzM21gY2PMRvqu6MvfF/5WHmvp25LlfZczrMYwzFRlbgEoSqC+Z30mNpgIGDLE3tjxBmfTzvLtkW+Vfaa0nIKl+Z0Dp0IIIYQQQgghRFmVqaxZamoqACEhIUaPub7v9bFCCCGKSsrMZ8F+Q4NwW0tzgtztOJ2YRXx6HiN/3c+f41viei1IUVp6vZ7t55KZvSuarWdubRresYYXT7YOpk01j3vmDnxXeyve6VmTV/48BsC7K07R4hX3UvVQKW/x6Xm8tewE287e+N6621sxtW9tetb1Nep7mlWYxbwIQ5aphcqCcLdwIlIiSM1PZfzG8cztPhcvu7I1rNfr9Ry8cpD5kfPZErdFKed0XXPf5oyoOYK2/m3vmQCCnaUdbzd/m+c2G24M+ezAZ7St0hZna2ejxr/erTp7o1M4GZ9J1NUcBvy4WwmEjm0bQk1fJ5PNNSUvhU93fc
rGmI3KY46Wjrze9HUeq/bYPfPeehg9Vecp9ibs5UDiAa7kXmH4muEUaAsAGBA2gEbejSp5hkIIIYQQQgghHnRlutLi7Gy4EHL58uVi9rzh+r5OTqa7+CGEEA+SmdujKNQYLhaPaBnE3KeaEehmaEx8LimbMXMOkFuoKdWx9Xo9q45dpstX2xn12/4igRk7K3NGtgxi86vt+W10U9qGed5zF4/7NaxCq6rugCEAMuPfc5U8o6J0Oj3z98Xw6FfbiwRm+jbwY+Mr7elVz8/o7+nC0wvJUmcB0Ltqb2Z2mUmYaxgA8dnxjN84noyCjFLPdWvcVgavHsyY9WPYFLtJCcxYmVkxIGwAS/ssZVbXWbQPaH/PBGauax/Qni5BXQBIzU/ly0NfGj3W2sKcb4Y2xM7KUNaq4Np7zd/Vlpc6hZtkfnq9nqOFRxm4ZmCRwMwjAY+w4rEV9Avrd8+9tx425mbmfNzmYyWodz0w42bjxsuNX67MqQkhhBBCCCGEeEiU6XbjOnXqsG3bNmbPnk3Pnj2NGvPbb78pY4UQDwaNVkdartqofVUqQwaBXJi8veTsAubtM5STsrYwY2zbELwcbfjjqWYM+HEPydkFHIlNZ8K8w/wysglWFsZfNE/MyOedFSf4NzKpyONVXGwZ3SqYwU0D7tr/pDhanZa0grTid7zGzcatxBf9VSoV/3usDt1m7KBQo2PWzmj6NqhCLb/KDfhn5atZdyKRhQdiORKbrjzu7WTNh4/VpXMt7xIdL0edw9yIuYChUfXYumNxtnbmp84/MXLdSOKz4zmffp7nNz/Pz11+xtbC1uhjp+anMn3fdNZdXFfkcU9bT4bWGMrA8IG42VRefyFjvdHsDXZf3k2OOodl55bRO7Q3TXyaGDU21NOBqX1q8/pfx5XHpj1WB9trAZvSylXnsil2E8vOLuNg7kHlcVdrV95s/ibdgrvJZ989xNvemw9afcCLW15UHpvcdLLRWVhCCCGEEEIIIURZlCk4M3DgQLZu3cry5ct5//33ee+99+540UGv1zN16lSWL1+OSqVi0CBptinEg+DC1WyG/LyX5OwCo8eEezvw3fBGhHs7luPM7k+/7IgiX224k39480C8HG0ACHK3Z+6YZgyZuYesfA3bzl7ltSXH+HpIg2L7Y+j1ehYfiOPDtZFk5d/IuGka7MpTbULoXNMbC/OyZUYkZCcw6p9RJOQkGD0mwDGAT9t9Sh2PkgXrQz0deK5DNb7ceBatTs9by0+wdEIrzE3cJ6Q4aq2OHeeusuxwPBsjrigZGNcNaRLAWz1rlirgtej0IiUrpmdITwKdDA3qvey8mNllJiPWjSA1P5UjSUd4bdtrfN3hayzN7n4evV7PPxf/4eN9HxcJotV2r82IWiPoGtT1vuqx4WXnxYuNXuSjfR8B8MHeD/ir919YmRtX8m9gY39OxGcwd08Mo1oG0aF66UrEaXVa9iXuY/WF1fwb+y95mqJ9oboHd+eN5m/cFwGvh1HHwI48Xe9pZh6fSa/QXnQP6V7ZUxJCCCGEEEII8ZBQ6fV6fWkHq9Vq6tevz+nTp1GpVNSqVYvRo0fTvHlzvL29UalUJCYmsm/fPubMmcOpU6fQ6/XUrFmTY8eOYWFx7/UJKInMzEycnZ1JTk7G3d29sqcjRIXT6vQM/Gl3kUwBYznaWPDziMa0quph+olVArVazdq1a+nRoweWlqW7wJ2aU0ibTzaTW6jFysKMHZM64O1kU2Sf/dGpjPh1nxIIGN0qmPd617pjYDwuNZc3l51g5/lk5TFPR2um9a1Dtzo+pZrnf+n1eib8O4Fdl3eVeKythS2ftP2EDoEdSjSuQKOlx4wdXLiaA8AHfWszsmVwic9fUnq9nhPxGSw7HM+qY5dJySm8ZZ9qXg6817sWbcM8S3WOXHUu3ZZ2I60gDRUqVj62khDnor3dIlMiGbN+DNnqbAB6h/bmf23+d8dMpKTcJKbtncbWuK3KY87WzkxuOpleob3u22wOrU7LiHUjOJF8AoDnGjzH+PrjS3SMvEItNpZmJf4enEk9w+qo1ayNWktSXtIt213NXHmn9Tt0De1aouOKypGvycfa3Pq+fS+Ie5cpfj8QQjwY5PNACHEz+UwQ4sF2PW6QkZFx1/YuZYqOWFpasm7dOjp27Eh0dDQRERFMmjTpjvvr9XpCQ0NZt27dfR+YEULA7F3RSmDG28maev4uxY65cDWbqKs5ZOVrGPXbfj4dWI9+Df3Ld6L3id92RpNbqAVgaNOAWwIzAM1C3Ph+eCPGzzuEVqfn990Xcbe34vlOYUX20+n0zN1zkU/Xn1GOCYZsgXd71sLZznS//P194W8lMONm40Z9z/rFjonLiuN8+nnyNHm8tPUl3mj2BsNqDDP6nNYW5nzUry5DZu4F4NN/zvBobZ/bfs9M4VJaLiuPXmbZ4UtKQOhmbvZW9K7nS79G/tT3dy7TBd4lZ5comS3dQrrdEpgBqOlek286fsMzG5+hUFfIqqhVuNi48HqT14ucW6/Xs+L8Cj478JnSvwagS1AX3mr+Fh6293dw1NzMnPdavseQ1UPQ6rXMPD6TbiHdCHIKMvoYJSlllpSbxNqotayKWsXZtLO3bHe0dOTRkEfpHtidywcu0yGgZEFHUXlsLMrns0MIIYQQQgghhLiTMkdIgoKCOH78OO+//z6//vor6enpt93PxcWFsWPHMmXKFBwcHMp6WiFEJYtJyeHzDWcAQx+Z74c3oklw8WV7cgo0vLDwCJtOJ6HW6nl58TEupebxXMdqD/Udyxm5an7ffREAS3MVz7Svesd9O9fyZnr/ukq/jC82nsXV3oonWhguSF+4ms3kv45zMOZG6So/Zxs+6l+XR0pZuulOruZe5ZMDnyjrH7T6gPYB7YsdV6gt5J1d77Aueh06vY6P9n1EfFY8rzR5xeg+NM1D3RnSJIDFB+PILtAwddUpfni8camfy+1odXreXHacPw9eumWblYUZXWp6069hFdpX98SyjKXhwHD3/uyTs5X1p+s+fcd9m/o05dP2n/LK1lfQ6XX8EfEHbjZujK07FoDL2Zd5f/f77EnYo4xxs3HjnRbv0CWoS5nneq+o7ladkbVGMvvUbAp1hUzbO41fuvxi0s8TvV7PZwc/Y37kfHT6ouXrLFQWtPFvQ+/Q3rQPaI+1uTVqtZoElfEl/oQQQgghhBBCCPHwMUn6ir29PZ999hkffvghhw4d4uTJk6SmpgLg5uZGnTp1aNy4MVZWxtWBF0Lc23Q6PZOXHld6o4xqGWxUYAbA3tpQzuz9VaeYtzcWMAQXLqXl8b9+dUxygft+9NuuaLILDP1gBjYOwM/l7g3eBzUJIC23kI/Wngbg3ZUncbK1JD4tj6/+PUvhTf1PnmgRyORuNXC0MW2qtF6vZ9reaWQVGjIyeob2NCowA2BlbsX0ttOp4lCFWSdmATAnYg6Xcy7zUZuPjL6L/c0eNfg38gopOYWsPZHIpsgrdKrpXbondBu/7Yy+JTDTLMSN/g2r0L2ub6n6ydzN0nNLSclPAQzZLdVcq911/06Bn
Xiv5Xu8t/s9AGYcnoGztTManYavD31NriZX2bd3aG8mNZ2Ei42LSed8L3im/jOsv7ieyzmX2Zewj9VRq+ldtbfJjr/i/Ar+iPijyGP1POrRq2ovugV3w9XG1WTnEkIIIYQQQgghxMPBpLXFrKysaNmyJS1btjTlYYUQ95gF+2PZG2UIwPq72vL6o9VLNN7C3IxpfesQ4GrHx+sMwYXFB+O4nJHHD483MnkQ4V6Xma/mt13RAFiYqZj4yJ2zZm72dLuqpOQU8vO2KPR6eGHhkSLbg9zt+GRAPVqElk9PrPUX17MlbgtgyMh4o+kbJRpvpjLjxUYv4ufgx4d7P0Sr17IxZiNXc6/yTcdvjLrg7WJnxTu9avLy4mMATFl5ipZV3bGzKvt/b2cSs/hs/Y3ssBc6hjGwsT8BbnZlPvbtFGgL+O3Eb8r6+HrG9U7pH9aftPw0vj78NQAf7PmgyHZvO2+mtJxCO/92JpvrvcbO0o63W7zNs5ueBeCzA5/RtkpbkwSi4rPji2SHPVn7SfqH9SfYObjMxxZCCCGEEEIIIcTD6+G8RV0IUWrx6Xl8vDZSWf9kQD3srUt+IVylUjG+fVW+G94QKwvDR9GOc8kM+mkPCRl5Jpvv/WDu7otk5RuyZvo3qlKii/9vdKvB4CZFe/aYqWBc2xD+ebFduQVmUvNT+Xj/x8r6283fLvWF8EHhg/i247fYWhiyhY5ePcqIdSOIzYw1avxjDarQuprhecan5/Hlhlt7gZRUoUbHK38epVBryEAa2yaEl7uEl1tgBmDFuRVKY/mOAR2p7mZ80HNMnTGMqjXqlscHhQ9iRd8VD3Rg5rp2/u3oGtQVgLSCNL449EWZj6nT63hn5zvkqA19hh6r9hivNHlFAjNCCCGEEEIIIYQoMwnOCCGMptfreWvZCXKuNZgf1iyA1tXK1lC8Vz0/5o9tjsu1BvWnE7Po9/1uIhMyyzzf+0F2gYZZOw1ZM+ZmKp7tcPcyVv+lUqn4qF9detXzBSDMy4G/JrTi7Z61StTovKSm759Oar4he6pLUBe6Bnct0/Ha+rfl926/42nrCUBMZgxPrH2Co0lHix2rUqn432N1lSDfrJ3RrDwaX6b5fLv5HKcuG16DYV4OvNq1ZNlhJaXWqpl1cpayPr6+cVkz16lUKl5p8goDwwcC4O/gz69df2VKyyk4WD08fd4mN5uMg6Xh+a44v4LFpxeX6XjzIuZx8MpBAPzs/ZjcdHKZ5yiEEEIIIYQQQggBJShrtn37dpOfvF27B/9OXiEeJEsPx7Pt7FUAfJxseLNHTZMct2mwG0sntOLJ2QeITc0lMTOfQT/t4YfHG9Eu3NMk57hX/bEnhvRcNQB96/sR5G5f4mNYmJvx3fBGvN0zD29HG8zMTNcI/Xa2xG5hXfQ6AJysnHir+VsmOW4t91rM7zGfiZsmcj79PGkFaYzdMJbpbafTOajzXceGeNgz6dHq/G+NIavr9SXH8XOxpamRvZBudjg2je+3nAcMZea+GtIAG8vyC3QBrLywksScRMCQAVLLvVaJj2GmMuO9lu/xTL1n8LD1wNysfOd8L/Ky82Jys8m8u+tdAD7a/xF+Dn609W9b4mNdSL/AjMMzlPX/tfnfQxXoEkIIIYQQQgghRPkyOjjzyCOPoFKZ7oKfSqVCo9GY7HhCiPKVlJnPB6tOKesf9quDkwl7w1T1dGDZxFaMnXOQo3HpZBdoGPP7AT4ZUI8Bjf2LP8B9KLdQwy87ogBDT5NnO5Ysa+a/fJ1tTTGtu8oszGTa3mnK+hvN3sDDtmzZUzfzdfBlTvc5vLLlFfYl7qNAW8ArW19hcrPJPF7z8buOfapNCOeTsll0II5CrY6n5x5k+cTWBHsYH/DKK9Ty6p/H0OkN6y92CqNOFeeyPKViqXVqZp24KWvGyF4zd+Jt713WKd3XHqv2GFEZUcw+ORudXsdr215jbve5JSoTp9apeXPHmxTqCgEYUWsETX2alteUhRBCCCGEEEII8RAqcVkzvV5vsi8hxP1Br9fzzoqTZF7ri9KvYRU61TT9BWAPB2sWjmtB11qGY2t0eiYtPc6Fq9kmP9e9YMG+WFJzDBd/e9fzo6rnvX9X/ucHPudqniF7qm2VtvQK7WXyczhZOfFj5x/pU7UPAHr0TN8/nRNXT9x1nEqlYtpjdWhzrdReWq6aJ38/QNq177Expq+LJDrZ0F+kQYALEx6pWspnYbw1UWuIzzaUYWvt15p6nvXK/ZwPupcavUSXoC4A5GpyeXbTsyTlJhk9fubxmUSmGrKwQp1DeaHhC+UyTyGEEEIIIYQQQjy8StzF29bWlr59+9KlSxfMzKRljRAPgzUnEtgQcQUADwcrpvQqecklY9lamfPjE415Z8UJFu6PQ6vT88m608wc2aTczlkZ8tVaftp2I2vmuTJmzVSE3fG7WX5+OQD2lvZMaTnFpBmVN7M0t+R/rf+Hu407s0/NBuCLQ18w+9HZdz2npbkZPzzRiAE/7OZcUjbRyTmMn3eIP55qhrXF3ct87Th3lTl7YgCwsTTjy8H1sTAv3//nNDoNvxz/RVl/pv4z5Xq+h4WZyoyP2nzElZwrHE8+zpXcKzy36Tl+7/Y7dpZ2dx174uoJ5WdiobLgo7YfYWNhUxHTFkIIIYQQQgghxEPE6OCMo6MjWVlZ5OXlsXjxYrZu3crw4cMZMWIE9evXL885CiEqUUp2Ae+tvFHO7IO+dXC1tyrXc5qbqXi3Vy02RSaRlFXAhogr7I9OpVlIyfuH3KsW7o8lObsAgO51fAj3dqzkGd1djjqHqXumKuuvNnkVH3ufcj2nSqXi+UbPszluMzGZMRy6coitcVvpENjhruOcbCz5bXRT+v2wm+TsAvZHp/LG0hN8Obj+HQM7GblqXl9yXFl/q0dNQisgk2ld9Dpis2IBaO7TnAZeDcr9nA8LGwsbZnScwRNrnyA+O57I1EgmbZ/EjA4z7tiPJ0+Tx1s730Kr1wIwvv54arvXrshpCyGEEEIIIYQQ4iFh9C3BV65cYeHChfTo0QNzc3MSExP56quvaNSoEfXr1+fzzz/n8uXL5TlXIUQlmLoqgpRrZaG61/GhR13fCjmvnZUFr3QJV9Y/Whv5wJRDNGTNXFDWn+sQVomzMc7Xh77mco7hM765T3MGhg2skPNamlnyUqOXlPWvDn+FRld8v7IANzt+HdUEG0vDf3PLj8Tz9b/n7rj/e3+fJDEzH4C2YR480TyobBM3glanZebxmcr6+Ppl6zUjbuVh68H3nb7H0dIQ/Nx2aRufHfzsjvt/fehrLmZeBKCuR13G1h1bEdMUQgghhBBCCCHEQ8jo4IyNjQ1Dhgxh9erVxMfH89VXX9GwYUP0ej0nTpxg8uTJBAUF0aVLF/744w9ycnLKc95CiAqwMeIKfx8zXJB3sbNkat+KvYN8YGN/wr0N2QtH49JZeyKxQs9fXpYcusSVTEPWTNda3tTyc6rkGd3dwcSDLDqzCABbC1vea/VeuZUzu51OgZ1o4NkAgOiMaJadW2bUuPoBLnw9pCHXpzpj0zmW
Hrp0y35rjiew4qjhde5oY8GnA+thZlb+z29jzEYlENDYu7E0nC8nVV2q8mWHL7FQGZKF50fOZ37k/Fv223N5DwtOLwDA2tyaD9t8iIVZiau/CiGEEEIIIYQQQhilVMX0PT09efHFFzl48CCnTp1i8uTJ+Pv7o9Vq2bRpE6NHj8bb25sRI0awfv36B+ZudyEeJhl5at5efqMB+5RetfByrNi+CxbmZrzRvYay/un60xRqdBU6B1Mr0Gj5cct5Zf2FTvd21kyeJo/3dr+nrL/Q8AUCHAMqdA4qler/7d13eBRl28bha3fTSUih9y69SlFQAaVIEQEFpKugAqIUBbGDFTuWFxU+6SjygqIUgSBNXlC69N5LgJBK6iaZ748lS0I6ZDeF33kcOdjdmZ25J6yPYa48z62Xmr5kfz5191RFWbP3CwAP1yut1zvXtj+f8Mse/X3iqv355YhYvbHkxuf83UfrqYyvZy5UnbnEpER9v+d7+3N6zTjWPWXu0Vv3vmV//vG2j7X+7Hr784j4CL35vzftz8fcPUZVfKs4sUIAAAAAAHCnue1Ox7Vr19aHH36o06dPa+3atXryySfl4+Oj6OhozZ8/X507d1a5cuX0yiuv5Ea9AJzk/eUHdDnSNrujbc0S6tG4XJ7U0bZmSd1btZgk6fTVaM37+3Se1HE7DMPQvvPhemfpAbWavFYXwm3LZz1Yq6TqlfPN4+oyN3X3VHtPlEYlGqlvrb55Ukejko3UvlJ7SdLV2KuatX9Wtt875L4qGnBPRUmSNdHQc3N36PiVazIMQxN+2avQaKskqXP90nq0Udlcrz2lo6FH9fmOz9VhcQcdC7OFdA1LNFSL0i0cel5IPWr00DP1n5EkJRlJGr9xvPZftfXTmvzPZF2KviRJalGmRZ59zgEAAAAAwJ0jV9fraNOmjdq0aaOpU6dqyZIlmjt3rgIDAxUUFKSvv/5aH330UW6eDnnsaPBFjQ38WJdisnez3GQyqX5Ac33TeYw8XB3bUB63Z+vJEC3cblv+ycfdRR/0rJ/hMlaR8ZGaunuqDlw9kK1jm0wmNSvdTM/Uf0Zulqw/ByaTSa91rq1HvtkkSfpq7VE9dnd5+Xq6ZvNq8s6FsBgt2X1ev+48r6OXr6Xa5moxaXS7/D1r5nDIYc05MEeS5GZ206RWkzJspO4Mo5qM0roz65RgJGj2/tnqdVcvlfQqmeX7TCaTJj5SV2dDYrThyBWFx1j11MxteqJ5Ba09dFmSVNzbXe91z/hzfjuCY4K1/MRyLTuxTIdCDqXaZjaZNbLxSKcuE3cnG9l4pM5GntXKUysVkxCjF/58QU/Xe1pLTyyVJPm4+ui9Vu/JbLrt310BAAAAAADIlEMWUzeZTDKbzTKZTNxwKqT2BJ3SoOVDlOhyWcrBvdp/wo6o/fy9WtL7WxXz8nFcgbhliUmGJi3db3/+SqdaGS7zFBIbomGBw3Qw5GCOzrHj0g7tuLRDX7T5Qr7uWc8cqV/eV90bldWS3RcUFm3V1PXH9Gqn2lm+Ly9ExiZozb9B+nXnef198qpuXtXRzWJWuzolNeS+qmpQ3i9PaswOwzD08baPlWTYlpEb1nCYqvpWzdOaKhWtpN41e+vHQz8qJiFGU3dP1cSWE7P1XheLWd/0a6xe323RoaBInQmJ1scrD9u3f/x4fQUUyb3QONoarbVn12rZ8WXacnGL/ftor8fkovvK3acBdQaoRRlmzTiL2WTWe/e9p6CoIO2+sltXYq7oo203fnHk1RavqnSR0nlYIQAAAAAAuFPkajizYcMGzZ07V4sWLVJkZKQk2w2+MmXKaODAgbl5KuShv88c1rOBz8pwCbml94eZ/lWHBU9o/iPfq1aJ8rlcHW7Xoh1ntf9ChCSpTpmi6tu8Yrr7XYq6pGcDn9WJ8BO3dJ5tQds06I9Bmtpuqsp5Z71k2ssda2rFviDFJyRp5v9OaeA9lVTe3+uWzp3bEhKTtP7IFc0+Ytb4besVl05fnGaV/dWzSXl1rl+mQMz6WXtmrbYGbZUklfcur8F1B+dxRTbDGg7T78d/1zXrNf167FcNqD1A1f2rZ+u9Ph6umvFkM3X/z//sS/ZJUt/mFfRgrVK3XVtiUqK2XdqmpceXas3pNYpOiE6zT/3i9dW1alc9XOVhBXgE3PY5kXPuFnd9+eCX6r+8v85dO2d/vX2l9upatWseVgYAAAAAAO4ktx3OHDx4UHPnztX8+fN17pztJodhGPLy8lKPHj00aNAgPfTQQzKbWSKkMFhz7F+N2ThCcrHdvDcnFNe09tNUv0z6N/BT+unf9Zqy5w3JEqt4yxn1/r2/prT5Wg9Wa+DospFNkbFWfbLqiP35W4/UkcWcdvbb2cizemb1Mzp/7bwkqaRXSX3f7ntV8q2U5TkOXD2gF9e+qJDYEJ0IP6H+y/vrP+3+o7rF6mb6vvL+XnqqZWV9v/GE4hOS9NnqI/qiT6OcXWAusvWRidAvu85p6b8XFHwtXrY2XjeCmSrFi6hH43Lq0bicKgTkjyApO+IT4/Xp9k/tz19u+nK2lqBzBn8Pfw2pP0Rf7vxSSUaSPt/xuaa2m5rt95f189SMJ5up9/dbFB2fqAoBnnq9S53bqulI6BEtO75My08u1+Xoy2m2l/Mupy5Vu6hr1a40mc8nAjwCNLXdVA1YMUAR8REK8AjQG/e8wWxfAAAAAADgNLcUzly+fFk//fST5s6dq127dkmy3ag0m81q27atBg0apJ49e6pIkSK5Wizy1pIDf+vNv0dJFttvg7sklNWPj/yg2iWzN/tlSNOOquRXWmM3vCDDJVSGS4hGbRiql8M/1OAmDzmydGTTf9YdV/A124yCTvVK656qxdLsczzsuJ5d/awux9huQpf3Lq/pHaarvE/2PgcNSzTUvM7zNGLNCJ2KOKWrsVf11Mqn9MkDn6h1hdaZvndE2+r6eftZhUVb9euu8xpyXxXVK5f1smi56XxYjJbsOq9fd53XsZv6yEiSv5erHmlYVj0al1OjCn4F8mbvvIPz7DMKmpdurgcrPpjHFaU2oPYA/Xz4ZwVFBemv83/pn4v/5GhpsHrlfLXwuXu1fO9F9WteUd7uOf9f4ZXoK1pxckW6fWQkW++SDpU76JFqj6hxycb0MMmHqvhW0ZxOc7T0+FJ1q9aNmUwAAAAAACBjSYlSbLgUEypFh9j+jAlJ8TzF45Ar2Tpktu9IxcbGasmSJZo7d64CAwOVmJgo43ozhXr16mngwIHq37+/ypYte2sXh3xt7q61+mj3eJksthv37omVtKjHDFUOyLoZd0rtqjfUz0V/VP9lz8pqOStZYvTJnpd0Kuwlvf0gS98lMwxDx69cU2lfz1u6cXwrTl+N0oxNJyXZ+qK81jltT5cDVw9oWOAwhcaFSpKq+VbTtA7TstWUPaUKPhU0r/M8vbj2Re28vFMxCTF6cd2Leq35a+pTq0+G7/P1dNULD9bQu8sOSJI+/OOg5g1p4fAAJDLWqj/2BumXXef0z8mQtH1kXMx6sGYJlU+4oDFPtFMRT/dcO/ep8FMK8AxQUbe
iuXbMzATHBGvanmmSbP05xjcbn+8CJg8XD73Q+AW9vul1SdJn2z/Tgq4LchSA1Cvnm+NgL1t9ZMrfp0eqPqLWFVrL3ZJ7nwM4RjW/ahp99+i8LgMAAAAAADiLYUhxkTcFK6FZhy4xYZKMrI5uE5e9/bJ917dkyZKKioq6Xr+h0qVLq2/fvho4cKAaNWqU3cOgAJr6zzJNPfCmTOYESZJXUg391usHlfbxv6Xj1S5ZXqt6L1CPRcMUbtorkylRi85+rDNLzmt6t/F3/BJ4SUmGxi/eo0U7zqmIm0W9mlbQky0rq3Jxx85E+2DFQcUn2m42D7m/SppluHZe2qnn/3xe16y22SJ1itXRd+2+k7/HrX0OfN19Na3DNL2x6Q2tPLVSSUaS3vvnPZ2/dl6j7x6d4Y32gfdU0uzNp3QmJFr/O3ZV649cUduaOQuHsmvf+XB9t+G4Ag9cSrePTPPKAerRpJw61y8jLxdpxYrzcnPJnc+vYRj6ZPsnmntgrjwsHuparasG1B6gan7VcuX4Gfl619eKstrG+p41eqpmQE2Hnu9Wda3aVXMPzNWhkEM6GHJQK06ucFi/kONhxzVj3wwFng5UTEJMmu0NijdQ12pd9XDlh2/5vwcAAAAAAADkUHx05jNYYsJsz28OXZISHFxY9u4PZjucuXbtmkwmkzw8PNStWzd16NBBFotFe/bs0Z49e26pxEGDBt3S++A8kzf+rHknPpTJnChJKmrU17I+0+Tv5X1bxy3hXVRr+s/S4/8dr9PWPyVJW8Pn65EFF7S416fycM0f/S2czTAMvbv8gBbtsC0pFRWfqFmbT2n2llN6qFZJPd2qiu6tVizXZzJsPh6sVfsvSZJK+Ljr+bapG6xvPr9Zo9aNUmxirCSpSckm+uahb+Tj5nNb53W3uOujBz5SWe+ymrFvhiRp5v6ZuhB1Qe/f9366Mw/cXMwa/3BNjfzRtqTi5BWH9ECNEun2xrkdxy5f02Pfbk4TylS93kem+019ZKxWa66e/7s932nugbmSpNjEWC06skiLjizSvWXu1YA6A3Rfuftyfamsg1cP6tejv0qSvF29NbLRyFw9fm4ym8wae/dYPRv4rCTpq51fqX2l9rk+WyUoKkgDVgywh5LJynmXU9eqXdW1aldV9q2cq+cEAAAAAAC4oyTE35i9kmHQks7sloRYx9fm4St5Bkie/pLX9T89A2567H/jsae/FCdpcta/wJvj9ZJiY2O1cOFCLVy48FYuxc5kMhHO5HOvB87Qb+enyGSyTcMqbmqmZU98qyLuuXPz08PVTb8/8bmeW/qZ/g6bI0k6Y12nh+YP1i+Pf6dS3s7tJZIfTF1/XDP/d0qSZDGb5GoxKdaaJMOQ1hy8rDUHL6tWaR891aqyHm1UTh6ults+Z2KSoXeWHrA/H9exZqql1P48/afGbRwna5ItfGhVtpW+aPuFPF08b/vcku0m+5i7x6icdzm9/8/7SjKStOrUKl2JvqIv234pPw+/NO/pUr+Mplc4qX/PhunwpUgt2nFWfZpVzJV6kn22+rA9mAko4qZHGpRRjybl1bC8r8OX+fr50M+auvtGk3svFy9FJ9h6PW25uEVbLm5R5aKV1a92Pz1a7VF5uXpldKhsMwxDH237SMb16ZnDGg5TMc+0PYfyk3vL3qtW5Vrpf+f/p4tRF/XjwR/1VL2ncvUc3/37nT2Y8XH1UccqHfVIVVsfmfy23BsAAAAAAECeSu7LkjJAybQ/y/XAJT7S8bW5eV8PUPxTBC1ZhC4evpLlFlpOWCOytVuOjmzc3GgBhdaLy7/WuuBpSr73WM7ygH5/4ku5ueRu/xOz2azpj47Te+vLacHJj2UyJyrCtE+dfn5Cc7tOU91SFXL1fPnZj/+c0SerDtufT+5ZX+3rlNJPW89qzpZTuhhuS4IPBUXqlcV7NfmPQ+rfopIG3ltJpYp63PJ5f952VoeCbANgvXJF9XiT8vZtS48v1Zv/e1OJhm3mVPtK7TX5/slys+T+zKbeNXurdJHSennDy4pJiNHOyzs18I+BmvrQVFUomvpzYDKZ9Hrn2ur9/RZJ0merj+iRhmXl5ZY7n89/z4bpj31Bkmwzida/3EZFnNT7Z+WplXr/n/ftz8c1HaeeNXpqybElmn9wvs5ds82qOhVxSh/884G+3vm1etboqb61+6qcd7lbPm/g6UDtuLRDklTRp6L61ep3exfiJGOajNHm85tlyND0PdPVo3qPdAO9W3Eq/JSWHFsiyTaTaHnP5SxbBgAAAAAACj/DkOIictaTJTrEFsxkty/LrbK4pw5QPP3SCVpuDl38JZf81xs423cb161b58g6kI8MWTJZW8Pn259Xc39Yi3pNlovl9mdpZOSNNv1U2a+MPtr5imSJkdXlnPotG6Sljy1URb8SDjtvfvHH3ot6Y8le+/NXO9VSr6a2QGJ4m2oaen8VrdofpBmbTmrnmTBJUmi0Vd+sO6bvNhxXlwZlNOqhGqpaImfLzUXEWvXZ6huB0Ftd68p8fXmwBYcWpAoJulXrpkktJ8nF7LiQ4oHyD2jWw7P0/J/PKzgmWKciTmnwysFa0HWBSnql7ivTvEqA2tcppcADl3Q5Mk7/99dJvfhQjVypI2VI9uKD1Z0WzGy+sFmv/vWqffbKkHpDNKiubYbhgDoD1LdWX204t0HzD87X1qCtkqRIa6RmH5ituQfn6sEKD2pYw2E57hMTlxinz3d8bn/+ctOX5WpxzaWrcqyaATX1aPVHteTYEkVaI/X9nu/1SvNXcuXY3+z+xh5MPln3SYIZAAAAAABQ8MRH52ypsOTXHN2XxWTJZNZKJrNbXL2kQrKaSbbvOLZu3dqRdSCfGPzL+9oZucD+vKH345rT402Zzbnb2yI9Axq1VfmiMzRq3UgluVxVkkuwnvh1pNYOmFuoe9BsPhasUQt2K+l6qPzcA1X1XOvUTd9dLWZ1bVBWXRuU1a4zoZr5v1NasfeiEpIMJSQZ+m33Ba3ef0kTu9VR76YVsr3c0td/HtXVqHhJUpcGZdS8SoAk6ceDP+rDrR/a9+tbq68mNJ+Q6z1O0lOnWB3N7zxfI9aM0PHw47oSc0Vj1o3RjIdnpOknMqFTLa09dFmJSYa+33BcfZtXVAmf20vB/3csWJuOBUuSKgR45vpyaRnZe2WvRq8brYTr/+N7rMZjGtVkVKp9LGaLHqz4oB6s+KAOhxzWvIPztPzEclmTrEoykrTmzBptOLdBY+8eq/61+2f7czBn/xydv3ZeknRPmXvUpkKbXL02RxvZaKRWnlyp2MRYLTi8QP1q91MFn9ubdXfg6gGtOrVKkhTgEaCBdQbmRqkAAAAAAAC3xt6XJbOgJeTGUmHJ25zSl8UvG0uF+aXe5l600IQst8o5vw6OHDl4+Zwq+pbItd4u2TVkyUepgpmW/oP1fbeXnVpDm6r1NNtzlgb+0VeyXFOk+YAG/Pq2FvX+MOs3F0B7zoXpmTnbFZ9o623S6+7ymtCpliQpOCZYRd2KpllCrHFFfzWu6K/XOtfW3L9P6cd/zig02qoYa6JeWb
xXfx0N1vs96svXM/OZDyeDozRr8ylJkruLWa9eP+/CwwtTBTND6w/Vi41fdGp/jbLeZTXj4Rnqu6yvLkRd0J7gPXrv7/f0Tst3UtVRrYS3+javoHl/n1FUfKI+DzyiD3vWv+XzGoahj1PMmnmpfU25uTg+kDoRdkIj/hyhmIQYSdJDFR/SG/e8ken3vGZATb3b6l2NbjJa/z3yX/18+GcFxwTLmmTVR9s+0paLW/Ruq3cV4BGQ6bmvRF/R9L3TJdn6/4xvNr7A9VIpVaSUBtYZqOl7pyshKUFf7PhCn7f5POs3ZuKrnV/ZHz/b4Nlc6esDAAAAAACgpEQpJiybPVlCru8bIsVfc3xtbt6ZLBWWTk8WT3/bvmbHrbhUmBHO5CPxCQnqtmCUzidulJHkIk+jkioWqaWmpRur010t1KBURYfNYHnu90+1NXye/fn9AU9r6iNjHHKurDQqU1mvNPlAk3ePlsmUpMMxyzRxbW1NfHBQntTjKMevXNOTM7cpKt62bFK72qXswcLH2z7W3ANz5WJ2Ue2A2mpQooEaFG+gBiUaqJx3OZlMJpX29dC4jrX0fNvqenfZQf209Ywkadmei9p1Jkxf9W2kuytlfGP+/eUHZU20Tdd59oGqKu/vpcVHFuvdv9+17/NM/Wf0QuMX8uRmfYBHgL588EsNXDFQsYmxWnJsiWoF1FL/2v1T7Tfqobv0687ziopP1E9bz6hj3VJqU7NkBkfN3Kr9l/Tv2TBJUq3SPurWsOztXkaWgqKC9GzgswqLs523Welm+uiBj7K9fFwxz2Ia1nCYnq73tL7a+ZVmH5gtSdp4bqMe+/0xfXj/h7qnzD0Zvv/LnV/aQ6Fed/VSDf/cWRrO2Z6u97QWH12skNgQBZ4O1IoTK9S5audbOta2oG3634X/SZLKFimrXnf1ys1SAQAAAABAYZDclyWjWSsZhS6x4Y6vzd6XJYNZK+mFLp7+kkvhXb0oPyKcyScSEhP1yIIXdSHxL0mSyZygWB3XkdjjOnJquX48JSmxqAIs1VXTv57ur3C3OtdspmJePrd97hFLv9Dm0Nn25/f6D8qzYCbZgEZttfPiMAVenipJWnT6CzU6cJe618n4JnNBcjE8RoN+2KqQ60uKNa8SoG/6NZbFbNLH2z7WvIO2oCwhKUF7g/dqb/BezZetD1CAR4A9qGlQooHqFa+nD3vW1/01imvC4j2KiE3Q+bAY9f7+b41pV0PD21SXxZw6XNl0NFhrDl6SJJX0cdew1tW05NgSTdoyyb7P0/WezrNgJlmtgFp6t9W7GrdxnCTpk22fqLpfdbUo08K+Twkfd73csaYmLT0gSXr5v//qj1EP5Hh5s8QkQ5+m6L8zrmNNe/8dRwmNDdWzgc/qUrTt76J2QG191farNMu3ZYebxU0vN3tZ95S9R69vel0hsSEKjgnWs6uf1VP1ntLIxiPlak49m2p/8H79dvw3SZKPm4+eb/T87V9UHvF289bLTV/Wa5tekyS9+/e7alCigcr7lM/RcQzD0JSdU+zPn2/8fJrZawAAAAAAoBAxDMkanbOeLMnPr/eqdRizS4oeLNnsyeIZILmxAkhBQDiTD9iCmdH2YMYwzLIkBijJJTj1jpYIhWintoTu1JbQOfroX7Pck8qpY4XH9M6DT8rFkvPpY6NWfKO/QmbYnzf37a9p3cbd1vXklk87PqdHFhzSGetamcwJeuvvcapd4mfVLOH42QyOFBoVr0E/bNX5MNtshdpliur/BjeVu4tZn+/43B7MmGRSpaKVdCriVKr3h8SGaP259Vp/br19v+r+1dXrrl5a+kIXvbRwn7afDr0eNhzR/45d1Rd9Gqm0r4ckKSExSe8s228/3isP19Lac3/orf+9ZW9EP7jOYI1uMjpfLG/1cJWHdSjkkH7Y94MSjUS9vOFl/dTlp1Q33Z9sWVl/HQ3W2kOXFXwtXi/991/NerJZjsKVX3ae07HLtumhd1fy14O1bm32TXZFW6P1/J/P62T4SUlSRZ+KmtpuqrzdvG/ruPeVu0+Luy3W65te1+YLm2XI0Ix9M7QtaJs+euAjey8WwzD00baP7O8b3nB4gW94/0i1R7T5wmYtO7FM16zX9Mpfr2jWw7PShFKZWX92vfZc2SNJqu5XXV2qdHFQtQAAAAAAINclxOdsqbDk/RLjHFyYSfLwzcZSYTeFLu4+d3xflsKMcCaPJSUlqfvPL+lcwnpJtmBmcLU3NO7+XjoRckkrjmzV3+d36kTEAUXqhGS+0cDJZEpSvOWsll6YolWzf9X4pq+oT4P7s33usX98q7VXvrc/b+zTRz90n5Br13a7zGazFj72sdrO76MYy3EZljANXDpC6/r/7PR+PLklOj5BT8/epqPXQ4CKAV6a/XQz+bi76MudX2rW/ln2fSe1nKQeNXooPC5c+4L3aU/wHu25YvuKiI+w72fI0NHQo/rgnw9U3W+hxnUbr78P1NA3a48qyZC2nLiqTl9u1CePN1S7OqX009YzOnLJdv6GFfzk4fevXvvfG/ZgZkDtAXqp6Uv5IphJ9kLjF3Q49LA2nd+ksLgwjVo3SnM7zbX3ATGZTPrk8QZ6+Mu/dCUyThuPXNEPm07qmQeqZuv4cQmJmrLmqP35+I41HXr91kSrRq8brb3BeyVJJTxL6Pv236u4Z/FcOX5xz+L6tt23mrN/jr7c+aUSDNsMrF5Le+nNe95Ul6pdtPLUSu26vEuSVLloZT1R64lcOXdee73F69p9ebfOXTunPVf26Nvd3+rFJi9m672JSYn6ateNXjMvNH5BFtZMBQAAAADA+RITbMt/ZStouR62RIdI1ijH1+bmcz1A8c96qbDkxx6+9GVBGibDMIy8LqKgioiIkK+vr4KDg1WsWLEcvz8pKUk9fh6nE/GrJUmGYVK/Kq/qtdZ9090/ITFRG0/t15rj2/Rv8L+6EHNYCS4XUu1T2txKUzq8rrqlKmR67vGrpumPoK/tzxt4P6a5Pd5yWE+b23Hoyjn1XvqEDIttPcZKrg9pWb8peVvULYhPSNIzc7Zrw5ErkqTi3u5aPPxeVSpWRN/s+kbf77kRlL1979t6/K7H0z2OYRg6HXHaHtb8e+VfHQo5lGqfhyo+pIdKDdX7Sy4pKOJGoNe/RUWt2HtRodFWSdIrj8Xpu4OTlGQkSZL61Oyj11u8nq+CmWQR8RHqv7y/fSZRh0od9GnrT1PVuulosAbO+EeGIblaTPpleCvVL++b5bFnbDqpd5bZlkVrU7OEZj3VPMf1Wa1WrVixQp07d5ara8YzNRKTEjXhrwlaeWqlJNtyYrMenqW7/O/K8TmzY1/wPo3fOF5nI8/aX+tWrZu2Bm1VUFSQJOk/D/1HD5R/wCHnzwt7r+zVoD8GKcFIkEkm/V+H/1PzMln/nS49vtS+LFqDEg00r9O8fPnfAvK/7I4HAAo/xgMAyRgPAKR0R40JhnEjZIkJzV5PlphQ5/RlcfHI2VJhXgGShx99WZCl5NwgPDxcRYsWzXA/wpnbcDvhTFJSkh5bOEHH4v6QZAtmelcar7faDsjRc
WZsX62v//0sVUhjJLmphX8vff7wi/L1SLu+4GuBP+j381/KZLL91dfxelQ/PfZOvgxmki058Lfe+Ge4TOYESVKHUiP02cPD87iq7Au+FqcR83dq68kQSZKPh4t+fvZe1SlbVN/++62m7p5q3/eNFm+oT60+OTr+v1f+1eR/Jmvf1X3219zMbupz10AdOtRMaw+GpXnPvfXP62DiVCVeXxvz8bse15v3vCmzKf9+Dk6EnVC/Ff0Udf23IEY1GaWh9Yem2ufDPw7q+w0nJElVihfRshfuUxH3jCcJXotL0AMfr7P3/1n2wn2qVy7rQOdm2fnBKiI+QhM2TtBf521LGHpYPDStwzQ1Ltk4x+fLiShrlN77+z0tO7EszbZWZVvp23bfFroQ4oe9P9h7x5T0LKlF3RZlumybNdGqR5Y8ovPXzkuSZnScoWalmzmjVBRCd9Q/tABkivEAQDLGAwApFcgxIbkvS7o9WUJShy43z2hxWl+WgJsa3PulE7SkeOzq6di6cMfKbjjDsmZ5ICkpSb0XvZ4qmHmswks5DmYk6emmHdSvYRu9tma6Ai/OkSzRMpnjtTV8vh6Yv1KDar6gMS172IOXt/+cnSqYqenZNd8HM5LUvc492nVxtH4596kkaVXQd2q8u5YGNGqbx5Vl7d+zYRo2b4cuhttmsLi7mPV/g5qqTtmimr5neqpgZkLzCTkOZiSpYYmGmt9lvn4//rum7Jiiq7FXFZ8Ur7mHflBJr6V6ou1A/fJXScUn2P7ePf0O6mDiPHsw06N6j3wfzEhSVb+qmnz/ZL249kUZMvTVzq90l/9dqWZ9vNS+prYcv6o958J1MjhKb/++X5/2apjhMX/466Q9mHmkYdlbCmay41joMY1aN0pnIs9Ikiwmiz5r85nDgxlJKuJaRB/e/6Falm2p9/5+T9EJ0fYaxjUbV+iCGUl6qt5T2nJxi/65+I8ux1zWW5vf0ldtv8rwWv975L/2YKZl2ZYEMwAAAACAwishLhtLhYWmDWAS4x1cmMkWqOSkJ4unP31ZUGARzjhZUlKS+i1+W4djbvwGe7dyozTpocG3fEwPVzd93ul5nQrppVGrPtLxuECZTIaSXK5q1vGJWnT0Z717/+vafHavFp/9zB7MVHfvpIWPv5/vg5lkkx4arH0/H9SR2OUymZL00c7XVK/UT2pUpnJel5ah/24/q9eX7FN8gm3ZsFJF3fXtgLvVpKK/Zu6bmaq/xbim49S/dv9bPpfZZFb36t3VrmI7TdszTXMPzlVCUoIuR1/W8ujP1KBZfYWc6azT4ZfkVma+PZjpVq2bJracmO+DmWRtKrTRyMYj9fWur2XI0CsbX9H8LvNV1dfWX8bNxayvnmisLl/9paj4RC3acU731yiuRxuVS3OskKh4Tf/LNsvGYjZpbHvHLC0WeDpQr296XTEJMZIkP3c/fdL6E91T5h6HnC8jj1R7RA1LNNSEvyZob/BePdvgWVXzq+bUGpzFbDLrg/s+0OO/P67QuFCtP7tePx/+Od3eOtHW6FTLCma3Rw0AAAAAAHkqMUGKDctm0JJidosz+rK4F00naMkidKEvC+4whDNONujXd7U/eon9eafSL+iD9kNy5diVA0rqt76fadXRXXp70/uKMh+WJF0zH9ToTYMkGfZgpopbey3uPbnABDPJ5vd8Rw/OO6lI8wHJck1D/hihP/v+V36eRfK6tFSsiUl6b9kBzd5y2v5a00r+mjqgiUr6eGjO/jn6fMfn9m1j7x6rQXUH5cq5vd28NbbpWPWs0VOfbv9UG85tkCQdDtsrU9F9KuprUYJhWx6uc5XOeqflOwUmmEn2TP1ndCjkkAJPB+qa9ZpGrR2lH7v8KB83H0lS5eJF9G73ehq78F9J0hu/7lOTiv6qEJB6mb+p647pWpzte9GnWQVVKZ67n6PEpET9Z/d/NH3vdPtrtQJqaUrbKSrnnTYscoaKRStqfuf5ioiPkK+7Y2YJ5RclvUrq3VbvauTakZKkT7Z9oialmqTp7zPv4DyFxNqWHOxQqYPqFqvr9FoBAAAAAHewpCQpLjxnPVmiQ23vcTQXz5z1ZEnex1JAlmsD8hDhjBMN/uV9/Xttkf15+5Ij9HHHZ3P9PB1rNFb7agv16aZFmnf0PzJcQmQyJdm3V3R9UL/0/qTABTOSbZbQgh7f6JHFvZTkclXxlrPqtWisVvX/Nt9cz5XIOD0/f6e2ngqxvzbgnop6q2tdubmYNf/gfH2y/RP7tlFNRumpek/leh2VfSvrm4e+0abzm/TR1o90KuKUDBn2YObhyg/r/fvel6UA/kaCyWTSe63e06mIUzoaelSnIk5pwl8T9FXbr+zX07NJeW08ckVLdl9QZFyCXlywSwufu1euFtvn5EJYjOb8bQvP3F3MevHBGrlaY0R8hF7Z+Io2nd9kf61zlc6a2HKiPF3ydk1Tk8lU6IOZZK0rtFa/Wv3046EfFZ8Ur1c2vqIfu/xo/zsIiw3TzH0zJdmWeRvZeGRelgsAAAAAKMgMQ4qPyjhMuR60WKKCdf/Fk3I5Pck28yUmVDKSsjz8bTG7pO3J4uV/43FGoQt9WQCHIZxxkiFLJmtn5AL787bFn9XnnRzX0N5sNmv8A731bLMuGrPyS20LWyST2aoKLm30W5/P5WIpeDfkk1X0K6FPH5iiMZuGyGSOV1DSZg1b9pmmdRuXa+eIiotTtDVeJbx9cvS+m/vLuFnMerd7XfVpVlGS9POhnzV562T7/iMajUjT0D633VfuPrXo1kI/HvpR3/37na5Zr6lDpQ764P4P5GIuuEOAl6uXvmr7lZ5Y/oTC48K18dxGfbXrK425e4x9n3e719POM2E6ExKtXWfCNGXNEY3rWEuS9OWao/bl5p5sWVmlfT3SnMOaZFVMQoyKumXcuCs9R0OPavS60an6y4y9e6wG1hlYKPu75Hdjm47V9kvbdST0iI6FHdOn2z7Vm/e+KUmasW+GrlmvSZK6V++uKr5V8rJUAAAAAEB+YY3NeU+WmNBs9WUxSwqQpFtaXcx00wyWbPRk8QqQ3LzpywLkMwX3zmwBkJSUpO+3r9S8A3MVYdpnf/3+gKf1VZcXnFKDn2cRzezxmk6FDNWBK2fVuebdTjmvo7Wv0UiDgyZozol3JElbQufozTUl9G67J2/72OtO7NWodSOVZAmTW1wjNfDpqjaVmqpxRX/VLlNUbi7pz9BZuP2s3ripv8x3A+5Wowp+2ha0TfMOzNPas2vt+z/X4DkNb+i4gC4lV4urBtcdrB41euhc5DnVDqhdKEKC8j7l9WnrTzUscJgSjUTN2DdDJb1K2nv3+Hi46qu+jfX4t5uVkGRo6vrjalW9uEr6eOi/O85e38dFw9uk7bty4doFPRf4nE5FnFKZImXUoEQDNSjeQA1KNFDtYrXlbnFPt6Y/z/ypt/5+K1V/mU9bf6oWZVo46LuArLhb3PXJA5+oz7I+ik2M1cIjC9WybEvVK15PPx76UZLkZnbTsIbD8rhSAAAAAECuS0y4EaRkGLSknd0ia7TDSzPcfWTKSU8WT3/J
w0/KJ6vHALg9hDMOcDU6Uh9snKc/z/+iRJcgKcU98Jb+gzX1kTEZv9lBKgeUVOWAkk4/ryONu7+XDgQf1faInyRJv577QiU2++vFlo/e8jE3ntyvF9c/J7lEyiTJ6rFTO6w7tXVvBcVvaCVzdEPVLxegRhX81LiinxpX9FdJH3e9u+yA5tzUX2ZK37raHrxOHyybr0Mhh1KdZ2j9oXq+0fO3XOetKupWVHWK1XH6eR3pnjL36JXmr+iDfz6QJE3eOlm+7r7qWrWrJKlRBT+91KGmPlp5SIYhjfl5t2qVLqokW/slPfdAVfl5uaU6ZlBUkJ5e9bTOXzsvSboYdVEXoy5q1alVkiQXs4tqB9RW/eL1baFNiQYq4VZCq2NWa+Omjfbj1A6orSltp6isd1lHfxuQhap+VfVK81c0acskSdJbm99Ss9LNFJcYJ0nqW6uvShcpnZclAgAAAAAyk9yXJTpEignLRk+W6/s5oy+Lq1eK5cH80glaUocuVlcf/bF+izp16SZXV3qzAHcqwplctPviKX246f90IHK1ZIlJ9d01JQSoa8VB+qD9kLwrsBD64dEJ6vHzVZ2IXy2TKUnTDk9UsSK+6t+wTY6Ptfn0IY1c95xkibS9YJgkk+0OvsXzrDzLLVCSdYX2ht6rnVuay9hkax7v5WZRdHyi/Ti9mvuqYuVd6r/qVXuT8WQlPEtoSP0h6lerX6GYuZJf9K3VV1djrur7Pd9Lkt7c9KZ83Xx1f/n7JdkCmE3Hruh/x67qUkScLkVckSQV93bXU61SL2N1KepSqmDG391fsYmx9pkwkpSQlKC9wXu1N3ivfeaFp4tnqn26VO2it+99O8/7y+CGx2o8ps0XNivwdKAi4iP055k/JUnert4OX14QAAAAAHCdYUjx1zIOUzIKXWLDnNCXxTWdWSt+mfdk8QyQXNMulZ4pq1WGiduywJ2OUSAX/Hrgb/148hddTtoukylJStHOxTOxhnrV6KsX7nlUHq5uGR8Et8RsNmtx74/V8ccIXU76WyZzgibvGKdint/r4buaZPs4/5w5qmF/PiPDYvttCteE8lrY/f/079Wtmrl3jk5fO2Y7n2uE3EuuklvxP2UNbyxraCtFx9l+297d64KaNtivNZEblLA3IdXx6xWrpwF1BqhDpQ5ytfAbEY7wfKPnFRobqoVHFirBSNDY9WM1vcN0NSrZSGazSZ/3bqSHp2xUaLTV/p4XHqyuIu43hsEr0Vc0ZPUQnY20LXlWqWglzeg4QwEeAToWdkx7ruyxfQXv0cnwk6nOnxzMWEwWvdT0JQ2oPYAALp8xmUx6+963tTd4r4KiguyvD647WH4efnlXGAAAAAAUVNbYLHqyhKReKiz59SRr1se+HSazbfmvdJcHC5C8/FPMdEkRurgVoS8LAKchnMkFn+8fJ4unxT52G4ZFZV3u1Qt3P6VHajfP2+LuAC4Wi37v/Y3a//SkIk37JEusxm0aqWJec9SsfPUs37/93DE9EzhEhkuY7XgJZbWoxyxVDSil6sV6qGeN7tp+abvmHZindWfXyZAhkzlBbv7b5Oa/TYqpIRdLkhLcjmtPipmyFpNF7Sq104DaA9SwRENu1DuYyWTSay1eU3h8uFadWqXYxFiN+HOEZj88WzX8a6hUUQ992quhhszeLkkq7++pvs0r2t8fHBOsIauH6HSEbXm68t7l9X8d/k8lvWzLAdYKqKVaAbXUu2ZvSVJ4XLj2Be/TnmBbYLP3yl7JKn384MdqWb6lk68e2eXr7qvJ90/W06ueVpKRpACPAA2qMyivywIAAACAvJVozdlSYcn7pVhBwmHcfTNYKiyd0CV5P3df+rIAyPcIZ3JTorca+XbW6w88pVolyud1NXeUIu7uWtp7mjou6K84y0nJEqmhq57Rwm7zVbNExv0+dl88pSGrhspwCZUkuSSU1sJHbcFMMpPJpGalm6lZ6WY6G3lWPx36Sb8c/UVR1ijbDp5HlXKeTFG3onr8rsfpYZEHLGaLPrjvA4XHhevvi38rMj5SwwKHaU7nOSrnXU4P1S6lD3rU16r9QXqpw11yc7H9oBYSG6Khq4baZ8OU8y6nGR1nZPr35+vuq1blWqlVuVaSJKvVqhUrVqhZqWaOv1DclrtL3a3J90/WkmNLNLT+UHm5euV1SQAAAACQO5KSbMt/xYSmncGSbtCS3JclwvG1uXplPmslvdDFw0+ycPsSQOHE6JYLXBPKqWvZvhp//xPy9eAmX14p5uWjRd3/T91/7a9ElyAluQSr7+9DtKzXfJUtGpBm/31BZzR4xVNKcrkqSbIklNRP3WaqRvEyGZ6jgk8FjW82XiMajtBvx3/T/IPz7UtgVfWtqv61+6tr1a7c7M1DbhY3TWk7RUNXDdW+q/t0Oeayngt8TrMfnq1insXUr0VF9WtxY8ZMaGyohq4equPhxyVJZYqU0Q8df1AZ74w/Byj4OlXppE5VOuV1GQAAAACQvuS+LOkuFZZZ6BImyXBsbfa+LClnrfhnHrR4+ue8LwsAFHKEM7lgTZ8fVaJEibwuA5IqB5TUrM7TNWjFQBkuYbK6nFOPxc9odd+5qYKzg5fPqf/yJ5XkEixJMieU0Pyus7I948nbzVv9a/fXEzWf0LZL22QxWdS0VFOWLssnirgW0dR2UzXoj0E6FXFKpyNOa/ia4ZrRcYa83bzt+4XHhevZwGd1NPSoJKmUVyn90OEHlfMul1elAwAAAAAKG2tMJrNWQtPvyeKsvixpZrBk0ZPF05++LACQSwhncoGZNSzzlUZlKuurtt/qhfVDJEu0os1H9MjPw7S63//Jw9VNh69cUN+lTyrJ5YokyZxQTHO7zFDdUhVyfC6L2aJ7ytyT25eAXODv4a9p7adp4B8DdSn6kg6GHNSodaM0td1UuVvcFREfoWcDn9WhkEOSpBKeJfRDxx9UoWjOPwcAAAAAgDtAojWbS4XdNLvFWX1ZvDIKWm6awZK8n3tR+rIAQB4inEGh1KZqPU2KmaK3to6UyRyvUO1Sj4Vj9VXH1/XE74OV6HJJkmRKCNCsTjPVoHTlvC0YDlHGu4ymtZ+mQSsHKTwuXFuDtmrCxgma2HKihq8ZrgNXD0iSinkU0w8df1ClopXyuGIAAAAAgMMlJUqx4WmDlkxDl1ApPtLxtbkWuR6m+GWvJ4unP31ZAKCAYuRGodWz7r26Gv2Bvtz3ikzmRJ1L2KCev2+VXGy/sWJK8NeMjj+ocdkqeVwpHKmqX1VNfWiqhq4eqpiEGK05s0Zbg7YqIt7W7DDAI0A/dPxBVXz5HAAAAABAgWIYUlxkDnuyhDqnL4vFLW1PljQzWNIJXVzcHVsXACDfIJxBofZMs466GhOmeSfel8lkSJbrwUyin6a3/z81LV89jyuEMzQo0UBT2kzR82ufV0JSgj2Y8Xf31/91+D9V86uWxxUCAAAAwB0uPjqLpcLC0p/dkpTg2LpMlvTDlaxmt7h60ZcFAJCpAhnObNy4UZ988ol27Nihixcv6tdff1X37t3t2w3D0KRJkzRt2jSFhoaqRYsW+s9//qO
6deva94mLi9PLL7+sn376STExMXrooYc0depUlS+fvYbwKDgmPNBHV6JDtPrSVEmSKdFX3z40TS0q3pXHlcGZWpZrqQ/v+1DjN46XIUO+7r6a3mG6avjXyOvSAAAAAKDwSIi/MXsluz1ZYkKkhFjH1+bhm3VPFi//1LNb6MsCAHCQAhnOREVFqWHDhnrqqaf02GOPpdn+8ccf6/PPP9esWbN011136b333lP79u11+PBh+fj4SJJGjx6tpUuXasGCBSpWrJheeuklde3aVTt27JDFYnH2JcHBPnt4uD75q4S2nN+mV+97Rs2YMXNHerjKw/Jx89HGcxvVp1YfVfWtmtclAQAAAED+lNyXJSc9WWKc1JfFzfvGbJbs9GTxDLAFM/RlAQDkIwXy/0qdOnVSp06d0t1mGIamTJmi119/XT179pQkzZ49W6VKldKPP/6o5557TuHh4frhhx80d+5ctWvXTpI0b948VahQQWvWrFHHjh3TPXZcXJzi4uLszyMibEsjWa1WWa3W3LxEOMDoex7VaD0qSfx93cGal2yu5iWbS8rdz0HysfhsAWA8AJCM8QBAsjwdD5L7ssSGyhQdIsWGSTEhMkWHSrG2QMWUYqaL/XFsuEwO7stiWNztAYthn63iJyPVnykee/jdel+WJENKYjxG/sDPCEDhlt3/tgtkOJOZkydPKigoSB06dLC/5u7urtatW2vz5s167rnntGPHDlmt1lT7lC1bVvXq1dPmzZszDGc+/PBDTZo0Kc3r69atk5eXV+5fDIACJzAwMK9LAJBPMB4ASMZ4ACDZbY0HhiGLES/XhGtyS4iSW+I1uSVck+v1P90So2zPE66l2BYlt4QomZWYexeRjiSZZXUponiLt+JdvBVv8ZbVxVvxliKKd0l+nLztxmuJJrf0+7LEXv8KTX4h7PoXULjwMwJQOEVHR2drv0IXzgQFBUmSSpUqler1UqVK6fTp0/Z93Nzc5O/vn2af5Pen59VXX9XYsWPtzyMiIlShQgW1bdtWxYoVy61LAFAAWa1WBQYGqn379nJ1dc3rcgDkIcYDAMkYDwAkSzMeJN7oy5LerBVTTIgUE2Z7LTbMvpyYKTEuq1PdNuP67JQ0M1k8/CTPABle/pLHTbNd3H1kNpnkIcnD4RUCBR8/IwCFW/KKW1kpdOFMMtNNv3lhGEaa126W1T7u7u5yd087ddbV1ZWBFIAkxgMANzAeAEjGeAAUYkmJ10OUzHuyWKJD1DrolDxPvG4LYOKvOb42N297uJKtniye/pKnn0xmWx/ezO+gAMgN/IwAFE7Z/e+60IUzpUuXlmSbHVOmTBn765cvX7bPpildurTi4+MVGhqaavbM5cuX1bJlS+cWDAAAAAAA8pZhSHERqcOV6NAsQxfFhmfr8GZJfpIUcwu1WdxvhCte18OWVEFLOqGLp7/k4nYLJwMAAM5S6MKZKlWqqHTp0goMDFTjxo0lSfHx8dqwYYM++ugjSdLdd98tV1dXBQYGqnfv3pKkixcvat++ffr444/zrHYAAAAAAHAbDEOyRqcOUOyPQ296fNM2w9F9WSwyFQmQKc2sFf/MgxY3etwCAFAYFchw5tq1azp27Jj9+cmTJ7V7924FBASoYsWKGj16tD744APVqFFDNWrU0AcffCAvLy/169dPkuTr66shQ4bopZdeUrFixRQQEKCXX35Z9evXV7t27fLqsgAAAAAAQLKE+Mxnrdgfh6Xez+F9WUySh282lgq7EbpYXYtqxZqN6tylC0sYAQAASQU0nNm+fbvatm1rfz527FhJ0uDBgzVr1iyNHz9eMTExGjFihEJDQ9WiRQutXr1aPj4+9vd88cUXcnFxUe/evRUTE6OHHnpIs2bNksVicfr1AAAAAABQaCUm2Jb/ylbQcj1siQ6RrFGOr83N53qA4p/1UmHJjz18JXMO7x1YrVIWfXABAMCdpUCGM23atJFhGBluN5lMmjhxoiZOnJjhPh4eHvr666/19ddfO6BCAAAAAAAKGcO4EbLEhGavJ0tMaLb7stwWF4+cLRXmFSB5+NGXBQAA5JkCGc4AAAAAAIBblNyXJd2eLCGZhC5hDu/LIrNLBmGKv1I1u785dHH1dGxdAAAAuYxwBgAAAACAgiohLhtLhYWmDWAS4x1cmEny9Mt2Txb7fu4+LP8FAADuCIQzAAAAAADktcQEKTbsphksmYUu1wMXZ/RlcS+aTtCSRehyK31ZAAAA7iCEMwAAAAAA5JakJCkuPGc9WaJDbe9xNBfPnPVkSd7H4ur42gAAAO4whDMAAAAAANzMMKT4qMxnraQXusSESkaSY2tL7suSk54snv70ZQEAAMhHCGcAAAAAAIWbNTbnPVliQh3fl8Vkljz8ctaTxStAcvOmLwsAAEABRzgDAAAAACgYEhNuBCnZ7ckSEyJZox1fm3vRnC0V5ulvC2bMZsfXBgAAgHyHcAYAAAAA4FzJfVmiQ6SYsGwELdf/jItwfG2uXimWB/NLJ2hJJ3Tx9KMvCwAAAHKEcAYAAAAAcGsMQ4q/lkGYEpZx6BIb5oS+LK7pzFrxyzpocfVwbF0AAACACGcAAAAAANL1viyZ9WQJSb1UWPLrSVbH1pXclyXDpcL8U8xgSRG6uBWhLwsAAADyLcIZAAAAAChMEq05WCosxX4JMY6vzd03g6XCbu7JkiJ0cfelLwsAAAAKHcIZAAAAAMiPkpJsy3/FhKYzgyWj0CXMiX1ZMpm1kl7o4uEnWfgnKAAAACARzgAAAACAYxmGFBeZwVJhoZkHLTIcW5u9L0vKWSv+mQctnv70ZQEAAABuE+EMAAAAAGSXNSaD5cGSH9/oyeISfVUdwy7J5d9o5/RlSTODJYueLJ7+9GUBAAAA8gjhDAAAAIA7T6I1i1krGcxuyUFfFpOkW5pf4u57PVDJoieLp/+N/dyL0pcFAAAAKEAIZwAAAAAUXEmJUmx4DnqyXJ/dEh/p8NIM1yKKkYc8A8rK5JWNniye/vRlAQAAAO4Q/NQPAAAAIO8l92XJqifLzaGLM/qyWNzS9mRJM4MlbeiSYJgVuGKFOnfuLFdXV8fWCAAAAKBAIZwBAAAAkLvio7NYKiws/aAlKcGxdZks6YcrXgGSp1/GQYur1631ZbE6uM8MAAAAgAKLcAYAAABA+hLib8xeyW5PlpgQKSHW8bV5+GY6a8UWuvinnt1CXxYAAAAA+QThDAAAAFDYJfdlyUlPlhjn9GWRm/eN2SzZ6cnieX2Wi9ni+NoAAAAAwEEIZwAAAICCwjCkuIibwpSwrEOX2HA5vi+Le+azVtINXfwlF3fH1gUAAAAA+RDhDAAAAOBshiFZY3K2VFjya87qy5LurJWMZrcESK6et9aXBQAAAADuQIQzAAAAwO2w92XJKGi5aamw5NktiXGOr83DLxtLhfml3uZelJAFAAAAAByMcAYAAACQbH1ZYsKy2ZPl+nJiMSFS/DXH1+bmfaPXSnZ6sngFSB6+9GUBAAAAgHyKcAYAAACFS3JflsxmraQXusSGO742e1+WDGatpBe6ePpLLm6Orw
0AAAAA4DSEMwAAAMifDEOyRme/J0vK141Ex9ZmdknRg+Wmniw3ByspQxc3L8fWBQAAAAAoEAhnAAAA4HgJ8TlbKix5P4f3ZTHZlv/Kcqkw/9RBi7sPfVkAAAAAALeMcAYAAADZl5hgW/4rW0HL9bAlOkSyRjm+Njcfycs/41kr6YUu9GUBAAAAAOQBwhkAAIA7kWHcCFmy25MlJtQ5fVlcPDKftZLeYw8/+rIAAAAAAAoMwhkAAICCLLkvS7qzVjILXcKc1JclvVkrWcxucfV0bF0AAAAAAOQxwhkAAID8IiEuG0uFhd70OERKjHdwYSbJ0y9nPVk8/enLAgAAAABABghnAAAAcpnJSJSigiVrZPaWCkue3eKMvizuRdMJWrIIXejLAgAAAABAriKcAQAAyEhSkhQXnqOeLC7RIeoWFyHtdnBtLp4568mSvI/F1cGFAQAAAACArBDOAACAws8wpPioHPZkuf66kZSjU+V4Ea/kviw56cni6U9fFgAAAAAACjDCGQAAULBYY3PekyUm1PF9WUxmycNPhqefQmNN8itTReYixVPPWkkZwCQ/d/OmLwsAAAAAAHcYwhkAAJA3EhNuBCnZ7ckSEyJZox1fm3vRTJYKS6cni6e/5OEnmc1KsFr114oV6ty5s8yuLCEGAAAAAADSIpwBAAC3J7kvS3SIFBOWjaDl+p9xEY6vzdUrxUwVv2z0ZLm+H31ZAAAAAACAAxHOAAAAG8OQ4q9lEKaEZRy6xIbluC9Ljpld05m14pd10OLq4di6AAAAAAAAbgHhDAAAhZE1NmdLhSXvl2R1bF3X+7JkvFSYv9L0ZPEMkNyK0JcFAAAAAAAUGoQzAADkZ4nWHCwVlmK/hBjH1+bum8FSYTf3ZEkRurj7Smaz42sDAAAAAADIxwhnAABwhqQk2/JfMaGp+65kGrqEObEvSyazVtILXTz8JAs/RgAAAAAAANwK7qoAAJATyX1Z0mtwf3PocnPQIsOxtdn7sqScteKfQdCSYjt9WQAAAAAAAJyKcAYAcOeyxuS8J4uz+rKkmcGSRU8WT3/6sgAAAAAAABQQhDMAgIIv0ZrFrJUMZrc4qy+LV3aWCksRurgXpS8LAAAAAABAIUY4AwDIP5ISpdjwHPRkuT67JT7S8bW5FrkepvhlryeLpz99WQAAAAAAAJAu7hgBAHKfYUhxkVn3ZLk5dHFGXxaLW9qeLCmDlYxCFxd3x9YFAAAAAACAOwbhDAAgc/HRWSwVFpZ+0JKU4Ni6TJb0w5WsZre4etGXBQAAAAAAAHmKcAYA7hQJ8Tdmr2S6VFhY6m0JsY6vzcM3Zz1ZPP3pywIAAAAAAIACi3AGAAqa5L4sOenJEuOkvixu3jdms2SnJ4vn9VkuZovjawMAAAAAAADyCcIZAMgrhiHFRWQ+ayW90CU2XI7vy+Ke+ayVdEMX+rIAAAAAAAAA2UE4AwC3yzAka7Q84q9Kl/ZJ8RE3zWAJvelxitec1Zcl3VkrN72eMnRx9aQvCwAAAAAAAOAghDMAkJK9L0s2lwq7vp9rYpw6StJ+RxVmut6XJaulwvxSb3MvSsgCAAAAAAAA5DOEMwAKp6RE2xJh2QpaUsxkib/m+NrcvG/0WkkTtGQQunj40pcFAAAAAAAAKCQIZwDkb8l9WTKZtZJu6BIb7vja7H1ZApTk6aegsDiVrlJb5iLFMg5aPP0lFzfH1wYAAAAAAAAg3yKcAeAc1/uyZDprJWXQkvJ1I9GxtZldbvRgyW5PFk9/yc3LfohEq1XbVqxQ586dZXZ1dWy9AAAAAAAAAAo0whkAOZcQl36D+zShS1jqoCUxzsGFXe/LkmlPFv+bgpcAyd2HviwAAAAAAAAAnIZwBriTJSZIsWGZzFq5OXQJsz22Rjm+Njcfycs/41kr6YUu9GUBAAAAAAAAUAAQzgCFgWHYeqzkpCdLTKhz+rK4eGQ+ayW9xx5+9GUBAAAAAAAAUGgRzgD5SXJfljTLg2UVuoQ5qS9LerNWspjd4urp2LoAAAAAAAAAoIAhnAEcJSEuGz1ZQtMuKZYY7+DCTJKnXxZLhd0UwHj605cFAAAAAAAAAHIJ4QyQleS+LNnqyZJidosz+rK4F81G0HLTkmL0ZQEAAAAAAACAPEU4gztHUpIUF5758mBpQpdQ23sczcUz81krGc1usbg6vjYAAAAAAAAAQK4inEHBYxhSfFQOe7Jcf91IcmxtyX1ZctKTxdOfviwAAAAAAAAAcAchnEHessbmvCdLTKjj+7KYzJKHX+bLg6U3u8XNm74sAAAAAAAAAIBMEc4gdyQm3AhSstuTJSZEskY7vjb3ojcFKln0ZPH0twUzZrPjawMAAAAAAAAA3HEIZ5Bacl+WjGatpBu6hEpxEY6vzdUrZ0uFeQZInn70ZQEAAAAAAAAA5CuEM4WVYUjx1zJYKiws46AlNswJfVlc05m14pdJ0JLcl8XDsXUBAAAAAAAAAOAEd3w4M3XqVH3yySe6ePGi6tatqylTpuj+++/P67JSs8bmbKmw5P2SrI6tK7kvS4ZLhWUwu8WtCH1ZAAAAAAAAAAB3rDs6nPn55581evRoTZ06Va1atdL333+vTp066cCBA6pYsWLunzDRmvmslYxmtyTE5H4tN3P3tc1eybInS4rQxd2XviwAAAAAAAAAAOTQHR3OfP755xoyZIiGDh0qSZoyZYpWrVqlb7/9Vh9++GG2j2M6sU46l5hF6BLmxL4sOejJ4hVgm/1iuaM/CgAAAAAAAAAAOM0de0c+Pj5eO3bs0IQJE1K93qFDB23evDnd98TFxSkuLs7+PCLCFra4/PK05J67y3QZZld7kGJ4+ksetrDF8AqQPPxtr13/MpIb33v6Sy630JclyXD8EmhAIWe1WlP9CeDOxXgAIBnjAYBkjAcAUmJMAAq37P63fceGM8HBwUpMTFSpUqVSvV6qVCkFBQWl+54PP/xQkyZNytF5DJkU7+KteIu3rC5FFG/xVvz1P60uPoq3FFG8i7es1/9M3jfR7J5+X5bY619hyS+EX/8CkB8EBgbmdQkA8gnGAwDJGA8AJGM8AJASYwJQOEVHR2drvzs2nElmuikAMQwjzWvJXn31VY0dO9b+PCIiQhUqVJC1+Uglliovw9PPvnSYkbxsmLuPzCazPCTdwpwWAAWE1WpVYGCg2rdvL1dX17wuB0AeYjwAkIzxAEAyxgMAKTEmAIVb8opbWbljw5nixYvLYrGkmSVz+fLlNLNpkrm7u8vd3T3thgdekqVYMUeUCaCAcXV15QcrAJIYDwDcwHgAIBnjAYCUGBOAwim7/12bHVxHvuXm5qa77747zfTBwMBAtWzZMo+qAgAAAAAAAAAAhd0dO3NGksaOHauBAweqadOmuvfeezVt2jSdOXNGw4YNy+vSAAAAAAAAAABAIXVHhzN9+vTR1atX9c477+jixYuqV6+eVqxYoUqVKuV1aQAAAAAAAAAAoJC6o8MZSRoxYoRGjBiR12UAAAAAAAAAAIA7xB3bcwYAAAAAAAAAACAvEM4AAAAAAAAAAAA4EeEMA
[base64-encoded PNG figure data elided]", + "text/plain": [ + "<Figure size 2000x700 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "fig, ax = plt.subplots(1, 1, figsize = (20, 7))\n", "plot_df = AirPassengersPanel[AirPassengersPanel.unique_id=='Airline1'].set_index('ds')\n", @@ -522,7 +1110,100 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
[HTML rendering of the dataframe elided; equivalent text/plain output follows]
" + ], + "text/plain": [ + " unique_id ds y trend y_[lag12] month\n", + "0 Airline1 1949-01-31 112.0 0 112.0 -0.500000\n", + "1 Airline1 1949-02-28 118.0 1 118.0 -0.409091\n", + "2 Airline1 1949-03-31 132.0 2 132.0 -0.318182\n", + "3 Airline1 1949-04-30 129.0 3 129.0 -0.227273\n", + "4 Airline1 1949-05-31 121.0 4 121.0 -0.136364" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "AirPassengerPanelCalendar, calendar_cols = augment_calendar_df(df=AirPassengersPanel, freq='M')\n", "AirPassengerPanelCalendar.head()" @@ -532,7 +1213,18 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkMAAAGwCAYAAACq12GxAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAACy/0lEQVR4nO29d5gc1ZU2/lbHySPNjEYJITAiGUkEYUDCmGAQMGCz2GvjD1YEY3ZZzANY9mKwwSCvbWz/dkEs35pgm2g+DLsInAaBLBGEEoogQBJJAaTJGk2ejvf3R3VVV+cKN436vs+jp0Y9PT1nbp1769z3nPdcjRBCoKCgoKCgoKBQpvCJNkBBQUFBQUFBQSRUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZIyDaANmRTCaxb98+1NbWQtM00eYoKCgoKCgo2AAhBAMDA5gyZQp8vuLcjwqGSmDfvn2YNm2aaDMUFBQUFBQUXODTTz/FIYccUvQ9KhgqgdraWgD6YNbV1Qm2hg1isRheeeUVzJ8/H8FgULQ50kONlzOo8bIPNVbOoMbLGcptvPr7+zFt2jTzOV4MKhgqASM1VldXd1AHQ1VVVairqyuLCeIVarycQY2XfaixcgY1Xs5QruNlp8RFFVArKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpgBCCaEK0FbmIJ5KIxpOizcjBaCwBQkRbkQvdLvkMG5HRuSCnXbLOxWSSYDQmn2HReBIJ+VxezUUH6BwYxaf7hxGJi7VNBUMKuPnZd/CTjX70DEVFm2IikSQ4f/EbuPiBlUgm5VlUOgdGMfdXr+PJD+WaOp90DeL4Ra9g0V/eF21KBpa934ETfrYcqztKnxrNE//96keYvehlbNi1X7QpGfjec1tx50Y/egYjok3JwIJH1+GLv3oVw9G4aFNMjMYS+PJ9K/F/3/OLNiUDu7qHcPyiV3DXn98TbUoGlm/rwIk/X4FVks3Fx1ftwhm/fhX3tG4XaodcK7qCEKz+pAcjCQ17eoZFm2Kio38UH3cN4YOOQYwK3jFYsfWzPgxG4tgzKNeCsnF3LyLxJLZ8ekC0KRlY83EPkgT4VLLxevPDbsQSBNva+kWbkoHVn/RgNKFhp0RzMZ5IYs3HPegejKCtb1S0OSY+6RpCe38EuwdFW5IJYy6+LeFcTCSJdHOxo18P/JvrwkLtUMFQmWM0lkDfiL7bS0hE63b0pxfdhETMkDFxZUvedQ6k7JLoHgJAx4B+H2UbL8MumXwrEk+gdzgGQC67eoaiMMyRiaU17qFkLp/2LckM6zDXCMGGZMFY6yfVVQi1QwVDZQ5r0CHTgzTTLoGGZKG9X84FuD21Y5fpHgJAR5+c49VhjpdgQyzo7E+nxmQar/Y+Oeei6VuC7ciG6VuS7QCkHa/UmjpRBUMKItFhWYBlmrwdGQ8GeaZvZ7+kTEe/pAvwgHwL8GAkjqFUIalMwWMGGyqrXRJFQ8YaQaBJtUaY7LFENgEWllYus8wNpgqGFISiXVJmqF3SBVhWZsgMhiQyjBBieTAINsaCTKZDHsNknYuyssftkrPHMo0VIcT0e3msAoajcQyM6mUaE1XNkIJIdEq6oMiaJkvvRuWCjLvRA8MxszWCPFbJ7PNypslktatT0iDNZI/lMQn9I3FEjLkokV2Gb1WH/KitCAq1RQVDZQ5Zd8my7kY7JFzoEkmCrpQUW0YWDZBrvGRlHWVNR7VLmr7L8C9JxiuZJGkxgyQ2AVljJdCObMhSLwSoYKjsYSgMANmCDvnsisQT2J/qxSSHRTp6BiPmw1OSoQKQ+XCXyCxp69Fk3wAAstllXSMEGmJBz1AU8ZQxco2VZS7KY5YKhhTkQUefnLtRGe2SVu0j6c5d3lSn1bcEGpKFDJZWIrs6JGRgYokkeobS81EWv5e1CF5WljYdDImtFwJUMFT2MBQGgDwP+KFIHAORdKdbWezqHJCVapaPRQPkrTWRlenoHBMsrUBDLOgaiGT4lCwMX2bgKNCQLHRKytK29+m+pZghBaGwKgwAeRgY64ICyGOXMXEBuR7u7ZIuwO2yLsASBkPZc1EWu/SmrDHz/9LMxZw1QpAhWZDRtwCJmaEBlSZTkABWhQEgz+S17kQBmeySj0UD5FfVAHItwJ0SMmkDkThGLAehyjJe2RsTWRiYziy7ZLmPY4KlFWhHNoxyCBUMKQhF9u5K1gVYlkWlQ1JFhozsHiAnM6SrfeSrGerok5UNzbJLkrmYbZc0a0SffL4FyLuRM5ihSfWqZkhBIMZC0AHIGaRJMlQAshWBAg3Jgow1Q/uHo4gl0sbIwnRks6HS2DWQzdIKMiQL8tplXSMkMQpybuSsTVmbaxUzpCAQsjJDuXbJYZiMTAeQuRuVZQGOJZLoHrQEaQJtsULWDYCsczGbsZJlvGS1S8a6r3giia4B+TYm1qasok+sB1QwVNbIybtLsgJ3Zu2SZUkZZEjrJToPyboblSWN0T2YrfYRZ4sVucX5ggzJQo5dkgxYTvAoyVy0+jwgj11WRaAs61bPUDQjuCbQxBljgbEBaKgOIRzwC7ZGBUNljbHAwAByPEgJIVLu3kdjCRwYTqt9ZHkoZNd0yLIAj4XifEAehk9Gnwfy1Q
wJMsQCa1NWQI51C8gzFyWxS6aGi4AKhsoaY+XBIMMOazASx3A0kfGaDHZls2gSmAQgn28JMiQL0hbeSspYjQWWFpCDSZPRJiAPuyfIjmzI1HARUMFQWcNwxoBP37XLsM4RQsxFJW2XeMOyxwqQY/fennMPxdsE5I6XHFalG2fKNl7tEvo8kOtfMvi8tSmrTHblrqfibQLyzEU5zDI3TJMUM6QgGsYkaa7VI3MZdn29wzFEU9tiwy4ZFhVj4lopXRl2ftlUswz3EMi1SxKzTGYoPV4irUmjM2suyuDzutona7wksMuwqTrkR3VYrzWR4T5mrxGyNEDNsUv8LQSQDrSbVTCkIBJWhcHket0ZZdhdGQ+rxuoQwkF9oZNh8hp2GWMFyGGX8WBI30OR1qTRLqldxoNBJp9PWE46N+ySwbf6RmJmU9ZJEtll+NbE+gr4NHlYmGyfl8EmIM9cFGmMBcYGQDFDCkJhKAz8Pg1NNSEAQEKCWWKoRJrrKmBkpGRgOwy7Jo+rNF+ToVjZDIZSdsmwcwfS9ROGXZJsknPHS4J72DMUQSJJ4NPkYmmNwHF8VRCVxsZEArsM35pYawmGpLBLzrmY7fMSDBUAS1CraoYURMJgOpprwwj4dTeQYSdj9A+ZVBeWatdn2DVFMmbIqDWZIuludIpEzFA0nkRPSu0zRSKmoyN15l1TTRjB1FyUgbFqt6TIUlNRCv8y7JpUn94wyXAf8/m8DPexI9sukcZYkK/0QCRUMFSm6LDka/2aPIV11gnil6jgLzu9AsjxYMiXJpNpAZaJmjeKp0N+HxpTbKhMY6X7vP6aDA/3TLvkEVmk164wfBIVK2f7PCDn2iWDTdamrCoYUhCKjn4rA6O/JgM1n7kb1Q2Tya5J9ZY0mQSrSjYFDoh/YA1H4xgY1dU+MlHzZut/C+soQypDVp9Ps7RypaM6+vPYJcF97MhKDQPi/Ws0lkDfiN6HTKaUtdGUNeDT0FgdEm0OABUMlS2sDIxMu6vOvLtkmewKS7NLtqp9pkgUpBm+VRXyo64iCEAOZkhWpiOfb0nBWA2k7ZIx6JhYVwG/JGkyeeeiblNF0Idxlam5KP4WZpRp+CztSkRCBUNlCuuDQapdn+UUY1kW4KRF7WOtUxC9e+8fjWM0pu/zJo+zSP4F22XducuV6pSV6Uj3WzF9S7xZ6aDD6vMS3MiOPEya6PtobcpqnYui5fVW3zI3ACINSsHqW7JABUNlinZJ6xTa+9KnGKcfWCIt0pV38SSBpulFrj5Njt278VCorwyiKpQ+20f088pa02H6lkB7DGSkySRiQ9vzbUwksMsMOmrlYdKsTVkzWVo55mJtRQA14YD5umi7rL18fBJuTCZKcFq9ARUMlSk6LTsGTZL6iVgiiZ6hPAyMJAudofaRzS4r0wHIZZcmVXG+dbz010SzaIDlwVAvG2NlVW3JYVdmU1aZ1gjrepp+XbRdnXnWCPGelelbskAFQ2UKa48HY0ERzXR0DehFdUG/hoaqkHQMjNEPI717F2YSAEvevS6csQAL3432WerRJFyAMxkYkRbp6MgzF0XbZW3KavUv8b6VbsoaCvikCbatnc2tGxMimBJN2yWPbwFWxkqOHkPAGAyGfvOb3+Dwww9HRUUF5syZg5UrV9r6uVWrViEQCOCEE05ga+AYgFVhYJXWi25pnz4eRKd0DVpXvF3pJm8A0ikWwauKWcdkuYeAeLusjTP9EgUd1gXYL0k6KhJPoHdYn4uTJBIzWJuyNlan01GimbR0Ubc+F/2SqO/SPh/OmIuimaGOgTwbEwnmYqdk55IBYywYevbZZ3HLLbfgxz/+MTZv3owzzjgDF154Ifbs2VP05/r6+nDllVfiy1/+MidL5YYRdFQG/airCFh2V3IwMM0mA6O/LvrBYG3/D0CaHVah3ahou6ySbM1kHQUalEJnnlSGaN8ybAoFfKivDEpTM2RV+/h9mjQP0o6+bJZWf11GnwfE38eOPGuEBFMxo05OFoypYOjee+/Ftddei+985zs49thjsXjxYkybNg0PPvhg0Z/7l3/5F1x++eWYO3cuJ0vlRoelAFHTtPSuT/TEzdotyFIc2ZlV7CfLAyuj1sRnDYZE70bzSMVFGgRd7TOYOuncKq0Xzihk1VdJ83DPOkRTlr5M5hpRn8nSit/IpRmYzPsoy1yUTU0m11EcABAo/RY5EI1GsXHjRtx2220Zr8+fPx+rV68u+HOPPfYYPv74Y/zhD3/Az372s5K/JxKJIBKJmP/v7+8HAMRiMcRiMZfWy4W9+wcB6Lu+WCwGkpJrxeMJoX/jvt5hAEBTTUi3I7WOxGJxoXa1HRgBAEyoCSAWi5kLXTQq1ifa+3S7mqrSdiUJEInGEIuJ2efo/Vb0+dNYFcBwVA9AkgRCx2pvzxAAoCYcQMhHkEz5fCKZFGvXft2u5lrD53Wnjwmfiym7jLkISdaIA6k1ojqIWCwGYwsQEb1GGHOx2piLGpKEpOaiv8RPswEhJF1jVeVHJB5PvS52LlqbsjZUBpja4uSzx0ww1N3djUQigYkTJ2a8PnHiRLS3t+f9mQ8//BC33XYbVq5ciUDA3p96zz33YNGiRTmvv/LKK6iqqnJuuIR4fZ8GwI/4QA9aW1vx6R4fAB927tqN1tadwuza9JFux4F9O9Ha+gn29+j/3/z22wju2yLMro/2+gFo2L19K1o73kEsqv9/9Zo12F0tzCzs7tLt+GjrBkR3Ahr0///978sxTtCGaygGROP6XNu06lV0jwJAAATAsmXLxBgF4IM+3eerfTG0trZia7f+/66ubrS2tgqz67U23Y7E4H60trZid2ou7totdi6uTtkxur8dra2t2PeZ/v9t27ejdXCbMLve/Vi3o3P3h2ht/QCDA7rPb9q8GdFd4liY3Z3GXNyI+C4ARP//8uUrMF7QXByOAxFzLr6G3igABECI2LnYOaLbEfIRvLH8lYy0Im0MDw/bfu+YCYYMaFkjRwjJeQ0AEokELr/8cixatAhHHXWU7c+//fbbsXDhQvP//f39mDZtGubPn4+6ujr3hkuEt1/aAezejROPORwtFxyNrUu3A/v2YNqhh6Kl5fPC7Hr2sQ1A13586QvHo+WEKVjSswnbDnRj5qzZaDlpqjC77n77VQAxXPzlL+KYSbX4xbuvoz8WwSmnnobjD20QYlMiSbBw3d8BEFx6wTmYWFeBf1v/dyTiSZx19tmYYjkSgCe2tw8AG9ZgfFUQX714Pj7pGsI9b69CkgDnnXcegsGgELtiW/YB77+LIyY3oqXlZGBrO5748B2Mb2hES8sXhNgEAO8s3QHs2o0Tjj4cLRcejfdf3oG/792NQ6ZNQ0vLccLsen3Ju8DefThl1lFoOfNzWP2n97Gm8zPMOPIotJx9hDC7Ht61BsAAvjzvZJx99AQ8umctdg/2Y/bxx+OCmVOE2JRMEnzfMhcn11fghxv+jngsiTPPOhuHjBczFz/oGADWr8G4yiD+4SvzsatnCL/YsgpJiJ2L63buB7ZswNTx1bjooi8y/V1GZscOxkww1NTUB
L/fn8MCdXZ25rBFADAwMIANGzZg8+bNuPHGGwEAyWQShBAEAgG88sorOOecc3J+LhwOIxzODeWDwaAw56GNriHjrJoq/e8KpGhczSf0b+wc1E8Unzq+Wrcr1bHP5xNnl1XtM7WhBsFg0My9+/x+YXb19o8ikSTwacDk8TXw+zRTxeLzB4TZ1TOcrssJBoMIh9LHcYicQ11Dul2T6isRDAYRChpLnybU57tTc3FK1lzUBM/FrtRcnGzMxYA+FzWBcxEAOgdSa0SDblfAXCPEzcWugYjZlHXy+GoE/T5zLvqFzkW9I7Y5F4PG4cRi56K5RtRXMLfByeePmQLqUCiEOXPm5NB7y5Ytw7x583LeX1dXh61bt2LLli3mv+uvvx5HH300tmzZglNPPZWX6dLBVD7US1YQ3JdZtKlJIPk31T5+H8ZX6RNLhoZ9hhpjQm26SFmGos3sZmqy9PPpyFIEytJo1NorCrAUKktU2A1AiqaL1qasE801Qv+eyPHKbsoKyFFwnq2CNRWUogxKob0v07dkwZhhhgBg4cKFWLBgAU4++WTMnTsXjzzyCPbs2YPrr78egJ7i2rt3L5588kn4fD7MnDkz4+ebm5tRUVGR83q5IbtXhwwP0aFIHAMRY/cuj12dlv4hxgNUhkZv2co7QI7AI7snkymtF2VQCun2/3K1bbD2igIsDyzhwWNacQrI4VvZTVmtdomdi7nKKBlaN3Rm+7wkx3FYlXcyYUwFQ5dddhl6enrw05/+FG1tbZg5cyZaW1sxffp0AEBbW1vJnkPlDqvCIHvXJ5KBMRaU6pDfPNtHBmm90U3ZGnTI0IqgPUv6DMDSpFK8XcZuVJYzrbIZK9MugYZZ5+JEidpJWJuyTsxi+KTw+dp0KwmZ5mK+NUKkf7Vn+7wR0AqzSEeHhD2GgDEWDAHADTfcgBtuuCHv9x5//PGiP3v33Xfj7rvvpm/UGEL/SByRuD4dJtRm7vpE9urId4qxDCdS55u4MrAKnXkWYBmOVenM2iXL0ugtfUirPOm7gUgcI7F0XQcgR8ra2pS1NrUxkcnn8zMwIizSke1bgBz+levz+uvimSE5g6ExUzOkQAfGbmF8VRAVQb1YU4bdVb5TjGU4yiF/MGQEaUJMApB55pABGViY7F2yL7XCiFyAk0lipjvTdoln0YwaubqKACpD/pRd+veEMgqWmkJjQyIF09GXOxf9Mmzk8tTAyHCsSk7dl9kAVRM6XmnGSp6Gi4AKhsoO+R7ucuyuMildQI7daL56ACl27wO5eXdNguLb7HqANDMkbgHePxxFLKGrfSZIVDOU3U0ZkIRRMA5orbUyMPLYlW9jItTnB/KtEfpVhsLubJYWELc5IYSYopTmWsUMKQhEvjNhzCMTJMhvW08xlmF3lZ13B6x2CTEJQOaZQwZEHz4aSyTRPZj5wPJLsAAbD4XG6rTaxy9B4W2+uShFoN2X6/N+g7GSwK6M8TLtEmGRjuKMlRCTEE8k0ZVVnJ9xmLMgw3qHY4imilNlOrEeUMFQ2SFf3l2G4sh8pxjLUNidbxcjBaswIB+T1j2oq30CPg2N1ZlqH0Ccf3VmKaMAOaT1RevRJBAz5E8Ny+DzcrG0ncVYWkF29QxFkST6RrexRh8vzfK0TwjemDRUhxAOiDmmpBBUMFRmyKd8kOFwyLyMleCFjhCSlxkSrfgZjSVwINUI0lpjJZqxsp50btjis6wwwuwqpvaRIhjKV/clng3Nz1gJMQmAhYGpzWeXGMMi8QT2D+mNIGXyL2OsJtSk+5BlsrSC7JK0eBpQwVDZoajyQYr8dh4KXJBdg5E4hqOG2idXwSKqHsBgOiqCPtRVpgWhousnivkWIO4+Zp/ADlgZGPE+P0kiRgHIz6TJxB5PzMeGCp6LoYAP46rS3Y5F21Ws1hEQuXbl2iULVDBUZsi3AItOr1iL6vKlMkQ9r4yxqq0IoCqUG3SIei5Yd1fWc/lES+vz+5b4OoViQYdYNjRf8KhfZahlkqltg7Upq0yMlTXoyJyLstiVK5QBRLLHueUQskAFQ2WG/AyM2ImbUVSXR1ovajdaqFOqXzgDk59qFi0Xz7sbzUiTib6PuekokWqffL2iRPsWIaToGiHat2rCAbMpa4Zdon2rVra5mLt2GT4PCJyLZkd/FQwpCIRVYTBRoiJEI7/dWB1CKJB2SdG70UJn6IhutV8wGBK8G83uPg1kM0PcTQJgqTXJk14R5VuJJClaeCvKt/pGYmZT1ua87SSEmJVXbQqIZ9Ly+Twg3r/yqmBlYGklPZcMUMFQWSFDYVCdpx5A1C5moDjTIX4Xk7kAi25umE77ZNkluAFdZ55dsgxyXqPhYr7CW3FqnwgSSQKfBjTVhMzXRUvYDUZhfFUwQ+3jF1y/l09tCogXWaTP/5JrY2LWydXm9j4CBNYy5enJJAtUMFRGsKp9rJSp6N1VR55uyoB4aX2hXYxwJq1A+s4s7JZoN6oJXoCj8SS6B1NqH4maG3akaieaasII+K1sqBwMTGHWUS4VkiY4HVWom7LoYDtfE1tN04Q32DVqhpSaTEEo8qlqAPHHceTrxAtYmkGKrgeQLB1VKE0mC2OVXQQvsnVDV6oJZNCvYbxF7SP6eIl8DytAfNBRKgUruj9Nofo90Q09C89FudYukUxaLJFEz5AKhhQkQPr8r8xdjOg6Betp1FaIlrC353m4AzLJZgs8SAXYNRyNY2BUV/tkB9sid8lpNjS/8k42nxfuWwVZWv0qPkjLb5foQmWZ7uNoLIG+kdw+ZIDYo5e6BnKbssoEFQyVEQoyMILTZJ0FdsmiD2rtLKHaEvFgsKp9ctJ3Au0yfKsqlD7pPNsuEf5VyLdEKyg7C6RXhLN7A/l9SxaWViafz5iLEvmXYVN2HzJALGNlrWPyWQuYJIEKhsoIpeoB5GNgxE3cZAG1j25X6j0C7OofiWM0lv9sH5G7ZOv5TFYGBhBsVynfEu3zOTt30crO3N5HgPhDgE2GT6K5OFCgKSsgdk0tPhfF2VWoTEMWqGCojFAovSJeKi5f0NEzFEU8mXnSedougbu+1M59XFUQFcHMs31E2tVZRCUiMqiV0bcAi10FJNnCGKsCzJBI3yKEpO0qWGPF3SyT3ctuygqIldZ3FNjEAem1XoTbF2L3ZIEKhsoIhfLuIqn5YkV1IhuX5Tvp3LRLhl1fbe6CIrI4Mt/J3QbEBkOlCoJF18DkTw2LZmByC4L1qwgmbf9QFLFU5faEGnkYvmLKKJH+1VFkLor0r0IsrSxQwVAZoWDeXeDDyiiqC/o1NFRlFtUJZWAK1HQAgnd9BZq8AWIZvmK7PpEnsZeur+JuEoDCdmkCa3PiiSS6B3ObsgJi03eGbzXVZDZlBcQyaYXuISDJ2iUdS1t47ZIBKhgqE1gVBjl5d4Ey4w6Lqia7qE5k0WahNvuAtRUBV5MAIF3HVJu70Ilk+Iq12RdZtFmoc7FfIKMQiSfQO5xS++TYpV9FMAqFmrLqdonzedO3isxFIQ/3Ak1ZAcFrV5E0mU9gU898TVllggqGygRG0FEZ9KOuIn9+
W+QuJh91KlJaX6jNPiB2l2weEZLHLpEpg2Jt9kUyVoU6F4usGTJsCgd8qK8MZnwvfQ+5m1WwKSuQfogKebgX8XmhjJUNnxeS4reVsuZqEoD8TVllggqGygTtlv4h2QoDoXlkG/ltIUGajdocEeUm7UUUGSJlxsXqAUTdx8FIHIN5TjoHMtNkvB/wVlVnjtpHIKNQ1LckrTURuUYUUucCYlnaonZJEDyqmiEFoSiuMJCU0hU5cU31SuGaIREPhnwnnRsQZRchJE2BS/QgNVjH2nAA1dm9jyxBCG/3Kl5rol9FpMk6pa01KeZb+lWMyMLG2iVgLhbzL5Ol5cw8DkXiGCiwMZEFKhgqExSjmtOHQ/K0SEcxu4QyHcWoZgkYmHwLnSjGqnc4hmgif+8jQFzTxY48p9UbEHmAbHsRu6RIY0jGdBQNHoXWFdpIWXO2q28khki88FwUVWNljFV1yI/aimCJd4uBCobKBMWoU6HHJRShwMXu+mzk3TnbFU8k0WUyaflqrMTcR+Ph3lAdyjjp3IAotqOYb2mWlY+3XYWOxQHkkIrnT5PpVzF2FZuL+pX3PUxkNGUtsnYJ8vl8fcgAgWtEkfVUFqhgqExg5+EuVCpeZDfK2yyr2if/Llm/8n4uZKh9avLt+gy7OD/cB4ovdKJ2ycXSGH6habL8x+IAYhmYQg0XAbFpss4i/iWKSesZiiBhNGXNOxfF2FWqsaEo9rhYGl0WqGCoTFBMtZVmYHhapKPYA0vUEQDGxA0FfBhXlUvpimZgJtTkqn0AcaxCqcJIYXbZ2AAA/P2rWKGyJohRAOw1zuQtrY/Gk+gejAIo0d2cu88bvY/CCPhzH6Oi52KhIy9Ese2yN1wEVDBUNijeFE/Mrs+q9inarE9UGiOP8g4Q13SxVNMyUbvkUrtRUa0b7BSSAvz9q3gRvHiWNq9oQJC0vivVBDLk96Ehz0nnMvoWIK6dRLGGi4C4VgSyN1wEVDBUFiCEFK8ZElxUl0/tA4iTzRYrJAWsrQi4mQSg9EIn6j6WqgeQ0S4rs8ZTWWOdi8WK4Hnv3EeiCfSPFlb7iFIEpg9oLbAxkdC3AHFpspJrl6hUegm7ZIAKhsoAB4ZjiBZRGAjbXVkWunwQxVgVOiHbgKhWBCWDDkEUeLF0FCCQ4SuSvrOmyXja1T8Sx2is8FwUdZCm4VtVIT9q8mxMxLGONuvRhPm8nKnhwmkyQUFakRSsLFDBUBnAKHAtrPYRNHELnERtQNSuz1CJFN5diU1HFdyNCiqOLJZeAcQwfEmL2ie/9Dn9Nc/6HMPnC6l9REufJ+VpBJlhl6CHu3xzsbhdokUDstqlgiEFoSgVlYtjYIpPEGHFfkXa7APiZLOl6xTEFHaX2r1rAoLtnqEo4kXUPpqmCanrkN23Ct9D/SpbOkpUwXm78XAvWL+nX0VJ2AtvMPUrz7mob0yK2yUDVDBUBihF6YqSipdagEVJ6wsd7mlAVJFrqaBWRD1Aptqn1O6dIwOTuoeF1D6AGCatmJIMsPoWN5MAZB7Xkw+ijnEopVT0C5qLxc7/AsSsXbFEEt2DRq8oeVja/cNRxFIyxHwbE1mggqEyQCnqVLTCoCTTwV1ab48CF1WbUygdJSLdaah9gn4NDVW5ah/dLv3KU5Ztp2BTxH0sduSFKJsASxqjZMqam0kAivdkAgTWyRXpyQSIWbu6ByMgqT5kTdUl1GQc7UpvTEIIBeQNOeS1TIEaSu1GRe36SvWeEGFXhtqnFNXMcbisap+CrIKAB1b6pPMK8/dnwzwygaNhdvqa+AQwabKqkNJdseVKpZsFwYXsEjBeo7EEDhRpygqIYWnTczFccC6K8K9S91AWqGCoDNBRqk4h5QW8peKlupKKUCH1j6bVPjLVWHVY1D61edQ+ul36VQTTUTToEFDLVCqNAViZNC4mAShdSCq6Nkc6BqakXfznorFuhQM+1FUWmosiGJjSRcoiaplKsXuyQAVDZYBiJ7ADYqTiySQpudBpAvLbhk31lfnVPoCYIM3aPySf2gcQU8tU6iEKiGndUCo1DIgNauVT+9gTWfA0a2A0hqFoAkAxCbt+FTIX60vPRRFrVzGfF7HWjwVZPaCCobKAeQBjAZrSL2DnblX7NBUoqhNReFuqkBSw1nVwMQmAtX9Iabu4pslsUOAigzTZdsmli+D515oQQiwsbfG5yNMusylrRQBVoRIMjACfL5RSBMQEtXZSw34BDF8pAY8sUMHQQY5YIomeIXtFiCJ2MU01YQQLqH1EnJRdaocMiHm4OyoIFpAysMMMCVmAi9nl43sf4xa1z8SCLC1SNnExCQDQOxxDNBXZFwpqRaTvHLF7IlLDdnxewBpR3C45GSsZoIKhgxxdA7rCoKjaR0BDNVsPd4HFfsXsMnfJXBmF4mofQGzR5pi8j5wZvu7BKJIl1D4iWFrjHhZT+4hkaYsG2gJFA4UUgYAYab2zNZUnY1V67ZIBKhg6yGFNYxRU+0hK6YqQGdtJr2gCmTRb1LyAoNZe+o6PXaOxBHpTap9i/sW7pUR6LhZW+/gszBAvxsqO2kfeFKx+lUkRCIiR1tupzRExXnbWLhmggqGDHHbUPn7Lw53fAlxa+SBCWl+q3wpg2fWJYNIk2yU7YWB43ceugbTap74yWNguU0XJN+go+rCyBEm83N6Wb4nsyVQgpQiIWSNKqWABMSytHbt4B7WReAL7h/SmrEpNpiAUdnYLVkUEtwVY8l2MbLU59pg0/crrgZWp9inNpPFKR1l37oXUPgB/hZSdQlLrAbK8/MuJb8nHwOhXEeyxneCRF0s7FIljIKL3IbPjX7zsMgK0kN+H8VWFNyYyQAVDBznabe1i0gswr8WuVAdXQCzTYe/BwMOibLVPaQaGN7tXGw6gukDvI8BaP8Hp4W6jjgngn75zEmgD/O2ys0bwrYGxv3bxsosQ4ixlzTnQrg75UVtRjA3la5dxJllzXbjoxkQGqGDoIEenrV1M+mtuu1GDGZJodxVPJM0Ui0wKFjtqH4D/Qa121CsAfybNtl1Gmoybz5dOwVrnIq9mkE5UWyIUgTKxtH0jMUTiqbkoUaPRdps+z/tgW8PnZVeSASoYOuhhhwIXkiaTkJq3qn0aixwoyNsuI3BsrC5+tg/v3aidnkwA/4Lz9M69uF28WyQ4YRQA/v5lTzTAx6ZEkqBzoDQzxNsuYz0dX1W4KStgLYSXx7cA/j5vJ9UpC1QwdJDDjjP6rXUKHJ5YkXha7WMnTcabUZhQE85IHRayi9vDfcDegmIWbfIqCLZrF2cmrd1m+3/e0nonRfCAACbNRjqKV9DRMxRBIkng03TJf0m7OLNopXzeXLu4Fec79XleNUMqGFKQBJ22KPD01zwWO+vZPsXUPrzPjrJLNXOvNbHRbwXgLxUvdeadAVG1OaUWYFGsgh3RAAAQDn4fjSfRY0Ptw70GJpVemVAbRqBAU1ZA3FwsGQwJY2nt+jxri3Ski83l7j4NqGDooMZgJI5BU2FgbzfKI/Cwq/bhXxDsLL3Ca3dlJ9UJ8C+OtEuBp0+HZ22RDrvBUJp
VYG/YcDSOgdHSap8MlpbHxiTF7pVS+4jzecmCDttzUb/y3wDYXSPkScHKAhUMHcQwz/YpofbhXadgtz0772I/O2kMILMxHg/YpuY5H19i3y5+C7BV7WOfsWJuljlWpdQ+GmeW1rCrlNong7HiuEbI5/MOfUs6u/Qrr2DITt2XLFDB0EEMg9ItpnoA+KvJ2m3axZ3p6LM3cXnvruw/GETZVcK/OI5X/0gco7HSah+Ar+LHfhpDgwbdHp4bE7s+D/Bhh2RlOpwqKPmxoUZQa6+WiUcGgBCimCEFOWCnORjAfwHutCFfBwQ0CLNZECxKwm53vBIczEpa1D52mTSeKdhxJdQ+gDV9xy8dZeehoPEcL5t1X1oGe8zUJADO2WN+wVBqw1RStaVfeawR+ly0y2rzCx4HInGMxPSmrEparyAUdtMYAN/Tsu0cwAjwz7vbfTD4uafJHNbAcDCsO6X20TRdfVfULo4LsJMTsnnWpNn1eSC9KHMZL9tKRb6pdDvNYgGrbzE3CYD9DSbPRqP7h6OIpXZAzTbrHbkEtCmfr6sIoDJUfGMiA1QwdBDD7kMUAIyljgcFbh7AaJeB4Vy0WUr5wLOYNBpPontQV/uUTEdxrJ8w1D5NNcXVPgDf+2jXtwCrXUxNAmC1q7SqhucREx02e0XxVpw6VW3xGKtYIonuQXtBGlefT41VU00IwRJzkecaMZZ6DAEqGDqoYZfpAPjuRjsdpn14MDBWtU/pIE2/8hirrsH02T4N1YX7rQB8pfVOGBieBedpuX/poINnMamdFhcGjLiDC0trWzTAOU1mM+2TZrQ5sKGDERACBHwaGkvMRZ4MjJMULM80md3eR7JABUMHMexS4AC/OgVCSHoBtkmB88i7GxO3KuRHbRHlHcC3sNtabF7qbB+/jz/TYce3TLs4pn1spck4Suvt+jzAt97Ezpl3QNYBsozHazSWwIFUU1a76TsedXLmXKwNZ7QjyQeeDVCdHHnBs5Gtk8yEDFDB0EEMuxQ4wI/tcKL24cnAWFm0UkEH392Vk12ffuUrfbbjW/zGy3gwOEmT8Q1q7TNDrMfLujGxK2E3fo4lDN+qCPpQV1F8YyJkLtpgOrj6vIPUME+W1u5xPbJgzAVDv/nNb3D44YejoqICc+bMwcqVKwu+d8mSJTjvvPMwYcIE1NXVYe7cuXj55Zc5WisOTtQ+QNoRWO8YjJ27HbUPTwbGerpyKfg4MgrO0lFypsl4Fpy7KaBm7fOE2Ff7APzqOgYicQxHdbWPXQk7wJ4Zsh4ca3djwse37CnJAL6F3XbLDgC+x3E4mYsyYEwFQ88++yxuueUW/PjHP8bmzZtxxhln4MILL8SePXvyvv+NN97Aeeedh9bWVmzcuBFnn302vvKVr2Dz5s2cLeePnqEo4im1T1MJtQ/AL/du7hZsLCg8pfWO6qs4trR3UnjLU1pvV+0D8C1ydcSkpVY/1j6/fyit9imlvAOszBBDo5B+iNZWBFAVKs7A8JTWu2E6eBbn2wpoOSph7XbFBviy7R0O7qMMGFPB0L333otrr70W3/nOd3Dsscdi8eLFmDZtGh588MG871+8eDFuvfVWfOELX8CRRx6JX/ziFzjyyCPxl7/8hbPl/GE4YlNNuKTCALAwQ4zrTZxQzTwXFHPXZ8MunlJxJ4W3POW8nY7uI5/xilvVPjbOQuIVpBm+1VQTQihgYy5yemA5qTUB+LVucMJ08Gzo6Sxlzb9Q2dba5ePPpI0VZqj4dkAiRKNRbNy4EbfddlvG6/Pnz8fq1attfUYymcTAwAAaGhoKvicSiSASiZj/7+/vBwDEYjHEYjEXlovBZ/sHAejnbJWyOxaLmTuGKOO/c1/vMACguSZU8vck47q6K0n0+1+KMveCtgO6XU3VwdJ2JfTUQiKZZO4TjuxK6nbFE+ztMuW8Vf7SvytVlR+PJ5ja1dY3iiTRF/z6kK/k7zIajcbicaZ27e3V52Kz3bmY+joSZTsXndgF6EFawrSLXd8YY42YUGNnLuprRCJJ2Pv8gREAQFN1oOTvIobPc5mLKbuq7NsVS7Cdi4kkMZWwjXbWCEZw8nvHTDDU3d2NRCKBiRMnZrw+ceJEtLe32/qM//zP/8TQ0BC++c1vFnzPPffcg0WLFuW8/sorr6CqqsqZ0QKxqkMD4AdGDqC1tbXk+33QF7c3Vq7EJ9Xs7HrrEx8AH/o7PkVr6+6i7x2KAYaL/q31JZQQcHjC9j1+ABr2ffQ+WnvfK/reD/r0se3vH7Q1tl7wSZtu1873t6B1b/H07jvdul1d3d1M7YomgAMj+n15e+1KfFT4qC0AwO49+j3ftXsPWlt3MbNr9wAABFAbSGLp0pdKvr+7S7dry9vvoKLtbWZ2rTbnYp+t+6Jp+lx88803sbuGmVlYuVe3K9bfZc9fkrovLl+xAg0Ma2I3f6Dfl57PPkFr68dF39s5AgB6EMB6Ln6cmou7tr2N1n1bir737R59bLu79zO1K54Eeof1ufjOupX4xOZc3M14LvZFgUQyAA0Eb61cYdYN8sbw8LDt946ZYMhANjtACLHFGDzzzDO4++678ac//QnNzc0F33f77bdj4cKF5v/7+/sxbdo0zJ8/H3V1de4N54wPln8EfPIJZs04FC0tny/63lgshrs2rgAAzJv3Rcycyu7v/MvTm4GOLpx+0nFoOWVa0ff2jcTwow2vAgDOv+ACW+k+t/j/tr0BYBQXnDkXJx06ruh76z/sBN7fgsrqarS0fJGZTQDwo43LASRwyfwv4bDG4lGq9m47Hv/wHYwb34CWllOY2bRn/zDw1psIB3z4x69eWHL+bXtlB5bt3Y2p06ahpeU4Zna98n4H8O7bmN48Di0tp5Z8/5/2b8b7B7owc+YstJx8CDO7Pl7xMfDJx7bn4qJN+lw8bd7pOP6QemZ2rf/rNmDPp5hz7BFoOe/Iku+/bcPfEYslceZZZ2HaeHYbw6f2vQX0HMDZp56IllmTir73445+YMtaaP4AWlrOZ2YTAPx40woAcXzl3C/hiAnF56L/vQ489sHbGDd+PNO5+FnvCLBuJUIBH75hYy5uX/YBsHcXph7Cdi5u3dsHbFyH5toKfOWiM5n9nlIwMjt2MGaCoaamJvj9/hwWqLOzM4ctysazzz6La6+9Fv/zP/+Dc889t+h7w+EwwuHcbU8wGEQwWCLslgjdgzo9OGVclS27DdbF5/cz/Ts7U92Up46vLvl7won01/5AAMEAG2peV/uk7GoobVfI8n2WYzUwGsNQSu0ztaEGwWDx6Zq2S2NqV89w6ryh+gqEQsWbzwEw75um+RjbpadMJtdX2vo9RudszcfW57uGdN+yOxeNxxnrudiVmotTxtuzy+/zAUjC7w+wXSMczMVwSP8+IYSpTUOROAYjun8d0mhnLurfJ2C7RvQMDwDQi6edzEVobNeI7iF9rCbVVwh9bjr53WOmgDoUCmHOnDlYtmxZxuvLli3DvHnzCv7cM888g6uvvhr/7//9P1x00UWszZQGTluhm8dx8FKTOShCBNg2g9w/FEU0VTnebEvlpl
95FZvbUfsA/Jr1mb5lY6wAfgfbOu1rwuvUeqd2pRugsr6P9nsyAXyOCXHS+wiw+jwzkwCkfb4mHEBNiaasAD9lp9O5yKuwe6wpyYAxxAwBwMKFC7FgwQKcfPLJmDt3Lh555BHs2bMH119/PQA9xbV37148+eSTAPRA6Morr8T999+P0047zWSVKisrUV/Pjn6WAU5UW4D1CAB2k8St2gdgO3kN1UNjtV21D68FxZkag9cRAE6UZIC1FwynIM2uXZyk9U7UPoD1aBxGBqXQ4aCdBMDHv/pGYojG7TVlBdKBNq9GkHZsAiT2LU5tQcaakgwYY8HQZZddhp6eHvz0pz9FW1sbZs6cidbWVkyfPh0A0NbWltFz6OGHH0Y8Hsd3v/tdfPe73zVfv+qqq/D444/zNp8rnHQIBviwHd2DUVPt01htp7lh+mu2wZAzFo2XnNcJiwbwk9ane0XZ8y0/pwW400FTPICntN4pk6ZfWdplVfs49S+Wfm8EtOOrggjbSIvzaszqtIEgbwZGNp930vtIFjgOhhKJBB5//HEsX74cnZ2dSGblL1asWEHNuHy44YYbcMMNN+T9XnaA89prrzG1RVaMxhLoTZ3tY3fy8mjGZe6uasMZHW0LgVearMNBMzWA4+7KwdlyAL/TzjscdDYH+J3g7aQpHsCH6YjGk+hJ1QzZHq/UlWVQ2zMYQSJJ4NP0/kd2wKP/UYeDZp4Av6aLbllaXqn0STaYdoAnY+Vs7ZIBjoOhm2++GY8//jguuugizJw5k2nvFwV3MHbI4YAP9ZX2CsjMrrcMFxWndUx+Tmkyp7sYbru+PmcLHbddsqSMlVu7WPq8cQxHyO/D+Cp7c5FHsG34/ITasFlIXgo8WIW0zzsLOgD7ymI3cHK2HCCApXW4pnJj0sbIifWAi2Doj3/8I5577jm0tLSwsEeBAqyMgt3FQeOwADtN3VlNZ1nk6jhNxumYEKfBo7x26VeWu+ShSBwDKbWP40JlHmxoXdj+XExd2fq8MwYG4HMOmNuCYEAP0gKMGtqk02TOfItbCtY2e8yJpXUYpMkAx2qyUCiEGTNmsLBFgRKcnLNlgMdBrU7z7pqmcaXm7R9LoF95FSE6DoYYjhUhxHn9BIdaE8Om6pAftRX2GBgeB7W6KSTl4fNOA1ogzXYwZYYcFsH7M+oKWVikwynTwePIHn0uujtShWVAOxJNoH/U2JgcxMHQ97//fdx///1czj9ScAenCwrAZ5dsnIXkxK4028HEJADOdzEa5+JI+3UK+pXlw71vJIaIA7UPwCdIc6okAzgxHX0u5mLqypLhc6okA/gyaU42TAZ4bJjspsl8HFLW/aNxjMT0nl+OWVoO97Ay6EddxdjRaNmy9Gtf+1rG/1esWIGXXnoJxx13XE5ToyVLltCzTsEV0goD+5X8RlTMMsh1qnwAUotKkkilJuOx60skCToH3Kl9eKQxxlUFURG01wSTh5rMqZIMsDyweDAdTuySMGUN8KlJc1q/x6OuMJl0wYZyWCMMm+oqAqgM2ZuLXDcmDlLDMsBWMJTdk+fSSy9lYowCHRjN1JwUr6Vz3Cws0uGmqI61WsSq9rFdQO0zbGJiEgCgZ8i52odHPYCbtA8Pu5wqyQC+u2S7RfAAn3oTN2kyPg94d2oygN147R+OIp4k0DS94NyJXTxSis58nuMGYAylyACbwdBjjz3G2g4FinDjjCY1L1mdAutUhlXt01DtLOhgyqKlUorO1D76lenDykVhJI+mi258noe03o3P85DWd3rYMLF6kMYsTVmdtm0A2N1HI9XZWB22fT4i1xSsq3o0FhbpGItKMsBFzdA555yDAwcO5Lze39+Pc845h4ZNCh7h7sGgX1ktdMPROAZGnal9APYpqXQtgH1Kl0/hrfsCV5YLsLv0in6VNu3DUlrvSrWlX1n6l6sCasZ+3z0YASFA0K+hocrexsTar4xV8GhsmJywezwaVDpNowPp1LBsSkUZ4DgYeu211xCNRnNeHx0dxcqVK6kYpeAehBBXarI0M8TAKKQniBO1D2BJGTALhuTcXXlh0biko9ykyZgW58tXEGw9Z8vZXCQpu5iYhdFYAn0jelNWmVhas5dPbYX50C5tU/prVn5vCj8c1H1xSXW6UQ2n7GLJOrpZu2SA7VLvd955x/z6/fffzzg9PpFIYOnSpZg6dSpd6xQco38k7ljtAwA+jQDQmD2w3PadYN28zNWCwrPw1tE95Fe06eQARj+XNJkztQ/AnukYiMQxHHWm9gHYp6OMe1gR9DlS+/gYS+udnv8F6IG2BgICjV2azIVSUcZic8Di8yxrQx0eTCwLbM+EE044QXc8TcubDqusrMQDDzxA1TgF53Cj9gHSCzBrqtlpMMS6rb3TIy8AvkGHsyBNv8rUkwlg33QxmSSWVIbzoJZZPZoLtQ9gPaiV/QbAidqHdU2a28M9NQAE7OzqdDMXOawRnW7q0XhsTAacj5cMsB0M7dy5E4QQfO5zn8Nbb72FCRMmmN8LhUJobm6G329/wiuwgRtaHrA0XWRGNbsrqmPdsM/NLoaHVLzdRd6dR6t9V6otxuze/uEoYgn9s5udtJNgnFY00itOfZ51N3i3aQzWdTBu7dJS0RB7u+Riad3MRdb30NoI8qBNkxknw2cfzKogF9ykMQB+C7ATChxgL1F1s6BkKFiSxHZ9gxO4UW2xlrBb1T7OUrBs01GGzzfVhGyrfXS79Kt0D/fUlVWazE1RN2BpNsqKpXWZSvcBSID9Rs5VETwjm+KJJLrcFFCbNZgsrAJ6h2OIuijTkAGu2kN+8MEHeO211/KeWv+Tn/yEimEK7pDuLOvMEXktwE4ZKz/jok03DwargiVJCHxgEAy5SPuw3vUZap+AT0NTtZNgCCm7mJjluq8JazWZW7t4BWlOGSu/+SBlNF4uVFtAmhli137DOcPHWtnZMxRFkui/p6nG+caEFUtr+HxDdQjhwNjKFDkOhn7729/iX//1X9HU1IRJkyZl7JI1TVPBkGC4TpPxWoBd7kZZLMBWtY+b3RWg20W74fxoLIEDw27UPvqV1cM9rfYJO2LDWDd6M9NRbpkOxkGamxoYgF3Q4ZaxYv0gdSuyYKncisQT2G80ZXXURZx1ClYfqwk14YzNmXC7xqiSDHARDP3sZz/Dz3/+c/zwhz9kYY+CR7hR1QA8pPXu0ncsi4IHM9Q+zhQsBlg8Fwy2yrHah/nD3Z1vsa6fcO1bzAuC3alqWKesO13axdq/3KbvWBacGzaFAj6Mq3LeEkQ232LN0rr1LRnguM9Qb28vvvGNb7CwRYECXO9GGVLghBBXHW8BtrJsY6xqKwKoCtkPOqznIbHYYVlZNEdqH9bF5m5ZR8ZNF93axbqhp5sieIADw+fxPrLw+aFIHAMRdyedswwerUGHk7nIOk3mOgXLugjeJUsrAxwHQ9/4xjfwyiuvsLBFgQJcT5LUlUXQsX8oimhKVz3BQX4bYCutdztxraw0i0WlXVKmw42qBuDHDDm2y6wZom4SAPcFwSyPxvGi9mF5Hw3fqgkHUBN2lrBgO
V7uyw7Yigbcpzr1K+s1wunaJQMcp8lmzJiBO++8E2vXrsWsWbNyTq2/6aabqBmn4Axxi9pnopsiRLDZ9RmLb1NNCKGAs/ib5U7G7Rk61noZFg9SN31NAPbSejPocMruMZdkGz4vzwMrkSTocnjOVtqu9GfQxgEPah+W99FtQAuwfcDLGDgClp5MLtcuVj2/3K5dMsBxMPTII4+gpqYGr7/+Ol5//fWM72mapoIhgegajKQVBg7UPgDbmiEvpxizTBmYuxgHhZFAeqEDGO1GXfZkYi2t95yCZcXASCga6BmMIJEk8GlwpPYBrAe1UjfL9Hk3ah+W0novh3uaBedMNnLeGBhCdDbOSYqNrV2Mi+DN+zj2aoYcB0M7d+5kYYcCBZgFrg7VPgCfoMNdMMSO7ehwOXG5pckcNBAEcg+tpL0Auz5SheECnKH2kUhab9zDCbXO1D4Aa6bDnW8BbKX1bs7/MmANPGjDzXE9QHb7jfTY0UK7yyMv2B987W6DKQMc1wxZQQhh2tZbwRncPqwAtuoHb8wQu1SGe6ZDMw/TZGGX22Jz1odWulb7MGSsrGqf8Q7UPgBbab3boyUAttJ6LwwMS1bBbQoW4MQMuWRpAbZ2ycTS6k1Z9Y2JG/8SDVfB0JNPPolZs2ahsrISlZWVmD17Np566inatik4hNsJAliO45Ao6ADY1im0u5SKA9az3GhapMO92idzN0oTVrWP8zoFNjYB7tU+AGOmg0JqmA3T4T5IY1lv4mnt4rGR88DS0rZrJJpA/2hKeeeyfo9FQGs0pwz6NTRUhah/Pms4TpPde++9uPPOO3HjjTfi9NNPByEEq1atwvXXX4/u7m5873vfY2Gngg14KUJk+nD3cIoxy/Rdh0sKHGC3G3XbCBJgW8tk2FQd8jtW+7AsJjULXN2kVxg+GNwqyQDGTMeAe7UPy6DDrVIRYKcms85FLywtq7lYGfSj1uVcZJPqTKfIWBxRxBqOg6EHHngADz74IK688krztUsuuQTHHXcc7r77bhUMCUQ7BaqZSdDhUu0DsEtleFH7AOnzkGgvdH0j7tU+LBdgL2kMHpJsT74lW0EwS6bDwwaAZZrMbQoWYNdnqH80jtGY7hzeNiZUzcrwLadsKMueTGO54SLgIk3W1taGefPm5bw+b948tLW1UTFKwR3SlK77XR/LNJkbu1ilyaxqn8Zq55SuuahQfpAaD/fxVUHHah8eC7AX32KaJnPjWyx3yR4Klc2OykzFDC7sMtNkdO1KJomUilPDpvrKICqCXuYiG7vcFcGzFw2MxaM4ABfB0IwZM/Dcc8/lvP7ss8/iyCOPpGKUgju47T0BsJPWR+NJ9Ay5L6pjt9DpYzWhNoyAg5PODbDavbvtawJkLsC0H1hefIvl2WRuFYGAtTZHniJ4gHVHZe/+Rduu/cNRxJMEmqbPR6dgVXBOo44JYBekeZqLEvVkkgWO02SLFi3CZZddhjfeeAOnn346NE3Dm2++ieXLl+cNkhT4wUudAqugozNVoxDyO1f7AOwWYK+7GFYF5x0uewwBudJ6mvCiVGRaBO/F5xkxHYD7IniA3dE4sUQSPUPugzQ/ow2AcQ+basIIetiY0HYvswbGBYuWLa2nCW9F8PqVbauSsRkMOfa8r3/961i3bh2amprw4osvYsmSJWhqasJbb72FSy+9lIWNCjaQebaPPEWI6UM0nat9AHYFf16DIY0Rq9BOIR0FsGCGvBfnM6lTGJCP6RiNJdA3EgPgUqmYutKei10DERDiXu3DilXw4luAZWPCioFxFdCyZGm9FMHz2JiMzZohx8wQAMyZMwd/+MMfaNui4AHGBKkO+VFb4ZyBYbUb9dJvBWAnBfXaNj59UjYlg1Lw1G9F06Bp+g6ZWdGmG2aI0QJMCHHdFA8A/OYumc3DqjLoR12F8yWWlbTe2nHdjdqHlbTe6xrBOmXtlunw+zQkkvR78ck4F4G0UnGspsk8NV1UkAdeVDWA9aBWSgalYO4WXNrF6sw0r7sYVnZ5WegAlrt394pAVk0X+0fjGIkldLsk2iVbj1Nxw4ayktZ3ePR5VtJ6zyxt6srKLreHjrISpXg58oKV8APwplSUAba3LX6/vWr6RCLh2hgF9+j00G8FYP9wd2tXut6EmkkA6NUMsXswuHtg+TUNCRCqdnlW+5jsHjWTAKTZvbqKACpDztQ+ALsgzYuSDGAXdHi5hwC740u81DoC1npHWhbpoLMxIVTXLkKIuda7OfKCVf3ewGgMQ1H3GxMZYDsYIoRg+vTpuOqqq3DiiSeytEnBBdw2BzPAjmp2v4sB2CmRvKh9AHY7LK+KDBZBraH2AVxKxZnvkL2yaNRMAkDBt1JX2j7f7tm32IyXkV5xnSZLXWVSkwFs1q7e4RiiCXd9yKw2sSqHqA0HUO2wEaQssG31unXr8Oijj+L+++/H4Ycfjm9/+9u44oorMH78eJb2KdiEF+UDwDBN5pWBKSNqXj/bx9sDK90Yj5pZFrVPyJXah3U6yqtvMSuC91wDQ8siHV6ZIWZz0ePaxULMEE8k0WUW58vD8Blj1VAdctyHDGCnvLMKZcYqbK9sX/jCF/Dggw+ira0NCxcuxAsvvIBDDjkE3/rWt7Bs2TKWNirYQKfX3RWjNJmXzrIAm927Ve3j9cFAc4fVPZhW+7hpBAmwocE7PRZGGnJe6mkyD0oygJ203nPQwegQYK8sLaviW+M+umb4Uleat7FnKKqfNu/T0FjjMhhikOL3WqTsZ+zzY1VWD7gooK6oqMA//dM/Yfny5Xj33XfR2dmJCy64APv372dhn4JNeFHVADBPYae50GWc7eMxGKL5YPCq9gHSzBDN3SiNs31YBLVe+poADGtzPPo8qzSZ1/QK87YNntNk9OyKxBPYbzRldW1XKnik6vPpui+/y7nIwu/TRcoeyw4k8y0Z4OpJ8Nlnn+Hxxx/H448/jpGREfzbv/0b6urqaNum4AAdHk5gB9hQugOROIY9FtWxkNZblWRu1D6AhRmiWDNEg2pmUXDuXVXDegF2yXQwktZ7tcvHiKX1WqjsZyCtN5jjUMCH+krnLUEARukojz4PMFq7qKU6aVmkw6tvyQDbwVA0GsULL7yA3//+91i5ciUuvPBCLF68GC0tLfD5lEJfJJJJkk6TeS7apGQUvKt9ADZMR4fH9ArApuDca78VgE3g4bknE6MFuFNCpoMQ4r0IPnWlOV6Dkbip9vF6H2k+3K0smtuNCYv6vbTPu9+YsEile/Ut1q03xqqsHnAQDE2ePBm1tbW46qqr8Jvf/AbNzc0AgMHBwYz3KYaIP/YPRxFLuFf7AGwmrple8ZBHZsF00NjFsFiAaVDNLBY7L31NAHZyXq9qMvOgVoobgAPDMUTj7tU+ANvCWy9qHxY1Vl7T6AAbaT3VuciAPXZdX2Vpc0EIcR2AZqOs0mS9vb3o7e3Fv//7v+NnP/tZzveNgVV9hvjDmCBu1T7AGHi4s1iAPQRpLKT1NII0FimWtNrHGwNDcwFOJIlF7eNVeUfft9yqfQA20vpOCilYFjVWXpVkABtp
vbGRk25jQqlxJqDfRz+dWMjzkSoywHYw9Oqrr7K0Q8EDvKpXADYHtdKxi/6CQsWu1JWqXWaq03vNEE0SxlT7uK01sQQ/tBbg7sEIkkT32ybXah/9SjUFS8G3WEjraWwAWKjJvPoWwCZ951WdC8ip7MyciwR+eJ+MepmG9yyAaNgOhs4880yWdih4AI1dDIsFmMZugU3e3btdLM5yM3d9Lrt1A/QVLFa1j9eiTUC3y606xwpjrCZQUPuwCbQ9+HzqStW3PHaCB9im72ikrGmmO6nYRZmljcaT6B70NhezD5ANuiMvM9A9FEEiSaB52JjIAFX5fBCAJtPBIuigsbuiyXRQKVROXanuRj2c/2XAR1khZVX7jK9yqfbxZe5GaYCmb9HdAHj3LRbSejq+xUAq7vFMRYCVmIEeS0truLpSDVmDfg0NVW77kKW/pjVchm811YRdl2nIgLFruYIJGg8GFguw1/b/gEXxQ2lFsfY+orPro2EVMBSJYyASB0ArrUjFrAymw2sbAoDeAkw1NSxZnVya6aDPwNBRKlIxCQCdtYv2xmQkmkD/KL25SMsuGn3IfFZmiLJdY1lJBqhg6KCA174mABtpPdWCYEoTl4baB6BfcG7cw5pwADUezvahnfqhk15hsABTCWgZpMko+jyLmiEaKWtaD/fMjQmFlDWlATNsqgr5Pc1F2nbRSe/TZ2lp3EMZoIKhgwAdNChwykFHIklMWlcmab1RpOxF7QPQLzinpcagbxcN32KRJqPgWyykzxSK4FmqyTwFaZTTZP2jcYzGkp7tot2XiUbvI8BacE7FLCpHXljFC7T8i4ZvyQAVDB0E6KCwe6d9vETPoF5U59Pg+pwtgL603tpm3wtYLcBeFxTax5fQ8C0fgwXY7Nbt4T6ykT6nOsF7mYuU03dWtY+MKdj6yiAqPFTy0h4vWoeOsmJpvfhW5sbEs0kADo4eQ4BNNdnXvvY12x+4ZMkS18YoOEfG2T4UihDpUbr64juhNoyAh6I62gtKJwVGAQB8GgGgUQw66HRwpV1wTrOQFJBrl0xbWh9LJNEz5N2/0iwtDav0Q0fjKbXPBA/BI21pPY16IYB+7Rc1uyhL62msXdZaI9prfVnUDNXX15v/6urqsHz5cmzYsMH8/saNG7F8+XLU19czM1QhP0y1j9+92gegf/Izrd0C7fQdjRoYgH7BudfGhgYMWp9a/QQViTH9BZiGXbSZjq6BCAjxpvYB6LO0xsO9sdqb2oc2A0Oj4SJAX1pPo1UJQL8BqteGiwaMQ7lp+5fX+ygatpihxx57zPz6hz/8Ib75zW/ioYcegt+vU5uJRAI33HCDOopDAIwmXM0e1D4AO6rZ64LCiunwUgMD0G+6mG7y5m1BoX34KI30CgD4QJCERmUBpqX2od0Ur8OSxnCr9gEYMh0e2D3AelArXd+Sjhny2NjQAPW1i5JdPk1nHWViaWWA423Co48+ih/84AdmIAQAfr8fCxcuxKOPPkrVOIXSoLWLoa2OolUQTJvpoGeXfqW3G6VbM0TjPhJCqNlFs0mlcQ8rgj7UVXhR3ulX2WpNaEvrabGhaam4Z5MA0PN52gXntE5gp752UR4vGnNxNJZA73AMgHf/Eg3HwVA8Hse2bdtyXt+2bRuSNGUZCrZAO+9OO+jwXgOjX6mrkKQLHr2rtgC6h0MOROIYielnDXpWuaWuNNyLltqHdrduWr5FW1pPzbcoM0PUWFra40VBEQiklVs01ojBSBxDUWMuUhovCgNmnA8YCvgwzkOZhgxwvK265ppr8O1vfxsfffQRTjvtNADA2rVr8ctf/hLXXHMNdQMViqNjgNJulNEC7LUGhraajJ5qS7/SsIsQ4vnMIQM0a6wMyWxtRQBVIfcMDJDyL0JnvDoG6PqWTI0gAfpMRyc1Zki/0ktHpYI0SspOGj5PCEmvXZSYNBrjZfiW1z5kAN2NHI2mrLLA8aj+x3/8ByZNmoT77rsPbW1tAIDJkyfj1ltvxfe//33qBioURwel7p+sijY920WxyDWeSKJ7kHZa0aNRAPYPRRFLSYe8Sv5pdr01UrA0VCJUF2BKPs+sQSWllCJtu7wyHdSl9cZ99MgM0RQz0GrKCtAdrw5KxdMA3Y1vO6V1XgY4DoZ8Ph9uvfVW3Hrrrejv7wcAVTgtEB0UjrwA2KXJPBchUnxgdQ9GkSRAwKd56n0E0K6BMc72CXk+2yct5/VsFrV7CNBNZVBrUElZWm+e/0UppUhLWk+LpaU5F61NWb0X5+ugEmhTasoK0D0nkFbxNGDxLxosLSXfkgGuVt54PI6///3veOaZZ8yd+759+zA4OEjVOIXSoE7NU1iArUV1MilF0k3Lwp7UPgDd85BYBB00FjqazdRoFgXTa9tAuSCYGhuqX+VjafUrjblobcrq9aRzmmIGWkXdAN2aNKosLUX/ouVbMsBxMLR7927MmjULl1xyCb773e+iq6sLAPDrX/8aP/jBD6gbmI3f/OY3OPzww1FRUYE5c+Zg5cqVRd//+uuvY86cOaioqMDnPvc5PPTQQ8xt5AnqxzhQmCBGUV044ENdpbf8tsl0UFjo0mofegsKnd0VvQWYply8k5JvAXQX4E5KbKgpFaeuJqPDdNDwLWtTVmrjRZFRmFAbzmjK6QY0Nya02D2ArrSeydpFuWZorMNxMHTzzTfj5JNPRm9vLyorK83XL730Uixfvpyqcdl49tlnccstt+DHP/4xNm/ejDPOOAMXXngh9uzZk/f9O3fuREtLC8444wxs3rwZP/rRj3DTTTfh+eefZ2onLwyMxqgpDGjWdKRrFLypfQC6x0vQ3MXQZNJoMjA062Cs99EraC7AtOyiyXQMR+MYSPU+omeXV6voNWUF6DJpNGtNaG5M6NpFM3g07PIedJhpRcmYNNFwvG1/8803sWrVKoRCmTUX06dPx969e6kZlg/33nsvrr32WnznO98BACxevBgvv/wyHnzwQdxzzz0573/ooYdw6KGHYvHixQCAY489Fhs2bMB//Md/4Otf/zpTW3nA2F3VhgOo9qgw8FGkmmmcZ2WAprSe5i6G5gneHRR3ozSl9bRUNQC9BVhX+9DxL7/l4U4I8RS4G2NV7fGkc4Ayi0apKStAV1rPgumgWY9Gwy6a0nqa7DHNTQCtpqwywPGsTSaTSCQSOa9/9tlnqK2tpWJUPkSjUWzcuBG33XZbxuvz58/H6tWr8/7MmjVrMH/+/IzXzj//fPz+979HLBZDMJi7U4pEIohEIub/jSLxWCyGWCzm9c+gir379Rqt5rqwJ9tisZhFzpv0/Hfu6x0GAEyoDXn+LJJ6esbjFOw6MKLbVePNrlgsZi4o0Xjcs11tB1LjVR2k4GOEml3tffp4NVUH6I2Xx3l0YDiGSErt01Dp8/RZiUTc/DoSjXlK1xhzcSKFuWgEjvGEd5/fu39It6vWm10AQFLrfoLiGtFMYS4a4xVLJCjMxdQaQXEuxqjMRT0YaqQxF1Nfe52LelPW1BpR5c0uVnBik+Ng6LzzzsPixYvxyCOPANCpwMHBQdx1111oaWl
x+nG20d3djUQigYkTJ2a8PnHiRLS3t+f9mfb29rzvj8fj6O7uxuTJk3N+5p577sGiRYtyXn/llVdQVVXl4S+gj/VdGgA//NEBtLa2evosg1Ho6/f+WWt3+QD4MNS9D62tn3n6rO1t+t/42d69aG391NNnvf+Jblfbzu1oHcxtHOoEvtQSvG3bdrT2e/usj/b6AWjYvWMrWjvf8fRZPV363/j22++gqv1t15+TJEBnv27Xe+tXYa83s+CDrsxZ+eab2F3j/nP2DQNAAFUBguXLXvZk03Bc/ywA+FvrSwh4EPJtMOfioOf5YwSOA4PeP+u11PxJDO33/FlbevTP6u7x/lkbP9L99EDbLrS27vT0WZqm37hdO3ehtfUTT5/14We6z+/5YCtau7w5fXdnai6+sxXVHe4/K0mADmMubliNtq2ezIJP0+fim2+uwqce+IuRODAS0+fP5tWv4T1v4jsmGB4etv1ex8HQfffdh7PPPhuf//znMTo6issvvxwffvghmpqa8Mwzzzj9OMfIpnpL0dv53p/vdQO33347Fi5caP6/v78f06ZNw/z586VrIfDpGzuBjz7EcYdPRUvLLNefE4vF8NH/LgMAVFXXoKXldE92LXvuHaCtHXOPPwYtpx/m6bO61+7BC7u2Y9LkyWhpOd7TZz3w0SoAQ5j/xVMw74hG158Ti8XwzMd6fdyRRx2NljM/58mun77zGoAoLj7nizh2sjd29S+9m/FubxeOmzkLLV84xPXndA1EkFz7OjQN+OZXL0DAg+Q/Foth0aYVAIDT5s7DCdPGuf6slR92A29vwrTGWrS0zHP9OQAwMBrH7et1u84//3yEg+5X871v6nPx8xTm4sfP63OxsqoaLS1fdP1ZALD15Q+AXbtwwlGHoaXlGE+fFXi/A4998DbGjR+PlpZTPH3W/z6xEejqwRknz0bLSVNdf04sFsNLj/4dAHDIoYeipeXznuz62dbXAERx0dmn47gp3tb7vx7Ygq29nfj8cTPRcso015/TMxhBYu3rAIDLvnqBp/YbsVgMP7XMxRMPHef6sz7sHATWr0ZdRQD/8JX5pX9AAIzMjh04DoamTJmCLVu24JlnnsGmTZuQTCZx7bXX4oorrsgoqKaNpqYm+P3+HBaos7Mzh/0xMGnSpLzvDwQCaGzM/zAMh8MIh3NrN4LBYN60mkh0D6Xk6+OqPNtm1ikAnj+ra1BXr0weX+35s4JGrw9N8/xZRn57aoN3u4xQWtN8nj4rlkiiJ6X2oWGXP7VQaj5vdu0f0XdUTTVhVFZQUJOlrj5/wJNd3cOpA1rrKz2PVTiZ3hD5AgEEg+5rfboHU8XT47zb5aM4F7uNuUhhjQgG9PFJEnprxBQKa4SZ3fQ4F+OJJLrNuVjj2S5jA+HzOBd7zLkYQhWNuZgaL5/f722NGE6Ld2R7NhpwYper2V9ZWYlvf/vb+Pa3v+3mx10hFAphzpw5WLZsGS699FLz9WXLluGSSy7J+zNz587FX/7yl4zXXnnlFZx88snS3jwnaKfUiRegK62nqdqiVRBM66RzA7SOvTACtKBfQ4PHRpCAVc7rzS6avgXQKzhPd5+mJ30GvBffslAqUlVHUVAE0vItgLJSMXX1alfXYASEUlNWgF4DVJrF0wC91g0076EMcMy3+f1+nH322di/f3/G6x0dHRkn2bPAwoUL8bvf/Q6PPvootm3bhu9973vYs2cPrr/+egB6iuvKK68033/99ddj9+7dWLhwIbZt24ZHH30Uv//977n0Q+KBdFdSejt3r8GQ9Wwfmuoor0GHsaBUUVD7APSk9aZ6pdZ7GwKAnpyXpm8B9B7wNDvxWofbq9+zUPvQkLB30lQEUjpeYjSWwAGKJ53Tktan1ZPem7IC9Jou0jplwAAt9Z117ToY4PipQAhBJBLBySefjD//+c+YOXNmxvdY4rLLLkNPTw9++tOfoq2tDTNnzkRrayumT58OAGhra8voOXT44YejtbUV3/ve9/Df//3fmDJlCv7rv/7roJDVA9bzaijKLT0yMP2j1pPO6UnrPTMdlE46N0DroFZa5zMZ8FN6YNH0LYDecRxGJ14qvmXxA6/3kWqvqNTV60OUEEJ1905LWm8EaBVB701ZAetxHN4+x+yZQ20u6levgTbtXj60ji8x2VCPZ97JAseeqGkann/+efzyl7/EvHnz8NRTT5lpKh6n1t5www244YYb8n7v8ccfz3ntzDPPxKZNmxhbxR/JJKHa44HWBDG6FtdXBlHhoSDVADWmw+wfQpfpoMco0LGLVrqT+m40dfXsXzTPaNLopMkIIVQ7F9PqAzMYiWPYbMpKr7eWZ9+y3EM6bKh+peZblJgOWg1QO2mztNTWiIOn4SLgIk1GCIHf78f999+P//iP/8Bll12Gn/3sZ8xZIYVM9AxFEU8SaJre0t4raFHNtE8xpsZ0ULaLFtXcTjno8FE6joP2faT1IKVaJ2dJhXjx+97hGKKpbqVUG1RS8vnaigCqQt4ZGFoHtVJnOmj7FiVmiFbNEKv6PdnWLtHwNEP++Z//GUcddRT+8R//Ea+//jotmxRswFjoGqvDnk86B2jWwBinGFNiOih1oKbNdNBm0ug9GAwmzdvnUGfSKATb8UQS3YP0GBhAfzAkibc0bHouhhDy0qwoBXo7d9q1JnIWBFOrR6O9dlGuZaLO0npOd5Y5MzR9+vSMQumzzjoLa9euxWefeWuup+AMtPO19NIrtHcxdJkO6kWI0jJptOoB6C7AXszqHowiSXRVU6PHk84N0DislbZvyciiAZZDgCmlrGkoAgGrUtHb59Beu2ip76irySj4V8JSpnEwnFgPuGCGdu7M7RY6Y8YMbN68GR0dHVSMUigNc7dAKb9NvwaGNtMh1y6G1sGjtBkYk0nzMF6ReAK9FNU+AJ1dsjFWE2q8n3RuQGc7iCe2o5Ny3Rdt5R1tpkM6xip1pVVXSJtJ88LSRuPpPmQy+VfPUASJJIFP0/sfHQzwzummUFFRYaq6FNjD3I3S2rnTqhmirMigJedtp82kpa7ed6N0d1c0UhnmSecBH8Z5POncAI1gm7bPA3RUgYbCTSYWDbD2ZKL8cJeVpZXMLhrBo1E8TasPGUCn3rEj5fNNNWFP3ellgi1mqKGhAR988AGampowfvz4ogqA7P5DCmxg7kZp7dxTV88LsKFwo1DUDdCR1lt7H9HqiUEjeByMxDEYodcIEqCTJrMq3GgpRGkswGmfpyflpTJeBgMjEYsG0Gdg0mkyb59DvU4udfUyXMPROAbMpqyU1i7Ne5rMum7Rmos06h0PNiUZYDMYuu+++8wT6RcvXszSHgWboM10UCvapKzIoCGtPzAcQzR10jk9ab1uD40FpTYcQDWFRpAAnd0o7TomgM4CzKLjLQ3mkb7P61dZmQ6vGxPa/kVjvIygozrkR20FJTaUApNGu3YPoDNetH1LBthaga+66qq8XyuIQ1r5IM9uNJEk6BqkvBul8bBK7dwbqkMIB+h0SacRdNCuFwLoSOtp+xYAaBoBoFGxi+YCTKORIO1u3fSVir
Rqhrw/3PtH4xiN0d2YyMp0+ClI62n3IQOMjZzmyedp+5YMsBUMOTn5VbaT3Q9WdFBOk9GoU+gZTBfV0TjbB6AjrTfqmJopplfS8lT3n8FiAaYhraftWwCdos10+3+KwSMFtsOoGaKdgvXyEKXdlBWgxKJRbsoK0ElZM9mY0KhHY3DkBY3jXsqWGRo3blzJfCUhBJqmIZFIUDFMoTAi8QT2pxQG1BqEUVBHGTv3CbX0iupoSOuNgmC66RX9SoPpoJmOoiHnZdFmn4b8mUXKwKu0PpZIomeITQG1F9+i3ZQVoCOtpy1fB+hs5FjYRYOlZbJ2pa6yrV2iYSsYevXVV1nboeAAptrH78N4GdU+kjEd7SyZDhqMFdV0lH71skum3SEYoMMMMbHLCLZd+lfXgH7SedCvoaGKEhtqCRyNTaZT0G7KarWLCkvLgIHx5lsMUrA01i7T5ymy2pIyaaJhKxg688wzWduh4ACdlv4h1BQGFBdgFnl3GkwHC0m2l92ocR9pNZ8D6NRY0U6vAN4fpCPRBPpH6SrvAO9qMuvJ3TROOgfSgSOg30e/i49lwu5RqK9i0aiPSgE1xTPvDNBUKrKYi7KxtKLhWsIyPDyMPXv2IBqNZrw+e/Zsz0YpFAeLXQzNBVjWXYxsdrFgOrymFQkhjBgY/eo16KgI+lBXQUd5B3gP0ljskK37kESSuGowyYINNXzLU60JQ9bRU9DBZC7qV9ns8spqj8boN2WVAY5Xla6uLlxzzTV46aWX8n5f1QyxB5O8O4UFmIVddPrm0N+N0sy7U2WsPNYpDETiGInRO+nctCt1dRs7Wn2LFhsKeGc7mPiW5c9zH6TR9y0/BTUZS5aWhuKUBZPm9h4ORuIYihpzUZ4grWuAflNWGeA4mXzLLbegt7cXa9euRWVlJZYuXYonnngCRx55JP785z+zsFEhC0yUD5av3U6SdgaSbJ+Pwm6UQfrOK9Ohq33k240aO1FaJ50b8DpepqqGcsGmV4UUE9+yfO31PlKtk6PYToJm40yvzBCLpqwABd9K3cOacAA1lPqQAd4Pam23MO00Nyai4XiEV6xYgT/96U/4whe+AJ/Ph+nTp+O8885DXV0d7rnnHlx00UUs7FSwgDUz5Hat62ShyPCocss86ZwBNe+yOLJ3OIpYQv+b6ErFvRVtslKJeF2AOxnZ5VVaz6JOLjtl7QZsmQ73n9HBQB3l9dBkFk1ZAe/Sela9fLy2bmDx/JEBjpmhoaEhNDc3A9CP6ejq6gIAzJo1C5s2baJrnUJesN6Nug08WKrJXFO6g7raJ+DTqPU+0u3Sr17HqqkmRE3tA3gvOGfVP8RrKqOd0YPBe5qMvl3WNJlbu1goFc2UtUubWDRlBbynYA3fotmUFfCeJmM2F1NXmXxLBjhehY8++mjs2LEDAHDCCSfg4Ycfxt69e/HQQw9h8uTJ1A1UyEUni068HusURmMJHEgV1dFlhrxJn9P0d5ia2gew9jbxxnTQXui8FnazOnPIq7SelV1eUxksmLRMltalfzFQbXlNwVqbsjbVyCNmYO1b0rG0Hu8jC9+SAY7TZLfccgva2toAAHfddRfOP/98PP300wiFQnj88cdp26eQBevZPizUUYC7nZ/xcA8HfKirpJff9nvdXTHaxXhdgFnt+rxK61kwHQA9NRm78fJWm0O1Ts7ytRv/sjZlZXF0SdJl+w3D5yfUhl0JNAralbp69y26Pu/1oNYOVnVyqavXNfVgOooDcBEMXXHFFebXJ554Inbt2oXt27fj0EMPRVNTE1XjFHIxGIljmIHCwGudQrpGga7ax2vQke7lw2pBcffz7JkObwsw7f4h3h9Y9GtNAG9B2lAkjoGI3vuIRQ0M4M6/WDRlBdK+Beh1hU6nuaxMB2u73KbS07U5ctYMHUxHcQAe+gwZqKqqwkknnUTDFgUbMCYuzZPOAe91CizOswK8H3YoL9NhpMnY1MB4tYumqgbwtgDrah+2/uXG5410QXXIT1XtY9iVSBJXrAKLpqxAmukA9Ae8D84+mx3ToY+R1zWCtl301i5GYgaPabKyD4YIIfjf//1fvPrqq+js7EQyq5hjyZIl1IxTyAWLPh0GfFqaAncKkzqlzSh4ZDrMBpW0GYXU1TMDw6hQ2Wv9BHUGJnV1Y1ffSAwRBmofwFsjQVY+D+j3MQF3rILh89SZDkv+zo3fs/J5r2oydnPRW8E5i15RgDeVm7Upa9nXDN1888145JFHcPbZZ2PixIkHVZ+BsQBWTAeg72SSCeJqATZ3CxRl4oD3YMjs5UOZUUgvKO5+nlkNjIe+TJknndO+j/rVTaBtPBTGVdE76dyAFzUZK98CDL8n7lLWjOurAHd+z2rt8t5FXD6WllUfMsAbS5vZlLXMg6E//OEPWLJkCVpaWljYo1ACrApvAePQSncLsLlbkGgXA7Czi1Y9AP2Fzv0C3D2kq300DZhAUe2j24WUXc5/tp3Rzh3w9iBl5VuAN79nXY8GuBwvVgrK1FWmxpmAN9/az6gPGeCNpTUEA3UVAVSG6G5MRMOxtL6+vh6f+9znWNiiYAOsJNkAnQVYtg7BzGqGUlc3TEcskUT3oKH2YbNLdsV0pHyrqSaMAMXeR4DHBZiRbwHemEezvooBS+vNLrZ1coC7TUAn4xoYN77Fqikr4E1ab9xD2n3IgPRD3wtLe7CxQoCLYOjuu+/GokWLMDIywsIehRJgma/1IjNmlXf3Iq1nddI54DHoSKWign4NDRQbQQLepPUsfctTmsy0i0Fq2MN4sezE6yV9186o7ssqhycuHvCs7PLiW6yasgLepPUsFVteWG1W91AGOE6TfeMb38AzzzyD5uZmHHbYYQgGM6Wbqgs1W3QMsNn1AdYUi7Ofs57tw2o36oVRqGKg9vFCzZtMRy3dNgSAR0aBpW+lrm4W4A5GtROAN1UgyweWl47dnYwUgV7SZNamrLRrrLw83Fk1ZQXo2MXW553/rHXtOtjg+Alx9dVXY+PGjfinf/onVUAtAOYBjCyYoRRP6DTw6B9lV1TnpSDYWmtCP+jQr64eoixrTTwwaSx9y8vD3VQEMvF57wwMm/Fydx+tTVlZMTCA8we8EaBVBOk2ZQW8bUxYKgK9SOvbWc7F1NVLOQTNM+9kgWOv/Nvf/oaXX34ZX/ziF1nYo1AEmWofdguwU1rXqAWor2Sg9jHy7h527ixqOrxI61kqAr2k75juRlNXV93NGTJDbqX1hBBL/Z48NUOZTVlps7QaNE0fK6d2Wdk92hsTL9J69opArz7PIgOgX2VjQ0XDcc3QtGnTUFdXx8IWhRLoGYoibqh9KCsMAPeBB1u1j3x1TIA3aT0rVQ1Aj0mjDSqqLYlqc3qHY4imKmNZpAxMVsGhfxk+X1sRQFWILgMDWA9rdfZzXJgO2RSBHlhapj6furpTdqoCahP/+Z//iVtvvRW7du1iYI5CMRgLXWN1mLrCAHD/gGerqtGvxEUzSC5Mh0SqGiBd9+VNtcWwZsihb2Wqfdj5l2Omw5yLIYQC9Oei2907a7WP280Jl4JgD2woy7VLO
pbWk+L04GWGHG8d/umf/gnDw8M44ogjUFVVlVNAvX//fmrGKWSCdb7W7U6GJQNjVbAkCeB3wLCzrekwbPKgyJBVEciyb45Du7oHo/p992lopNz7CHA/Xix9C/BgF+PuwD4fgITzBymrc7YAq1Tc+c8yXbs8dDdnWpyfujrdXCYsZRoHW/dpwEUwtHjxYgZmKNiBuVtgVMkv564vU8Hid3AeElMGJnUekmwMjJVJc4JIPIFeRmofwH1fJmOsJtTQPencgFsFZSfDui/APavQMcDOtwD3NVZ81FFy1cC4bYAajSfRM8SmD5lul351Wg7Rk2rK6tP0/kcHGxwFQ7FYDK+99hruvPNO1XhRAMzdKKMeD24XYNZnNBlIJAmc1Ge3M2TSNJdBB8DulGzAkiZzqfYJBXwYR/Gk87Rd+tVtPZp8Pp+6h4zschukdbBmhjzWFbJMWXvpm8OSPXY8F1MBLYs+ZEB6I+fct9g1ZZUBjv6iYDCIF154gZUtCiVg7kaZM0POfq6D0blkQFajNwd2WXsfsShwdbvQDUbiGIywaQQJuG9SaVW4sWiX4dPcLcBpn2fDdKQLzl0yMIzmovv7yLpmSL86tYslS+u2bcNwNI4Bsykru7XL8XpqWbfYzEX96lTldjAryQAXBdSXXnopXnzxRQamKJQCS6YDcC+tZ9o3x7IYOAk8DgzHEGV00jngvoDaVPuEA6im3AgScL/QsaxjAtxL61l3vHW9AWDo84D3+8gsGHIRPGb0PpKobYMRdFSH/KitoM+GupXWs6zdA9ynFVn7lmg4Xo1nzJiBf//3f8fq1asxZ84cVFdXZ3z/pptuomacQibSygdWu1H96iRlkEgSdDE62wdw3/XW2Lk3VIcQDtA/UDDd28TZz7GsFwKsRfDOfo61b0mrjnIprWfZrRtwn45iXctkFAU7UQX2j8YxGmO3MfGqCGTtW15YWhYwGBCnyk7WviUajoOh3/3udxg3bhw2btyIjRs3ZnxP0zQVDDEES+UD4K5OoWfQWlTHbqEDnO2wWPY1Adz3NmG960sfDimPIhBwvwAzf2C53SUz7IoNuCtUtjZlZV/L5GBjkrqH46roN2XVbULKJp2Fspta4uVbjuvRGNuVrneUiz0WDcfB0M6dO1nYoVACkXgC+02FgTxyXmPnPqGWjdonW1pvFyy7AwMUmA5WtSaeFYFsx8utmow10+FkvGKJJHqG+DBWTuyyNmVlsTEB3LHHHaxrHS1fE5L2tVLg5VtO67o7GbOh7lP8B2/DRcBFzZAVhBBXJ/IqOIep9vH7MJ6B2gdwt0tmXmtirRlywgxxqoFxynSwVN4BXpgOxrvR1NWxComxOsoNG9o1oJ90HvRraKhiIzF2wyoYD/emGjZNWQF3jBVrn7cGP078y2T3GLNo8vm8fk04TqWzvY+i4WrGPPnkk5g1axYqKytRWVmJ2bNn46mnnqJtm4IF1loTVofjuqmfaDftYjdB3Ch+WNvl88p0MFJHua8Z4pWOsv8zI9EE+lNqH5nq5Ezfqq2gftK5ATfHqqQDWnY1HW5qmZj7vOVrN+k7Ziytx5ohZnWFqav7AmpVMwQAuPfee3HnnXfixhtvxOmnnw5CCFatWoXrr78e3d3d+N73vsfCzrIHy940Btzs+jo55JF9GpCA0zQZJ2ZI0pohJwudtQ0BczWZi4dVZdCPugr6yjvAnYKSRyGpmyMTjKJupnMx9SR1k0pnrY4CnK1d7OeifnXeoJIPM+SkBnM0lsCBVFNWVTOUwgMPPIAHH3wQV155pfnaJZdcguOOOw533323CoYYgYes0Q01z2M3qjNhxBkFzqkGxq2clzVj5cSu/tE4RmLGSefyLMDWe8iKDU2f5Wb/Z1inFAF3Qa0h92fJ0rqRi7Nmaa2e4YbhY7dGOGfaB0ZjGIoynoupqxPfsjZlra9kU6YhGo7TZG1tbZg3b17O6/PmzUNbWxsVoxRyweOAPDdFm2bDRZZpMhcLMHNJdurqJBbS1T6MVVsu0mSGb9VVBFAZoq/2AdzVMvFo8uZ3w3Tw9Hk3DAwXu+z/DGuWNkNxanO8CCHMC5XdpMmMe8iqDxlg3fTa/xkr68hqYyIajoOhGTNm4Lnnnst5/dlnn8WRRx5JxSiFXLBuuAh4242yopoB5w/SmOWkc5kal+0fjiKW0NU+E1jVT7jYjbJubAi4KzhnncYA5PV5q1zcLnhIn92k73iJGQD7fb96h2OIppyRVRdxNywtjyJlY7wc1WAyLuqWAY5Dz0WLFuGyyy7DG2+8gdNPPx2apuHNN9/E8uXL8wZJCnTAh5rXr24WYD6Mlb33y6r2Me5hYzU7tY/fRQ0MT99yZhd7BsZNMMSjkNTvQszAuvAWcC5miCeS6Brg0+YCsH8f03MxhFCArfLO0XrKpQhev7opgmfpW6Lh2Au+/vWvY926dWhqasKLL76IJUuWoKmpCW+99RYuvfRSFjYqAGYzNZkeDKOxBPpGYtzssvtg6OCg9knvruz/jJkiY8juuTkQlYdvuSk4T3d55uHz9n+GR/rOFWPFlUmz9/6eoSiSRA+iGhn1PsqoGbI5Xjx9Szafd8M6si7qlgGukpJz5szBH/7wB9q2KBQAIYQLTel0N8pD7WO1y+5ulHUzNcBlT6Y+DjUdLmqGePiWG9aR9QnsgKVmyEU9mkzKztFYAr0c1D5OH/CGbzUzasoK6A93n6b7lt35yCPV6aYlCA+fNxgQZ61K2JYdyAA2/KACVfBQ+wDWLsH23m+ldFkW1TmldXmkfdwwHTx6MnlR+zBVBKaustnllIEZjMQxGDFOOpdH2clL7eNUWs/D5wGr39t7Px/f0q/uFG5y1X3xUCqKhu3tvM/nK/nA0zQN8Xjcs1EKmeCh9gGshzDapZr5tGd3utDxsMvaQ8TueUg8ejK5UUdxUSo6ZNJ4qH0Aa6NRe+83WEeWah/AubKTl9rHqbIz7fNsa018KWrI9nhx9C1n6SgOdqWubg6+VmkyAC+88ELB761evRoPPPCAOpqDEXjsFgDnu+QODgwMIKddGQoWAvhtPH+4MDBuijY5qsns3sMMtY9E6c70Dpnxw92hXTwKbwHn/sVv7dKv9oMhueu+eDT0tLu55FWmIRq2g6FLLrkk57Xt27fj9ttvx1/+8hdcccUV+Pd//3eqxinoYN3B1YBTuTiPhyjgYqHjWKgM6Hb5UToa4tqsz+bTKpEkFrUPh6JNhwxMQ3UI4QA7NtTpA4ufzzsLOng83AHnqR8eikDABXvMqXM+YH8u6n3I2K/1Tjcm/SNxROLsNyai4apmaN++fbjuuuswe/ZsxONxbNmyBU888QQOPfRQ2vYpgN9C57T4ltsC7NAuLkGH5WvbdR0cFjqnzfq6ByOm2ofVSeeA8wVYWjaU08ndbhWUrHfuTouCWTcZNeD8PsqnVOweiiCRTPUhYzgXHddgpsZqXFUQFUF2GxPRcBQM9fX14Yc//CFmzJiB9957D8uXL8df/vIXzJw5k5V9CuBHgbtdgOWzix/TAdgrOI/EE9g/FAXA7mBIwLm03vCt
CTXs1D6A8wW4g7vP23s/bwbGbtDRzilIc3oSO4+NCeDMv6LxJLoHU3ORZQrW52ysOlIsWlNNGAFGfcgA68bE3vt5NPOUAbbTZL/+9a/xq1/9CpMmTcIzzzyTN22mwAa8dn1OF2AeEmPA2W7UqvZh2onX8rWdxc6q9hlXxU7t457dY11rol/t28XLt/Sr07YNvHze8caEcfrO6XEc6d5HbP3LyRrRlepOH/L70FDNpikr4LwBKi/fcnpOYLrhogqGAAC33XYbKisrMWPGDDzxxBN44okn8r5vyZIl1IxT0MFvN2p/oSOEcEtlOJGCclP7OOx6a13o2LYhcFYzxMu3nErr+fmWu5ohfnbZez+3DZOD4HEkmkD/KPs2BIB1jSj93nZLETyXliCS+bxTNVm699HBWy8EOAiGrrzyyoP2gDbZwW2SONiNHhiOIcqpqM7JbpSX2iejgNqGYTyUZID7PjDlqPYBrAyMvffzSt85qf2yqn1kSlkbvlUV8qOG4cYEcDZestY68mZpZQvSRMO2hz7++OMMzSiN3t5e3HTTTfjzn/8MAPjqV7+KBx54AOPGjcv7/lgshjvuuAOtra345JNPUF9fj3PPPRe//OUvMWXKFI6We4NV7SOTastQbLFW+wDOiiPTSjI+TAdgM0jjXHibtNn/iJtSMXV1XBDMOL3iJDXMS+0DOJuLVrWPTOwxLzYUcBak8Ss7SP/NySQpeTQQ7zSZXcU/r7VLNMZMB+rLL78cW7ZswdKlS7F06VJs2bIFCxYsKPj+4eFhbNq0CXfeeSc2bdqEJUuW4IMPPsBXv/pVjlZ7h6H28Wn6oYIs4WR3ZW2zzxpOGtCZUl6GRcqA+zQZc6bDsgDbWez4FwTbe7/1fDmWcHK8RM9QFPGU2oel8g5wxioYO/f6SvZqH7MmzUHQwUOO7TNrhkq/t52TXda5aGvt4rVhSl2dsrSqgFoCbNu2DUuXLsXatWtx6qmnAgB++9vfYu7cudixYweOPvronJ+pr6/HsmXLMl574IEHcMopp2DPnj0F2wBEIhFEIhHz//39/QB0pikWi9H6k2xj7/5BALrahyQTiCUT1H9H+u/SJ0csnij5t+7rHQIATKwNMx8XLWVXNBYv+bvaDgwDACbUhJjZZXyucR5SJBpDLFZ8X7Gv17AryHS8Eol0B/jRaBTBEqqU9r4RAEBjVYDpeBmPhXgyWfL3xBJptU9TlZ/peBGiMyrxRGm7jLnYVB0CWM/F1IMqZsPn9/bqdvGYi6ZdcTt26WtEcw07u4zPNfwrYmOdbj+g+zz7uZj+7NFoDOFA8bnYkZqLTdWM56Km38OEjbkIpIMhlmsEKzixd0wEQ2vWrEF9fb0ZCAHAaaedhvr6eqxevTpvMJQPfX190DStYGoNAO655x4sWrQo5/VXXnkFVVVVjm33iq37NQB+hJOjaG1tZfq72vbtA+DDtu3b0Tqwreh73/xMtyva18ncroF+PwAN695aj8EPi+9m3v7AB8CH7k8/Qmvrh0zt0oM0DX//+3KMK7HJ3L5b/xvaPt6G1r73mdmk16vq0/qll5aixPqLz3p0uz7Ysg79HzAzy2SGBgYGS/rL/ggABODXCNa8vhwMFf/Y3qb78d59+9Da+lnR977bq7+3gkQ4zMW9AHzYvmMHWoe2F33v2k7dLl+kn7ldXZ36/Hp761bUdL5T9L3rdunvHerei9bWT5naFRkdAaBh1apV2Ftb/L3v79Ttavt4O1r7i69znmxKANa5WOokpU9Tc3HH229hgOHSZSwJA4NDJf0lQYCuAd2ud9e/iT1vs7OLBYaHh22/d0wEQ+3t7Whubs55vbm5Ge3t7bY+Y3R0FLfddhsuv/xy1NXVFXzf7bffjoULF5r/7+/vx7Rp0zB//vyiP8cKvev2ADu246hpzWhpOZHJ74jFYli2bBmmHTIV67racOSRR6PlrM8V/Zk1f34f+PQzzPn8DLR8eQYTuwz8/tO1+HSoH3NOPhnnHD2h6Hsf+2wdsL8PZ592Es4/biITe4zx8vv8SCSSOOvsszFlXGXRn7l3x5sAhjH/S6filMMamNgFAEOROH64fgUAYP755xdNm4xEExhZsxwA8I2Lz0NtBRvJfywWwyfP6yxtVXU1Wlq+WPT9m/ccADa9hYl1lbj4oi8xsclA77o9eH7XdkycOAktLScUfW/f+k+B7dtw5CEc5uK0Q7Cmcx+OOPIotJx9RNGf2fnaJ8DHH+G4Iw5BSwvbnm9/69uCrb2d+PxxM9FyyrSi7136x7eBtg6cdsKxaJk7nYk9xnhVV1ehJzKC0+bOw0mHjiv6M4s/0Ofi+V86Facezm4ujkQTuPUtfX7NP38+qkKFH7ejsQSGU3PxH1vOY9Z+IxaL4ZEl+lysqKxCS8sZRd/f1jcKsvYN+H0avvnVC0vWPckGI7NjB0KDobvvvjsvC2PF+vXrASBvAZ7dAzJjsRi+9a1vIZlM4je/+U3R94bDYYTDudv8YDCIYJBdf5hC6B7S0x6Tx1Ux//1+f+rBqflK/q6uAT2NMWV8NXu7UhIpzYFdUxvY2+XzAUgAPn+g6O8ihJiF3Yc01DC1K0zSVJBuV+EpvrdPH6vKoB/jayqZFrla+wyV+vt7hlN9ouormN/DQEAfHwKt5O/qHtQp98njKtnb5bfv892pZp5TOKwRTuzqTKU6p/JYI1IOpvn8pe1KFcFPZTwXE8iei4V/V1u/7lvhgA9NdYznYupKUHou7h9JpTprwwiH2dassoCT+ys0GLrxxhvxrW99q+h7DjvsMLzzzjvo6OjI+V5XVxcmTiy++4/FYvjmN7+JnTt3YsWKFULYHS/gdRYS4OzEcx7nfxmwK61PJolFhcRhvGwWnPePxjEa46T2saTFStll9S3mah+bNgG8fd6JOopPI0jAqZiBn9rHkbKToyTbrshiYDSGoWgiZRefRpC6XcXfy3UuOmi6yKuDuAwQGgw1NTWhqamp5Pvmzp2Lvr4+vPXWWzjllFMAAOvWrUNfXx/mzZtX8OeMQOjDDz/Eq6++isbGRmq280JaVcNBkeGgY6qxALNW+wD2F2Ceah/AfmO8Do5qn0w5b/H38uprAjjretvOSUkGOJPW8+y34qQZJNegw6bPE0LMrus8/MvuA95syloRKJq2omNTprS+GEzf4uDzTo7jKBclGTBGpPXHHnssLrjgAlx33XVYu3Yt1q5di+uuuw4XX3xxRvH0McccgxdeeAEAEI/H8Y//+I/YsGEDnn76aSQSCbS3t6O9vR3RaFTUn+IYPJkOuzLjWCKJniE+/VYA+40EjbFqqgmXVFHRgPErSvU24bmgOJHz8n2I6lc7C3Anp95HgDNpPa8jLwBnzSC5+pdNaX3vcAzRlPE8glq77DFPds9J+41Ojr7l5Bw3nhsm0RgTwRAAPP3005g1axbmz5+P+fPnY/bs2Xjqqacy3rNjxw709fUBAD777DP8+c9/xmeffYYTTjgBkydPNv+tXr1axJ/gCu19/BY6uw+sroEICAGCfg0NVezzyHYbqvHexdhl0kyqmcNCZ2X
YSy12BrvHY7zMposOeljx8Xl3TQRZwy5jFU8k0Z06a2sih5R1uvbLns831YQQKiVppAC7B8iavsVlLmq2D05u53jkhVkz5IQN5TBeojEm1GQA0NDQgD/84Q9F32O9uYcddpjtA/JkhfVsHx6H5Nnd9VnTGDzUBX6bDdU6ONLygP0Hqbm74pDq1DTN7H9UkhkaMJrP8QvS7MzJtF0c6tFs+vxoLIHeYb3IlU/ax16arHswiiTR/47Gan71e6Ue7ryaZhrw26wZMn2Ll12ahjghpdeuAZ51X/rVCUvLI30nGmOGGSpHGAtKZdCPugr2cavdOoVOztSpZpMZ4n2Gjt2DGHkdeWEgfVhr8fd18GRgUldbxyVwtMsu02EciRMO+FBfyV5VmmZDi7+v3VJT6OewMUmzocXfxzO9DzioGTKZIb4bppJrBMdCZSdnk/EUM4iGCoYkhvVwTx6H5PrtLsCcFQZ+mw8sngsKYF/Bkm7/L6ddPB4Mms3daKbah2dtjv1Am8dctFsnlz6Bna9v2R8vTkGHTVUg9w2T3fvIszg/dbVVnF9GajIVDEkMngWugP3DIXmdoWPA9m50gB+jANhPZfCvZdKvxeyyqn14pAzsLsAGi1YbDqCa8UnngH3f4lnHBNj3rU7T53kxHfpVpuJ8wMlclG/tIoQIqUcrxaINReIYiOhlGqqAWkEoeFPNTtNkvO2yW4TIq9jPbu5dVGF3sTSZVe3DtU7BZhE873totwaGv2/ZLbzl7Vs2U8O8NwCSiiyKMWl9IzFE4inlHccCarvrVnXIz6w7vUxQwZDE4NlMDbDfdJE3BW7XLt4yUDsLXTyRNOtNuI2XjaDWeIg2VnNS+6SuJdMYfWLuoW3f4lAED7ioGeK2RthMR/FOWdsQMySSxOw+LRPbbtzDcVXs+5ABsK9wKyMlGaCCIalhpH14TVzN4QLMfaErstJF4mm1j0zS+gy1D4dGkIC9xY6nkgywvwDz9nm7TEe7oCL4Uuq7Ts4MjO37KErMUMSunqEIEkkCn6ZL/rnYZaN+jzuLlrra9a1yUJIBKhiSGjxVNUCaUZBtAbZTHGnYxEvtA9hrutjBWe0DWFsRFLGLY18TwP4CzN3nbTIdvB/uTgu7eQVpdljHaDyJntR5adyCR1s+r68RE2rD5hlrrGGnGSTvImW7arJyUpIBKhiSGuldsjzFkYOROAbNojp58u4dnNU+gL3jOHinMQB7KRbecn+7CzDvAle70nrewZBd9R331LCNnl9GUXfI78N4RqevZ8PpGsELdtqC8L6HTmuGykFJBqhgSFroCgPO+W0bRwCYZ/twUvsA9qT17ZwLIwF7TJpZbM5RjWGHmud5/hdgv9icdwrWDgPDW+1jtauYbw1H4xgYlXFjklIpcmoJAthjYHj7FmCv3pH32mUlqIsyaWV0FAeggiFp0TscQ5SjwgCwV6fQYfY14fhwd1AQzNcu/VqsTkHEAmynGSR3paLl62L1OaKK4IsxHf0jcYzGxMzFor6V8vmqkB81nDYmdthjMQyMfpXJtwB7ys4OzuyxdS4WWyN4KxVFQwVDksKYIA3VIYQD7BUGgGWXbGcXwzGPbKtmaIBvHRNgzy7eikDAylgVfg//oCP9daEHadKi9uFdqGzH53mpfXS79GvRWhNL7R43BsbG8SUiHqJ2juMQcQK7nY2cqDYEgD3/4pniFwkVDEkKEYyCnToF3qk7wKY8leMBjAbsKJE6OTeCBOz1i+JeA2P5utBwdafUPpoGTOCkvHPCdAh5iEpaA1N0jeCsCATsSet5N4sF7HWg5r3B1GxvTFQBtYIE6ODcbwWQdwG2Y5eYQmX9aidIE7EAF2I7ovEkugdTah/OBcFA4fEy1D5NNfzUPj47TIesviUk7aNfi7ZtELB22WonIWIultiYxBJJdA+ma6x4IHNjkt+u/cNRxBL695o59dYSDRUMSQre1Clgs1BZAAVuT1ovzq6ieXeO538ZKFXY3ZVafIN+DeOrOPVbsXxdMBgSmsYo/B7ebQgAe77Fuys2YE80ICKVbseuDgFMR6nC7u7BCAgBAj4NTdX8U9aF/MvwraaaEIKcNiaiUR5/5RiEmDSZjQVYCAWuXwvZRQgRoiYr9SAVofYBSkvrzWLz2grzocsamo0FWNpicwGpTltBmqQbgE4hqfTido3GEjiQasoqovSgoM+bczHMby5avi50G8tNVg+oYEhadApwRjsN6ERQ4KV2ff2j/NU+QOlUhsHu8VT7AKWl9bzPlgOymaH87+kUkPaxI2E3iuBF9IqyU3grU22OdWMipjYn//eNAK0i6ENdBb+5WKqwW0SRckYBdUFmiH9mQjRUMCQpRKRXjElS6MEgQu0DlD6otUOA2gco/cCyphR5qX10u/RroYVORK2JZmMBlpHdA0Sl7/SrdPVoJewaiMQxHE0AkKve0epbfOdicWm9CN+yUzOUblWigiEFwUgf4yCAai4wQXqGooin1D5NnNQ+QGnGynwocD5Dp5Rdneb5X3wLEEs94IWkYC1fF1yARaZXJFLeAaV9y6r2EcGkFQo6DHavtiKAqpA8bKiIInjAxoZJ0MaklHJYRJAmGioYkhC6woDv2T5A6YdouqguzLWorhTTIaKQFCjd6E1U07JSD3jeZ8sBmQtwYbv430cjvVKIDY1b1D4TObK0WgmmozdD7SOPtN5IKfL3ef1ayre421VC2Slq7SoVpHUIyEyIhgqGJISRigr6NTRwUvsApWXGonYLpXZ9abv4TtxSh1aaNR2cF7pSdQoi0itA6WaQIo9UKVTg2j0YRZLoY8pL7aPbpV9LMQpNNSGEAvyWcX+JoIN3Z3MDJX1LQB8yoHS9o7A1taR/lVfDRUAFQ1JChNoHsDNB+FO6gJxpH6C0gsXc9XFO35Wq/RKlFCl2KGqm2kcAA1PCt3iqfQD7vsWTFQJKnw7P+8w7A6WYNOt95AmtRM2QqI2JXf9SaTIFoRChqgFsTBBhE1e/Fp64/GtNgPTDvSTTwT19V1xaL2r3Xsy/jNRdOOBDfSWfk851m/RrqVSnbL5lqn2E+ZZc6ZXSaTIx42XXLlFraj6zIvEE9g/xbcoqA1QwJCFEPUSLTRBAnNxSVqq51FluohiYYmmygdEYhgSofax25Rsuq8/zVPuUSimKOE4FKO1bolOdshXelhRZCLYr39o1FIljIKL3IeO+1hfpF2UEaKGAD+Oq+G1MREMFQxLC7D3BPb1SYgEWll6RM+goVoRICLHs+sTskvOxHYZv8Vb7AMUb0IlLKdoNOkSlhosHaTL5FiCu1qRYmowQIjA1XJilNWyq5tyHDCjuX1bf4rkxEQ0VDEkIYUWINguVxRUE534vnkiia4C/2gco/mDYPxRFNLUCigpq842XyM6yxQrOhalqbDIKwuySTalYao0QZFexgvO+kRgicf5NWYHiPi/Kt4ASdqUUgbw3JqKhgiEJIWo3Wkoq3iGslkm/5rPLqvZp5Kj20e0qFnToC0pjNV+1D1Cc7RD1EAWK70bTvaLk8S3AUtMhqAi+cJAmqtbE8K3c7y
WSxDz3TpxdhYOO8VVBhAP8mrICxU+tF8WGAsX7DIkM0kRCBUMSQsT5X0BxBmY0lkBvSu0j0260w6IS8XNU+1jtKpr2EcHAFKlTEOVbQPEjEzoEdDYHSrdHEFW/V0ryL+K4HgDwF+nL1DMYQSJJ4NN0yT9P+IrUo4kSWADFNwCiiuABS01a3pqh8lOSASoYkhKiqOaieWRBah+rXfny7qLqmIDirQhEPUSBErU5glhHoLiaTJRSsZS0XrxduYZF4gn0DPFvygqUYPdSPj+hNowA55POiylOOwT1GAJKrF2CfAuwdx9FrBEioYIhyZCp9pFH+mxlFHgX1RVTk4lK3QHFG70JtasIwycySLNVPyGIDQVyU2VWtQ9/5Z1+zedbptrH78N4zmqfomuEQAbGlm8JSEcVq8MUuUYUUw6LDNJEQgVDksFU+4QDqOauMNCvxSaICOq0WLM+kc3BtCLyZ5FpMjvUvAi7CvXOsap9RHXiBXLHy6r2qa0QFHQUUfs0C1D7FKuTE8nSanY2JkKYIf1abCMnsn4vb5psQEwLFdFQwZBkEDtxbTzchdqV+z1T+SBwoStaDyAyfSdZLVOhBVik2scaTGT7vajjVIDiGwBR538BxX1LZK1J0TSZyA1TUWm9fGsXIUQxQwpyQCh1akv5IDLtU6QnhggKvEhvEznqATJf1086F/ggLXAfjYfCuKogKoJ81T7WNFm2e4lU+xTzLaEBbZG5KEoFC5QSWYjp9wUUTt/pc1G++zgQiWMkppdpiEili4QKhiSD2ILgIg93gcqHYrtRUQcwAsWLb6VgYLIWuu4hcWof3S79mr0Ai+oODGSmybJZBZH1VbL2iirK0kpgl0z1aEDhDeb+4ShiqUHkfV4aUNi/jGLz+kr+GxPRUMGQZBClXgGKt9o3JewS7WIAscoHo8g1+yEajSdNtY/IAursOoWOVHqlqYa/2gco7F+GzwvxLUuarFDNEO/UHVC8bYMMPp+vBkbUOVuApeliVjoqlkiiW1DvI6DwBtPYxDXVhBAUMBcLpazLVUkGqGBIOoisNbFzXILYYr/M14ejcQyMGmofcXZlPxgM+jvk96Ghmj8DU+g+iupsbqDQIZ9p3xK3QwZyH6RifV6/Fi28FciGFgvShNqVzYYORkAIEPRraBQwFwsxaSJTZEBhllakwEI0VDAkGURSuoUYBWtRnYgHQyFpvTFxRah9gMLpKCujIOJsn0LSesO3eB8PYsBfIGUgMk2WIa3PtkuCbt35xQwySNgzXx+NJdA3ojdlFbl2FbqHzbUVJtsmwq7stUtkETxgGa8CGwAVDCkIh9jeE/kXuv6RuDC1D2BhOgosdOJ3V5mvi95dFaLm04yCGAo8bVfm60JTsEWl9WIOHQUKz0XRap9CbKhxDyuCPtRV8G0JotulXwv5vIh1CyjM0rYL9HmgcFNPkRsA0VDBkETIUPtIJK03Jq4ItQ9QmOkQTTWX2o2KWlAKFZOKTPsAxewSmRrW8gbbVrWPmOMS9Gt20JGh9hFYEJzzcLf4vAg2tNCBuyJ9CyjMpIk+8qIQSyuyhYpoqGBIIhhqH00DJtSI7EoqTxoDKLwbFakkAyy7qwK7UdkYq3aBTAdQWFkjstYEsPpX+jXRap+C9VUpn6+rCKAyxH9jUiigFZneB8auXcJZ2kLBkACfFw0VDEkEQ40hSu1TSMEiMo0BFLPLeLiLmbgF1VGCFRmFGCvhu9E8C3DcovYRfR+t/mXcQ1Fqn8K+JTYFW5ClFW1XwYJgwcGQr9CGSfDGpGDKWlxmQjRUMCQR5EmvZL6ePjhW1C5Gv8qX9tGvMvWnAQozVvLYlX6tK6X2Cfg0NFWLreuw+pf4h6h+le0eluwVJZFvATLUyenXQhsm4WuX5T4mkgRdg+V5FAeggiGpIJ7S1a85u5gB0RM3/0InOn2nFUjfid4l51voRmMJHBhOqX0Eqcny2ZVW+4SFqH2A/P4lutakUIGr6CCtVHsE0eko+UQWuXZF4gnsT/UhE72mWteu7kG9TMPv09AooExDNFQwJBE6BadXCjU3NGSgwindghJ2sUWI1oWOECI8qM2XyjACtIqgD3WV/NU+QH6Zseh0AZA/rdgusBEkYEndFQyGxKYUCx5dIiw1rF9l25jk83nDplDAh3FV/FuCANbSg/Rrxj2cUBPOaDlRLlDBkEQQzXTk27kDclK6hBBzURFOzVuGayASx3BUV/sIC2rzpMmsviVC7QPkP7RStG8B+Vs3iLarYBG8JKn0gqlhiRiroUgcAxFxTVl1u/Rrvnq0iYL6kAH5050mi1aG9UKACoakQrvoXUzJgmB5mI79Q1FEU09VEco7IP9hmga7V1sRQFVIDAOTj0kT3dcEyF/kKkP7//yMlSQsrXRiBv2azNqYCC/szrN2Gb5VEw6gJiyIDS1il6h0NZBfzFDOSjJABUNSwUyTCWY6rA8Fq9pnomgZaDI3vdJUE0IoIMaN8++uxBcg5mPSRCvJgPxBmgx9TfIJB8yNiWC5v0w9mYD8Y3VgOIaowKasQP65KDqgBfKXHnQI9i0gf01aOSvJABUMSQXRVLPfl0s1dw1GkCT690SpffIvKGLZKiB/ozfRqhrAynSkX0sXksr2YJAnSMuXyhDXhkC/Wn0rQ+0jyL/y1lelxqqhOoRwQMxJ50V9S4ZAWyLfAixNFy0pa9G1jqKhgiFJkKH2Eb67Sr9mFrgKVfvo10y7xE/cYgudqPO/gPzS+o4B8QcwptWK6ddEp1eAXFbBqvYRrdoC0vexJ6X28WkQcugoUJyBEdGc0kAxRaDIdFTxtUvceOU72FaGNVUkVDAkCYxi4HDAh/pKQQqDPAuwaGkqkL82R4ZdTL5dsui+JkB+lVuH4G7dQH6ZcYcM/pV1aKVV7TNekNon3wGyhs9PqBXTlBUowaIJ9S39mq9tg8h0VDGlogxrar46uXLsMQSoYEgaWNMr4hQGuQuwDBMkf35bBrv0ayJPOkqGtA/Jk76TgUkzFmCr2keGIC3b52VQ+wDpB7y0viVFnZycD3dZ1650w9j0azKk0kVCBUOSoEMChUG+BVgGSjffri+dXpGAmie56SiRqq1smbGu9pFvATZsqg75hal9gFxpvQzpFWsQZriXDL6VlxkaEK9ULMZYSbF2ZcxF8anh7CBtJJpA/2iqDYEqoFYQCSlUNb7cBbhdAruKFgSLtCtPx+4OCXbv2bLZvpEYIoLVPkBukasMvgXkSutlsMtvCYaMB6kMvpVPWi+XXenXZAg6sqX1/aNxjMSMPmTi19RsNrQq5EetwI2JSKhgSBKYD3ehRYjpr40FuFOiXXLeXZ8EdhkLilXtI1NBsPFwH18VFKb2AYqkowTeQ8DKKuj/75TALutczAkepWND5bMrmSRSFARn94sybKqrCKAyJHIu6lezNtQyVqJSw6KhgiFJYFDgQiXZeWqGZJKKGzZF40n0GGf7SGCX8RC1qn2aasSofQDrAqz/X4YdMpBbCC9LX5NCwaPIIngrS5vMSivKwSikXzNqhuSwSzds/3AU8SSBpukF56JQa
AMg3ucNMYP+fxlSiqKhgiFJIIOqJlPOq19lsCt7F9OZ2omG/OLUPkDaruz0iki1D5Cr2pJBSQbkFpzLoKoBclskyGBXUTGD0GZ9+tVgaWOJJHqGxAe1abv0q3EPm2rCCAqdi/o1uwheFp8n2b5VpkoyQAVD0iBNNYvfXQH6Apx5to8Muyv9/+nDPcWpfYB8uz7xO2TA0lBNsnRUdm1OpwTpFSCX7eiUqCcTIJd/+TMKuwm6BiIgBAj6NTRUiWNDs6XisvkWkci3gHxrhBx2icSYCYZ6e3uxYMEC1NfXo76+HgsWLMCBAwds//y//Mu/QNM0LF68mJmNbkEIkUQ2m/46aVEgVYf8qK0QycAUoJol2V2ZTIcEaQwgV/4sQ0EwkCcdJYHPA5m1X7LMRU3TMlRuo7EE+kaMpqyyMFaWM+9qK4Q1ZQVy1wgZ5P5Abl2hDL4F5B6aLMvaJRJjJhi6/PLLsWXLFixduhRLly7Fli1bsGDBAls/++KLL2LdunWYMmUKYyvdQRa1T/YCLMtDNLseQAYlGZCu6yDZ6ShJgqFEUrLgMavGSoYzmoDMJpWyqH0AK9uR9vnKoB91FeLUPtaAJ5EkljS6WAbGUJMl8hQEi4TfTN/Jo1QE8qjJJEmli8SY0NBt27YNS5cuxdq1a3HqqacCAH77299i7ty52LFjB44++uiCP7t3717ceOONePnll3HRRReV/F2RSASRSMT8f39/PwAgFoshFot5/EsK2Lh/EAAwrjIIP5KIxZIlfoIujL8rFovBp2lIEIJoNIZ9vcMAgOaaELO/3Q4SCT1Vl0gSxGIxtB3Q7ZpQHRRil/E7SVJ/aMYTSd2uPt2uJkF2GSBE959Eyi7jQdpYHRA6XgZVFY/HEYlEzVRGY6Vf6HgZj/dYLG7OxbqKAAKa2LlobEwi0Rj2GnOxNox4PM7VJisSlt8djUaxz5iLgtYI43cmE/pcTKbWiPYDcs7Fjr4R3a4q0XNRtysWT+jjlQrSGgXZxQpO/pYxEQytWbMG9fX1ZiAEAKeddhrq6+uxevXqgsFQMpnEggUL8G//9m847rjjbP2ue+65B4sWLcp5/ZVXXkFVVZW7P6AEth3QAPhRqUXR2trK5HfYwbJlywDiB6Dh78tXYFO3bld8oEeoXR0jABBAJKKPz6YPfQB86N23E62tnwiz692tWwH40dXdjdbWVrz3sW5Xx64daG3dLsyube36fdu7bx9aWz/Dni79nn70znqMfizMLOzbuxeAD9t27MD/HNiOWEJffja++Sq2COSo+/v08Xlr/QZs9gGAH1VaTPxcTOp2LV+xAjsH9HsajA8JtSuSAIzHRuvSl7H6M93nR/a3C7Vr44b1AAIYGNTHZ2tqLnbu/gCtrTuE2fW+MRfb2tDauhe7O1NzcesGRHcKMwt7U3Nxx44d+NvQdrQf0O16f+NqtL8rzi7aGB4etv3eMREMtbe3o7m5Oef15uZmtLe3F/y5X/3qVwgEArjpppts/67bb78dCxcuNP/f39+PadOmYf78+airq3NmuE0MbdwLbHsPM6Y0oaVlDpPfUQyxWAzLli3Deeedh8D615GIJ3HW2Wfjk1W7gT17cOIxn0PL+Udxt8vArp4h/GLLKviDQbS0nI//9+h6oLsXX/rCCWg5fjJ3e4zxOuH42Xjiw/cwbnwDWlpOwf/9eBWAIZz7xS/gjBlN3O0ycOCtT/G/O7ehedIknHf+bNyy9u8AgK9d+GU01fBPZxjjdei0Q7Cmcx9mHHkUZh49AdiwFo3VIXzl4vncbbLi8c/WYfdgH046aQ76R2PAtvdwhARzMbjxDcSiCXzpzLMw+n4n8OEHOGb6ZLS0zOZul4HRWAK3vrUcAHDe/PlY9ZdtwL42nDLraLR86XDu9hjjdeoppwDvbkJFVRVaWs7Ag5+sBjCIc0//Ar50pLi52L/+M/zPzvcxceIkzD9/Nr6XmouXXnCOkBSeMV7Tp03D6o69OGLGkZh72qGIr30NAPDNr1yAUGDMVM+UhJHZsQOhwdDdd9+dl4WxYv369QCQVzVECCmoJtq4cSPuv/9+bNq0yZHiKBwOIxzOfWAEg0EEg2yKiHuGdCpv8rhKZr/DDoLBoFlv4vMF0J3q5TNlfJVQu0Kp351MEgSDQXQN6HZNbagWO14BY/poCAaD6EzZdUhDjRR2EWjoiyRNtc/E+mqhRa4Bf6rJnObD/pH0mWQixwoA/KmCE83nQ8+wbpdMc9HvD6A7tUaInotJLf2g9PkD6Bo05qIcawQh+th1DsqyRug+TwD0RwmSRK/XmTy+JkO9yxtG6w/N58P+ET3F2FgdQnXlwdVnyMm9FxoM3XjjjfjWt75V9D2HHXYY3nnnHXR0dOR8r6urCxMnTsz7cytXrkRnZycOPfRQ87VEIoHvf//7WLx4MXbt2uXJdpqQQVZvwFpYJ4vcMldaL8d4mU0Xs9U+wiXs+tV6DtKEmrDQQAjILDiXxbcA6zEhcjWfs/ao6TBVW4Kl4lnSetnaNiQJQTSexP7URk60Xfl8q6kmJDQQAqxiBotvSTAXRUJoMNTU1ISmptIU5ty5c9HX14e33noLp5xyCgBg3bp16Ovrw7x58/L+zIIFC3DuuedmvHb++edjwYIFuOaaa7wbTxFdxgGMghc6IPPQSlns8lmCjqFIHENRfScj2i5jrJIkfWxJOOBDXaXY7LP1+BLjaIkJEix01kMrjfESfQ+BzKAj7fMSjJcleOyUJHi0SusTSct4CQ4erc0gjSNxgn4N4wQ2ZQUylZ1pn5fAtywbzE4JDr2WAWOiZujYY4/FBRdcgOuuuw4PP/wwAOCf//mfcfHFF2cUTx9zzDG45557cOmll6KxsRGNjY0ZnxMMBjFp0qSi6jMRMBpxiWwbb8DcYSWJqfYRbZe1oZqx+FaF/KgWfKBgvrES3QgSyDwc0ngwTBBQK5SNTLvk8C0gsxmkVHPR0gvG9C9JNiYAMGw56XxCjWgJu8W3BtI+L3wuWpobmnNRAt+y9vySxbdEY8xUSj399NOYNWsW5s+fj/nz52P27Nl46qmnMt6zY8cO9PX1CbLQPczJK9GOoX80jtGUrFj05PXl2fXJMHGtjd46pWIU9GuSyLNzB6xMmjysI5B5H2WyS8vnXxIxfIZNIQnY0Hz3UA42VE7fsh5BI9PaJRJjghkCgIaGBvzhD38o+h6j+V0hyFQnZIBIOkmMPHJNOICqkOCFzpdL6YoO0IBsqlme3VW+IE0KZsiXuwDLdB8TSevGRLxdBqswMBrHsCSpYUC/j8lEutZEBgbG57P6fNou0UgHHZCGaQcyj6BJ1wyJt0skxgwzdLCifzRudp+WYZIYu76OfnkmrrVOQaYFxXqAbKeEAW1Ssoe7L18qQwq79OvAaMzsPi2HXbph7ZZjcUSnhoE0YyXXGqFfk5Y6JjnsysNYSWCXlpEmk2ftEgkVDAmGMUFqKwKoCPoFW5O7AMswca3CC+tZSKKRN00mETUvUxE8UChNJs94tUvEhgIWu1Jdi0UXTxsw5mO7lGyoXIIUX16fl8Euy9rV
L8/aJRIqGBIMmXYLQJo+Nc6qkcEua9GmXHbpV5mkz0CmtF4m/zKKXA8MR002VEQTyGz4JPR5IO1fbZLZZdxHmcbLWicnVQrWmuKXyC7jHsaTcgVpIqGCIcGQKb8NpHfv7f3y2GVNk8loF7HuRiXYXWl5FjoZFuBsBqY2HEBlSAY2VL/K5FuAlRmSqw+MnOxxWsIuo89n2CVYeQek1/kDw1FEE/KUaYiECoYEQ6aHKJBmhmQqVLY2epPRroRkNUOGXX3DMXOhk4mBMe+hJAWbOT4vi11ZQYcMvgXkuY8S2JVvYyKDXQZL2z8Sk6o21PB5g3UcVxVEOCB+YyISKhgSDJn6wAC5uz4ZFmCrUEWm3ah5qng8YXa8lWG8jJSBMVb1lUFJ6tH0q2wMjJbNdEhjl341mSEJfAvIVzMkfiOX0ThTovYb2b4lDxuaxTpKMFaiIb5KsMzRJdHuCkgvKoaUVwa7/FmN3gA5Jq9hV3fqHKSgX8P4qpBIkwCkFzqZ7iEgr11+We0yGJgBObpPGzDskmm8zKNxkgSJ1Lk9MrChsvqWsWGSzbdEQjFDgiHTLgbIrM8B5Ji82TZpGtBQLU/QYSy+Mpz/BeS5hxI8FID0AmxABkYByFQrAvLNRcO/ZLEru6eQDGtEtk11kqlzDcgwVkCetUsSu0RCBUOCIVN+G0DOAYIyPLCyH1aN1WHz1GWRyFnoJNld5dxDyWpgDMji89kBrDR2aWPjPjbViN+YZNskSw1m9gZAWt+SYJ0XDfFPlDKHTHJLIHOH5ZOEgdE0LaNuSJaxkpVRyG4GLAszJCOjAEi8e895kMrxwLL6vSyFt9lzURafl9a3JF27REIFQwIRSySlKrwF0uoHAGisCeewDKJg3flJs6DksGhy2CUrA5PLOkpil6S7ZKtd4YAPdRVylHha/V6aoENSdk9Gph2Qlz0WCRUMCURPqvDW75Oj8BbI3MnI8rAC5LRLVqo5J0iTZKHL2b3Lch8tq6AsbCiQyaQ114k//8uAL8suGSAr0yGrz2f7kixrl0ioYEggjIaLTTUhKQpvgcxJIsvEBTAm0mQTJX0wyNDkDZA3TWa1SyY21GrGRIkeVla7pGGGxoBvAfLYJevaJRIqGBIImc5nMuCXcKEDMmldWeySl4GRcwG2pn38Pg0NkrChfglZRyDT52XxLSArTSbJeI0Fnwfk8S9ZU8MioYIhgZBNSQZkLirKruLIpeblWFBkfTBYx0smNtQnIesIZKXJJPEtYGzMRXnsknMuWn1LlkaQoqGCIYFIn1UjxwQBMnd9suxigMzFTha7pN31WQYr6NcwrjIo0Jo0ZGQUADkLggE5RQNANpMmR5Ama6GytR5NKjbUYpcsx8+IhgqGBMI8z0oiZ8zcJcuxoAByPkiz2xA0SvIg1TIYGDkaQQLZAa1EviVhQTCQ+SCVJdAG5Kzfk7c2J22XXGyonJtekVDBkEDImCbzSxh0AHLukq1jJVPh7Zi4h5IEjoCc9WhAdpAmT/AorX+l7Ar45GFDZR0rn4TsnmioYEggZDukFZB3x2Ds/CqDftSEJem3YlX7yMQoSH4PAbkeDJqsbKjFMOVfpWHMxwm1crKhcq3z6a9l8i2RUMGQQBjSeqmoeUkfWEaOe0KtpP1WJH2IynUP5UxH+SVNk2WMl0z+lbIr6NdQLwkDA6T9Xiafl3WNkNW3REIFQ4JACLEUUMvjjMYcqQr5US0JAwOMhYVOJrvSX8u6G5XLLlnTZPo16NcwvkqmoEO/TqiRZ2MCWNYIqe6hnBuT7IaeCioYEobBSByjsSQAoKlWDoUBkJ68Mj3cATntklHhBmQtwBLVmsiaJpNdWi9t0CGRbwHp+yjTw13WYEhWnxcJFQwJgqEkqwkHUBWSiIHxycfAAGlljUx2ZSjcJHowyFoQLKMkG0jfx2rJ2FC/pEGHX0IGBrCsXRLZJasiUNa5KBIqGBKEdPdpeSYIkFmEKBPkpMDTX0+UaLxklD4DWU0XJWRDZRorIP0glcm3gLR/STdeEt5HWZkhzTIZVQG1DhUMCYIRDDVJNEGANKsgU9ABWHajEo1XZuGtPLurzOJIecbL2LnLxob6ZWVDjdSwZA8rWcdLRrtkldYbxy7JpM4VDRUMCUKnhD2GgHSdgkwPdyC9G5XpwaBJW0At5wJs1nRIZBNg8S3J0gXpOjlZ7ZLrPqZZbXnGS16WNh1oy1SPJhIqJBSEaDyJiqBPugVl/ucnYutnfTj76GbRpmTgotlTsPTdNsw5tEG0KSZCAR8unDkJw9EEJkkUPE6oCWPu5xoxoTaMiqA8Zw7NmlqPGc01+IcTpog2JQNfnNGEPzZ8iotmTxZtSgbO+/xEbPn0AM45Rq65eP7MSfisdxhfOnKCaFMycPHsKdi4uxfHTakTbYqJpuow5h3RiMaasFRs6MypdTiyuQYXz5ZrLoqERgghoo2QGf39/aivr0dfXx/q6uhOMkII4kmCoF8sQReLxdDa2oqWlhYEg/JIeGWFGi9nUONlH2qsnEGNlzOU23g5eX7LE6qWITRNQ9CvKEoFBQUFBQWRUDVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDCgoKCgoKCmUNdWp9CRBCAAD9/f2CLWGHWCyG4eFh9Pf3IxgMijZHeqjxcgY1XvahxsoZ1Hg5Q7mNl/HcNp7jxaCCoRIYGBgAAEybNk2wJQoKCgoKCgpOMTAwgPr6+qLv0YidkKmMkUwmsW/fPtTW1kLTNNHmMEF/fz+mTZuGTz/9FHV1daLNkR5qvJxBjZd9qLFyBjVezlBu40UIwcDAAKZMmQKfr3hVkGKGSsDn8+GQQw4RbQYX1NXVlcUEoQU1Xs6gxss+1Fg5gxovZyin8SrFCBlQBdQKCgoKCgoKZQ0VDCkoKCgoKCiUNVQwpIBwOIy77roL4XBYtCljAmq8nEGNl32osXIGNV7OoMarMFQBtYKCgoKCgkJZQzFDCgoKCgoKCmUNFQwpKCgoKCgolDVUMKSgoKCgoKBQ1lDBkIKCgoKCgkJZQwVDBwneeOMNfOUrX8GUKVOgaRpefPHFjO93dHTg6quvxpQpU1BVVYULLrgAH374YcZ7zjrrLGialvHvW9/6VsZ7ent7sWDBAtTX16O+vh4LFizAgQMHGP919MFjvHbt2oVrr70Whx9+OCorK3HEEUfgrrvuQjQa5fEnUgMv3zIQiURwwgknQNM0bNmyhdFfxQ48x+tvf/sbTj31VFRWVqKpqQlf+9rXWP5pTMBrvD744ANccsklaGpqQl1dHU4//XS8+uqrrP88qqAxVgCwZs0anHPOOaiursa4ceNw1llnYWRkxPz+wbLOO4EKhg4SDA0N4fjjj8f//b//N+d7hBD8wz/8Az755BP86U9/wubNmzF9+nSce+6
5GBoaynjvddddh7a2NvPfww8/nPH9yy+/HFu2bMHSpUuxdOlSbNmyBQsWLGD6t7EAj/Havn07kskkHn74Ybz33nu477778NBDD+FHP/oR87+PJnj5loFbb70VU6ZMYfK38ACv8Xr++eexYMECXHPNNXj77bexatUqXH755Uz/NhbgNV4XXXQR4vE4VqxYgY0bN+KEE07AxRdfjPb2dqZ/H03QGKs1a9bgggsuwPz58/HWW29h/fr1uPHGGzOOqzhY1nlHIAoHHQCQF154wfz/jh07CADy7rvvmq/F43HS0NBAfvvb35qvnXnmmeTmm28u+Lnvv/8+AUDWrl1rvrZmzRoCgGzfvp3q38ATrMYrH37961+Tww8/3KvJwsB6rFpbW8kxxxxD3nvvPQKAbN68maL1/MFqvGKxGJk6dSr53e9+x8JsYWA1Xl1dXQQAeeONN8zX+vv7CQDy97//nerfwAtux+rUU08ld9xxR8HPPVjX+VJQzFAZIBKJAAAqKirM1/x+P0KhEN58882M9z799NNoamrCcccdhx/84AcYGBgwv7dmzRrU19fj1FNPNV877bTTUF9fj9WrVzP+K/iB1njlQ19fHxoaGugbLQg0x6qjowPXXXcdnnrqKVRVVbE3XgBojdemTZuwd+9e+Hw+nHjiiZg8eTIuvPBCvPfee3z+EE6gNV6NjY049thj8eSTT2JoaAjxeBwPP/wwJk6ciDlz5vD5YxjDzlh1dnZi3bp1aG5uxrx58zBx4kSceeaZGWNZLut8NlQwVAY45phjMH36dNx+++3o7e1FNBrFL3/5S7S3t6Otrc183xVXXIFnnnkGr732Gu688048//zzGTUI7e3taG5uzvn85ubmMUU1lwKt8crGxx9/jAceeADXX389jz+DC2iNFSEEV199Na6//nqcfPLJIv4ULqA1Xp988gkA4O6778Ydd9yBv/71rxg/fjzOPPNM7N+/n/vfxQq0xkvTNCxbtgybN29GbW0tKioqcN9992Hp0qUYN26cgL+MPuyMldVvrrvuOixduhQnnXQSvvzlL5u1ReWyzudANDWlQB/Iok8JIWTDhg3k+OOPJwCI3+8n559/PrnwwgvJhRdeWPBzNmzYQACQjRs3EkII+fnPf06OOuqonPfNmDGD3HPPPVT/Bp5gNV5W7N27l8yYMYNce+21tM3nClZjdf/995N58+aReDxOCCFk586dB2WajBA64/X0008TAOThhx823zM6OkqamprIQw89xORv4QFW45VMJslXv/pVcuGFF5I333yTbNy4kfzrv/4rmTp1Ktm3bx/LP4kZ3IzVqlWrCABy++23Z/zcrFmzyG233UYIOXjX+VJQzFCZYM6cOdiyZQsOHDiAtrY2LF26FD09PTj88MML/sxJJ52EYDBo7hgmTZqEjo6OnPd1dXVh4sSJzGwXARrjZWDfvn04++yzMXfuXDzyyCOsTecOGmO1YsUKrF27FuFwGIFAADNmzAAAnHzyybjqqqu4/B28QGO8Jk+eDAD4/Oc/b74nHA7jc5/7HPbs2cP2D+AMWv7117/+FX/84x9x+umn46STTsJvfvMbVFZW4oknnuD1pzBHqbHK5zcAcOyxx5p+U07rvBUqGCoz1NfXY8KECfjwww+xYcMGXHLJJQXf+9577yEWi5kTaO7cuejr68Nbb71lvmfdunXo6+vDvHnzmNsuAl7GCwD27t2Ls846CyeddBIee+yxDMXGwQYvY/Vf//VfePvtt7FlyxZs2bIFra2tAIBnn30WP//5z7nYzxtexmvOnDkIh8PYsWOH+Z5YLIZdu3Zh+vTpzG0XAS/jNTw8DAA588/n8yGZTLIzWhAKjdVhhx2GKVOmZPgNoLcdMPymHNd5ACpNdrBgYGCAbN68mWzevJkAIPfeey/ZvHkz2b17NyGEkOeee468+uqr5OOPPyYvvvgimT59Ovna175m/vxHH31EFi1aRNavX0927txJ/va3v5FjjjmGnHjiiWbqghBCLrjgAjJ79myyZs0asmbNGjJr1ixy8cUXc/97vYLHeBmpsXPOOYd89tlnpK2tzfw3lsDLt6wYy2kyXuN18803k6lTp5KXX36ZbN++nVx77bWkubmZ7N+/n/vf7AU8xqurq4s0NjaSr33ta2TLli1kx44d5Ac/+AEJBoNky5YtQv5uN/A6VoQQct9995G6ujryP//zP+TDDz8kd9xxB6moqCAfffSR+Z6DZZ13AhUMHSR49dVXCYCcf1dddRUhRK/JOOSQQ0gwGCSHHnooueOOO0gkEjF/fs+ePeRLX/oSaWhoIKFQiBxxxBHkpptuIj09PRm/p6enh1xxxRWktraW1NbWkiuuuIL09vZy/EvpgMd4PfbYY3l/x1jbg/DyLSvGcjDEa7yi0Sj5/ve/T5qbm0ltbS0599xzM2TVYwW8xmv9+vVk/vz5pKGhgdTW1pLTTjuNtLa28vxTPcPrWBm45557yCGHHEKqqqrI3LlzycqVKzO+f7Cs806gEUIIG85JQUFBQUFBQUF+HLwFDAoKCgoKCgoKNqCCIQUFBQUFBYWyhgqGFBQUFBQUFMoaKhhSUFBQUFBQKGuoYEhBQUFBQUGhrKGCIQUFBQUFBYWyhgqGFBQUFBQUFMoaKhhSUFBQUFBQKGuoYEhBQUFBQUGhrKGCIQUFBW64+uqroWkaNE1DMBjExIkTcd555+HRRx91dGDm448/jnHjxlG17bXXXoOmaThw4ADVz1VQUJAfKhhSUFDgigsuuABtbW3YtWsXXnrpJZx99tm4+eabcfHFFyMej4s2T0FBoQyhgiEFBQWuCIfDmDRpEqZOnYqTTjoJP/rRj/CnP/0JL730Eh5//HEAwL333otZs2ahuroa06ZNww033IDBwUEAOoNzzTXXoK+vz2SZ7r77bgBANBrFrbfeiqlTp6K6uhqnnnoqXnvtNfN37969G1/5ylcwfvx4VFdX47jjjkNrayt27dqFs88+GwAwfvx4aJqGq6++GgCwdOlSfPGLX8S4cePQ2NiIiy++GB9//LH5mbt27YKmaXjuuedwxhlnoLKyEl/4whfwwQcfYP369Tj55JNRU1ODCy64AF1dXebPXX311fiHf/gHLFq0CM3Nzairq8O//Mu/IBqNsht8BQWFvFDBkIKCgnCcc845OP7447FkyRIAgM/nw3/913/h3XffxRNPPIEVK1bg1ltvBQDMmzcPixcvRl1dHdra2tDW1oYf/OAHAIBrrrkGq1atwh//+Ee88847+MY3voELLrgAH374IQDgu9/9LiKRCN544w1s3boVv/rVr1BTU4Np06bh+eefBwDs2LEDbW1tuP/++wEAQ0NDWLhwIdavX4/ly5fD5/Ph0ksvzUnr3XXXXbjjjjuwadMmBAIB/J//839w66234v7778fKlSvx8ccf4yc/+UnGzyxfvhzbtm3Dq6++imeeeQYvvPACFi1axG6gFRQU8sP7wfcKCgoK9nDVVVeRSy65JO/3LrvsMnLsscfm/d5zzz1HGhsbzf8/9thjpL6+PuM9H330Ed
E0jezduzfj9S9/+cvk9ttvJ4QQMmvWLHL33Xfn/R2vvvoqAUB6e3uL/g2dnZ0EANm6dSshhJCdO3cSAOR3v/ud+Z5nnnmGACDLly83X7vnnnvI0Ucfbf7/qquuIg0NDWRoaMh87cEHHyQ1NTUkkUgUtUFBQYEuAmJDMQUFBQUdhBBomgYAePXVV/GLX/wC77//Pvr7+xGPxzE6OoqhoSFUV1fn/flNmzaBEIKjjjoq4/VIJILGxkYAwE033YR//dd/xSuvvIJzzz0XX//61zF79uyidn388ce48847sXbtWnR3d5uM0J49ezBz5kzzfdbPmThxIgBg1qxZGa91dnZmfPbxxx+Pqqoq8/9z587F4OAgPv30U0yfPr2oXQoKCvSg0mQKCgpSYNu2bTj88MOxe/dutLS0YObMmXj++eexceNG/Pd//zcAIBaLFfz5ZDIJv9+PjRs3YsuWLea/bdu2mSmv73znO/jkk0+wYMECbN26FSeffDIeeOCBonZ95StfQU9PD377299i3bp1WLduHQDk1PYEg0HzayOoy37NrmLO+HkFBQU+UMGQgoKCcKxYsQJbt27F17/+dWzYsAHxeBz/+Z//idNOOw1HHXUU9u3bl/H+UCiERCKR8dqJJ56IRCKBzs5OzJgxI+PfpEmTzPdNmzYN119/PZYsWYLvf//7+O1vf2t+JoCMz+3p6cG2bdtwxx134Mtf/jKOPfZY9Pb2Uvu73377bYyMjJj/X7t2LWpqanDIIYdQ+x0KCgqloYIhBQUFrohEImhvb8fevXuxadMm/OIXv8All1yCiy++GFdeeSWOOOIIxONxPPDAA/jkk0/w1FNP4aGHHsr4jMMOOwyDg4NYvnw5uru7MTw8jKOOOgpXXHEFrrzySixZsgQ7d+7E+vXr8atf/Qqtra0AgFtuuQUvv/wydu7ciU2bNmHFihU49thjAQDTp0+Hpmn461//iq6uLgwODmL8+PFobGzEI488go8++ggrVqzAwoULqY1FNBrFtddei/fffx8vvfQS7rrrLtx4443w+dTSrKDAFaKLlhQUFMoHV111FQFAAJBAIEAmTJhAzj33XPLoo49mFA3fe++9ZPLkyaSyspKcf/755Mknn8wpbr7++utJY2MjAUDuuusuQggh0WiU/OQnPyGHHXYYCQaDZNKkSeTSSy8l77zzDiGEkBtvvJEcccQRJBwOkwkTJpAFCxaQ7u5u8zN/+tOfkkmTJhFN08hVV11FCCFk2bJl5NhjjyXhcJjMnj2bvPbaawQAeeGFFwgh6QLqzZs3m5+Trxg7u+jbKCb/yU9+QhobG0lNTQ35zne+Q0ZHR6mMtYKCgn1ohBAiMBZTUFBQKEtcffXVOHDgAF588UXRpigolD0UF6ugoKCgoKBQ1lDBkIKCgoKCgkJZQ6XJFBQUFBQUFMoaihlSUFBQUFBQKGuoYEhBQUFBQUGhrKGCIQUFBQUFBYWyhgqGFBQUFBQUFMoaKhhSUFBQUFBQKGuoYEhBQUFBQUGhrKGCIQUFBQUFBYWyhgqGFBQUFBQUFMoa/z85qz2D6c3tFAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "plot_df = AirPassengerPanelCalendar[AirPassengerPanelCalendar.unique_id=='Airline1'].set_index('ds')\n", "plt.plot(plot_df['month'])\n", @@ -609,41 +1301,51 @@ "source": [ "#| export\n", "def add_conformal_distribution_intervals(\n", - " fcst_df: DFType, \n", + " model_fcsts: np.array, \n", " cs_df: DFType,\n", - " model_names: List[str],\n", - " level: List[Union[int, float]],\n", + " model: str,\n", " cs_n_windows: int,\n", " n_series: int,\n", " horizon: int,\n", - ") -> DFType:\n", + " level: Optional[List[Union[int, float]]] = None,\n", + " quantiles: Optional[List[float]] = None,\n", + ") -> Tuple[np.array, List[str]]:\n", " \"\"\"\n", " Adds conformal intervals to a `fcst_df` based on conformal scores `cs_df`.\n", " `level` should be already sorted. This strategy creates forecasts paths\n", " based on errors and calculate quantiles using those paths.\n", " \"\"\"\n", - " fcst_df = ufp.copy_if_pandas(fcst_df, deep=False)\n", - " alphas = [100 - lv for lv in level]\n", - " cuts = [alpha / 200 for alpha in reversed(alphas)]\n", - " cuts.extend(1 - alpha / 200 for alpha in alphas)\n", - " for model in model_names:\n", - " scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon)\n", - " scores = scores.transpose(1, 0, 2)\n", - " # restrict scores to horizon\n", - " scores = scores[:,:,:horizon]\n", - " mean = fcst_df[model].to_numpy().reshape(1, n_series, -1)\n", - " scores = np.vstack([mean - scores, mean + scores])\n", - " quantiles = np.quantile(\n", - " scores,\n", - " cuts,\n", - " axis=0,\n", - " )\n", - " quantiles = quantiles.reshape(len(cuts), -1).T\n", + " assert level is not None or quantiles is not None, \"Either level or quantiles must be provided\"\n", + " \n", + " if quantiles is None and level is not None:\n", + " alphas = [100 - lv for lv in level]\n", + " cuts = [alpha / 200 for alpha in reversed(alphas)]\n", + " cuts.extend(1 - alpha / 200 for alpha in alphas)\n", + " elif quantiles is not None:\n", + " cuts = quantiles\n", + " \n", + " scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon)\n", + " scores = scores.transpose(1, 0, 2)\n", + " # restrict scores to horizon\n", + " scores = scores[:,:,:horizon]\n", + " mean = model_fcsts.reshape(1, n_series, -1)\n", + " scores = np.vstack([mean - scores, mean + scores])\n", + " scores_quantiles = np.quantile(\n", + " scores,\n", + " cuts,\n", + " axis=0,\n", + " )\n", + " scores_quantiles = scores_quantiles.reshape(len(cuts), -1).T\n", + " if quantiles is None and level is not None:\n", " lo_cols = [f\"{model}-lo-{lv}\" for lv in reversed(level)]\n", " hi_cols = [f\"{model}-hi-{lv}\" for lv in level]\n", " out_cols = lo_cols + hi_cols\n", - " fcst_df = ufp.assign_columns(fcst_df, out_cols, quantiles)\n", - " return fcst_df" + " elif quantiles is not None:\n", + " out_cols = [f\"{model}-ql{q}\" for q in quantiles]\n", + "\n", + " fcsts_with_intervals = np.hstack([model_fcsts, scores_quantiles])\n", + "\n", + " return fcsts_with_intervals, out_cols" ] }, { @@ -654,39 +1356,59 @@ "source": [ "#| export\n", "def add_conformal_error_intervals(\n", - " fcst_df: DFType, \n", + " model_fcsts: np.array, \n", " cs_df: DFType, \n", - " model_names: List[str],\n", - " level: List[Union[int, float]],\n", + " model: str,\n", " cs_n_windows: int,\n", " n_series: int,\n", " horizon: int,\n", - ") -> DFType:\n", + " level: Optional[List[Union[int, float]]] = None,\n", + " quantiles: Optional[List[float]] = None,\n", + ") -> 
Tuple[np.array, List[str]]:\n", " \"\"\"\n", " Adds conformal intervals to the model forecasts based on conformal scores `cs_df`.\n", " `level` should already be sorted. This strategy creates prediction intervals\n", " based on the absolute errors.\n", " \"\"\"\n", - " fcst_df = ufp.copy_if_pandas(fcst_df, deep=False)\n", - " cuts = [lv / 100 for lv in level]\n", - " for model in model_names:\n", - " mean = fcst_df[model].to_numpy().ravel()\n", - " scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon)\n", - " scores = scores.transpose(1, 0, 2)\n", - " # restrict scores to horizon\n", - " scores = scores[:,:,:horizon]\n", - " quantiles = np.quantile(\n", - " scores,\n", - " cuts,\n", - " axis=0,\n", - " )\n", - " quantiles = quantiles.reshape(len(cuts), -1)\n", + " assert level is not None or quantiles is not None, \"Either level or quantiles must be provided\"\n", + "\n", + " if quantiles is None and level is not None:\n", + " cuts = [lv / 100 for lv in level]\n", + " elif quantiles is not None:\n", + " cuts = quantiles\n", + "\n", + " mean = model_fcsts.ravel()\n", + " scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon)\n", + " scores = scores.transpose(1, 0, 2)\n", + " # restrict scores to horizon\n", + " scores = scores[:,:,:horizon]\n", + " scores_quantiles = np.quantile(\n", + " scores,\n", + " cuts,\n", + " axis=0,\n", + " )\n", + " scores_quantiles = scores_quantiles.reshape(len(cuts), -1)\n", + " if quantiles is None and level is not None:\n", " lo_cols = [f\"{model}-lo-{lv}\" for lv in reversed(level)]\n", " hi_cols = [f\"{model}-hi-{lv}\" for lv in level]\n", - " quantiles = np.vstack([mean - quantiles[::-1], mean + quantiles]).T\n", - " columns = lo_cols + hi_cols\n", - " fcst_df = ufp.assign_columns(fcst_df, columns, quantiles)\n", - " return fcst_df" + " out_cols = lo_cols + hi_cols\n", + " scores_quantiles = np.vstack([mean - scores_quantiles[::-1], mean + scores_quantiles]).T\n", + " elif quantiles is not None:\n", + " out_cols = []\n", + " scores_quantiles_ls = []\n", + " for i, q in enumerate(quantiles):\n", + " out_cols.append(f\"{model}-ql{q}\")\n", + " if q < 0.5:\n", + " scores_quantiles_ls.append(mean - scores_quantiles[::-1][i])\n", + " elif q > 0.5:\n", + " scores_quantiles_ls.append(mean + scores_quantiles[i])\n", + " else:\n", + " scores_quantiles_ls.append(mean)\n", + " scores_quantiles = np.vstack(scores_quantiles_ls).T \n", + "\n", + " fcsts_with_intervals = np.hstack([model_fcsts, scores_quantiles])\n", + "\n", + " return fcsts_with_intervals, out_cols" ] }, { @@ -708,6 +1430,45 @@ " )\n", " return available_methods[method]" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "def level_to_quantiles(level: List[Union[int, float]]) -> List[float]:\n", + " \"\"\"\n", + " Converts a list of levels to a list of quantiles.\n", + " \"\"\"\n", + " level_set = set(level)\n", + " return sorted(list(set(sum([[(50 - l / 2) / 100, (50 + l / 2) / 100] for l in level_set], []))))\n", + "\n", + "def quantiles_to_level(quantiles: List[float]) -> List[Union[int, float]]:\n", + " \"\"\"\n", + " Converts a list of quantiles to a list of levels.\n", + " \"\"\"\n", + " quantiles_set = set(quantiles)\n", + " return sorted(set([int(round(100 - 200 * (q * (q < 0.5) + (1 - q) * (q >= 0.5)), 2)) for q in quantiles_set]))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# Test 
level_to_quantiles\n", + "level_base = [80, 90]\n", + "quantiles_base = [0.05, 0.1, 0.9, 0.95]\n", + "quantiles = level_to_quantiles(level_base)\n", + "level = quantiles_to_level(quantiles_base)\n", + "\n", + "assert quantiles == quantiles_base\n", + "assert level == level_base" + ] } ], "metadata": { diff --git a/neuralforecast/_modidx.py b/neuralforecast/_modidx.py index 25f008ce4..4e9e8fe6c 100644 --- a/neuralforecast/_modidx.py +++ b/neuralforecast/_modidx.py @@ -164,6 +164,10 @@ 'neuralforecast/core.py'), 'neuralforecast.core.NeuralForecast._conformity_scores': ( 'core.html#neuralforecast._conformity_scores', 'neuralforecast/core.py'), + 'neuralforecast.core.NeuralForecast._generate_forecasts': ( 'core.html#neuralforecast._generate_forecasts', + 'neuralforecast/core.py'), + 'neuralforecast.core.NeuralForecast._get_column_name': ( 'core.html#neuralforecast._get_column_name', + 'neuralforecast/core.py'), 'neuralforecast.core.NeuralForecast._get_model_names': ( 'core.html#neuralforecast._get_model_names', 'neuralforecast/core.py'), 'neuralforecast.core.NeuralForecast._get_needed_exog': ( 'core.html#neuralforecast._get_needed_exog', @@ -284,10 +288,14 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.DistributionLoss.__init__': ( 'losses.pytorch.html#distributionloss.__init__', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.DistributionLoss._domain_map': ( 'losses.pytorch.html#distributionloss._domain_map', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.DistributionLoss.get_distribution': ( 'losses.pytorch.html#distributionloss.get_distribution', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.DistributionLoss.sample': ( 'losses.pytorch.html#distributionloss.sample', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.DistributionLoss.update_quantile': ( 'losses.pytorch.html#distributionloss.update_quantile', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.GMM': ( 'losses.pytorch.html#gmm', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.GMM.__call__': ( 'losses.pytorch.html#gmm.__call__', @@ -296,12 +304,14 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.GMM.domain_map': ( 'losses.pytorch.html#gmm.domain_map', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.GMM.neglog_likelihood': ( 'losses.pytorch.html#gmm.neglog_likelihood', - 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.GMM.get_distribution': ( 'losses.pytorch.html#gmm.get_distribution', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.GMM.sample': ( 'losses.pytorch.html#gmm.sample', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.GMM.scale_decouple': ( 'losses.pytorch.html#gmm.scale_decouple', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.GMM.update_quantile': ( 'losses.pytorch.html#gmm.update_quantile', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.HuberLoss': ( 'losses.pytorch.html#huberloss', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.HuberLoss.__call__': ( 'losses.pytorch.html#huberloss.__call__', @@ -342,6 +352,8 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.ISQF.crps': ( 'losses.pytorch.html#isqf.crps', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.ISQF.mean': ( 'losses.pytorch.html#isqf.mean', + 'neuralforecast/losses/pytorch.py'), 
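# A quick numeric sketch of the round-trip behind the level_to_quantiles /
# quantiles_to_level helpers added in the utils notebook above (values follow
# directly from their definitions; shown here as an illustration only):
# level 80 -> quantiles (50 - 80/2)/100 = 0.1 and (50 + 80/2)/100 = 0.9;
# level 90 -> 0.05 and 0.95
assert level_to_quantiles([80, 90]) == [0.05, 0.1, 0.9, 0.95]
# q < 0.5 -> level 100 - 200*q; q >= 0.5 -> level 100 - 200*(1 - q)
assert quantiles_to_level([0.05, 0.1, 0.9, 0.95]) == [80, 90]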
'neuralforecast.losses.pytorch.MAE': ( 'losses.pytorch.html#mae', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.MAE.__call__': ( 'losses.pytorch.html#mae.__call__', @@ -384,12 +396,14 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.NBMM.domain_map': ( 'losses.pytorch.html#nbmm.domain_map', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.NBMM.neglog_likelihood': ( 'losses.pytorch.html#nbmm.neglog_likelihood', - 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.NBMM.get_distribution': ( 'losses.pytorch.html#nbmm.get_distribution', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.NBMM.sample': ( 'losses.pytorch.html#nbmm.sample', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.NBMM.scale_decouple': ( 'losses.pytorch.html#nbmm.scale_decouple', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.NBMM.update_quantile': ( 'losses.pytorch.html#nbmm.update_quantile', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.PMM': ( 'losses.pytorch.html#pmm', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.PMM.__call__': ( 'losses.pytorch.html#pmm.__call__', @@ -398,12 +412,14 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.PMM.domain_map': ( 'losses.pytorch.html#pmm.domain_map', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.PMM.neglog_likelihood': ( 'losses.pytorch.html#pmm.neglog_likelihood', - 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.PMM.get_distribution': ( 'losses.pytorch.html#pmm.get_distribution', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.PMM.sample': ( 'losses.pytorch.html#pmm.sample', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.PMM.scale_decouple': ( 'losses.pytorch.html#pmm.scale_decouple', 'neuralforecast/losses/pytorch.py'), + 'neuralforecast.losses.pytorch.PMM.update_quantile': ( 'losses.pytorch.html#pmm.update_quantile', + 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.QuantileLayer': ( 'losses.pytorch.html#quantilelayer', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.QuantileLayer.__init__': ( 'losses.pytorch.html#quantilelayer.__init__', @@ -454,8 +470,6 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch._weighted_mean': ( 'losses.pytorch.html#_weighted_mean', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.bernoulli_domain_map': ( 'losses.pytorch.html#bernoulli_domain_map', - 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.bernoulli_scale_decouple': ( 'losses.pytorch.html#bernoulli_scale_decouple', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.est_alpha': ( 'losses.pytorch.html#est_alpha', @@ -470,16 +484,10 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.level_to_outputs': ( 'losses.pytorch.html#level_to_outputs', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.nbinomial_domain_map': ( 'losses.pytorch.html#nbinomial_domain_map', - 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.nbinomial_scale_decouple': ( 'losses.pytorch.html#nbinomial_scale_decouple', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.normal_domain_map': ( 'losses.pytorch.html#normal_domain_map', - 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.normal_scale_decouple': ( 
'losses.pytorch.html#normal_scale_decouple', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.poisson_domain_map': ( 'losses.pytorch.html#poisson_domain_map', - 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.poisson_scale_decouple': ( 'losses.pytorch.html#poisson_scale_decouple', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.quantiles_to_outputs': ( 'losses.pytorch.html#quantiles_to_outputs', @@ -496,8 +504,6 @@ 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.sCRPS.__init__': ( 'losses.pytorch.html#scrps.__init__', 'neuralforecast/losses/pytorch.py'), - 'neuralforecast.losses.pytorch.student_domain_map': ( 'losses.pytorch.html#student_domain_map', - 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.student_scale_decouple': ( 'losses.pytorch.html#student_scale_decouple', 'neuralforecast/losses/pytorch.py'), 'neuralforecast.losses.pytorch.tweedie_domain_map': ( 'losses.pytorch.html#tweedie_domain_map', @@ -589,15 +595,7 @@ 'neuralforecast.models.deepar.DeepAR.__init__': ( 'models.deepar.html#deepar.__init__', 'neuralforecast/models/deepar.py'), 'neuralforecast.models.deepar.DeepAR.forward': ( 'models.deepar.html#deepar.forward', - 'neuralforecast/models/deepar.py'), - 'neuralforecast.models.deepar.DeepAR.predict_step': ( 'models.deepar.html#deepar.predict_step', - 'neuralforecast/models/deepar.py'), - 'neuralforecast.models.deepar.DeepAR.train_forward': ( 'models.deepar.html#deepar.train_forward', - 'neuralforecast/models/deepar.py'), - 'neuralforecast.models.deepar.DeepAR.training_step': ( 'models.deepar.html#deepar.training_step', - 'neuralforecast/models/deepar.py'), - 'neuralforecast.models.deepar.DeepAR.validation_step': ( 'models.deepar.html#deepar.validation_step', - 'neuralforecast/models/deepar.py')}, + 'neuralforecast/models/deepar.py')}, 'neuralforecast.models.deepnpts': { 'neuralforecast.models.deepnpts.DeepNPTS': ( 'models.deepnpts.html#deepnpts', 'neuralforecast/models/deepnpts.py'), 'neuralforecast.models.deepnpts.DeepNPTS.__init__': ( 'models.deepnpts.html#deepnpts.__init__', @@ -1304,14 +1302,6 @@ 'neuralforecast/models/tsmixer.py'), 'neuralforecast.models.tsmixer.MixingLayer.forward': ( 'models.tsmixer.html#mixinglayer.forward', 'neuralforecast/models/tsmixer.py'), - 'neuralforecast.models.tsmixer.ReversibleInstanceNorm1d': ( 'models.tsmixer.html#reversibleinstancenorm1d', - 'neuralforecast/models/tsmixer.py'), - 'neuralforecast.models.tsmixer.ReversibleInstanceNorm1d.__init__': ( 'models.tsmixer.html#reversibleinstancenorm1d.__init__', - 'neuralforecast/models/tsmixer.py'), - 'neuralforecast.models.tsmixer.ReversibleInstanceNorm1d.forward': ( 'models.tsmixer.html#reversibleinstancenorm1d.forward', - 'neuralforecast/models/tsmixer.py'), - 'neuralforecast.models.tsmixer.ReversibleInstanceNorm1d.reverse': ( 'models.tsmixer.html#reversibleinstancenorm1d.reverse', - 'neuralforecast/models/tsmixer.py'), 'neuralforecast.models.tsmixer.TSMixer': ( 'models.tsmixer.html#tsmixer', 'neuralforecast/models/tsmixer.py'), 'neuralforecast.models.tsmixer.TSMixer.__init__': ( 'models.tsmixer.html#tsmixer.__init__', @@ -1494,5 +1484,9 @@ 'neuralforecast/utils.py'), 'neuralforecast.utils.get_prediction_interval_method': ( 'utils.html#get_prediction_interval_method', 'neuralforecast/utils.py'), + 'neuralforecast.utils.level_to_quantiles': ( 'utils.html#level_to_quantiles', + 'neuralforecast/utils.py'), + 'neuralforecast.utils.quantiles_to_level': ( 'utils.html#quantiles_to_level', + 
'neuralforecast/utils.py'), 'neuralforecast.utils.time_features_from_frequency_str': ( 'utils.html#time_features_from_frequency_str', 'neuralforecast/utils.py')}}} diff --git a/neuralforecast/auto.py b/neuralforecast/auto.py index b3c85892a..cb69edc49 100644 --- a/neuralforecast/auto.py +++ b/neuralforecast/auto.py @@ -63,10 +63,10 @@ class AutoRNN(BaseAuto): "input_size_multiplier": [-1, 4, 16, 64], "inference_input_size_multiplier": [-1], "h": None, - "encoder_hidden_size": tune.choice([50, 100, 200, 300]), + "encoder_hidden_size": tune.choice([16, 32, 64, 128]), "encoder_n_layers": tune.randint(1, 4), "context_size": tune.choice([5, 10, 50]), - "decoder_hidden_size": tune.choice([64, 128, 256, 512]), + "decoder_hidden_size": tune.choice([16, 32, 64, 128]), "learning_rate": tune.loguniform(1e-4, 1e-1), "max_steps": tune.choice([500, 1000]), "batch_size": tune.choice([16, 32]), @@ -138,10 +138,10 @@ class AutoLSTM(BaseAuto): "input_size_multiplier": [-1, 4, 16, 64], "inference_input_size_multiplier": [-1], "h": None, - "encoder_hidden_size": tune.choice([50, 100, 200, 300]), + "encoder_hidden_size": tune.choice([16, 32, 64, 128]), "encoder_n_layers": tune.randint(1, 4), "context_size": tune.choice([5, 10, 50]), - "decoder_hidden_size": tune.choice([64, 128, 256, 512]), + "decoder_hidden_size": tune.choice([16, 32, 64, 128]), "learning_rate": tune.loguniform(1e-4, 1e-1), "max_steps": tune.choice([500, 1000]), "batch_size": tune.choice([16, 32]), @@ -209,10 +209,10 @@ class AutoGRU(BaseAuto): "input_size_multiplier": [-1, 4, 16, 64], "inference_input_size_multiplier": [-1], "h": None, - "encoder_hidden_size": tune.choice([50, 100, 200, 300]), + "encoder_hidden_size": tune.choice([16, 32, 64, 128]), "encoder_n_layers": tune.randint(1, 4), "context_size": tune.choice([5, 10, 50]), - "decoder_hidden_size": tune.choice([64, 128, 256, 512]), + "decoder_hidden_size": tune.choice([16, 32, 64, 128]), "learning_rate": tune.loguniform(1e-4, 1e-1), "max_steps": tune.choice([500, 1000]), "batch_size": tune.choice([16, 32]), @@ -280,9 +280,9 @@ class AutoTCN(BaseAuto): "input_size_multiplier": [-1, 4, 16, 64], "inference_input_size_multiplier": [-1], "h": None, - "encoder_hidden_size": tune.choice([50, 100, 200, 300]), + "encoder_hidden_size": tune.choice([16, 32, 64, 128]), "context_size": tune.choice([5, 10, 50]), - "decoder_hidden_size": tune.choice([64, 128]), + "decoder_hidden_size": tune.choice([32, 64]), "learning_rate": tune.loguniform(1e-4, 1e-1), "max_steps": tune.choice([500, 1000]), "batch_size": tune.choice([16, 32]), @@ -422,10 +422,10 @@ class AutoDilatedRNN(BaseAuto): "inference_input_size_multiplier": [-1], "h": None, "cell_type": tune.choice(["LSTM", "GRU"]), - "encoder_hidden_size": tune.choice([50, 100, 200, 300]), + "encoder_hidden_size": tune.choice([16, 32, 64, 128]), "dilations": tune.choice([[[1, 2], [4, 8]], [[1, 2, 4, 8]]]), "context_size": tune.choice([5, 10, 50]), - "decoder_hidden_size": tune.choice([64, 128, 256, 512]), + "decoder_hidden_size": tune.choice([16, 32, 64, 128]), "learning_rate": tune.loguniform(1e-4, 1e-1), "max_steps": tune.choice([500, 1000]), "batch_size": tune.choice([16, 32]), diff --git a/neuralforecast/common/_base_auto.py b/neuralforecast/common/_base_auto.py index a44f86267..2a306cae9 100644 --- a/neuralforecast/common/_base_auto.py +++ b/neuralforecast/common/_base_auto.py @@ -178,7 +178,11 @@ def config_f(trial): self.callbacks = callbacks # Base Class attributes - self.SAMPLING_TYPE = cls_model.SAMPLING_TYPE + self.EXOGENOUS_FUTR = 
cls_model.EXOGENOUS_FUTR + self.EXOGENOUS_HIST = cls_model.EXOGENOUS_HIST + self.EXOGENOUS_STAT = cls_model.EXOGENOUS_STAT + self.MULTIVARIATE = cls_model.MULTIVARIATE + self.RECURRENT = cls_model.RECURRENT def __repr__(self): return type(self).__name__ if self.alias is None else self.alias diff --git a/neuralforecast/common/_base_model.py b/neuralforecast/common/_base_model.py index 606ee8f0e..8b7964425 100644 --- a/neuralforecast/common/_base_model.py +++ b/neuralforecast/common/_base_model.py @@ -10,19 +10,25 @@ from contextlib import contextmanager from copy import deepcopy from dataclasses import dataclass +from typing import List, Dict, Union import fsspec import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F import pytorch_lightning as pl +import neuralforecast.losses.pytorch as losses + +from ..losses.pytorch import BasePointLoss, DistributionLoss from pytorch_lightning.callbacks.early_stopping import EarlyStopping from neuralforecast.tsdataset import ( TimeSeriesDataModule, BaseTimeSeriesDataset, _DistributedTimeSeriesDataModule, ) -from ..losses.pytorch import IQLoss +from ._scalers import TemporalNorm +from ..utils import get_indexer_raise_missing # %% ../../nbs/common.base_model.ipynb 3 @dataclass @@ -63,27 +69,96 @@ def noop(*args, **kwargs): # %% ../../nbs/common.base_model.ipynb 5 class BaseModel(pl.LightningModule): - EXOGENOUS_FUTR = True - EXOGENOUS_HIST = True - EXOGENOUS_STAT = True + EXOGENOUS_FUTR = True # If the model can handle future exogenous variables + EXOGENOUS_HIST = True # If the model can handle historical exogenous variables + EXOGENOUS_STAT = True # If the model can handle static exogenous variables + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, - random_seed, - loss, - valid_loss, - optimizer, - optimizer_kwargs, - lr_scheduler, - lr_scheduler_kwargs, - futr_exog_list, - hist_exog_list, - stat_exog_list, - max_steps, - early_stop_patience_steps, + h: int, + input_size: int, + loss: Union[BasePointLoss, DistributionLoss, nn.Module], + valid_loss: Union[BasePointLoss, DistributionLoss, nn.Module], + learning_rate: float, + max_steps: int, + val_check_steps: int, + batch_size: int, + valid_batch_size: Union[int, None], + windows_batch_size: int, + inference_windows_batch_size: Union[int, None], + start_padding_enabled: bool, + n_series: Union[int, None] = None, + n_samples: Union[int, None] = 100, + h_train: int = 1, + inference_input_size: Union[int, None] = None, + step_size: int = 1, + num_lr_decays: int = 0, + early_stop_patience_steps: int = -1, + scaler_type: str = "identity", + futr_exog_list: Union[List, None] = None, + hist_exog_list: Union[List, None] = None, + stat_exog_list: Union[List, None] = None, + exclude_insample_y: Union[bool, None] = False, + num_workers_loader: Union[int, None] = 0, + drop_last_loader: Union[bool, None] = False, + random_seed: Union[int, None] = 1, + alias: Union[str, None] = None, + optimizer: Union[torch.optim.Optimizer, None] = None, + optimizer_kwargs: Union[Dict, None] = None, + lr_scheduler: Union[torch.optim.lr_scheduler.LRScheduler, None] = None, + lr_scheduler_kwargs: Union[Dict, None] = None, + dataloader_kwargs=None, **trainer_kwargs, ): super().__init__() + + # Multivariate checks + if self.MULTIVARIATE and n_series is None: + raise Exception( + f"{type(self).__name__} is a multivariate model. 
Please set n_series to the number of unique time series in your dataset." + ) + if not self.MULTIVARIATE: + if n_series is not None: + warnings.warn( + f"{type(self).__name__} is a univariate model. Parameter n_series is ignored." + ) + n_series = 1 + self.n_series = n_series + + # Protections for previous recurrent models + if input_size < 1: + input_size = 3 * h + warnings.warn( + f"Input size too small. Automatically setting input size to 3 * horizon = {input_size}" + ) + + if inference_input_size is None: + inference_input_size = input_size + elif inference_input_size is not None and inference_input_size < 1: + inference_input_size = input_size + warnings.warn( + f"Inference input size too small. Automatically setting inference input size to input_size = {input_size}" + ) + + # For recurrent models we need one additional input as we need to shift insample_y to use it as input + if self.RECURRENT: + input_size += 1 + inference_input_size += 1 + + # Attributes needed for recurrent models + self.horizon_backup = h + self.input_size_backup = input_size + self.n_samples = n_samples + if self.RECURRENT: + self.h_train = h_train + self.inference_input_size = inference_input_size + self.rnn_state = None + self.maintain_state = False + with warnings.catch_warnings(record=False): warnings.filterwarnings("ignore") # the following line issues a warning about the loss attribute being saved @@ -98,8 +173,8 @@ def __init__( self.valid_loss = loss else: self.valid_loss = valid_loss - self.train_trajectories = [] - self.valid_trajectories = [] + self.train_trajectories: List = [] + self.valid_trajectories: List = [] # Optimization if optimizer is not None and not issubclass(optimizer, torch.optim.Optimizer): @@ -145,14 +220,41 @@ def __init__( f"{type(self).__name__} does not support static exogenous variables." ) - # Implicit Quantile Loss - if isinstance(self.loss, IQLoss): - if not isinstance(self.valid_loss, IQLoss): + # Protections for loss functions + if isinstance(self.loss, (losses.IQLoss, losses.MQLoss, losses.HuberMQLoss)): + loss_type = type(self.loss) + if not isinstance(self.valid_loss, loss_type): + raise Exception( + f"Please set valid_loss={type(self.loss).__name__}() when training with {type(self.loss).__name__}" + ) + if isinstance(self.valid_loss, losses.IQLoss): + valid_loss_type = type(self.valid_loss) + if not isinstance(self.loss, valid_loss_type): raise Exception( - "Please set valid_loss to IQLoss() when training with IQLoss" + f"Please set loss={type(self.valid_loss).__name__}() when validating with {type(self.valid_loss).__name__}" ) - if isinstance(self.valid_loss, IQLoss) and not isinstance(self.loss, IQLoss): - raise Exception("Please set loss to IQLoss() when validating with IQLoss") + + # Deny impossible loss / valid_loss combinations + if ( + isinstance(self.loss, losses.BasePointLoss) + and self.valid_loss.is_distribution_output + ): + raise Exception( + f"Validation with distribution loss {type(self.valid_loss).__name__} is not possible when using loss={type(self.loss).__name__}. Please use a point valid_loss (MAE, MSE, ...)" + ) + elif self.valid_loss.is_distribution_output and self.valid_loss is not loss: + # Maybe we should raise a Warning or an Exception here, but meh for now. + self.valid_loss = loss + + if isinstance(self.loss, (losses.relMSE, losses.Accuracy, losses.sCRPS)): + raise Exception( + f"{type(self.loss).__name__} cannot be used for training. 
Please use another loss function (MAE, MSE, ...)" + ) + + if isinstance(self.valid_loss, (losses.relMSE)): + raise Exception( + f"{type(self.valid_loss).__name__} cannot be used for validation. Please use another valid_loss (MAE, MSE, ...)" + ) ## Trainer arguments ## # Max steps, validation steps and check_val_every_n_epoch @@ -183,7 +285,79 @@ def __init__( if trainer_kwargs.get("enable_checkpointing", None) is None: trainer_kwargs["enable_checkpointing"] = False + # Set other attributes self.trainer_kwargs = trainer_kwargs + self.h = h + self.input_size = input_size + self.windows_batch_size = windows_batch_size + self.start_padding_enabled = start_padding_enabled + + # Padder to complete train windows, + # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0] + if start_padding_enabled: + self.padder_train = nn.ConstantPad1d( + padding=(self.input_size - 1, self.h), value=0.0 + ) + else: + self.padder_train = nn.ConstantPad1d(padding=(0, self.h), value=0.0) + + # Batch sizes + if self.MULTIVARIATE and n_series is not None: + self.batch_size = max(batch_size, n_series) + else: + self.batch_size = batch_size + if valid_batch_size is None: + self.valid_batch_size = batch_size + else: + self.valid_batch_size = valid_batch_size + if inference_windows_batch_size is None: + self.inference_windows_batch_size = windows_batch_size + else: + self.inference_windows_batch_size = inference_windows_batch_size + + # Optimization + self.learning_rate = learning_rate + self.max_steps = max_steps + self.num_lr_decays = num_lr_decays + self.lr_decay_steps = ( + max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7 + ) + self.early_stop_patience_steps = early_stop_patience_steps + self.val_check_steps = val_check_steps + self.windows_batch_size = windows_batch_size + self.step_size = step_size + + # If the model does not support exogenous, it can't support exclude_insample_y + if exclude_insample_y and not ( + self.EXOGENOUS_FUTR or self.EXOGENOUS_HIST or self.EXOGENOUS_STAT + ): + raise Exception( + f"{type(self).__name__} does not support `exclude_insample_y=True`. Please set `exclude_insample_y=False`" + ) + + self.exclude_insample_y = exclude_insample_y + + # Scaler + self.scaler = TemporalNorm( + scaler_type=scaler_type, + dim=1, # Time dimension is 1. + num_features=1 + len(self.hist_exog_list) + len(self.futr_exog_list), + ) + + # Fit arguments + self.val_size = 0 + self.test_size = 0 + + # Model state + self.decompose_forecast = False + + # DataModule arguments + self.num_workers_loader = num_workers_loader + self.dataloader_kwargs = dataloader_kwargs + self.drop_last_loader = drop_last_loader + # used by on_validation_epoch_end hook + self.validation_step_outputs: List = [] + self.alias = alias def __repr__(self): return type(self).__name__ if self.alias is None else self.alias @@ -220,21 +394,13 @@ def _get_temporal_exogenous_cols(self, temporal_cols): set(temporal_cols.tolist()) & set(self.hist_exog_list + self.futr_exog_list) ) - def _set_quantile_for_iqloss(self, **data_module_kwargs): - if "quantile" in data_module_kwargs: - if not isinstance(self.loss, IQLoss): - raise Exception( - "Please train with loss=IQLoss() to make use of the quantile argument." 
- ) - else: - self.quantile = data_module_kwargs["quantile"] - data_module_kwargs.pop("quantile") - self.loss.update_quantile(q=self.quantile) - elif isinstance(self.loss, IQLoss): - self.quantile = 0.5 - self.loss.update_quantile(q=self.quantile) - - return data_module_kwargs + def _set_quantiles(self, quantiles=None): + if quantiles is None and isinstance(self.loss, losses.IQLoss): + self.loss.update_quantile(q=[0.5]) + elif hasattr(self.loss, "update_quantile") and callable( + self.loss.update_quantile + ): + self.loss.update_quantile(q=quantiles) def _fit_distributed( self, @@ -463,3 +629,931 @@ def load(cls, path, **kwargs): else: # pytorch<2.1 model.load_state_dict(content["state_dict"], strict=True) return model + + def _create_windows(self, batch, step, w_idxs=None): + # Parse common data + window_size = self.input_size + self.h + temporal_cols = batch["temporal_cols"] + temporal = batch["temporal"] + + if step == "train": + if self.val_size + self.test_size > 0: + cutoff = -self.val_size - self.test_size + temporal = temporal[:, :, :cutoff] + + temporal = self.padder_train(temporal) + + if temporal.shape[-1] < window_size: + raise Exception( + "Time series is too short for training, consider setting a smaller input size or set start_padding_enabled=True" + ) + + windows = temporal.unfold( + dimension=-1, size=window_size, step=self.step_size + ) + + if self.MULTIVARIATE: + # [n_series, C, Ws, L + h] -> [Ws, L + h, C, n_series] + windows = windows.permute(2, 3, 1, 0) + else: + # [n_series, C, Ws, L + h] -> [Ws * n_series, L + h, C, 1] + windows_per_serie = windows.shape[2] + windows = windows.permute(0, 2, 3, 1) + windows = windows.flatten(0, 1) + windows = windows.unsqueeze(-1) + + # Sample and Available conditions + available_idx = temporal_cols.get_loc("available_mask") + available_condition = windows[:, : self.input_size, available_idx] + available_condition = torch.sum( + available_condition, axis=(1, -1) + ) # Sum over time & series dimension + final_condition = available_condition > 0 + + if self.h > 0: + sample_condition = windows[:, self.input_size :, available_idx] + sample_condition = torch.sum( + sample_condition, axis=(1, -1) + ) # Sum over time & series dimension + final_condition = (sample_condition > 0) & (available_condition > 0) + + windows = windows[final_condition] + + # Parse Static data to match windows + static = batch.get("static", None) + static_cols = batch.get("static_cols", None) + + # Repeat static if univariate: [n_series, S] -> [Ws * n_series, S] + if static is not None and not self.MULTIVARIATE: + static = torch.repeat_interleave( + static, repeats=windows_per_serie, dim=0 + ) + static = static[final_condition] + + # Protection of empty windows + if final_condition.sum() == 0: + raise Exception("No windows available for training") + + # Sample windows + if self.windows_batch_size is not None: + n_windows = windows.shape[0] + w_idxs = np.random.choice( + n_windows, + size=self.windows_batch_size, + replace=(n_windows < self.windows_batch_size), + ) + windows = windows[w_idxs] + + if static is not None and not self.MULTIVARIATE: + static = static[w_idxs] + + windows_batch = dict( + temporal=windows, + temporal_cols=temporal_cols, + static=static, + static_cols=static_cols, + ) + return windows_batch + + elif step in ["predict", "val"]: + + if step == "predict": + initial_input = temporal.shape[-1] - self.test_size + if ( + initial_input <= self.input_size + ): # There is not enough data to predict first timestamp + temporal = F.pad( + temporal, + 
pad=(self.input_size - initial_input, 0), + mode="constant", + value=0.0, + ) + predict_step_size = self.predict_step_size + cutoff = -self.input_size - self.test_size + temporal = temporal[:, :, cutoff:] + + elif step == "val": + predict_step_size = self.step_size + cutoff = -self.input_size - self.val_size - self.test_size + if self.test_size > 0: + temporal = batch["temporal"][:, :, cutoff : -self.test_size] + else: + temporal = batch["temporal"][:, :, cutoff:] + if temporal.shape[-1] < window_size: + initial_input = temporal.shape[-1] - self.val_size + temporal = F.pad( + temporal, + pad=(self.input_size - initial_input, 0), + mode="constant", + value=0.0, + ) + + if ( + (step == "predict") + and (self.test_size == 0) + and (len(self.futr_exog_list) == 0) + ): + temporal = F.pad(temporal, pad=(0, self.h), mode="constant", value=0.0) + + windows = temporal.unfold( + dimension=-1, size=window_size, step=predict_step_size + ) + + static = batch.get("static", None) + static_cols = batch.get("static_cols", None) + + if self.MULTIVARIATE: + # [n_series, C, Ws, L + h] -> [Ws, L + h, C, n_series] + windows = windows.permute(2, 3, 1, 0) + else: + # [n_series, C, Ws, L + h] -> [Ws * n_series, L + h, C, 1] + windows_per_serie = windows.shape[2] + windows = windows.permute(0, 2, 3, 1) + windows = windows.flatten(0, 1) + windows = windows.unsqueeze(-1) + if static is not None: + static = torch.repeat_interleave( + static, repeats=windows_per_serie, dim=0 + ) + + # Sample windows for batched prediction + if w_idxs is not None: + windows = windows[w_idxs] + if static is not None and not self.MULTIVARIATE: + static = static[w_idxs] + + windows_batch = dict( + temporal=windows, + temporal_cols=temporal_cols, + static=static, + static_cols=static_cols, + ) + return windows_batch + else: + raise ValueError(f"Unknown step {step}") + + def _normalization(self, windows, y_idx): + # windows are already filtered by train/validation/test + # by the `_create_windows` method, so there is no leakage risk + temporal = windows["temporal"] # [Ws, L + h, C, n_series] + temporal_cols = windows["temporal_cols"].copy() # [Ws, L + h, C, n_series] + + # To avoid leakage, use only the lags + temporal_data_cols = self._get_temporal_exogenous_cols( + temporal_cols=temporal_cols + ) + temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols) + temporal_idxs = np.append(y_idx, temporal_idxs) + temporal_data = temporal[:, :, temporal_idxs] + temporal_mask = temporal[:, :, temporal_cols.get_loc("available_mask")].clone() + if self.h > 0: + temporal_mask[:, -self.h :] = 0.0 + + # Normalize. self.scaler stores the shift and scale for inverse transform + temporal_mask = temporal_mask.unsqueeze( + 2 + ) # Add channel dimension for scaler.transform. 
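# A minimal, self-contained sketch (toy shapes, illustration only) of why the
# last h steps are masked before fitting the scaler: with the horizon zeroed
# in the mask, the scaler statistics come from the insample region alone, so
# the forecast window cannot leak into the normalization.
import torch
x = torch.arange(10.0).reshape(1, 10, 1)     # [batch, time, channel]
mask = torch.ones_like(x)
h = 3
mask[:, -h:] = 0.0                           # hide the horizon from the scaler
insample_mean = (x * mask).sum(dim=1) / mask.sum(dim=1)  # tensor([[3.]]), from t=0..6 only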
+ temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask) + + # Replace values in windows dict + temporal[:, :, temporal_idxs] = temporal_data + windows["temporal"] = temporal + + return windows + + def _inv_normalization(self, y_hat, y_idx): + # Receives window predictions [Ws, h, output, n_series] + # Broadcasts scale if necessary and inverts normalization + add_channel_dim = y_hat.ndim > 3 + y_loc, y_scale = self._get_loc_scale(y_idx, add_channel_dim=add_channel_dim) + y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc) + + return y_hat + + def _parse_windows(self, batch, windows): + # windows: [Ws, L + h, C, n_series] + + # Filter insample lags from outsample horizon + y_idx = batch["y_idx"] + mask_idx = batch["temporal_cols"].get_loc("available_mask") + + insample_y = windows["temporal"][:, : self.input_size, y_idx] + insample_mask = windows["temporal"][:, : self.input_size, mask_idx] + + # Declare additional information + outsample_y = None + outsample_mask = None + hist_exog = None + futr_exog = None + stat_exog = None + + if self.h > 0: + outsample_y = windows["temporal"][:, self.input_size :, y_idx] + outsample_mask = windows["temporal"][:, self.input_size :, mask_idx] + + # Recurrent models at t predict t+1, so we shift the input (insample_y) by one + if self.RECURRENT: + insample_y = torch.cat((insample_y, outsample_y[:, :-1]), dim=1) + insample_mask = torch.cat((insample_mask, outsample_mask[:, :-1]), dim=1) + self.maintain_state = False + + if len(self.hist_exog_list): + hist_exog_idx = get_indexer_raise_missing( + windows["temporal_cols"], self.hist_exog_list + ) + if self.RECURRENT: + hist_exog = windows["temporal"][:, :, hist_exog_idx] + hist_exog[:, self.input_size :] = 0.0 + hist_exog = hist_exog[:, 1:] + else: + hist_exog = windows["temporal"][:, : self.input_size, hist_exog_idx] + if not self.MULTIVARIATE: + hist_exog = hist_exog.squeeze(-1) + else: + hist_exog = hist_exog.swapaxes(1, 2) + + if len(self.futr_exog_list): + futr_exog_idx = get_indexer_raise_missing( + windows["temporal_cols"], self.futr_exog_list + ) + futr_exog = windows["temporal"][:, :, futr_exog_idx] + if self.RECURRENT: + futr_exog = futr_exog[:, 1:] + if not self.MULTIVARIATE: + futr_exog = futr_exog.squeeze(-1) + else: + futr_exog = futr_exog.swapaxes(1, 2) + + if len(self.stat_exog_list): + static_idx = get_indexer_raise_missing( + windows["static_cols"], self.stat_exog_list + ) + stat_exog = windows["static"][:, static_idx] + + # TODO: think of a better way of removing insample_y features + if self.exclude_insample_y: + insample_y = insample_y * 0 + + return ( + insample_y, + insample_mask, + outsample_y, + outsample_mask, + hist_exog, + futr_exog, + stat_exog, + ) + + def _get_loc_scale(self, y_idx, add_channel_dim=False): + # [B, L, C, n_series] -> [B, L, n_series] + y_scale = self.scaler.x_scale[:, :, y_idx] + y_loc = self.scaler.x_shift[:, :, y_idx] + + # [B, L, n_series] -> [B, L, n_series, 1] + if add_channel_dim: + y_scale = y_scale.unsqueeze(-1) + y_loc = y_loc.unsqueeze(-1) + + return y_loc, y_scale + + def _compute_valid_loss( + self, insample_y, outsample_y, output, outsample_mask, y_idx + ): + if self.loss.is_distribution_output: + y_loc, y_scale = self._get_loc_scale(y_idx) + distr_args = self.loss.scale_decouple( + output=output, loc=y_loc, scale=y_scale + ) + if isinstance( + self.valid_loss, (losses.sCRPS, losses.MQLoss, losses.HuberMQLoss) + ): + _, _, quants = self.loss.sample(distr_args=distr_args) + output = quants + elif 
isinstance(self.valid_loss, losses.BasePointLoss): + distr = self.loss.get_distribution(distr_args=distr_args) + output = distr.mean + + # Validation Loss evaluation + if self.valid_loss.is_distribution_output: + valid_loss = self.valid_loss( + y=outsample_y, distr_args=distr_args, mask=outsample_mask + ) + else: + output = self._inv_normalization(y_hat=output, y_idx=y_idx) + valid_loss = self.valid_loss( + y=outsample_y, y_hat=output, y_insample=insample_y, mask=outsample_mask + ) + return valid_loss + + def _validate_step_recurrent_batch( + self, insample_y, insample_mask, futr_exog, hist_exog, stat_exog, y_idx + ): + # Remember state in network and set horizon to 1 + self.rnn_state = None + self.maintain_state = True + self.h = 1 + + # Initialize results array + n_outputs = self.loss.outputsize_multiplier + y_hat = torch.zeros( + (insample_y.shape[0], self.horizon_backup, self.n_series * n_outputs), + device=insample_y.device, + dtype=insample_y.dtype, + ) + + # First step prediction + tau = 0 + + # Set exogenous + hist_exog_current = None + if self.hist_exog_size > 0: + hist_exog_current = hist_exog[:, : self.input_size + tau - 1] + + futr_exog_current = None + if self.futr_exog_size > 0: + futr_exog_current = futr_exog[:, : self.input_size + tau - 1] + + # First forecast step + y_hat[:, tau], insample_y = self._validate_step_recurrent_single( + insample_y=insample_y[:, : self.input_size + tau - 1], + insample_mask=insample_mask[:, : self.input_size + tau - 1], + hist_exog=hist_exog_current, + futr_exog=futr_exog_current, + stat_exog=stat_exog, + y_idx=y_idx, + ) + + # Horizon prediction recursively + for tau in range(self.horizon_backup): + # Set exogenous + if self.hist_exog_size > 0: + hist_exog_current = hist_exog[:, self.input_size + tau - 1].unsqueeze(1) + + if self.futr_exog_size > 0: + futr_exog_current = futr_exog[:, self.input_size + tau - 1].unsqueeze(1) + + y_hat[:, tau], insample_y = self._validate_step_recurrent_single( + insample_y=insample_y, + insample_mask=None, + hist_exog=hist_exog_current, + futr_exog=futr_exog_current, + stat_exog=stat_exog, + y_idx=y_idx, + ) + + # Reset state and horizon + self.maintain_state = False + self.rnn_state = None + self.h = self.horizon_backup + + return y_hat + + def _validate_step_recurrent_single( + self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx + ): + # Input sequence + windows_batch = dict( + insample_y=insample_y, # [Ws, L, n_series] + insample_mask=insample_mask, # [Ws, L, n_series] + futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series] + hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series] + stat_exog=stat_exog, + ) # univariate: [Ws, S]; multivariate: [n_series, S] + + # Model Predictions + output_batch_unmapped = self(windows_batch) + output_batch = self.loss.domain_map(output_batch_unmapped) + + # Inverse normalization and sampling + if self.loss.is_distribution_output: + # Sample distribution + y_loc, y_scale = self._get_loc_scale(y_idx) + distr_args = self.loss.scale_decouple( + output=output_batch, loc=y_loc, scale=y_scale + ) + # When validating, the output is the mean of the distribution which is an attribute + distr = self.loss.get_distribution(distr_args=distr_args) + + # Scale back to feed back as input + insample_y = self.scaler.scaler(distr.mean, y_loc, y_scale) + else: + # Todo: for now, we assume that in case of a BasePointLoss with ndim==4, the last dimension + # contains a set of predictions for the target (e.g. 
MQLoss multiple quantiles), for which we use the + # mean as feedback signal for the recurrent predictions. A more precise way is to increase the + # insample input size of the recurrent network by the number of outputs so that each output + # can be fed back to a specific input channel. + if output_batch.ndim == 4: + output_batch = output_batch.mean(dim=-1) + + insample_y = output_batch + + # Remove horizon dim: [B, 1, N * n_outputs] -> [B, N * n_outputs] + y_hat = output_batch_unmapped.squeeze(1) + return y_hat, insample_y + + def _predict_step_recurrent_batch( + self, insample_y, insample_mask, futr_exog, hist_exog, stat_exog, y_idx + ): + # Remember state in network and set horizon to 1 + self.rnn_state = None + self.maintain_state = True + self.h = 1 + + # Initialize results array + n_outputs = len(self.loss.output_names) + y_hat = torch.zeros( + (insample_y.shape[0], self.horizon_backup, self.n_series, n_outputs), + device=insample_y.device, + dtype=insample_y.dtype, + ) + + # First step prediction + tau = 0 + + # Set exogenous + hist_exog_current = None + if self.hist_exog_size > 0: + hist_exog_current = hist_exog[:, : self.input_size + tau - 1] + + futr_exog_current = None + if self.futr_exog_size > 0: + futr_exog_current = futr_exog[:, : self.input_size + tau - 1] + + # First forecast step + y_hat[:, tau], insample_y = self._predict_step_recurrent_single( + insample_y=insample_y[:, : self.input_size + tau - 1], + insample_mask=insample_mask[:, : self.input_size + tau - 1], + hist_exog=hist_exog_current, + futr_exog=futr_exog_current, + stat_exog=stat_exog, + y_idx=y_idx, + ) + + # Horizon prediction recursively + for tau in range(self.horizon_backup): + # Set exogenous + if self.hist_exog_size > 0: + hist_exog_current = hist_exog[:, self.input_size + tau - 1].unsqueeze(1) + + if self.futr_exog_size > 0: + futr_exog_current = futr_exog[:, self.input_size + tau - 1].unsqueeze(1) + + y_hat[:, tau], insample_y = self._predict_step_recurrent_single( + insample_y=insample_y, + insample_mask=None, + hist_exog=hist_exog_current, + futr_exog=futr_exog_current, + stat_exog=stat_exog, + y_idx=y_idx, + ) + + # Reset state and horizon + self.maintain_state = False + self.rnn_state = None + self.h = self.horizon_backup + + # Squeeze for univariate case + if not self.MULTIVARIATE: + y_hat = y_hat.squeeze(2) + + return y_hat + + def _predict_step_recurrent_single( + self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx + ): + # Input sequence + windows_batch = dict( + insample_y=insample_y, # [Ws, L, n_series] + insample_mask=insample_mask, # [Ws, L, n_series] + futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series] + hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series] + stat_exog=stat_exog, + ) # univariate: [Ws, S]; multivariate: [n_series, S] + + # Model Predictions + output_batch_unmapped = self(windows_batch) + output_batch = self.loss.domain_map(output_batch_unmapped) + + # Inverse normalization and sampling + if self.loss.is_distribution_output: + # Sample distribution + y_loc, y_scale = self._get_loc_scale(y_idx) + distr_args = self.loss.scale_decouple( + output=output_batch, loc=y_loc, scale=y_scale + ) + # When predicting, we need to sample to get the quantiles. The mean is an attribute. 
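# Sketch of the sample-then-quantile idea used in the next lines (a toy Normal
# stands in for self.loss here; the real call is
# self.loss.sample(distr_args=..., num_samples=...); illustration only):
import torch
dist = torch.distributions.Normal(loc=0.0, scale=1.0)
samples = dist.sample((1000,))                                   # Monte Carlo draws
quants = torch.quantile(samples, torch.tensor([0.1, 0.5, 0.9]))  # empirical quantiles
mean = dist.mean                                                 # exact mean is an attribute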
+ _, _, quants = self.loss.sample( + distr_args=distr_args, num_samples=self.n_samples + ) + mean = self.loss.distr_mean + + # Scale back to feed back as input + insample_y = self.scaler.scaler(mean, y_loc, y_scale) + + # Save predictions + y_hat = torch.concat((mean.unsqueeze(-1), quants), axis=-1) + + if self.loss.return_params: + distr_args = torch.stack(distr_args, dim=-1) + if distr_args.ndim > 4: + distr_args = distr_args.flatten(-2, -1) + y_hat = torch.concat((y_hat, distr_args), axis=-1) + else: + # Todo: for now, we assume that in case of a BasePointLoss with ndim==4, the last dimension + # contains a set of predictions for the target (e.g. MQLoss multiple quantiles), for which we use the + # mean as feedback signal for the recurrent predictions. A more precise way is to increase the + # insample input size of the recurrent network by the number of outputs so that each output + # can be fed back to a specific input channel. + if output_batch.ndim == 4: + output_batch = output_batch.mean(dim=-1) + + insample_y = output_batch + y_hat = self._inv_normalization(y_hat=output_batch, y_idx=y_idx) + y_hat = y_hat.unsqueeze(-1) + + # Remove horizon dim: [B, 1, N, n_outputs] -> [B, N, n_outputs] + y_hat = y_hat.squeeze(1) + return y_hat, insample_y + + def _predict_step_direct_batch( + self, insample_y, insample_mask, hist_exog, futr_exog, stat_exog, y_idx + ): + windows_batch = dict( + insample_y=insample_y, # [Ws, L, n_series] + insample_mask=insample_mask, # [Ws, L, n_series] + futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series] + hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series] + stat_exog=stat_exog, + ) # univariate: [Ws, S]; multivariate: [n_series, S] + + # Model Predictions + output_batch = self(windows_batch) + output_batch = self.loss.domain_map(output_batch) + + # Inverse normalization and sampling + if self.loss.is_distribution_output: + y_loc, y_scale = self._get_loc_scale(y_idx) + distr_args = self.loss.scale_decouple( + output=output_batch, loc=y_loc, scale=y_scale + ) + _, sample_mean, quants = self.loss.sample(distr_args=distr_args) + y_hat = torch.concat((sample_mean, quants), axis=-1) + + if self.loss.return_params: + distr_args = torch.stack(distr_args, dim=-1) + if distr_args.ndim > 4: + distr_args = distr_args.flatten(-2, -1) + y_hat = torch.concat((y_hat, distr_args), axis=-1) + else: + y_hat = self._inv_normalization(y_hat=output_batch, y_idx=y_idx) + + return y_hat + + def training_step(self, batch, batch_idx): + # Set horizon to h_train in case of recurrent model to speed up training + if self.RECURRENT: + self.h = self.h_train + + # windows: [Ws, L + h, C, n_series] or [Ws, L + h, C] + y_idx = batch["y_idx"] + + windows = self._create_windows(batch, step="train") + original_outsample_y = torch.clone( + windows["temporal"][:, self.input_size :, y_idx] + ) + windows = self._normalization(windows=windows, y_idx=y_idx) + + # Parse windows + ( + insample_y, + insample_mask, + outsample_y, + outsample_mask, + hist_exog, + futr_exog, + stat_exog, + ) = self._parse_windows(batch, windows) + + windows_batch = dict( + insample_y=insample_y, # [Ws, L, n_series] + insample_mask=insample_mask, # [Ws, L, n_series] + futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series] + hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series] + stat_exog=stat_exog, + ) # univariate: [Ws, S]; multivariate: [n_series, S] + + # Model Predictions + output = self(windows_batch) + 
output = self.loss.domain_map(output) + + if self.loss.is_distribution_output: + y_loc, y_scale = self._get_loc_scale(y_idx) + outsample_y = original_outsample_y + distr_args = self.loss.scale_decouple( + output=output, loc=y_loc, scale=y_scale + ) + loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask) + else: + loss = self.loss( + y=outsample_y, y_hat=output, y_insample=insample_y, mask=outsample_mask + ) + + if torch.isnan(loss): + print("Model Parameters", self.hparams) + print("insample_y", torch.isnan(insample_y).sum()) + print("outsample_y", torch.isnan(outsample_y).sum()) + raise Exception("Loss is NaN, training stopped.") + + train_loss_log = loss.detach().item() + self.log( + "train_loss", + train_loss_log, + batch_size=outsample_y.size(0), + prog_bar=True, + on_epoch=True, + ) + self.train_trajectories.append((self.global_step, train_loss_log)) + + self.h = self.horizon_backup + + return loss + + def validation_step(self, batch, batch_idx): + if self.val_size == 0: + return np.nan + + # TODO: Hack to compute number of windows + windows = self._create_windows(batch, step="val") + n_windows = len(windows["temporal"]) + y_idx = batch["y_idx"] + + # Number of windows in batch + windows_batch_size = self.inference_windows_batch_size + if windows_batch_size < 0: + windows_batch_size = n_windows + n_batches = int(np.ceil(n_windows / windows_batch_size)) + + valid_losses = [] + batch_sizes = [] + for i in range(n_batches): + # Create and normalize windows [Ws, L + h, C, n_series] + w_idxs = np.arange( + i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) + ) + windows = self._create_windows(batch, step="val", w_idxs=w_idxs) + original_outsample_y = torch.clone( + windows["temporal"][:, self.input_size :, y_idx] + ) + + windows = self._normalization(windows=windows, y_idx=y_idx) + + # Parse windows + ( + insample_y, + insample_mask, + _, + outsample_mask, + hist_exog, + futr_exog, + stat_exog, + ) = self._parse_windows(batch, windows) + + if self.RECURRENT: + output_batch = self._validate_step_recurrent_batch( + insample_y=insample_y, + insample_mask=insample_mask, + futr_exog=futr_exog, + hist_exog=hist_exog, + stat_exog=stat_exog, + y_idx=y_idx, + ) + else: + windows_batch = dict( + insample_y=insample_y, # [Ws, L, n_series] + insample_mask=insample_mask, # [Ws, L, n_series] + futr_exog=futr_exog, # univariate: [Ws, L, F]; multivariate: [Ws, F, L, n_series] + hist_exog=hist_exog, # univariate: [Ws, L, X]; multivariate: [Ws, X, L, n_series] + stat_exog=stat_exog, + ) # univariate: [Ws, S]; multivariate: [n_series, S] + + # Model Predictions + output_batch = self(windows_batch) + + output_batch = self.loss.domain_map(output_batch) + valid_loss_batch = self._compute_valid_loss( + insample_y=insample_y, + outsample_y=original_outsample_y, + output=output_batch, + outsample_mask=outsample_mask, + y_idx=batch["y_idx"], + ) + valid_losses.append(valid_loss_batch) + batch_sizes.append(len(output_batch)) + + valid_loss = torch.stack(valid_losses) + batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device) + batch_size = torch.sum(batch_sizes) + valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size + + if torch.isnan(valid_loss): + raise Exception("Loss is NaN, training stopped.") + + valid_loss_log = valid_loss.detach() + self.log( + "valid_loss", + valid_loss_log.item(), + batch_size=batch_size, + prog_bar=True, + on_epoch=True, + ) + self.validation_step_outputs.append(valid_loss_log) + return valid_loss + + def predict_step(self, batch, 
batch_idx): + if self.RECURRENT: + self.input_size = self.inference_input_size + + # TODO: Hack to compute number of windows + windows = self._create_windows(batch, step="predict") + n_windows = len(windows["temporal"]) + y_idx = batch["y_idx"] + + # Number of windows in batch + windows_batch_size = self.inference_windows_batch_size + if windows_batch_size < 0: + windows_batch_size = n_windows + n_batches = int(np.ceil(n_windows / windows_batch_size)) + y_hats = [] + for i in range(n_batches): + # Create and normalize windows [Ws, L+H, C] + w_idxs = np.arange( + i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) + ) + windows = self._create_windows(batch, step="predict", w_idxs=w_idxs) + windows = self._normalization(windows=windows, y_idx=y_idx) + + # Parse windows + insample_y, insample_mask, _, _, hist_exog, futr_exog, stat_exog = ( + self._parse_windows(batch, windows) + ) + + if self.RECURRENT: + y_hat = self._predict_step_recurrent_batch( + insample_y=insample_y, + insample_mask=insample_mask, + futr_exog=futr_exog, + hist_exog=hist_exog, + stat_exog=stat_exog, + y_idx=y_idx, + ) + else: + y_hat = self._predict_step_direct_batch( + insample_y=insample_y, + insample_mask=insample_mask, + futr_exog=futr_exog, + hist_exog=hist_exog, + stat_exog=stat_exog, + y_idx=y_idx, + ) + + y_hats.append(y_hat) + y_hat = torch.cat(y_hats, dim=0) + self.input_size = self.input_size_backup + + return y_hat + + def fit( + self, + dataset, + val_size=0, + test_size=0, + random_seed=None, + distributed_config=None, + ): + """Fit. + + The `fit` method, optimizes the neural network's weights using the + initialization parameters (`learning_rate`, `windows_batch_size`, ...) + and the `loss` function as defined during the initialization. + Within `fit` we use a PyTorch Lightning `Trainer` that + inherits the initialization's `self.trainer_kwargs`, to customize + its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer). + + The method is designed to be compatible with SKLearn-like classes + and in particular to be compatible with the StatsForecast library. + + By default the `model` is not saving training checkpoints to protect + disk memory, to get them change `enable_checkpointing=True` in `__init__`. + + **Parameters:**
+ `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
+ `val_size`: int, validation size for temporal cross-validation.
+ `test_size`: int, test size for temporal cross-validation.
+ `random_seed`: int=None, random seed for PyTorch initializers and NumPy generators; overrides the seed set in `model.__init__`.
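For orientation, a minimal sketch of the model-level fit cycle this docstring describes; the `NHITS` settings and the `AirPassengersDF` example data are illustrative assumptions, not part of this diff:

    from neuralforecast.models import NHITS
    from neuralforecast.tsdataset import TimeSeriesDataset
    from neuralforecast.utils import AirPassengersDF

    # from_df returns the dataset plus index/date metadata; only the dataset is needed here.
    dataset, *_ = TimeSeriesDataset.from_df(AirPassengersDF)

    model = NHITS(h=12, input_size=24, max_steps=100)
    model.fit(dataset=dataset, val_size=0, test_size=0)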
+ """ + return self._fit( + dataset=dataset, + batch_size=self.batch_size, + valid_batch_size=self.valid_batch_size, + val_size=val_size, + test_size=test_size, + random_seed=random_seed, + distributed_config=distributed_config, + ) + + def predict( + self, + dataset, + test_size=None, + step_size=1, + random_seed=None, + quantiles=None, + **data_module_kwargs, + ): + """Predict. + + Neural network prediction with PL's `Trainer` execution of `predict_step`. + + **Parameters:**
+ `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
+ `test_size`: int=None, test size for temporal cross-validation.
+ `step_size`: int=1, step size between each window.
+ `random_seed`: int=None, random seed for PyTorch initializers and NumPy generators; overrides the seed set in `model.__init__`.
+ `quantiles`: list of floats, optional (default=None), target quantiles to predict.
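A hedged sketch of the new `quantiles` argument; it assumes a probabilistic loss so that quantile outputs exist (the `StudentT` choice and the example data are illustrative, not part of this diff):

    from neuralforecast.losses.pytorch import DistributionLoss
    from neuralforecast.models import NHITS
    from neuralforecast.tsdataset import TimeSeriesDataset
    from neuralforecast.utils import AirPassengersDF

    dataset, *_ = TimeSeriesDataset.from_df(AirPassengersDF)
    model = NHITS(h=12, input_size=24, max_steps=100,
                  loss=DistributionLoss(distribution='StudentT'))
    model.fit(dataset=dataset)

    # One column per output name: the mean plus each requested quantile.
    fcsts = model.predict(dataset=dataset, quantiles=[0.1, 0.5, 0.9])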
+ `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). + """ + self._check_exog(dataset) + self._restart_seed(random_seed) + if "quantile" in data_module_kwargs: + warnings.warn( + "The 'quantile' argument will be deprecated, use 'quantiles' instead." + ) + if quantiles is not None: + raise ValueError("You can't specify quantile and quantiles.") + quantiles = [data_module_kwargs.pop("quantile")] + self._set_quantiles(quantiles) + + self.predict_step_size = step_size + self.decompose_forecast = False + datamodule = TimeSeriesDataModule( + dataset=dataset, + valid_batch_size=self.valid_batch_size, + **data_module_kwargs, + ) + + # Protect when case of multiple gpu. PL does not support return preds with multiple gpu. + pred_trainer_kwargs = self.trainer_kwargs.copy() + if (pred_trainer_kwargs.get("accelerator", None) == "gpu") and ( + torch.cuda.device_count() > 1 + ): + pred_trainer_kwargs["devices"] = [0] + + trainer = pl.Trainer(**pred_trainer_kwargs) + fcsts = trainer.predict(self, datamodule=datamodule) + fcsts = torch.vstack(fcsts) + + if self.MULTIVARIATE: + # [B, h, n_series (, Q)] -> [n_series, B, h (, Q)] + fcsts = fcsts.swapaxes(0, 2) + fcsts = fcsts.swapaxes(1, 2) + + fcsts = fcsts.numpy().flatten() + fcsts = fcsts.reshape(-1, len(self.loss.output_names)) + return fcsts + + def decompose( + self, + dataset, + step_size=1, + random_seed=None, + quantiles=None, + **data_module_kwargs, + ): + """Decompose Predictions. + + Decompose the predictions through the network's layers. + Available methods are `ESRNN`, `NHITS`, `NBEATS`, and `NBEATSx`. + + **Parameters:**
+ `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
+ `step_size`: int=1, step size between each window of temporal data.
+ `random_seed`: int=None, random seed for PyTorch initializers and NumPy generators; overrides the seed set in `model.__init__`.
+ `quantiles`: list of floats, optional (default=None), target quantiles to predict.
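And a sketch of `decompose` on one of the supported block-based models (settings again illustrative); each returned slice is one block's additive contribution to the forecast:

    from neuralforecast.models import NBEATS
    from neuralforecast.tsdataset import TimeSeriesDataset
    from neuralforecast.utils import AirPassengersDF

    dataset, *_ = TimeSeriesDataset.from_df(AirPassengersDF)
    model = NBEATS(h=12, input_size=24, max_steps=100)
    model.fit(dataset=dataset)

    # Stacked per-component forecasts (e.g. identity/trend/seasonality blocks).
    components = model.decompose(dataset=dataset, step_size=1)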
+ `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). + """ + # Restart random seed + if random_seed is None: + random_seed = self.random_seed + torch.manual_seed(random_seed) + self._set_quantiles(quantiles) + + self.predict_step_size = step_size + self.decompose_forecast = True + datamodule = TimeSeriesDataModule( + dataset=dataset, + valid_batch_size=self.valid_batch_size, + **data_module_kwargs, + ) + trainer = pl.Trainer(**self.trainer_kwargs) + fcsts = trainer.predict(self, datamodule=datamodule) + self.decompose_forecast = False # Default decomposition back to false + return torch.vstack(fcsts).numpy() diff --git a/neuralforecast/common/_base_multivariate.py b/neuralforecast/common/_base_multivariate.py deleted file mode 100644 index 0fdc3b94d..000000000 --- a/neuralforecast/common/_base_multivariate.py +++ /dev/null @@ -1,608 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/common.base_multivariate.ipynb. - -# %% auto 0 -__all__ = ['BaseMultivariate'] - -# %% ../../nbs/common.base_multivariate.ipynb 5 -import numpy as np -import torch -import torch.nn as nn -import pytorch_lightning as pl -import neuralforecast.losses.pytorch as losses - -from ._base_model import BaseModel -from ._scalers import TemporalNorm -from ..tsdataset import TimeSeriesDataModule -from ..utils import get_indexer_raise_missing - -# %% ../../nbs/common.base_multivariate.ipynb 6 -class BaseMultivariate(BaseModel): - """Base Multivariate - - Base class for all multivariate models. The forecasts for all time-series are produced simultaneously - within each window, which are randomly sampled during training. - - This class implements the basic functionality for all windows-based models, including: - - PyTorch Lightning's methods training_step, validation_step, predict_step.
- - fit and predict methods used by NeuralForecast.core class.
- - sampling and wrangling methods to generate multivariate windows. - """ - - def __init__( - self, - h, - input_size, - loss, - valid_loss, - learning_rate, - max_steps, - val_check_steps, - n_series, - batch_size, - step_size=1, - num_lr_decays=0, - early_stop_patience_steps=-1, - scaler_type="robust", - futr_exog_list=None, - hist_exog_list=None, - stat_exog_list=None, - num_workers_loader=0, - drop_last_loader=False, - random_seed=1, - alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, - dataloader_kwargs=None, - **trainer_kwargs, - ): - super().__init__( - random_seed=random_seed, - loss=loss, - valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, - max_steps=max_steps, - early_stop_patience_steps=early_stop_patience_steps, - **trainer_kwargs, - ) - - # Padder to complete train windows, - # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0] - self.h = h - self.input_size = input_size - self.n_series = n_series - self.padder = nn.ConstantPad1d(padding=(0, self.h), value=0.0) - - # Multivariate models do not support these loss functions yet. - unsupported_losses = ( - losses.sCRPS, - losses.MQLoss, - losses.DistributionLoss, - losses.PMM, - losses.GMM, - losses.HuberMQLoss, - losses.MASE, - losses.relMSE, - losses.NBMM, - ) - if isinstance(self.loss, unsupported_losses): - raise Exception(f"{self.loss} is not supported in a Multivariate model.") - if isinstance(self.valid_loss, unsupported_losses): - raise Exception( - f"{self.valid_loss} is not supported in a Multivariate model." - ) - - self.batch_size = batch_size - - # Optimization - self.learning_rate = learning_rate - self.max_steps = max_steps - self.num_lr_decays = num_lr_decays - self.lr_decay_steps = ( - max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7 - ) - self.early_stop_patience_steps = early_stop_patience_steps - self.val_check_steps = val_check_steps - self.step_size = step_size - - # Scaler - self.scaler = TemporalNorm( - scaler_type=scaler_type, dim=2 - ) # Time dimension is in the second axis - - # Fit arguments - self.val_size = 0 - self.test_size = 0 - - # Model state - self.decompose_forecast = False - - # DataModule arguments - self.num_workers_loader = num_workers_loader - self.dataloader_kwargs = dataloader_kwargs - self.drop_last_loader = drop_last_loader - # used by on_validation_epoch_end hook - self.validation_step_outputs = [] - self.alias = alias - - def _create_windows(self, batch, step): - # Parse common data - window_size = self.input_size + self.h - temporal_cols = batch["temporal_cols"] - temporal = batch["temporal"] - - if step == "train": - if self.val_size + self.test_size > 0: - cutoff = -self.val_size - self.test_size - temporal = temporal[:, :, :cutoff] - - temporal = self.padder(temporal) - windows = temporal.unfold( - dimension=-1, size=window_size, step=self.step_size - ) - # [n_series, C, Ws, L+H] 0, 1, 2, 3 - - # Sample and Available conditions - available_idx = temporal_cols.get_loc("available_mask") - sample_condition = windows[:, available_idx, :, -self.h :] - sample_condition = torch.sum(sample_condition, axis=2) # Sum over time - sample_condition = torch.sum( - sample_condition, axis=0 - ) # Sum over time-series - available_condition = windows[:, available_idx, :, : -self.h] - available_condition = 
torch.sum( - available_condition, axis=2 - ) # Sum over time - available_condition = torch.sum( - available_condition, axis=0 - ) # Sum over time-series - final_condition = (sample_condition > 0) & ( - available_condition > 0 - ) # Of shape [Ws] - windows = windows[:, :, final_condition, :] - - # Get Static data - static = batch.get("static", None) - static_cols = batch.get("static_cols", None) - - # Protection of empty windows - if final_condition.sum() == 0: - raise Exception("No windows available for training") - - # Sample windows - n_windows = windows.shape[2] - if self.batch_size is not None: - w_idxs = np.random.choice( - n_windows, - size=self.batch_size, - replace=(n_windows < self.batch_size), - ) - windows = windows[:, :, w_idxs, :] - - windows = windows.permute(2, 1, 3, 0) # [Ws, C, L+H, n_series] - - windows_batch = dict( - temporal=windows, - temporal_cols=temporal_cols, - static=static, - static_cols=static_cols, - ) - - return windows_batch - - elif step in ["predict", "val"]: - - if step == "predict": - predict_step_size = self.predict_step_size - cutoff = -self.input_size - self.test_size - temporal = batch["temporal"][:, :, cutoff:] - - elif step == "val": - predict_step_size = self.step_size - cutoff = -self.input_size - self.val_size - self.test_size - if self.test_size > 0: - temporal = batch["temporal"][:, :, cutoff : -self.test_size] - else: - temporal = batch["temporal"][:, :, cutoff:] - - if ( - (step == "predict") - and (self.test_size == 0) - and (len(self.futr_exog_list) == 0) - ): - temporal = self.padder(temporal) - - windows = temporal.unfold( - dimension=-1, size=window_size, step=predict_step_size - ) - # [n_series, C, Ws, L+H] -> [Ws, C, L+H, n_series] - windows = windows.permute(2, 1, 3, 0) - - # Get Static data - static = batch.get("static", None) - static_cols = batch.get("static_cols", None) - - windows_batch = dict( - temporal=windows, - temporal_cols=temporal_cols, - static=static, - static_cols=static_cols, - ) - - return windows_batch - else: - raise ValueError(f"Unknown step {step}") - - def _normalization(self, windows, y_idx): - - # windows are already filtered by train/validation/test - # from the `create_windows_method` nor leakage risk - temporal = windows["temporal"] # [Ws, C, L+H, n_series] - temporal_cols = windows["temporal_cols"].copy() # [Ws, C, L+H, n_series] - - # To avoid leakage uses only the lags - temporal_data_cols = self._get_temporal_exogenous_cols( - temporal_cols=temporal_cols - ) - temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols) - temporal_idxs = np.append(y_idx, temporal_idxs) - temporal_data = temporal[:, temporal_idxs, :, :] - temporal_mask = temporal[ - :, temporal_cols.get_loc("available_mask"), :, : - ].clone() - temporal_mask[:, -self.h :, :] = 0.0 - - # Normalize. self.scaler stores the shift and scale for inverse transform - temporal_mask = temporal_mask.unsqueeze( - 1 - ) # Add channel dimension for scaler.transform. 
- temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask) - # Replace values in windows dict - temporal[:, temporal_idxs, :, :] = temporal_data - windows["temporal"] = temporal - - return windows - - def _inv_normalization(self, y_hat, temporal_cols, y_idx): - # Receives window predictions [Ws, H, n_series] - # Broadcasts outputs and inverts normalization - - # Add C dimension - # if y_hat.ndim == 2: - # remove_dimension = True - # y_hat = y_hat.unsqueeze(-1) - # else: - # remove_dimension = False - - y_scale = self.scaler.x_scale[:, [y_idx], :].squeeze(1) - y_loc = self.scaler.x_shift[:, [y_idx], :].squeeze(1) - - # y_scale = torch.repeat_interleave(y_scale, repeats=y_hat.shape[-1], dim=-1) - # y_loc = torch.repeat_interleave(y_loc, repeats=y_hat.shape[-1], dim=-1) - - y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc) - - # if remove_dimension: - # y_hat = y_hat.squeeze(-1) - # y_loc = y_loc.squeeze(-1) - # y_scale = y_scale.squeeze(-1) - - return y_hat, y_loc, y_scale - - def _parse_windows(self, batch, windows): - # Temporal: [Ws, C, L+H, n_series] - - # Filter insample lags from outsample horizon - mask_idx = batch["temporal_cols"].get_loc("available_mask") - y_idx = batch["y_idx"] - insample_y = windows["temporal"][:, y_idx, : -self.h, :] - insample_mask = windows["temporal"][:, mask_idx, : -self.h, :] - outsample_y = windows["temporal"][:, y_idx, -self.h :, :] - outsample_mask = windows["temporal"][:, mask_idx, -self.h :, :] - - # Filter historic exogenous variables - if len(self.hist_exog_list): - hist_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.hist_exog_list - ) - hist_exog = windows["temporal"][:, hist_exog_idx, : -self.h, :] - else: - hist_exog = None - - # Filter future exogenous variables - if len(self.futr_exog_list): - futr_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.futr_exog_list - ) - futr_exog = windows["temporal"][:, futr_exog_idx, :, :] - else: - futr_exog = None - - # Filter static variables - if len(self.stat_exog_list): - static_idx = get_indexer_raise_missing( - windows["static_cols"], self.stat_exog_list - ) - stat_exog = windows["static"][:, static_idx] - else: - stat_exog = None - - return ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) - - def training_step(self, batch, batch_idx): - # Create and normalize windows [batch_size, n_series, C, L+H] - windows = self._create_windows(batch, step="train") - y_idx = batch["y_idx"] - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L, n_series] - insample_mask=insample_mask, # [Ws, L, n_series] - futr_exog=futr_exog, # [Ws, F, L + h, n_series] - hist_exog=hist_exog, # [Ws, X, L, n_series] - stat_exog=stat_exog, - ) # [n_series, S] - - # Model Predictions - output = self(windows_batch) - if self.loss.is_distribution_output: - outsample_y, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask) - else: - loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask) - - if torch.isnan(loss): - 
print("Model Parameters", self.hparams) - print("insample_y", torch.isnan(insample_y).sum()) - print("outsample_y", torch.isnan(outsample_y).sum()) - print("output", torch.isnan(output).sum()) - raise Exception("Loss is NaN, training stopped.") - - self.log( - "train_loss", - loss.detach().item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.train_trajectories.append((self.global_step, loss.detach().item())) - return loss - - def validation_step(self, batch, batch_idx): - if self.val_size == 0: - return np.nan - - # Create and normalize windows [Ws, L+H, C] - windows = self._create_windows(batch, step="val") - y_idx = batch["y_idx"] - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L, n_series] - insample_mask=insample_mask, # [Ws, L, n_series] - futr_exog=futr_exog, # [Ws, F, L + h, n_series] - hist_exog=hist_exog, # [Ws, X, L, n_series] - stat_exog=stat_exog, - ) # [n_series, S] - - # Model Predictions - output = self(windows_batch) - if self.loss.is_distribution_output: - outsample_y, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - - if str(type(self.valid_loss)) in [ - "", - "", - ]: - _, output = self.loss.sample(distr_args=distr_args) - - # Validation Loss evaluation - if self.valid_loss.is_distribution_output: - valid_loss = self.valid_loss( - y=outsample_y, distr_args=distr_args, mask=outsample_mask - ) - else: - valid_loss = self.valid_loss( - y=outsample_y, y_hat=output, mask=outsample_mask - ) - - if torch.isnan(valid_loss): - raise Exception("Loss is NaN, training stopped.") - - self.log( - "valid_loss", - valid_loss.detach().item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.validation_step_outputs.append(valid_loss) - return valid_loss - - def predict_step(self, batch, batch_idx): - # Create and normalize windows [Ws, L+H, C] - windows = self._create_windows(batch, step="predict") - y_idx = batch["y_idx"] - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - insample_y, insample_mask, _, _, hist_exog, futr_exog, stat_exog = ( - self._parse_windows(batch, windows) - ) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L, n_series] - insample_mask=insample_mask, # [Ws, L, n_series] - futr_exog=futr_exog, # [Ws, F, L + h, n_series] - hist_exog=hist_exog, # [Ws, X, L, n_series] - stat_exog=stat_exog, - ) # [n_series, S] - - # Model Predictions - output = self(windows_batch) - if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=torch.empty( - size=(insample_y.shape[0], self.h, self.n_series), - dtype=output[0].dtype, - device=output[0].device, - ), - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - _, y_hat = self.loss.sample(distr_args=distr_args) - - if self.loss.return_params: - distr_args = torch.stack(distr_args, dim=-1) - distr_args = torch.reshape( - distr_args, (len(windows["temporal"]), self.h, -1) - ) - y_hat = torch.concat((y_hat, distr_args), axis=2) - else: - y_hat, _, _ = self._inv_normalization( - y_hat=output, 
temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - return y_hat - - def fit( - self, - dataset, - val_size=0, - test_size=0, - random_seed=None, - distributed_config=None, - ): - """Fit. - - The `fit` method, optimizes the neural network's weights using the - initialization parameters (`learning_rate`, `windows_batch_size`, ...) - and the `loss` function as defined during the initialization. - Within `fit` we use a PyTorch Lightning `Trainer` that - inherits the initialization's `self.trainer_kwargs`, to customize - its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer). - - The method is designed to be compatible with SKLearn-like classes - and in particular to be compatible with the StatsForecast library. - - By default the `model` is not saving training checkpoints to protect - disk memory, to get them change `enable_checkpointing=True` in `__init__`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `val_size`: int, validation size for temporal cross-validation.
- `test_size`: int, test size for temporal cross-validation.
- """ - if distributed_config is not None: - raise ValueError( - "multivariate models cannot be trained using distributed data parallel." - ) - return self._fit( - dataset=dataset, - batch_size=self.n_series, - valid_batch_size=self.n_series, - val_size=val_size, - test_size=test_size, - random_seed=random_seed, - shuffle_train=False, - distributed_config=None, - ) - - def predict( - self, - dataset, - test_size=None, - step_size=1, - random_seed=None, - **data_module_kwargs, - ): - """Predict. - - Neural network prediction with PL's `Trainer` execution of `predict_step`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `test_size`: int=None, test size for temporal cross-validation.
- `step_size`: int=1, Step size between each window.
- `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). - """ - self._check_exog(dataset) - self._restart_seed(random_seed) - data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs) - - self.predict_step_size = step_size - self.decompose_forecast = False - datamodule = TimeSeriesDataModule( - dataset=dataset, - valid_batch_size=self.n_series, - batch_size=self.n_series, - **data_module_kwargs, - ) - - # Protect when case of multiple gpu. PL does not support return preds with multiple gpu. - pred_trainer_kwargs = self.trainer_kwargs.copy() - if (pred_trainer_kwargs.get("accelerator", None) == "gpu") and ( - torch.cuda.device_count() > 1 - ): - pred_trainer_kwargs["devices"] = [0] - - trainer = pl.Trainer(**pred_trainer_kwargs) - fcsts = trainer.predict(self, datamodule=datamodule) - fcsts = torch.vstack(fcsts).numpy() - - fcsts = np.transpose(fcsts, (2, 0, 1)) - fcsts = fcsts.flatten() - fcsts = fcsts.reshape(-1, len(self.loss.output_names)) - return fcsts - - def decompose(self, dataset, step_size=1, random_seed=None, **data_module_kwargs): - raise NotImplementedError("decompose") diff --git a/neuralforecast/common/_base_recurrent.py b/neuralforecast/common/_base_recurrent.py deleted file mode 100644 index 0479996c1..000000000 --- a/neuralforecast/common/_base_recurrent.py +++ /dev/null @@ -1,593 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/common.base_recurrent.ipynb. - -# %% auto 0 -__all__ = ['BaseRecurrent'] - -# %% ../../nbs/common.base_recurrent.ipynb 6 -import numpy as np -import torch -import torch.nn as nn -import pytorch_lightning as pl -import neuralforecast.losses.pytorch as losses - -from ._base_model import BaseModel -from ._scalers import TemporalNorm -from ..tsdataset import TimeSeriesDataModule -from ..utils import get_indexer_raise_missing - -# %% ../../nbs/common.base_recurrent.ipynb 7 -class BaseRecurrent(BaseModel): - """Base Recurrent - - Base class for all recurrent-based models. The forecasts are produced sequentially between - windows. - - This class implements the basic functionality for all windows-based models, including: - - PyTorch Lightning's methods training_step, validation_step, predict_step.
- - fit and predict methods used by NeuralForecast.core class.
- - sampling and wrangling methods to sequential windows.
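The recurrent logic added earlier in this diff replaces this class's unrolled scheme with explicit one-step recursion over a kept hidden state (`maintain_state`, `h = 1`). A self-contained toy of that decoding pattern, in plain PyTorch rather than this codebase's API:

    import torch
    import torch.nn as nn

    rnn = nn.GRU(input_size=1, hidden_size=8, batch_first=True)
    head = nn.Linear(8, 1)

    y = torch.randn(4, 24, 1)      # [B, L, 1] lookback window
    state = None
    out, state = rnn(y, state)     # warm up the hidden state on the history
    x = head(out[:, -1:])          # first one-step forecast, [B, 1, 1]

    preds = [x]
    for _ in range(11):            # recurse: feed each forecast back as input
        out, state = rnn(x, state)
        x = head(out[:, -1:])
        preds.append(x)
    y_hat = torch.cat(preds, dim=1)  # [B, 12, 1] horizon forecast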
- """ - - def __init__( - self, - h, - input_size, - inference_input_size, - loss, - valid_loss, - learning_rate, - max_steps, - val_check_steps, - batch_size, - valid_batch_size, - scaler_type="robust", - num_lr_decays=0, - early_stop_patience_steps=-1, - futr_exog_list=None, - hist_exog_list=None, - stat_exog_list=None, - num_workers_loader=0, - drop_last_loader=False, - random_seed=1, - alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, - dataloader_kwargs=None, - **trainer_kwargs, - ): - super().__init__( - random_seed=random_seed, - loss=loss, - valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, - max_steps=max_steps, - early_stop_patience_steps=early_stop_patience_steps, - **trainer_kwargs, - ) - - # Padder to complete train windows, - # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0] - self.h = h - self.input_size = input_size - self.inference_input_size = inference_input_size - self.padder = nn.ConstantPad1d(padding=(0, self.h), value=0.0) - - unsupported_distributions = ["Bernoulli", "ISQF"] - if ( - isinstance(self.loss, losses.DistributionLoss) - and self.loss.distribution in unsupported_distributions - ): - raise Exception( - f"Distribution {self.loss.distribution} not available for Recurrent-based models. Please choose another distribution." - ) - - # Valid batch_size - self.batch_size = batch_size - if valid_batch_size is None: - self.valid_batch_size = batch_size - else: - self.valid_batch_size = valid_batch_size - - # Optimization - self.learning_rate = learning_rate - self.max_steps = max_steps - self.num_lr_decays = num_lr_decays - self.lr_decay_steps = ( - max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7 - ) - self.early_stop_patience_steps = early_stop_patience_steps - self.val_check_steps = val_check_steps - - # Scaler - self.scaler = TemporalNorm( - scaler_type=scaler_type, - dim=-1, # Time dimension is -1. - num_features=1 + len(self.hist_exog_list) + len(self.futr_exog_list), - ) - - # Fit arguments - self.val_size = 0 - self.test_size = 0 - - # DataModule arguments - self.num_workers_loader = num_workers_loader - self.dataloader_kwargs = dataloader_kwargs - self.drop_last_loader = drop_last_loader - # used by on_validation_epoch_end hook - self.validation_step_outputs = [] - self.alias = alias - - def _normalization(self, batch, val_size=0, test_size=0): - temporal = batch["temporal"] # B, C, T - temporal_cols = batch["temporal_cols"].copy() - y_idx = batch["y_idx"] - - # Separate data and mask - temporal_data_cols = self._get_temporal_exogenous_cols( - temporal_cols=temporal_cols - ) - temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols) - temporal_idxs = np.append(y_idx, temporal_idxs) - temporal_data = temporal[:, temporal_idxs, :] - temporal_mask = temporal[:, temporal_cols.get_loc("available_mask"), :].clone() - - # Remove validation and test set to prevent leakeage - if val_size + test_size > 0: - cutoff = val_size + test_size - temporal_mask[:, -cutoff:] = 0 - - # Normalize. self.scaler stores the shift and scale for inverse transform - temporal_mask = temporal_mask.unsqueeze( - 1 - ) # Add channel dimension for scaler.transform. 
- temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask) - - # Replace values in windows dict - temporal[:, temporal_idxs, :] = temporal_data - batch["temporal"] = temporal - - return batch - - def _inv_normalization(self, y_hat, temporal_cols, y_idx): - # Receives window predictions [B, seq_len, H, output] - # Broadcasts outputs and inverts normalization - - # Get 'y' scale and shift, and add W dimension - y_loc = self.scaler.x_shift[:, [y_idx], 0].flatten() # [B,C,T] -> [B] - y_scale = self.scaler.x_scale[:, [y_idx], 0].flatten() # [B,C,T] -> [B] - - # Expand scale and shift to y_hat dimensions - y_loc = y_loc.view(*y_loc.shape, *(1,) * (y_hat.ndim - 1)) # .expand(y_hat) - y_scale = y_scale.view( - *y_scale.shape, *(1,) * (y_hat.ndim - 1) - ) # .expand(y_hat) - - y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc) - - return y_hat, y_loc, y_scale - - def _create_windows(self, batch, step): - temporal = batch["temporal"] - temporal_cols = batch["temporal_cols"] - - if step == "train": - if self.val_size + self.test_size > 0: - cutoff = -self.val_size - self.test_size - temporal = temporal[:, :, :cutoff] - temporal = self.padder(temporal) - - # Truncate batch to shorter time-series - av_condition = torch.nonzero( - torch.min( - temporal[:, temporal_cols.get_loc("available_mask")], axis=0 - ).values - ) - min_time_stamp = int(av_condition.min()) - - available_ts = temporal.shape[-1] - min_time_stamp - if available_ts < 1 + self.h: - raise Exception( - "Time series too short for given input and output size. \n" - f"Available timestamps: {available_ts}" - ) - - temporal = temporal[:, :, min_time_stamp:] - - if step == "val": - if self.test_size > 0: - temporal = temporal[:, :, : -self.test_size] - temporal = self.padder(temporal) - - if step == "predict": - if (self.test_size == 0) and (len(self.futr_exog_list) == 0): - temporal = self.padder(temporal) - - # Test size covers all data, pad left one timestep with zeros - if temporal.shape[-1] == self.test_size: - padder_left = nn.ConstantPad1d(padding=(1, 0), value=0.0) - temporal = padder_left(temporal) - - # Parse batch - window_size = 1 + self.h # 1 for current t and h for future - windows = temporal.unfold(dimension=-1, size=window_size, step=1) - - # Truncated backprogatation/inference (shorten sequence where RNNs unroll) - n_windows = windows.shape[2] - input_size = -1 - if (step == "train") and (self.input_size > 0): - input_size = self.input_size - if (input_size > 0) and (n_windows > input_size): - max_sampleable_time = n_windows - self.input_size + 1 - start = np.random.choice(max_sampleable_time) - windows = windows[:, :, start : (start + input_size), :] - - if (step == "val") and (self.inference_input_size > 0): - cutoff = self.inference_input_size + self.val_size - windows = windows[:, :, -cutoff:, :] - - if (step == "predict") and (self.inference_input_size > 0): - cutoff = self.inference_input_size + self.test_size - windows = windows[:, :, -cutoff:, :] - - # [B, C, input_size, 1+H] - windows_batch = dict( - temporal=windows, - temporal_cols=temporal_cols, - static=batch.get("static", None), - static_cols=batch.get("static_cols", None), - ) - - return windows_batch - - def _parse_windows(self, batch, windows): - # [B, C, seq_len, 1+H] - # Filter insample lags from outsample horizon - mask_idx = batch["temporal_cols"].get_loc("available_mask") - y_idx = batch["y_idx"] - insample_y = windows["temporal"][:, y_idx, :, : -self.h] - insample_mask = windows["temporal"][:, mask_idx, :, : 
-self.h] - outsample_y = windows["temporal"][:, y_idx, :, -self.h :].contiguous() - outsample_mask = windows["temporal"][:, mask_idx, :, -self.h :].contiguous() - - # Filter historic exogenous variables - if len(self.hist_exog_list): - hist_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.hist_exog_list - ) - hist_exog = windows["temporal"][:, hist_exog_idx, :, : -self.h] - else: - hist_exog = None - - # Filter future exogenous variables - if len(self.futr_exog_list): - futr_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.futr_exog_list - ) - futr_exog = windows["temporal"][:, futr_exog_idx, :, :] - else: - futr_exog = None - # Filter static variables - if len(self.stat_exog_list): - static_idx = get_indexer_raise_missing( - windows["static_cols"], self.stat_exog_list - ) - stat_exog = windows["static"][:, static_idx] - else: - stat_exog = None - - return ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) - - def training_step(self, batch, batch_idx): - # Create and normalize windows [Ws, L+H, C] - batch = self._normalization( - batch, val_size=self.val_size, test_size=self.test_size - ) - windows = self._create_windows(batch, step="train") - - # Parse windows - ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - insample_y=insample_y, # [B, seq_len, 1] - insample_mask=insample_mask, # [B, seq_len, 1] - futr_exog=futr_exog, # [B, F, seq_len, 1+H] - hist_exog=hist_exog, # [B, C, seq_len] - stat_exog=stat_exog, - ) # [B, S] - - # Model predictions - output = self(windows_batch) # tuple([B, seq_len, H, output]) - if self.loss.is_distribution_output: - outsample_y, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, - temporal_cols=batch["temporal_cols"], - y_idx=batch["y_idx"], - ) - B = output[0].size()[0] - T = output[0].size()[1] - H = output[0].size()[2] - output = [arg.view(-1, *(arg.size()[2:])) for arg in output] - outsample_y = outsample_y.view(B * T, H) - outsample_mask = outsample_mask.view(B * T, H) - y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1) - y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask) - else: - loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask) - - if torch.isnan(loss): - print("Model Parameters", self.hparams) - print("insample_y", torch.isnan(insample_y).sum()) - print("outsample_y", torch.isnan(outsample_y).sum()) - print("output", torch.isnan(output).sum()) - raise Exception("Loss is NaN, training stopped.") - - self.log( - "train_loss", - loss.detach().item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.train_trajectories.append((self.global_step, loss.detach().item())) - return loss - - def validation_step(self, batch, batch_idx): - if self.val_size == 0: - return np.nan - - # Create and normalize windows [Ws, L+H, C] - batch = self._normalization( - batch, val_size=self.val_size, test_size=self.test_size - ) - windows = self._create_windows(batch, step="val") - y_idx = batch["y_idx"] - - # Parse windows - ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - 
insample_y=insample_y, # [B, seq_len, 1] - insample_mask=insample_mask, # [B, seq_len, 1] - futr_exog=futr_exog, # [B, F, seq_len, 1+H] - hist_exog=hist_exog, # [B, C, seq_len] - stat_exog=stat_exog, - ) # [B, S] - - # Remove train y_hat (+1 and -1 for padded last window with zeros) - # tuple([B, seq_len, H, output]) -> tuple([B, validation_size, H, output]) - val_windows = (self.val_size) + 1 - outsample_y = outsample_y[:, -val_windows:-1, :] - outsample_mask = outsample_mask[:, -val_windows:-1, :] - - # Model predictions - output = self(windows_batch) # tuple([B, seq_len, H, output]) - if self.loss.is_distribution_output: - output = [arg[:, -val_windows:-1] for arg in output] - outsample_y, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - B = output[0].size()[0] - T = output[0].size()[1] - H = output[0].size()[2] - output = [arg.reshape(-1, *(arg.size()[2:])) for arg in output] - outsample_y = outsample_y.reshape(B * T, H) - outsample_mask = outsample_mask.reshape(B * T, H) - y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1) - y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - _, sample_mean, quants = self.loss.sample(distr_args=distr_args) - - if str(type(self.valid_loss)) in [ - "", - "", - ]: - output = quants - elif str(type(self.valid_loss)) in [ - "" - ]: - output = torch.unsqueeze(sample_mean, dim=-1) # [N,H,1] -> [N,H] - - else: - output = output[:, -val_windows:-1, :] - - # Validation Loss evaluation - if self.valid_loss.is_distribution_output: - valid_loss = self.valid_loss( - y=outsample_y, distr_args=distr_args, mask=outsample_mask - ) - else: - outsample_y, _, _ = self._inv_normalization( - y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - output, _, _ = self._inv_normalization( - y_hat=output, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - valid_loss = self.valid_loss( - y=outsample_y, y_hat=output, mask=outsample_mask - ) - - if torch.isnan(valid_loss): - raise Exception("Loss is NaN, training stopped.") - - self.log( - "valid_loss", - valid_loss.detach().item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.validation_step_outputs.append(valid_loss) - return valid_loss - - def predict_step(self, batch, batch_idx): - # Create and normalize windows [Ws, L+H, C] - batch = self._normalization(batch, val_size=0, test_size=self.test_size) - windows = self._create_windows(batch, step="predict") - y_idx = batch["y_idx"] - - # Parse windows - insample_y, insample_mask, _, _, hist_exog, futr_exog, stat_exog = ( - self._parse_windows(batch, windows) - ) - - windows_batch = dict( - insample_y=insample_y, # [B, seq_len, 1] - insample_mask=insample_mask, # [B, seq_len, 1] - futr_exog=futr_exog, # [B, F, seq_len, 1+H] - hist_exog=hist_exog, # [B, C, seq_len] - stat_exog=stat_exog, - ) # [B, S] - - # Model Predictions - output = self(windows_batch) # tuple([B, seq_len, H], ...) 
- if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=output[0], temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - B = output[0].size()[0] - T = output[0].size()[1] - H = output[0].size()[2] - output = [arg.reshape(-1, *(arg.size()[2:])) for arg in output] - y_loc = y_loc.repeat_interleave(repeats=T, dim=0).squeeze(-1) - y_scale = y_scale.repeat_interleave(repeats=T, dim=0).squeeze(-1) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - _, sample_mean, quants = self.loss.sample(distr_args=distr_args) - y_hat = torch.concat((sample_mean, quants), axis=2) - y_hat = y_hat.view(B, T, H, -1) - - if self.loss.return_params: - distr_args = torch.stack(distr_args, dim=-1) - distr_args = torch.reshape(distr_args, (B, T, H, -1)) - y_hat = torch.concat((y_hat, distr_args), axis=3) - else: - y_hat, _, _ = self._inv_normalization( - y_hat=output, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - return y_hat - - def fit( - self, - dataset, - val_size=0, - test_size=0, - random_seed=None, - distributed_config=None, - ): - """Fit. - - The `fit` method, optimizes the neural network's weights using the - initialization parameters (`learning_rate`, `batch_size`, ...) - and the `loss` function as defined during the initialization. - Within `fit` we use a PyTorch Lightning `Trainer` that - inherits the initialization's `self.trainer_kwargs`, to customize - its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer). - - The method is designed to be compatible with SKLearn-like classes - and in particular to be compatible with the StatsForecast library. - - By default the `model` is not saving training checkpoints to protect - disk memory, to get them change `enable_checkpointing=True` in `__init__`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `val_size`: int, validation size for temporal cross-validation.
- `test_size`: int, test size for temporal cross-validation.
- `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
- """ - return self._fit( - dataset=dataset, - batch_size=self.batch_size, - valid_batch_size=self.valid_batch_size, - val_size=val_size, - test_size=test_size, - random_seed=random_seed, - distributed_config=distributed_config, - ) - - def predict(self, dataset, step_size=1, random_seed=None, **data_module_kwargs): - """Predict. - - Neural network prediction with PL's `Trainer` execution of `predict_step`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `step_size`: int=1, Step size between each window.
- `random_seed`: int=None, random_seed for pytorch initializer and numpy generators, overwrites model.__init__'s.
- `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). - """ - self._check_exog(dataset) - self._restart_seed(random_seed) - data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs) - - if step_size > 1: - raise Exception("Recurrent models do not support step_size > 1") - - # fcsts (window, batch, h) - # Protect when case of multiple gpu. PL does not support return preds with multiple gpu. - pred_trainer_kwargs = self.trainer_kwargs.copy() - if (pred_trainer_kwargs.get("accelerator", None) == "gpu") and ( - torch.cuda.device_count() > 1 - ): - pred_trainer_kwargs["devices"] = [0] - - trainer = pl.Trainer(**pred_trainer_kwargs) - - datamodule = TimeSeriesDataModule( - dataset=dataset, - valid_batch_size=self.valid_batch_size, - num_workers=self.num_workers_loader, - **data_module_kwargs, - ) - fcsts = trainer.predict(self, datamodule=datamodule) - if self.test_size > 0: - # Remove warmup windows (from train and validation) - # [N,T,H,output], avoid indexing last dim for univariate output compatibility - fcsts = torch.vstack( - [fcst[:, -(1 + self.test_size - self.h) :, :] for fcst in fcsts] - ) - fcsts = fcsts.numpy().flatten() - fcsts = fcsts.reshape(-1, len(self.loss.output_names)) - else: - fcsts = torch.vstack([fcst[:, -1:, :] for fcst in fcsts]).numpy().flatten() - fcsts = fcsts.reshape(-1, len(self.loss.output_names)) - return fcsts diff --git a/neuralforecast/common/_base_windows.py b/neuralforecast/common/_base_windows.py deleted file mode 100644 index dd4a4c869..000000000 --- a/neuralforecast/common/_base_windows.py +++ /dev/null @@ -1,744 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/common.base_windows.ipynb. - -# %% auto 0 -__all__ = ['BaseWindows'] - -# %% ../../nbs/common.base_windows.ipynb 5 -import numpy as np -import torch -import torch.nn as nn -import pytorch_lightning as pl - -from ._base_model import BaseModel -from ._scalers import TemporalNorm -from ..tsdataset import TimeSeriesDataModule -from ..utils import get_indexer_raise_missing - -# %% ../../nbs/common.base_windows.ipynb 6 -class BaseWindows(BaseModel): - """Base Windows - - Base class for all windows-based models. The forecasts are produced separately - for each window, which are randomly sampled during training. - - This class implements the basic functionality for all windows-based models, including: - - PyTorch Lightning's methods training_step, validation_step, predict_step.
- - fit and predict methods used by NeuralForecast.core class.
- - sampling and wrangling methods to generate windows. - """ - - def __init__( - self, - h, - input_size, - loss, - valid_loss, - learning_rate, - max_steps, - val_check_steps, - batch_size, - valid_batch_size, - windows_batch_size, - inference_windows_batch_size, - start_padding_enabled, - step_size=1, - num_lr_decays=0, - early_stop_patience_steps=-1, - scaler_type="identity", - futr_exog_list=None, - hist_exog_list=None, - stat_exog_list=None, - exclude_insample_y=False, - num_workers_loader=0, - drop_last_loader=False, - random_seed=1, - alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, - dataloader_kwargs=None, - **trainer_kwargs, - ): - super().__init__( - random_seed=random_seed, - loss=loss, - valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, - max_steps=max_steps, - early_stop_patience_steps=early_stop_patience_steps, - **trainer_kwargs, - ) - - # Padder to complete train windows, - # example y=[1,2,3,4,5] h=3 -> last y_output = [5,0,0] - self.h = h - self.input_size = input_size - self.windows_batch_size = windows_batch_size - self.start_padding_enabled = start_padding_enabled - if start_padding_enabled: - self.padder_train = nn.ConstantPad1d( - padding=(self.input_size - 1, self.h), value=0.0 - ) - else: - self.padder_train = nn.ConstantPad1d(padding=(0, self.h), value=0.0) - - # Batch sizes - self.batch_size = batch_size - if valid_batch_size is None: - self.valid_batch_size = batch_size - else: - self.valid_batch_size = valid_batch_size - if inference_windows_batch_size is None: - self.inference_windows_batch_size = windows_batch_size - else: - self.inference_windows_batch_size = inference_windows_batch_size - - # Optimization - self.learning_rate = learning_rate - self.max_steps = max_steps - self.num_lr_decays = num_lr_decays - self.lr_decay_steps = ( - max(max_steps // self.num_lr_decays, 1) if self.num_lr_decays > 0 else 10e7 - ) - self.early_stop_patience_steps = early_stop_patience_steps - self.val_check_steps = val_check_steps - self.windows_batch_size = windows_batch_size - self.step_size = step_size - - self.exclude_insample_y = exclude_insample_y - - # Scaler - self.scaler = TemporalNorm( - scaler_type=scaler_type, - dim=1, # Time dimension is 1. 
- num_features=1 + len(self.hist_exog_list) + len(self.futr_exog_list), - ) - - # Fit arguments - self.val_size = 0 - self.test_size = 0 - - # Model state - self.decompose_forecast = False - - # DataModule arguments - self.num_workers_loader = num_workers_loader - self.dataloader_kwargs = dataloader_kwargs - self.drop_last_loader = drop_last_loader - # used by on_validation_epoch_end hook - self.validation_step_outputs = [] - self.alias = alias - - def _create_windows(self, batch, step, w_idxs=None): - # Parse common data - window_size = self.input_size + self.h - temporal_cols = batch["temporal_cols"] - temporal = batch["temporal"] - - if step == "train": - if self.val_size + self.test_size > 0: - cutoff = -self.val_size - self.test_size - temporal = temporal[:, :, :cutoff] - - temporal = self.padder_train(temporal) - if temporal.shape[-1] < window_size: - raise Exception( - "Time series is too short for training, consider setting a smaller input size or set start_padding_enabled=True" - ) - windows = temporal.unfold( - dimension=-1, size=window_size, step=self.step_size - ) - - # [B, C, Ws, L+H] 0, 1, 2, 3 - # -> [B * Ws, L+H, C] 0, 2, 3, 1 - windows_per_serie = windows.shape[2] - windows = windows.permute(0, 2, 3, 1).contiguous() - windows = windows.reshape(-1, window_size, len(temporal_cols)) - - # Sample and Available conditions - available_idx = temporal_cols.get_loc("available_mask") - available_condition = windows[:, : self.input_size, available_idx] - available_condition = torch.sum(available_condition, axis=1) - final_condition = available_condition > 0 - if self.h > 0: - sample_condition = windows[:, self.input_size :, available_idx] - sample_condition = torch.sum(sample_condition, axis=1) - final_condition = (sample_condition > 0) & (available_condition > 0) - windows = windows[final_condition] - - # Parse Static data to match windows - # [B, S_in] -> [B, Ws, S_in] -> [B*Ws, S_in] - static = batch.get("static", None) - static_cols = batch.get("static_cols", None) - if static is not None: - static = torch.repeat_interleave( - static, repeats=windows_per_serie, dim=0 - ) - static = static[final_condition] - - # Protection of empty windows - if final_condition.sum() == 0: - raise Exception("No windows available for training") - - # Sample windows - n_windows = len(windows) - if self.windows_batch_size is not None: - w_idxs = np.random.choice( - n_windows, - size=self.windows_batch_size, - replace=(n_windows < self.windows_batch_size), - ) - windows = windows[w_idxs] - - if static is not None: - static = static[w_idxs] - - # think about interaction available * sample mask - # [B, C, Ws, L+H] - windows_batch = dict( - temporal=windows, - temporal_cols=temporal_cols, - static=static, - static_cols=static_cols, - ) - return windows_batch - - elif step in ["predict", "val"]: - - if step == "predict": - initial_input = temporal.shape[-1] - self.test_size - if ( - initial_input <= self.input_size - ): # There is not enough data to predict first timestamp - padder_left = nn.ConstantPad1d( - padding=(self.input_size - initial_input, 0), value=0.0 - ) - temporal = padder_left(temporal) - predict_step_size = self.predict_step_size - cutoff = -self.input_size - self.test_size - temporal = temporal[:, :, cutoff:] - - elif step == "val": - predict_step_size = self.step_size - cutoff = -self.input_size - self.val_size - self.test_size - if self.test_size > 0: - temporal = batch["temporal"][:, :, cutoff : -self.test_size] - else: - temporal = batch["temporal"][:, :, cutoff:] - if 
temporal.shape[-1] < window_size: - initial_input = temporal.shape[-1] - self.val_size - padder_left = nn.ConstantPad1d( - padding=(self.input_size - initial_input, 0), value=0.0 - ) - temporal = padder_left(temporal) - - if ( - (step == "predict") - and (self.test_size == 0) - and (len(self.futr_exog_list) == 0) - ): - padder_right = nn.ConstantPad1d(padding=(0, self.h), value=0.0) - temporal = padder_right(temporal) - - windows = temporal.unfold( - dimension=-1, size=window_size, step=predict_step_size - ) - - # [batch, channels, windows, window_size] 0, 1, 2, 3 - # -> [batch * windows, window_size, channels] 0, 2, 3, 1 - windows_per_serie = windows.shape[2] - windows = windows.permute(0, 2, 3, 1).contiguous() - windows = windows.reshape(-1, window_size, len(temporal_cols)) - - static = batch.get("static", None) - static_cols = batch.get("static_cols", None) - if static is not None: - static = torch.repeat_interleave( - static, repeats=windows_per_serie, dim=0 - ) - - # Sample windows for batched prediction - if w_idxs is not None: - windows = windows[w_idxs] - if static is not None: - static = static[w_idxs] - - windows_batch = dict( - temporal=windows, - temporal_cols=temporal_cols, - static=static, - static_cols=static_cols, - ) - return windows_batch - else: - raise ValueError(f"Unknown step {step}") - - def _normalization(self, windows, y_idx): - # windows are already filtered by train/validation/test - # from the `create_windows_method` nor leakage risk - temporal = windows["temporal"] # B, L+H, C - temporal_cols = windows["temporal_cols"].copy() # B, L+H, C - - # To avoid leakage uses only the lags - # temporal_data_cols = temporal_cols.drop('available_mask').tolist() - temporal_data_cols = self._get_temporal_exogenous_cols( - temporal_cols=temporal_cols - ) - temporal_idxs = get_indexer_raise_missing(temporal_cols, temporal_data_cols) - temporal_idxs = np.append(y_idx, temporal_idxs) - temporal_data = temporal[:, :, temporal_idxs] - temporal_mask = temporal[:, :, temporal_cols.get_loc("available_mask")].clone() - if self.h > 0: - temporal_mask[:, -self.h :] = 0.0 - - # Normalize. self.scaler stores the shift and scale for inverse transform - temporal_mask = temporal_mask.unsqueeze( - -1 - ) # Add channel dimension for scaler.transform. 
- temporal_data = self.scaler.transform(x=temporal_data, mask=temporal_mask) - - # Replace values in windows dict - temporal[:, :, temporal_idxs] = temporal_data - windows["temporal"] = temporal - - return windows - - def _inv_normalization(self, y_hat, temporal_cols, y_idx): - # Receives window predictions [B, H, output] - # Broadcasts outputs and inverts normalization - - # Add C dimension - if y_hat.ndim == 2: - remove_dimension = True - y_hat = y_hat.unsqueeze(-1) - else: - remove_dimension = False - - y_scale = self.scaler.x_scale[:, :, [y_idx]] - y_loc = self.scaler.x_shift[:, :, [y_idx]] - - y_scale = torch.repeat_interleave(y_scale, repeats=y_hat.shape[-1], dim=-1).to( - y_hat.device - ) - y_loc = torch.repeat_interleave(y_loc, repeats=y_hat.shape[-1], dim=-1).to( - y_hat.device - ) - - y_hat = self.scaler.inverse_transform(z=y_hat, x_scale=y_scale, x_shift=y_loc) - y_loc = y_loc.to(y_hat.device) - y_scale = y_scale.to(y_hat.device) - - if remove_dimension: - y_hat = y_hat.squeeze(-1) - y_loc = y_loc.squeeze(-1) - y_scale = y_scale.squeeze(-1) - - return y_hat, y_loc, y_scale - - def _parse_windows(self, batch, windows): - # Filter insample lags from outsample horizon - y_idx = batch["y_idx"] - mask_idx = batch["temporal_cols"].get_loc("available_mask") - - insample_y = windows["temporal"][:, : self.input_size, y_idx] - insample_mask = windows["temporal"][:, : self.input_size, mask_idx] - - # Declare additional information - outsample_y = None - outsample_mask = None - hist_exog = None - futr_exog = None - stat_exog = None - - if self.h > 0: - outsample_y = windows["temporal"][:, self.input_size :, y_idx] - outsample_mask = windows["temporal"][:, self.input_size :, mask_idx] - - if len(self.hist_exog_list): - hist_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.hist_exog_list - ) - hist_exog = windows["temporal"][:, : self.input_size, hist_exog_idx] - - if len(self.futr_exog_list): - futr_exog_idx = get_indexer_raise_missing( - windows["temporal_cols"], self.futr_exog_list - ) - futr_exog = windows["temporal"][:, :, futr_exog_idx] - - if len(self.stat_exog_list): - static_idx = get_indexer_raise_missing( - windows["static_cols"], self.stat_exog_list - ) - stat_exog = windows["static"][:, static_idx] - - # TODO: think a better way of removing insample_y features - if self.exclude_insample_y: - insample_y = insample_y * 0 - - return ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) - - def training_step(self, batch, batch_idx): - # Create and normalize windows [Ws, L+H, C] - windows = self._create_windows(batch, step="train") - y_idx = batch["y_idx"] - original_outsample_y = torch.clone(windows["temporal"][:, -self.h :, y_idx]) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - ( - insample_y, - insample_mask, - outsample_y, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L] - insample_mask=insample_mask, # [Ws, L] - futr_exog=futr_exog, # [Ws, L + h, F] - hist_exog=hist_exog, # [Ws, L, X] - stat_exog=stat_exog, - ) # [Ws, S] - - # Model Predictions - output = self(windows_batch) - if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx - ) - outsample_y = original_outsample_y - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - 
loss = self.loss(y=outsample_y, distr_args=distr_args, mask=outsample_mask) - else: - loss = self.loss(y=outsample_y, y_hat=output, mask=outsample_mask) - - if torch.isnan(loss): - print("Model Parameters", self.hparams) - print("insample_y", torch.isnan(insample_y).sum()) - print("outsample_y", torch.isnan(outsample_y).sum()) - print("output", torch.isnan(output).sum()) - raise Exception("Loss is NaN, training stopped.") - - self.log( - "train_loss", - loss.detach().item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.train_trajectories.append((self.global_step, loss.detach().item())) - return loss - - def _compute_valid_loss( - self, outsample_y, output, outsample_mask, temporal_cols, y_idx - ): - if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=outsample_y, temporal_cols=temporal_cols, y_idx=y_idx - ) - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - _, sample_mean, quants = self.loss.sample(distr_args=distr_args) - - if str(type(self.valid_loss)) in [ - "", - "", - ]: - output = quants - elif str(type(self.valid_loss)) in [ - "" - ]: - output = torch.unsqueeze(sample_mean, dim=-1) # [N,H,1] -> [N,H] - - # Validation Loss evaluation - if self.valid_loss.is_distribution_output: - valid_loss = self.valid_loss( - y=outsample_y, distr_args=distr_args, mask=outsample_mask - ) - else: - output, _, _ = self._inv_normalization( - y_hat=output, temporal_cols=temporal_cols, y_idx=y_idx - ) - valid_loss = self.valid_loss( - y=outsample_y, y_hat=output, mask=outsample_mask - ) - return valid_loss - - def validation_step(self, batch, batch_idx): - if self.val_size == 0: - return np.nan - - # TODO: Hack to compute number of windows - windows = self._create_windows(batch, step="val") - n_windows = len(windows["temporal"]) - y_idx = batch["y_idx"] - - # Number of windows in batch - windows_batch_size = self.inference_windows_batch_size - if windows_batch_size < 0: - windows_batch_size = n_windows - n_batches = int(np.ceil(n_windows / windows_batch_size)) - - valid_losses = [] - batch_sizes = [] - for i in range(n_batches): - # Create and normalize windows [Ws, L+H, C] - w_idxs = np.arange( - i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) - ) - windows = self._create_windows(batch, step="val", w_idxs=w_idxs) - original_outsample_y = torch.clone(windows["temporal"][:, -self.h :, y_idx]) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - ( - insample_y, - insample_mask, - _, - outsample_mask, - hist_exog, - futr_exog, - stat_exog, - ) = self._parse_windows(batch, windows) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L] - insample_mask=insample_mask, # [Ws, L] - futr_exog=futr_exog, # [Ws, L + h, F] - hist_exog=hist_exog, # [Ws, L, X] - stat_exog=stat_exog, - ) # [Ws, S] - - # Model Predictions - output_batch = self(windows_batch) - valid_loss_batch = self._compute_valid_loss( - outsample_y=original_outsample_y, - output=output_batch, - outsample_mask=outsample_mask, - temporal_cols=batch["temporal_cols"], - y_idx=batch["y_idx"], - ) - valid_losses.append(valid_loss_batch) - batch_sizes.append(len(output_batch)) - - valid_loss = torch.stack(valid_losses) - batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device) - batch_size = torch.sum(batch_sizes) - valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size - - if torch.isnan(valid_loss): - raise Exception("Loss is NaN, training stopped.") - - self.log( - 
"valid_loss", - valid_loss.detach().item(), - batch_size=batch_size, - prog_bar=True, - on_epoch=True, - ) - self.validation_step_outputs.append(valid_loss) - return valid_loss - - def predict_step(self, batch, batch_idx): - - # TODO: Hack to compute number of windows - windows = self._create_windows(batch, step="predict") - n_windows = len(windows["temporal"]) - y_idx = batch["y_idx"] - - # Number of windows in batch - windows_batch_size = self.inference_windows_batch_size - if windows_batch_size < 0: - windows_batch_size = n_windows - n_batches = int(np.ceil(n_windows / windows_batch_size)) - - y_hats = [] - for i in range(n_batches): - # Create and normalize windows [Ws, L+H, C] - w_idxs = np.arange( - i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) - ) - windows = self._create_windows(batch, step="predict", w_idxs=w_idxs) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - insample_y, insample_mask, _, _, hist_exog, futr_exog, stat_exog = ( - self._parse_windows(batch, windows) - ) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L] - insample_mask=insample_mask, # [Ws, L] - futr_exog=futr_exog, # [Ws, L + h, F] - hist_exog=hist_exog, # [Ws, L, X] - stat_exog=stat_exog, - ) # [Ws, S] - - # Model Predictions - output_batch = self(windows_batch) - # Inverse normalization and sampling - if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=torch.empty( - size=(insample_y.shape[0], self.h), - dtype=output_batch[0].dtype, - device=output_batch[0].device, - ), - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - distr_args = self.loss.scale_decouple( - output=output_batch, loc=y_loc, scale=y_scale - ) - _, sample_mean, quants = self.loss.sample(distr_args=distr_args) - y_hat = torch.concat((sample_mean, quants), axis=2) - - if self.loss.return_params: - distr_args = torch.stack(distr_args, dim=-1) - distr_args = torch.reshape( - distr_args, (len(windows["temporal"]), self.h, -1) - ) - y_hat = torch.concat((y_hat, distr_args), axis=2) - else: - y_hat, _, _ = self._inv_normalization( - y_hat=output_batch, - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - y_hats.append(y_hat) - y_hat = torch.cat(y_hats, dim=0) - return y_hat - - def fit( - self, - dataset, - val_size=0, - test_size=0, - random_seed=None, - distributed_config=None, - ): - """Fit. - - The `fit` method, optimizes the neural network's weights using the - initialization parameters (`learning_rate`, `windows_batch_size`, ...) - and the `loss` function as defined during the initialization. - Within `fit` we use a PyTorch Lightning `Trainer` that - inherits the initialization's `self.trainer_kwargs`, to customize - its inputs, see [PL's trainer arguments](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer). - - The method is designed to be compatible with SKLearn-like classes - and in particular to be compatible with the StatsForecast library. - - By default the `model` is not saving training checkpoints to protect - disk memory, to get them change `enable_checkpointing=True` in `__init__`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `val_size`: int, validation size for temporal cross-validation.
- `random_seed`: int=None, random seed for the PyTorch initializer and NumPy generators; overrides the value set in `model.__init__`.<br>
- `test_size`: int, test size for temporal cross-validation.
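For orientation, a minimal sketch of the fit/predict flow this docstring describes, going through the high-level `NeuralForecast` wrapper rather than calling the model's `fit` directly; `AirPassengersPanel` and the NHITS hyperparameters are illustrative only, mirroring the model checks added later in this diff:

```python
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS

# AirPassengersPanel is the long-format demo dataset (unique_id, ds, y)
from neuralforecast.utils import AirPassengersPanel

nf = NeuralForecast(models=[NHITS(h=12, input_size=24, max_steps=10)], freq="M")
nf.fit(df=AirPassengersPanel, val_size=12)  # val_size: temporal validation split
preds = nf.predict()                        # h steps past each series' last date
```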
- """ - return self._fit( - dataset=dataset, - batch_size=self.batch_size, - valid_batch_size=self.valid_batch_size, - val_size=val_size, - test_size=test_size, - random_seed=random_seed, - distributed_config=distributed_config, - ) - - def predict( - self, - dataset, - test_size=None, - step_size=1, - random_seed=None, - **data_module_kwargs, - ): - """Predict. - - Neural network prediction with PL's `Trainer` execution of `predict_step`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `test_size`: int=None, test size for temporal cross-validation.
- `step_size`: int=1, Step size between each window.
- `random_seed`: int=None, random seed for the PyTorch initializer and NumPy generators; overrides the value set in `model.__init__`.<br>
- `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). - """ - self._check_exog(dataset) - self._restart_seed(random_seed) - data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs) - - self.predict_step_size = step_size - self.decompose_forecast = False - datamodule = TimeSeriesDataModule( - dataset=dataset, - valid_batch_size=self.valid_batch_size, - **data_module_kwargs, - ) - - # Protect when case of multiple gpu. PL does not support return preds with multiple gpu. - pred_trainer_kwargs = self.trainer_kwargs.copy() - if (pred_trainer_kwargs.get("accelerator", None) == "gpu") and ( - torch.cuda.device_count() > 1 - ): - pred_trainer_kwargs["devices"] = [0] - - trainer = pl.Trainer(**pred_trainer_kwargs) - fcsts = trainer.predict(self, datamodule=datamodule) - fcsts = torch.vstack(fcsts).numpy().flatten() - fcsts = fcsts.reshape(-1, len(self.loss.output_names)) - return fcsts - - def decompose(self, dataset, step_size=1, random_seed=None, **data_module_kwargs): - """Decompose Predictions. - - Decompose the predictions through the network's layers. - Available methods are `ESRNN`, `NHITS`, `NBEATS`, and `NBEATSx`. - - **Parameters:**
- `dataset`: NeuralForecast's `TimeSeriesDataset`, see [documentation here](https://nixtla.github.io/neuralforecast/tsdataset.html).
- `step_size`: int=1, step size between each window of temporal data.
- `**data_module_kwargs`: PL's TimeSeriesDataModule args, see [documentation](https://pytorch-lightning.readthedocs.io/en/1.6.1/extensions/datamodules.html#using-a-datamodule). - """ - # Restart random seed - if random_seed is None: - random_seed = self.random_seed - torch.manual_seed(random_seed) - data_module_kwargs = self._set_quantile_for_iqloss(**data_module_kwargs) - - self.predict_step_size = step_size - self.decompose_forecast = True - datamodule = TimeSeriesDataModule( - dataset=dataset, - valid_batch_size=self.valid_batch_size, - **data_module_kwargs, - ) - trainer = pl.Trainer(**self.trainer_kwargs) - fcsts = trainer.predict(self, datamodule=datamodule) - self.decompose_forecast = False # Default decomposition back to false - return torch.vstack(fcsts).numpy() diff --git a/neuralforecast/common/_model_checks.py b/neuralforecast/common/_model_checks.py new file mode 100644 index 000000000..ab387c0ff --- /dev/null +++ b/neuralforecast/common/_model_checks.py @@ -0,0 +1,224 @@ +# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/common.model_checks.ipynb. + +# %% auto 0 +__all__ = ['seed', 'test_size', 'FREQ', 'N_SERIES_1', 'df', 'max_ds', 'Y_TRAIN_DF_1', 'Y_TEST_DF_1', 'N_SERIES_2', 'Y_TRAIN_DF_2', + 'Y_TEST_DF_2', 'N_SERIES_3', 'STATIC_3', 'Y_TRAIN_DF_3', 'Y_TEST_DF_3', 'N_SERIES_4', 'STATIC_4', + 'Y_TRAIN_DF_4', 'Y_TEST_DF_4', 'check_loss_functions', 'check_airpassengers', 'check_model'] + +# %% ../../nbs/common.model_checks.ipynb 4 +import pandas as pd +import neuralforecast.losses.pytorch as losses + +from .. import NeuralForecast +from neuralforecast.utils import ( + AirPassengersPanel, + AirPassengersStatic, + generate_series, +) + +# %% ../../nbs/common.model_checks.ipynb 5 +seed = 0 +test_size = 14 +FREQ = "D" + +# 1 series, no exogenous +N_SERIES_1 = 1 +df = generate_series(n_series=N_SERIES_1, seed=seed, freq=FREQ, equal_ends=True) +max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ) +Y_TRAIN_DF_1 = df[df.ds < max_ds] +Y_TEST_DF_1 = df[df.ds >= max_ds] + +# 5 series, no exogenous +N_SERIES_2 = 5 +df = generate_series(n_series=N_SERIES_2, seed=seed, freq=FREQ, equal_ends=True) +max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ) +Y_TRAIN_DF_2 = df[df.ds < max_ds] +Y_TEST_DF_2 = df[df.ds >= max_ds] + +# 1 series, with static and temporal exogenous +N_SERIES_3 = 1 +df, STATIC_3 = generate_series( + n_series=N_SERIES_3, + n_static_features=2, + n_temporal_features=2, + seed=seed, + freq=FREQ, + equal_ends=True, +) +max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ) +Y_TRAIN_DF_3 = df[df.ds < max_ds] +Y_TEST_DF_3 = df[df.ds >= max_ds] + +# 5 series, with static and temporal exogenous +N_SERIES_4 = 5 +df, STATIC_4 = generate_series( + n_series=N_SERIES_4, + n_static_features=2, + n_temporal_features=2, + seed=seed, + freq=FREQ, + equal_ends=True, +) +max_ds = df.ds.max() - pd.Timedelta(test_size, FREQ) +Y_TRAIN_DF_4 = df[df.ds < max_ds] +Y_TEST_DF_4 = df[df.ds >= max_ds] + + +# Generic test for a given config for a model +def _run_model_tests(model_class, config): + if model_class.RECURRENT: + config["inference_input_size"] = config["input_size"] + + # DF_1 + if model_class.MULTIVARIATE: + config["n_series"] = N_SERIES_1 + if isinstance(config["loss"], losses.relMSE): + config["loss"].y_train = Y_TRAIN_DF_1["y"].values + if isinstance(config["valid_loss"], losses.relMSE): + config["valid_loss"].y_train = Y_TRAIN_DF_1["y"].values + + model = model_class(**config) + fcst = NeuralForecast(models=[model], freq=FREQ) + fcst.fit(df=Y_TRAIN_DF_1, val_size=24) + _ = 
fcst.predict(futr_df=Y_TEST_DF_1) + # DF_2 + if model_class.MULTIVARIATE: + config["n_series"] = N_SERIES_2 + if isinstance(config["loss"], losses.relMSE): + config["loss"].y_train = Y_TRAIN_DF_2["y"].values + if isinstance(config["valid_loss"], losses.relMSE): + config["valid_loss"].y_train = Y_TRAIN_DF_2["y"].values + model = model_class(**config) + fcst = NeuralForecast(models=[model], freq=FREQ) + fcst.fit(df=Y_TRAIN_DF_2, val_size=24) + _ = fcst.predict(futr_df=Y_TEST_DF_2) + + if model.EXOGENOUS_STAT and model.EXOGENOUS_FUTR: + # DF_3 + if model_class.MULTIVARIATE: + config["n_series"] = N_SERIES_3 + if isinstance(config["loss"], losses.relMSE): + config["loss"].y_train = Y_TRAIN_DF_3["y"].values + if isinstance(config["valid_loss"], losses.relMSE): + config["valid_loss"].y_train = Y_TRAIN_DF_3["y"].values + model = model_class(**config) + fcst = NeuralForecast(models=[model], freq=FREQ) + fcst.fit(df=Y_TRAIN_DF_3, static_df=STATIC_3, val_size=24) + _ = fcst.predict(futr_df=Y_TEST_DF_3) + + # DF_4 + if model_class.MULTIVARIATE: + config["n_series"] = N_SERIES_4 + if isinstance(config["loss"], losses.relMSE): + config["loss"].y_train = Y_TRAIN_DF_4["y"].values + if isinstance(config["valid_loss"], losses.relMSE): + config["valid_loss"].y_train = Y_TRAIN_DF_4["y"].values + model = model_class(**config) + fcst = NeuralForecast(models=[model], freq=FREQ) + fcst.fit(df=Y_TRAIN_DF_4, static_df=STATIC_4, val_size=24) + _ = fcst.predict(futr_df=Y_TEST_DF_4) + + +# Tests a model against every loss function +def check_loss_functions(model_class): + loss_list = [ + losses.MAE(), + losses.MSE(), + losses.RMSE(), + losses.MAPE(), + losses.SMAPE(), + losses.MASE(seasonality=7), + losses.QuantileLoss(q=0.5), + losses.MQLoss(), + losses.IQLoss(), + losses.DistributionLoss("Normal"), + losses.DistributionLoss("StudentT"), + losses.DistributionLoss("Poisson"), + losses.DistributionLoss("NegativeBinomial"), + losses.DistributionLoss("Tweedie", rho=1.5), + losses.DistributionLoss("ISQF"), + losses.PMM(), + losses.PMM(weighted=True), + losses.GMM(), + losses.GMM(weighted=True), + losses.NBMM(), + losses.NBMM(weighted=True), + losses.HuberLoss(), + losses.TukeyLoss(), + losses.HuberQLoss(q=0.5), + losses.HuberMQLoss(), + ] + for loss in loss_list: + test_name = f"{model_class.__name__}: checking {loss._get_name()}" + print(f"{test_name}") + config = { + "max_steps": 2, + "h": 7, + "input_size": 28, + "loss": loss, + "valid_loss": None, + "enable_progress_bar": False, + "enable_model_summary": False, + "val_check_steps": 2, + } + try: + _run_model_tests(model_class, config) + except RuntimeError: + raise Exception(f"{test_name} failed.") + except Exception: + print(f"{test_name} skipped on raised Exception.") + pass + + +# Tests a model against the AirPassengers dataset +def check_airpassengers(model_class): + print(f"{model_class.__name__}: checking forecast AirPassengers dataset") + Y_train_df = AirPassengersPanel[ + AirPassengersPanel.ds < AirPassengersPanel["ds"].values[-12] + ] # 132 train + Y_test_df = AirPassengersPanel[ + AirPassengersPanel.ds >= AirPassengersPanel["ds"].values[-12] + ].reset_index( + drop=True + ) # 12 test + + config = { + "max_steps": 2, + "h": 12, + "input_size": 24, + "enable_progress_bar": False, + "enable_model_summary": False, + "val_check_steps": 2, + } + + if model_class.MULTIVARIATE: + config["n_series"] = Y_train_df["unique_id"].nunique() + # Normal forecast + fcst = NeuralForecast(models=[model_class(**config)], freq="M") + fcst.fit(df=Y_train_df, 
static_df=AirPassengersStatic) + _ = fcst.predict(futr_df=Y_test_df) + + # Cross-validation + fcst = NeuralForecast(models=[model_class(**config)], freq="M") + _ = fcst.cross_validation( + df=AirPassengersPanel, static_df=AirPassengersStatic, n_windows=2, step_size=12 + ) + + +# Add unit test functions to this function +def check_model(model_class, checks=["losses", "airpassengers"]): + """ + Check model with various tests. Options for checks are:
+ "losses": test the model against all loss functions
+ "airpassengers": test the model against the AirPassengers dataset for forecasting and cross-validation (see the usage sketch below)<br>
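A hedged usage sketch for the new `check_model` entry point added in `neuralforecast/common/_model_checks.py`; `NHITS` stands in for any model class:

```python
from neuralforecast.common._model_checks import check_model
from neuralforecast.models import NHITS

# Runs NHITS through every loss function, then through the AirPassengers
# forecast and cross-validation checks defined above.
check_model(NHITS, checks=["losses", "airpassengers"])
```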
+ + """ + if "losses" in checks: + check_loss_functions(model_class) + if "airpassengers" in checks: + try: + check_airpassengers(model_class) + except RuntimeError: + raise Exception( + f"{model_class.__name__}: AirPassengers forecast test failed." + ) diff --git a/neuralforecast/common/_modules.py b/neuralforecast/common/_modules.py index d50228b87..852968bd0 100644 --- a/neuralforecast/common/_modules.py +++ b/neuralforecast/common/_modules.py @@ -4,7 +4,7 @@ __all__ = ['ACTIVATIONS', 'MLP', 'Chomp1d', 'CausalConv1d', 'TemporalConvolutionEncoder', 'TransEncoderLayer', 'TransEncoder', 'TransDecoderLayer', 'TransDecoder', 'AttentionLayer', 'PositionalEmbedding', 'TokenEmbedding', 'TimeFeatureEmbedding', 'FixedEmbedding', 'TemporalEmbedding', 'DataEmbedding', 'MovingAvg', 'SeriesDecomp', - 'RevIN'] + 'RevIN', 'RevINMultivariate'] # %% ../../nbs/common.modules.ipynb 3 import math @@ -601,3 +601,66 @@ def _denormalize(self, x): else: x = x + self.mean return x + +# %% ../../nbs/common.modules.ipynb 21 +class RevINMultivariate(nn.Module): + """ + ReversibleInstanceNorm1d for Multivariate models + """ + + def __init__( + self, + num_features: int, + eps=1e-5, + affine=False, + subtract_last=False, + non_norm=False, + ): + super().__init__() + self.num_features = num_features + self.eps = eps + self.affine = affine + if self.affine: + self._init_params() + + def forward(self, x, mode: str): + if mode == "norm": + x = self._normalize(x) + elif mode == "denorm": + x = self._denormalize(x) + else: + raise NotImplementedError + return x + + def _init_params(self): + # initialize RevIN params: (C,) + self.affine_weight = nn.Parameter(torch.ones((1, 1, self.num_features))) + self.affine_bias = nn.Parameter(torch.zeros((1, 1, self.num_features))) + + def _normalize(self, x): + # Batch statistics + self.batch_mean = torch.mean(x, axis=1, keepdim=True).detach() + self.batch_std = torch.sqrt( + torch.var(x, axis=1, keepdim=True, unbiased=False) + self.eps + ).detach() + + # Instance normalization + x = x - self.batch_mean + x = x / self.batch_std + + if self.affine: + x = x * self.affine_weight + x = x + self.affine_bias + + return x + + def _denormalize(self, x): + # Reverse the normalization + if self.affine: + x = x - self.affine_bias + x = x / self.affine_weight + + x = x * self.batch_std + x = x + self.batch_mean + + return x diff --git a/neuralforecast/common/_scalers.py b/neuralforecast/common/_scalers.py index c45b58d62..f11187d21 100644 --- a/neuralforecast/common/_scalers.py +++ b/neuralforecast/common/_scalers.py @@ -402,11 +402,11 @@ def __init__(self, scaler_type="robust", dim=-1, eps=1e-6, num_features=None): def _init_params(self, num_features): # Initialize RevIN scaler params to broadcast: if self.dim == 1: # [B,T,C] [1,1,C] - self.revin_bias = nn.Parameter(torch.zeros(1, 1, num_features)) - self.revin_weight = nn.Parameter(torch.ones(1, 1, num_features)) + self.revin_bias = nn.Parameter(torch.zeros(1, 1, num_features, 1)) + self.revin_weight = nn.Parameter(torch.ones(1, 1, num_features, 1)) elif self.dim == -1: # [B,C,T] [1,C,1] - self.revin_bias = nn.Parameter(torch.zeros(1, num_features, 1)) - self.revin_weight = nn.Parameter(torch.ones(1, num_features, 1)) + self.revin_bias = nn.Parameter(torch.zeros(1, num_features, 1, 1)) + self.revin_weight = nn.Parameter(torch.ones(1, num_features, 1, 1)) # @torch.no_grad() def transform(self, x, mask): diff --git a/neuralforecast/core.py b/neuralforecast/core.py index fffe4bde5..d53267ba7 100644 --- a/neuralforecast/core.py +++ 
b/neuralforecast/core.py @@ -29,6 +29,7 @@ from .common._base_model import DistributedConfig from .compat import SparkDataFrame +from .losses.pytorch import IQLoss from neuralforecast.tsdataset import ( _FilesDataset, TimeSeriesDataset, @@ -69,7 +70,12 @@ RMoK, ) from .common._base_auto import BaseAuto, MockTrial -from .utils import PredictionIntervals, get_prediction_interval_method +from neuralforecast.utils import ( + PredictionIntervals, + get_prediction_interval_method, + level_to_quantiles, + quantiles_to_level, +) # %% ../nbs/core.ipynb 5 # this disables warnings about the number of workers in the dataloaders @@ -264,6 +270,7 @@ def __init__( # Flags and attributes self._fitted = False self._reset_models() + self._add_level = False def _scalers_fit_transform(self, dataset: TimeSeriesDataset) -> None: self.scalers_ = {} @@ -681,13 +688,16 @@ def _get_model_names(self, add_level=False) -> List[str]: names: List[str] = [] count_names = {"model": 0} for model in self.models: - if add_level and model.loss.outputsize_multiplier > 1: - continue - model_name = repr(model) count_names[model_name] = count_names.get(model_name, -1) + 1 if count_names[model_name] > 0: model_name += str(count_names[model_name]) + + if add_level and ( + model.loss.outputsize_multiplier > 1 or isinstance(model.loss, IQLoss) + ): + continue + names.extend(model_name + n for n in model.loss.output_names) return names @@ -815,6 +825,7 @@ def predict( verbose: bool = False, engine=None, level: Optional[List[Union[int, float]]] = None, + quantiles: Optional[List[float]] = None, **data_kwargs, ): """Predict with core.NeuralForecast. @@ -838,6 +849,8 @@ def predict( Distributed engine for inference. Only used if df is a spark dataframe or if fit was called on a spark dataframe. level : list of ints or floats, optional (default=None) Confidence levels between 0 and 100. + quantiles : list of floats, optional (default=None) + Alternative to level, target quantiles to predict. data_kwargs : kwargs Extra arguments to be passed to the dataset within each model. 
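To make the new, mutually exclusive `level`/`quantiles` arguments concrete, a sketch assuming `nf` is a fitted `NeuralForecast` whose model (here named "NHITS") was trained with a quantile-capable loss; the column names follow `_get_column_name` further down in this diff:

```python
# level: classic confidence intervals, expanded internally to quantile pairs
fcst = nf.predict(level=[80, 90])
# -> columns like NHITS, NHITS-lo-90, NHITS-lo-80, NHITS-hi-80, NHITS-hi-90

# quantiles: ask for the target quantiles directly
fcst = nf.predict(quantiles=[0.1, 0.5, 0.9])
# -> columns like NHITS, NHITS_ql0.1, NHITS_ql0.5, NHITS_ql0.9

nf.predict(level=[90], quantiles=[0.5])  # raises ValueError: can't set both
```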
@@ -853,6 +866,22 @@ def predict( if not self._fitted: raise Exception("You must fit the model before predicting.") + quantiles_ = None + level_ = None + has_level = False + if level is not None: + has_level = True + if quantiles is not None: + raise ValueError("You can't set both level and quantiles.") + level_ = sorted(list(set(level))) + quantiles_ = level_to_quantiles(level_) + + if quantiles is not None: + if level is not None: + raise ValueError("You can't set both level and quantiles.") + quantiles_ = sorted(list(set(quantiles))) + level_ = quantiles_to_level(quantiles_) + needed_futr_exog = self._get_needed_futr_exog() if needed_futr_exog: if futr_df is None: @@ -905,8 +934,6 @@ def predict( if verbose: print("Using stored dataset.") - cols = self._get_model_names() - # Placeholder dataframe for predictions with unique_id and ds fcsts_df = ufp.make_future_dataframe( uids=uids, @@ -949,27 +976,20 @@ def predict( self._scalers_transform(futr_dataset) dataset = dataset.append(futr_dataset) - col_idx = 0 - fcsts = np.full( - (self.h * len(uids), len(cols)), fill_value=np.nan, dtype=np.float32 + fcsts, cols = self._generate_forecasts( + dataset=dataset, + uids=uids, + quantiles_=quantiles_, + level_=level_, + has_level=has_level, + **data_kwargs, ) - for model in self.models: - old_test_size = model.get_test_size() - model.set_test_size(self.h) # To predict h steps ahead - model_fcsts = model.predict(dataset=dataset, **data_kwargs) - # Append predictions in memory placeholder - output_length = len(model.loss.output_names) - fcsts[:, col_idx : col_idx + output_length] = model_fcsts - col_idx += output_length - model.set_test_size(old_test_size) # Set back to original value + if self.scalers_: indptr = np.append(0, np.full(len(uids), self.h).cumsum()) fcsts = self._scalers_target_inverse_transform(fcsts, indptr) # Declare predictions pd.DataFrame - cols = ( - self._get_model_names() - ) # Needed for IQLoss as column names may have changed during the call to .predict() if isinstance(fcsts_df, pl_DataFrame): fcsts = pl_DataFrame(dict(zip(cols, fcsts.T))) else: @@ -979,29 +999,6 @@ def predict( _warn_id_as_idx() fcsts_df = fcsts_df.set_index(self.id_col) - # add prediction intervals - if level is not None: - if self._cs_df is None or self.prediction_intervals is None: - raise Exception( - "You must fit the model with prediction_intervals to use level." - ) - else: - level_ = sorted(level) - model_names = self._get_model_names(add_level=True) - prediction_interval_method = get_prediction_interval_method( - self.prediction_intervals.method - ) - - fcsts_df = prediction_interval_method( - fcsts_df, - self._cs_df, - model_names=list(model_names), - level=level_, - cs_n_windows=self.prediction_intervals.n_windows, - n_series=len(uids), - horizon=self.h, - ) - return fcsts_df def _reset_models(self): @@ -1050,15 +1047,6 @@ def _no_refit_cross_validation( "Validation and test sets are larger than the shorter time-series." 
) - cols = [] - count_names = {"model": 0} - for model in self.models: - model_name = repr(model) - count_names[model_name] = count_names.get(model_name, -1) + 1 - if count_names[model_name] > 0: - model_name += str(count_names[model_name]) - cols += [model_name + n for n in model.loss.output_names] - fcsts_df = ufp.cv_times( times=self.ds, uids=self.uids, @@ -1072,23 +1060,22 @@ def _no_refit_cross_validation( # the cv_times is sorted by window and then id fcsts_df = ufp.sort(fcsts_df, [id_col, "cutoff", time_col]) - col_idx = 0 - fcsts = np.full( - (self.dataset.n_groups * self.h * n_windows, len(cols)), - np.nan, - dtype=np.float32, - ) - + fcsts_list: List = [] for model in self.models: + if self._add_level and ( + model.loss.outputsize_multiplier > 1 or isinstance(model.loss, IQLoss) + ): + continue + model.fit(dataset=self.dataset, val_size=val_size, test_size=test_size) model_fcsts = model.predict( self.dataset, step_size=step_size, **data_kwargs ) # Append predictions in memory placeholder - output_length = len(model.loss.output_names) - fcsts[:, col_idx : (col_idx + output_length)] = model_fcsts - col_idx += output_length + fcsts_list.append(model_fcsts) + + fcsts = np.concatenate(fcsts_list, axis=-1) # we may have allocated more space than needed # each serie can produce at most (serie.size - 1) // self.h CV windows effective_sizes = ufp.counts_by_id(fcsts_df, id_col)["counts"].to_numpy() @@ -1116,6 +1103,7 @@ def _no_refit_cross_validation( self._fitted = True # Add predictions to forecasts DataFrame + cols = self._get_model_names(add_level=self._add_level) if isinstance(self.uids, pl_Series): fcsts = pl_DataFrame(dict(zip(cols, fcsts.T))) else: @@ -1151,6 +1139,7 @@ def cross_validation( target_col: str = "y", prediction_intervals: Optional[PredictionIntervals] = None, level: Optional[List[Union[int, float]]] = None, + quantiles: Optional[List[float]] = None, **data_kwargs, ) -> DataFrame: """Temporal Cross-Validation with core.NeuralForecast. @@ -1192,7 +1181,9 @@ def cross_validation( prediction_intervals : PredictionIntervals, optional (default=None) Configuration to calibrate prediction intervals (Conformal Prediction). level : list of ints or floats, optional (default=None) - Confidence levels between 0 and 100. Use with prediction_intervals. + Confidence levels between 0 and 100. + quantiles : list of floats, optional (default=None) + Alternative to level, target quantiles to predict. data_kwargs : kwargs Extra arguments to be passed to the dataset within each model. @@ -1225,17 +1216,19 @@ def cross_validation( df = df.reset_index(id_col) # Checks for prediction intervals - if prediction_intervals is not None or level is not None: - if level is None: - warnings.warn("Level not provided, using level=[90].") - level = [90] - if prediction_intervals is None: - raise Exception("You must set prediction_intervals to use level.") + if prediction_intervals is not None: + if level is None and quantiles is None: + raise Exception( + "When passing prediction_intervals you need to set the level or quantiles argument." + ) if not refit: raise Exception( - "Passing prediction_intervals and/or level is only supported with refit=True." + "Passing prediction_intervals is only supported with refit=True." 
) + if level is not None and quantiles is not None: + raise ValueError("You can't set both level and quantiles argument.") + if not refit: return self._no_refit_cross_validation( @@ -1296,6 +1289,7 @@ def cross_validation( sort_df=sort_df, verbose=verbose, level=level, + quantiles=quantiles, **data_kwargs, ) preds = ufp.join(preds, cutoffs, on=id_col, how="left") @@ -1317,7 +1311,7 @@ def cross_validation( out = out.set_index(id_col) return out - def predict_insample(self, step_size: int = 1): + def predict_insample(self, step_size: int = 1, **data_kwargs): """Predict insample with core.NeuralForecast. `core.NeuralForecast`'s `predict_insample` uses stored fitted `models` @@ -1338,26 +1332,6 @@ def predict_insample(self, step_size: int = 1): "The models must be fitted first with `fit` or `cross_validation`." ) - for model in self.models: - if model.SAMPLING_TYPE == "recurrent": - warnings.warn( - f"Predict insample might not provide accurate predictions for \ - recurrent model {repr(model)} class yet due to scaling." - ) - print( - f"WARNING: Predict insample might not provide accurate predictions for \ - recurrent model {repr(model)} class yet due to scaling." - ) - - cols = [] - count_names = {"model": 0} - for model in self.models: - model_name = repr(model) - count_names[model_name] = count_names.get(model_name, -1) + 1 - if count_names[model_name] > 0: - model_name += str(count_names[model_name]) - cols += [model_name + n for n in model.loss.output_names] - # Remove test set from dataset and last dates test_size = self.models[0].get_test_size() @@ -1396,9 +1370,7 @@ def predict_insample(self, step_size: int = 1): time_col=self.time_col, ) - col_idx = 0 - fcsts = np.full((len(fcsts_df), len(cols)), np.nan, dtype=np.float32) - + fcsts_list: List = [] for model in self.models: # Test size is the number of periods to forecast (full size of trimmed dataset) model.set_test_size(test_size=trimmed_dataset.max_size) @@ -1406,10 +1378,9 @@ def predict_insample(self, step_size: int = 1): # Predict model_fcsts = model.predict(trimmed_dataset, step_size=step_size) # Append predictions in memory placeholder - output_length = len(model.loss.output_names) - fcsts[:, col_idx : (col_idx + output_length)] = model_fcsts - col_idx += output_length + fcsts_list.append(model_fcsts) model.set_test_size(test_size=test_size) # Set original test_size + fcsts = np.concatenate(fcsts_list, axis=-1) # original y original_y = { @@ -1419,6 +1390,7 @@ def predict_insample(self, step_size: int = 1): } # Add predictions to forecasts DataFrame + cols = self._get_model_names() if isinstance(self.uids, pl_Series): fcsts = pl_DataFrame(dict(zip(cols, fcsts.T))) Y_df = pl_DataFrame(original_y) @@ -1698,6 +1670,7 @@ def _conformity_scores( "Please reduce the number of windows, horizon or remove those series." 
) + self._add_level = True cv_results = self.cross_validation( df=df, static_df=static_df, @@ -1706,6 +1679,7 @@ def _conformity_scores( time_col=time_col, target_col=target_col, ) + self._add_level = False kept = [time_col, id_col, "cutoff"] # conformity score for each model @@ -1717,3 +1691,126 @@ def _conformity_scores( cv_results = ufp.assign_columns(cv_results, model, abs_err) dropped = list(set(cv_results.columns) - set(kept)) return ufp.drop_columns(cv_results, dropped) + + def _generate_forecasts( + self, + dataset: TimeSeriesDataset, + uids: Series, + quantiles_: Optional[List[float]] = None, + level_: Optional[List[Union[int, float]]] = None, + has_level: Optional[bool] = False, + **data_kwargs, + ) -> np.array: + fcsts_list: List = [] + cols = [] + count_names = {"model": 0} + for model in self.models: + old_test_size = model.get_test_size() + model.set_test_size(self.h) # To predict h steps ahead + + # Increment model name if the same model is used more than once + model_name = repr(model) + count_names[model_name] = count_names.get(model_name, -1) + 1 + if count_names[model_name] > 0: + model_name += str(count_names[model_name]) + + # Predict for every quantile or level if requested and the loss function supports it + # case 1: DistributionLoss and MixtureLosses + if ( + quantiles_ is not None + and not isinstance(model.loss, IQLoss) + and hasattr(model.loss, "update_quantile") + and callable(model.loss.update_quantile) + ): + model_fcsts = model.predict( + dataset=dataset, quantiles=quantiles_, **data_kwargs + ) + fcsts_list.append(model_fcsts) + col_names = [] + for i, quantile in enumerate(quantiles_): + col_name = self._get_column_name(model_name, quantile, has_level) + if i == 0: + col_names.extend([f"{model_name}", col_name]) + else: + col_names.extend([col_name]) + if hasattr(model.loss, "return_params") and model.loss.return_params: + cols.extend( + col_names + + [ + model_name + param_name + for param_name in model.loss.param_names + ] + ) + else: + cols.extend(col_names) + # case 2: IQLoss + elif quantiles_ is not None and isinstance(model.loss, IQLoss): + # IQLoss does not give monotonically increasing quantiles, so we apply a hack: compute all quantiles, and take the quantile over the quantiles + quantiles_iqloss = np.linspace(0.01, 0.99, 20) + fcsts_list_iqloss = [] + for i, quantile in enumerate(quantiles_iqloss): + model_fcsts = model.predict( + dataset=dataset, quantiles=[quantile], **data_kwargs + ) + fcsts_list_iqloss.append(model_fcsts) + fcsts_iqloss = np.concatenate(fcsts_list_iqloss, axis=-1) + + # Get the actual requested quantiles + model_fcsts = np.quantile(fcsts_iqloss, quantiles_, axis=-1).T + fcsts_list.append(model_fcsts) + + # Get the right column names + col_names = [] + for i, quantile in enumerate(quantiles_): + col_name = self._get_column_name(model_name, quantile, has_level) + col_names.extend([col_name]) + cols.extend(col_names) + # case 3: PointLoss via prediction intervals + elif quantiles_ is not None and model.loss.outputsize_multiplier == 1: + if self.prediction_intervals is None: + raise AttributeError( + f"You have trained {model_name} with loss={type(model.loss).__name__}(). \n" + " You then must set `prediction_intervals` during fit to use level or quantiles during predict." 
+ ) + model_fcsts = model.predict( + dataset=dataset, quantiles=quantiles_, **data_kwargs + ) + prediction_interval_method = get_prediction_interval_method( + self.prediction_intervals.method + ) + fcsts_with_intervals, out_cols = prediction_interval_method( + model_fcsts, + self._cs_df, + model=model_name, + level=level_ if has_level else None, + cs_n_windows=self.prediction_intervals.n_windows, + n_series=len(uids), + horizon=self.h, + quantiles=quantiles_ if not has_level else None, + ) + fcsts_list.append(fcsts_with_intervals) + cols.extend([model_name] + out_cols) + # base case: quantiles or levels are not supported or provided as arguments + else: + model_fcsts = model.predict(dataset=dataset, **data_kwargs) + fcsts_list.append(model_fcsts) + cols.extend(model_name + n for n in model.loss.output_names) + model.set_test_size(old_test_size) # Set back to original value + fcsts = np.concatenate(fcsts_list, axis=-1) + + return fcsts, cols + + @staticmethod + def _get_column_name(model_name, quantile, has_level) -> str: + if not has_level: + col_name = f"{model_name}_ql{quantile}" + elif quantile < 0.5: + level_lo = int(round(100 - 200 * quantile)) + col_name = f"{model_name}-lo-{level_lo}" + elif quantile > 0.5: + level_hi = int(round(100 - 200 * (1 - quantile))) + col_name = f"{model_name}-hi-{level_hi}" + else: + col_name = f"{model_name}-median" + + return col_name diff --git a/neuralforecast/losses/pytorch.py b/neuralforecast/losses/pytorch.py index a713b5b31..6e6e98e8c 100644 --- a/neuralforecast/losses/pytorch.py +++ b/neuralforecast/losses/pytorch.py @@ -6,9 +6,8 @@ 'Accuracy', 'sCRPS'] # %% ../../nbs/losses.pytorch.ipynb 4 -from typing import Optional, Union, Tuple +from typing import Optional, Union, Tuple, List -import math import numpy as np import torch @@ -22,6 +21,9 @@ Poisson, NegativeBinomial, Beta, + Gamma, + MixtureSameFamily, + Categorical, AffineTransform, TransformedDistribution, ) @@ -55,7 +57,9 @@ class BasePointLoss(torch.nn.Module): `output_names`: Names of the outputs.
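Worked examples of the quantile-to-column mapping implemented by the new `_get_column_name` helper above (the model name "NHITS" is illustrative):

```python
from neuralforecast import NeuralForecast

# With has_level=True, a quantile q maps back to a confidence level:
# lo: 100 - 200 * q, hi: 100 - 200 * (1 - q), and q = 0.5 is the median.
assert NeuralForecast._get_column_name("NHITS", 0.05, has_level=True) == "NHITS-lo-90"
assert NeuralForecast._get_column_name("NHITS", 0.975, has_level=True) == "NHITS-hi-95"
assert NeuralForecast._get_column_name("NHITS", 0.5, has_level=True) == "NHITS-median"
# With has_level=False the quantile is reported verbatim:
assert NeuralForecast._get_column_name("NHITS", 0.25, has_level=False) == "NHITS_ql0.25"
```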
""" - def __init__(self, horizon_weight, outputsize_multiplier, output_names): + def __init__( + self, horizon_weight=None, outputsize_multiplier=None, output_names=None + ): super(BasePointLoss, self).__init__() if horizon_weight is not None: horizon_weight = torch.Tensor(horizon_weight.flatten()) @@ -66,10 +70,13 @@ def __init__(self, horizon_weight, outputsize_multiplier, output_names): def domain_map(self, y_hat: torch.Tensor): """ - Univariate loss operates in dimension [B,T,H]/[B,H] - This changes the network's output from [B,H,1]->[B,H] + Input: + Univariate: [B, H, 1] + Multivariate: [B, H, N] + + Output: [B, H, N] """ - return y_hat.squeeze(-1) + return y_hat def _compute_weights(self, y, mask): """ @@ -78,17 +85,18 @@ def _compute_weights(self, y, mask): If set, check that it has the same length as the horizon in x. """ if mask is None: - mask = torch.ones_like(y, device=y.device) + mask = torch.ones_like(y) if self.horizon_weight is None: - self.horizon_weight = torch.ones(mask.shape[-1]) + weights = torch.ones_like(mask) else: - assert mask.shape[-1] == len( + assert mask.shape[1] == len( self.horizon_weight ), "horizon_weight must have same length as Y" + weights = self.horizon_weight.clone() + weights = weights[None, :, None].to(mask.device) + weights = torch.ones_like(mask, device=mask.device) * weights - weights = self.horizon_weight.clone() - weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device) return weights * mask # %% ../../nbs/losses.pytorch.ipynb 11 @@ -118,7 +126,8 @@ def __call__( y: torch.Tensor, y_hat: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + y_insample: Union[torch.Tensor, None] = None, + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
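The `_compute_weights` rework above moves the point losses to an explicit `[B, h, N]` (batch, horizon, series) layout, so `horizon_weight` now broadcasts along the middle axis; a minimal sketch of that broadcast:

```python
import torch

B, h, N = 2, 3, 4
mask = torch.ones(B, h, N)
horizon_weight = torch.tensor([1.0, 2.0, 3.0])  # one weight per horizon step

weights = horizon_weight[None, :, None]         # [h] -> [1, h, 1]
weights = torch.ones_like(mask) * weights       # broadcast to [B, h, N]
assert weights.shape == (B, h, N)
assert torch.all(weights[:, 1, :] == 2.0)       # second step weighted by 2.0
```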
@@ -158,8 +167,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
@@ -203,7 +213,8 @@ def __call__( y: torch.Tensor, y_hat: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + y_insample: Union[torch.Tensor, None] = None, + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
@@ -248,8 +259,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
@@ -298,7 +310,8 @@ def __call__( y: torch.Tensor, y_hat: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + y_insample: Union[torch.Tensor, None] = None, + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
@@ -348,12 +361,12 @@ def __call__( y_hat: torch.Tensor, y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor (batch_size, output_size), Actual values.
`y_hat`: tensor (batch_size, output_size), Predicted values.<br>
- `y_insample`: tensor (batch_size, input_size), Actual insample Seasonal Naive predictions.
+ `y_insample`: tensor (batch_size, input_size), Actual insample values.
`mask`: tensor, Specifies date stamps per series to consider in loss.<br>
**Returns:**
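The next hunk widens the MASE scale broadcast from `scale[:, None]` to `scale[:, None, None]` for the same `[B, h, N]` layout; a sketch, assuming (as in the univariate case) a per-batch-element scale of shape `[B]`:

```python
import torch

B, h, N = 2, 3, 1
delta_y = torch.rand(B, h, N)            # |y - y_hat| in the new layout
scale = torch.rand(B) + 0.1              # seasonal-naive denominator (sketch)

losses = delta_y / scale[:, None, None]  # [B] -> [B, 1, 1], broadcasts over h, N
assert losses.shape == (B, h, N)
```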
@@ -366,7 +379,7 @@ def __call__( ), axis=1, ) - losses = _divide_no_nan(delta_y, scale[:, None]) + losses = _divide_no_nan(delta_y, scale[:, None, None]) weights = self._compute_weights(y=y, mask=mask) return _weighted_mean(losses=losses, weights=weights) @@ -375,11 +388,11 @@ class relMSE(BasePointLoss): """Relative Mean Squared Error Computes Relative Mean Squared Error (relMSE), as proposed by Hyndman & Koehler (2006) as an alternative to percentage errors, to avoid measure unstability. - $$ \mathrm{relMSE}(\\mathbf{y}, \\mathbf{\hat{y}}, \\mathbf{\hat{y}}^{naive1}) = - \\frac{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}})}{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}}^{naive1})} $$ + $$ \mathrm{relMSE}(\\mathbf{y}, \\mathbf{\hat{y}}, \\mathbf{\hat{y}}^{benchmark}) = + \\frac{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}})}{\mathrm{MSE}(\\mathbf{y}, \\mathbf{\hat{y}}^{benchmark})} $$ **Parameters:**
- `y_train`: numpy array, Training values.
+ `y_train`: numpy array, deprecated.
`horizon_weight`: Tensor of size h, weight for each timestamp of the forecasting window.
**References:**
@@ -391,34 +404,32 @@ class relMSE(BasePointLoss): Submitted to the International Journal Forecasting, Working paper available at arxiv.](https://arxiv.org/pdf/2110.13179.pdf) """ - def __init__(self, y_train, horizon_weight=None): + def __init__(self, y_train=None, horizon_weight=None): super(relMSE, self).__init__( horizon_weight=horizon_weight, outputsize_multiplier=1, output_names=[""] ) - self.y_train = y_train + if y_train is not None: + raise DeprecationWarning("y_train will be deprecated in a future release.") self.mse = MSE(horizon_weight=horizon_weight) def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_benchmark: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor (batch_size, output_size), Actual values.
`y_hat`: tensor (batch_size, output_size), Predicted values.<br>
- `y_insample`: tensor (batch_size, input_size), Actual insample Seasonal Naive predictions.
+ `y_benchmark`: tensor (batch_size, output_size), Benchmark predicted values.
`mask`: tensor, Specifies date stamps per series to consider in loss.<br>
**Returns:**
`relMSE`: tensor (single value). """ - horizon = y.shape[-1] - last_col = self.y_train[:, -1].unsqueeze(1) - y_naive = last_col.repeat(1, horizon) - - norm = self.mse(y=y, y_hat=y_naive, mask=mask) # Already weighted + norm = self.mse(y=y, y_hat=y_benchmark, mask=mask) # Already weighted norm = norm + 1e-5 # Numerical stability loss = self.mse(y=y, y_hat=y_hat, mask=mask) # Already weighted loss = _divide_no_nan(loss, norm) @@ -456,8 +467,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
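Following the relMSE rework above, callers now supply the benchmark forecast explicitly instead of storing `y_train` on the loss; a minimal sketch (the rolled series is just an illustrative stand-in benchmark):

```python
import torch
from neuralforecast.losses.pytorch import relMSE

B, h, N = 2, 12, 1
y = torch.rand(B, h, N)
y_hat = torch.rand(B, h, N)
y_benchmark = y.roll(1, dims=1)  # stand-in benchmark forecast

# relMSE = MSE(y, y_hat) / (MSE(y, y_benchmark) + 1e-5)
loss = relMSE()(y=y, y_hat=y_hat, y_benchmark=y_benchmark)
```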
@@ -549,38 +561,48 @@ def __init__(self, level=[80, 90], quantiles=None, horizon_weight=None): def domain_map(self, y_hat: torch.Tensor): """ - Identity domain map [B,T,H,Q]/[B,H,Q] + Input: + Univariate: [B, H, 1 * Q] + Multivariate: [B, H, N * Q] + + Output: [B, H, N, Q] """ - return y_hat + output = y_hat.reshape( + y_hat.shape[0], y_hat.shape[1], -1, self.outputsize_multiplier + ) + + return output def _compute_weights(self, y, mask): """ Compute final weights for each datapoint (based on all weights and all masks) Set horizon_weight to a ones[H] tensor if not set. If set, check that it has the same length as the horizon in x. + + y: [B, h, N, 1] + mask: [B, h, N, 1] """ - if mask is None: - mask = torch.ones_like(y, device=y.device) - else: - mask = mask.unsqueeze(1) # Add Q dimension. if self.horizon_weight is None: - self.horizon_weight = torch.ones(mask.shape[-1]) + weights = torch.ones_like(mask) else: - assert mask.shape[-1] == len( + assert mask.shape[1] == len( self.horizon_weight ), "horizon_weight must have same length as Y" + weights = self.horizon_weight.clone() + weights = weights[None, :, None, None] + weights = weights.to(mask.device) + weights = torch.ones_like(mask, device=mask.device) * weights - weights = self.horizon_weight.clone() - weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device) return weights * mask def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
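The `domain_map` rewrite above gives MQLoss an explicit quantile axis: the network emits `N * Q` values per horizon step and the loss unflattens them. A minimal shape sketch, with `Q` playing the role of `outputsize_multiplier`:

```python
import torch

B, H, N, Q = 2, 12, 3, 5             # Q = outputsize_multiplier (n quantiles)
y_hat = torch.rand(B, H, N * Q)      # raw network output
output = y_hat.reshape(B, H, -1, Q)  # -> [B, H, N, Q]
assert output.shape == (B, H, N, Q)
```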
@@ -590,26 +612,24 @@ def __call__( **Returns:**
`mqloss`: tensor (single value). """ + # [B, h, N] -> [B, h, N, 1] + if y_hat.ndim == 3: + y_hat = y_hat.unsqueeze(-1) + + y = y.unsqueeze(-1) + if mask is not None: + mask = mask.unsqueeze(-1) + else: + mask = torch.ones_like(y, device=y.device) + + error = y_hat - y - error = y_hat - y.unsqueeze(-1) sq = torch.maximum(-error, torch.zeros_like(error)) s1_q = torch.maximum(error, torch.zeros_like(error)) - losses = (1 / len(self.quantiles)) * ( - self.quantiles * sq + (1 - self.quantiles) * s1_q - ) - - if y_hat.ndim == 3: # BaseWindows - losses = losses.swapaxes( - -2, -1 - ) # [B,H,Q] -> [B,Q,H] (needed for horizon weighting, H at the end) - elif y_hat.ndim == 4: # BaseRecurrent - losses = losses.swapaxes(-2, -1) - losses = losses.swapaxes( - -2, -3 - ) # [B,seq_len,H,Q] -> [B,Q,seq_len,H] (needed for horizon weighting, H at the end) + quantiles = self.quantiles[None, None, None, :] + losses = (1 / len(quantiles)) * (quantiles * sq + (1 - quantiles) * s1_q) weights = self._compute_weights(y=losses, mask=mask) # Use losses for extra dim - # NOTE: Weights do not have Q dimension. return _weighted_mean(losses=losses, weights=weights) @@ -700,9 +720,9 @@ def _init_sampling_distribution(self, device): concentration0=concentration0, concentration1=concentration1 ) - def update_quantile(self, q: float = 0.5): - self.q = q - self.output_names = [f"_ql{q}"] + def update_quantile(self, q: List[float] = [0.5]): + self.q = q[0] + self.output_names = [f"_ql{q[0]}"] self.has_predicted = True def domain_map(self, y_hat): @@ -711,9 +731,8 @@ def domain_map(self, y_hat): Input shapes to this function: - base_windows: y_hat = [B, h, 1] - base_multivariate: y_hat = [B, h, n_series] - base_recurrent: y_hat = [B, seq_len, h, n_series] + Univariate: y_hat = [B, h, 1] + Multivariate: y_hat = [B, h, N] """ if self.eval() and self.has_predicted: quantiles = torch.full( @@ -734,7 +753,7 @@ def domain_map(self, y_hat): emb_outputs = self.output_layer(emb_inputs) # Domain map - y_hat = emb_outputs.squeeze(-1).squeeze(-1) + y_hat = emb_outputs.squeeze(-1) return y_hat @@ -767,20 +786,6 @@ def weighted_average( return x.mean(dim=dim) # %% ../../nbs/losses.pytorch.ipynb 65 -def bernoulli_domain_map(input: torch.Tensor): - """Bernoulli Domain Map - Maps input into distribution constraints, by construction input's - last dimension is of matching `distr_args` length. - - **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
- - **Returns:**
- `(probs,)`: tuple with tensors of Poisson distribution arguments.
- """ - return (input.squeeze(-1),) - - def bernoulli_scale_decouple(output, loc=None, scale=None): """Bernoulli Scale Decouple @@ -795,22 +800,6 @@ def bernoulli_scale_decouple(output, loc=None, scale=None): return (probs,) -def student_domain_map(input: torch.Tensor): - """Student T Domain Map - Maps input into distribution constraints, by construction input's - last dimension is of matching `distr_args` length. - - **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
- `eps`: float, helps the initialization of scale for easier optimization.
- - **Returns:**
- `(df, loc, scale)`: tuple with tensors of StudentT distribution arguments.
- """ - df, loc, scale = torch.tensor_split(input, 3, dim=-1) - return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) - - def student_scale_decouple(output, loc=None, scale=None, eps: float = 0.1): """Normal Scale Decouple @@ -827,22 +816,6 @@ def student_scale_decouple(output, loc=None, scale=None, eps: float = 0.1): return (df, mean, tscale) -def normal_domain_map(input: torch.Tensor): - """Normal Domain Map - Maps input into distribution constraints, by construction input's - last dimension is of matching `distr_args` length. - - **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
- `eps`: float, helps the initialization of scale for easier optimization.
- - **Returns:**
- `(mean, std)`: tuple with tensors of Normal distribution arguments.
- """ - mean, std = torch.tensor_split(input, 2, dim=-1) - return mean.squeeze(-1), std.squeeze(-1) - - def normal_scale_decouple(output, loc=None, scale=None, eps: float = 0.2): """Normal Scale Decouple @@ -858,20 +831,6 @@ def normal_scale_decouple(output, loc=None, scale=None, eps: float = 0.2): return (mean, std) -def poisson_domain_map(input: torch.Tensor): - """Poisson Domain Map - Maps input into distribution constraints, by construction input's - last dimension is of matching `distr_args` length. - - **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
- - **Returns:**
- `(rate,)`: tuple with tensors of Poisson distribution arguments.
- """ - return (input.squeeze(-1),) - - def poisson_scale_decouple(output, loc=None, scale=None): """Poisson Scale Decouple @@ -887,21 +846,6 @@ def poisson_scale_decouple(output, loc=None, scale=None): return (rate,) -def nbinomial_domain_map(input: torch.Tensor): - """Negative Binomial Domain Map - Maps input into distribution constraints, by construction input's - last dimension is of matching `distr_args` length. - - **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
- - **Returns:**
- `(total_count, alpha)`: tuple with tensors of N.Binomial distribution arguments.
- """ - mu, alpha = torch.tensor_split(input, 2, dim=-1) - return mu.squeeze(-1), alpha.squeeze(-1) - - def nbinomial_scale_decouple(output, loc=None, scale=None): """Negative Binomial Scale Decouple @@ -964,10 +908,12 @@ class Tweedie(Distribution): Series B (Methodological), 49(2), 127–162. http://www.jstor.org/stable/2345415](http://www.jstor.org/stable/2345415)
""" + arg_constraints = {"log_mu": constraints.real} + support = constraints.nonnegative + def __init__(self, log_mu, rho, validate_args=None): # TODO: add sigma2 dispersion # TODO add constraints - # arg_constraints = {'log_mu': constraints.real, 'rho': constraints.positive} # support = constraints.real self.log_mu = log_mu self.rho = rho @@ -1001,7 +947,7 @@ def sample(self, sample_shape=torch.Size()): beta = beta.expand(shape) N = torch.poisson(rate) + 1e-5 - gamma = torch.distributions.gamma.Gamma(N * alpha, beta) + gamma = Gamma(N * alpha, beta) samples = gamma.sample() samples[N == 0] = 0 @@ -1017,12 +963,12 @@ def log_prob(self, y_true): return a - b -def tweedie_domain_map(input: torch.Tensor): +def tweedie_domain_map(input: torch.Tensor, rho: float = 1.5): """ Maps output of neural network to domain of distribution loss """ - return (input.squeeze(-1),) + return (input, rho) def tweedie_scale_decouple(output, loc=None, scale=None): @@ -1032,14 +978,14 @@ def tweedie_scale_decouple(output, loc=None, scale=None): count and logits based on anchoring `loc`, `scale`. Also adds Tweedie domain protection to the distribution parameters. """ - log_mu = output[0] + log_mu, rho = output log_mu = F.softplus(log_mu) log_mu = torch.clamp(log_mu, 1e-9, 37) if (loc is not None) and (scale is not None): log_mu += torch.log(loc) log_mu = torch.clamp(log_mu, 1e-9, 37) - return (log_mu,) + return (log_mu, rho) # %% ../../nbs/losses.pytorch.ipynb 67 # Code adapted from: https://github.com/awslabs/gluonts/blob/61133ef6e2d88177b32ace4afc6843ab9a7bc8cd/src/gluonts/torch/distributions/isqf.py @@ -1097,6 +1043,14 @@ def crps(self, y: torch.Tensor) -> torch.Tensor: p = self.base_dist.crps(z) return p * scale + @property + def mean(self): + """ + Function used to compute the empirical mean + """ + samples = self.sample([1000]) + return samples.mean(dim=0) + class BaseISQF(Distribution): """ @@ -1753,7 +1707,7 @@ def isqf_domain_map( last dimension is of matching `distr_args` length. **Parameters:**
- `input`: tensor, of dimensions [B,T,H,theta] or [B,H,theta].
+ `input`: tensor, of dimensions [B, H, N * n_outputs].
`tol`: float, tolerance.
`quantiles`: tensor, quantiles used for ISQF (i.e. x-positions for the knots).
`num_pieces`: int, num_pieces used for each quantile spline.
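Alongside the ISQF change, the Tweedie hunks above thread the power parameter `rho` from the loss configuration through `tweedie_domain_map` into the distribution arguments instead of dropping it, and `tweedie_scale_decouple` now squashes the raw network output through a softplus and clamps it so the implied mean exp(log_mu) stays finite. A minimal standalone sketch of the new parameter flow (the toy shapes are illustrative, not from the diff):

import torch
import torch.nn.functional as F

def tweedie_domain_map(input: torch.Tensor, rho: float = 1.5):
    # rho is now carried alongside the raw output instead of being dropped
    return (input, rho)

def tweedie_scale_decouple(output, loc=None, scale=None):
    log_mu, rho = output
    log_mu = F.softplus(log_mu)
    log_mu = torch.clamp(log_mu, 1e-9, 37)  # exp(37) ~ 1.2e16, safe in float32
    if (loc is not None) and (scale is not None):
        log_mu += torch.log(loc)
        log_mu = torch.clamp(log_mu, 1e-9, 37)
    return (log_mu, rho)

raw = torch.randn(2, 12, 1)   # toy [B, h, 1] network output
log_mu, rho = tweedie_scale_decouple(tweedie_domain_map(raw, rho=1.5))
print(log_mu.shape, rho)      # torch.Size([2, 12, 1]) 1.5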
@@ -1768,6 +1722,10 @@ def isqf_domain_map( # Because in this case the spline knots could be squeezed together # and cause overflow in spline CRPS computation num_qk = len(quantiles) + n_outputs = 2 * (num_qk - 1) * num_pieces + 2 + num_qk + + # Reshape: [B, h, N * n_outputs] -> [B, h, N, n_outputs] + input = input.reshape(input.shape[0], input.shape[1], -1, n_outputs) start_index = 0 spline_knots = input[..., start_index : start_index + (num_qk - 1) * num_pieces] start_index += (num_qk - 1) * num_pieces @@ -1777,26 +1735,19 @@ def isqf_domain_map( start_index += 1 beta_r = input[..., start_index : start_index + 1] start_index += 1 - quantile_knots = input[..., start_index : start_index + num_qk] - - qk_y = torch.cat( - [ - quantile_knots[..., 0:1], - torch.abs(quantile_knots[..., 1:]) + tol, - ], - dim=-1, - ) - qk_y = torch.cumsum(qk_y, dim=-1) + quantile_knots = F.softplus(input[..., start_index : start_index + num_qk]) + tol + + qk_y = torch.cumsum(quantile_knots, dim=-1) # Prevent overflow when we compute 1/beta - beta_l = torch.abs(beta_l.squeeze(-1)) + tol - beta_r = torch.abs(beta_r.squeeze(-1)) + tol + beta_l = F.softplus(beta_l.squeeze(-1)) + tol + beta_r = F.softplus(beta_r.squeeze(-1)) + tol # Reshape spline arguments batch_shape = spline_knots.shape[:-1] # repeat qk_x from (num_qk,) to (*batch_shape, num_qk) - qk_x_repeat = torch.sort(quantiles).values.repeat(*batch_shape, 1).to(input.device) + qk_x_repeat = quantiles.repeat(*batch_shape, 1).to(input.device) # knots and heights have shape (*batch_shape, (num_qk-1)*num_pieces) # reshape them to (*batch_shape, (num_qk-1), num_pieces) @@ -1902,15 +1853,6 @@ def __init__( Tweedie=Tweedie, ISQF=ISQF, ) - domain_maps = dict( - Bernoulli=bernoulli_domain_map, - Normal=normal_domain_map, - Poisson=poisson_domain_map, - StudentT=student_domain_map, - NegativeBinomial=nbinomial_domain_map, - Tweedie=tweedie_domain_map, - ISQF=partial(isqf_domain_map, quantiles=qs, num_pieces=num_pieces), - ) scale_decouples = dict( Bernoulli=bernoulli_scale_decouple, Normal=normal_scale_decouple, @@ -1935,9 +1877,23 @@ def __init__( assert ( distribution in available_distributions.keys() ), f"{distribution} not available" + if distribution == "ISQF": + quantiles = torch.sort(qs).values + self.domain_map = partial( + isqf_domain_map, quantiles=quantiles, num_pieces=num_pieces + ) + if return_params: + raise Exception("ISQF does not support 'return_params=True'") + elif distribution == "Tweedie": + rho = distribution_kwargs.pop("rho") + self.domain_map = partial(tweedie_domain_map, rho=rho) + if return_params: + raise Exception("Tweedie does not support 'return_params=True'") + else: + self.domain_map = self._domain_map + self.distribution = distribution self._base_distribution = available_distributions[distribution] - self.domain_map = domain_maps[distribution] self.scale_decouple = scale_decouples[distribution] self.distribution_kwargs = distribution_kwargs self.num_samples = num_samples @@ -1953,6 +1909,16 @@ def __init__( self.outputsize_multiplier = len(self.param_names) self.is_distribution_output = True + self.has_predicted = False + + def _domain_map(self, input: torch.Tensor): + """ + Maps output of neural network to domain of distribution loss + + """ + output = torch.tensor_split(input, self.outputsize_multiplier, dim=2) + + return output def get_distribution(self, distr_args, **distribution_kwargs) -> Distribution: """ @@ -1965,10 +1931,10 @@ def get_distribution(self, distr_args, **distribution_kwargs) -> Distribution: **Returns**
`Distribution`: AffineTransformed distribution.
""" - # TransformedDistribution(distr, [AffineTransform(loc=loc, scale=scale)]) distr = self._base_distribution(*distr_args, **distribution_kwargs) + self.distr_mean = distr.mean - if self.distribution == "Poisson": + if self.distribution in ("Poisson", "NegativeBinomial"): distr.support = constraints.nonnegative return distr @@ -1979,7 +1945,7 @@ def sample(self, distr_args: torch.Tensor, num_samples: Optional[int] = None): **Parameters**
`distr_args`: Constructor arguments for the underlying Distribution type.
- `num_samples`: int=500, overwrite number of samples for the empirical quantiles.
+ `num_samples`: int, overwrites the number of samples for the empirical quantiles.&#x000A;
**Returns**
`samples`: tensor, shape [B,H,`num_samples`].
@@ -1988,29 +1954,39 @@ def sample(self, distr_args: torch.Tensor, num_samples: Optional[int] = None): if num_samples is None: num_samples = self.num_samples - # print(distr_args[0].size()) - B, H = distr_args[0].shape[:2] - Q = len(self.quantiles) - # Instantiate Scaled Decoupled Distribution distr = self.get_distribution(distr_args=distr_args, **self.distribution_kwargs) samples = distr.sample(sample_shape=(num_samples,)) - samples = samples.permute(1, 2, 0) # [samples,B,H] -> [B,H,samples] - samples = samples.view(B * H, num_samples) - sample_mean = torch.mean(samples, dim=-1) + samples = samples.permute( + 1, 2, 3, 0 + ) # [samples, B, H, N] -> [B, H, N, samples] + + sample_mean = torch.mean(samples, dim=-1, keepdim=True) # Compute quantiles quantiles_device = self.quantiles.to(distr_args[0].device) - quants = torch.quantile(input=samples, q=quantiles_device, dim=1) - quants = quants.permute((1, 0)) # [Q, B*H] -> [B*H, Q] - - # Final reshapes - samples = samples.view(B, H, num_samples) - sample_mean = sample_mean.view(B, H, 1) - quants = quants.view(B, H, Q) + quants = torch.quantile(input=samples, q=quantiles_device, dim=-1) + quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q] return samples, sample_mean, quants + def update_quantile(self, q: Optional[List[float]] = None): + if q is not None: + self.quantiles = nn.Parameter( + torch.tensor(q, dtype=torch.float32), requires_grad=False + ) + self.output_names = ( + [""] + + [f"_ql{q_i}" for q_i in q] + + self.return_params * self.param_names + ) + self.has_predicted = True + elif q is None and self.has_predicted: + self.quantiles = nn.Parameter( + torch.tensor([0.5], dtype=torch.float32), requires_grad=False + ) + self.output_names = ["", "-median"] + self.return_params * self.param_names + def __call__( self, y: torch.Tensor, @@ -2029,10 +2005,6 @@ def __call__( **Parameters**
`y`: tensor, Actual values.
`distr_args`: Constructor arguments for the underlying Distribution type.
- `loc`: Optional tensor, of the same shape as the batch_shape + event_shape - of the resulting distribution.
- `scale`: Optional tensor, of the same shape as the batch_shape+event_shape - of the resulting distribution.
`mask`: tensor, Specifies date stamps per series to consider in loss.&#x000A;
**Returns**
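With the per-distribution `domain_maps` dict removed above, every distribution except ISQF and Tweedie now shares `_domain_map`, which splits the network's last dimension into `outputsize_multiplier` equal blocks, one per distribution parameter, keeping the series dimension N intact for multivariate heads; the new `update_quantile` hook then lets predict-time code swap in user-requested quantile levels and fall back to a median-only output afterwards. A shape sketch of the split (the blockwise parameter layout and even divisibility are this illustration's assumptions):

import torch

B, H, N, n_params = 2, 4, 3, 2            # e.g. a Normal head: mu and sigma per series
output = torch.randn(B, H, N * n_params)  # raw network output

# one [B, H, N] tensor per distribution parameter, split along the last axis
mu, sigma = torch.tensor_split(output, n_params, dim=2)
print(mu.shape, sigma.shape)              # torch.Size([2, 4, 3]) twice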
@@ -2079,6 +2051,7 @@ def __init__( return_params=False, batch_correlation=False, horizon_correlation=False, + weighted=False, ): super(PMM, self).__init__() # Transform level to MQLoss parameters @@ -2093,21 +2066,36 @@ def __init__( self.num_samples = num_samples self.batch_correlation = batch_correlation self.horizon_correlation = horizon_correlation + self.weighted = weighted # If True, predict_step will return Distribution's parameters self.return_params = return_params + + lambda_names = [f"-lambda-{i}" for i in range(1, n_components + 1)] + if weighted: + weight_names = [f"-weight-{i}" for i in range(1, n_components + 1)] + self.param_names = [i for j in zip(lambda_names, weight_names) for i in j] + else: + self.param_names = lambda_names + if self.return_params: - self.param_names = [f"-lambda-{i}" for i in range(1, n_components + 1)] self.output_names = self.output_names + self.param_names # Add first output entry for the sample_mean self.output_names.insert(0, "") - self.outputsize_multiplier = n_components + self.n_outputs = 1 + weighted + self.n_components = n_components + self.outputsize_multiplier = self.n_outputs * n_components self.is_distribution_output = True + self.has_predicted = False def domain_map(self, output: torch.Tensor): - return (output,) # , weights + output = output.reshape( + output.shape[0], output.shape[1], -1, self.outputsize_multiplier + ) + + return torch.tensor_split(output, self.n_outputs, dim=-1) def scale_decouple( self, @@ -2121,26 +2109,61 @@ def scale_decouple( variance and residual location based on anchoring `loc`, `scale`. Also adds domain protection to the distribution parameters. """ - lambdas = output[0] + if self.weighted: + lambdas, weights = output + weights = F.softmax(weights, dim=-1) + else: + lambdas = output[0] + if (loc is not None) and (scale is not None): - loc = loc.view(lambdas.size(dim=0), 1, -1) - scale = scale.view(lambdas.size(dim=0), 1, -1) + if loc.ndim == 3: + loc = loc.unsqueeze(-1) + scale = scale.unsqueeze(-1) lambdas = (lambdas * scale) + loc - lambdas = F.softplus(lambdas) - return (lambdas,) - def sample(self, distr_args, num_samples=None): + lambdas = F.softplus(lambdas) + 1e-3 + + if self.weighted: + return (lambdas, weights) + else: + return (lambdas,) + + def get_distribution(self, distr_args) -> Distribution: + """ + Construct the associated Pytorch Distribution, given the collection of + constructor arguments and, optionally, location and scale tensors. + + **Parameters**
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ + **Returns**
+ `Distribution`: MixtureSameFamily distribution.&#x000A;
+ """ + if self.weighted: + lambdas, weights = distr_args + else: + lambdas = distr_args[0] + weights = torch.full_like(lambdas, fill_value=1 / self.n_components) + + mix = Categorical(weights) + components = Poisson(rate=lambdas) + components.support = constraints.nonnegative + distr = MixtureSameFamily( + mixture_distribution=mix, component_distribution=components + ) + + self.distr_mean = distr.mean + + return distr + + def sample(self, distr_args: torch.Tensor, num_samples: Optional[int] = None): """ Construct the empirical quantiles from the estimated Distribution, sampling from it `num_samples` independently. **Parameters**
`distr_args`: Constructor arguments for the underlying Distribution type.
- `loc`: Optional tensor, of the same shape as the batch_shape + event_shape - of the resulting distribution.
- `scale`: Optional tensor, of the same shape as the batch_shape+event_shape - of the resulting distribution.
- `num_samples`: int=500, overwrites number of samples for the empirical quantiles.
+ `num_samples`: int, overwrites the number of samples for the empirical quantiles.&#x000A;
**Returns**
`samples`: tensor, shape [B,H,`num_samples`].
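The `get_distribution` method added above replaces PMM's hand-rolled multinomial sampling with a proper `MixtureSameFamily` object; with `weighted=False` the mixture weights default to a uniform 1/K. A small self-contained sketch of that construction (toy shapes):

import torch
from torch.distributions import Categorical, MixtureSameFamily, Poisson

B, H, N, K = 2, 4, 1, 3                    # K Poisson components per series
lambdas = torch.rand(B, H, N, K) + 0.1     # positive rates, as after softplus

weights = torch.full_like(lambdas, 1 / K)  # uniform weights in the unweighted case
mix = MixtureSameFamily(
    mixture_distribution=Categorical(probs=weights),
    component_distribution=Poisson(rate=lambdas),
)

y = torch.poisson(2 * torch.ones(B, H, N))
print(mix.log_prob(y).shape)               # torch.Size([2, 4, 1])
print(mix.mean.shape)                      # mixture mean, stored as distr_mean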
@@ -2149,100 +2172,75 @@ if num_samples is None: num_samples = self.num_samples - lambdas = distr_args[0] - B, H, K = lambdas.size() - Q = len(self.quantiles) - - # Sample K ~ Mult(weights) - # shared across B, H - # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2) - weights = (1 / K) * torch.ones_like(lambdas, device=lambdas.device) - - # Avoid loop, vectorize - weights = weights.reshape(-1, K) - lambdas = lambdas.flatten() - - # Vectorization trick to recover row_idx - sample_idxs = torch.multinomial( - input=weights, num_samples=num_samples, replacement=True - ) - aux_col_idx = ( - torch.unsqueeze(torch.arange(B * H, device=lambdas.device), -1) * K - ) - - # To device - sample_idxs = sample_idxs.to(lambdas.device) - - sample_idxs = sample_idxs + aux_col_idx - sample_idxs = sample_idxs.flatten() - - sample_lambdas = lambdas[sample_idxs] + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + samples = distr.sample(sample_shape=(num_samples,)) + samples = samples.permute( + 1, 2, 3, 0 + ) # [samples, B, H, N] -> [B, H, N, samples] - # Sample y ~ Poisson(lambda) independently - samples = torch.poisson(sample_lambdas).to(lambdas.device) - samples = samples.view(B * H, num_samples) - sample_mean = torch.mean(samples, dim=-1) + sample_mean = torch.mean(samples, dim=-1, keepdim=True) # Compute quantiles - quantiles_device = self.quantiles.to(lambdas.device) - quants = torch.quantile(input=samples, q=quantiles_device, dim=1) - quants = quants.permute((1, 0)) # Q, B*H - - # Final reshapes - samples = samples.view(B, H, num_samples) - sample_mean = sample_mean.view(B, H, 1) - quants = quants.view(B, H, Q) + quantiles_device = self.quantiles.to(distr_args[0].device) + quants = torch.quantile(input=samples, q=quantiles_device, dim=-1) + quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q] return samples, sample_mean, quants - def neglog_likelihood( + def update_quantile(self, q: Optional[List[float]] = None): + if q is not None: + self.quantiles = nn.Parameter( + torch.tensor(q, dtype=torch.float32), requires_grad=False + ) + self.output_names = ( + [""] + + [f"_ql{q_i}" for q_i in q] + + self.return_params * self.param_names + ) + self.has_predicted = True + elif q is None and self.has_predicted: + self.quantiles = nn.Parameter( + torch.tensor([0.5], dtype=torch.float32), requires_grad=False + ) + self.output_names = ["", "-median"] + self.return_params * self.param_names + + def __call__( self, y: torch.Tensor, - distr_args: Tuple[torch.Tensor], + distr_args: torch.Tensor, mask: Union[torch.Tensor, None] = None, ): - if mask is None: - mask = (y > 0) * 1 - else: - mask = mask * ((y > 0) * 1) - - eps = 1e-10 - lambdas = distr_args[0] - B, H, K = lambdas.size() - - weights = (1 / K) * torch.ones_like(lambdas, device=lambdas.device) + """ + Computes the negative log-likelihood objective function. + To estimate the following predictive distribution: - y = y[:, :, None] - mask = mask[:, :, None] + $$\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta) \\quad \mathrm{and} \\quad -\log(\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta))$$ - y = y * mask # Protect y negative entries + where $\\theta$ represents the distribution's parameters. It additionally + summarizes the objective signal using a weighted average over the `mask` tensor. - # Single Poisson likelihood - log_pi = y.xlogy(lambdas + eps) - lambdas - (y + 1).lgamma() + **Parameters**&#x000A;
+ `y`: tensor, Actual values.
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ `mask`: tensor, Specifies date stamps per series to consider in loss.&#x000A;
+ **Returns**
+ `loss`: scalar, weighted loss function against which backpropagation will be performed.
+ """ + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + x = distr._pad(y) + log_prob_x = distr.component_distribution.log_prob(x) + log_mix_prob = torch.log_softmax(distr.mixture_distribution.logits, dim=-1) if self.batch_correlation: - log_pi = torch.sum(log_pi, dim=0, keepdim=True) - + log_prob_x = torch.sum(log_prob_x, dim=0, keepdim=True) if self.horizon_correlation: - log_pi = torch.sum(log_pi, dim=1, keepdim=True) - - # Numerically Stable Mixture loglikelihood - loglik = torch.logsumexp((torch.log(weights) + log_pi), dim=2, keepdim=True) - loglik = loglik * mask + log_prob_x = torch.sum(log_prob_x, dim=1, keepdim=True) - mean = torch.sum(weights * lambdas, axis=-1, keepdims=True) - reglrz = torch.mean(torch.square(y - mean) * mask) - loss = -torch.mean(loglik) + 0.001 * reglrz - return loss - - def __call__( - self, - y: torch.Tensor, - distr_args: Tuple[torch.Tensor], - mask: Union[torch.Tensor, None] = None, - ): + loss_values = -torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) - return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask) + return weighted_average(loss_values, weights=mask) # %% ../../nbs/losses.pytorch.ipynb 82 class GMM(torch.nn.Module): @@ -2280,6 +2278,7 @@ def __init__( return_params=False, batch_correlation=False, horizon_correlation=False, + weighted=False, ): super(GMM, self).__init__() # Transform level to MQLoss parameters @@ -2294,24 +2293,39 @@ def __init__( self.num_samples = num_samples self.batch_correlation = batch_correlation self.horizon_correlation = horizon_correlation + self.weighted = weighted # If True, predict_step will return Distribution's parameters self.return_params = return_params + + mu_names = [f"-mu-{i}" for i in range(1, n_components + 1)] + std_names = [f"-std-{i}" for i in range(1, n_components + 1)] + if weighted: + weight_names = [f"-weight-{i}" for i in range(1, n_components + 1)] + self.param_names = [ + i for j in zip(mu_names, std_names, weight_names) for i in j + ] + else: + self.param_names = [i for j in zip(mu_names, std_names) for i in j] + if self.return_params: - mu_names = [f"-mu-{i}" for i in range(1, n_components + 1)] - std_names = [f"-std-{i}" for i in range(1, n_components + 1)] - mu_std_names = [i for j in zip(mu_names, std_names) for i in j] - self.output_names = self.output_names + mu_std_names + self.output_names = self.output_names + self.param_names # Add first output entry for the sample_mean self.output_names.insert(0, "") - self.outputsize_multiplier = 2 * n_components + self.n_outputs = 2 + weighted + self.n_components = n_components + self.outputsize_multiplier = self.n_outputs * n_components self.is_distribution_output = True + self.has_predicted = False def domain_map(self, output: torch.Tensor): - means, stds = torch.tensor_split(output, 2, dim=-1) - return (means, stds) + output = output.reshape( + output.shape[0], output.shape[1], -1, self.outputsize_multiplier + ) + + return torch.tensor_split(output, self.n_outputs, dim=-1) def scale_decouple( self, @@ -2326,130 +2340,136 @@ def scale_decouple( variance and residual location based on anchoring `loc`, `scale`. Also adds domain protection to the distribution parameters. 
""" - means, stds = output + if self.weighted: + means, stds, weights = output + weights = F.softmax(weights, dim=-1) + else: + means, stds = output + stds = F.softplus(stds) if (loc is not None) and (scale is not None): - loc = loc.view(means.size(dim=0), 1, -1) - scale = scale.view(means.size(dim=0), 1, -1) + if loc.ndim == 3: + loc = loc.unsqueeze(-1) + scale = scale.unsqueeze(-1) means = (means * scale) + loc stds = (stds + eps) * scale - return (means, stds) - def sample(self, distr_args, num_samples=None): + if self.weighted: + return (means, stds, weights) + else: + return (means, stds) + + def get_distribution(self, distr_args) -> Distribution: """ - Construct the empirical quantiles from the estimated Distribution, - sampling from it `num_samples` independently. + Construct the associated Pytorch Distribution, given the collection of + constructor arguments and, optionally, location and scale tensors. **Parameters**
`distr_args`: Constructor arguments for the underlying Distribution type.
- `loc`: Optional tensor, of the same shape as the batch_shape + event_shape - of the resulting distribution.
- `scale`: Optional tensor, of the same shape as the batch_shape+event_shape - of the resulting distribution.
- `num_samples`: int=500, number of samples for the empirical quantiles.
**Returns**
- `samples`: tensor, shape [B,H,`num_samples`].
- `quantiles`: tensor, empirical quantiles defined by `levels`.
+ `Distribution`: MixtureSameFamily distribution.&#x000A;
""" - if num_samples is None: - num_samples = self.num_samples - - means, stds = distr_args - B, H, K = means.size() - Q = len(self.quantiles) - assert means.shape == stds.shape + if self.weighted: + means, stds, weights = distr_args + else: + means, stds = distr_args + weights = torch.full_like(means, fill_value=1 / self.n_components) - # Sample K ~ Mult(weights) - # shared across B, H - # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2) + mix = Categorical(weights) + components = Normal(loc=means, scale=stds) + distr = MixtureSameFamily( + mixture_distribution=mix, component_distribution=components + ) - weights = (1 / K) * torch.ones_like(means, device=means.device) + self.distr_mean = distr.mean - # Avoid loop, vectorize - weights = weights.reshape(-1, K) - means = means.flatten() - stds = stds.flatten() + return distr - # Vectorization trick to recover row_idx - sample_idxs = torch.multinomial( - input=weights, num_samples=num_samples, replacement=True - ) - aux_col_idx = torch.unsqueeze(torch.arange(B * H, device=means.device), -1) * K + def sample(self, distr_args: torch.Tensor, num_samples: Optional[int] = None): + """ + Construct the empirical quantiles from the estimated Distribution, + sampling from it `num_samples` independently. - # To device - sample_idxs = sample_idxs.to(means.device) + **Parameters**
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ `num_samples`: int, overwrites the number of samples for the empirical quantiles.&#x000A;
- sample_idxs = sample_idxs + aux_col_idx - sample_idxs = sample_idxs.flatten() + **Returns**
+ `samples`: tensor, shape [B,H,`num_samples`].
+ `quantiles`: tensor, empirical quantiles defined by `levels`.
+ """ + if num_samples is None: + num_samples = self.num_samples - sample_means = means[sample_idxs] - sample_stds = stds[sample_idxs] + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + samples = distr.sample(sample_shape=(num_samples,)) + samples = samples.permute( + 1, 2, 3, 0 + ) # [samples, B, H, N] -> [B, H, N, samples] - # Sample y ~ Normal(mu, std) independently - samples = torch.normal(sample_means, sample_stds).to(means.device) - samples = samples.view(B * H, num_samples) - sample_mean = torch.mean(samples, dim=-1) + sample_mean = torch.mean(samples, dim=-1, keepdim=True) # Compute quantiles - quantiles_device = self.quantiles.to(means.device) - quants = torch.quantile(input=samples, q=quantiles_device, dim=1) - quants = quants.permute((1, 0)) # Q, B*H - - # Final reshapes - samples = samples.view(B, H, num_samples) - sample_mean = sample_mean.view(B, H, 1) - quants = quants.view(B, H, Q) + quantiles_device = self.quantiles.to(distr_args[0].device) + quants = torch.quantile(input=samples, q=quantiles_device, dim=-1) + quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q] return samples, sample_mean, quants - def neglog_likelihood( + def update_quantile(self, q: Optional[List[float]] = None): + if q is not None: + self.quantiles = nn.Parameter( + torch.tensor(q, dtype=torch.float32), requires_grad=False + ) + self.output_names = ( + [""] + + [f"_ql{q_i}" for q_i in q] + + self.return_params * self.param_names + ) + self.has_predicted = True + elif q is None and self.has_predicted: + self.quantiles = nn.Parameter( + torch.tensor([0.5], dtype=torch.float32), requires_grad=False + ) + self.output_names = ["", "-median"] + self.return_params * self.param_names + + def __call__( self, y: torch.Tensor, - distr_args: Tuple[torch.Tensor, torch.Tensor], + distr_args: torch.Tensor, mask: Union[torch.Tensor, None] = None, ): + """ + Computes the negative log-likelihood objective function. + To estimate the following predictive distribution: - if mask is None: - mask = torch.ones_like(y) - - means, stds = distr_args - B, H, K = means.size() - - weights = (1 / K) * torch.ones_like(means, device=means.device) + $$\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta) \\quad \mathrm{and} \\quad -\log(\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta))$$ - y = y[:, :, None] - mask = mask[:, :, None] + where $\\theta$ represents the distributions parameters. It aditionally + summarizes the objective signal using a weighted average using the `mask` tensor. - var = stds**2 - log_stds = torch.log(stds) - log_pi = ( - -((y - means) ** 2 / (2 * var)) - - log_stds - - math.log(math.sqrt(2 * math.pi)) - ) + **Parameters**
+ `y`: tensor, Actual values.
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ `mask`: tensor, Specifies date stamps per series to consider in loss.&#x000A;
+ **Returns**
+ `loss`: scalar, weighted loss function against which backpropagation will be performed.
+ """ + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + x = distr._pad(y) + log_prob_x = distr.component_distribution.log_prob(x) + log_mix_prob = torch.log_softmax(distr.mixture_distribution.logits, dim=-1) if self.batch_correlation: - log_pi = torch.sum(log_pi, dim=0, keepdim=True) - + log_prob_x = torch.sum(log_prob_x, dim=0, keepdim=True) if self.horizon_correlation: - log_pi = torch.sum(log_pi, dim=1, keepdim=True) - - # Numerically Stable Mixture loglikelihood - loglik = torch.logsumexp((torch.log(weights) + log_pi), dim=2, keepdim=True) - loglik = loglik * mask - - loss = -torch.mean(loglik) - return loss - - def __call__( - self, - y: torch.Tensor, - distr_args: Tuple[torch.Tensor, torch.Tensor], - mask: Union[torch.Tensor, None] = None, - ): + log_prob_x = torch.sum(log_prob_x, dim=1, keepdim=True) + loss_values = -torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) - return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask) + return weighted_average(loss_values, weights=mask) # %% ../../nbs/losses.pytorch.ipynb 90 class NBMM(torch.nn.Module): @@ -2483,6 +2503,7 @@ def __init__( quantiles=None, num_samples=1000, return_params=False, + weighted=False, ): super(NBMM, self).__init__() # Transform level to MQLoss parameters @@ -2495,26 +2516,41 @@ def __init__( qs = torch.Tensor(quantiles) self.quantiles = torch.nn.Parameter(qs, requires_grad=False) self.num_samples = num_samples + self.weighted = weighted # If True, predict_step will return Distribution's parameters self.return_params = return_params - if self.return_params: - total_count_names = [ - f"-total_count-{i}" for i in range(1, n_components + 1) + + total_count_names = [f"-total_count-{i}" for i in range(1, n_components + 1)] + probs_names = [f"-probs-{i}" for i in range(1, n_components + 1)] + if weighted: + weight_names = [f"-weight-{i}" for i in range(1, n_components + 1)] + self.param_names = [ + i for j in zip(total_count_names, probs_names, weight_names) for i in j + ] + else: + self.param_names = [ + i for j in zip(total_count_names, probs_names) for i in j ] - probs_names = [f"-probs-{i}" for i in range(1, n_components + 1)] - param_names = [i for j in zip(total_count_names, probs_names) for i in j] - self.output_names = self.output_names + param_names + + if self.return_params: + self.output_names = self.output_names + self.param_names # Add first output entry for the sample_mean self.output_names.insert(0, "") - self.outputsize_multiplier = 2 * n_components + self.n_outputs = 2 + weighted + self.n_components = n_components + self.outputsize_multiplier = self.n_outputs * n_components self.is_distribution_output = True + self.has_predicted = False def domain_map(self, output: torch.Tensor): - mu, alpha = torch.tensor_split(output, 2, dim=-1) - return (mu, alpha) + output = output.reshape( + output.shape[0], output.shape[1], -1, self.outputsize_multiplier + ) + + return torch.tensor_split(output, self.n_outputs, dim=-1) def scale_decouple( self, @@ -2530,11 +2566,18 @@ def scale_decouple( Also adds domain protection to the distribution parameters. 
""" # Efficient NBinomial parametrization - mu, alpha = output + if self.weighted: + mu, alpha, weights = output + weights = F.softmax(weights, dim=-1) + else: + mu, alpha = output + mu = F.softplus(mu) + 1e-8 alpha = F.softplus(alpha) + 1e-8 # alpha = 1/total_counts if (loc is not None) and (scale is not None): - loc = loc.view(mu.size(dim=0), 1, -1) + if loc.ndim == 3: + loc = loc.unsqueeze(-1) + scale = scale.unsqueeze(-1) mu *= loc alpha /= loc + 1.0 @@ -2543,20 +2586,47 @@ def scale_decouple( # => probs = mu / [total_count * (1 + mu * (1/total_count))] total_count = 1.0 / alpha probs = (mu * alpha / (1.0 + mu * alpha)) + 1e-8 - return (total_count, probs) + if self.weighted: + return (total_count, probs, weights) + else: + return (total_count, probs) + + def get_distribution(self, distr_args) -> Distribution: + """ + Construct the associated Pytorch Distribution, given the collection of + constructor arguments and, optionally, location and scale tensors. + + **Parameters**
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ + **Returns**
+ `Distribution`: MixtureSameFamily distribution.&#x000A;
+ """ + if self.weighted: + total_count, probs, weights = distr_args + else: + total_count, probs = distr_args + weights = torch.full_like(total_count, fill_value=1 / self.n_components) + + mix = Categorical(weights) + components = NegativeBinomial(total_count, probs) + components.support = constraints.nonnegative + distr = MixtureSameFamily( + mixture_distribution=mix, component_distribution=components + ) + + self.distr_mean = distr.mean + + return distr - def sample(self, distr_args, num_samples=None): + def sample(self, distr_args: torch.Tensor, num_samples: Optional[int] = None): """ Construct the empirical quantiles from the estimated Distribution, sampling from it `num_samples` independently. **Parameters**
`distr_args`: Constructor arguments for the underlying Distribution type.
- `loc`: Optional tensor, of the same shape as the batch_shape + event_shape - of the resulting distribution.
- `scale`: Optional tensor, of the same shape as the batch_shape+event_shape - of the resulting distribution.
- `num_samples`: int=500, number of samples for the empirical quantiles.
+ `num_samples`: int, overwrites the number of samples for the empirical quantiles.&#x000A;
**Returns**
`samples`: tensor, shape [B,H,`num_samples`].
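The rewritten PMM and GMM `__call__` methods above compute the mixture negative log-likelihood manually, from component log-probabilities plus log mixture weights collapsed with `logsumexp`; that detour is what allows the optional `batch_correlation`/`horizon_correlation` sums over the component log-probs before the mixture is reduced. NBMM, which offers neither flag, reduces `-distr.log_prob(y)` directly in the hunks below. The two routes agree, as this small check sketches (Normal components for simplicity; `_pad` is the same private helper the diff relies on):

import torch
from torch.distributions import Categorical, MixtureSameFamily, Normal

B, H, N, K = 2, 3, 1, 4
means = torch.randn(B, H, N, K)
stds = torch.rand(B, H, N, K) + 0.5
weights = torch.softmax(torch.randn(B, H, N, K), dim=-1)

distr = MixtureSameFamily(Categorical(probs=weights), Normal(means, stds))
y = torch.randn(B, H, N)

# manual route, mirroring the new PMM/GMM __call__
x = distr._pad(y)                                      # [B,H,N] -> [B,H,N,1]
log_prob_x = distr.component_distribution.log_prob(x)  # [B,H,N,K]
log_mix_prob = torch.log_softmax(distr.mixture_distribution.logits, dim=-1)
manual = torch.logsumexp(log_prob_x + log_mix_prob, dim=-1)

assert torch.allclose(manual, distr.log_prob(y), atol=1e-5)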
@@ -2565,105 +2635,68 @@ def sample(self, distr_args, num_samples=None): if num_samples is None: num_samples = self.num_samples - total_count, probs = distr_args - B, H, K = total_count.size() - Q = len(self.quantiles) - assert total_count.shape == probs.shape - - # Sample K ~ Mult(weights) - # shared across B, H - # weights = torch.repeat_interleave(input=weights, repeats=H, dim=2) - - weights = (1 / K) * torch.ones_like(probs, device=probs.device) - - # Avoid loop, vectorize - weights = weights.reshape(-1, K) - total_count = total_count.flatten() - probs = probs.flatten() - - # Vectorization trick to recover row_idx - sample_idxs = torch.multinomial( - input=weights, num_samples=num_samples, replacement=True - ) - aux_col_idx = torch.unsqueeze(torch.arange(B * H, device=probs.device), -1) * K - - # To device - sample_idxs = sample_idxs.to(probs.device) - - sample_idxs = sample_idxs + aux_col_idx - sample_idxs = sample_idxs.flatten() - - sample_total_count = total_count[sample_idxs] - sample_probs = probs[sample_idxs] + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + samples = distr.sample(sample_shape=(num_samples,)) + samples = samples.permute( + 1, 2, 3, 0 + ) # [samples, B, H, N] -> [B, H, N, samples] - # Sample y ~ NBinomial(total_count, probs) independently - dist = NegativeBinomial(total_count=sample_total_count, probs=sample_probs) - samples = dist.sample(sample_shape=(1,)).to(probs.device)[0] - samples = samples.view(B * H, num_samples) - sample_mean = torch.mean(samples, dim=-1) + sample_mean = torch.mean(samples, dim=-1, keepdim=True) # Compute quantiles - quantiles_device = self.quantiles.to(probs.device) - quants = torch.quantile(input=samples, q=quantiles_device, dim=1) - quants = quants.permute((1, 0)) # Q, B*H - - # Final reshapes - samples = samples.view(B, H, num_samples) - sample_mean = sample_mean.view(B, H, 1) - quants = quants.view(B, H, Q) + quantiles_device = self.quantiles.to(distr_args[0].device) + quants = torch.quantile(input=samples, q=quantiles_device, dim=-1) + quants = quants.permute(1, 2, 3, 0) # [Q, B, H, N] -> [B, H, N, Q] return samples, sample_mean, quants - def neglog_likelihood( + def update_quantile(self, q: Optional[List[float]] = None): + if q is not None: + self.quantiles = nn.Parameter( + torch.tensor(q, dtype=torch.float32), requires_grad=False + ) + self.output_names = ( + [""] + + [f"_ql{q_i}" for q_i in q] + + self.return_params * self.param_names + ) + self.has_predicted = True + elif q is None and self.has_predicted: + self.quantiles = nn.Parameter( + torch.tensor([0.5], dtype=torch.float32), requires_grad=False + ) + self.output_names = ["", "-median"] + self.return_params * self.param_names + + def __call__( self, y: torch.Tensor, - distr_args: Tuple[torch.Tensor, torch.Tensor], + distr_args: torch.Tensor, mask: Union[torch.Tensor, None] = None, ): + """ + Computes the negative log-likelihood objective function. 
+ To estimate the following predictive distribution: - if mask is None: - mask = torch.ones_like(y) - - total_count, probs = distr_args - B, H, K = total_count.size() - - weights = (1 / K) * torch.ones_like(probs, device=probs.device) - - y = y[:, :, None] - mask = mask[:, :, None] - - log_unnormalized_prob = total_count * torch.log(1.0 - probs) + y * torch.log( - probs - ) - log_normalization = ( - -torch.lgamma(total_count + y) - + torch.lgamma(1.0 + y) - + torch.lgamma(total_count) - ) - log_normalization[total_count + y == 0.0] = 0.0 - log = log_unnormalized_prob - log_normalization - - # log = torch.sum(log, dim=0, keepdim=True) # Joint within batch/group - # log = torch.sum(log, dim=1, keepdim=True) # Joint within horizon - - # Numerical stability mixture and loglik - log_max = torch.amax(log, dim=2, keepdim=True) # [1,1,K] (collapsed joints) - lik = weights * torch.exp(log - log_max) # Take max - loglik = torch.log(torch.sum(lik, dim=2, keepdim=True)) + log_max # Return max + $$\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta) \\quad \mathrm{and} \\quad -\log(\mathrm{P}(\mathbf{y}_{\\tau}\,|\,\\theta))$$ - loglik = loglik * mask # replace with mask + where $\\theta$ represents the distribution's parameters. It additionally + summarizes the objective signal using a weighted average over the `mask` tensor. - loss = -torch.mean(loglik) - return loss + **Parameters**&#x000A;
+ `y`: tensor, Actual values.
+ `distr_args`: Constructor arguments for the underlying Distribution type.
+ `mask`: tensor, Specifies date stamps per series to consider in loss.&#x000A;
- def __call__( - self, - y: torch.Tensor, - distr_args: Tuple[torch.Tensor, torch.Tensor], - mask: Union[torch.Tensor, None] = None, - ): + **Returns**
+ `loss`: scalar, weighted loss function against which backpropagation will be performed.
+ """ + # Instantiate Scaled Decoupled Distribution + distr = self.get_distribution(distr_args=distr_args) + loss_values = -distr.log_prob(y) + loss_weights = mask - return self.neglog_likelihood(y=y, distr_args=distr_args, mask=mask) + return weighted_average(loss_values, weights=loss_weights) # %% ../../nbs/losses.pytorch.ipynb 97 class HuberLoss(BasePointLoss): @@ -2702,8 +2735,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
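All three mixture losses above now reduce with `weighted_average(loss_values, weights=mask)` rather than a bare `torch.mean`. That helper is referenced but not defined in any hunk shown here; a rough sketch of the masked-mean semantics it is assumed to implement (the zero-weight masking and clamped denominator are assumptions, echoing the GluonTS helper of the same name):

import torch
from typing import Optional

def weighted_average(x: torch.Tensor,
                     weights: Optional[torch.Tensor] = None) -> torch.Tensor:
    # hypothetical sketch: mean of x counting only entries with nonzero weight
    if weights is None:
        return x.mean()
    masked = torch.where(weights != 0, x * weights, torch.zeros_like(x))
    return masked.sum() / weights.sum().clamp(min=1.0)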
@@ -2718,7 +2752,7 @@ def __call__( return _weighted_mean(losses=losses, weights=weights) # %% ../../nbs/losses.pytorch.ipynb 102 -class TukeyLoss(torch.nn.Module): +class TukeyLoss(BasePointLoss): """ Tukey Loss The Tukey loss function, also known as Tukey's biweight function, is a @@ -2758,10 +2792,14 @@ def __init__(self, c: float = 4.685, normalize: bool = True): def domain_map(self, y_hat: torch.Tensor): """ - Univariate loss operates in dimension [B,T,H]/[B,H] - This changes the network's output from [B,H,1]->[B,H] + Input: + Univariate: [B, H, 1] + Multivariate: [B, H, N] + + Output: [B, H, N] """ - return y_hat.squeeze(-1) + + return y_hat def masked_mean(self, x, mask, dim): x_nan = x.masked_fill(mask < 1, float("nan")) @@ -2773,8 +2811,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
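For reference, the Tukey biweight that `TukeyLoss` is built around is a bounded loss: residuals beyond the cutoff `c` all contribute the same maximal penalty c**2 / 6, which is what makes the loss robust to outliers. A sketch of the underlying function only (the class additionally normalizes residuals and masks missing steps):

import torch

def tukey_biweight(error: torch.Tensor, c: float = 4.685) -> torch.Tensor:
    # grows roughly quadratically near zero, saturates at c**2 / 6
    # once |error| exceeds the cutoff c
    e = torch.clamp(torch.abs(error), max=c)
    return (c**2 / 6) * (1 - (1 - (e / c) ** 2) ** 3)

print(tukey_biweight(torch.tensor([0.0, 1.0, 10.0])))  # last value saturates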
@@ -2844,8 +2883,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
@@ -2855,6 +2895,7 @@ def __call__( **Returns:**
`huber_qloss`: tensor (single value). """ + error = y_hat - y zero_error = torch.zeros_like(error) sq = torch.maximum(-error, zero_error) @@ -2914,9 +2955,17 @@ def __init__( def domain_map(self, y_hat: torch.Tensor): """ - Identity domain map [B,T,H,Q]/[B,H,Q] + Input: + Univariate: [B, H, 1 * Q] + Multivariate: [B, H, N * Q] + + Output: [B, H, N, Q] """ - return y_hat + output = y_hat.reshape( + y_hat.shape[0], y_hat.shape[1], -1, self.outputsize_multiplier + ) + + return output def _compute_weights(self, y, mask): """ @@ -2924,28 +2973,26 @@ def _compute_weights(self, y, mask): Set horizon_weight to a ones[H] tensor if not set. If set, check that it has the same length as the horizon in x. """ - if mask is None: - mask = torch.ones_like(y, device=y.device) - else: - mask = mask.unsqueeze(1) # Add Q dimension. if self.horizon_weight is None: - self.horizon_weight = torch.ones(mask.shape[-1]) + weights = torch.ones_like(mask) else: - assert mask.shape[-1] == len( + assert mask.shape[1] == len( self.horizon_weight ), "horizon_weight must have same length as Y" + weights = self.horizon_weight.clone() + weights = weights[None, :, None, None].to(mask.device) + weights = torch.ones_like(mask, device=mask.device) * weights - weights = self.horizon_weight.clone() - weights = torch.ones_like(mask, device=mask.device) * weights.to(mask.device) return weights * mask def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**
`y`: tensor, Actual values.
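`HuberMQLoss.domain_map` above now reshapes the flat network output into an explicit quantile axis, and the rewritten `__call__` below broadcasts a [1, 1, 1, Q] quantile vector against a [B, H, N, Q] error tensor instead of axis-swapping per base class. A shape-only sketch using the plain (non-huberized) pinball terms for clarity:

import torch

B, H, N, Q = 2, 6, 3, 5
quantiles = torch.linspace(0.1, 0.9, Q)

y_hat = torch.randn(B, H, N * Q).reshape(B, H, N, Q)  # domain_map output layout
y = torch.randn(B, H, N).unsqueeze(-1)                # [B, H, N] -> [B, H, N, 1]

error = y_hat - y                                     # broadcasts to [B, H, N, Q]
q = quantiles[None, None, None, :]                    # align Q on the last axis
sq = torch.relu(-error)                               # under-prediction branch
s1_q = torch.relu(error)                              # over-prediction branch
pinball = q * sq + (1 - q) * s1_q                     # the diff huberizes each term
print(pinball.shape)                                  # torch.Size([2, 6, 3, 5])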
@@ -2955,35 +3002,33 @@ def __call__( **Returns:**
`hmqloss`: tensor (single value). """ + y = y.unsqueeze(-1) + + if mask is not None: + mask = mask.unsqueeze(-1) + else: + mask = torch.ones_like(y, device=y.device) + + error = y_hat - y - error = y_hat - y.unsqueeze(-1) zero_error = torch.zeros_like(error) sq = torch.maximum(-error, torch.zeros_like(error)) s1_q = torch.maximum(error, torch.zeros_like(error)) + + quantiles = self.quantiles[None, None, None, :] losses = F.huber_loss( - self.quantiles * sq, zero_error, reduction="none", delta=self.delta + quantiles * sq, zero_error, reduction="none", delta=self.delta ) + F.huber_loss( - (1 - self.quantiles) * s1_q, zero_error, reduction="none", delta=self.delta + (1 - quantiles) * s1_q, zero_error, reduction="none", delta=self.delta ) - losses = (1 / len(self.quantiles)) * losses - - if y_hat.ndim == 3: # BaseWindows - losses = losses.swapaxes( - -2, -1 - ) # [B,H,Q] -> [B,Q,H] (needed for horizon weighting, H at the end) - elif y_hat.ndim == 4: # BaseRecurrent - losses = losses.swapaxes(-2, -1) - losses = losses.swapaxes( - -2, -3 - ) # [B,seq_len,H,Q] -> [B,Q,seq_len,H] (needed for horizon weighting, H at the end) + losses = (1 / len(self.quantiles)) * losses - weights = self._compute_weights(y=losses, mask=mask) # Use losses for extra dim - # NOTE: Weights do not have Q dimension. + weights = self._compute_weights(y=losses, mask=mask) return _weighted_mean(losses=losses, weights=weights) # %% ../../nbs/losses.pytorch.ipynb 118 -class Accuracy(torch.nn.Module): +class Accuracy(BasePointLoss): """Accuracy Computes the accuracy between categorical `y` and `y_hat`. @@ -2999,20 +3044,26 @@ def __init__( ): super(Accuracy, self).__init__() self.is_distribution_output = False + self.outputsize_multiplier = 1 def domain_map(self, y_hat: torch.Tensor): """ - Univariate loss operates in dimension [B,T,H]/[B,H] - This changes the network's output from [B,H,1]->[B,H] + Input: + Univariate: [B, H, 1] + Multivariate: [B, H, N] + + Output: [B, H, N] """ - return y_hat.squeeze(-1) + + return y_hat def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**&#x000A;
`y`: tensor, Actual values.
@@ -3022,15 +3073,16 @@ def __call__( **Returns:**
`accuracy`: tensor (single value). """ + if mask is None: mask = torch.ones_like(y_hat) - measure = (y.unsqueeze(-1) == y_hat) * mask.unsqueeze(-1) + measure = (y == y_hat) * mask accuracy = torch.mean(measure) return accuracy # %% ../../nbs/losses.pytorch.ipynb 122 -class sCRPS(torch.nn.Module): +class sCRPS(BasePointLoss): """Scaled Continuous Ranked Probability Score Calculates a scaled variation of the CRPS, as proposed by Rangapuram (2021), @@ -3070,8 +3122,9 @@ def __call__( self, y: torch.Tensor, y_hat: torch.Tensor, + y_insample: torch.Tensor, mask: Union[torch.Tensor, None] = None, - ): + ) -> torch.Tensor: """ **Parameters:**&#x000A;
`y`: tensor, Actual values.
@@ -3081,7 +3134,7 @@ def __call__( **Returns:**
`scrps`: tensor (single value). """ - mql = self.mql(y=y, y_hat=y_hat, mask=mask) + mql = self.mql(y=y, y_hat=y_hat, mask=mask, y_insample=y_insample) norm = torch.sum(torch.abs(y)) unmean = torch.sum(mask) scrps = 2 * mql * unmean / (norm + 1e-5) diff --git a/neuralforecast/models/autoformer.py b/neuralforecast/models/autoformer.py index 815e57bc2..d1a4c53b8 100644 --- a/neuralforecast/models/autoformer.py +++ b/neuralforecast/models/autoformer.py @@ -14,7 +14,7 @@ import torch.nn.functional as F from ..common._modules import DataEmbedding, SeriesDecomp -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE @@ -394,7 +394,7 @@ def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None): return x, trend # %% ../../nbs/models.autoformer.ipynb 10 -class Autoformer(BaseWindows): +class Autoformer(BaseModel): """Autoformer The Autoformer model tackles the challenge of finding reliable dependencies on intricate temporal patterns of long-horizon forecasting. @@ -454,10 +454,13 @@ class Autoformer(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -631,13 +634,9 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] futr_exog = windows_batch["futr_exog"] # Parse inputs - insample_y = insample_y.unsqueeze(-1) # [Ws,L,1] if self.futr_exog_size > 0: x_mark_enc = futr_exog[:, : self.input_size, :] x_mark_dec = futr_exog[:, -(self.label_len + self.h) :, :] @@ -670,5 +669,6 @@ def forward(self, windows_batch): # final dec_out = trend_part + seasonal_part - forecast = self.loss.domain_map(dec_out[:, -self.h :]) + forecast = dec_out[:, -self.h :] + return forecast diff --git a/neuralforecast/models/bitcn.py b/neuralforecast/models/bitcn.py index 53a775838..34566fd36 100644 --- a/neuralforecast/models/bitcn.py +++ b/neuralforecast/models/bitcn.py @@ -12,7 +12,7 @@ import numpy as np from neuralforecast.losses.pytorch import MAE -from neuralforecast.common._base_windows import BaseWindows +from neuralforecast.common._base_model import BaseModel # %% ../../nbs/models.bitcn.ipynb 8 class CustomConv1d(nn.Module): @@ -84,7 +84,7 @@ def forward(self, x): return (h_prev + h_next, out_prev + out_next) # %% ../../nbs/models.bitcn.ipynb 10 -class BiTCN(BaseWindows): +class BiTCN(BaseModel): """BiTCN Bidirectional Temporal Convolutional Network (BiTCN) is a forecasting architecture based on two temporal convolutional networks (TCNs). The first network ('forward') encodes future covariates of the time series, whereas the second network ('backward') encodes past observations and covariates. This is a univariate model. @@ -108,7 +108,7 @@ class BiTCN(BaseWindows): `batch_size`: int=32, number of different series in each batch.
`valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
`windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.
- `inference_windows_batch_size`: int=-1, number of windows to sample in each inference batch, -1 uses all.
+ `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.
`start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
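Across the model files in this diff (Autoformer above, BiTCN below, then DeepAR, DeepNPTS, and DilatedRNN), the migration from `BaseWindows`/`BaseRecurrent` to a single `BaseModel` replaces the `SAMPLING_TYPE` string with explicit capability flags declared on each class. The per-model pattern, sketched with a hypothetical subclass (constructor wiring omitted):

from neuralforecast.common._base_model import BaseModel

class MyModel(BaseModel):        # hypothetical subclass, illustration only
    EXOGENOUS_FUTR = True        # accepts future exogenous inputs
    EXOGENOUS_HIST = True        # accepts historical exogenous inputs
    EXOGENOUS_STAT = True        # accepts static exogenous inputs
    MULTIVARIATE = False         # forecasts one series at a time
    RECURRENT = False            # direct multi-step rather than recursive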
@@ -129,10 +129,13 @@ class BiTCN(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -277,7 +280,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - x = windows_batch["insample_y"].unsqueeze(-1) # [B, L, 1] + x = windows_batch["insample_y"].contiguous() # [B, L, 1] hist_exog = windows_batch["hist_exog"] # [B, L, X] futr_exog = windows_batch["futr_exog"] # [B, L + h, F] stat_exog = windows_batch["stat_exog"] # [B, S] @@ -348,9 +351,6 @@ def forward(self, windows_batch): # Output layer to create forecasts x = x.permute(0, 2, 1) # [B, 3 * hidden_size, h] -> [B, h, 3 * hidden_size] - x = self.output_lin(x) # [B, h, 3 * hidden_size] -> [B, h, n_outputs] - - # Map to output domain - forecast = self.loss.domain_map(x) + forecast = self.output_lin(x) # [B, h, 3 * hidden_size] -> [B, h, n_outputs] return forecast diff --git a/neuralforecast/models/deepar.py b/neuralforecast/models/deepar.py index 3d2a2fd94..8d3859a14 100644 --- a/neuralforecast/models/deepar.py +++ b/neuralforecast/models/deepar.py @@ -4,15 +4,13 @@ __all__ = ['Decoder', 'DeepAR'] # %% ../../nbs/models.deepar.ipynb 4 -import numpy as np - import torch import torch.nn as nn from typing import Optional -from ..common._base_windows import BaseWindows -from ..losses.pytorch import DistributionLoss, MQLoss +from ..common._base_model import BaseModel +from ..losses.pytorch import DistributionLoss, MAE # %% ../../nbs/models.deepar.ipynb 7 class Decoder(nn.Module): @@ -53,7 +51,7 @@ def forward(self, x): return self.layers(x) # %% ../../nbs/models.deepar.ipynb 8 -class DeepAR(BaseWindows): +class DeepAR(BaseModel): """DeepAR **Parameters:**
@@ -101,10 +99,11 @@ class DeepAR(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = True + MULTIVARIATE = False + RECURRENT = True def __init__( self, @@ -123,7 +122,7 @@ def __init__( loss=DistributionLoss( distribution="StudentT", level=[80, 90], return_params=False ), - valid_loss=MQLoss(level=[80, 90]), + valid_loss=MAE(), max_steps: int = 1000, learning_rate: float = 1e-3, num_lr_decays: int = 3, @@ -150,19 +149,6 @@ def __init__( if exclude_insample_y: raise Exception("DeepAR has no possibility for excluding y.") - if not loss.is_distribution_output: - raise Exception("DeepAR only supports distributional outputs.") - - if str(type(valid_loss)) not in [ - "" - ]: - raise Exception("DeepAR only supports MQLoss as validation loss.") - - if loss.return_params: - raise Exception( - "DeepAR does not return distribution parameters due to Monte Carlo sampling." - ) - # Inherit BaseWindows class super(DeepAR, self).__init__( h=h, @@ -196,8 +182,7 @@ def __init__( **trainer_kwargs ) - self.horizon_backup = self.h # Used because h=0 during training - self.trajectory_samples = trajectory_samples + self.n_samples = trajectory_samples # LSTM self.encoder_n_layers = lstm_n_layers @@ -208,6 +193,8 @@ def __init__( input_encoder = 1 + self.futr_exog_size + self.stat_exog_size # Instantiate model + self.rnn_state = None + self.maintain_state = False self.hist_encoder = nn.LSTM( input_size=input_encoder, hidden_size=self.encoder_hidden_size, @@ -224,206 +211,17 @@ def __init__( hidden_layers=decoder_hidden_layers, ) - # Override BaseWindows method - def training_step(self, batch, batch_idx): - - # During training h=0 - self.h = 0 - y_idx = batch["y_idx"] - - # Create and normalize windows [Ws, L, C] - windows = self._create_windows(batch, step="train") - original_insample_y = windows["temporal"][ - :, :, y_idx - ].clone() # windows: [B, L, Feature] -> [B, L] - original_insample_y = original_insample_y[ - :, 1: - ] # Remove first (shift in DeepAr, cell at t outputs t+1) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - insample_y, insample_mask, _, _, _, futr_exog, stat_exog = self._parse_windows( - batch, windows - ) - - windows_batch = dict( - insample_y=insample_y, # [Ws, L] - insample_mask=insample_mask, # [Ws, L] - futr_exog=futr_exog, # [Ws, L+H] - hist_exog=None, # None - stat_exog=stat_exog, - y_idx=y_idx, - ) # [Ws, 1] - - # Model Predictions - output = self.train_forward(windows_batch) - - if self.loss.is_distribution_output: - _, y_loc, y_scale = self._inv_normalization( - y_hat=original_insample_y, - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - outsample_y = original_insample_y - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - mask = insample_mask[ - :, 1: - ].clone() # Remove first (shift in DeepAr, cell at t outputs t+1) - loss = self.loss(y=outsample_y, distr_args=distr_args, mask=mask) - else: - raise Exception("DeepAR only supports distributional outputs.") - - if torch.isnan(loss): - print("Model Parameters", self.hparams) - print("insample_y", torch.isnan(insample_y).sum()) - print("outsample_y", torch.isnan(outsample_y).sum()) - print("output", torch.isnan(output).sum()) - raise Exception("Loss is NaN, training stopped.") - - self.log( - "train_loss", - loss.item(), - batch_size=outsample_y.size(0), - prog_bar=True, - on_epoch=True, - ) - self.train_trajectories.append((self.global_step, loss.item())) - - self.h = 
self.horizon_backup # Restore horizon - return loss - - def validation_step(self, batch, batch_idx): - - self.h == self.horizon_backup - - if self.val_size == 0: - return np.nan - - # TODO: Hack to compute number of windows - windows = self._create_windows(batch, step="val") - n_windows = len(windows["temporal"]) - y_idx = batch["y_idx"] - - # Number of windows in batch - windows_batch_size = self.inference_windows_batch_size - if windows_batch_size < 0: - windows_batch_size = n_windows - n_batches = int(np.ceil(n_windows / windows_batch_size)) - - valid_losses = [] - batch_sizes = [] - for i in range(n_batches): - # Create and normalize windows [Ws, L+H, C] - w_idxs = np.arange( - i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) - ) - windows = self._create_windows(batch, step="val", w_idxs=w_idxs) - original_outsample_y = torch.clone(windows["temporal"][:, -self.h :, 0]) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - insample_y, insample_mask, _, outsample_mask, _, futr_exog, stat_exog = ( - self._parse_windows(batch, windows) - ) - windows_batch = dict( - insample_y=insample_y, - insample_mask=insample_mask, - futr_exog=futr_exog, - hist_exog=None, - stat_exog=stat_exog, - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - - # Model Predictions - output_batch = self(windows_batch) - # Monte Carlo already returns y_hat with mean and quantiles - output_batch = output_batch[:, :, 1:] # Remove mean - valid_loss_batch = self.valid_loss( - y=original_outsample_y, y_hat=output_batch, mask=outsample_mask - ) - valid_losses.append(valid_loss_batch) - batch_sizes.append(len(output_batch)) - - valid_loss = torch.stack(valid_losses) - batch_sizes = torch.tensor(batch_sizes, device=valid_loss.device) - batch_size = torch.sum(batch_sizes) - valid_loss = torch.sum(valid_loss * batch_sizes) / batch_size - - if torch.isnan(valid_loss): - raise Exception("Loss is NaN, training stopped.") - - self.log( - "valid_loss", - valid_loss.item(), - batch_size=batch_size, - prog_bar=True, - on_epoch=True, - ) - self.validation_step_outputs.append(valid_loss) - return valid_loss - - def predict_step(self, batch, batch_idx): - - self.h == self.horizon_backup - - # TODO: Hack to compute number of windows - windows = self._create_windows(batch, step="predict") - n_windows = len(windows["temporal"]) - y_idx = batch["y_idx"] - - # Number of windows in batch - windows_batch_size = self.inference_windows_batch_size - if windows_batch_size < 0: - windows_batch_size = n_windows - n_batches = int(np.ceil(n_windows / windows_batch_size)) - - y_hats = [] - for i in range(n_batches): - # Create and normalize windows [Ws, L+H, C] - w_idxs = np.arange( - i * windows_batch_size, min((i + 1) * windows_batch_size, n_windows) - ) - windows = self._create_windows(batch, step="predict", w_idxs=w_idxs) - windows = self._normalization(windows=windows, y_idx=y_idx) - - # Parse windows - insample_y, insample_mask, _, _, _, futr_exog, stat_exog = ( - self._parse_windows(batch, windows) - ) - windows_batch = dict( - insample_y=insample_y, # [Ws, L] - insample_mask=insample_mask, # [Ws, L] - futr_exog=futr_exog, # [Ws, L+H] - stat_exog=stat_exog, - temporal_cols=batch["temporal_cols"], - y_idx=y_idx, - ) - - # Model Predictions - y_hat = self(windows_batch) - # Monte Carlo already returns y_hat with mean and quantiles - y_hats.append(y_hat) - y_hat = torch.cat(y_hats, dim=0) - return y_hat - - def train_forward(self, windows_batch): + def forward(self, windows_batch): # Parse 
windows_batch - encoder_input = windows_batch["insample_y"][:, :, None] # <- [B,T,1] + encoder_input = windows_batch["insample_y"] # <- [B, T, 1] futr_exog = windows_batch["futr_exog"] stat_exog = windows_batch["stat_exog"] - # [B, input_size-1, X] - encoder_input = encoder_input[ - :, :-1, : - ] # Remove last (shift in DeepAr, cell at t outputs t+1) _, input_size = encoder_input.shape[:2] if self.futr_exog_size > 0: - # Shift futr_exog (t predicts t+1, last output is outside insample_y) - encoder_input = torch.cat((encoder_input, futr_exog[:, 1:, :]), dim=2) + encoder_input = torch.cat((encoder_input, futr_exog), dim=2) + if self.stat_exog_size > 0: stat_exog = stat_exog.unsqueeze(1).repeat( 1, input_size, 1 @@ -431,114 +229,20 @@ def train_forward(self, windows_batch): encoder_input = torch.cat((encoder_input, stat_exog), dim=2) # RNN forward - hidden_state, _ = self.hist_encoder( - encoder_input + if self.maintain_state: + rnn_state = self.rnn_state + else: + rnn_state = None + + hidden_state, rnn_state = self.hist_encoder( + encoder_input, rnn_state ) # [B, input_size-1, rnn_hidden_state] + if self.maintain_state: + self.rnn_state = rnn_state + # Decoder forward output = self.decoder(hidden_state) # [B, input_size-1, output_size] - output = self.loss.domain_map(output) - return output - - def forward(self, windows_batch): - - # Parse windows_batch - encoder_input = windows_batch["insample_y"][:, :, None] # <- [B,L,1] - futr_exog = windows_batch["futr_exog"] # <- [B,L+H, n_f] - stat_exog = windows_batch["stat_exog"] - y_idx = windows_batch["y_idx"] - # [B, seq_len, X] - batch_size, input_size = encoder_input.shape[:2] - if self.futr_exog_size > 0: - futr_exog_input_window = futr_exog[ - :, 1 : input_size + 1, : - ] # Align y_t with futr_exog_t+1 - encoder_input = torch.cat((encoder_input, futr_exog_input_window), dim=2) - if self.stat_exog_size > 0: - stat_exog_input_window = stat_exog.unsqueeze(1).repeat( - 1, input_size, 1 - ) # [B, S] -> [B, input_size, S] - encoder_input = torch.cat((encoder_input, stat_exog_input_window), dim=2) - - # Use input_size history to predict first h of the forecasting window - _, h_c_tuple = self.hist_encoder(encoder_input) - h_n = h_c_tuple[0] # [n_layers, B, lstm_hidden_state] - c_n = h_c_tuple[1] # [n_layers, B, lstm_hidden_state] - - # Vectorizes trajectory samples in batch dimension [1] - h_n = torch.repeat_interleave( - h_n, self.trajectory_samples, 1 - ) # [n_layers, B*trajectory_samples, rnn_hidden_state] - c_n = torch.repeat_interleave( - c_n, self.trajectory_samples, 1 - ) # [n_layers, B*trajectory_samples, rnn_hidden_state] - - # Scales for inverse normalization - y_scale = ( - self.scaler.x_scale[:, 0, [y_idx]].squeeze(-1).to(encoder_input.device) - ) - y_loc = self.scaler.x_shift[:, 0, [y_idx]].squeeze(-1).to(encoder_input.device) - y_scale = torch.repeat_interleave(y_scale, self.trajectory_samples, 0) - y_loc = torch.repeat_interleave(y_loc, self.trajectory_samples, 0) - - # Recursive strategy prediction - quantiles = self.loss.quantiles.to(encoder_input.device) - y_hat = torch.zeros( - batch_size, self.h, len(quantiles) + 1, device=encoder_input.device - ) - for tau in range(self.h): - # Decoder forward - last_layer_h = h_n[-1] # [B*trajectory_samples, lstm_hidden_state] - output = self.decoder(last_layer_h) - output = self.loss.domain_map(output) - - # Inverse normalization - distr_args = self.loss.scale_decouple( - output=output, loc=y_loc, scale=y_scale - ) - # Add horizon (1) dimension - distr_args = list(distr_args) - for i in 
range(len(distr_args)): - distr_args[i] = distr_args[i].unsqueeze(-1) - distr_args = tuple(distr_args) - samples_tau, _, _ = self.loss.sample(distr_args=distr_args, num_samples=1) - samples_tau = samples_tau.reshape(batch_size, self.trajectory_samples) - sample_mean = torch.mean(samples_tau, dim=-1).to(encoder_input.device) - quants = torch.quantile(input=samples_tau, q=quantiles, dim=-1).to( - encoder_input.device - ) - y_hat[:, tau, 0] = sample_mean - y_hat[:, tau, 1:] = quants.permute((1, 0)) # [Q, B] -> [B, Q] - - # Stop if already in the last step (no need to predict next step) - if tau + 1 == self.h: - continue - # Normalize to use as input - encoder_input = self.scaler.scaler( - samples_tau.flatten(), y_loc, y_scale - ) # [B*n_samples] - encoder_input = encoder_input[:, None, None] # [B*n_samples, 1, 1] - - # Update input - if self.futr_exog_size > 0: - futr_exog_tau = futr_exog[:, [input_size + tau + 1], :] # [B, 1, n_f] - futr_exog_tau = torch.repeat_interleave( - futr_exog_tau, self.trajectory_samples, 0 - ) # [B*n_samples, 1, n_f] - encoder_input = torch.cat( - (encoder_input, futr_exog_tau), dim=2 - ) # [B*n_samples, 1, 1+n_f] - if self.stat_exog_size > 0: - stat_exog_tau = torch.repeat_interleave( - stat_exog, self.trajectory_samples, 0 - ) # [B*n_samples, n_s] - encoder_input = torch.cat( - (encoder_input, stat_exog_tau[:, None, :]), dim=2 - ) # [B*n_samples, 1, 1+n_f+n_s] - - _, h_c_tuple = self.hist_encoder(encoder_input, (h_n, c_n)) - h_n = h_c_tuple[0] # [n_layers, B, rnn_hidden_state] - c_n = h_c_tuple[1] # [n_layers, B, rnn_hidden_state] - - return y_hat + # Return only horizon part + return output[:, -self.h :] diff --git a/neuralforecast/models/deepnpts.py b/neuralforecast/models/deepnpts.py index f958e71be..b8b168f5a 100644 --- a/neuralforecast/models/deepnpts.py +++ b/neuralforecast/models/deepnpts.py @@ -11,11 +11,11 @@ from typing import Optional -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE # %% ../../nbs/models.deepnpts.ipynb 6 -class DeepNPTS(BaseWindows): +class DeepNPTS(BaseModel): """DeepNPTS Deep Non-Parametric Time Series Forecaster (`DeepNPTS`) is a baseline model for time-series forecasting. This model generates predictions by (weighted) sampling from the empirical distribution according to a learnable strategy. The strategy is learned by exploiting the information across multiple related time series. @@ -62,10 +62,13 @@ class DeepNPTS(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -107,12 +110,12 @@ def __init__( if exclude_insample_y: raise Exception("DeepNPTS has no possibility for excluding y.") - if not isinstance(loss, losses.BasePointLoss): + if loss.outputsize_multiplier > 1: raise Exception( "DeepNPTS only supports point loss functions (MAE, MSE, etc) as loss function." ) - if not isinstance(valid_loss, losses.BasePointLoss): + if valid_loss is not None and not isinstance(valid_loss, losses.BasePointLoss): raise Exception( "DeepNPTS only supports point loss functions (MAE, MSE, etc) as valid loss function." 
) @@ -175,13 +178,13 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - x = windows_batch["insample_y"].unsqueeze(-1) # [B, L, 1] + x = windows_batch["insample_y"] # [B, L, 1] hist_exog = windows_batch["hist_exog"] # [B, L, X] futr_exog = windows_batch["futr_exog"] # [B, L + h, F] stat_exog = windows_batch["stat_exog"] # [B, S] batch_size, seq_len = x.shape[:2] # B = batch_size, L = seq_len - insample_y = windows_batch["insample_y"].unsqueeze(-1) + insample_y = windows_batch["insample_y"] # Concatenate x_t with future exogenous of input if self.futr_exog_size > 0: @@ -223,8 +226,6 @@ def forward(self, windows_batch): x = ( F.softmax(weights, dim=1) * insample_y ) # [B, L, h] * [B, L, 1] = [B, L, h] - output = torch.sum(x, dim=1).unsqueeze(-1) # [B, L, h] -> [B, h, 1] - - forecast = self.loss.domain_map(output) # [B, h, 1] -> [B, h, 1] + forecast = torch.sum(x, dim=1).unsqueeze(-1) # [B, L, h] -> [B, h, 1] return forecast diff --git a/neuralforecast/models/dilated_rnn.py b/neuralforecast/models/dilated_rnn.py index d56cc5f08..cbc8bb484 100644 --- a/neuralforecast/models/dilated_rnn.py +++ b/neuralforecast/models/dilated_rnn.py @@ -10,7 +10,7 @@ import torch.nn as nn from ..losses.pytorch import MAE -from ..common._base_recurrent import BaseRecurrent +from ..common._base_model import BaseModel from ..common._modules import MLP # %% ../../nbs/models.dilated_rnn.ipynb 7 @@ -256,8 +256,8 @@ def _split_outputs(self, dilated_outputs, rate): for i in range(rate) ] - interleaved = torch.stack((blocks)).transpose(1, 0).contiguous() - interleaved = interleaved.view( + interleaved = torch.stack((blocks)).transpose(1, 0) + interleaved = interleaved.reshape( dilated_outputs.size(0) * rate, batchsize, dilated_outputs.size(2) ) return interleaved @@ -286,7 +286,7 @@ def _prepare_inputs(self, inputs, rate): return dilated_inputs # %% ../../nbs/models.dilated_rnn.ipynb 12 -class DilatedRNN(BaseRecurrent): +class DilatedRNN(BaseModel): """DilatedRNN **Parameters:**
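The hunks below port DilatedRNN from BaseRecurrent to the unified BaseModel: `input_size` becomes required and windows-style batching parameters appear. A minimal usage sketch under those assumptions; the dataset and training settings are illustrative, not part of this PR:

```python
# Hedged sketch -- assumes the post-refactor DilatedRNN signature shown in
# this file's hunks (required input_size, windows_batch_size,
# inference_windows_batch_size, start_padding_enabled). AirPassengersDF and
# the hyperparameters are illustrative only.
from neuralforecast import NeuralForecast
from neuralforecast.models import DilatedRNN
from neuralforecast.utils import AirPassengersDF

horizon = 12
model = DilatedRNN(
    h=horizon,
    input_size=2 * horizon,            # now required, no -1 default
    encoder_hidden_size=128,           # new default per this diff
    windows_batch_size=128,
    inference_windows_batch_size=1024,
    start_padding_enabled=False,
    max_steps=100,
)

nf = NeuralForecast(models=[model], freq="M")  # monthly frequency
nf.fit(df=AirPassengersDF)
print(nf.predict().head())
```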
@@ -326,25 +326,29 @@ class DilatedRNN(BaseRecurrent): """ # Class attributes - SAMPLING_TYPE = "recurrent" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, h: int, - input_size: int = -1, + input_size: int, inference_input_size: int = -1, cell_type: str = "LSTM", dilations: List[List[int]] = [[1, 2], [4, 8]], - encoder_hidden_size: int = 200, + encoder_hidden_size: int = 128, context_size: int = 10, - decoder_hidden_size: int = 200, + decoder_hidden_size: int = 128, decoder_layers: int = 2, futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, loss=MAE(), valid_loss=None, max_steps: int = 1000, @@ -354,6 +358,9 @@ def __init__( val_check_steps: int = 100, batch_size=32, valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=1024, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "robust", random_seed: int = 1, @@ -369,7 +376,10 @@ def __init__( super(DilatedRNN, self).__init__( h=h, input_size=input_size, - inference_input_size=inference_input_size, + futr_exog_list=futr_exog_list, + hist_exog_list=hist_exog_list, + stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -379,13 +389,14 @@ def __init__( val_check_steps=val_check_steps, batch_size=batch_size, valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, + step_size=step_size, scaler_type=scaler_type, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, + random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - random_seed=random_seed, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, lr_scheduler=lr_scheduler, @@ -407,14 +418,14 @@ def __init__( self.decoder_layers = decoder_layers # RNN input size (1 for target variable y) - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + input_encoder = ( + 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size + ) # Instantiate model layers = [] for grp_num in range(len(self.dilations)): - if grp_num == 0: - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size - else: + if grp_num > 0: input_encoder = self.encoder_hidden_size layer = DRNN( input_encoder, @@ -428,14 +439,11 @@ def __init__( self.rnn_stack = nn.Sequential(*layers) # Context adapter - self.context_adapter = nn.Linear( - in_features=self.encoder_hidden_size + self.futr_exog_size * h, - out_features=self.context_size * h, - ) + self.context_adapter = nn.Linear(in_features=self.input_size, out_features=h) # Decoder MLP self.mlp_decoder = MLP( - in_features=self.context_size + self.futr_exog_size, + in_features=self.encoder_hidden_size + self.futr_exog_size, out_features=self.loss.outputsize_multiplier, hidden_size=self.decoder_hidden_size, num_layers=self.decoder_layers, @@ -446,26 +454,30 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - encoder_input = windows_batch["insample_y"] # [B, seq_len, 1] - futr_exog = windows_batch["futr_exog"] - hist_exog = windows_batch["hist_exog"] - stat_exog = windows_batch["stat_exog"] + encoder_input = 
windows_batch["insample_y"] # [B, L, 1] + futr_exog = windows_batch["futr_exog"] # [B, L + h, F] + hist_exog = windows_batch["hist_exog"] # [B, L, X] + stat_exog = windows_batch["stat_exog"] # [B, S] # Concatenate y, historic and static inputs - # [B, C, seq_len, 1] -> [B, seq_len, C] - # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ] batch_size, seq_len = encoder_input.shape[:2] if self.hist_exog_size > 0: - hist_exog = hist_exog.permute(0, 2, 1, 3).squeeze( - -1 - ) # [B, X, seq_len, 1] -> [B, seq_len, X] - encoder_input = torch.cat((encoder_input, hist_exog), dim=2) + encoder_input = torch.cat( + (encoder_input, hist_exog), dim=2 + ) # [B, L, 1] + [B, L, X] -> [B, L, 1 + X] if self.stat_exog_size > 0: stat_exog = stat_exog.unsqueeze(1).repeat( 1, seq_len, 1 - ) # [B, S] -> [B, seq_len, S] - encoder_input = torch.cat((encoder_input, stat_exog), dim=2) + ) # [B, S] -> [B, L, S] + encoder_input = torch.cat( + (encoder_input, stat_exog), dim=2 + ) # [B, L, 1 + X] + [B, L, S] -> [B, L, 1 + X + S] + + if self.futr_exog_size > 0: + encoder_input = torch.cat( + (encoder_input, futr_exog[:, :seq_len]), dim=2 + ) # [B, L, 1 + X + S] + [B, L, F] -> [B, L, 1 + X + S + F] # DilatedRNN forward for layer_num in range(len(self.rnn_stack)): @@ -475,24 +487,21 @@ def forward(self, windows_batch): output += residual encoder_input = output - if self.futr_exog_size > 0: - futr_exog = futr_exog.permute(0, 2, 3, 1)[ - :, :, 1:, : - ] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F] - encoder_input = torch.cat( - (encoder_input, futr_exog.reshape(batch_size, seq_len, -1)), dim=2 - ) - # Context adapter - context = self.context_adapter(encoder_input) - context = context.reshape(batch_size, seq_len, self.h, self.context_size) + output = output.permute(0, 2, 1) # [B, L, C] -> [B, C, L] + context = self.context_adapter(output) # [B, C, L] -> [B, C, h] # Residual connection with futr_exog if self.futr_exog_size > 0: - context = torch.cat((context, futr_exog), dim=-1) + futr_exog_futr = futr_exog[:, seq_len:].permute( + 0, 2, 1 + ) # [B, h, F] -> [B, F, h] + context = torch.cat( + (context, futr_exog_futr), dim=1 + ) # [B, C, h] + [B, F, h] = [B, C + F, h] # Final forecast - output = self.mlp_decoder(context) - output = self.loss.domain_map(output) + context = context.permute(0, 2, 1) # [B, C + F, h] -> [B, h, C + F] + output = self.mlp_decoder(context) # [B, h, C + F] -> [B, h, n_output] return output diff --git a/neuralforecast/models/dlinear.py b/neuralforecast/models/dlinear.py index 17965c869..79a8d75de 100644 --- a/neuralforecast/models/dlinear.py +++ b/neuralforecast/models/dlinear.py @@ -9,7 +9,7 @@ import torch import torch.nn as nn -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE @@ -48,7 +48,7 @@ def forward(self, x): return res, moving_mean # %% ../../nbs/models.dlinear.ipynb 10 -class DLinear(BaseWindows): +class DLinear(BaseModel): """DLinear *Parameters:*
@@ -87,10 +87,13 @@ class DLinear(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -178,11 +181,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] - # futr_exog = windows_batch['futr_exog'] + insample_y = windows_batch["insample_y"].squeeze(-1) # Parse inputs batch_size = len(insample_y) @@ -194,5 +193,4 @@ def forward(self, windows_batch): # Final forecast = trend_part + seasonal_part forecast = forecast.reshape(batch_size, self.h, self.loss.outputsize_multiplier) - forecast = self.loss.domain_map(forecast) return forecast diff --git a/neuralforecast/models/fedformer.py b/neuralforecast/models/fedformer.py index 89e2fe3ef..a91bae3a8 100644 --- a/neuralforecast/models/fedformer.py +++ b/neuralforecast/models/fedformer.py @@ -4,7 +4,7 @@ __all__ = ['LayerNorm', 'AutoCorrelationLayer', 'EncoderLayer', 'Encoder', 'DecoderLayer', 'Decoder', 'get_frequency_modes', 'FourierBlock', 'FourierCrossAttention', 'FEDformer'] -# %% ../../nbs/models.fedformer.ipynb 5 +# %% ../../nbs/models.fedformer.ipynb 6 import numpy as np from typing import Optional @@ -14,11 +14,11 @@ from ..common._modules import DataEmbedding from ..common._modules import SeriesDecomp -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE -# %% ../../nbs/models.fedformer.ipynb 7 +# %% ../../nbs/models.fedformer.ipynb 8 class LayerNorm(nn.Module): """ Special designed layernorm for the seasonal part @@ -66,7 +66,7 @@ def forward(self, queries, keys, values, attn_mask): return self.out_projection(out), attn -# %% ../../nbs/models.fedformer.ipynb 8 +# %% ../../nbs/models.fedformer.ipynb 9 class EncoderLayer(nn.Module): """ FEDformer encoder layer with the progressive decomposition architecture @@ -234,7 +234,7 @@ def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None): x = self.projection(x) return x, trend -# %% ../../nbs/models.fedformer.ipynb 9 +# %% ../../nbs/models.fedformer.ipynb 10 def get_frequency_modes(seq_len, modes=64, mode_select_method="random"): """ Get modes on frequency domain: @@ -390,8 +390,8 @@ def forward(self, q, k, v, mask): ) return (out, None) -# %% ../../nbs/models.fedformer.ipynb 11 -class FEDformer(BaseWindows): +# %% ../../nbs/models.fedformer.ipynb 12 +class FEDformer(BaseModel): """FEDformer The FEDformer model tackles the challenge of finding reliable dependencies on intricate temporal patterns of long-horizon forecasting. 
@@ -450,10 +450,13 @@ class FEDformer(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -626,13 +629,9 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] futr_exog = windows_batch["futr_exog"] # Parse inputs - insample_y = insample_y.unsqueeze(-1) # [Ws,L,1] if self.futr_exog_size > 0: x_mark_enc = futr_exog[:, : self.input_size, :] x_mark_dec = futr_exog[:, -(self.label_len + self.h) :, :] @@ -666,6 +665,6 @@ def forward(self, windows_batch): ) # final dec_out = trend_part + seasonal_part + forecast = dec_out[:, -self.h :] - forecast = self.loss.domain_map(dec_out[:, -self.h :]) return forecast diff --git a/neuralforecast/models/gru.py b/neuralforecast/models/gru.py index 9a6d92325..f8061500e 100644 --- a/neuralforecast/models/gru.py +++ b/neuralforecast/models/gru.py @@ -9,13 +9,14 @@ import torch import torch.nn as nn +import warnings from ..losses.pytorch import MAE -from ..common._base_recurrent import BaseRecurrent +from ..common._base_model import BaseModel from ..common._modules import MLP -# %% ../../nbs/models.gru.ipynb 8 -class GRU(BaseRecurrent): +# %% ../../nbs/models.gru.ipynb 7 +class GRU(BaseModel): """GRU Multi Layer Recurrent Network with Gated Units (GRU), and @@ -23,7 +24,7 @@ class GRU(BaseRecurrent): using ADAM stochastic gradient descent. The network accepts static, historic and future exogenous data, flattens the inputs. - **Parameters:**
+ **Parameters:**<br>
`h`: int, forecast horizon.<br>
`input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.<br>
`inference_input_size`: int, maximum sequence length for truncated inference. Default -1 uses all history.<br>
@@ -32,7 +33,7 @@ class GRU(BaseRecurrent):
`encoder_activation`: Optional[str]=None, Deprecated. Activation function in GRU is frozen in PyTorch.<br>
`encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within GRU units.<br>
`encoder_dropout`: float=0., dropout regularization applied to GRU outputs.<br>
- `context_size`: int=10, size of context vector for each timestamp on the forecasting window.<br>
+ `context_size`: deprecated.<br>
`decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.<br>
`decoder_layers`: int=2, number of layers for the MLP decoder.<br>
`futr_exog_list`: str list, future exogenous columns.<br>
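The hunks below introduce a `recurrent` constructor flag that overwrites the class-level RECURRENT attribute, switching GRU between direct MLP decoding of the last h hidden states and step-wise recursive projection. A hedged construction sketch; hyperparameters are illustrative:

```python
# Hedged sketch -- assumes the new `recurrent` kwarg added in this diff.
from neuralforecast.models import GRU

horizon = 12
direct_gru = GRU(h=horizon, input_size=2 * horizon,
                 encoder_hidden_size=128, recurrent=False, max_steps=100)
recursive_gru = GRU(h=horizon, input_size=2 * horizon,
                    encoder_hidden_size=128, recurrent=True, max_steps=100)

# The flag lands on the instance before BaseModel.__init__ runs
assert direct_gru.RECURRENT is False
assert recursive_gru.RECURRENT is True
```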
@@ -61,10 +62,13 @@ class GRU(BaseRecurrent): """ # Class attributes - SAMPLING_TYPE = "recurrent" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + True # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -76,12 +80,14 @@ def __init__( encoder_activation: Optional[str] = None, encoder_bias: bool = True, encoder_dropout: float = 0.0, - context_size: int = 10, - decoder_hidden_size: int = 200, + context_size: Optional[int] = None, + decoder_hidden_size: int = 128, decoder_layers: int = 2, futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, + recurrent=False, loss=MAE(), valid_loss=None, max_steps: int = 1000, @@ -91,6 +97,10 @@ def __init__( val_check_steps: int = 100, batch_size=32, valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=1024, + start_padding_enabled=False, + step_size: int = 1, scaler_type: str = "robust", random_seed=1, num_workers_loader=0, @@ -102,10 +112,16 @@ def __init__( dataloader_kwargs=None, **trainer_kwargs ): + + self.RECURRENT = recurrent + super(GRU, self).__init__( h=h, input_size=input_size, - inference_input_size=inference_input_size, + futr_exog_list=futr_exog_list, + hist_exog_list=hist_exog_list, + stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -115,13 +131,14 @@ def __init__( val_check_steps=val_check_steps, batch_size=batch_size, valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, + step_size=step_size, scaler_type=scaler_type, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, + random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - random_seed=random_seed, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, lr_scheduler=lr_scheduler, @@ -145,16 +162,23 @@ def __init__( self.encoder_dropout = encoder_dropout # Context adapter - self.context_size = context_size + if context_size is not None: + warnings.warn( + "context_size is deprecated and will be removed in future versions." 
+ ) # MLP decoder self.decoder_hidden_size = decoder_hidden_size self.decoder_layers = decoder_layers # RNN input size (1 for target variable y) - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + input_encoder = ( + 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size + ) # Instantiate model + self.rnn_state = None + self.maintain_state = False self.hist_encoder = nn.GRU( input_size=input_encoder, hidden_size=self.encoder_hidden_size, @@ -164,69 +188,80 @@ def __init__( batch_first=True, ) - # Context adapter - self.context_adapter = nn.Linear( - in_features=self.encoder_hidden_size + self.futr_exog_size * h, - out_features=self.context_size * h, - ) - # Decoder MLP - self.mlp_decoder = MLP( - in_features=self.context_size + self.futr_exog_size, - out_features=self.loss.outputsize_multiplier, - hidden_size=self.decoder_hidden_size, - num_layers=self.decoder_layers, - activation="ReLU", - dropout=0.0, - ) + if self.RECURRENT: + self.proj = nn.Linear( + self.encoder_hidden_size, self.loss.outputsize_multiplier + ) + else: + self.mlp_decoder = MLP( + in_features=self.encoder_hidden_size + self.futr_exog_size, + out_features=self.loss.outputsize_multiplier, + hidden_size=self.decoder_hidden_size, + num_layers=self.decoder_layers, + activation="ReLU", + dropout=0.0, + ) def forward(self, windows_batch): # Parse windows_batch encoder_input = windows_batch["insample_y"] # [B, seq_len, 1] - futr_exog = windows_batch["futr_exog"] - hist_exog = windows_batch["hist_exog"] - stat_exog = windows_batch["stat_exog"] + futr_exog = windows_batch["futr_exog"] # [B, seq_len, F] + hist_exog = windows_batch["hist_exog"] # [B, seq_len, X] + stat_exog = windows_batch["stat_exog"] # [B, S] # Concatenate y, historic and static inputs - # [B, C, seq_len, 1] -> [B, seq_len, C] - # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ] batch_size, seq_len = encoder_input.shape[:2] if self.hist_exog_size > 0: - hist_exog = hist_exog.permute(0, 2, 1, 3).squeeze( - -1 - ) # [B, X, seq_len, 1] -> [B, seq_len, X] - encoder_input = torch.cat((encoder_input, hist_exog), dim=2) + encoder_input = torch.cat( + (encoder_input, hist_exog), dim=2 + ) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X] if self.stat_exog_size > 0: + # print(encoder_input.shape) stat_exog = stat_exog.unsqueeze(1).repeat( 1, seq_len, 1 ) # [B, S] -> [B, seq_len, S] - encoder_input = torch.cat((encoder_input, stat_exog), dim=2) - - # RNN forward - hidden_state, _ = self.hist_encoder( - encoder_input - ) # [B, seq_len, rnn_hidden_state] + encoder_input = torch.cat( + (encoder_input, stat_exog), dim=2 + ) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S] if self.futr_exog_size > 0: - futr_exog = futr_exog.permute(0, 2, 3, 1)[ - :, :, 1:, : - ] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F] - hidden_state = torch.cat( - (hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2 - ) + encoder_input = torch.cat( + (encoder_input, futr_exog[:, :seq_len]), dim=2 + ) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F] - # Context adapter - context = self.context_adapter(hidden_state) - context = context.reshape(batch_size, seq_len, self.h, self.context_size) + if self.RECURRENT: + if self.maintain_state: + rnn_state = self.rnn_state + else: + rnn_state = None - # Residual connection with futr_exog - if self.futr_exog_size > 0: - context = torch.cat((context, futr_exog), dim=-1) + output, rnn_state = self.hist_encoder( + encoder_input, rnn_state + ) # [B, seq_len, rnn_hidden_state] + 
output = self.proj( + output + ) # [B, seq_len, rnn_hidden_state] -> [B, seq_len, n_output] + if self.maintain_state: + self.rnn_state = rnn_state + else: + hidden_state, _ = self.hist_encoder( + encoder_input, None + ) # [B, seq_len, rnn_hidden_state] + hidden_state = hidden_state[ + :, -self.h : + ] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state] + + if self.futr_exog_size > 0: + futr_exog_futr = futr_exog[:, -self.h :] # [B, h, F] + hidden_state = torch.cat( + (hidden_state, futr_exog_futr), dim=-1 + ) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F] - # Final forecast - output = self.mlp_decoder(context) - output = self.loss.domain_map(output) + output = self.mlp_decoder( + hidden_state + ) # [B, h, rnn_hidden_state + F] -> [B, seq_len, n_output] - return output + return output[:, -self.h :] diff --git a/neuralforecast/models/informer.py b/neuralforecast/models/informer.py index 8b115cebd..f775e31e7 100644 --- a/neuralforecast/models/informer.py +++ b/neuralforecast/models/informer.py @@ -19,7 +19,7 @@ DataEmbedding, AttentionLayer, ) -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE @@ -179,7 +179,7 @@ def forward(self, queries, keys, values, attn_mask): return context.contiguous(), attn # %% ../../nbs/models.informer.ipynb 11 -class Informer(BaseWindows): +class Informer(BaseModel): """Informer The Informer model tackles the vanilla Transformer computational complexity challenges for long-horizon forecasting. @@ -238,10 +238,11 @@ class Informer(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False + RECURRENT = False def __init__( self, @@ -414,14 +415,8 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] - futr_exog = windows_batch["futr_exog"] - insample_y = insample_y.unsqueeze(-1) # [Ws,L,1] - if self.futr_exog_size > 0: x_mark_enc = futr_exog[:, : self.input_size, :] x_mark_dec = futr_exog[:, -(self.label_len + self.h) :, :] @@ -438,5 +433,5 @@ def forward(self, windows_batch): dec_out = self.dec_embedding(x_dec, x_mark_dec) dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None) - forecast = self.loss.domain_map(dec_out[:, -self.h :]) + forecast = dec_out[:, -self.h :] return forecast diff --git a/neuralforecast/models/itransformer.py b/neuralforecast/models/itransformer.py index 9e577a71d..fb67ee20f 100644 --- a/neuralforecast/models/itransformer.py +++ b/neuralforecast/models/itransformer.py @@ -11,9 +11,9 @@ import numpy as np from math import sqrt - +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel from neuralforecast.common._modules import ( TransEncoder, @@ -102,7 +102,7 @@ def forward(self, x, x_mark): return self.dropout(x) # %% ../../nbs/models.itransformer.ipynb 13 -class iTransformer(BaseMultivariate): +class iTransformer(BaseModel): """iTransformer **Parameters:**
@@ -128,6 +128,10 @@ class iTransformer(BaseMultivariate):
`early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.<br>
`val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
`batch_size`: int=32, number of different series in each batch.<br>
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
+ `windows_batch_size`: int=128, number of windows to sample in each training batch.<br>
+ `inference_windows_batch_size`: int=128, number of windows to sample in each inference batch, -1 uses all.<br>
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
`step_size`: int=1, step size between each window of temporal data.<br>
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>
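In the projector change below, the output head is sized `h * loss.outputsize_multiplier`, so multi-output losses fit through a single linear layer. A small sketch of that sizing; the assumption that MQLoss's `outputsize_multiplier` equals its number of quantiles is mine, not stated in this PR:

```python
import torch.nn as nn
from neuralforecast.losses.pytorch import MQLoss

# Assumption: MQLoss exposes outputsize_multiplier == len(quantiles)
loss = MQLoss(quantiles=[0.1, 0.5, 0.9])
h, hidden_size = 12, 512

# Mirrors the hunk: one linear head emits all quantiles for all steps
projector = nn.Linear(hidden_size, h * loss.outputsize_multiplier, bias=True)
print(projector.out_features)  # 36 = 12 steps * 3 quantiles
```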
@@ -146,10 +150,11 @@ class iTransformer(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True + RECURRENT = False def __init__( self, @@ -159,6 +164,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, hidden_size: int = 512, n_heads: int = 8, e_layers: int = 2, @@ -175,6 +181,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=128, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -195,6 +205,7 @@ def __init__( stat_exog_list=None, futr_exog_list=None, hist_exog_list=None, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -203,6 +214,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -253,7 +268,9 @@ def __init__( norm_layer=torch.nn.LayerNorm(self.hidden_size), ) - self.projector = nn.Linear(self.hidden_size, h, bias=True) + self.projector = nn.Linear( + self.hidden_size, h * self.loss.outputsize_multiplier, bias=True + ) def forecast(self, x_enc): if self.use_norm: @@ -287,8 +304,16 @@ def forecast(self, x_enc): if self.use_norm: # De-Normalization from Non-stationary Transformer - dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h, 1)) - dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h, 1)) + dec_out = dec_out * ( + stdev[:, 0, :] + .unsqueeze(1) + .repeat(1, self.h * self.loss.outputsize_multiplier, 1) + ) + dec_out = dec_out + ( + means[:, 0, :] + .unsqueeze(1) + .repeat(1, self.h * self.loss.outputsize_multiplier, 1) + ) return dec_out @@ -296,11 +321,6 @@ def forward(self, windows_batch): insample_y = windows_batch["insample_y"] y_pred = self.forecast(insample_y) - y_pred = y_pred[:, -self.h :, :] - y_pred = self.loss.domain_map(y_pred) + y_pred = y_pred.reshape(insample_y.shape[0], self.h, -1) - # domain_map might have squeezed the last dimension in case n_series == 1 - if y_pred.ndim == 2: - return y_pred.unsqueeze(-1) - else: - return y_pred + return y_pred diff --git a/neuralforecast/models/kan.py b/neuralforecast/models/kan.py index 29d7b1d00..924f53136 100644 --- a/neuralforecast/models/kan.py +++ b/neuralforecast/models/kan.py @@ -12,7 +12,7 @@ import torch.nn.functional as F from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.kan.ipynb 8 class KANLinear(torch.nn.Module): @@ -240,7 +240,7 @@ def regularization_loss(self, regularize_activation=1.0, regularize_entropy=1.0) ) # %% ../../nbs/models.kan.ipynb 9 -class KAN(BaseWindows): +class KAN(BaseModel): """KAN Simple Kolmogorov-Arnold Network (KAN). 
@@ -294,10 +294,13 @@ class KAN(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -436,7 +439,7 @@ def regularization_loss(self, regularize_activation=1.0, regularize_entropy=1.0) def forward(self, windows_batch, update_grid=False): - insample_y = windows_batch["insample_y"] + insample_y = windows_batch["insample_y"].squeeze(-1) futr_exog = windows_batch["futr_exog"] hist_exog = windows_batch["hist_exog"] stat_exog = windows_batch["stat_exog"] @@ -466,5 +469,4 @@ def forward(self, windows_batch, update_grid=False): y_pred = layer(y_pred) y_pred = y_pred.reshape(batch_size, self.h, self.loss.outputsize_multiplier) - y_pred = self.loss.domain_map(y_pred) return y_pred diff --git a/neuralforecast/models/lstm.py b/neuralforecast/models/lstm.py index e89db3628..8ad263cc1 100644 --- a/neuralforecast/models/lstm.py +++ b/neuralforecast/models/lstm.py @@ -8,13 +8,14 @@ import torch import torch.nn as nn +import warnings from ..losses.pytorch import MAE -from ..common._base_recurrent import BaseRecurrent +from ..common._base_model import BaseModel from ..common._modules import MLP # %% ../../nbs/models.lstm.ipynb 7 -class LSTM(BaseRecurrent): +class LSTM(BaseModel): """LSTM LSTM encoder, with MLP decoder. @@ -30,7 +31,7 @@ class LSTM(BaseRecurrent): `encoder_hidden_size`: int=200, units for the LSTM's hidden state size.
`encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within LSTM units.<br>
`encoder_dropout`: float=0., dropout regularization applied to LSTM outputs.<br>
- `context_size`: int=10, size of context vector for each timestamp on the forecasting window.<br>
+ `context_size`: deprecated.<br>
`decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.<br>
`decoder_layers`: int=2, number of layers for the MLP decoder.<br>
`futr_exog_list`: str list, future exogenous columns.<br>
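The LSTM hunks below lean on PyTorch's `proj_size` argument for the recurrent path: the encoder itself projects each step down to `loss.outputsize_multiplier`, making a separate decoder unnecessary. A standalone illustration of that PyTorch behavior (dimensions are illustrative):

```python
import torch
import torch.nn as nn

B, T, n_feats, hidden, out_mult = 4, 24, 3, 128, 1
lstm = nn.LSTM(input_size=n_feats, hidden_size=hidden,
               num_layers=2, batch_first=True, proj_size=out_mult)

x = torch.randn(B, T, n_feats)
output, (h_n, c_n) = lstm(x)
print(output.shape)  # torch.Size([4, 24, 1]) -- already n_output wide
```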
@@ -59,26 +60,30 @@ class LSTM(BaseRecurrent): """ # Class attributes - SAMPLING_TYPE = "recurrent" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + True # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, h: int, - input_size: int = -1, - inference_input_size: int = -1, + input_size: int, encoder_n_layers: int = 2, - encoder_hidden_size: int = 200, + encoder_hidden_size: int = 128, encoder_bias: bool = True, encoder_dropout: float = 0.0, - context_size: int = 10, - decoder_hidden_size: int = 200, + context_size: Optional[int] = None, + decoder_hidden_size: int = 128, decoder_layers: int = 2, futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, + recurrent=False, loss=MAE(), valid_loss=None, max_steps: int = 1000, @@ -88,6 +93,10 @@ def __init__( val_check_steps: int = 100, batch_size=32, valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=1024, + start_padding_enabled=False, + step_size: int = 1, scaler_type: str = "robust", random_seed=1, num_workers_loader=0, @@ -99,10 +108,16 @@ def __init__( dataloader_kwargs=None, **trainer_kwargs ): + + self.RECURRENT = recurrent + super(LSTM, self).__init__( h=h, input_size=input_size, - inference_input_size=inference_input_size, + futr_exog_list=futr_exog_list, + hist_exog_list=hist_exog_list, + stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -112,13 +127,14 @@ def __init__( val_check_steps=val_check_steps, batch_size=batch_size, valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, + step_size=step_size, scaler_type=scaler_type, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, + random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - random_seed=random_seed, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, lr_scheduler=lr_scheduler, @@ -134,16 +150,23 @@ def __init__( self.encoder_dropout = encoder_dropout # Context adapter - self.context_size = context_size + if context_size is not None: + warnings.warn( + "context_size is deprecated and will be removed in future versions." 
+ ) # MLP decoder self.decoder_hidden_size = decoder_hidden_size self.decoder_layers = decoder_layers # LSTM input size (1 for target variable y) - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + input_encoder = ( + 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size + ) # Instantiate model + self.rnn_state = None + self.maintain_state = False self.hist_encoder = nn.LSTM( input_size=input_encoder, hidden_size=self.encoder_hidden_size, @@ -151,71 +174,76 @@ def __init__( bias=self.encoder_bias, dropout=self.encoder_dropout, batch_first=True, - ) - - # Context adapter - self.context_adapter = nn.Linear( - in_features=self.encoder_hidden_size + self.futr_exog_size * h, - out_features=self.context_size * h, + proj_size=self.loss.outputsize_multiplier if self.RECURRENT else 0, ) # Decoder MLP - self.mlp_decoder = MLP( - in_features=self.context_size + self.futr_exog_size, - out_features=self.loss.outputsize_multiplier, - hidden_size=self.decoder_hidden_size, - num_layers=self.decoder_layers, - activation="ReLU", - dropout=0.0, - ) + if not self.RECURRENT: + self.mlp_decoder = MLP( + in_features=self.encoder_hidden_size + self.futr_exog_size, + out_features=self.loss.outputsize_multiplier, + hidden_size=self.decoder_hidden_size, + num_layers=self.decoder_layers, + activation="ReLU", + dropout=0.0, + ) def forward(self, windows_batch): # Parse windows_batch encoder_input = windows_batch["insample_y"] # [B, seq_len, 1] - futr_exog = windows_batch["futr_exog"] - hist_exog = windows_batch["hist_exog"] - stat_exog = windows_batch["stat_exog"] + futr_exog = windows_batch["futr_exog"] # [B, seq_len, F] + hist_exog = windows_batch["hist_exog"] # [B, seq_len, X] + stat_exog = windows_batch["stat_exog"] # [B, S] # Concatenate y, historic and static inputs - # [B, C, seq_len, 1] -> [B, seq_len, C] - # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ] batch_size, seq_len = encoder_input.shape[:2] if self.hist_exog_size > 0: - hist_exog = hist_exog.permute(0, 2, 1, 3).squeeze( - -1 - ) # [B, X, seq_len, 1] -> [B, seq_len, X] - encoder_input = torch.cat((encoder_input, hist_exog), dim=2) + encoder_input = torch.cat( + (encoder_input, hist_exog), dim=2 + ) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X] if self.stat_exog_size > 0: + # print(encoder_input.shape) stat_exog = stat_exog.unsqueeze(1).repeat( 1, seq_len, 1 ) # [B, S] -> [B, seq_len, S] - encoder_input = torch.cat((encoder_input, stat_exog), dim=2) - - # RNN forward - hidden_state, _ = self.hist_encoder( - encoder_input - ) # [B, seq_len, rnn_hidden_state] - - if self.futr_exog_size > 0: - futr_exog = futr_exog.permute(0, 2, 3, 1)[ - :, :, 1:, : - ] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F] - hidden_state = torch.cat( - (hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2 - ) - - # Context adapter - context = self.context_adapter(hidden_state) - context = context.reshape(batch_size, seq_len, self.h, self.context_size) + encoder_input = torch.cat( + (encoder_input, stat_exog), dim=2 + ) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S] - # Residual connection with futr_exog if self.futr_exog_size > 0: - context = torch.cat((context, futr_exog), dim=-1) - - # Final forecast - output = self.mlp_decoder(context) - output = self.loss.domain_map(output) - - return output + encoder_input = torch.cat( + (encoder_input, futr_exog[:, :seq_len]), dim=2 + ) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F] + + if self.RECURRENT: + if self.maintain_state: + rnn_state 
= self.rnn_state + else: + rnn_state = None + + output, rnn_state = self.hist_encoder( + encoder_input, rnn_state + ) # [B, seq_len, n_output] + if self.maintain_state: + self.rnn_state = rnn_state + else: + hidden_state, _ = self.hist_encoder( + encoder_input, None + ) # [B, seq_len, rnn_hidden_state] + hidden_state = hidden_state[ + :, -self.h : + ] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state] + + if self.futr_exog_size > 0: + futr_exog_futr = futr_exog[:, -self.h :] # [B, h, F] + hidden_state = torch.cat( + (hidden_state, futr_exog_futr), dim=-1 + ) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F] + + output = self.mlp_decoder( + hidden_state + ) # [B, h, rnn_hidden_state + F] -> [B, seq_len, n_output] + + return output[:, -self.h :] diff --git a/neuralforecast/models/mlp.py b/neuralforecast/models/mlp.py index 0794ac7c3..63e2a5409 100644 --- a/neuralforecast/models/mlp.py +++ b/neuralforecast/models/mlp.py @@ -10,10 +10,10 @@ import torch.nn as nn from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.mlp.ipynb 6 -class MLP(BaseWindows): +class MLP(BaseModel): """MLP Simple Multi Layer Perceptron architecture (MLP). @@ -58,10 +58,13 @@ class MLP(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -158,7 +161,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] + insample_y = windows_batch["insample_y"].squeeze(-1) futr_exog = windows_batch["futr_exog"] hist_exog = windows_batch["hist_exog"] stat_exog = windows_batch["stat_exog"] @@ -187,5 +190,4 @@ def forward(self, windows_batch): y_pred = self.out(y_pred) y_pred = y_pred.reshape(batch_size, self.h, self.loss.outputsize_multiplier) - y_pred = self.loss.domain_map(y_pred) return y_pred diff --git a/neuralforecast/models/mlpmultivariate.py b/neuralforecast/models/mlpmultivariate.py index 7554bb44d..89124c47f 100644 --- a/neuralforecast/models/mlpmultivariate.py +++ b/neuralforecast/models/mlpmultivariate.py @@ -7,11 +7,12 @@ import torch import torch.nn as nn +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel # %% ../../nbs/models.mlpmultivariate.ipynb 6 -class MLPMultivariate(BaseMultivariate): +class MLPMultivariate(BaseModel): """MLPMultivariate Simple Multi Layer Perceptron architecture (MLP) for multivariate forecasting. @@ -37,6 +38,10 @@ class MLPMultivariate(BaseMultivariate): `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
`batch_size`: int=32, number of different series in each batch.<br>
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
+ `windows_batch_size`: int=256, number of windows to sample in each training batch.<br>
+ `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.<br>
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
`step_size`: int=1, step size between each window of temporal data.<br>
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>
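The forward change below replaces the `loss.domain_map` call and the manual `n_series == 1` unsqueeze with a plain reshape to `[B, h, -1]`. A toy check that the reshape keeps the series axis even for a single series:

```python
import torch

B, h, n_series, out_mult = 4, 12, 1, 1
raw = torch.randn(B, h * n_series * out_mult)   # flat MLP head output

forecast = raw.reshape(B, h, -1)   # [B, h, n_series * out_mult]
print(forecast.shape)              # torch.Size([4, 12, 1]), no squeeze to patch
```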
@@ -52,10 +57,13 @@ class MLPMultivariate(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -65,6 +73,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, num_layers=2, hidden_size=1024, loss=MAE(), @@ -75,6 +84,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=256, + inference_windows_batch_size=256, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -96,6 +109,7 @@ def __init__( futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -104,6 +118,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, num_workers_loader=num_workers_loader, @@ -173,12 +191,6 @@ def forward(self, windows_batch): x = torch.relu(layer(x)) x = self.out(x) - x = x.reshape(batch_size, self.h, -1) - forecast = self.loss.domain_map(x) + forecast = x.reshape(batch_size, self.h, -1) - # domain_map might have squeezed the last dimension in case n_series == 1 - # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet. 
- if forecast.ndim == 2: - return forecast.unsqueeze(-1) - else: - return forecast + return forecast diff --git a/neuralforecast/models/nbeats.py b/neuralforecast/models/nbeats.py index 02280fb79..8cf178583 100644 --- a/neuralforecast/models/nbeats.py +++ b/neuralforecast/models/nbeats.py @@ -11,7 +11,7 @@ import torch.nn as nn from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.nbeats.ipynb 7 class IdentityBasis(nn.Module): @@ -189,7 +189,7 @@ def forward(self, insample_y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] return backcast, forecast # %% ../../nbs/models.nbeats.ipynb 9 -class NBEATS(BaseWindows): +class NBEATS(BaseModel): """NBEATS The Neural Basis Expansion Analysis for Time Series (NBEATS), is a simple and yet @@ -241,10 +241,13 @@ class NBEATS(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -406,8 +409,8 @@ def create_stack( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] - insample_mask = windows_batch["insample_mask"] + insample_y = windows_batch["insample_y"].squeeze(-1) + insample_mask = windows_batch["insample_mask"].squeeze(-1) # NBEATS' forward residuals = insample_y.flip(dims=(-1,)) # backcast init @@ -423,9 +426,6 @@ def forward(self, windows_batch): if self.decompose_forecast: block_forecasts.append(block_forecast) - # Adapting output's domain - forecast = self.loss.domain_map(forecast) - if self.decompose_forecast: # (n_batch, n_blocks, h, out_features) block_forecasts = torch.stack(block_forecasts) diff --git a/neuralforecast/models/nbeatsx.py b/neuralforecast/models/nbeatsx.py index 811392a66..403ca53c0 100644 --- a/neuralforecast/models/nbeatsx.py +++ b/neuralforecast/models/nbeatsx.py @@ -11,7 +11,7 @@ import torch.nn as nn from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.nbeatsx.ipynb 8 class IdentityBasis(nn.Module): @@ -274,7 +274,7 @@ def forward( return backcast, forecast # %% ../../nbs/models.nbeatsx.ipynb 10 -class NBEATSx(BaseWindows): +class NBEATSx(BaseModel): """NBEATSx The Neural Basis Expansion Analysis with Exogenous variables (NBEATSx) is a simple @@ -328,10 +328,13 @@ class NBEATSx(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -513,8 +516,8 @@ def create_stack( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] - insample_mask = windows_batch["insample_mask"] + insample_y = windows_batch["insample_y"].squeeze(-1) + insample_mask = windows_batch["insample_mask"].squeeze(-1) futr_exog = windows_batch["futr_exog"] hist_exog = windows_batch["hist_exog"] stat_exog = windows_batch["stat_exog"] @@ -538,9 +541,6 @@ def forward(self, windows_batch): if self.decompose_forecast: block_forecasts.append(block_forecast) - # Adapting output's domain - forecast = 
self.loss.domain_map(forecast) - if self.decompose_forecast: # (n_batch, n_blocks, h) block_forecasts = torch.stack(block_forecasts) diff --git a/neuralforecast/models/nhits.py b/neuralforecast/models/nhits.py index ce5caeaaa..20833a4d7 100644 --- a/neuralforecast/models/nhits.py +++ b/neuralforecast/models/nhits.py @@ -12,7 +12,7 @@ import torch.nn.functional as F from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.nhits.ipynb 8 class _IdentityBasis(nn.Module): @@ -184,7 +184,7 @@ def forward( return backcast, forecast # %% ../../nbs/models.nhits.ipynb 10 -class NHITS(BaseWindows): +class NHITS(BaseModel): """NHITS The Neural Hierarchical Interpolation for Time Series (NHITS), is an MLP-based deep @@ -240,10 +240,13 @@ class NHITS(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -398,8 +401,8 @@ def create_stack( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] - insample_mask = windows_batch["insample_mask"] + insample_y = windows_batch["insample_y"].squeeze(-1).contiguous() + insample_mask = windows_batch["insample_mask"].squeeze(-1).contiguous() futr_exog = windows_batch["futr_exog"] hist_exog = windows_batch["hist_exog"] stat_exog = windows_batch["stat_exog"] @@ -423,9 +426,6 @@ def forward(self, windows_batch): if self.decompose_forecast: block_forecasts.append(block_forecast) - # Adapting output's domain - forecast = self.loss.domain_map(forecast) - if self.decompose_forecast: # (n_batch, n_blocks, h, output_size) block_forecasts = torch.stack(block_forecasts) diff --git a/neuralforecast/models/nlinear.py b/neuralforecast/models/nlinear.py index 4909ddbd3..e0db42273 100644 --- a/neuralforecast/models/nlinear.py +++ b/neuralforecast/models/nlinear.py @@ -8,12 +8,12 @@ import torch.nn as nn -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE # %% ../../nbs/models.nlinear.ipynb 7 -class NLinear(BaseWindows): +class NLinear(BaseModel): """NLinear *Parameters:*
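For context on the NLinear hunks below: the model's core trick, unchanged by this refactor, is last-value normalization around a single linear layer, now fed the squeezed `[B, L]` input. A self-contained sketch:

```python
import torch
import torch.nn as nn

B, L, h = 4, 24, 12
insample_y = torch.randn(B, L, 1).squeeze(-1)    # [B, L], per new convention

last_value = insample_y[:, -1:]                  # [B, 1]
norm_insample_y = insample_y - last_value        # remove the level
linear = nn.Linear(L, h)
forecast = linear(norm_insample_y) + last_value  # restore the level
print(forecast.reshape(B, h, 1).shape)           # torch.Size([4, 12, 1])
```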
@@ -51,10 +51,13 @@ class NLinear(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -132,11 +135,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] - # futr_exog = windows_batch['futr_exog'] + insample_y = windows_batch["insample_y"].squeeze(-1) # Parse inputs batch_size = len(insample_y) @@ -148,5 +147,4 @@ def forward(self, windows_batch): # Final forecast = self.linear(norm_insample_y) + last_value forecast = forecast.reshape(batch_size, self.h, self.loss.outputsize_multiplier) - forecast = self.loss.domain_map(forecast) return forecast diff --git a/neuralforecast/models/patchtst.py b/neuralforecast/models/patchtst.py index 0b2029fd4..9472b8e86 100644 --- a/neuralforecast/models/patchtst.py +++ b/neuralforecast/models/patchtst.py @@ -14,7 +14,7 @@ import torch.nn as nn import torch.nn.functional as F -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..common._modules import RevIN from ..losses.pytorch import MAE @@ -785,7 +785,7 @@ def forward( return output, attn_weights # %% ../../nbs/models.patchtst.ipynb 15 -class PatchTST(BaseWindows): +class PatchTST(BaseModel): """PatchTST The PatchTST model is an efficient Transformer-based model for multivariate time series forecasting. @@ -848,10 +848,13 @@ class PatchTST(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -995,20 +998,10 @@ def __init__( def forward(self, windows_batch): # x: [batch, input_size] # Parse windows_batch - insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] - # futr_exog = windows_batch['futr_exog'] - - # Add dimension for channel - x = insample_y.unsqueeze(-1) # [Ws,L,1] + x = windows_batch["insample_y"] x = x.permute(0, 2, 1) # x: [Batch, 1, input_size] x = self.model(x) - x = x.reshape(x.shape[0], self.h, -1) # x: [Batch, h, c_out] - - # Domain map - forecast = self.loss.domain_map(x) + forecast = x.reshape(x.shape[0], self.h, -1) # x: [Batch, h, c_out] return forecast diff --git a/neuralforecast/models/rmok.py b/neuralforecast/models/rmok.py index 35db80aca..f91d589a5 100644 --- a/neuralforecast/models/rmok.py +++ b/neuralforecast/models/rmok.py @@ -11,8 +11,9 @@ import torch.nn.functional as F from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate -from ..common._modules import RevIN +from ..common._base_model import BaseModel +from ..common._modules import RevINMultivariate +from typing import Optional # %% ../../nbs/models.rmok.ipynb 8 class WaveKANLayer(nn.Module): @@ -256,9 +257,11 @@ def forward(self, x): return y # %% ../../nbs/models.rmok.ipynb 14 -class RMoK(BaseMultivariate): +class 
RMoK(BaseModel):
    """Reversible Mixture of KAN
- **Parameters**<br>
+
+
+ **Parameters:**<br>
`h`: int, Forecast horizon.<br>
`input_size`: int, autoregressive input size, y=[1,2,3,4] input_size=2 -> y_[t-2:t]=[1,2].<br>
`n_series`: int, number of time-series.<br>
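Further down in this file's diff, RMoK's forward blends four experts with a softmax gate via `einsum("BLE, BE -> BL", ...)`. A toy mixture-of-experts sketch of that combination step, with stand-in linear experts (the real model uses KAN layers):

```python
import torch
import torch.nn.functional as F

BN, L, h, n_experts = 8, 24, 12, 4     # BN = batch * n_series, flattened
x = torch.randn(BN, L)

gate = torch.nn.Linear(L, n_experts)
experts = torch.nn.ModuleList(torch.nn.Linear(L, h) for _ in range(n_experts))

score = F.softmax(gate(x), dim=-1)                          # [BN, E]
expert_outputs = torch.stack([e(x) for e in experts], 2)    # [BN, h, E]
y_pred = torch.einsum("BLE,BE->BL", expert_outputs, score)  # [BN, h]
print(y_pred.shape)
```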
@@ -278,6 +281,10 @@ class RMoK(BaseMultivariate):
`early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.<br>
`val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
`batch_size`: int=32, number of different series in each batch.<br>
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
+ `windows_batch_size`: int=1024, number of windows to sample in each training batch.<br>
+ `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
`step_size`: int=1, step size between each window of temporal data.<br>
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>
@@ -291,21 +298,24 @@ class RMoK(BaseMultivariate):
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.<br>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lightning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).<br>
- Reference<br>
- [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu. "KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?"](https://arxiv.org/abs/2408.11306)
+ **References**<br>
+ - [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu. "KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?". arXiv.](https://arxiv.org/abs/2408.11306)<br>
""" # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, h, input_size, - n_series, + n_series: int, futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, @@ -322,6 +332,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=1024, + inference_windows_batch_size=1024, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -350,6 +364,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -376,25 +394,34 @@ def __init__( self.experts = nn.ModuleList( [ TaylorKANLayer( - self.input_size, self.h, order=self.taylor_order, addbias=True + self.input_size, + self.h * self.loss.outputsize_multiplier, + order=self.taylor_order, + addbias=True, + ), + JacobiKANLayer( + self.input_size, + self.h * self.loss.outputsize_multiplier, + degree=self.jacobi_degree, ), - JacobiKANLayer(self.input_size, self.h, degree=self.jacobi_degree), WaveKANLayer( - self.input_size, self.h, wavelet_type=self.wavelet_function + self.input_size, + self.h * self.loss.outputsize_multiplier, + wavelet_type=self.wavelet_function, ), - nn.Linear(self.input_size, self.h), + nn.Linear(self.input_size, self.h * self.loss.outputsize_multiplier), ] ) self.num_experts = len(self.experts) self.gate = nn.Linear(self.input_size, self.num_experts) self.softmax = nn.Softmax(dim=-1) - self.rev = RevIN(self.n_series, affine=self.revin_affine) + self.rev = RevINMultivariate(self.n_series, affine=self.revin_affine) def forward(self, windows_batch): insample_y = windows_batch["insample_y"] B, L, N = insample_y.shape - x = self.rev(insample_y, "norm") if self.rev else insample_y + x = self.rev(insample_y, "norm") x = self.dropout(x).transpose(1, 2).reshape(B * N, L) score = F.softmax(self.gate(x), dim=-1) @@ -403,15 +430,11 @@ def forward(self, windows_batch): ) y_pred = ( - torch.einsum("BLE,BE->BL", expert_outputs, score) - .reshape(B, N, -1) + torch.einsum("BLE, BE -> BL", expert_outputs, score) + .reshape(B, N, self.h * self.loss.outputsize_multiplier) .permute(0, 2, 1) ) y_pred = self.rev(y_pred, "denorm") - y_pred = self.loss.domain_map(y_pred) + y_pred = y_pred.reshape(B, self.h, -1) - # domain_map might have squeezed the last dimension in case n_series == 1 - if y_pred.ndim == 2: - return y_pred.unsqueeze(-1) - else: - return y_pred + return y_pred diff --git a/neuralforecast/models/rnn.py b/neuralforecast/models/rnn.py index f5d60f42a..30c53c15c 100644 --- a/neuralforecast/models/rnn.py +++ b/neuralforecast/models/rnn.py @@ -8,13 +8,14 @@ import torch import torch.nn as nn +import warnings from ..losses.pytorch import MAE -from ..common._base_recurrent import BaseRecurrent +from ..common._base_model import BaseModel from ..common._modules import MLP # %% ../../nbs/models.rnn.ipynb 7 -class RNN(BaseRecurrent): +class RNN(BaseModel): """RNN Multi Layer Elman 
RNN (RNN), with MLP decoder. @@ -31,7 +32,7 @@ class RNN(BaseRecurrent): `encoder_activation`: str=`tanh`, type of RNN activation from `tanh` or `relu`.
`encoder_bias`: bool=True, whether or not to use biases b_ih, b_hh within RNN units.
`encoder_dropout`: float=0., dropout regularization applied to RNN outputs.
- `context_size`: int=10, size of context vector for each timestamp on the forecasting window.
+ `context_size`: deprecated.
`decoder_hidden_size`: int=200, size of hidden layer for the MLP decoder.
`decoder_layers`: int=2, number of layers for the MLP decoder.
`futr_exog_list`: str list, future exogenous columns.
@@ -61,10 +62,13 @@ class RNN(BaseRecurrent): """ # Class attributes - SAMPLING_TYPE = "recurrent" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + True # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -72,16 +76,18 @@ def __init__( input_size: int = -1, inference_input_size: int = -1, encoder_n_layers: int = 2, - encoder_hidden_size: int = 200, + encoder_hidden_size: int = 128, encoder_activation: str = "tanh", encoder_bias: bool = True, encoder_dropout: float = 0.0, - context_size: int = 10, - decoder_hidden_size: int = 200, + context_size: Optional[int] = None, + decoder_hidden_size: int = 128, decoder_layers: int = 2, futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, + recurrent=False, loss=MAE(), valid_loss=None, max_steps: int = 1000, @@ -91,6 +97,10 @@ def __init__( val_check_steps: int = 100, batch_size=32, valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=1024, + start_padding_enabled=False, + step_size: int = 1, scaler_type: str = "robust", random_seed=1, num_workers_loader=0, @@ -102,10 +112,16 @@ def __init__( dataloader_kwargs=None, **trainer_kwargs ): + + self.RECURRENT = recurrent + super(RNN, self).__init__( h=h, input_size=input_size, - inference_input_size=inference_input_size, + futr_exog_list=futr_exog_list, + hist_exog_list=hist_exog_list, + stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -115,13 +131,14 @@ def __init__( val_check_steps=val_check_steps, batch_size=batch_size, valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, + step_size=step_size, scaler_type=scaler_type, - futr_exog_list=futr_exog_list, - hist_exog_list=hist_exog_list, - stat_exog_list=stat_exog_list, + random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - random_seed=random_seed, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, lr_scheduler=lr_scheduler, @@ -137,6 +154,12 @@ def __init__( self.encoder_bias = encoder_bias self.encoder_dropout = encoder_dropout + # Context adapter + if context_size is not None: + warnings.warn( + "context_size is deprecated and will be removed in future versions." 
+ ) + # Context adapter self.context_size = context_size @@ -145,82 +168,96 @@ def __init__( self.decoder_layers = decoder_layers # RNN input size (1 for target variable y) - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + input_encoder = ( + 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size + ) # Instantiate model + self.rnn_state = None + self.maintain_state = False self.hist_encoder = nn.RNN( input_size=input_encoder, hidden_size=self.encoder_hidden_size, num_layers=self.encoder_n_layers, - nonlinearity=self.encoder_activation, bias=self.encoder_bias, dropout=self.encoder_dropout, batch_first=True, ) - # Context adapter - self.context_adapter = nn.Linear( - in_features=self.encoder_hidden_size + self.futr_exog_size * h, - out_features=self.context_size * h, - ) - # Decoder MLP - self.mlp_decoder = MLP( - in_features=self.context_size + self.futr_exog_size, - out_features=self.loss.outputsize_multiplier, - hidden_size=self.decoder_hidden_size, - num_layers=self.decoder_layers, - activation="ReLU", - dropout=0.0, - ) + if self.RECURRENT: + self.proj = nn.Linear( + self.encoder_hidden_size, self.loss.outputsize_multiplier + ) + else: + self.mlp_decoder = MLP( + in_features=self.encoder_hidden_size + self.futr_exog_size, + out_features=self.loss.outputsize_multiplier, + hidden_size=self.decoder_hidden_size, + num_layers=self.decoder_layers, + activation="ReLU", + dropout=0.0, + ) def forward(self, windows_batch): # Parse windows_batch encoder_input = windows_batch["insample_y"] # [B, seq_len, 1] - futr_exog = windows_batch["futr_exog"] - hist_exog = windows_batch["hist_exog"] - stat_exog = windows_batch["stat_exog"] + futr_exog = windows_batch["futr_exog"] # [B, seq_len, F] + hist_exog = windows_batch["hist_exog"] # [B, seq_len, X] + stat_exog = windows_batch["stat_exog"] # [B, S] # Concatenate y, historic and static inputs - # [B, C, seq_len, 1] -> [B, seq_len, C] - # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ] batch_size, seq_len = encoder_input.shape[:2] if self.hist_exog_size > 0: - hist_exog = hist_exog.permute(0, 2, 1, 3).squeeze( - -1 - ) # [B, X, seq_len, 1] -> [B, seq_len, X] - encoder_input = torch.cat((encoder_input, hist_exog), dim=2) + encoder_input = torch.cat( + (encoder_input, hist_exog), dim=2 + ) # [B, seq_len, 1] + [B, seq_len, X] -> [B, seq_len, 1 + X] if self.stat_exog_size > 0: + # print(encoder_input.shape) stat_exog = stat_exog.unsqueeze(1).repeat( 1, seq_len, 1 ) # [B, S] -> [B, seq_len, S] - encoder_input = torch.cat((encoder_input, stat_exog), dim=2) - - # RNN forward - hidden_state, _ = self.hist_encoder( - encoder_input - ) # [B, seq_len, rnn_hidden_state] + encoder_input = torch.cat( + (encoder_input, stat_exog), dim=2 + ) # [B, seq_len, 1 + X] + [B, seq_len, S] -> [B, seq_len, 1 + X + S] if self.futr_exog_size > 0: - futr_exog = futr_exog.permute(0, 2, 3, 1)[ - :, :, 1:, : - ] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F] - hidden_state = torch.cat( - (hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2 - ) + encoder_input = torch.cat( + (encoder_input, futr_exog[:, :seq_len]), dim=2 + ) # [B, seq_len, 1 + X + S] + [B, seq_len, F] -> [B, seq_len, 1 + X + S + F] - # Context adapter - context = self.context_adapter(hidden_state) - context = context.reshape(batch_size, seq_len, self.h, self.context_size) + if self.RECURRENT: + if self.maintain_state: + rnn_state = self.rnn_state + else: + rnn_state = None - # Residual connection with futr_exog - if self.futr_exog_size > 0: - context = torch.cat((context, futr_exog), 
dim=-1) + output, rnn_state = self.hist_encoder( + encoder_input, rnn_state + ) # [B, seq_len, rnn_hidden_state] + output = self.proj( + output + ) # [B, seq_len, rnn_hidden_state] -> [B, seq_len, n_output] + if self.maintain_state: + self.rnn_state = rnn_state + else: + hidden_state, _ = self.hist_encoder( + encoder_input, None + ) # [B, seq_len, rnn_hidden_state] + hidden_state = hidden_state[ + :, -self.h : + ] # [B, seq_len, rnn_hidden_state] -> [B, h, rnn_hidden_state] + + if self.futr_exog_size > 0: + futr_exog_futr = futr_exog[:, -self.h :] # [B, h, F] + hidden_state = torch.cat( + (hidden_state, futr_exog_futr), dim=-1 + ) # [B, h, rnn_hidden_state] + [B, h, F] -> [B, h, rnn_hidden_state + F] - # Final forecast - output = self.mlp_decoder(context) - output = self.loss.domain_map(output) + output = self.mlp_decoder( + hidden_state + ) # [B, h, rnn_hidden_state + F] -> [B, h, n_output] - return output + return output[:, -self.h :] diff --git a/neuralforecast/models/softs.py b/neuralforecast/models/softs.py index cb425200a..c7a1a2a7c 100644 --- a/neuralforecast/models/softs.py +++ b/neuralforecast/models/softs.py @@ -8,8 +8,9 @@ import torch.nn as nn import torch.nn.functional as F +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel from ..common._modules import TransEncoder, TransEncoderLayer # %% ../../nbs/models.softs.ipynb 6 @@ -57,7 +58,7 @@ def forward(self, input, *args, **kwargs): # stochastic pooling if self.training: - ratio = F.softmax(combined_mean, dim=1) + ratio = F.softmax(torch.nan_to_num(combined_mean), dim=1) ratio = ratio.permute(0, 2, 1) ratio = ratio.reshape(-1, channels) indices = torch.multinomial(ratio, 1) @@ -79,7 +80,7 @@ def forward(self, input, *args, **kwargs): return output, None # %% ../../nbs/models.softs.ipynb 10 -class SOFTS(BaseMultivariate): +class SOFTS(BaseModel): """SOFTS **Parameters:**
@@ -103,6 +104,10 @@ class SOFTS(BaseMultivariate): `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int=32, number of different series in each batch.
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
+ `windows_batch_size`: int=256, number of windows to sample in each training batch.
+ `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning by `input_size`.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
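The four batching arguments introduced in this hunk recur across every model migrated to `BaseModel` in this patch. A minimal usage sketch (an illustrative example, not part of the patch), assuming the import paths shown in this diff and a long-format DataFrame `df` with `unique_id`, `ds` and `y` columns:

```python
from neuralforecast.core import NeuralForecast
from neuralforecast.models.softs import SOFTS

# Hyperparameter values below are illustrative only.
model = SOFTS(
    h=12,                              # forecast horizon
    input_size=24,                     # lookback window
    n_series=7,                        # series in the multivariate panel
    valid_batch_size=None,             # None falls back to batch_size
    windows_batch_size=256,            # windows sampled per training batch
    inference_windows_batch_size=256,  # windows per inference batch, -1 uses all
    start_padding_enabled=False,       # zero-pad short series at the start by input_size
    max_steps=100,
)
nf = NeuralForecast(models=[model], freq="M")
nf.fit(df=df)
preds = nf.predict()
```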
@@ -121,10 +126,11 @@ class SOFTS(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True + RECURRENT = False def __init__( self, @@ -134,6 +140,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, hidden_size: int = 512, d_core: int = 512, e_layers: int = 2, @@ -148,6 +155,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=256, + inference_windows_batch_size=256, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -168,6 +179,7 @@ def __init__( stat_exog_list=None, futr_exog_list=None, hist_exog_list=None, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -176,6 +188,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -211,7 +227,9 @@ def __init__( ] ) - self.projection = nn.Linear(hidden_size, self.h, bias=True) + self.projection = nn.Linear( + hidden_size, self.h * self.loss.outputsize_multiplier, bias=True + ) def forecast(self, x_enc): # Normalization from Non-stationary Transformer @@ -230,19 +248,22 @@ def forecast(self, x_enc): # De-Normalization from Non-stationary Transformer if self.use_norm: - dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.h, 1)) - dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.h, 1)) + dec_out = dec_out * ( + stdev[:, 0, :] + .unsqueeze(1) + .repeat(1, self.h * self.loss.outputsize_multiplier, 1) + ) + dec_out = dec_out + ( + means[:, 0, :] + .unsqueeze(1) + .repeat(1, self.h * self.loss.outputsize_multiplier, 1) + ) return dec_out def forward(self, windows_batch): insample_y = windows_batch["insample_y"] y_pred = self.forecast(insample_y) - y_pred = y_pred[:, -self.h :, :] - y_pred = self.loss.domain_map(y_pred) + y_pred = y_pred.reshape(insample_y.shape[0], self.h, -1) - # domain_map might have squeezed the last dimension in case n_series == 1 - if y_pred.ndim == 2: - return y_pred.unsqueeze(-1) - else: - return y_pred + return y_pred diff --git a/neuralforecast/models/stemgnn.py b/neuralforecast/models/stemgnn.py index 85a014e65..b242ad2ce 100644 --- a/neuralforecast/models/stemgnn.py +++ b/neuralforecast/models/stemgnn.py @@ -8,8 +8,9 @@ import torch.nn as nn import torch.nn.functional as F +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel # %% ../../nbs/models.stemgnn.ipynb 7 class GLU(nn.Module): @@ -136,7 +137,7 @@ def forward(self, x, mul_L): return forecast, backcast_source # %% ../../nbs/models.stemgnn.ipynb 9 -class StemGNN(BaseMultivariate): +class StemGNN(BaseModel): """StemGNN The Spectral Temporal Graph Neural Network (`StemGNN`) is a Graph-based multivariate @@ -163,6 +164,10 @@ class StemGNN(BaseMultivariate): `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int, number of windows in each batch.
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
+ `windows_batch_size`: int=1024, number of windows to sample in each training batch.
+ `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning by `input_size`.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int, random_seed for pytorch initializer and numpy generators.
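As in the other migrated models, the string-valued `SAMPLING_TYPE` attribute is replaced by the two booleans `MULTIVARIATE` and `RECURRENT`. A sketch of how downstream code can branch on them; `describe_model` is a hypothetical helper, not part of this patch:

```python
from neuralforecast.models.rnn import RNN
from neuralforecast.models.stemgnn import StemGNN

def describe_model(model_cls) -> str:
    # Reads the two class attributes introduced by this patch.
    kind = "multivariate" if model_cls.MULTIVARIATE else "univariate"
    mode = "recurrent" if model_cls.RECURRENT else "direct"
    return f"{model_cls.__name__}: {kind}, {mode}"

print(describe_model(StemGNN))  # StemGNN: multivariate, direct
print(describe_model(RNN))      # RNN: univariate, recurrent
```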
@@ -178,10 +183,13 @@ class StemGNN(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -191,6 +199,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, n_stacks=2, multi_layer: int = 5, dropout_rate: float = 0.5, @@ -203,6 +212,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=1024, + inference_windows_batch_size=1024, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "robust", random_seed: int = 1, @@ -224,6 +237,7 @@ def __init__( futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -232,6 +246,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, num_workers_loader=num_workers_loader, @@ -370,11 +388,5 @@ def forward(self, windows_batch): forecast = forecast.reshape( batch_size, self.h, self.loss.outputsize_multiplier * self.n_series ) - forecast = self.loss.domain_map(forecast) - # domain_map might have squeezed the last dimension in case n_series == 1 - # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet. - if forecast.ndim == 2: - return forecast.unsqueeze(-1) - else: - return forecast + return forecast diff --git a/neuralforecast/models/tcn.py b/neuralforecast/models/tcn.py index fd900512c..8ac791ef7 100644 --- a/neuralforecast/models/tcn.py +++ b/neuralforecast/models/tcn.py @@ -10,11 +10,11 @@ import torch.nn as nn from ..losses.pytorch import MAE -from ..common._base_recurrent import BaseRecurrent +from ..common._base_model import BaseModel from ..common._modules import MLP, TemporalConvolutionEncoder # %% ../../nbs/models.tcn.ipynb 7 -class TCN(BaseRecurrent): +class TCN(BaseModel): """TCN Temporal Convolution Network (TCN), with MLP decoder. 
@@ -56,10 +56,13 @@ class TCN(BaseRecurrent): """ # Class attributes - SAMPLING_TYPE = "recurrent" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -68,10 +71,10 @@ def __init__( inference_input_size: int = -1, kernel_size: int = 2, dilations: List[int] = [1, 2, 4, 8, 16], - encoder_hidden_size: int = 200, + encoder_hidden_size: int = 128, encoder_activation: str = "ReLU", context_size: int = 10, - decoder_hidden_size: int = 200, + decoder_hidden_size: int = 128, decoder_layers: int = 2, futr_exog_list=None, hist_exog_list=None, @@ -85,6 +88,10 @@ def __init__( val_check_steps: int = 100, batch_size: int = 32, valid_batch_size: Optional[int] = None, + windows_batch_size=128, + inference_windows_batch_size=1024, + start_padding_enabled=False, + step_size: int = 1, scaler_type: str = "robust", random_seed: int = 1, num_workers_loader=0, @@ -109,6 +116,10 @@ def __init__( val_check_steps=val_check_steps, batch_size=batch_size, valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, + step_size=step_size, scaler_type=scaler_type, futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, @@ -139,7 +150,9 @@ def __init__( self.decoder_layers = decoder_layers # TCN input size (1 for target variable y) - input_encoder = 1 + self.hist_exog_size + self.stat_exog_size + input_encoder = ( + 1 + self.hist_exog_size + self.stat_exog_size + self.futr_exog_size + ) # ---------------------------------- Instantiate Model -----------------------------------# # Instantiate historic encoder @@ -152,14 +165,11 @@ def __init__( ) # Context adapter - self.context_adapter = nn.Linear( - in_features=self.encoder_hidden_size + self.futr_exog_size * h, - out_features=self.context_size * h, - ) + self.context_adapter = nn.Linear(in_features=self.input_size, out_features=h) # Decoder MLP self.mlp_decoder = MLP( - in_features=self.context_size + self.futr_exog_size, + in_features=self.encoder_hidden_size + self.futr_exog_size, out_features=self.loss.outputsize_multiplier, hidden_size=self.decoder_hidden_size, num_layers=self.decoder_layers, @@ -170,50 +180,51 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - encoder_input = windows_batch["insample_y"] # [B, seq_len, 1] - futr_exog = windows_batch["futr_exog"] - hist_exog = windows_batch["hist_exog"] - stat_exog = windows_batch["stat_exog"] + encoder_input = windows_batch["insample_y"] # [B, L, 1] + futr_exog = windows_batch["futr_exog"] # [B, L + h, F] + hist_exog = windows_batch["hist_exog"] # [B, L, X] + stat_exog = windows_batch["stat_exog"] # [B, S] # Concatenate y, historic and static inputs - # [B, C, seq_len, 1] -> [B, seq_len, C] - # Contatenate [ Y_t, | X_{t-L},..., X_{t} | S ] - batch_size, seq_len = encoder_input.shape[:2] + batch_size, input_size = encoder_input.shape[:2] if self.hist_exog_size > 0: - hist_exog = hist_exog.permute(0, 2, 1, 3).squeeze( - -1 - ) # [B, X, seq_len, 1] -> [B, seq_len, X] - encoder_input = torch.cat((encoder_input, hist_exog), dim=2) + encoder_input = torch.cat( + (encoder_input, hist_exog), dim=2 + ) # [B, L, 1] + [B, L, X] -> [B, L, 1 + X] if self.stat_exog_size > 0: + # print(encoder_input.shape) stat_exog = stat_exog.unsqueeze(1).repeat( - 
1, seq_len, 1 - ) # [B, S] -> [B, seq_len, S] - encoder_input = torch.cat((encoder_input, stat_exog), dim=2) - - # TCN forward - hidden_state = self.hist_encoder( - encoder_input - ) # [B, seq_len, tcn_hidden_state] + 1, input_size, 1 + ) # [B, S] -> [B, L, S] + encoder_input = torch.cat( + (encoder_input, stat_exog), dim=2 + ) # [B, L, 1 + X] + [B, L, S] -> [B, L, 1 + X + S] if self.futr_exog_size > 0: - futr_exog = futr_exog.permute(0, 2, 3, 1)[ - :, :, 1:, : - ] # [B, F, seq_len, 1+H] -> [B, seq_len, H, F] - hidden_state = torch.cat( - (hidden_state, futr_exog.reshape(batch_size, seq_len, -1)), dim=2 - ) + encoder_input = torch.cat( + (encoder_input, futr_exog[:, :input_size]), dim=2 + ) # [B, L, 1 + X + S] + [B, L, F] -> [B, L, 1 + X + S + F] + + # TCN forward + hidden_state = self.hist_encoder(encoder_input) # [B, L, C] # Context adapter - context = self.context_adapter(hidden_state) - context = context.reshape(batch_size, seq_len, self.h, self.context_size) + hidden_state = hidden_state.permute(0, 2, 1) # [B, L, C] -> [B, C, L] + context = self.context_adapter(hidden_state) # [B, C, L] -> [B, C, h] # Residual connection with futr_exog if self.futr_exog_size > 0: - context = torch.cat((context, futr_exog), dim=-1) + futr_exog_futr = futr_exog[:, input_size:].swapaxes( + 1, 2 + ) # [B, L + h, F] -> [B, F, h] + context = torch.cat( + (context, futr_exog_futr), dim=1 + ) # [B, C, h] + [B, F, h] = [B, C + F, h] + + context = context.swapaxes(1, 2) # [B, C + F, h] -> [B, h, C + F] # Final forecast - output = self.mlp_decoder(context) - output = self.loss.domain_map(output) + output = self.mlp_decoder(context) # [B, h, C + F] -> [B, h, n_output] return output diff --git a/neuralforecast/models/tft.py b/neuralforecast/models/tft.py index f96d5646b..8214fbe15 100644 --- a/neuralforecast/models/tft.py +++ b/neuralforecast/models/tft.py @@ -13,7 +13,7 @@ from torch.nn import LayerNorm import pandas as pd from ..losses.pytorch import MAE -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.tft.ipynb 11 def get_activation_fn(activation_str: str) -> Callable: @@ -419,7 +419,7 @@ def forward(self, temporal_features, ce): return x, atten_vect # %% ../../nbs/models.tft.ipynb 24 -class TFT(BaseWindows): +class TFT(BaseModel): """TFT The Temporal Fusion Transformer architecture (TFT) is a Sequence-to-Sequence @@ -470,10 +470,13 @@ class TFT(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -595,7 +598,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - y_insample = windows_batch["insample_y"][:, :, None] # <- [B,T,1] + y_insample = windows_batch["insample_y"] # <- [B,T,1] futr_exog = windows_batch["futr_exog"] hist_exog = windows_batch["hist_exog"] stat_exog = windows_batch["stat_exog"] @@ -665,7 +668,6 @@ def forward(self, windows_batch): # Adapt output to loss y_hat = self.output_adapter(temporal_features) - y_hat = self.loss.domain_map(y_hat) return y_hat diff --git a/neuralforecast/models/tide.py b/neuralforecast/models/tide.py index ec98c2b13..b5a6f9144 100644 --- a/neuralforecast/models/tide.py +++ b/neuralforecast/models/tide.py @@ -11,7 +11,7 @@ import torch.nn.functional as F from ..losses.pytorch import MAE
-from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel # %% ../../nbs/models.tide.ipynb 8 class MLPResidual(nn.Module): @@ -48,7 +48,7 @@ def forward(self, input): return x # %% ../../nbs/models.tide.ipynb 10 -class TiDE(BaseWindows): +class TiDE(BaseModel): """TiDE Time-series Dense Encoder (`TiDE`) is a MLP-based univariate time-series forecasting model. `TiDE` uses Multi-layer Perceptrons (MLPs) in an encoder-decoder model for long-term time-series forecasting. @@ -94,10 +94,13 @@ class TiDE(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -243,7 +246,7 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch - x = windows_batch["insample_y"].unsqueeze(-1) # [B, L, 1] + x = windows_batch["insample_y"] # [B, L, 1] hist_exog = windows_batch["hist_exog"] # [B, L, X] futr_exog = windows_batch["futr_exog"] # [B, L + h, F] stat_exog = windows_batch["stat_exog"] # [B, S] @@ -313,7 +316,6 @@ def forward(self, windows_batch): x ) # [B, h, temporal_width + decoder_output_dim] -> [B, h, n_outputs] - # Map to output domain - forecast = self.loss.domain_map(x + x_skip) + forecast = x + x_skip return forecast diff --git a/neuralforecast/models/timellm.py b/neuralforecast/models/timellm.py index aa9276f72..3468db9b6 100644 --- a/neuralforecast/models/timellm.py +++ b/neuralforecast/models/timellm.py @@ -7,12 +7,12 @@ import math from typing import Optional +import neuralforecast.losses.pytorch as losses import torch import torch.nn as nn -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..common._modules import RevIN - from ..losses.pytorch import MAE try: @@ -165,7 +165,7 @@ def reprogramming(self, target_embedding, source_embedding, value_embedding): return reprogramming_embedding # %% ../../nbs/models.timellm.ipynb 11 -class TimeLLM(BaseWindows): +class TimeLLM(BaseModel): """TimeLLM Time-LLM is a reprogramming framework to repurpose an off-the-shelf LLM for time series forecasting. @@ -226,10 +226,13 @@ class TimeLLM(BaseWindows): """ - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -309,6 +312,15 @@ def __init__( dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) + if loss.outputsize_multiplier > 1: + raise Exception( + "TimeLLM only supports point loss functions (MAE, MSE, etc) as loss function." + ) + + if valid_loss is not None and not isinstance(valid_loss, losses.BasePointLoss): + raise Exception( + "TimeLLM only supports point loss functions (MAE, MSE, etc) as valid loss function." 
+ ) # Architecture self.patch_len = patch_len @@ -468,12 +480,9 @@ def calcute_lags(self, x_enc): return lags def forward(self, windows_batch): - insample_y = windows_batch["insample_y"] - - x = insample_y.unsqueeze(-1) + x = windows_batch["insample_y"] y_pred = self.forecast(x) y_pred = y_pred[:, -self.h :, :] - y_pred = self.loss.domain_map(y_pred) return y_pred diff --git a/neuralforecast/models/timemixer.py b/neuralforecast/models/timemixer.py index 5585539bd..10dd07222 100644 --- a/neuralforecast/models/timemixer.py +++ b/neuralforecast/models/timemixer.py @@ -11,7 +11,7 @@ import torch import torch.nn as nn -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel from neuralforecast.common._modules import ( PositionalEmbedding, TokenEmbedding, @@ -19,8 +19,8 @@ SeriesDecomp, RevIN, ) - from ..losses.pytorch import MAE +from typing import Optional # %% ../../nbs/models.timemixer.ipynb 6 class DataEmbedding_wo_pos(nn.Module): @@ -249,7 +249,7 @@ def forward(self, x_list): return out_list # %% ../../nbs/models.timemixer.ipynb 12 -class TimeMixer(BaseMultivariate): +class TimeMixer(BaseModel): """TimeMixer **Parameters**
`h`: int, Forecast horizon.
@@ -279,6 +279,10 @@ class TimeMixer(BaseMultivariate): `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int=32, number of different series in each batch.
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
+ `windows_batch_size`: int=256, number of windows to sample in each training batch.
+ `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning by `input_size`.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
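Further down, TimeMixer gains a `distr_output` projection that is only used when `loss.outputsize_multiplier > 1`, i.e. for probabilistic losses that emit several parameters per horizon step. A sketch of what the multiplier means, assuming neuralforecast's `DistributionLoss` API (sizes are illustrative):

```python
import torch
import torch.nn as nn
from neuralforecast.losses.pytorch import DistributionLoss

loss = DistributionLoss(distribution="Normal", level=[80, 90])
print(loss.outputsize_multiplier)  # 2: one output per Normal parameter (loc, scale)

B, h, n_series = 4, 12, 7
# Mirrors the projection added in this diff:
distr_output = nn.Linear(n_series, n_series * loss.outputsize_multiplier)
y_pred = torch.randn(B, h, n_series)  # [B, h, N] point output
y_pred = distr_output(y_pred)         # [B, h, N * n_params]
```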
@@ -293,14 +297,17 @@ class TimeMixer(BaseMultivariate): `**trainer_kwargs`: keyword trainer arguments inherited from [PyTorch Lightning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
- [Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, Jun Zhou."TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting"](https://openreview.net/pdf?id=7oLshfEIC2) + [Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, Jun Zhou."TimeMixer: Decomposable Multiscale Mixing For Time Series Forecasting"](https://openreview.net/pdf?id=7oLshfEIC2)
""" # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -331,6 +338,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=256, + inference_windows_batch_size=256, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -359,6 +370,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -474,6 +489,11 @@ def __init__( ] ) + if self.loss.outputsize_multiplier > 1: + self.distr_output = nn.Linear( + self.n_series, self.n_series * self.loss.outputsize_multiplier + ) + def out_projection(self, dec_out, i, out_res): dec_out = self.projection_layer(dec_out) out_res = out_res.permute(0, 2, 1) @@ -647,10 +667,7 @@ def forward(self, windows_batch): y_pred = self.forecast(insample_y, x_mark_enc, x_mark_dec) y_pred = y_pred[:, -self.h :, :] - y_pred = self.loss.domain_map(y_pred) + if self.loss.outputsize_multiplier > 1: + y_pred = self.distr_output(y_pred) - # domain_map might have squeezed the last dimension in case n_series == 1 - if y_pred.ndim == 2: - return y_pred.unsqueeze(-1) - else: - return y_pred + return y_pred diff --git a/neuralforecast/models/timesnet.py b/neuralforecast/models/timesnet.py index aab548382..24dde3ecd 100644 --- a/neuralforecast/models/timesnet.py +++ b/neuralforecast/models/timesnet.py @@ -12,7 +12,7 @@ import torch.fft from ..common._modules import DataEmbedding -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE @@ -119,7 +119,7 @@ def forward(self, x): return res # %% ../../nbs/models.timesnet.ipynb 10 -class TimesNet(BaseWindows): +class TimesNet(BaseModel): """TimesNet The TimesNet univariate model tackles the challenge of modeling multiple intraperiod and interperiod temporal variations. 
@@ -199,10 +199,13 @@ class TimesNet(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -309,13 +312,9 @@ def forward(self, windows_batch): # Parse windows_batch insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] futr_exog = windows_batch["futr_exog"] # Parse inputs - insample_y = insample_y.unsqueeze(-1) # [Ws,L,1] if self.futr_exog_size > 0: x_mark_enc = futr_exog[:, : self.input_size, :] else: @@ -332,5 +331,5 @@ def forward(self, windows_batch): # project back dec_out = self.projection(enc_out) - forecast = self.loss.domain_map(dec_out[:, -self.h :]) + forecast = dec_out[:, -self.h :] return forecast diff --git a/neuralforecast/models/tsmixer.py b/neuralforecast/models/tsmixer.py index 0d68e1e4c..46dd6d908 100644 --- a/neuralforecast/models/tsmixer.py +++ b/neuralforecast/models/tsmixer.py @@ -1,15 +1,16 @@ # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/models.tsmixer.ipynb. # %% auto 0 -__all__ = ['TemporalMixing', 'FeatureMixing', 'MixingLayer', 'ReversibleInstanceNorm1d', 'TSMixer'] +__all__ = ['TemporalMixing', 'FeatureMixing', 'MixingLayer', 'TSMixer'] # %% ../../nbs/models.tsmixer.ipynb 5 -import torch import torch.nn as nn import torch.nn.functional as F +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel +from ..common._modules import RevINMultivariate # %% ../../nbs/models.tsmixer.ipynb 8 class TemporalMixing(nn.Module): @@ -93,44 +94,7 @@ def forward(self, input): return x # %% ../../nbs/models.tsmixer.ipynb 10 -class ReversibleInstanceNorm1d(nn.Module): - """ - ReversibleInstanceNorm1d - """ - - def __init__(self, n_series, eps=1e-5): - super().__init__() - self.weight = nn.Parameter(torch.ones((1, 1, n_series))) - self.bias = nn.Parameter(torch.zeros((1, 1, n_series))) - - self.eps = eps - - def forward(self, x): - # Batch statistics - self.batch_mean = torch.mean(x, axis=1, keepdim=True).detach() - self.batch_std = torch.sqrt( - torch.var(x, axis=1, keepdim=True, unbiased=False) + self.eps - ).detach() - - # Instance normalization - x = x - self.batch_mean - x = x / self.batch_std - x = x * self.weight - x = x + self.bias - - return x - - def reverse(self, x): - # Reverse the normalization - x = x - self.bias - x = x / self.weight - x = x * self.batch_std - x = x + self.batch_mean - - return x - -# %% ../../nbs/models.tsmixer.ipynb 12 -class TSMixer(BaseMultivariate): +class TSMixer(BaseModel): """TSMixer Time-Series Mixer (`TSMixer`) is an MLP-based multivariate time-series forecasting model. `TSMixer` jointly learns temporal and cross-sectional representations of the time-series by repeatedly combining time- and feature information using stacked mixing layers. A mixing layer consists of a sequential time- and feature Multi Layer Perceptron (`MLP`). @@ -154,6 +118,10 @@ class TSMixer(BaseMultivariate):
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int=32, number of different series in each batch.
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
+ `windows_batch_size`: int=256, number of windows to sample in each training batch.
+ `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning by `input_size`.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
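The hand-rolled `ReversibleInstanceNorm1d` removed above is replaced by the shared `RevINMultivariate` module, which folds the former `forward`/`reverse` pair into a single call with a mode flag. A minimal sketch of the call convention as used throughout this patch:

```python
import torch
from neuralforecast.common._modules import RevINMultivariate

rev = RevINMultivariate(num_features=7, affine=True)  # one channel per series

x = torch.randn(4, 24, 7)      # [B, L, N] encoder input
x = rev(x, "norm")             # compute and store per-series statistics, normalize
y_hat = torch.randn(4, 12, 7)  # [B, h, N] model output
y_hat = rev(y_hat, "denorm")   # restore the original scale
```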
@@ -173,10 +141,13 @@ class TSMixer(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = False EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -186,6 +157,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, n_block=2, ff_dim=64, dropout=0.9, @@ -198,6 +170,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=256, + inference_windows_batch_size=256, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -219,6 +195,7 @@ def __init__( futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -227,6 +204,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -243,7 +224,7 @@ def __init__( # Reversible InstanceNormalization layer self.revin = revin if self.revin: - self.norm = ReversibleInstanceNorm1d(n_series=n_series) + self.norm = RevINMultivariate(num_features=n_series, affine=True) # Mixing layers mixing_layers = [ @@ -266,22 +247,16 @@ def forward(self, windows_batch): # TSMixer: InstanceNorm + Mixing layers + Dense output layer + ReverseInstanceNorm if self.revin: - x = self.norm(x) + x = self.norm(x, "norm") x = self.mixing_layers(x) x = x.permute(0, 2, 1) x = self.out(x) x = x.permute(0, 2, 1) if self.revin: - x = self.norm.reverse(x) + x = self.norm(x, "denorm") x = x.reshape( batch_size, self.h, self.loss.outputsize_multiplier * self.n_series ) - forecast = self.loss.domain_map(x) - - # domain_map might have squeezed the last dimension in case n_series == 1 - # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet. - if forecast.ndim == 2: - return forecast.unsqueeze(-1) - else: - return forecast + + return x diff --git a/neuralforecast/models/tsmixerx.py b/neuralforecast/models/tsmixerx.py index 24897d442..61eb55e68 100644 --- a/neuralforecast/models/tsmixerx.py +++ b/neuralforecast/models/tsmixerx.py @@ -8,8 +8,10 @@ import torch.nn as nn import torch.nn.functional as F +from typing import Optional from ..losses.pytorch import MAE -from ..common._base_multivariate import BaseMultivariate +from ..common._base_model import BaseModel +from ..common._modules import RevINMultivariate # %% ../../nbs/models.tsmixerx.ipynb 8 class TemporalMixing(nn.Module): @@ -158,7 +160,7 @@ def reverse(self, x): return x # %% ../../nbs/models.tsmixerx.ipynb 12 -class TSMixerx(BaseMultivariate): +class TSMixerx(BaseModel): """TSMixerx Time-Series Mixer exogenous (`TSMixerx`) is a MLP-based multivariate time-series forecasting model, with capability for additional exogenous inputs. 
`TSMixerx` jointly learns temporal and cross-sectional representations of the time-series by repeatedly combining time- and feature information using stacked mixing layers. A mixing layer consists of a sequential time- and feature Multi Layer Perceptron (`MLP`). @@ -182,6 +184,10 @@ class TSMixerx(BaseMultivariate): `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int=32, number of different series in each batch.
+ `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
+ `windows_batch_size`: int=256, number of windows to sample in each training batch.
+ `inference_windows_batch_size`: int=256, number of windows to sample in each inference batch, -1 uses all.
+ `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning by `input_size`.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
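For probabilistic losses the TSMixerx head emits `N * n_outputs` values per step, so the rewritten forward below folds the forecast before denormalization and unfolds it afterwards. A shape-only sketch of that pattern with illustrative sizes (the commented line stands in for the RevIN call):

```python
import torch

B, h, N, n_outputs = 4, 12, 7, 2                # illustrative sizes
forecast = torch.randn(B, h, N * n_outputs)     # raw head output

folded = forecast.reshape(B, h * n_outputs, N)  # expose N as the channel axis
# folded = self.norm(folded, "denorm")          # per-series denormalization
unfolded = folded.reshape(B, h, -1)             # back to [B, h, n_outputs * N]
assert unfolded.shape == (B, h, n_outputs * N)
```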
@@ -201,10 +207,13 @@ class TSMixerx(BaseMultivariate): """ # Class attributes - SAMPLING_TYPE = "multivariate" EXOGENOUS_FUTR = True EXOGENOUS_HIST = True EXOGENOUS_STAT = True + MULTIVARIATE = True # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -214,6 +223,7 @@ def __init__( futr_exog_list=None, hist_exog_list=None, stat_exog_list=None, + exclude_insample_y=False, n_block=2, ff_dim=64, dropout=0.0, @@ -226,6 +236,10 @@ def __init__( early_stop_patience_steps: int = -1, val_check_steps: int = 100, batch_size: int = 32, + valid_batch_size: Optional[int] = None, + windows_batch_size=256, + inference_windows_batch_size=256, + start_padding_enabled=False, step_size: int = 1, scaler_type: str = "identity", random_seed: int = 1, @@ -247,6 +261,7 @@ def __init__( futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, + exclude_insample_y=exclude_insample_y, loss=loss, valid_loss=valid_loss, max_steps=max_steps, @@ -255,6 +270,10 @@ def __init__( early_stop_patience_steps=early_stop_patience_steps, val_check_steps=val_check_steps, batch_size=batch_size, + valid_batch_size=valid_batch_size, + windows_batch_size=windows_batch_size, + inference_windows_batch_size=inference_windows_batch_size, + start_padding_enabled=start_padding_enabled, step_size=step_size, scaler_type=scaler_type, random_seed=random_seed, @@ -270,7 +289,7 @@ def __init__( # Reversible InstanceNormalization layer self.revin = revin if self.revin: - self.norm = ReversibleInstanceNorm1d(n_series=n_series) + self.norm = RevINMultivariate(num_features=n_series, affine=True) # Forecast horizon self.h = h @@ -358,12 +377,12 @@ def forward(self, windows_batch): stat_exog = windows_batch["stat_exog"] # [N, stat_exog_size (S)] batch_size, input_size = x.shape[:2] - # Add channel dimension to x - x = x.unsqueeze(1) # [B, L, N] -> [B, 1, L, N] - # Apply revin to x if self.revin: - x = self.norm(x) # [B, 1, L, N] -> [B, 1, L, N] + x = self.norm(x, mode="norm") # [B, L, N] -> [B, L, N] + + # Add channel dimension to x + x = x.unsqueeze(1) # [B, L, N] -> [B, 1, L, N] # Concatenate x with historical exogenous if self.hist_exog_size > 0: @@ -430,24 +449,16 @@ def forward(self, windows_batch): x = self.mixing_block(x) # [B, h, ff_dim] -> [B, h, ff_dim] # Fully connected output layer - x = self.out(x) # [B, h, ff_dim] -> [B, h, N * n_outputs] + forecast = self.out(x) # [B, h, ff_dim] -> [B, h, N * n_outputs] # Reverse Instance Normalization on output if self.revin: - x = x.reshape( - batch_size, self.h, self.loss.outputsize_multiplier, -1 - ) # [B, h, N * n_outputs] -> [B, h, n_outputs, N] - x = self.norm.reverse(x) - x = x.reshape( + forecast = forecast.reshape( + batch_size, self.h * self.loss.outputsize_multiplier, -1 + ) # [B, h, N * n_outputs] -> [B, h * n_outputs, N] + forecast = self.norm(forecast, "denorm") + forecast = forecast.reshape( batch_size, self.h, -1 - ) # [B, h, n_outputs, N] -> [B, h, n_outputs * N] + ) # [B, h * n_outputs, N] -> [B, h, n_outputs * N] - # Map to loss domain - forecast = self.loss.domain_map(x) - - # domain_map might have squeezed the last dimension in case n_series == 1 - # Note that this fails in case of a tuple loss, but Multivariate does not support tuple losses yet. 
- if forecast.ndim == 2: - return forecast.unsqueeze(-1) - else: - return forecast + return forecast diff --git a/neuralforecast/models/vanillatransformer.py b/neuralforecast/models/vanillatransformer.py index 69fcc9c4d..7cf9ec714 100644 --- a/neuralforecast/models/vanillatransformer.py +++ b/neuralforecast/models/vanillatransformer.py @@ -19,7 +19,7 @@ DataEmbedding, AttentionLayer, ) -from ..common._base_windows import BaseWindows +from ..common._base_model import BaseModel from ..losses.pytorch import MAE @@ -73,7 +73,7 @@ def forward(self, queries, keys, values, attn_mask): return (V.contiguous(), None) # %% ../../nbs/models.vanillatransformer.ipynb 10 -class VanillaTransformer(BaseWindows): +class VanillaTransformer(BaseModel): """VanillaTransformer Vanilla Transformer, following implementation of the Informer paper, used as baseline. @@ -129,10 +129,13 @@ class VanillaTransformer(BaseWindows): """ # Class attributes - SAMPLING_TYPE = "windows" EXOGENOUS_FUTR = True EXOGENOUS_HIST = False EXOGENOUS_STAT = False + MULTIVARIATE = False # If the model produces multivariate forecasts (True) or univariate (False) + RECURRENT = ( + False # If the model produces forecasts recursively (True) or direct (False) + ) def __init__( self, @@ -293,14 +296,8 @@ def __init__( def forward(self, windows_batch): # Parse windows_batch insample_y = windows_batch["insample_y"] - # insample_mask = windows_batch['insample_mask'] - # hist_exog = windows_batch['hist_exog'] - # stat_exog = windows_batch['stat_exog'] - futr_exog = windows_batch["futr_exog"] - insample_y = insample_y.unsqueeze(-1) # [Ws,L,1] - if self.futr_exog_size > 0: x_mark_enc = futr_exog[:, : self.input_size, :] x_mark_dec = futr_exog[:, -(self.label_len + self.h) :, :] @@ -317,5 +314,5 @@ def __init__( dec_out = self.dec_embedding(x_dec, x_mark_dec) dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None) - forecast = self.loss.domain_map(dec_out[:, -self.h :]) + forecast = dec_out[:, -self.h :] return forecast diff --git a/neuralforecast/utils.py b/neuralforecast/utils.py index 4a272dfcb..ab3ff1d5e 100644 --- a/neuralforecast/utils.py +++ b/neuralforecast/utils.py @@ -6,17 +6,16 @@ 'HourOfDay', 'DayOfWeek', 'DayOfMonth', 'DayOfYear', 'MonthOfYear', 'WeekOfYear', 'time_features_from_frequency_str', 'augment_calendar_df', 'get_indexer_raise_missing', 'PredictionIntervals', 'add_conformal_distribution_intervals', 'add_conformal_error_intervals', - 'get_prediction_interval_method'] + 'get_prediction_interval_method', 'level_to_quantiles', 'quantiles_to_level'] # %% ../nbs/utils.ipynb 3 import random from itertools import chain -from typing import List, Union +from typing import List, Union, Optional, Tuple from utilsforecast.compat import DFType import numpy as np import pandas as pd -import utilsforecast.processing as ufp # %% ../nbs/utils.ipynb 6 def generate_series( @@ -484,77 +483,113 @@ def __repr__(self): # %% ../nbs/utils.ipynb 32 def add_conformal_distribution_intervals( - fcst_df: DFType, + model_fcsts: np.array, cs_df: DFType, - model_names: List[str], - level: List[Union[int, float]], + model: str, cs_n_windows: int, n_series: int, horizon: int, -) -> DFType: + level: Optional[List[Union[int, float]]] = None, + quantiles: Optional[List[float]] = None, +) -> Tuple[np.array, List[str]]: """ Adds conformal intervals to a `fcst_df` based on conformal scores `cs_df`. `level` should be already sorted. This strategy creates forecast paths based on errors and calculates quantiles using those paths.
""" - fcst_df = ufp.copy_if_pandas(fcst_df, deep=False) - alphas = [100 - lv for lv in level] - cuts = [alpha / 200 for alpha in reversed(alphas)] - cuts.extend(1 - alpha / 200 for alpha in alphas) - for model in model_names: - scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon) - scores = scores.transpose(1, 0, 2) - # restrict scores to horizon - scores = scores[:, :, :horizon] - mean = fcst_df[model].to_numpy().reshape(1, n_series, -1) - scores = np.vstack([mean - scores, mean + scores]) - quantiles = np.quantile( - scores, - cuts, - axis=0, - ) - quantiles = quantiles.reshape(len(cuts), -1).T + assert ( + level is not None or quantiles is not None + ), "Either level or quantiles must be provided" + + if quantiles is None and level is not None: + alphas = [100 - lv for lv in level] + cuts = [alpha / 200 for alpha in reversed(alphas)] + cuts.extend(1 - alpha / 200 for alpha in alphas) + elif quantiles is not None: + cuts = quantiles + + scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon) + scores = scores.transpose(1, 0, 2) + # restrict scores to horizon + scores = scores[:, :, :horizon] + mean = model_fcsts.reshape(1, n_series, -1) + scores = np.vstack([mean - scores, mean + scores]) + scores_quantiles = np.quantile( + scores, + cuts, + axis=0, + ) + scores_quantiles = scores_quantiles.reshape(len(cuts), -1).T + if quantiles is None and level is not None: lo_cols = [f"{model}-lo-{lv}" for lv in reversed(level)] hi_cols = [f"{model}-hi-{lv}" for lv in level] out_cols = lo_cols + hi_cols - fcst_df = ufp.assign_columns(fcst_df, out_cols, quantiles) - return fcst_df + elif quantiles is not None: + out_cols = [f"{model}-ql{q}" for q in quantiles] + + fcsts_with_intervals = np.hstack([model_fcsts, scores_quantiles]) + + return fcsts_with_intervals, out_cols # %% ../nbs/utils.ipynb 33 def add_conformal_error_intervals( - fcst_df: DFType, + model_fcsts: np.array, cs_df: DFType, - model_names: List[str], - level: List[Union[int, float]], + model: str, cs_n_windows: int, n_series: int, horizon: int, -) -> DFType: + level: Optional[List[Union[int, float]]] = None, + quantiles: Optional[List[float]] = None, +) -> Tuple[np.array, List[str]]: """ Adds conformal intervals to a `fcst_df` based on conformal scores `cs_df`. `level` should be already sorted. This strategy creates prediction intervals based on the absolute errors.
""" - fcst_df = ufp.copy_if_pandas(fcst_df, deep=False) - cuts = [lv / 100 for lv in level] - for model in model_names: - mean = fcst_df[model].to_numpy().ravel() - scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon) - scores = scores.transpose(1, 0, 2) - # restrict scores to horizon - scores = scores[:, :, :horizon] - quantiles = np.quantile( - scores, - cuts, - axis=0, - ) - quantiles = quantiles.reshape(len(cuts), -1) + assert ( + level is not None or quantiles is not None + ), "Either level or quantiles must be provided" + + if quantiles is None and level is not None: + cuts = [lv / 100 for lv in level] + elif quantiles is not None: + cuts = quantiles + + mean = model_fcsts.ravel() + scores = cs_df[model].to_numpy().reshape(n_series, cs_n_windows, horizon) + scores = scores.transpose(1, 0, 2) + # restrict scores to horizon + scores = scores[:, :, :horizon] + scores_quantiles = np.quantile( + scores, + cuts, + axis=0, + ) + scores_quantiles = scores_quantiles.reshape(len(cuts), -1) + if quantiles is None and level is not None: lo_cols = [f"{model}-lo-{lv}" for lv in reversed(level)] hi_cols = [f"{model}-hi-{lv}" for lv in level] - quantiles = np.vstack([mean - quantiles[::-1], mean + quantiles]).T - columns = lo_cols + hi_cols - fcst_df = ufp.assign_columns(fcst_df, columns, quantiles) - return fcst_df + out_cols = lo_cols + hi_cols + scores_quantiles = np.vstack( + [mean - scores_quantiles[::-1], mean + scores_quantiles] + ).T + elif quantiles is not None: + out_cols = [] + scores_quantiles_ls = [] + for i, q in enumerate(quantiles): + out_cols.append(f"{model}-ql{q}") + if q < 0.5: + scores_quantiles_ls.append(mean - scores_quantiles[::-1][i]) + elif q > 0.5: + scores_quantiles_ls.append(mean + scores_quantiles[i]) + else: + scores_quantiles_ls.append(mean) + scores_quantiles = np.vstack(scores_quantiles_ls).T + + fcsts_with_intervals = np.hstack([model_fcsts, scores_quantiles]) + + return fcsts_with_intervals, out_cols # %% ../nbs/utils.ipynb 34 def get_prediction_interval_method(method: str): @@ -568,3 +603,30 @@ def get_prediction_interval_method(method: str): f'please choose one of {", ".join(available_methods.keys())}' ) return available_methods[method] + +# %% ../nbs/utils.ipynb 35 +def level_to_quantiles(level: List[Union[int, float]]) -> List[float]: + """ + Converts a list of levels to a list of quantiles. + """ + level_set = set(level) + return sorted( + list( + set(sum([[(50 - l / 2) / 100, (50 + l / 2) / 100] for l in level_set], [])) + ) + ) + + +def quantiles_to_level(quantiles: List[float]) -> List[Union[int, float]]: + """ + Converts a list of quantiles to a list of levels. + """ + quantiles_set = set(quantiles) + return sorted( + set( + [ + int(round(100 - 200 * (q * (q < 0.5) + (1 - q) * (q >= 0.5)), 2)) + for q in quantiles_set + ] + ) + )