"""
The implementation of DLinear for the partially-observed time-series imputation task.
"""
# Created by Wenjie Du <wenjay.du@gmail.com>
# License: BSD-3-Clause
from typing import Union, Optional
import numpy as np
import torch
from torch.utils.data import DataLoader
from .core import _DLinear
from .data import DatasetForDLinear
from ..base import BaseNNImputer
from ...data.checking import key_in_data_set
from ...data.dataset import BaseDataset
from ...nn.modules.loss import Criterion, MAE, MSE
from ...optim.adam import Adam
from ...optim.base import Optimizer


class DLinear(BaseNNImputer):
"""The PyTorch implementation of the DLinear model.
DLinear is originally proposed by Zeng et al. in :cite:`zeng2023dlinear`.
Parameters
----------
n_steps :
The number of time steps in the time-series data sample.
n_features :
The number of features in the time-series data sample.
    moving_avg_window_size :
        The window size of the moving average used to decompose the input series
        into trend and seasonal components.
individual :
Whether to make a linear layer for each variate/channel/feature individually.
    d_model :
        The dimension of the space in which the time-series data will be embedded and modeled.
        It is necessary only for DLinear in the non-individual mode.
    ORT_weight :
        The weight for the ORT loss, defined the same as in SAITS.
    MIT_weight :
        The weight for the MIT loss, defined the same as in SAITS.
batch_size :
The batch size for training and evaluating the model.
epochs :
The number of epochs for training the model.
    patience :
        The patience for the early-stopping mechanism. Given a positive integer, training
        will be stopped once the model does not perform better after that number of epochs.
        Leaving it as the default None disables early stopping.
    training_loss :
        The customized loss function designed by users for training the model.
        If not given, will use the default loss as described in the original paper.
    validation_metric :
        The customized metric function designed by users for validating the model.
        If not given, will use the default MSE metric.
optimizer :
The optimizer for model training.
If not given, will use a default Adam optimizer.
num_workers :
The number of subprocesses to use for data loading.
`0` means data loading will be in the main process, i.e. there won't be subprocesses.
    device :
        The device for the model to run on. It can be a string, a :class:`torch.device` object,
        or a list of them. If not given, will try to use CUDA devices first (the default CUDA
        device if there are multiple), then fall back to CPUs. If given a list of devices, e.g.
        ``['cuda:0', 'cuda:1']`` or ``[torch.device('cuda:0'), torch.device('cuda:1')]``, the
        model will be trained in parallel on the multiple devices (so far only parallel training
        on CUDA devices is supported). Other devices like Google TPU and the Apple Silicon
        accelerator MPS may be supported in the future.
    saving_path :
        The path for automatically saving model checkpoints and tensorboard files (i.e. loss
        values recorded during training). Will not save if not given.
    model_saving_strategy :
        The strategy to save model checkpoints. It has to be one of [None, "best", "better", "all"].
        No model will be saved when it is set as None.
        The "best" strategy will only save the best model after training finishes.
        The "better" strategy will save the model during training whenever it performs better
        than in previous epochs.
        The "all" strategy will save the model after every training epoch.
verbose :
Whether to print out the training logs during the training process.
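
    Examples
    --------
    A minimal usage sketch on random toy data. The import path and the dict-based
    data format (arrays of shape ``(n_samples, n_steps, n_features)`` with missing
    values as NaN) follow the usual PyPOTS convention and are assumptions here:

    >>> import numpy as np
    >>> from pypots.imputation import DLinear  # assumed public import path
    >>> X = np.random.randn(64, 24, 5)
    >>> X[X < -1.5] = np.nan  # introduce some missing values
    >>> model = DLinear(n_steps=24, n_features=5, moving_avg_window_size=5, d_model=64, epochs=5)
    >>> model.fit({"X": X})
    >>> imputed = model.impute({"X": X})  # imputed.shape == (64, 24, 5)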
"""

    def __init__(
self,
n_steps: int,
n_features: int,
moving_avg_window_size: int,
individual: bool = False,
d_model: Optional[int] = None,
ORT_weight: float = 1,
MIT_weight: float = 1,
batch_size: int = 32,
epochs: int = 100,
patience: Optional[int] = None,
training_loss: Criterion = MAE(),
validation_metric: Criterion = MSE(),
optimizer: Optimizer = Adam(),
num_workers: int = 0,
device: Optional[Union[str, torch.device, list]] = None,
        saving_path: Optional[str] = None,
model_saving_strategy: Optional[str] = "best",
verbose: bool = True,
):
super().__init__(
batch_size=batch_size,
epochs=epochs,
patience=patience,
training_loss=training_loss,
validation_metric=validation_metric,
num_workers=num_workers,
device=device,
saving_path=saving_path,
model_saving_strategy=model_saving_strategy,
verbose=verbose,
)
self.n_steps = n_steps
self.n_features = n_features
        # model hyperparameters
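        # DLinear decomposes each input series into a trend component, obtained with a
        # moving average of this window size, and a seasonal remainder, and models each
        # component with a linear layer (see the `_DLinear` core module)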
self.moving_avg_window_size = moving_avg_window_size
self.individual = individual
self.d_model = d_model
self.ORT_weight = ORT_weight
self.MIT_weight = MIT_weight
# set up the model
self.model = _DLinear(
self.n_steps,
self.n_features,
self.moving_avg_window_size,
self.individual,
self.d_model,
self.ORT_weight,
self.MIT_weight,
self.training_loss,
)
self._send_model_to_given_device()
self._print_model_size()
# set up the optimizer
self.optimizer = optimizer
self.optimizer.init_optimizer(self.model.parameters())

    def _assemble_input_for_training(self, data: list) -> dict:
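        # each training batch is a tuple (indices, X, missing_mask, X_ori, indicating_mask):
        # X is the input with artificially-masked values removed, X_ori holds the values
        # before artificial masking, and indicating_mask marks those masked positions,
        # which is where the MIT (imputation) loss is computed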
(
indices,
X,
missing_mask,
X_ori,
indicating_mask,
) = self._send_data_to_given_device(data)
inputs = {
"X": X,
"missing_mask": missing_mask,
"X_ori": X_ori,
"indicating_mask": indicating_mask,
}
return inputs

    def _assemble_input_for_validating(self, data: list) -> dict:
return self._assemble_input_for_training(data)

    def _assemble_input_for_testing(self, data: list) -> dict:
indices, X, missing_mask = self._send_data_to_given_device(data)
inputs = {
"X": X,
"missing_mask": missing_mask,
}
return inputs

    def fit(
self,
train_set: Union[dict, str],
val_set: Optional[Union[dict, str]] = None,
file_type: str = "hdf5",
) -> None:
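        """Train the model on the given training set (and optionally validate it on ``val_set``).

        Parameters
        ----------
        train_set :
            The dataset for model training, either a dict containing the data arrays
            or the path to a data file of the type specified by ``file_type``.
        val_set :
            The dataset for model validation. If given, it must contain ``X_ori``,
            which is required for computing the validation metric on the held-out values.
        file_type :
            The type of the given file(s) if ``train_set`` or ``val_set`` is a path.
        """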
# Step 1: wrap the input data with classes Dataset and DataLoader
training_set = DatasetForDLinear(train_set, return_X_ori=False, return_y=False, file_type=file_type)
training_loader = DataLoader(
training_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
val_loader = None
if val_set is not None:
if not key_in_data_set("X_ori", val_set):
raise ValueError("val_set must contain 'X_ori' for model validation.")
val_set = DatasetForDLinear(val_set, return_X_ori=True, return_y=False, file_type=file_type)
val_loader = DataLoader(
val_set,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
# Step 2: train the model and freeze it
self._train_model(training_loader, val_loader)
self.model.load_state_dict(self.best_model_dict)
# Step 3: save the model if necessary
self._auto_save_model_if_necessary(confirm_saving=self.model_saving_strategy == "best")

    @torch.no_grad()
def predict(
self,
test_set: Union[dict, str],
file_type: str = "hdf5",
) -> dict:
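        """Impute the missing values in the given test set and return the results as a dict.

        Parameters
        ----------
        test_set :
            The dataset for model inference, either a dict containing the data arrays
            or the path to a data file of the type specified by ``file_type``.
        file_type :
            The type of the given file if ``test_set`` is a path.

        Returns
        -------
        result_dict :
            A dict with the key ``"imputation"``, whose value is the imputed data
            as an array of shape (n_samples, n_steps, n_features).
        """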
# Step 1: wrap the input data with classes Dataset and DataLoader
test_set = BaseDataset(
test_set,
return_X_ori=False,
return_X_pred=False,
return_y=False,
file_type=file_type,
)
test_loader = DataLoader(
test_set,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
        imputation_collector = []

        # Step 2: process the data with the model
        self.model.eval()  # make sure the model is in evaluation mode during inference
        for data in test_loader:
            inputs = self._assemble_input_for_testing(data)
            results = self.model.forward(inputs)
            imputation_collector.append(results["imputed_data"])
# Step 3: output collection and return
imputation = torch.cat(imputation_collector).cpu().detach().numpy()
result_dict = {
"imputation": imputation,
}
return result_dict

    def impute(
self,
test_set: Union[dict, str],
file_type: str = "hdf5",
) -> np.ndarray:
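        """A convenience wrapper around :meth:`predict` that returns only the imputed data.

        Returns
        -------
        np.ndarray
            The imputed data, an array of shape (n_samples, n_steps, n_features).
        """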
result_dict = self.predict(test_set, file_type=file_type)
return result_dict["imputation"]