✨ Add LocalScaler modules (#61)
TakuyaShintate authored Sep 17, 2021
1 parent 80b4ab7 commit 8235045
Showing 19 changed files with 240 additions and 41 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -112,7 +112,7 @@ LOGGER:
import torch
from tsts.solvers import TimeSeriesForecaster

-# Define trainining + validation datasets (they are divided inside)
+# Define training + validation datasets (they are divided inside)
sin_dataset = torch.sin(torch.arange(0.0, 100.0, 0.1))
sin_dataset = sin_dataset.unsqueeze(-1)

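The README example is truncated at this point; presumably it continues by fitting the forecaster, roughly as follows (the exact calls are assumed, not shown in this diff):

# Hypothetical continuation of the README snippet above
forecaster = TimeSeriesForecaster()
forecaster.fit(sin_dataset)  # training/validation split happens internally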
3 changes: 3 additions & 0 deletions benchmark/nbeats/tourism_monthly.yml
@@ -16,6 +16,9 @@ MODEL:
  NUM_H_UNITS: 512
  ADD_LAST_STEP_VAL: True

+LOCALSCALER:
+  NAME: "LastStep"

LOSSES:
  NAMES: ["MAPE"]

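For orientation, a minimal sketch of how such a benchmark config could be consumed, assuming the usual yacs workflow (the exact accessor name in tsts is an assumption):

from tsts.cfg import get_cfg_defaults  # assumed helper name

cfg = get_cfg_defaults()
cfg.merge_from_file("benchmark/nbeats/tourism_monthly.yml")
assert cfg.LOCALSCALER.NAME == "LastStep"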
10 changes: 7 additions & 3 deletions tsts/cfg/defaults.py
@@ -57,9 +57,6 @@
_C.MODEL = CN()
# Model name
_C.MODEL.NAME = "Seq2Seq"
-# If True, Add x_t (the last value of input time series) to every output
-# Set it to True when dataset has high varience
-_C.MODEL.ADD_LAST_STEP_VAL = False
# Number of hidden units in encoder and decoder
_C.MODEL.NUM_H_UNITS = 64
# Number of hidden layers in encoder and decoder
@@ -85,6 +82,12 @@
# Polynomial degree
_C.MODEL.DEGREE = 2

+_C.LOCALSCALER = CN()
+# Local scaler name
+_C.LOCALSCALER.NAME = "NOOP"
+# Order p of the AR model, i.e., AR(p) regresses on the last p steps
+_C.LOCALSCALER.NUM_STEPS = 100

_C.LOSSES = CN()
# Loss function names
_C.LOSSES.NAMES = ["MSE"]
@@ -114,6 +117,7 @@
_C.SCALER.NAME = "StandardScaler"

_C.COLLATOR = CN()
+# Collator name
_C.COLLATOR.NAME = "Collator"

_C.DATALOADER = CN()
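A minimal sketch of overriding the new defaults in code, assuming standard yacs CfgNode semantics and that classes register under their class names (both assumptions):

from tsts.cfg import get_cfg_defaults  # assumed helper name

cfg = get_cfg_defaults()
cfg.LOCALSCALER.NAME = "AutoRegressiveModel"  # registered name assumed to equal the class name
cfg.LOCALSCALER.NUM_STEPS = 50                # AR(50): regress on the last 50 steps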
2 changes: 2 additions & 0 deletions tsts/core/__init__.py
@@ -3,6 +3,7 @@
    COLLATORS,
    DATALOADERS,
    DATASETS,
+    LOCALSCALERS,
    LOGGERS,
    LOSSES,
    METRICS,
@@ -18,6 +19,7 @@
    "COLLATORS",
    "DATALOADERS",
    "DATASETS",
+    "LOCALSCALERS",
    "LOGGERS",
    "LOSSES",
    "METRICS",
2 changes: 2 additions & 0 deletions tsts/core/registry.py
@@ -4,6 +4,7 @@
    "COLLATORS",
    "DATALOADERS",
    "DATASETS",
+    "LOCALSCALERS",
    "LOGGERS",
    "LOSSES",
    "METRICS",
@@ -50,6 +51,7 @@ def wrapper(cls: Any) -> Any:
COLLATORS = Registry()
DATALOADERS = Registry()
DATASETS = Registry()
+LOCALSCALERS = Registry()
LOGGERS = Registry()
LOSSES = Registry()
METRICS = Registry()
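For readers unfamiliar with the pattern, a minimal sketch of what such a Registry might look like (the real implementation in tsts/core/registry.py may differ in details):

from typing import Any, Callable, Dict


class Registry:
    """Minimal sketch; the real tsts Registry may differ."""

    def __init__(self) -> None:
        self._modules: Dict[str, Any] = {}

    def register(self) -> Callable[[Any], Any]:
        # Used as @LOCALSCALERS.register(); keys entries by class name
        def wrapper(cls: Any) -> Any:
            self._modules[cls.__name__] = cls
            return cls

        return wrapper

    def __getitem__(self, name: str) -> Any:
        return self._modules[name]


LOCALSCALERS = Registry()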
2 changes: 2 additions & 0 deletions tsts/loggers/builder.py
@@ -13,6 +13,7 @@

def build_logger(
    model: Module,
+    local_scaler: Module,
    losses: List[Loss],
    metrics: List[Metric],
    context_manager: ContextManager,
@@ -22,6 +23,7 @@ def build_logger(
    cls = LOGGERS[logger_name]
    logger = cls.from_cfg(
        model,
+        local_scaler,
        losses,
        metrics,
        context_manager,
10 changes: 8 additions & 2 deletions tsts/loggers/logger.py
@@ -19,12 +19,14 @@ def __init__(
        self,
        log_dir: str,
        model: Module,
+        local_scaler: Module,
        losses: List[Loss],
        metrics: List[Metric],
        context_manager: ContextManager,
    ) -> None:
        self.log_dir = log_dir
        self.model = model
+        self.local_scaler = local_scaler
        self.losses = losses
        self.metrics = metrics
        self.context_manager = context_manager
@@ -36,6 +38,7 @@ def __init__(
    def from_cfg(
        cls,
        model: Module,
+        local_scaler: Module,
        losses: List[Loss],
        metrics: List[Metric],
        context_manager: ContextManager,
@@ -47,6 +50,7 @@ def from_cfg(
        logger = cls(
            log_dir,
            model,
+            local_scaler,
            losses,
            metrics,
            context_manager,
@@ -79,8 +83,10 @@ def log(
        current_ave_score = sum(ave_scores) / len(ave_scores)
        if current_ave_score < self.best_ave_score:
            self.best_ave_score = current_ave_score
-            root = os.path.join(self.log_dir, "model.pth")
-            torch.save(self.model.state_dict(), root)
+            model_path = os.path.join(self.log_dir, "model.pth")
+            torch.save(self.model.state_dict(), model_path)
+            local_scaler_path = os.path.join(self.log_dir, "local_scaler.pth")
+            torch.save(self.local_scaler.state_dict(), local_scaler_path)
        # Add new record to log file
        record: Dict[str, Any] = {
            "epoch": epoch,
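The logger now checkpoints the local scaler alongside the model whenever the average validation score improves. A hypothetical restore step mirroring the save calls above (loading code is not part of this commit; model and local_scaler are assumed to be built as during training):

import os

import torch

log_dir = "path/to/log_dir"  # placeholder
model.load_state_dict(torch.load(os.path.join(log_dir, "model.pth")))
local_scaler.load_state_dict(torch.load(os.path.join(log_dir, "local_scaler.pth")))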
16 changes: 2 additions & 14 deletions tsts/models/informer.py
@@ -6,17 +6,8 @@
import torch.nn.functional as F
import torch.nn.init as init
from torch import Tensor
-from torch.nn import (
-    BatchNorm1d,
-    Conv1d,
-    Dropout,
-    Embedding,
-    LayerNorm,
-    Linear,
-    MaxPool1d,
-    ModuleList,
-    Parameter,
-)
+from torch.nn import (BatchNorm1d, Conv1d, Dropout, Embedding, LayerNorm,
+                      Linear, MaxPool1d, ModuleList, Parameter)
from tsts.cfg import CfgNode as CN
from tsts.core import MODELS

@@ -842,7 +833,6 @@ def _run_decoders(self, mb_feats: Tensor, mb_enc_feats: Tensor) -> Tensor:
    def forward(
        self,
        X: Tensor,
-        bias: Tensor,
        X_mask: Tensor,
        time_stamps: List[Union[None, Tensor]],
    ) -> Tensor:
@@ -882,6 +872,4 @@ def forward(
        mb_dec_feats = self.dropout(mb_dec_feats)
        mb_dec_feats = self._run_decoders(mb_dec_feats, mb_enc_feats)
        mb_feats = self.projector(mb_dec_feats)
-        if self.add_last_step_val is True:
-            mb_feats = mb_feats + bias[:, -1:]
        return mb_feats[:, -self.horizon :]
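Note the API change here in informer.py, and likewise in nbeats.py and seq2seq.py below: bias is removed from forward, and the last-step offset moves into the new LocalScaler modules.

# Sketch only (not runnable on its own): the forward signature change.
# Before: Z = model(X, bias, X_mask, time_stamps)
# After:  Z = model(X, X_mask, time_stamps)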
6 changes: 6 additions & 0 deletions tsts/models/localscalers/__init__.py
@@ -0,0 +1,6 @@
from .ar import AutoRegressiveModel
from .builder import build_local_scaler
from .laststep import LastStep
from .noop import NOOP

__all__ = ["AutoRegressiveModel", "build_local_scaler", "LastStep", "NOOP"]
43 changes: 43 additions & 0 deletions tsts/models/localscalers/ar.py
@@ -0,0 +1,43 @@
from torch import Tensor
from torch.nn import Linear
from tsts.cfg import CfgNode as CN
from tsts.core import LOCALSCALERS
from tsts.models import Module

__all__ = ["AutoRegressiveModel"]


@LOCALSCALERS.register()
class AutoRegressiveModel(Module):
    def __init__(
        self,
        num_in_feats: int,
        num_out_feats: int,
        num_steps: int,
    ) -> None:
        super(AutoRegressiveModel, self).__init__()
        self.num_in_feats = num_in_feats
        self.num_out_feats = num_out_feats
        self.num_steps = num_steps
        self._init_linear()

    @classmethod
    def from_cfg(
        cls,
        num_in_feats: int,
        num_out_feats: int,
        cfg: CN,
    ) -> "AutoRegressiveModel":
        num_steps = cfg.LOCALSCALER.NUM_STEPS
        local_scaler = cls(num_in_feats, num_out_feats, num_steps)
        return local_scaler

    def _init_linear(self) -> None:
        # AR(p): one linear map from the last p = num_steps values to the next value
        self.linear = Linear(self.num_steps, 1)

    def forward(self, bias: Tensor) -> Tensor:
        # bias: (batch, time, num_out_feats); keep only the last num_steps steps
        bias = bias[:, -self.num_steps :]
        # Fold the feature dim into the batch dim: (batch * num_out_feats, num_steps)
        bias = bias.permute(0, 2, 1).reshape(-1, self.num_steps)
        bias = self.linear(bias)
        # Back to (batch, 1, num_out_feats)
        bias = bias.view(-1, 1, self.num_out_feats)
        return bias
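A quick shape check of AutoRegressiveModel, assuming tsts.models.Module behaves like a plain torch.nn.Module here:

import torch

scaler = AutoRegressiveModel(num_in_feats=1, num_out_feats=1, num_steps=100)
bias = torch.randn(8, 120, 1)   # (batch, time, feats)
out = scaler(bias)
assert out.shape == (8, 1, 1)   # one learned offset per series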
38 changes: 38 additions & 0 deletions tsts/models/localscalers/builder.py
@@ -0,0 +1,38 @@
from torch.nn import Module
from tsts.cfg import CfgNode as CN
from tsts.core import LOCALSCALERS

__all__ = ["build_local_scaler"]


def build_local_scaler(
    num_in_feats: int,
    num_out_feats: int,
    cfg: CN,
) -> Module:
    """Build a local scaler.

    Parameters
    ----------
    num_in_feats : int
        Number of input features

    num_out_feats : int
        Number of output features

    cfg : CN
        Global configuration

    Returns
    -------
    Module
        Local scaler specified by cfg.LOCALSCALER.NAME
    """
    local_scaler_name = cfg.LOCALSCALER.NAME
    cls = LOCALSCALERS[local_scaler_name]
    local_scaler = cls.from_cfg(
        num_in_feats,
        num_out_feats,
        cfg,
    )
    return local_scaler
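Hypothetical usage, under the same assumed config accessor as above:

from tsts.cfg import get_cfg_defaults  # assumed helper name

cfg = get_cfg_defaults()
cfg.LOCALSCALER.NAME = "LastStep"  # or "NOOP" / "AutoRegressiveModel"
local_scaler = build_local_scaler(num_in_feats=1, num_out_feats=1, cfg=cfg)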
27 changes: 27 additions & 0 deletions tsts/models/localscalers/laststep.py
@@ -0,0 +1,27 @@
from torch import Tensor
from tsts.cfg import CfgNode as CN
from tsts.core import LOCALSCALERS
from tsts.models import Module

__all__ = ["LastStep"]


@LOCALSCALERS.register()
class LastStep(Module):
    def __init__(
        self,
    ) -> None:
        super(LastStep, self).__init__()

    @classmethod
    def from_cfg(
        cls,
        num_in_feats: int,
        num_out_feats: int,
        cfg: CN,
    ) -> "LastStep":
        local_scaler = cls()
        return local_scaler

    def forward(self, bias: Tensor) -> Tensor:
        # Use the last observed step as the local offset: (batch, 1, feats)
        return bias[:, -1].unsqueeze(1)
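LastStep returns the most recent observation as the offset, reproducing the effect of the removed MODEL.ADD_LAST_STEP_VAL flag. A quick check (same Module assumption as above):

import torch

scaler = LastStep()
bias = torch.randn(8, 120, 1)
out = scaler(bias)
assert torch.equal(out, bias[:, -1:])  # (8, 1, 1): the last observed values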
28 changes: 28 additions & 0 deletions tsts/models/localscalers/noop.py
@@ -0,0 +1,28 @@
import torch
from torch import Tensor
from tsts.cfg import CfgNode as CN
from tsts.core import LOCALSCALERS
from tsts.models import Module

__all__ = ["NOOP"]


@LOCALSCALERS.register()
class NOOP(Module):
    def __init__(
        self,
    ) -> None:
        super(NOOP, self).__init__()

    @classmethod
    def from_cfg(
        cls,
        num_in_feats: int,
        num_out_feats: int,
        cfg: CN,
    ) -> "NOOP":
        local_scaler = cls()
        return local_scaler

    def forward(self, bias: Tensor) -> Tensor:
        # Zero offset with shape (batch, 1, feats): adding it is a no-op
        return torch.zeros_like(bias)[:, 0:1]
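NOOP emits a zero offset, so the default configuration keeps the old behavior. How a local scaler presumably combines with a model (the solver-side changes are not shown in this diff; this mirrors the removed `mb_feats + bias[:, -1:]` logic):

# Hypothetical composition inside the training/inference loop
offset = local_scaler(X)           # (batch, 1, num_out_feats)
Z = model(X, X_mask, time_stamps)  # (batch, horizon, num_out_feats)
Z = Z + offset                     # the offset broadcasts over the horizon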
9 changes: 1 addition & 8 deletions tsts/models/nbeats.py
@@ -269,7 +269,6 @@ def _init_stack(self) -> None:
    def forward(
        self,
        X: Tensor,
-        bias: Tensor,
        X_mask: Tensor,
        time_stamps: Optional[Tensor] = None,
    ) -> Tensor:
@@ -290,15 +289,9 @@ def forward(
        """
        batch_size = X.size(0)
        X = X.reshape(batch_size, -1)
-        bias = bias.reshape(batch_size, -1)
        X_mask = X_mask.reshape(batch_size, -1)
        X_mask = X_mask.flip(dims=(1,))
-        # Predict offset
-        if self.add_last_step_val is True:
-            mb_total_preds = bias[:, -self.num_out_feats :]
-            mb_total_preds = mb_total_preds.repeat(1, self.horizon)
-        else:
-            mb_total_preds = torch.zeros_like(X[:, -1:])
+        mb_total_preds = torch.zeros_like(X[:, -1:])
        mb_feats = X.flip(dims=(1,))
        for i in range(self.stack_size):
            (current_mb_feats, mb_preds) = self.stack[i](mb_feats)
11 changes: 4 additions & 7 deletions tsts/models/seq2seq.py
@@ -118,9 +118,9 @@ def _run_encoder(self, X: Tensor) -> List[Tensor]:
            c[i] = c_t
        return h

-    def _run_decoder(self, h: List[Tensor], bias: Tensor) -> Tensor:
-        batch_size = bias.size(0)
-        device = bias.device
+    def _run_decoder(self, h: List[Tensor]) -> Tensor:
+        batch_size = h[0].size(0)
+        device = h[0].device
        (_, c) = self._init_memory(batch_size, device)
        mb_preds = []
        h_t = h[-1]
@@ -130,15 +130,12 @@ def _run_decoder(self, h: List[Tensor], bias: Tensor) -> Tensor:
            h[i] = h_t
            c[i] = c_t
            y_t = self.regressor(h_t)
-            if self.add_last_step_val is True:
-                y_t = y_t + bias[:, -1]
            mb_preds.append(y_t.unsqueeze(1))
        return torch.cat(mb_preds, dim=1)

    def forward(
        self,
        X: Tensor,
-        bias: Tensor,
        X_mask: Tensor,
        time_stamps: Optional[Tensor] = None,
    ) -> Tensor:
@@ -158,5 +155,5 @@
            Prediction
        """
        h = self._run_encoder(X)
-        mb_preds = self._run_decoder(h, bias)
+        mb_preds = self._run_decoder(h)
        return mb_preds