feat: add TimesNet_Q and xPatch models with Q matrix transformations

This commit is contained in:
game-loader
2025-08-06 18:39:26 +08:00
parent 7fdf0f364d
commit 6bba6613c9
14 changed files with 872 additions and 3 deletions

layers/decomp.py (new file, +21 lines)

@@ -0,0 +1,21 @@
import torch
from torch import nn

from layers.ema import EMA
from layers.dema import DEMA


class DECOMP(nn.Module):
    """
    Series decomposition block: splits a series into a residual (seasonal)
    component and a moving-average (trend) component.
    """
    def __init__(self, ma_type, alpha, beta):
        super(DECOMP, self).__init__()
        if ma_type == 'ema':
            self.ma = EMA(alpha)
        elif ma_type == 'dema':
            self.ma = DEMA(alpha, beta)
        else:
            raise ValueError(f"Unknown ma_type: {ma_type!r} (expected 'ema' or 'dema')")

    def forward(self, x):
        # x: [Batch, Input, Channel]
        moving_average = self.ma(x)
        res = x - moving_average
        return res, moving_average
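
A minimal usage sketch (not part of this diff). It assumes a CUDA device, since the EMA/DEMA blocks below move their weights to 'cuda', and uses placeholder batch, length, and channel sizes; with ma_type='ema' the beta argument is unused.

import torch
from layers.decomp import DECOMP

alpha = torch.tensor(0.3)   # smoothing factors; hyperparameters in xPatch-style models
beta = torch.tensor(0.3)
decomp = DECOMP(ma_type='ema', alpha=alpha, beta=beta)

x = torch.randn(8, 96, 7, device='cuda')   # [Batch, Input length, Channel]
res, trend = decomp(x)                      # seasonal residual and smoothed trend
print(res.shape, trend.shape)               # torch.Size([8, 96, 7]) twice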

layers/dema.py (new file, +27 lines)

@@ -0,0 +1,27 @@
import torch
from torch import nn


class DEMA(nn.Module):
    """
    Double Exponential Moving Average (DEMA) block to highlight the trend of a time series.
    Implements Holt's linear smoothing: a level `s` and a trend `b` updated step by step.
    """
    def __init__(self, alpha, beta):
        super(DEMA, self).__init__()
        # self.alpha = nn.Parameter(alpha)  # Learnable alpha
        # self.beta = nn.Parameter(beta)    # Learnable beta
        self.alpha = alpha.to(device='cuda')  # alpha and beta are expected to be tensors
        self.beta = beta.to(device='cuda')

    def forward(self, x):
        # x: [Batch, Input, Channel]
        # self.alpha.data.clamp_(0, 1)  # Clamp learnable alpha to [0, 1]
        # self.beta.data.clamp_(0, 1)   # Clamp learnable beta to [0, 1]
        s_prev = x[:, 0, :]          # initial level
        b = x[:, 1, :] - s_prev      # initial trend
        res = [s_prev.unsqueeze(1)]
        for t in range(1, x.shape[1]):
            xt = x[:, t, :]
            s = self.alpha * xt + (1 - self.alpha) * (s_prev + b)
            b = self.beta * (s - s_prev) + (1 - self.beta) * b
            s_prev = s
            res.append(s.unsqueeze(1))
        return torch.cat(res, dim=1)
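
A quick sanity-check sketch (not part of the commit): double exponential smoothing tracks a perfectly linear series without lag, so on a ramp the output should reproduce the input. A CUDA device is assumed because DEMA hard-codes .to(device='cuda').

import torch
from layers.dema import DEMA

dema = DEMA(alpha=torch.tensor(0.5), beta=torch.tensor(0.5))

t = torch.arange(96, dtype=torch.float32, device='cuda')
x = t.reshape(1, 96, 1).repeat(2, 1, 3)      # [Batch, Input, Channel], linear ramp
trend = dema(x)
print(torch.allclose(trend, x, atol=1e-3))   # True: the trend follows the ramp exactly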

layers/ema.py (new file, +37 lines)

@@ -0,0 +1,37 @@
import torch
from torch import nn


class EMA(nn.Module):
    """
    Exponential Moving Average (EMA) block to highlight the trend of a time series.
    """
    def __init__(self, alpha):
        super(EMA, self).__init__()
        # self.alpha = nn.Parameter(alpha)  # Learnable alpha
        self.alpha = alpha

    # Vectorized implementation: one weighted cumulative sum instead of a sequential Python loop
    def forward(self, x):
        # x: [Batch, Input, Channel]
        # self.alpha.data.clamp_(0, 1)  # Clamp learnable alpha to [0, 1]
        _, t, _ = x.shape
        powers = torch.flip(torch.arange(t, dtype=torch.double), dims=(0,))
        weights = torch.pow((1 - self.alpha), powers).to('cuda')
        divisor = weights.clone()
        weights[1:] = weights[1:] * self.alpha
        weights = weights.reshape(1, t, 1)
        divisor = divisor.reshape(1, t, 1)
        x = torch.cumsum(x * weights, dim=1)
        x = torch.div(x, divisor)
        return x.to(torch.float32)

    # # Naive implementation: sequential recursion over the time dimension
    # def forward(self, x):
    #     # self.alpha.data.clamp_(0, 1)  # Clamp learnable alpha to [0, 1]
    #     s = x[:, 0, :]
    #     res = [s.unsqueeze(1)]
    #     for t in range(1, x.shape[1]):
    #         xt = x[:, t, :]
    #         s = self.alpha * xt + (1 - self.alpha) * s
    #         res.append(s.unsqueeze(1))
    #     return torch.cat(res, dim=1)
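
An equivalence sketch (not part of the diff) comparing the vectorized cumsum form against the commented-out sequential recursion; ema_loop is a hypothetical reference helper, and a CUDA device is assumed because the layer hard-codes .to('cuda').

import torch
from layers.ema import EMA

def ema_loop(x, alpha):
    # Reference recursion: s_t = alpha * x_t + (1 - alpha) * s_{t-1}
    s = x[:, 0, :]
    out = [s.unsqueeze(1)]
    for t in range(1, x.shape[1]):
        s = alpha * x[:, t, :] + (1 - alpha) * s
        out.append(s.unsqueeze(1))
    return torch.cat(out, dim=1)

alpha = 0.3
x = torch.randn(4, 64, 7, device='cuda')
print(torch.allclose(EMA(alpha)(x), ema_loop(x, alpha), atol=1e-4))  # True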

layers/revin.py (new file, +61 lines)

@@ -0,0 +1,61 @@
import torch
from torch import nn


class RevIN(nn.Module):
    def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_last=False):
        """
        :param num_features: the number of features or channels
        :param eps: a value added for numerical stability
        :param affine: if True, RevIN has learnable affine parameters
        :param subtract_last: if True, subtract the last value instead of the mean
        """
        super(RevIN, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.subtract_last = subtract_last
        if self.affine:
            self._init_params()

    def forward(self, x, mode: str):
        if mode == 'norm':
            self._get_statistics(x)
            x = self._normalize(x)
        elif mode == 'denorm':
            x = self._denormalize(x)
        else:
            raise NotImplementedError
        return x

    def _init_params(self):
        # initialize RevIN params: (C,)
        self.affine_weight = nn.Parameter(torch.ones(self.num_features))
        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))

    def _get_statistics(self, x):
        dim2reduce = tuple(range(1, x.ndim - 1))
        if self.subtract_last:
            self.last = x[:, -1, :].unsqueeze(1)
        else:
            self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()
        self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()

    def _normalize(self, x):
        if self.subtract_last:
            x = x - self.last
        else:
            x = x - self.mean
        x = x / self.stdev
        if self.affine:
            x = x * self.affine_weight
            x = x + self.affine_bias
        return x

    def _denormalize(self, x):
        if self.affine:
            x = x - self.affine_bias
            x = x / (self.affine_weight + self.eps * self.eps)
        x = x * self.stdev
        if self.subtract_last:
            x = x + self.last
        else:
            x = x + self.mean
        return x
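
A minimal norm/denorm round-trip sketch (not part of this commit); the batch size, sequence lengths, and channel count are placeholders, and the stand-in "prediction" simply slices the normalized input where a forecasting model would normally sit.

import torch
from layers.revin import RevIN

revin = RevIN(num_features=7)
x = torch.randn(32, 96, 7)            # [Batch, Input length, Channel]

x_norm = revin(x, mode='norm')        # normalize with per-instance statistics
y_hat = x_norm[:, -48:, :]            # stand-in for a model output of shape [Batch, pred_len, Channel]
y = revin(y_hat, mode='denorm')       # restore the original scale
print(y.shape)                        # torch.Size([32, 48, 7])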

layers/telu.py (new file, +30 lines)

@@ -0,0 +1,30 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class TeLU(nn.Module):
    """
    TeLU activation function as proposed in the paper
    "TeLU Activation Function for Fast and Stable Deep Learning".
    Formula: TeLU(x) = x * tanh(e^x)
    """
    def __init__(self):
        """
        TeLU has no learnable parameters, so __init__ is trivial.
        """
        super(TeLU, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass: apply the formula directly.
        """
        return x * torch.tanh(torch.exp(x))

    def __repr__(self):
        """
        (Optional but recommended) readable string representation, handy when printing the model structure.
        """
        return f"{self.__class__.__name__}()"