first commit
exp/__init__.py (new file, 0 lines)

exp/exp_anomaly_detection.py (new file, 207 lines)
@@ -0,0 +1,207 @@
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, adjustment
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
import torch.multiprocessing

torch.multiprocessing.set_sharing_strategy('file_system')
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np

warnings.filterwarnings('ignore')


class Exp_Anomaly_Detection(Exp_Basic):
    def __init__(self, args):
        super(Exp_Anomaly_Detection, self).__init__(args)

    def _build_model(self):
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self):
        criterion = nn.MSELoss()
        return criterion

    def vali(self, vali_data, vali_loader, criterion):
        total_loss = []
        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, _) in enumerate(vali_loader):
                batch_x = batch_x.float().to(self.device)

                outputs = self.model(batch_x, None, None, None)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, :, f_dim:]
                pred = outputs.detach()
                true = batch_x.detach()

                loss = criterion(pred, true)
                total_loss.append(loss.item())
        total_loss = np.average(total_loss)
        self.model.train()
        return total_loss

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='train')
        vali_data, vali_loader = self._get_data(flag='val')
        test_data, test_loader = self._get_data(flag='test')

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)

        time_now = time.time()

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)

        model_optim = self._select_optimizer()
        criterion = self._select_criterion()

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []

            self.model.train()
            epoch_time = time.time()
            for i, (batch_x, batch_y) in enumerate(train_loader):
                iter_count += 1
                model_optim.zero_grad()

                batch_x = batch_x.float().to(self.device)

                outputs = self.model(batch_x, None, None, None)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, :, f_dim:]
                loss = criterion(outputs, batch_x)
                train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(train_loss)
            vali_loss = self.vali(vali_data, vali_loader, criterion)
            test_loss = self.vali(test_data, test_loader, criterion)

            print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
                epoch + 1, train_steps, train_loss, vali_loss, test_loss))
            early_stopping(vali_loss, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            adjust_learning_rate(model_optim, epoch + 1, self.args)

        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def test(self, setting, test=0):
        test_data, test_loader = self._get_data(flag='test')
        train_data, train_loader = self._get_data(flag='train')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        attens_energy = []
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        self.model.eval()
        self.anomaly_criterion = nn.MSELoss(reduction='none')

        # (1) statistic on the train set
        with torch.no_grad():
            for i, (batch_x, batch_y) in enumerate(train_loader):
                batch_x = batch_x.float().to(self.device)
                # reconstruction
                outputs = self.model(batch_x, None, None, None)
                # criterion
                score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)
                score = score.detach().cpu().numpy()
                attens_energy.append(score)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)

        # (2) find the threshold
        attens_energy = []
        test_labels = []
        for i, (batch_x, batch_y) in enumerate(test_loader):
            batch_x = batch_x.float().to(self.device)
            # reconstruction
            outputs = self.model(batch_x, None, None, None)
            # criterion
            score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)
            score = score.detach().cpu().numpy()
            attens_energy.append(score)
            test_labels.append(batch_y)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
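        # anomaly_ratio is given in percent, so the (100 - anomaly_ratio)-th percentile of
        # the pooled train/test reconstruction errors becomes the decision threshold: the
        # top anomaly_ratio% highest-error points are flagged as anomalies.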
        threshold = np.percentile(combined_energy, 100 - self.args.anomaly_ratio)
        print("Threshold :", threshold)

        # (3) evaluation on the test set
        pred = (test_energy > threshold).astype(int)
        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)
        test_labels = np.array(test_labels)
        gt = test_labels.astype(int)

        print("pred: ", pred.shape)
        print("gt: ", gt.shape)

        # (4) detection adjustment
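        # adjustment() applies the common point-adjustment protocol: if any point inside a
        # ground-truth anomaly segment is detected, the whole segment counts as detected.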
        gt, pred = adjustment(gt, pred)

        pred = np.array(pred)
        gt = np.array(gt)
        print("pred: ", pred.shape)
        print("gt: ", gt.shape)

        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred, average='binary')
        print("Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(
            accuracy, precision,
            recall, f_score))

        f = open("result_anomaly_detection.txt", 'a')
        f.write(setting + " \n")
        f.write("Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(
            accuracy, precision,
            recall, f_score))
        f.write('\n')
        f.write('\n')
        f.close()
        return

exp/exp_basic.py (new file, 80 lines)
@@ -0,0 +1,80 @@
import os
import torch
from models import Autoformer, Transformer, TimesNet, Nonstationary_Transformer, DLinear, FEDformer, \
    Informer, LightTS, Reformer, ETSformer, Pyraformer, PatchTST, MICN, Crossformer, FiLM, iTransformer, \
    Koopa, TiDE, FreTS, TimeMixer, TSMixer, SegRNN, MambaSimple, TemporalFusionTransformer, SCINet, PAttn, TimeXer, \
    WPMixer, MultiPatchFormer, xPatch_SparseChannel


class Exp_Basic(object):
    def __init__(self, args):
        self.args = args
        self.model_dict = {
            'TimesNet': TimesNet,
            'Autoformer': Autoformer,
            'Transformer': Transformer,
            'Nonstationary_Transformer': Nonstationary_Transformer,
            'DLinear': DLinear,
            'FEDformer': FEDformer,
            'Informer': Informer,
            'LightTS': LightTS,
            'Reformer': Reformer,
            'ETSformer': ETSformer,
            'PatchTST': PatchTST,
            'Pyraformer': Pyraformer,
            'MICN': MICN,
            'Crossformer': Crossformer,
            'FiLM': FiLM,
            'iTransformer': iTransformer,
            'Koopa': Koopa,
            'TiDE': TiDE,
            'FreTS': FreTS,
            'MambaSimple': MambaSimple,
            'TimeMixer': TimeMixer,
            'TSMixer': TSMixer,
            'SegRNN': SegRNN,
            'TemporalFusionTransformer': TemporalFusionTransformer,
            'SCINet': SCINet,
            'PAttn': PAttn,
            'TimeXer': TimeXer,
            'WPMixer': WPMixer,
            'MultiPatchFormer': MultiPatchFormer,
            'xPatch_SparseChannel': xPatch_SparseChannel
        }
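        # Each entry maps an args.model name to a module exposing a Model class;
        # subclasses instantiate it as self.model_dict[self.args.model].Model(self.args).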
        if args.model == 'Mamba':
            print('Please make sure you have successfully installed mamba_ssm')
            from models import Mamba
            self.model_dict['Mamba'] = Mamba

        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        raise NotImplementedError

    def _acquire_device(self):
        if self.args.use_gpu and self.args.gpu_type == 'cuda':
            os.environ["CUDA_VISIBLE_DEVICES"] = str(
                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        elif self.args.use_gpu and self.args.gpu_type == 'mps':
            device = torch.device('mps')
            print('Use GPU: mps')
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        pass

    def vali(self):
        pass

    def train(self):
        pass

    def test(self):
        pass

exp/exp_classification.py (new file, 192 lines)
@@ -0,0 +1,192 @@
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, cal_accuracy
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np

warnings.filterwarnings('ignore')


class Exp_Classification(Exp_Basic):
    def __init__(self, args):
        super(Exp_Classification, self).__init__(args)

    def _build_model(self):
        # model input depends on data
        train_data, train_loader = self._get_data(flag='TRAIN')
        test_data, test_loader = self._get_data(flag='TEST')
        self.args.seq_len = max(train_data.max_seq_len, test_data.max_seq_len)
        self.args.pred_len = 96
        self.args.enc_in = train_data.feature_df.shape[1]
        self.args.num_class = len(train_data.class_names)
        # model init
        model = self.model_dict[self.args.model].Model(self.args).float()
        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        # model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        model_optim = optim.RAdam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self):
        criterion = nn.CrossEntropyLoss()
        return criterion

    def vali(self, vali_data, vali_loader, criterion):
        total_loss = []
        preds = []
        trues = []
        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, label, padding_mask) in enumerate(vali_loader):
                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)

                outputs = self.model(batch_x, padding_mask, None, None)

                pred = outputs.detach()
                loss = criterion(pred, label.long().squeeze())
                total_loss.append(loss.item())

                preds.append(outputs.detach())
                trues.append(label)

        total_loss = np.average(total_loss)

        preds = torch.cat(preds, 0)
        trues = torch.cat(trues, 0)
        probs = torch.nn.functional.softmax(preds, dim=1)  # (total_samples, num_classes) est. prob. for each class and sample
        predictions = torch.argmax(probs, dim=1).cpu().numpy()  # (total_samples,) int class index for each sample
        trues = trues.flatten().cpu().numpy()
        accuracy = cal_accuracy(predictions, trues)

        self.model.train()
        return total_loss, accuracy

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='TRAIN')
        vali_data, vali_loader = self._get_data(flag='TEST')
        test_data, test_loader = self._get_data(flag='TEST')
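        # Note: validation reuses the TEST split, apparently because these classification
        # archives ship only TRAIN/TEST partitions; early stopping therefore tracks
        # test-split accuracy.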

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)

        time_now = time.time()

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)

        model_optim = self._select_optimizer()
        criterion = self._select_criterion()

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []

            self.model.train()
            epoch_time = time.time()

            for i, (batch_x, label, padding_mask) in enumerate(train_loader):
                iter_count += 1
                model_optim.zero_grad()

                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)

                outputs = self.model(batch_x, padding_mask, None, None)
                loss = criterion(outputs, label.long().squeeze(-1))
                train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=4.0)
                model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(train_loss)
            vali_loss, val_accuracy = self.vali(vali_data, vali_loader, criterion)
            # test_loss, test_accuracy = self.vali(test_data, test_loader, criterion)

            print(
                "Epoch: {0}, Steps: {1} | Train Loss: {2:.3f} Vali Loss: {3:.3f} Vali Acc: {4:.3f}"  # Test Loss: {5:.3f} Test Acc: {6:.3f}"
                .format(epoch + 1, train_steps, train_loss, vali_loss, val_accuracy))
            # test_loss, test_accuracy))
            # EarlyStopping tracks a decreasing metric, so pass negative accuracy to maximize accuracy
            early_stopping(-val_accuracy, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break

        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def test(self, setting, test=0):
        test_data, test_loader = self._get_data(flag='TEST')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        preds = []
        trues = []
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, label, padding_mask) in enumerate(test_loader):
                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)

                outputs = self.model(batch_x, padding_mask, None, None)

                preds.append(outputs.detach())
                trues.append(label)

        preds = torch.cat(preds, 0)
        trues = torch.cat(trues, 0)
        print('test shape:', preds.shape, trues.shape)

        probs = torch.nn.functional.softmax(preds, dim=1)  # (total_samples, num_classes) est. prob. for each class and sample
        predictions = torch.argmax(probs, dim=1).cpu().numpy()  # (total_samples,) int class index for each sample
        trues = trues.flatten().cpu().numpy()
        accuracy = cal_accuracy(predictions, trues)

        # result save
        folder_path = './results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        print('accuracy:{}'.format(accuracy))
        file_name = 'result_classification.txt'
        f = open(os.path.join(folder_path, file_name), 'a')
        f.write(setting + " \n")
        f.write('accuracy:{}'.format(accuracy))
        f.write('\n')
        f.write('\n')
        f.close()
        return

exp/exp_imputation.py (new file, 228 lines)
@@ -0,0 +1,228 @@
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, visual
from utils.metrics import metric
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np

warnings.filterwarnings('ignore')


class Exp_Imputation(Exp_Basic):
    def __init__(self, args):
        super(Exp_Imputation, self).__init__(args)

    def _build_model(self):
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self):
        criterion = nn.MSELoss()
        return criterion

    def vali(self, vali_data, vali_loader, criterion):
        total_loss = []
        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
                batch_x = batch_x.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)

                # random mask
                B, T, N = batch_x.shape
                """
                B = batch size
                T = seq len
                N = number of features
                """
                mask = torch.rand((B, T, N)).to(self.device)
                mask[mask <= self.args.mask_rate] = 0  # masked
                mask[mask > self.args.mask_rate] = 1  # remained
                inp = batch_x.masked_fill(mask == 0, 0)
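                # Since mask ~ U(0, 1), on average a mask_rate fraction of all (B, T, N)
                # entries is zeroed out; the model must reconstruct exactly those entries
                # from the remaining observed values.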

                outputs = self.model(inp, batch_x_mark, None, None, mask)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, :, f_dim:]

                # add support for MS
                batch_x = batch_x[:, :, f_dim:]
                mask = mask[:, :, f_dim:]

                pred = outputs.detach()
                true = batch_x.detach()
                mask = mask.detach()

                loss = criterion(pred[mask == 0], true[mask == 0])
                total_loss.append(loss.item())
        total_loss = np.average(total_loss)
        self.model.train()
        return total_loss

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='train')
        vali_data, vali_loader = self._get_data(flag='val')
        test_data, test_loader = self._get_data(flag='test')

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)

        time_now = time.time()

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)

        model_optim = self._select_optimizer()
        criterion = self._select_criterion()

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []

            self.model.train()
            epoch_time = time.time()
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
                iter_count += 1
                model_optim.zero_grad()

                batch_x = batch_x.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)

                # random mask
                B, T, N = batch_x.shape
                mask = torch.rand((B, T, N)).to(self.device)
                mask[mask <= self.args.mask_rate] = 0  # masked
                mask[mask > self.args.mask_rate] = 1  # remained
                inp = batch_x.masked_fill(mask == 0, 0)

                outputs = self.model(inp, batch_x_mark, None, None, mask)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, :, f_dim:]

                # add support for MS
                batch_x = batch_x[:, :, f_dim:]
                mask = mask[:, :, f_dim:]

                loss = criterion(outputs[mask == 0], batch_x[mask == 0])
                train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(train_loss)
            vali_loss = self.vali(vali_data, vali_loader, criterion)
            test_loss = self.vali(test_data, test_loader, criterion)

            print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
                epoch + 1, train_steps, train_loss, vali_loss, test_loss))
            early_stopping(vali_loss, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            adjust_learning_rate(model_optim, epoch + 1, self.args)

        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def test(self, setting, test=0):
        test_data, test_loader = self._get_data(flag='test')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        preds = []
        trues = []
        masks = []
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):
                batch_x = batch_x.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)

                # random mask
                B, T, N = batch_x.shape
                mask = torch.rand((B, T, N)).to(self.device)
                mask[mask <= self.args.mask_rate] = 0  # masked
                mask[mask > self.args.mask_rate] = 1  # remained
                inp = batch_x.masked_fill(mask == 0, 0)

                # imputation
                outputs = self.model(inp, batch_x_mark, None, None, mask)

                # eval
                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, :, f_dim:]

                # add support for MS
                batch_x = batch_x[:, :, f_dim:]
                mask = mask[:, :, f_dim:]

                outputs = outputs.detach().cpu().numpy()
                pred = outputs
                true = batch_x.detach().cpu().numpy()
                preds.append(pred)
                trues.append(true)
                masks.append(mask.detach().cpu())

                if i % 20 == 0:
                    # splice ground truth at observed points with predictions at masked points
                    filled = true[0, :, -1].copy()
                    filled = filled * mask[0, :, -1].detach().cpu().numpy() + \
                             pred[0, :, -1] * (1 - mask[0, :, -1].detach().cpu().numpy())
                    visual(true[0, :, -1], filled, os.path.join(folder_path, str(i) + '.pdf'))

        preds = np.concatenate(preds, 0)
        trues = np.concatenate(trues, 0)
        masks = np.concatenate(masks, 0)
        print('test shape:', preds.shape, trues.shape)

        # result save
        folder_path = './results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

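        # metrics are computed only at the masked positions (mask == 0), i.e. only where
        # the model actually had to impute values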
        mae, mse, rmse, mape, mspe = metric(preds[masks == 0], trues[masks == 0])
        print('mse:{}, mae:{}'.format(mse, mae))
        f = open("result_imputation.txt", 'a')
        f.write(setting + " \n")
        f.write('mse:{}, mae:{}'.format(mse, mae))
        f.write('\n')
        f.write('\n')
        f.close()

        np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
        np.save(folder_path + 'pred.npy', preds)
        np.save(folder_path + 'true.npy', trues)
        return

exp/exp_long_term_forecasting.py (new file, 268 lines)
@@ -0,0 +1,268 @@
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, visual
from utils.metrics import metric
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np
from utils.dtw_metric import dtw, accelerated_dtw
from utils.augmentation import run_augmentation, run_augmentation_single

warnings.filterwarnings('ignore')


class Exp_Long_Term_Forecast(Exp_Basic):
    def __init__(self, args):
        super(Exp_Long_Term_Forecast, self).__init__(args)

    def _build_model(self):
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self):
        criterion = nn.MSELoss()
        return criterion

    def vali(self, vali_data, vali_loader, criterion):
        total_loss = []
        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float()

                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
                # encoder - decoder
                if self.args.use_amp:
                    with torch.cuda.amp.autocast():
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                else:
                    outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, -self.args.pred_len:, f_dim:]
                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)

                pred = outputs.detach()
                true = batch_y.detach()

                loss = criterion(pred, true)

                total_loss.append(loss.item())
        total_loss = np.average(total_loss)
        self.model.train()
        return total_loss

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='train')
        vali_data, vali_loader = self._get_data(flag='val')
        test_data, test_loader = self._get_data(flag='test')

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)

        time_now = time.time()

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)

        model_optim = self._select_optimizer()
        criterion = self._select_criterion()

        if self.args.use_amp:
            scaler = torch.cuda.amp.GradScaler()
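            # GradScaler rescales the fp16 loss before backward() so small gradients do
            # not underflow; it is paired with the autocast() context in the loop below.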

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []

            self.model.train()
            epoch_time = time.time()
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
                iter_count += 1
                model_optim.zero_grad()
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
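                # dec_inp is the last label_len known steps followed by pred_len zero
                # placeholders; encoder-decoder models fill in those placeholder positions.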

                # encoder - decoder
                if self.args.use_amp:
                    with torch.cuda.amp.autocast():
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                        f_dim = -1 if self.args.features == 'MS' else 0
                        outputs = outputs[:, -self.args.pred_len:, f_dim:]
                        batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
                        loss = criterion(outputs, batch_y)
                        train_loss.append(loss.item())
                else:
                    outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                    f_dim = -1 if self.args.features == 'MS' else 0
                    outputs = outputs[:, -self.args.pred_len:, f_dim:]
                    batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
                    loss = criterion(outputs, batch_y)
                    train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                if self.args.use_amp:
                    scaler.scale(loss).backward()
                    scaler.step(model_optim)
                    scaler.update()
                else:
                    loss.backward()
                    model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(train_loss)
            vali_loss = self.vali(vali_data, vali_loader, criterion)
            test_loss = self.vali(test_data, test_loader, criterion)

            print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
                epoch + 1, train_steps, train_loss, vali_loss, test_loss))
            early_stopping(vali_loss, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break

            adjust_learning_rate(model_optim, epoch + 1, self.args)

        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def test(self, setting, test=0):
        test_data, test_loader = self._get_data(flag='test')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        preds = []
        trues = []
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float().to(self.device)

                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
                # encoder - decoder
                if self.args.use_amp:
                    with torch.cuda.amp.autocast():
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                else:
                    outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, -self.args.pred_len:, :]
                batch_y = batch_y[:, -self.args.pred_len:, :].to(self.device)
                outputs = outputs.detach().cpu().numpy()
                batch_y = batch_y.detach().cpu().numpy()
                if test_data.scale and self.args.inverse:
                    shape = batch_y.shape
                    if outputs.shape[-1] != batch_y.shape[-1]:
                        outputs = np.tile(outputs, [1, 1, int(batch_y.shape[-1] / outputs.shape[-1])])
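                        # the scaler was fit on all input channels, so a narrower model
                        # output (e.g. MS mode) is tiled up to the full channel count
                        # before inverse_transform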
                    outputs = test_data.inverse_transform(outputs.reshape(shape[0] * shape[1], -1)).reshape(shape)
                    batch_y = test_data.inverse_transform(batch_y.reshape(shape[0] * shape[1], -1)).reshape(shape)

                outputs = outputs[:, :, f_dim:]
                batch_y = batch_y[:, :, f_dim:]

                pred = outputs
                true = batch_y

                preds.append(pred)
                trues.append(true)
                if i % 20 == 0:
                    input = batch_x.detach().cpu().numpy()
                    if test_data.scale and self.args.inverse:
                        shape = input.shape
                        input = test_data.inverse_transform(input.reshape(shape[0] * shape[1], -1)).reshape(shape)
                    gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)
                    pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)
                    visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))

        preds = np.concatenate(preds, axis=0)
        trues = np.concatenate(trues, axis=0)
        print('test shape:', preds.shape, trues.shape)
        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
        print('test shape:', preds.shape, trues.shape)

        # result save
        folder_path = './results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        # dtw calculation
        if self.args.use_dtw:
            dtw_list = []
            manhattan_distance = lambda x, y: np.abs(x - y)
            for i in range(preds.shape[0]):
                x = preds[i].reshape(-1, 1)
                y = trues[i].reshape(-1, 1)
                if i % 100 == 0:
                    print("calculating dtw iter:", i)
                d, _, _, _ = accelerated_dtw(x, y, dist=manhattan_distance)
                dtw_list.append(d)
            dtw = np.array(dtw_list).mean()
        else:
            dtw = 'Not calculated'

        mae, mse, rmse, mape, mspe = metric(preds, trues)
        print('mse:{}, mae:{}, dtw:{}'.format(mse, mae, dtw))
        f = open("result_long_term_forecast.txt", 'a')
        f.write(setting + " \n")
        f.write('mse:{}, mae:{}, dtw:{}'.format(mse, mae, dtw))
        f.write('\n')
        f.write('\n')
        f.close()

        np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
        np.save(folder_path + 'pred.npy', preds)
        np.save(folder_path + 'true.npy', trues)

        return

exp/exp_short_term_forecasting.py (new file, 235 lines)
@@ -0,0 +1,235 @@
from data_provider.data_factory import data_provider
from data_provider.m4 import M4Meta
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, visual
from utils.losses import mape_loss, mase_loss, smape_loss
from utils.m4_summary import M4Summary
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np
import pandas

warnings.filterwarnings('ignore')


class Exp_Short_Term_Forecast(Exp_Basic):
    def __init__(self, args):
        super(Exp_Short_Term_Forecast, self).__init__(args)

    def _build_model(self):
        if self.args.data == 'm4':
            self.args.pred_len = M4Meta.horizons_map[self.args.seasonal_patterns]  # Up to M4 config
            self.args.seq_len = 2 * self.args.pred_len  # input_len = 2 * pred_len
            self.args.label_len = self.args.pred_len
            self.args.frequency_map = M4Meta.frequency_map[self.args.seasonal_patterns]
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self, loss_name='MSE'):
        if loss_name == 'MSE':
            return nn.MSELoss()
        elif loss_name == 'MAPE':
            return mape_loss()
        elif loss_name == 'MASE':
            return mase_loss()
        elif loss_name == 'SMAPE':
            return smape_loss()
        else:
            # fail loudly instead of silently returning None for an unknown loss name
            raise ValueError('unknown loss name: {}'.format(loss_name))

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='train')
        vali_data, vali_loader = self._get_data(flag='val')

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)

        time_now = time.time()

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)

        model_optim = self._select_optimizer()
        criterion = self._select_criterion(self.args.loss)
        mse = nn.MSELoss()

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []

            self.model.train()
            epoch_time = time.time()
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
                iter_count += 1
                model_optim.zero_grad()
                batch_x = batch_x.float().to(self.device)

                batch_y = batch_y.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)

                outputs = self.model(batch_x, None, dec_inp, None)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, -self.args.pred_len:, f_dim:]
                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)

                batch_y_mark = batch_y_mark[:, -self.args.pred_len:, f_dim:].to(self.device)
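                # the M4 losses take (insample window, series frequency, forecast, target,
                # mask); MASE in particular needs the insample window and frequency to
                # compute its seasonal naive scaling term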
                loss_value = criterion(batch_x, self.args.frequency_map, outputs, batch_y, batch_y_mark)
                loss_sharpness = mse((outputs[:, 1:, :] - outputs[:, :-1, :]), (batch_y[:, 1:, :] - batch_y[:, :-1, :]))
                loss = loss_value  # + loss_sharpness * 1e-5
                train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(train_loss)
            vali_loss = self.vali(train_loader, vali_loader, criterion)
            test_loss = vali_loss
            print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
                epoch + 1, train_steps, train_loss, vali_loss, test_loss))
            early_stopping(vali_loss, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break

            adjust_learning_rate(model_optim, epoch + 1, self.args)

        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def vali(self, train_loader, vali_loader, criterion):
        x, _ = train_loader.dataset.last_insample_window()
        y = vali_loader.dataset.timeseries
        x = torch.tensor(x, dtype=torch.float32).to(self.device)
        x = x.unsqueeze(-1)

        self.model.eval()
        with torch.no_grad():
            # decoder input
            B, _, C = x.shape
            dec_inp = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)
            dec_inp = torch.cat([x[:, -self.args.label_len:, :], dec_inp], dim=1).float()
            # encoder - decoder
            outputs = torch.zeros((B, self.args.pred_len, C)).float()  # .to(self.device)
            id_list = np.arange(0, B, 500)  # validation set size
            id_list = np.append(id_list, B)
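            # run inference in chunks of 500 series at a time to bound peak memory;
            # id_list holds the chunk boundaries, with B appended as the final edge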
            for i in range(len(id_list) - 1):
                outputs[id_list[i]:id_list[i + 1], :, :] = self.model(x[id_list[i]:id_list[i + 1]], None,
                                                                      dec_inp[id_list[i]:id_list[i + 1]],
                                                                      None).detach().cpu()
            f_dim = -1 if self.args.features == 'MS' else 0
            outputs = outputs[:, -self.args.pred_len:, f_dim:]
            pred = outputs
            true = torch.from_numpy(np.array(y))
            batch_y_mark = torch.ones(true.shape)

            loss = criterion(x.detach().cpu()[:, :, 0], self.args.frequency_map, pred[:, :, 0], true, batch_y_mark)

        self.model.train()
        return loss

    def test(self, setting, test=0):
        _, train_loader = self._get_data(flag='train')
        _, test_loader = self._get_data(flag='test')
        x, _ = train_loader.dataset.last_insample_window()
        y = test_loader.dataset.timeseries
        x = torch.tensor(x, dtype=torch.float32).to(self.device)
        x = x.unsqueeze(-1)

        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        self.model.eval()
        with torch.no_grad():
            B, _, C = x.shape
            dec_inp = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)
            dec_inp = torch.cat([x[:, -self.args.label_len:, :], dec_inp], dim=1).float()
            # encoder - decoder
            outputs = torch.zeros((B, self.args.pred_len, C)).float().to(self.device)
            id_list = np.arange(0, B, 1)
            id_list = np.append(id_list, B)
            for i in range(len(id_list) - 1):
                outputs[id_list[i]:id_list[i + 1], :, :] = self.model(x[id_list[i]:id_list[i + 1]], None,
                                                                      dec_inp[id_list[i]:id_list[i + 1]], None)

                if id_list[i] % 1000 == 0:
                    print(id_list[i])

            f_dim = -1 if self.args.features == 'MS' else 0
            outputs = outputs[:, -self.args.pred_len:, f_dim:]
            outputs = outputs.detach().cpu().numpy()

            preds = outputs
            trues = y
            x = x.detach().cpu().numpy()

            for i in range(0, preds.shape[0], preds.shape[0] // 10):
                gt = np.concatenate((x[i, :, 0], trues[i]), axis=0)
                pd = np.concatenate((x[i, :, 0], preds[i, :, 0]), axis=0)
                visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))

        print('test shape:', preds.shape)

        # result save
        folder_path = './m4_results/' + self.args.model + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        forecasts_df = pandas.DataFrame(preds[:, :, 0], columns=[f'V{i + 1}' for i in range(self.args.pred_len)])
        forecasts_df.index = test_loader.dataset.ids[:preds.shape[0]]
        forecasts_df.index.name = 'id'
        forecasts_df.set_index(forecasts_df.columns[0], inplace=True)
        forecasts_df.to_csv(folder_path + self.args.seasonal_patterns + '_forecast.csv')

        print(self.args.model)
        file_path = './m4_results/' + self.args.model + '/'
        if 'Weekly_forecast.csv' in os.listdir(file_path) \
                and 'Monthly_forecast.csv' in os.listdir(file_path) \
                and 'Yearly_forecast.csv' in os.listdir(file_path) \
                and 'Daily_forecast.csv' in os.listdir(file_path) \
                and 'Hourly_forecast.csv' in os.listdir(file_path) \
                and 'Quarterly_forecast.csv' in os.listdir(file_path):
            m4_summary = M4Summary(file_path, self.args.root_path)
            # m4_forecast.set_index(m4_winner_forecast.columns[0], inplace=True)
            smape_results, owa_results, mape, mase = m4_summary.evaluate()
            print('smape:', smape_results)
            print('mape:', mape)
            print('mase:', mase)
            print('owa:', owa_results)
        else:
            print('After all 6 tasks are finished, you can calculate the averaged index')
        return