feat: add PEMS and Solar dataset support
- Add Dataset_PEMS and Dataset_Solar classes for the PEMS and Solar datasets
- Update data_factory.py to include the new dataset mappings
- Fix M4 dataset handling with a proper numpy array dtype
- Add a PEMS-specific loss function (L1Loss) and inverse-transform support
- Update the validation logic for the PEMS dataset with inverse scaling
- Fix the M4 data loader insample mask calculation bug

These changes support the new traffic and solar-energy datasets while maintaining backward compatibility with the existing datasets.
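For context, a minimal sketch of how one of the new datasets would be constructed directly (illustrative values only; the PEMS file name and window sizes below are assumptions, and args is not used by the loader itself):

    from torch.utils.data import DataLoader
    from data_provider.data_loader import Dataset_PEMS

    # Hypothetical paths and window sizes, shown only to illustrate the constructor.
    pems_train = Dataset_PEMS(
        args=None, root_path='./dataset/PEMS/', data_path='PEMS03.npz',
        flag='train', size=[96, 48, 12], features='M',
    )
    loader = DataLoader(pems_train, batch_size=32, shuffle=True)

Equivalently, setting args.data = 'PEMS' or 'Solar' selects the same classes through the data_dict mapping in data_factory.py.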
@@ -1,5 +1,5 @@
 from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_M4, PSMSegLoader, \
-    MSLSegLoader, SMAPSegLoader, SMDSegLoader, SWATSegLoader, UEAloader
+    MSLSegLoader, SMAPSegLoader, SMDSegLoader, SWATSegLoader, UEAloader, Dataset_PEMS, Dataset_Solar
 from data_provider.uea import collate_fn
 from torch.utils.data import DataLoader
 
@@ -15,7 +15,9 @@ data_dict = {
     'SMAP': SMAPSegLoader,
     'SMD': SMDSegLoader,
     'SWAT': SWATSegLoader,
-    'UEA': UEAloader
+    'UEA': UEAloader,
+    'PEMS': Dataset_PEMS,
+    'Solar': Dataset_Solar,
 }
 
 
@@ -340,7 +340,7 @@ class Dataset_M4(Dataset):
         dataset = M4Dataset.load(training=False, dataset_file=self.root_path)
         training_values = np.array(
             [v[~np.isnan(v)] for v in
-             dataset.values[dataset.groups == self.seasonal_patterns]])  # split different frequencies
+             dataset.values[dataset.groups == self.seasonal_patterns]], dtype=np.ndarray)  # split different frequencies
         self.ids = np.array([i for i in dataset.ids[dataset.groups == self.seasonal_patterns]])
         self.timeseries = [ts for ts in training_values]
 
@@ -381,8 +381,8 @@ class Dataset_M4(Dataset):
         insample_mask = np.zeros((len(self.timeseries), self.seq_len))
         for i, ts in enumerate(self.timeseries):
             ts_last_window = ts[-self.seq_len:]
-            insample[i, -len(ts):] = ts_last_window
-            insample_mask[i, -len(ts):] = 1.0
+            insample[i, -len(ts_last_window):] = ts_last_window
+            insample_mask[i, -len(ts_last_window):] = 1.0
         return insample, insample_mask
 
 
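For reference, a standalone numpy sketch (not repository code) of what the window-building loop above produces for a series shorter than seq_len — the observed values and the mask are right-aligned inside the zero-padded window:

    import numpy as np

    seq_len = 6
    ts = np.array([1.0, 2.0, 3.0])             # one series, shorter than seq_len
    insample = np.zeros((1, seq_len))
    insample_mask = np.zeros((1, seq_len))

    ts_last_window = ts[-seq_len:]             # here this is the whole series
    insample[0, -len(ts_last_window):] = ts_last_window
    insample_mask[0, -len(ts_last_window):] = 1.0

    # insample      -> [[0. 0. 0. 1. 2. 3.]]
    # insample_mask -> [[0. 0. 0. 1. 1. 1.]]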
@@ -746,3 +746,161 @@ class UEAloader(Dataset):
 
     def __len__(self):
         return len(self.all_IDs)
+
+
+class Dataset_PEMS(Dataset):
+    def __init__(self, args, root_path, flag='train', size=None,
+                 features='S', data_path='ETTh1.csv',
+                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):
+        # size [seq_len, label_len, pred_len]
+        # info
+        self.seq_len = size[0]
+        self.label_len = size[1]
+        self.pred_len = size[2]
+        # init
+        assert flag in ['train', 'test', 'val']
+        type_map = {'train': 0, 'val': 1, 'test': 2}
+        self.set_type = type_map[flag]
+
+        self.features = features
+        self.target = target
+        self.scale = scale
+        self.timeenc = timeenc
+        self.freq = freq
+
+        self.root_path = root_path
+        self.data_path = data_path
+        self.__read_data__()
+
+    def __read_data__(self):
+        self.scaler = StandardScaler()
+        data_file = os.path.join(self.root_path, self.data_path)
+        print('data file:', data_file)
+        data = np.load(data_file, allow_pickle=True)
+        data = data['data'][:, :, 0]
+
+        train_ratio = 0.6
+        valid_ratio = 0.2
+        train_data = data[:int(train_ratio * len(data))]
+        valid_data = data[int(train_ratio * len(data)):int((train_ratio + valid_ratio) * len(data))]
+        test_data = data[int((train_ratio + valid_ratio) * len(data)):]
+        total_data = [train_data, valid_data, test_data]
+        data = total_data[self.set_type]
+
+        if self.scale:
+            self.scaler.fit(data)
+            data = self.scaler.transform(data)
+
+        df = pd.DataFrame(data)
+        df = df.fillna(method='ffill', limit=len(df)).fillna(method='bfill', limit=len(df)).values
+
+        self.data_x = df
+        self.data_y = df
+
+    def __getitem__(self, index):
+        if self.set_type == 2:  # test: windows laid head-to-tail (stride of 12)
+            s_begin = index * 12
+        else:
+            s_begin = index
+        s_end = s_begin + self.seq_len
+        r_begin = s_end - self.label_len
+        r_end = r_begin + self.label_len + self.pred_len
+
+        seq_x = self.data_x[s_begin:s_end]
+        seq_y = self.data_y[r_begin:r_end]
+        seq_x_mark = torch.zeros((seq_x.shape[0], 1))
+        seq_y_mark = torch.zeros((seq_y.shape[0], 1))
+
+        return seq_x, seq_y, seq_x_mark, seq_y_mark
+
+    def __len__(self):
+        if self.set_type == 2:  # test: windows laid head-to-tail (stride of 12)
+            return (len(self.data_x) - self.seq_len - self.pred_len + 1) // 12
+        else:
+            return len(self.data_x) - self.seq_len - self.pred_len + 1
+
+    def inverse_transform(self, data):
+        return self.scaler.inverse_transform(data)
+
+
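For reference, a standalone sketch (hypothetical lengths; not repository code) of the window indexing implied by the stride-of-12 test branch in Dataset_PEMS above:

    seq_len, pred_len = 96, 12
    n_test_steps = 3548                                          # hypothetical test-split length

    n_windows = (n_test_steps - seq_len - pred_len + 1) // 12    # __len__ in test mode
    starts = [i * 12 for i in range(n_windows)]                  # s_begin per __getitem__

    print(n_windows)                 # 286
    print(starts[:3], starts[-1])    # [0, 12, 24] 3420 — the prediction windows tile the split end to end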
+class Dataset_Solar(Dataset):
+    def __init__(self, args, root_path, flag='train', size=None,
+                 features='S', data_path='ETTh1.csv',
+                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):
+        # size [seq_len, label_len, pred_len]
+        # info
+        if size == None:
+            self.seq_len = 24 * 4 * 4
+            self.label_len = 24 * 4
+            self.pred_len = 24 * 4
+        else:
+            self.seq_len = size[0]
+            self.label_len = size[1]
+            self.pred_len = size[2]
+        # init
+        assert flag in ['train', 'test', 'val']
+        type_map = {'train': 0, 'val': 1, 'test': 2}
+        self.set_type = type_map[flag]
+
+        self.features = features
+        self.target = target
+        self.scale = scale
+        self.timeenc = timeenc
+        self.freq = freq
+
+        self.root_path = root_path
+        self.data_path = data_path
+        self.__read_data__()
+
+    def __read_data__(self):
+        self.scaler = StandardScaler()
+        df_raw = []
+        with open(os.path.join(self.root_path, self.data_path), "r", encoding='utf-8') as f:
+            for line in f.readlines():
+                line = line.strip('\n').split(',')  # strip the trailing newline from each row
+                data_line = np.stack([float(i) for i in line])
+                df_raw.append(data_line)
+        df_raw = np.stack(df_raw, 0)
+        df_raw = pd.DataFrame(df_raw)
+        '''
+        df_raw.columns: ['date', ...(other features), target feature]
+        '''
+        num_train = int(len(df_raw) * 0.7)
+        num_test = int(len(df_raw) * 0.2)
+        num_vali = len(df_raw) - num_train - num_test
+        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
+        border2s = [num_train, num_train + num_vali, len(df_raw)]
+        border1 = border1s[self.set_type]
+        border2 = border2s[self.set_type]
+
+        df_data = df_raw.values
+
+        if self.scale:
+            train_data = df_data[border1s[0]:border2s[0]]
+            self.scaler.fit(train_data)
+            data = self.scaler.transform(df_data)
+        else:
+            data = df_data
+
+        self.data_x = data[border1:border2]
+        self.data_y = data[border1:border2]
+
+    def __getitem__(self, index):
+        s_begin = index
+        s_end = s_begin + self.seq_len
+        r_begin = s_end - self.label_len
+        r_end = r_begin + self.label_len + self.pred_len
+
+        seq_x = self.data_x[s_begin:s_end]
+        seq_y = self.data_y[r_begin:r_end]
+        seq_x_mark = torch.zeros((seq_x.shape[0], 1))
+        seq_y_mark = torch.zeros((seq_y.shape[0], 1))
+
+        return seq_x, seq_y, seq_x_mark, seq_y_mark
+
+    def __len__(self):
+        return len(self.data_x) - self.seq_len - self.pred_len + 1
+
+    def inverse_transform(self, data):
+        return self.scaler.inverse_transform(data)
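A quick standalone check of the 70/10/20 border arithmetic used by Dataset_Solar above, with a hypothetical row count:

    seq_len = 96
    n = 52560                                   # hypothetical number of rows in the solar file
    num_train = int(n * 0.7)                    # 36792
    num_test = int(n * 0.2)                     # 10512
    num_vali = n - num_train - num_test         # 5256

    border1s = [0, num_train - seq_len, n - num_test - seq_len]   # [0, 36696, 41952]
    border2s = [num_train, num_train + num_vali, n]               # [36792, 42048, 52560]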
@@ -129,10 +129,11 @@ class M4Meta:
 }  # from interpretable.gin
 
 
-def load_m4_info() -> pd.DataFrame:
+def load_m4_info(info_file_path: str) -> pd.DataFrame:
     """
     Load M4Info file.
 
+    :param info_file_path: Path to M4-info.csv file
     :return: Pandas DataFrame of M4Info.
     """
-    return pd.read_csv(INFO_FILE_PATH)
+    return pd.read_csv(info_file_path)
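With the signature change, callers now pass the info-file location explicitly, e.g. (illustrative path):

    m4_info = load_m4_info('./dataset/m4/M4-info.csv')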
@@ -34,9 +34,19 @@ class Exp_Long_Term_Forecast(Exp_Basic):
         model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
         return model_optim
 
-    def _select_criterion(self):
-        criterion = nn.MSELoss()
-        return criterion
+    def _select_criterion(self, loss_name='MSE'):
+        if self.args.data == 'PEMS':
+            return nn.L1Loss()
+        elif loss_name == 'MSE':
+            return nn.MSELoss()
+        elif loss_name == 'MAPE':
+            return mape_loss()
+        elif loss_name == 'MASE':
+            return mase_loss()
+        elif loss_name == 'SMAPE':
+            return smape_loss()
+        elif loss_name == 'MAE':
+            return nn.L1Loss(reduction='mean')
 
     def vali(self, vali_data, vali_loader, criterion):
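A self-contained sketch (illustrative, outside the class) of the dispatch the new _select_criterion introduces — the PEMS check takes precedence over loss_name; the MAPE/MASE/SMAPE branches, which rely on the repository's loss utilities being importable, are omitted here:

    import torch.nn as nn

    def select_criterion(data_name, loss_name='MSE'):
        if data_name == 'PEMS':
            return nn.L1Loss()                       # PEMS runs always get L1Loss
        elif loss_name == 'MSE':
            return nn.MSELoss()
        elif loss_name == 'MAE':
            return nn.L1Loss(reduction='mean')

    print(select_criterion('PEMS', 'SMAPE'))   # L1Loss() — the PEMS branch wins
    print(select_criterion('ETTh1', 'MSE'))    # MSELoss()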
@@ -66,9 +76,18 @@ class Exp_Long_Term_Forecast(Exp_Basic):
                 pred = outputs.detach()
                 true = batch_y.detach()
 
-                loss = criterion(pred, true)
+                if self.args.data == 'PEMS':
+                    B, T, C = pred.shape
+                    pred = pred.cpu().numpy()
+                    true = true.cpu().numpy()
+                    pred = vali_data.inverse_transform(pred.reshape(-1, C)).reshape(B, T, C)
+                    true = vali_data.inverse_transform(true.reshape(-1, C)).reshape(B, T, C)
+                    mae, mse, rmse, mape, mspe = metric(pred, true)
+                    total_loss.append(mae)
+                else:
+                    loss = criterion(pred, true)
+                    total_loss.append(loss.item())
 
-                total_loss.append(loss.item())
         total_loss = np.average(total_loss)
         self.model.train()
         return total_loss
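For reference, a standalone sketch (synthetic shapes; not repository code) of the flatten, inverse_transform, reshape pattern used in the PEMS validation branch above — StandardScaler expects 2-D (samples, features) input, so the time dimension is folded into the batch axis and restored afterwards:

    import numpy as np
    from sklearn.preprocessing import StandardScaler

    B, T, C = 4, 12, 358                       # batch, horizon, sensors (synthetic)
    scaler = StandardScaler().fit(np.random.rand(1000, C))
    pred = np.random.randn(B, T, C)            # model output in scaled space

    pred_inv = scaler.inverse_transform(pred.reshape(-1, C)).reshape(B, T, C)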