feat: add PEMS and Solar dataset support
- Add Dataset_PEMS and Dataset_Solar classes for the PEMS and Solar datasets
- Update data_factory.py to include the new dataset mappings
- Fix M4 dataset handling with an explicit numpy array dtype
- Add a PEMS-specific loss function (L1Loss) and inverse-transform support
- Update validation logic for the PEMS dataset with inverse scaling
- Fix the M4 data loader's insample mask calculation bug

Changes support the new traffic and solar energy datasets while maintaining backward compatibility with existing datasets.
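Note: the data_factory.py change referenced above is not part of the diff shown below. A minimal sketch of what the new registrations likely look like (the dictionary name data_dict matches the existing factory pattern; the key strings 'PEMS' and 'Solar' are assumptions based on the class names):

    from data_provider.data_loader import Dataset_PEMS, Dataset_Solar

    data_dict = {
        # ... existing entries (ETTh1, ETTm1, custom, m4, ...) ...
        'PEMS': Dataset_PEMS,    # assumed key for the PEMS .npz files
        'Solar': Dataset_Solar,  # assumed key for the solar text file
    }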
@@ -340,7 +340,7 @@ class Dataset_M4(Dataset):
         dataset = M4Dataset.load(training=False, dataset_file=self.root_path)
         training_values = np.array(
             [v[~np.isnan(v)] for v in
-             dataset.values[dataset.groups == self.seasonal_patterns]])  # split different frequencies
+             dataset.values[dataset.groups == self.seasonal_patterns]], dtype=np.ndarray)  # split different frequencies
         self.ids = np.array([i for i in dataset.ids[dataset.groups == self.seasonal_patterns]])
         self.timeseries = [ts for ts in training_values]

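The added dtype matters because the per-frequency series have different lengths. A minimal illustration of the failure this avoids, assuming NumPy >= 1.24 (where building a ragged array without an object-like dtype raises an error instead of a deprecation warning):

    import numpy as np

    ragged = [np.ones(3), np.ones(5)]       # series of unequal length
    np.array(ragged)                        # NumPy >= 1.24: ValueError (inhomogeneous shape)
    np.array(ragged, dtype=np.ndarray)      # np.ndarray is treated as dtype=object,
                                            # giving a 1-D object array of the series
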
@@ -381,8 +381,8 @@ class Dataset_M4(Dataset):
         insample_mask = np.zeros((len(self.timeseries), self.seq_len))
         for i, ts in enumerate(self.timeseries):
             ts_last_window = ts[-self.seq_len:]
-            insample[i, -len(ts):] = ts_last_window
-            insample_mask[i, -len(ts):] = 1.0
+            insample[i, -len(ts_last_window):] = ts_last_window
+            insample_mask[i, -len(ts_last_window):] = 1.0
         return insample, insample_mask

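This fix concerns series longer than seq_len: there the old slice -len(ts): only selects the intended window because NumPy clamps out-of-range negative indices, and it reads as if len(ts) positions were being filled. Indexing by the window length states the intent directly. A small illustration with hypothetical values:

    import numpy as np

    seq_len = 4
    insample = np.zeros((1, seq_len))
    ts = np.arange(6.0)                     # len(ts) = 6 > seq_len
    ts_last_window = ts[-seq_len:]          # the 4 most recent values

    insample[0, -len(ts):] = ts_last_window              # works only because [-6:] clamps to [:]
    insample[0, -len(ts_last_window):] = ts_last_window  # explicit: exactly the last 4 slots
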
@@ -746,3 +746,161 @@ class UEAloader(Dataset):

     def __len__(self):
         return len(self.all_IDs)
+
+
+class Dataset_PEMS(Dataset):
+    def __init__(self, args, root_path, flag='train', size=None,
+                 features='S', data_path='ETTh1.csv',
+                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):
+        # size [seq_len, label_len, pred_len]
+        # info
+
+        self.seq_len = size[0]
+        self.label_len = size[1]
+        self.pred_len = size[2]
+        # init
+        assert flag in ['train', 'test', 'val']
+        type_map = {'train': 0, 'val': 1, 'test': 2}
+        self.set_type = type_map[flag]
+
+        self.features = features
+        self.target = target
+        self.scale = scale
+        self.timeenc = timeenc
+        self.freq = freq
+
+        self.root_path = root_path
+        self.data_path = data_path
+        self.__read_data__()
+
+    def __read_data__(self):
+        self.scaler = StandardScaler()
+        data_file = os.path.join(self.root_path, self.data_path)
+        print('data file:', data_file)
+        data = np.load(data_file, allow_pickle=True)
+        data = data['data'][:, :, 0]
+
+        train_ratio = 0.6
+        valid_ratio = 0.2
+        train_data = data[:int(train_ratio * len(data))]
+        valid_data = data[int(train_ratio * len(data)):int((train_ratio + valid_ratio) * len(data))]
+        test_data = data[int((train_ratio + valid_ratio) * len(data)):]
+        total_data = [train_data, valid_data, test_data]
+        data = total_data[self.set_type]
+
+        if self.scale:
+            self.scaler.fit(data)
+            data = self.scaler.transform(data)
+
+        df = pd.DataFrame(data)
+        df = df.fillna(method='ffill', limit=len(df)).fillna(method='bfill', limit=len(df)).values
+
+        self.data_x = df
+        self.data_y = df
+
+    def __getitem__(self, index):
+        if self.set_type == 2:  # test: windows placed end to end (stride 12)
+            s_begin = index * 12
+        else:
+            s_begin = index
+        s_end = s_begin + self.seq_len
+        r_begin = s_end - self.label_len
+        r_end = r_begin + self.label_len + self.pred_len
+
+        seq_x = self.data_x[s_begin:s_end]
+        seq_y = self.data_y[r_begin:r_end]
+        seq_x_mark = torch.zeros((seq_x.shape[0], 1))
+        seq_y_mark = torch.zeros((seq_y.shape[0], 1))
+
+        return seq_x, seq_y, seq_x_mark, seq_y_mark
+
+    def __len__(self):
+        if self.set_type == 2:  # test: windows placed end to end (stride 12)
+            return (len(self.data_x) - self.seq_len - self.pred_len + 1) // 12
+        else:
+            return len(self.data_x) - self.seq_len - self.pred_len + 1
+
+    def inverse_transform(self, data):
+        return self.scaler.inverse_transform(data)
+
+
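The inverse_transform above is the hook for the inverse-scaled PEMS validation mentioned in the commit message; note that this loader fits the scaler on each split's own slice rather than the training slice, so comparisons are most meaningful in the original scale. A hedged sketch of the experiment-side pattern (the names args, vali_data, pred, and true are assumptions, not shown in this diff):

    import numpy as np
    import torch.nn as nn

    criterion = nn.L1Loss() if args.data == 'PEMS' else nn.MSELoss()  # assumed switch

    # inside the validation loop, with pred/true as numpy arrays of shape (B, T, C):
    if args.data == 'PEMS':
        B, T, C = pred.shape
        pred = vali_data.inverse_transform(pred.reshape(-1, C)).reshape(B, T, C)
        true = vali_data.inverse_transform(true.reshape(-1, C)).reshape(B, T, C)
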
+class Dataset_Solar(Dataset):
+    def __init__(self, args, root_path, flag='train', size=None,
+                 features='S', data_path='ETTh1.csv',
+                 target='OT', scale=True, timeenc=0, freq='h', seasonal_patterns=None):
+        # size [seq_len, label_len, pred_len]
+        # info
+        if size == None:
+            self.seq_len = 24 * 4 * 4
+            self.label_len = 24 * 4
+            self.pred_len = 24 * 4
+        else:
+            self.seq_len = size[0]
+            self.label_len = size[1]
+            self.pred_len = size[2]
+        # init
+        assert flag in ['train', 'test', 'val']
+        type_map = {'train': 0, 'val': 1, 'test': 2}
+        self.set_type = type_map[flag]
+
+        self.features = features
+        self.target = target
+        self.scale = scale
+        self.timeenc = timeenc
+        self.freq = freq
+
+        self.root_path = root_path
+        self.data_path = data_path
+        self.__read_data__()
+
+    def __read_data__(self):
+        self.scaler = StandardScaler()
+        df_raw = []
+        with open(os.path.join(self.root_path, self.data_path), "r", encoding='utf-8') as f:
+            for line in f.readlines():
+                line = line.strip('\n').split(',')  # strip the trailing newline from each row
+                data_line = np.stack([float(i) for i in line])
+                df_raw.append(data_line)
+        df_raw = np.stack(df_raw, 0)
+        df_raw = pd.DataFrame(df_raw)
+        '''
+        df_raw.columns: ['date', ...(other features), target feature]
+        '''
+        num_train = int(len(df_raw) * 0.7)
+        num_test = int(len(df_raw) * 0.2)
+        num_vali = len(df_raw) - num_train - num_test
+        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
+        border2s = [num_train, num_train + num_vali, len(df_raw)]
+        border1 = border1s[self.set_type]
+        border2 = border2s[self.set_type]
+
+        df_data = df_raw.values
+
+        if self.scale:
+            train_data = df_data[border1s[0]:border2s[0]]
+            self.scaler.fit(train_data)
+            data = self.scaler.transform(df_data)
+        else:
+            data = df_data
+
+        self.data_x = data[border1:border2]
+        self.data_y = data[border1:border2]
+
+    def __getitem__(self, index):
+        s_begin = index
+        s_end = s_begin + self.seq_len
+        r_begin = s_end - self.label_len
+        r_end = r_begin + self.label_len + self.pred_len
+
+        seq_x = self.data_x[s_begin:s_end]
+        seq_y = self.data_y[r_begin:r_end]
+        seq_x_mark = torch.zeros((seq_x.shape[0], 1))
+        seq_y_mark = torch.zeros((seq_y.shape[0], 1))
+
+        return seq_x, seq_y, seq_x_mark, seq_y_mark
+
+    def __len__(self):
+        return len(self.data_x) - self.seq_len - self.pred_len + 1
+
+    def inverse_transform(self, data):
+        return self.scaler.inverse_transform(data)
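For reference, the Solar split follows the standard 70/10/20 convention of the other long-horizon loaders, with the validation and test starts shifted back by seq_len so each split can form its first input window. Illustrative arithmetic, assuming the commonly reported solar_AL.txt length of 52,560 ten-minute rows and seq_len = 96:

    num_train = int(52560 * 0.7)                             # 36792
    num_test = int(52560 * 0.2)                              # 10512
    num_vali = 52560 - num_train - num_test                  # 5256
    border1s = [0, num_train - 96, 52560 - num_test - 96]    # [0, 36696, 41952]
    border2s = [num_train, num_train + num_vali, 52560]      # [36792, 42048, 52560]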