feat: add mamba and dynamic chunking related code and test code

New file: scripts/classification/DC_PatchTST.sh (142 lines, executable)
@ -0,0 +1,142 @@
|
||||
export CUDA_VISIBLE_DEVICES=0
|
||||
|
||||
model_name=DC_PatchTST
|
||||
|
||||
|
||||
|
||||
# DC_PatchTST specific parameters
|
||||
d_model_stage0=64 # Stage 0 dimension (D0)
|
||||
depth_enc0=1 # Stage 0 Mamba2 encoder depth
|
||||
depth_enc1=1 # Stage 1 Mamba2 encoder depth
|
||||
target_ratio0=0.25 # Target compression ratio for stage 0
|
||||
target_ratio1=0.25 # Target compression ratio for stage 1
|
||||
|
||||
# EthanolConcentration dataset
|
||||
python -u run.py \
|
||||
--task_name classification \
|
||||
--is_training 1 \
|
||||
--root_path ./dataset/EthanolConcentration/ \
|
||||
--model_id EthanolConcentration \
|
||||
--model $model_name \
|
||||
--data UEA \
|
||||
--e_layers 3 \
|
||||
--batch_size 8 \
|
||||
--d_model 128 \
|
||||
--d_ff 256 \
|
||||
--n_heads 8 \
|
||||
--dropout 0.1 \
|
||||
--activation gelu \
|
||||
--des 'DC_PatchTST_Exp' \
|
||||
--itr 1 \
|
||||
--learning_rate 0.0002 \
|
||||
--train_epochs 100 \
|
||||
--patience 10 \
|
||||
--d_model_stage0 $d_model_stage0 \
|
||||
--depth_enc0 $depth_enc0 \
|
||||
--depth_enc1 $depth_enc1 \
|
||||
--target_ratio0 $target_ratio0 \
|
||||
--target_ratio1 $target_ratio1
|
||||
|
||||
# FaceDetection dataset
|
||||
python -u run.py \
|
||||
--task_name classification \
|
||||
--is_training 1 \
|
||||
--root_path ./dataset/FaceDetection/ \
|
||||
--model_id FaceDetection \
|
||||
--model $model_name \
|
||||
--data UEA \
|
||||
--e_layers 3 \
|
||||
--batch_size 8 \
|
||||
--d_model 128 \
|
||||
--d_ff 256 \
|
||||
--n_heads 8 \
|
||||
--dropout 0.1 \
|
||||
--activation gelu \
|
||||
--des 'DC_PatchTST_Exp' \
|
||||
--itr 1 \
|
||||
--learning_rate 0.001 \
|
||||
--train_epochs 100 \
|
||||
--patience 10 \
|
||||
--d_model_stage0 $d_model_stage0 \
|
||||
--depth_enc0 $depth_enc0 \
|
||||
--depth_enc1 $depth_enc1 \
|
||||
--target_ratio0 $target_ratio0 \
|
||||
--target_ratio1 $target_ratio1
|
||||
|
||||
# Handwriting dataset
|
||||
python -u run.py \
|
||||
--task_name classification \
|
||||
--is_training 1 \
|
||||
--root_path ./dataset/Handwriting/ \
|
||||
--model_id Handwriting \
|
||||
--model $model_name \
|
||||
--data UEA \
|
||||
--e_layers 3 \
|
||||
--batch_size 8 \
|
||||
--d_model 128 \
|
||||
--d_ff 256 \
|
||||
--n_heads 8 \
|
||||
--dropout 0.1 \
|
||||
--activation gelu \
|
||||
--des 'DC_PatchTST_Exp' \
|
||||
--itr 1 \
|
||||
--learning_rate 0.001 \
|
||||
--train_epochs 100 \
|
||||
--patience 10 \
|
||||
--d_model_stage0 $d_model_stage0 \
|
||||
--depth_enc0 $depth_enc0 \
|
||||
--depth_enc1 $depth_enc1 \
|
||||
--target_ratio0 $target_ratio0 \
|
||||
--target_ratio1 $target_ratio1
|
||||
|
||||
# Heartbeat dataset
|
||||
python -u run.py \
|
||||
--task_name classification \
|
||||
--is_training 1 \
|
||||
--root_path ./dataset/Heartbeat/ \
|
||||
--model_id Heartbeat \
|
||||
--model $model_name \
|
||||
--data UEA \
|
||||
--e_layers 3 \
|
||||
--batch_size 8 \
|
||||
--d_model 128 \
|
||||
--d_ff 256 \
|
||||
--n_heads 8 \
|
||||
--dropout 0.1 \
|
||||
--activation gelu \
|
||||
--des 'DC_PatchTST_Exp' \
|
||||
--itr 1 \
|
||||
--learning_rate 0.001 \
|
||||
--train_epochs 100 \
|
||||
--patience 10 \
|
||||
--d_model_stage0 $d_model_stage0 \
|
||||
--depth_enc0 $depth_enc0 \
|
||||
--depth_enc1 $depth_enc1 \
|
||||
--target_ratio0 $target_ratio0 \
|
||||
--target_ratio1 $target_ratio1
|
||||
|
||||
# JapaneseVowels dataset
|
||||
python -u run.py \
|
||||
--task_name classification \
|
||||
--is_training 1 \
|
||||
--root_path ./dataset/JapaneseVowels/ \
|
||||
--model_id JapaneseVowels \
|
||||
--model $model_name \
|
||||
--data UEA \
|
||||
--e_layers 3 \
|
||||
--batch_size 8 \
|
||||
--d_model 128 \
|
||||
--d_ff 256 \
|
||||
--n_heads 8 \
|
||||
--dropout 0.1 \
|
||||
--activation gelu \
|
||||
--des 'DC_PatchTST_Exp' \
|
||||
--itr 1 \
|
||||
--learning_rate 0.001 \
|
||||
--train_epochs 100 \
|
||||
--patience 10 \
|
||||
--d_model_stage0 $d_model_stage0 \
|
||||
--depth_enc0 $depth_enc0 \
|
||||
--depth_enc1 $depth_enc1 \
|
||||
--target_ratio0 $target_ratio0 \
|
||||
--target_ratio1 $target_ratio1
|
New file: scripts/classification/vanillaMamba_classification.sh (259 lines)
@ -0,0 +1,259 @@
#!/bin/bash

# vanillaMamba Classification Training Script for Multiple Datasets
# Each run's stdout is mirrored to ./results/<model>_<dataset>.log via tee.
export CUDA_VISIBLE_DEVICES=0

model_name=vanillaMamba

# Create results directory if it doesn't exist
mkdir -p ./results

# UWaveGestureLibrary dataset (seq_len=315, enc_in=3) - use Copy1 config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/UWaveGestureLibrary/ \
  --model_id UWaveGestureLibrary \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 315 \
  --enc_in 3 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 128 \
  --dropout 0.1 \
  --des 'vanillaMamba_UWaveGestureLibrary' \
  --itr 1 \
  --learning_rate 0.002 \
  --train_epochs 150 \
  --patience 30 \
  --revin 0 | tee ./results/vanillaMamba_UWaveGestureLibrary.log

# EthanolConcentration dataset (seq_len=1751, enc_in=3) - use Copy1 config
# FIX: was '--is_training 3' (every other run uses 1) and '--enc_in 4',
# which contradicted this block's own comment (enc_in=3) and the
# EthanolConcentration configs used elsewhere in this commit.
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/EthanolConcentration/ \
  --model_id EthanolConcentration \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 1751 \
  --enc_in 3 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_EthanolConcentration' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 200 \
  --patience 30 \
  --revin 0 | tee ./results/vanillaMamba_EthanolConcentration.log

# Handwriting dataset (seq_len=152, enc_in=3) - use Copy1 config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/Handwriting/ \
  --model_id Handwriting \
  --model $model_name \
  --data UEA \
  --e_layers 4 \
  --batch_size 64 \
  --seq_len 152 \
  --enc_in 3 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_Handwriting' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 200 \
  --patience 30 \
  --revin 0 | tee ./results/vanillaMamba_Handwriting.log

# JapaneseVowels dataset (seq_len=29, enc_in=12) - use Copy1 config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/JapaneseVowels/ \
  --model_id JapaneseVowels \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 29 \
  --enc_in 12 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_JapaneseVowels' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 | tee ./results/vanillaMamba_JapaneseVowels.log

# PEMS-SF dataset (seq_len=144, enc_in=963) - use Copy1 config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/PEMS-SF/ \
  --model_id PEMS-SF \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 16 \
  --seq_len 144 \
  --enc_in 963 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_PEMS-SF' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 150 \
  --patience 30 \
  --revin 0 | tee ./results/vanillaMamba_PEMS-SF.log

# Heartbeat dataset (seq_len=405, enc_in=61) - use original config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/Heartbeat/ \
  --model_id Heartbeat \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 405 \
  --enc_in 61 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_Heartbeat' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 150 \
  --patience 10 \
  --revin 0 | tee ./results/vanillaMamba_Heartbeat.log

# FaceDetection dataset (seq_len=62, enc_in=144) - use original config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/FaceDetection/ \
  --model_id FaceDetection \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 62 \
  --enc_in 144 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_FaceDetection' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 10 \
  --revin 0 | tee ./results/vanillaMamba_FaceDetection.log

# SelfRegulationSCP1 dataset (seq_len=896, enc_in=6) - use original config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SelfRegulationSCP1/ \
  --model_id SelfRegulationSCP1 \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 896 \
  --enc_in 6 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_SelfRegulationSCP1' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 10 \
  --revin 0 | tee ./results/vanillaMamba_SelfRegulationSCP1.log

# SelfRegulationSCP2 dataset (seq_len=1152, enc_in=7) - use original config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SelfRegulationSCP2/ \
  --model_id SelfRegulationSCP2 \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 1152 \
  --enc_in 7 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_SelfRegulationSCP2' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 10 \
  --revin 0 | tee ./results/vanillaMamba_SelfRegulationSCP2.log

# SpokenArabicDigits dataset (seq_len=93, enc_in=13) - use original config
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SpokenArabicDigits/ \
  --model_id SpokenArabicDigits \
  --model $model_name \
  --data UEA \
  --e_layers 3 \
  --batch_size 64 \
  --seq_len 93 \
  --enc_in 13 \
  --d_model 128 \
  --d_state 64 \
  --d_conv 4 \
  --expand 2 \
  --headdim 64 \
  --dropout 0.1 \
  --des 'vanillaMamba_SpokenArabicDigits' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 10 \
  --revin 0 | tee ./results/vanillaMamba_SpokenArabicDigits.log
New file: scripts/classification/xPatch_SparseChannel-Copy1.sh (145 lines)
@ -0,0 +1,145 @@
#!/bin/bash

# xPatch_SparseChannel Classification Training Script for Multiple Datasets
# Runs one training job per UEA dataset; stdout is mirrored to a per-dataset
# log under ./results via tee. k_graph is the per-dataset sparse-graph size.
export CUDA_VISIBLE_DEVICES=0

model_name=xPatch_SparseChannel

# Create results directory if it doesn't exist
mkdir -p ./results

# UWaveGestureLibrary dataset (seq_len=315, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/UWaveGestureLibrary/ \
  --model_id UWaveGestureLibrary \
  --model "$model_name" \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 315 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_UWaveGestureLibrary' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_UWaveGestureLibrary.log

# EthanolConcentration dataset (seq_len=1751, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/EthanolConcentration/ \
  --model_id EthanolConcentration \
  --model "$model_name" \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 1751 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_EthanolConcentration' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_EthanolConcentration.log

# Handwriting dataset (seq_len=152, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/Handwriting/ \
  --model_id Handwriting \
  --model "$model_name" \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 152 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_Handwriting' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_Handwriting.log

# JapaneseVowels dataset (seq_len=29, enc_in=12, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/JapaneseVowels/ \
  --model_id JapaneseVowels \
  --model "$model_name" \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 29 \
  --enc_in 12 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_JapaneseVowels' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_JapaneseVowels.log

# PEMS-SF dataset (seq_len=144, enc_in=963, k_graph=8)
# Smaller batch size (16) because of the very wide channel dimension.
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/PEMS-SF/ \
  --model_id PEMS-SF \
  --model "$model_name" \
  --data UEA \
  --e_layers 2 \
  --batch_size 16 \
  --seq_len 144 \
  --enc_in 963 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_PEMS-SF' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_PEMS-SF.log
@ -1,10 +1,66 @@
#!/bin/bash

# xPatch_SparseChannel Classification Training Script for Multiple Datasets
# NOTE(review): this span was a rendered diff mixing pre- and post-change
# lines (e.g. '--e_layers 3' vs '--e_layers 2', '--revin 1' vs '--revin 0');
# reconstructed here as the post-change script. The old '--moving_avg 25'
# flag appears to have been dropped from FaceDetection per the hunk counts
# (-12,21 +68,202) — confirm against the repository.
export CUDA_VISIBLE_DEVICES=0

model_name=xPatch_SparseChannel

# Create results directory if it doesn't exist
mkdir -p ./results

# Heartbeat dataset (seq_len=405, enc_in=61, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/Heartbeat/ \
  --model_id Heartbeat \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 405 \
  --enc_in 61 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_Heartbeat' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 5 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_Heartbeat.log

# UWaveGestureLibrary dataset (seq_len=315, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/UWaveGestureLibrary/ \
  --model_id UWaveGestureLibrary \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 315 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_UWaveGestureLibrary' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_UWaveGestureLibrary.log

# FaceDetection dataset (seq_len=62, enc_in=144, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/FaceDetection/ \
  --model_id FaceDetection \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 62 \
  --enc_in 144 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_FaceDetection' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 5 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_FaceDetection.log

# EthanolConcentration dataset (seq_len=1751, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/EthanolConcentration/ \
  --model_id EthanolConcentration \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 1751 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_EthanolConcentration' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_EthanolConcentration.log

# Handwriting dataset (seq_len=152, enc_in=3, k_graph=3)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/Handwriting/ \
  --model_id Handwriting \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 152 \
  --enc_in 3 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_Handwriting' \
  --itr 1 \
  --learning_rate 0.001 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 3 | tee ./results/xPatch_SparseChannel_Handwriting.log

# JapaneseVowels dataset (seq_len=29, enc_in=12, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/JapaneseVowels/ \
  --model_id JapaneseVowels \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 29 \
  --enc_in 12 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_JapaneseVowels' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_JapaneseVowels.log

# SelfRegulationSCP1 dataset (seq_len=896, enc_in=6, k_graph=6)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SelfRegulationSCP1/ \
  --model_id SelfRegulationSCP1 \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 896 \
  --enc_in 6 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_SelfRegulationSCP1' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 5 \
  --revin 0 \
  --k_graph 6 | tee ./results/xPatch_SparseChannel_SelfRegulationSCP1.log

# SelfRegulationSCP2 dataset (seq_len=1152, enc_in=7, k_graph=7)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SelfRegulationSCP2/ \
  --model_id SelfRegulationSCP2 \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 1152 \
  --enc_in 7 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_SelfRegulationSCP2' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 5 \
  --revin 0 \
  --k_graph 7 | tee ./results/xPatch_SparseChannel_SelfRegulationSCP2.log

# SpokenArabicDigits dataset (seq_len=93, enc_in=13, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/SpokenArabicDigits/ \
  --model_id SpokenArabicDigits \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 64 \
  --seq_len 93 \
  --enc_in 13 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_SpokenArabicDigits' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 5 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_SpokenArabicDigits.log

# PEMS-SF dataset (seq_len=144, enc_in=963, k_graph=8)
python -u run.py \
  --task_name classification \
  --is_training 1 \
  --root_path ./dataset/PEMS-SF/ \
  --model_id PEMS-SF \
  --model $model_name \
  --data UEA \
  --e_layers 2 \
  --batch_size 16 \
  --seq_len 144 \
  --enc_in 963 \
  --d_model 128 \
  --d_ff 256 \
  --n_heads 16 \
  --patch_len 16 \
  --stride 8 \
  --dropout 0.1 \
  --des 'xPatch_SparseChannel_PEMS-SF' \
  --itr 1 \
  --learning_rate 0.0005 \
  --train_epochs 100 \
  --patience 30 \
  --revin 0 \
  --k_graph 8 | tee ./results/xPatch_SparseChannel_PEMS-SF.log
Reference in New Issue
Block a user