feat: add mamba and dynamic chunking related code and test code

This commit is contained in:
gameloader
2025-09-04 01:32:13 +00:00
parent 12cb7652cf
commit ef307a57e9
21 changed files with 4550 additions and 86 deletions

View File

@@ -0,0 +1,145 @@
#!/bin/bash
# xPatch_SparseChannel classification training across five UEA datasets.
#
# Each run writes its stdout (via tee) to
#   ./results/xPatch_SparseChannel_<dataset>.log
# Expects run.py in the CWD and each dataset under ./dataset/<name>/.
#
# NOTE: no `set -e` on purpose — a failing dataset must not abort the
# remaining runs (matches the original sequential-script behavior).
# pipefail ensures a failing `python` is not masked by a succeeding `tee`.
set -u -o pipefail

export CUDA_VISIBLE_DEVICES=0

readonly model_name=xPatch_SparseChannel

# Per-dataset hyperparameters; everything else is shared (see run_dataset).
# Fields: dataset  seq_len  enc_in  k_graph  batch_size  learning_rate
readonly -a configs=(
  "UWaveGestureLibrary  315  3   3 64 0.001"
  "EthanolConcentration 1751 3   3 64 0.0005"
  "Handwriting          152  3   3 64 0.001"
  "JapaneseVowels       29   12  8 64 0.0005"
  "PEMS-SF              144  963 8 16 0.0005"
)

#######################################
# Launch one classification training run.
# Globals:   model_name (read)
# Arguments: $1 dataset name, $2 seq_len, $3 enc_in, $4 k_graph,
#            $5 batch_size, $6 learning_rate
# Outputs:   training log to stdout and ./results/<model>_<dataset>.log
# Returns:   run.py's exit status (pipefail propagates it through tee)
#######################################
run_dataset() {
  local name=$1 seq_len=$2 enc_in=$3 k_graph=$4 batch_size=$5 lr=$6
  python -u run.py \
    --task_name classification \
    --is_training 1 \
    --root_path "./dataset/${name}/" \
    --model_id "$name" \
    --model "$model_name" \
    --data UEA \
    --e_layers 2 \
    --batch_size "$batch_size" \
    --seq_len "$seq_len" \
    --enc_in "$enc_in" \
    --d_model 128 \
    --d_ff 256 \
    --n_heads 16 \
    --patch_len 16 \
    --stride 8 \
    --dropout 0.1 \
    --des "${model_name}_${name}" \
    --itr 1 \
    --learning_rate "$lr" \
    --train_epochs 100 \
    --patience 30 \
    --revin 0 \
    --k_graph "$k_graph" | tee "./results/${model_name}_${name}.log"
}

main() {
  # Create results directory if it doesn't exist
  mkdir -p ./results

  local failed=0 cfg
  for cfg in "${configs[@]}"; do
    # shellcheck disable=SC2086 — intentional word-splitting; config fields
    # are whitespace-free by construction.
    if ! run_dataset $cfg; then
      printf 'WARNING: training failed for %s\n' "${cfg%% *}" >&2
      failed=1
    fi
  done
  return "$failed"
}

main "$@"