diff --git a/scripts/pusht/imf_attnres_local_queue.sh b/scripts/pusht/imf_attnres_local_queue.sh
new file mode 100755
index 0000000..d074dcf
--- /dev/null
+++ b/scripts/pusht/imf_attnres_local_queue.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Sequential PushT IMF-AttnRes arch-sweep training queue (local worktree, cuda:0).
+set -euo pipefail
+cd /home/droid/project/diffusion_policy/.worktrees/feat-pusht-imf-attnres
+export PYTHONUNBUFFERED=1
+# Never commit secrets: the SwanLab key must come from the caller's environment.
+: "${SWANLAB_API_KEY:?set SWANLAB_API_KEY in the environment before running this queue}"
+export SWANLAB_API_KEY
+export LD_LIBRARY_PATH="$(printf '%s:' .venv/lib/python3.9/site-packages/nvidia/*/lib | sed 's/:$//')"
+mkdir -p data/run_logs  # per-run logs land here; may not exist on a fresh checkout
+# run_exp NAME N_EMB N_LAYER — one training run; stdout/stderr -> data/run_logs/NAME.log.
+# NOTE: under `set -e` a failed run aborts the remaining queue entries (intentional).
+run_exp() {
+  local name="$1" emb="$2" layer="$3"
+  echo "[$(date '+%F %T')] START $name emb=$emb layer=$layer"
+  .venv/bin/python train.py \
+    --config-dir=. \
+    --config-name=image_pusht_diffusion_policy_dit_imf_attnres_full.yaml \
+    training.device=cuda:0 \
+    training.num_epochs=350 \
+    training.resume=false \
+    exp_name="$name" \
+    logging.group=imf_pusht_attnres_arch_sweep \
+    logging.name="$name" \
+    logging.resume=false \
+    logging.id=null \
+    hydra.run.dir="data/outputs/$name" \
+    policy.n_emb="$emb" \
+    policy.n_layer="$layer" \
+    > "data/run_logs/${name}.log" 2>&1
+  echo "[$(date '+%F %T')] END $name"
+}
+run_exp imf_attnres_emb384_layer18_seed42_local 384 18
+run_exp imf_attnres_emb256_layer6_seed42_local 256 6
+run_exp imf_attnres_emb128_layer6_seed42_local 128 6
diff --git a/scripts/pusht/imf_attnres_remote_gpu0_queue.sh b/scripts/pusht/imf_attnres_remote_gpu0_queue.sh
new file mode 100755
index 0000000..2f2a1ae
--- /dev/null
+++ b/scripts/pusht/imf_attnres_remote_gpu0_queue.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Sequential PushT IMF-AttnRes arch-sweep training queue (remote 5880, cuda:0).
+set -euo pipefail
+cd /home/droid/project/diffusion_policy-smoke
+export PYTHONUNBUFFERED=1
+# Never commit secrets: the SwanLab key must come from the caller's environment.
+: "${SWANLAB_API_KEY:?set SWANLAB_API_KEY in the environment before running this queue}"
+export SWANLAB_API_KEY
+export LD_LIBRARY_PATH="$(printf '%s:' .venv/lib/python3.9/site-packages/nvidia/*/lib | sed 's/:$//')"
+mkdir -p data/run_logs  # per-run logs land here; may not exist on a fresh checkout
+# run_exp NAME N_EMB N_LAYER — one training run; stdout/stderr -> data/run_logs/NAME.log.
+# NOTE: under `set -e` a failed run aborts the remaining queue entries (intentional).
+run_exp() {
+  local name="$1" emb="$2" layer="$3"
+  echo "[$(date '+%F %T')] START $name emb=$emb layer=$layer"
+  .venv/bin/python train.py \
+    --config-dir=. \
+    --config-name=image_pusht_diffusion_policy_dit_imf_attnres_full.yaml \
+    training.device=cuda:0 \
+    training.num_epochs=350 \
+    training.resume=false \
+    exp_name="$name" \
+    logging.group=imf_pusht_attnres_arch_sweep \
+    logging.name="$name" \
+    logging.resume=false \
+    logging.id=null \
+    hydra.run.dir="data/outputs/$name" \
+    policy.n_emb="$emb" \
+    policy.n_layer="$layer" \
+    > "data/run_logs/${name}.log" 2>&1
+  echo "[$(date '+%F %T')] END $name"
+}
+run_exp imf_attnres_emb384_layer12_seed42_5880gpu0 384 12
+run_exp imf_attnres_emb256_layer12_seed42_5880gpu0 256 12
+run_exp imf_attnres_emb128_layer12_seed42_5880gpu0 128 12
diff --git a/scripts/pusht/imf_attnres_remote_gpu1_queue.sh b/scripts/pusht/imf_attnres_remote_gpu1_queue.sh
new file mode 100755
index 0000000..9161fdf
--- /dev/null
+++ b/scripts/pusht/imf_attnres_remote_gpu1_queue.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Sequential PushT IMF-AttnRes arch-sweep training queue (remote 5880, cuda:1).
+set -euo pipefail
+cd /home/droid/project/diffusion_policy-smoke
+export PYTHONUNBUFFERED=1
+# Never commit secrets: the SwanLab key must come from the caller's environment.
+: "${SWANLAB_API_KEY:?set SWANLAB_API_KEY in the environment before running this queue}"
+export SWANLAB_API_KEY
+export LD_LIBRARY_PATH="$(printf '%s:' .venv/lib/python3.9/site-packages/nvidia/*/lib | sed 's/:$//')"
+mkdir -p data/run_logs  # per-run logs land here; may not exist on a fresh checkout
+# run_exp NAME N_EMB N_LAYER — one training run; stdout/stderr -> data/run_logs/NAME.log.
+# NOTE: under `set -e` a failed run aborts the remaining queue entries (intentional).
+run_exp() {
+  local name="$1" emb="$2" layer="$3"
+  echo "[$(date '+%F %T')] START $name emb=$emb layer=$layer"
+  .venv/bin/python train.py \
+    --config-dir=. \
+    --config-name=image_pusht_diffusion_policy_dit_imf_attnres_full.yaml \
+    training.device=cuda:1 \
+    training.num_epochs=350 \
+    training.resume=false \
+    exp_name="$name" \
+    logging.group=imf_pusht_attnres_arch_sweep \
+    logging.name="$name" \
+    logging.resume=false \
+    logging.id=null \
+    hydra.run.dir="data/outputs/$name" \
+    policy.n_emb="$emb" \
+    policy.n_layer="$layer" \
+    > "data/run_logs/${name}.log" 2>&1
+  echo "[$(date '+%F %T')] END $name"
+}
+run_exp imf_attnres_emb384_layer6_seed42_5880gpu1 384 6
+run_exp imf_attnres_emb256_layer18_seed42_5880gpu1 256 18
+run_exp imf_attnres_emb128_layer18_seed42_5880gpu1 128 18