feat: 推理时输出action

This commit is contained in:
gouhanke
2026-02-12 19:54:11 +08:00
parent 926a78eb66
commit 0b05c01024
3 changed files with 14 additions and 2 deletions

View File

@@ -243,6 +243,13 @@ def main(cfg: DictConfig):
# 转换为 numpy
action = action.cpu().numpy()
# 调试:打印当前时间步的动作(由配置控制)
if eval_cfg.get('verbose_action', False):
print(f"\n[Step {t:3d}] 预测动作: {action}")
print(f" - 动作形状: {action.shape}")
print(f" - 动作范围: [{action.min():.4f}, {action.max():.4f}]")
print(f" - 动作均值: {action.mean():.4f}, 标准差: {action.std():.4f}")
# 可选:平滑动作
if smoother:
action = smoother.smooth(action)

View File

@@ -23,9 +23,9 @@ normalization_type: "min_max" # "min_max" or "gaussian"
# ====================
# 时间步配置
# ====================
pred_horizon: 16 # 预测未来多少步动作
pred_horizon: 8 # 预测未来多少步动作
obs_horizon: 2 # 使用多少步历史观测
num_action_steps: 8 # 每次推理实际执行多少步动作(应 <= pred_horizon - obs_horizon + 1)
num_action_steps: 4 # 每次推理实际执行多少步动作(应 <= pred_horizon - obs_horizon + 1)
# ====================
# 相机配置

View File

@@ -26,4 +26,9 @@ use_smoothing: false
smooth_method: "ema"
smooth_alpha: 0.3
# ====================
# 调试选项
# ====================
verbose_action: true # 是否打印每个时间步的动作信息