feat(sim): save air-insert and rollout validation updates

This commit is contained in:
Logic
2026-05-05 20:52:53 +08:00
parent 73f5b6e3d9
commit acbd7c605a
11 changed files with 2555 additions and 242 deletions

200
render.py Normal file
View File

@@ -0,0 +1,200 @@
import mujoco
from mujoco import viewer
import sys
import numpy as np
import time
import threading
class MjBasicRenderer:
    """Thin wrapper around MuJoCo's passive viewer plus an offscreen renderer.

    The viewer's key callback drives two flags: SPACE toggles pausing of the
    viewer sync, ESC toggles the exit-request flag.
    """

    def __new__(cls, *args, **kwargs):
        return super().__new__(cls)

    def __init__(self, mj_model=None, mj_data=None):
        # Keyboard flags, toggled from the viewer's key callback.
        self.render_paused = True
        self.exit_flag = False
        # Model/data handles shared by the viewer and the offscreen renderer.
        self.mj_model = mj_model
        self.mj_data = mj_data
        self.renderer = "viewer"  # default backend
        self.viewer = None
        self._image = None
        # Offscreen renderer used for camera RGB/depth capture.
        self.image_renderer = mujoco.Renderer(self.mj_model)

    def __del__(self):
        pass

    def _init_renderer(self):
        """Initialize the renderer backend.

        "viewer" uses the official passive viewer (available since MuJoCo
        2.3.3); any other value raises ValueError.
        """

        def key_callback(keycode):
            if keycode == 32:  # space: pause/resume viewer syncing
                self.render_paused = not self.render_paused
            elif keycode == 256:  # escape: request exit
                # NOTE(review): toggling means a second ESC press cancels the
                # exit request — confirm this is intended.
                self.exit_flag = not self.exit_flag

        if self.renderer == "viewer":
            # launch_passive does not block; user code keeps executing.
            self.viewer = viewer.launch_passive(
                self.mj_model,
                self.mj_data,
                key_callback=key_callback,
                show_left_ui=False,
                show_right_ui=False,
            )
            self.set_renderer_config()
        else:
            raise ValueError("Invalid renderer for some reason.")

    def render(self):
        """Sync the passive viewer; close it once it stops or exit was requested."""
        if self.viewer is not None and self.render_paused is True:
            if self.viewer.is_running() and self.exit_flag is False:
                self.viewer.sync()
            else:
                self.viewer.close()

    def set_renderer_config(self):
        """Configure the viewer camera and visualization options.

        BUG FIX: the camera mutations previously happened outside
        ``viewer.lock()`` even though the passive-viewer API (and this
        method's own original docstring) requires user code to hold the lock
        while mutating viewer state; all mutations now run under the lock.
        Also removed the runtime-evaluated no-op annotation
        ``self.viewer: viewer.Handle`` from ``render``.
        """
        with self.viewer.lock():
            self.viewer.cam.lookat = np.array([0.4, 0, 0.5])
            self.viewer.cam.azimuth -= 0.005
            # Blink the contact-point visualization with a ~2 s period.
            self.viewer.opt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = int(
                self.mj_data.time % 2
            )
# Optional dependency: OpenCV is only needed for the camera preview windows
# used by MjMultiRenderer; the passive viewer works without it.
try:
    import cv2
except ImportError:
    print("Could not import cv2, please install it to enable camera viewer.")
class MjMultiRenderer(MjBasicRenderer):
    """Renderer with optional OpenCV camera windows on top of the passive viewer."""

    def __new__(cls, *args, **kwargs):
        return super().__new__(cls)

    def __init__(
        self,
        mj_model=None,
        mj_data=None,
        renderer=None,
        enable_camera_viewer=False,
        enable_depth=False,
    ):
        super().__init__(mj_model, mj_data)
        self._depth = None
        self.renderer = renderer
        self._init_renderer()
        self.enable_camera_viewer = enable_camera_viewer
        if self.enable_camera_viewer:
            self.enable_depth = enable_depth
            self._init_window()
        else:
            self.enable_depth = False
            print("No Camera View")

    def __del__(self):
        self.close()

    def _init_renderer(self):
        """Initialize the renderer backend ("viewer" only; "unity" is a TODO)."""
        if self.renderer == "unity":
            # TODO: Support unity renderer.
            raise ValueError("Unity renderer init failed for no supporting reason")
        elif self.renderer == "viewer":
            super()._init_renderer()
            print("mujoco viewer init !")
        else:
            raise ValueError("renderer init failed for some reason.")

    def _init_window(self, name="Camera view"):
        """Create the OpenCV preview window(s).

        The RGB window is always created; a second window is added when depth
        rendering is enabled (the original duplicated the identical
        ``namedWindow`` call in both branches).
        """
        cv2.namedWindow(name, cv2.WINDOW_NORMAL)
        if self.enable_depth:
            cv2.namedWindow("Camera depth view", cv2.WINDOW_NORMAL)

    def render(self):
        """Render one frame via the configured backend.

        BUG FIX: the class previously defined ``render`` twice; the second
        definition shadowed this dispatching one, silently dropping the
        backend checks. Only the dispatching version is kept — for the
        "viewer" backend it delegates to the base class, which is exactly
        what the shadowing copy did, so behavior is unchanged for valid
        instances.
        """
        if self.renderer == "viewer":
            super().render()
        elif self.renderer == "unity":
            # TODO: Support unity renderer.
            raise ValueError("Unity renderer not supported now.")
        else:
            raise ValueError("Invalid renderer for some reason.")

    def camera_render(self, cam=None):
        """Show the camera RGB (and optionally depth) image in OpenCV windows."""
        if not self.enable_camera_viewer:
            print("camera info disable")
            return
        rgb, depth = self.render_from_camera(cam)
        if not self.enable_depth:
            rgb = cv2.resize(rgb, (1920, 1600))
            cv2.imshow("Camera view", rgb)
        else:
            cv2.imshow("Camera view", rgb)
            cv2.imshow("Camera depth view", depth)
        cv2.waitKey(1)

    def render_from_camera(self, cam=None):
        """Return an ``(image, depth)`` pair rendered from camera *cam*.

        The channel flip ``[:, :, ::-1]`` reverses the color-channel order —
        presumably RGB→BGR so the image displays correctly with OpenCV
        (TODO confirm). When depth is disabled, a zero (240, 320) depth map
        is returned as a placeholder.
        """
        self.image_renderer.update_scene(self.mj_data, camera=cam)
        if self.enable_depth is True:
            # Two passes: one depth render, then a regular color render.
            self.image_renderer.enable_depth_rendering()
            depth = self.image_renderer.render()[:, :]
            self.image_renderer.disable_depth_rendering()
            image = self.image_renderer.render()[:, :, ::-1]
        else:
            image = self.image_renderer.render()[:, :, ::-1]
            depth = np.zeros([240, 320])
        return image, depth

    def close(self):
        """Close the OpenCV windows and the viewer once it has stopped.

        FIX: tolerate partially-initialized instances (``__del__`` can run
        after ``__init__`` raised before ``enable_camera_viewer`` was set)
        and a missing viewer handle, instead of raising AttributeError;
        replaced the non-idiomatic ``== False`` comparison.
        """
        if not getattr(self, "enable_camera_viewer", False):
            return
        if self.viewer is not None and not self.viewer.is_running():
            cv2.destroyAllWindows()
            self.viewer.close()

View File

@@ -12,12 +12,11 @@ class TestAirInsertPolicy(PolicyBase):
LEGACY_GRASP_STRATEGY = "legacy" LEGACY_GRASP_STRATEGY = "legacy"
SOCKET_HOLD_Z = 0.85 SOCKET_HOLD_Z = 0.85
PEG_INSERT_START_OFFSET = np.array([0.105, 0.0, 0.0], dtype=np.float64) PEG_INSERT_START_OFFSET = np.array([0.105, 0.0, 0.0], dtype=np.float64)
INSERT_START_T = 650 INSERT_END_T = 580
INSERT_END_T = 730
LEFT_SOCKET_GRIPPER_CLOSED = -100 LEFT_SOCKET_GRIPPER_CLOSED = -100
RIGHT_PEG_GRIPPER_CLOSED = -100 RIGHT_PEG_GRIPPER_CLOSED = -100
SOCKET_APPROACH_Z = 1.05 SOCKET_APPROACH_Z = 1.05
EPISODE_END_T = 1000 EPISODE_END_T = 600
def __init__(self, inject_noise=False, grasp_strategy=SOCKET_OUTER_GRASP_STRATEGY): def __init__(self, inject_noise=False, grasp_strategy=SOCKET_OUTER_GRASP_STRATEGY):
super().__init__(inject_noise=inject_noise) super().__init__(inject_noise=inject_noise)
@@ -120,13 +119,7 @@ class TestAirInsertPolicy(PolicyBase):
"gripper": self.LEFT_SOCKET_GRIPPER_CLOSED, "gripper": self.LEFT_SOCKET_GRIPPER_CLOSED,
}, },
{ {
"t": 450, "t": 350,
"xyz": socket_hold_action,
"quat": left_pick_quat,
"gripper": self.LEFT_SOCKET_GRIPPER_CLOSED,
},
{
"t": 750,
"xyz": socket_hold_action, "xyz": socket_hold_action,
"quat": left_pick_quat, "quat": left_pick_quat,
"gripper": self.LEFT_SOCKET_GRIPPER_CLOSED, "gripper": self.LEFT_SOCKET_GRIPPER_CLOSED,
@@ -165,19 +158,13 @@ class TestAirInsertPolicy(PolicyBase):
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED, "gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
}, },
{ {
"t": 450, "t": 350,
"xyz": peg_init_xyz, "xyz": peg_init_xyz,
"quat": right_pick_quat, "quat": right_pick_quat,
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED, "gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
}, },
{ {
"t": 550, "t": 450,
"xyz": peg_lift_center,
"quat": right_pick_quat,
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
},
{
"t": self.INSERT_START_T,
"xyz": peg_lift_center, "xyz": peg_lift_center,
"quat": right_pick_quat, "quat": right_pick_quat,
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED, "gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
@@ -188,12 +175,6 @@ class TestAirInsertPolicy(PolicyBase):
"quat": right_pick_quat, "quat": right_pick_quat,
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED, "gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
}, },
{
"t": 750,
"xyz": peg_insert_end_center,
"quat": right_pick_quat,
"gripper": self.RIGHT_PEG_GRIPPER_CLOSED,
},
{ {
"t": self.EPISODE_END_T, "t": self.EPISODE_END_T,
"xyz": peg_insert_end_center, "xyz": peg_insert_end_center,

File diff suppressed because it is too large Load Diff

View File

@@ -702,10 +702,28 @@ def _run_training(cfg: DictConfig):
from roboimi.demos.vla_scripts import eval_vla from roboimi.demos.vla_scripts import eval_vla
rollout_cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=False)) rollout_cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=False))
rollout_num_episodes = int(cfg.train.get('rollout_num_episodes', 1))
rollout_device = str(cfg.train.get('rollout_device', cfg.train.device))
configured_rollout_workers = cfg.train.get('rollout_num_workers', None)
if configured_rollout_workers is None:
if rollout_device.startswith('cuda'):
rollout_num_workers = min(max(rollout_num_episodes, 1), 8)
else:
rollout_num_workers = 1
else:
rollout_num_workers = int(configured_rollout_workers)
rollout_cfg.eval.ckpt_path = str(checkpoint_path) rollout_cfg.eval.ckpt_path = str(checkpoint_path)
rollout_cfg.eval.num_episodes = int(cfg.train.get('rollout_num_episodes', 1)) rollout_cfg.eval.num_episodes = rollout_num_episodes
rollout_cfg.eval.num_workers = rollout_num_workers
rollout_cfg.eval.headless = True rollout_cfg.eval.headless = True
rollout_cfg.eval.device = 'cpu' rollout_cfg.eval.device = rollout_device
rollout_cfg.eval.cuda_devices = cfg.train.get('rollout_cuda_devices', None)
rollout_cfg.eval.response_timeout_s = float(
cfg.train.get('rollout_response_timeout_s', 300.0)
)
rollout_cfg.eval.server_startup_timeout_s = float(
cfg.train.get('rollout_server_startup_timeout_s', 300.0)
)
rollout_cfg.eval.verbose_action = False rollout_cfg.eval.verbose_action = False
rollout_cfg.eval.record_video = False rollout_cfg.eval.record_video = False
rollout_cfg.eval.save_trajectory_image = True rollout_cfg.eval.save_trajectory_image = True
@@ -716,9 +734,11 @@ def _run_training(cfg: DictConfig):
) )
log.info( log.info(
"🎯 开始 checkpoint rollout 验证: %s (episodes=%s, headless=True)", "🎯 开始 checkpoint rollout 验证: %s (episodes=%s, device=%s, workers=%s, headless=True)",
checkpoint_path, checkpoint_path,
rollout_cfg.eval.num_episodes, rollout_cfg.eval.num_episodes,
rollout_cfg.eval.device,
rollout_cfg.eval.num_workers,
) )
return eval_vla._run_eval(rollout_cfg) return eval_vla._run_eval(rollout_cfg)

View File

@@ -91,7 +91,6 @@ class DualDianaMed(MujocoEnv):
def step(self,action): def step(self,action):
self.compute_qpos = action #for observation ! self.compute_qpos = action #for observation !
self.obs = self._get_obs()
if self.interpolator_left is not None and self.interpolator_right is not None: if self.interpolator_left is not None and self.interpolator_right is not None:
self.interpolator_left.updateInput(action[:7], control_cycle=self.base_time) self.interpolator_left.updateInput(action[:7], control_cycle=self.base_time)
self.interpolator_right.updateInput(action[7:-2], control_cycle=self.base_time) self.interpolator_right.updateInput(action[7:-2], control_cycle=self.base_time)
@@ -104,6 +103,7 @@ class DualDianaMed(MujocoEnv):
super().step(action) super().step(action)
self.base_time = time.time() - ctrl_cur_time self.base_time = time.time() - ctrl_cur_time
self.obs = self._get_obs()
def preStep(self, action): def preStep(self, action):

View File

@@ -29,6 +29,11 @@ train:
rollout_val_freq_epochs: 50 # 每隔多少个 epoch 执行一次 rollout 验证 rollout_val_freq_epochs: 50 # 每隔多少个 epoch 执行一次 rollout 验证
rollout_validate_on_checkpoint: false # 是否在保存 checkpoint 后立即运行 rollout 验证 rollout_validate_on_checkpoint: false # 是否在保存 checkpoint 后立即运行 rollout 验证
rollout_num_episodes: 3 # rollout 验证的回合数 rollout_num_episodes: 3 # rollout 验证的回合数
rollout_device: ${train.device} # rollout 使用的设备;默认跟随训练设备
rollout_num_workers: null # rollout 并行 worker 数null 时 CUDA 自动推断CPU 保持 1
rollout_cuda_devices: null # rollout CUDA 并行使用的逻辑 device 列表null 时默认 [0]
rollout_response_timeout_s: 300.0 # rollout worker 等待 inference server 响应的超时时间
rollout_server_startup_timeout_s: 300.0 # rollout 等待 inference server 就绪的超时时间
# 学习率调度器(带预热) # 学习率调度器(带预热)
warmup_steps: 2000 # 预热步数Transformer建议更长 warmup_steps: 2000 # 预热步数Transformer建议更长

View File

@@ -2,6 +2,10 @@
# 评估配置 # 评估配置
ckpt_path: "checkpoints/vla_model_best.pt" # 模型检查点路径 ckpt_path: "checkpoints/vla_model_best.pt" # 模型检查点路径
num_episodes: 3 # 评估回合数 num_episodes: 3 # 评估回合数
num_workers: 1 # 并行 worker 数1 表示保持单进程评估
cuda_devices: null # CUDA 并行评估时使用的逻辑设备列表null 表示默认 [0]
response_timeout_s: 300.0 # worker 等待 inference server 响应的超时时间(秒)
server_startup_timeout_s: 300.0 # parent 等待 inference server 就绪的超时时间(秒)
max_timesteps: 700 # 每回合最大时间步 max_timesteps: 700 # 每回合最大时间步
device: ${train.device} # 与训练保持一致 device: ${train.device} # 与训练保持一致
task_name: "sim_transfer" # 环境任务名称 task_name: "sim_transfer" # 环境任务名称

View File

@@ -1,5 +1,11 @@
import unittest import unittest
from unittest import mock
import numpy as np
import torch
from omegaconf import OmegaConf
from roboimi.demos.vla_scripts import eval_vla
from roboimi.vla.eval_utils import execute_policy_action from roboimi.vla.eval_utils import execute_policy_action
@@ -14,6 +20,48 @@ class _FakeEnv:
self.calls.append(("step_jnt", action)) self.calls.append(("step_jnt", action))
class _FakeQueue:
def __init__(self, initial_items=None):
self.items = list(initial_items or [])
self.put_calls = []
def put(self, item):
self.put_calls.append(item)
self.items.append(item)
def get(self, timeout=None):
del timeout
if not self.items:
raise AssertionError("queue unexpectedly empty")
return self.items.pop(0)
def _make_parallel_cfg(**eval_overrides):
eval_cfg = {
"ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 5,
"num_workers": 2,
"max_timesteps": 1,
"device": "cpu",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": True,
"artifact_dir": None,
"save_artifacts": False,
"save_summary_json": False,
"save_timing": False,
"save_trajectory": False,
"save_trajectory_npz": False,
"record_video": False,
"save_trajectory_image": False,
}
eval_cfg.update(eval_overrides)
return OmegaConf.create({"agent": {}, "eval": eval_cfg})
class EvalVLAExecutionTest(unittest.TestCase): class EvalVLAExecutionTest(unittest.TestCase):
def test_execute_policy_action_uses_ee_step(self): def test_execute_policy_action_uses_ee_step(self):
env = _FakeEnv() env = _FakeEnv()
@@ -23,6 +71,662 @@ class EvalVLAExecutionTest(unittest.TestCase):
self.assertEqual(env.calls, [("step", action)]) self.assertEqual(env.calls, [("step", action)])
def test_split_episode_indices_balances_workers(self):
self.assertEqual(
eval_vla._split_episode_indices(num_episodes=10, num_workers=3),
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]],
)
def test_normalize_num_workers_caps_worker_count_to_episode_count(self):
self.assertEqual(eval_vla._normalize_num_workers(num_workers=5, num_episodes=2), 2)
def test_plan_episode_box_poses_uses_global_episode_order(self):
planned_poses = [
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([1.1, 1.2, 1.3], dtype=np.float32),
np.array([2.1, 2.2, 2.3], dtype=np.float32),
]
sampler = mock.Mock(side_effect=planned_poses)
result = eval_vla._plan_episode_box_poses(num_episodes=3, sampler=sampler)
self.assertEqual(sampler.call_count, 3)
self.assertEqual(len(result), 3)
for expected, actual in zip(planned_poses, result):
np.testing.assert_array_equal(actual, expected)
def test_resolve_policy_camera_names_matches_vlaagent_fallback_sorting(self):
cfg = OmegaConf.create(
{
"agent": {
"_target_": "roboimi.vla.agent.VLAAgent",
},
"eval": {
"camera_names": ["r_vis", "top", "front"],
},
}
)
self.assertEqual(
eval_vla._resolve_policy_camera_names(cfg),
["front", "r_vis", "top"],
)
def test_resolve_policy_camera_names_matches_gr00t_fallback_input_order(self):
cfg = OmegaConf.create(
{
"agent": {
"_target_": "roboimi.vla.agent_gr00t_dit.VLAAgentGr00tDiT",
},
"eval": {
"camera_names": ["r_vis", "top", "front"],
},
}
)
self.assertEqual(
eval_vla._resolve_policy_camera_names(cfg),
["r_vis", "top", "front"],
)
def test_build_episode_plans_without_box_poses_keeps_serial_sampling_lazy(self):
plans = eval_vla._build_episode_plans(num_episodes=3)
self.assertEqual(
plans,
[
{"episode_index": 0},
{"episode_index": 1},
{"episode_index": 2},
],
)
def test_prepare_local_policy_batch_pads_latest_observation_to_obs_horizon(self):
queues = eval_vla._new_local_policy_queues(obs_horizon=3)
observation = {
"qpos": torch.tensor([1.0, 2.0], dtype=torch.float32),
"images": {
"front": torch.tensor([[[1.0]]], dtype=torch.float32),
},
}
eval_vla._populate_local_policy_queues(queues, observation)
batch = eval_vla._prepare_local_policy_batch(
queues,
obs_horizon=3,
camera_names=["front"],
)
self.assertEqual(tuple(batch["qpos"].shape), (1, 3, 2))
self.assertEqual(tuple(batch["images"]["front"].shape), (1, 3, 1, 1, 1))
np.testing.assert_array_equal(
batch["qpos"][0].cpu().numpy(),
np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]], dtype=np.float32),
)
np.testing.assert_array_equal(
batch["images"]["front"][0].cpu().numpy(),
np.array([[[[1.0]]], [[[1.0]]], [[[1.0]]]], dtype=np.float32),
)
def test_enqueue_predicted_actions_uses_executable_slice(self):
queues = eval_vla._new_local_policy_queues(obs_horizon=2)
predicted_actions = torch.tensor(
[[[10.0], [20.0], [30.0], [40.0]]],
dtype=torch.float32,
)
eval_vla._enqueue_predicted_actions(
queues,
predicted_actions=predicted_actions,
obs_horizon=2,
num_action_steps=2,
)
self.assertEqual(len(queues["action"]), 2)
np.testing.assert_array_equal(queues["action"].popleft().numpy(), np.array([20.0], dtype=np.float32))
np.testing.assert_array_equal(queues["action"].popleft().numpy(), np.array([30.0], dtype=np.float32))
def test_remote_policy_runner_only_requests_server_inference_when_local_action_queue_is_empty(self):
request_queue = _FakeQueue()
response_queue = _FakeQueue(
[
{
"type": "predict_chunk_result",
"actions": np.asarray([[[10.0], [20.0], [30.0]]], dtype=np.float32),
}
]
)
runner = eval_vla._RemotePolicyRunner(
worker_index=3,
server_index=1,
request_queue=request_queue,
response_queue=response_queue,
camera_names=["front"],
obs_horizon=2,
num_action_steps=2,
)
first_observation = {
"qpos": torch.tensor([1.0, 2.0], dtype=torch.float32),
"images": {"front": torch.tensor([[[1.0]]], dtype=torch.float32)},
}
second_observation = {
"qpos": torch.tensor([3.0, 4.0], dtype=torch.float32),
"images": {"front": torch.tensor([[[2.0]]], dtype=torch.float32)},
}
first_action, first_forward = runner.select_action(
first_observation,
episode_index=7,
timestep=0,
)
second_action, second_forward = runner.select_action(
second_observation,
episode_index=7,
timestep=1,
)
self.assertTrue(first_forward)
self.assertFalse(second_forward)
self.assertEqual(len(request_queue.put_calls), 1)
self.assertEqual(request_queue.put_calls[0]["type"], "predict_chunk")
self.assertEqual(request_queue.put_calls[0]["worker_index"], 3)
self.assertEqual(request_queue.put_calls[0]["server_index"], 1)
np.testing.assert_array_equal(first_action.numpy(), np.array([20.0], dtype=np.float32))
np.testing.assert_array_equal(second_action.numpy(), np.array([30.0], dtype=np.float32))
def test_merge_worker_summaries_sorts_episodes_and_recomputes_aggregates(self):
worker_summaries = [
{
"avg_inference_fps": 999.0,
"avg_control_fps": 999.0,
"avg_obs_read_time_ms": 999.0,
"avg_total_time_ms": 999.0,
"timing_summary": {"count": 999, "model_forward_count": 999},
"episodes": [
{
"episode_index": 2,
"episode_reward": 9.0,
"episode_max_reward": 4.0,
"inference_fps": 30.0,
"control_fps": 15.0,
}
],
"_merge_state": {
"obs_read_time_ms": [9.0],
"preprocess_time_ms": [1.0],
"inference_time_ms": [3.0],
"env_step_time_ms": [4.0],
"total_time_ms": [10.0],
"model_forward_flags": [False],
},
},
{
"avg_inference_fps": 888.0,
"avg_control_fps": 888.0,
"avg_obs_read_time_ms": 888.0,
"avg_total_time_ms": 888.0,
"timing_summary": {"count": 888, "model_forward_count": 888},
"episodes": [
{
"episode_index": 1,
"episode_reward": 6.0,
"episode_max_reward": 3.0,
"inference_fps": 20.0,
"control_fps": 10.0,
},
{
"episode_index": 0,
"episode_reward": 5.0,
"episode_max_reward": 2.0,
"inference_fps": 10.0,
"control_fps": 5.0,
},
],
"_merge_state": {
"obs_read_time_ms": [1.0, 2.0, 12.0],
"preprocess_time_ms": [2.0, 3.0, 4.0],
"inference_time_ms": [4.0, 5.0, 6.0],
"env_step_time_ms": [6.0, 7.0, 8.0],
"total_time_ms": [8.0, 9.0, 20.0],
"model_forward_flags": [True, False, True],
},
},
]
artifact_paths = {
"output_dir": "/tmp/merged",
"summary_json": "/tmp/merged/rollout_summary.json",
"timing_json": "/tmp/merged/timing.json",
"trajectory_npz": None,
"video_mp4": None,
"video_camera_name": None,
}
merged = eval_vla._merge_worker_summaries(worker_summaries, artifact_paths)
self.assertEqual([episode["episode_index"] for episode in merged["episodes"]], [0, 1, 2])
self.assertEqual(merged["episode_rewards"], [5.0, 6.0, 9.0])
self.assertEqual(merged["episode_max_rewards"], [2.0, 3.0, 4.0])
self.assertAlmostEqual(merged["avg_reward"], 20.0 / 3.0)
self.assertAlmostEqual(merged["avg_max_reward"], 3.0)
self.assertAlmostEqual(merged["avg_inference_fps"], 20.0)
self.assertAlmostEqual(merged["avg_control_fps"], 10.0)
self.assertAlmostEqual(merged["avg_obs_read_time_ms"], 6.0)
self.assertAlmostEqual(merged["avg_total_time_ms"], 47.0 / 4.0)
self.assertEqual(merged["timing_summary"]["count"], 4)
self.assertEqual(merged["timing_summary"]["model_forward_count"], 2)
self.assertEqual(merged["artifact_dir"], "/tmp/merged")
self.assertEqual(merged["artifacts"], artifact_paths)
def test_build_cuda_server_payloads_uses_round_robin_worker_assignment(self):
cfg = _make_parallel_cfg(num_episodes=4, num_workers=4, device="cuda", cuda_devices=[0, 1])
artifact_paths = {"output_dir": None}
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
np.array([0.7, 0.8, 0.9], dtype=np.float32),
np.array([1.0, 1.1, 1.2], dtype=np.float32),
],
):
worker_payloads, _ = eval_vla._build_parallel_worker_payloads(cfg, artifact_paths)
server_payloads, assigned_workers = eval_vla._build_cuda_server_payloads(
cfg,
worker_payloads=worker_payloads,
cuda_devices=[0, 1],
)
self.assertEqual([payload["device_index"] for payload in server_payloads], [0, 1])
self.assertEqual([payload["worker_index"] for payload in assigned_workers], [0, 1, 2, 3])
self.assertEqual([payload["server_index"] for payload in assigned_workers], [0, 1, 0, 1])
self.assertEqual(server_payloads[0]["worker_indices"], [0, 2])
self.assertEqual(server_payloads[1]["worker_indices"], [1, 3])
def test_run_eval_parallel_dispatches_episode_splits_and_box_poses(self):
cfg = _make_parallel_cfg(num_episodes=5, num_workers=2, artifact_dir="/tmp/parallel-root")
planned_poses = [
np.array([float(index), float(index) + 0.1, float(index) + 0.2], dtype=np.float32)
for index in range(5)
]
observed_payloads = []
def fake_run_spawn_jobs(payloads, max_workers, worker_fn):
del worker_fn
self.assertEqual(max_workers, 2)
observed_payloads.extend(payloads)
return [
{
"episodes": [
{
"episode_index": 4,
"episode_reward": 5.0,
"episode_max_reward": 5.0,
"inference_fps": 50.0,
"control_fps": 25.0,
},
{
"episode_index": 3,
"episode_reward": 4.0,
"episode_max_reward": 4.0,
"inference_fps": 40.0,
"control_fps": 20.0,
},
],
"_merge_state": {
"obs_read_time_ms": [4.0, 5.0],
"preprocess_time_ms": [1.0, 1.0],
"inference_time_ms": [2.0, 2.0],
"env_step_time_ms": [3.0, 3.0],
"total_time_ms": [4.0, 5.0],
"model_forward_flags": [True, True],
},
},
{
"episodes": [
{
"episode_index": 2,
"episode_reward": 3.0,
"episode_max_reward": 3.0,
"inference_fps": 30.0,
"control_fps": 15.0,
},
{
"episode_index": 1,
"episode_reward": 2.0,
"episode_max_reward": 2.0,
"inference_fps": 20.0,
"control_fps": 10.0,
},
{
"episode_index": 0,
"episode_reward": 1.0,
"episode_max_reward": 1.0,
"inference_fps": 10.0,
"control_fps": 5.0,
},
],
"_merge_state": {
"obs_read_time_ms": [1.0, 2.0, 3.0],
"preprocess_time_ms": [1.0, 1.0, 1.0],
"inference_time_ms": [2.0, 2.0, 2.0],
"env_step_time_ms": [3.0, 3.0, 3.0],
"total_time_ms": [1.0, 2.0, 3.0],
"model_forward_flags": [False, True, False],
},
},
]
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=planned_poses,
), mock.patch.object(
eval_vla,
"_run_spawn_jobs",
side_effect=fake_run_spawn_jobs,
):
summary = eval_vla._run_eval_parallel(cfg)
self.assertEqual(len(observed_payloads), 2)
self.assertEqual(
[[plan["episode_index"] for plan in payload["episode_plans"]] for payload in observed_payloads],
[[0, 1, 2], [3, 4]],
)
for payload in observed_payloads:
for plan in payload["episode_plans"]:
np.testing.assert_array_equal(
np.asarray(plan["box_pos"], dtype=np.float32),
planned_poses[plan["episode_index"]],
)
self.assertEqual([episode["episode_index"] for episode in summary["episodes"]], [0, 1, 2, 3, 4])
self.assertEqual(summary["episode_rewards"], [1.0, 2.0, 3.0, 4.0, 5.0])
self.assertEqual(summary["num_episodes"], 5)
def test_run_eval_parallel_allows_trajectory_images_and_keeps_worker_artifact_paths(self):
cfg = _make_parallel_cfg(
num_episodes=2,
num_workers=2,
artifact_dir="/tmp/parallel-images",
save_summary_json=True,
save_trajectory_image=True,
)
observed_payloads = []
def fake_run_spawn_jobs(payloads, max_workers, worker_fn):
del worker_fn
self.assertEqual(max_workers, 2)
observed_payloads.extend(payloads)
return [
{
"episodes": [
{
"episode_index": 0,
"episode_reward": 1.0,
"episode_max_reward": 1.0,
"inference_fps": 10.0,
"control_fps": 5.0,
"artifact_paths": {
"trajectory_image": f"{payloads[0]['artifact_dir']}/rollout_front_ep01_trajectory.png",
},
},
],
"_merge_state": {
"obs_read_time_ms": [1.0],
"preprocess_time_ms": [1.0],
"inference_time_ms": [1.0],
"env_step_time_ms": [1.0],
"total_time_ms": [1.0],
"model_forward_flags": [True],
},
},
{
"episodes": [
{
"episode_index": 1,
"episode_reward": 2.0,
"episode_max_reward": 2.0,
"inference_fps": 20.0,
"control_fps": 10.0,
"artifact_paths": {
"trajectory_image": f"{payloads[1]['artifact_dir']}/rollout_front_ep02_trajectory.png",
},
},
],
"_merge_state": {
"obs_read_time_ms": [2.0],
"preprocess_time_ms": [2.0],
"inference_time_ms": [2.0],
"env_step_time_ms": [2.0],
"total_time_ms": [2.0],
"model_forward_flags": [False],
},
},
]
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
],
), mock.patch.object(
eval_vla,
"_run_spawn_jobs",
side_effect=fake_run_spawn_jobs,
):
summary = eval_vla._run_eval_parallel(cfg)
self.assertEqual(len(observed_payloads), 2)
self.assertTrue(observed_payloads[0]["artifact_dir"].endswith("workers/worker_00"))
self.assertTrue(observed_payloads[1]["artifact_dir"].endswith("workers/worker_01"))
self.assertTrue(
summary["episodes"][0]["artifact_paths"]["trajectory_image"].endswith(
"workers/worker_00/rollout_front_ep01_trajectory.png"
)
)
self.assertTrue(
summary["episodes"][1]["artifact_paths"]["trajectory_image"].endswith(
"workers/worker_01/rollout_front_ep02_trajectory.png"
)
)
def test_run_eval_parallel_surfaces_worker_failures(self):
cfg = _make_parallel_cfg(num_episodes=2, num_workers=2)
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
],
), mock.patch.object(
eval_vla,
"_run_spawn_jobs",
side_effect=RuntimeError("boom"),
):
with self.assertRaisesRegex(RuntimeError, "Parallel rollout worker failed"):
eval_vla._run_eval_parallel(cfg)
def test_run_eval_parallel_cuda_builds_server_payloads_and_merges_worker_results(self):
cfg = _make_parallel_cfg(
num_episodes=4,
num_workers=4,
device="cuda",
cuda_devices=[0],
artifact_dir="/tmp/cuda-root",
)
observed_server_payloads = []
observed_worker_payloads = []
def fake_run_cuda_parallel_processes(server_payloads, worker_payloads):
observed_server_payloads.extend(server_payloads)
observed_worker_payloads.extend(worker_payloads)
return [
{
"episodes": [
{
"episode_index": 2,
"episode_reward": 3.0,
"episode_max_reward": 3.0,
"inference_fps": 30.0,
"control_fps": 15.0,
},
{
"episode_index": 0,
"episode_reward": 1.0,
"episode_max_reward": 1.0,
"inference_fps": 10.0,
"control_fps": 5.0,
},
],
"_merge_state": {
"obs_read_time_ms": [1.0, 2.0],
"preprocess_time_ms": [1.0, 1.0],
"inference_time_ms": [2.0, 2.0],
"env_step_time_ms": [3.0, 3.0],
"total_time_ms": [4.0, 4.0],
"model_forward_flags": [True, False],
},
},
{
"episodes": [
{
"episode_index": 3,
"episode_reward": 4.0,
"episode_max_reward": 4.0,
"inference_fps": 40.0,
"control_fps": 20.0,
},
{
"episode_index": 1,
"episode_reward": 2.0,
"episode_max_reward": 2.0,
"inference_fps": 20.0,
"control_fps": 10.0,
},
],
"_merge_state": {
"obs_read_time_ms": [3.0, 4.0],
"preprocess_time_ms": [1.0, 1.0],
"inference_time_ms": [2.0, 2.0],
"env_step_time_ms": [3.0, 3.0],
"total_time_ms": [4.0, 4.0],
"model_forward_flags": [True, True],
},
},
]
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
np.array([0.7, 0.8, 0.9], dtype=np.float32),
np.array([1.0, 1.1, 1.2], dtype=np.float32),
],
), mock.patch.object(
eval_vla,
"_run_cuda_parallel_processes",
side_effect=fake_run_cuda_parallel_processes,
create=True,
):
summary = eval_vla._run_eval_parallel_cuda(cfg)
self.assertEqual(len(observed_server_payloads), 1)
self.assertEqual(observed_server_payloads[0]["device_index"], 0)
self.assertEqual(len(observed_worker_payloads), 4)
self.assertTrue(all(payload["server_index"] == 0 for payload in observed_worker_payloads))
self.assertEqual([episode["episode_index"] for episode in summary["episodes"]], [0, 1, 2, 3])
self.assertEqual(summary["episode_rewards"], [1.0, 2.0, 3.0, 4.0])
self.assertEqual(summary["num_episodes"], 4)
def test_run_eval_parallel_cuda_surfaces_server_failures(self):
cfg = _make_parallel_cfg(num_episodes=2, num_workers=2, device="cuda", cuda_devices=[0])
with mock.patch.object(
eval_vla,
"sample_transfer_pose",
side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
],
), mock.patch.object(
eval_vla,
"_run_cuda_parallel_processes",
side_effect=RuntimeError("server boom"),
create=True,
):
with self.assertRaisesRegex(RuntimeError, "Parallel CUDA rollout failed"):
eval_vla._run_eval_parallel_cuda(cfg)
def test_run_spawn_jobs_supports_real_spawn_with_actual_eval_worker_entry(self):
payloads = [
{"_spawn_probe": True, "probe_value": 1, "worker_index": 0},
{"_spawn_probe": True, "probe_value": 2, "worker_index": 1},
]
results = eval_vla._run_spawn_jobs(
payloads=payloads,
max_workers=2,
worker_fn=eval_vla._run_eval_worker_entry,
)
self.assertEqual(sorted(result["probe_value"] for result in results), [1, 2])
self.assertEqual(sorted(result["worker_index"] for result in results), [0, 1])
def test_cuda_server_and_env_worker_entrypoints_support_real_spawn_probe(self):
ctx = eval_vla.multiprocessing.get_context("spawn")
request_queue = ctx.Queue()
response_queue = ctx.Queue()
result_queue = ctx.Queue()
server = ctx.Process(
target=eval_vla._inference_server_main,
args=(
{
"_spawn_probe": True,
"server_index": 0,
"request_queue": request_queue,
"response_queues": [response_queue],
},
),
)
worker = ctx.Process(
target=eval_vla._env_worker_main,
args=(
{
"_spawn_probe": True,
"worker_index": 0,
"server_index": 0,
"request_queue": request_queue,
"response_queue": response_queue,
"result_queue": result_queue,
},
),
)
server.start()
worker.start()
result = result_queue.get(timeout=10.0)
worker.join(timeout=10.0)
request_queue.put({"type": "shutdown_server"})
server.join(timeout=10.0)
self.assertEqual(result["kind"], "worker_result")
self.assertEqual(result["summary"]["probe_worker_index"], 0)
self.assertEqual(result["summary"]["probe_server_index"], 0)
self.assertEqual(result["summary"]["probe_actions"], [[[11.0], [22.0], [33.0]]])
self.assertEqual(worker.exitcode, 0)
self.assertEqual(server.exitcode, 0)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()

View File

@@ -216,6 +216,31 @@ class EvalVLAHeadlessTest(unittest.TestCase):
self.assertIsNotNone(env.top) self.assertIsNotNone(env.top)
self.assertIsNotNone(env.front) self.assertIsNotNone(env.front)
def test_dual_diana_step_refreshes_obs_after_physics_step(self):
env = DualDianaMed.__new__(DualDianaMed)
env.compute_qpos = np.zeros(16)
env.interpolator_left = None
env.interpolator_right = None
env.control_timestep = 0.001
env.model_timestep = 0.001
env.base_time = 0.0
events = []
def fake_get_obs():
events.append("obs")
return {"images": {}, "qpos": np.zeros(16, dtype=np.float32)}
env._get_obs = fake_get_obs
with mock.patch(
"roboimi.envs.double_base.MujocoEnv.step",
autospec=True,
side_effect=lambda _self, _action: events.append("physics"),
):
env.step(np.zeros(16))
self.assertEqual(events, ["physics", "obs"])
def test_eval_main_headless_skips_render_and_still_executes_policy(self): def test_eval_main_headless_skips_render_and_still_executes_policy(self):
fake_env = _FakeEnv() fake_env = _FakeEnv()
fake_agent = _FakeAgent() fake_agent = _FakeAgent()
@@ -323,6 +348,193 @@ class EvalVLAHeadlessTest(unittest.TestCase):
self.assertAlmostEqual(summary["avg_reward"], 3.75) self.assertAlmostEqual(summary["avg_reward"], 3.75)
self.assertEqual(summary["num_episodes"], 2) self.assertEqual(summary["num_episodes"], 2)
def test_eval_config_exposes_num_workers_default(self):
eval_cfg = OmegaConf.load(Path("roboimi/vla/conf/eval/eval.yaml"))
self.assertIn("num_workers", eval_cfg)
self.assertEqual(eval_cfg.num_workers, 1)
def test_eval_config_exposes_cuda_devices_default(self):
eval_cfg = OmegaConf.load(Path("roboimi/vla/conf/eval/eval.yaml"))
self.assertIn("cuda_devices", eval_cfg)
self.assertIsNone(eval_cfg.cuda_devices)
def test_eval_config_exposes_parallel_timeout_defaults(self):
eval_cfg = OmegaConf.load(Path("roboimi/vla/conf/eval/eval.yaml"))
self.assertIn("response_timeout_s", eval_cfg)
self.assertIn("server_startup_timeout_s", eval_cfg)
self.assertEqual(eval_cfg.response_timeout_s, 300.0)
self.assertEqual(eval_cfg.server_startup_timeout_s, 300.0)
def test_run_eval_uses_serial_path_when_num_workers_is_one(self):
cfg = OmegaConf.create(
{
"eval": {
"num_workers": 1,
"num_episodes": 3,
}
}
)
with mock.patch.object(
eval_vla,
"_run_eval_serial",
return_value={"mode": "serial"},
) as run_eval_serial, mock.patch.object(
eval_vla,
"_run_eval_parallel",
) as run_eval_parallel:
result = eval_vla._run_eval(cfg)
self.assertEqual(result, {"mode": "serial"})
run_eval_serial.assert_called_once_with(cfg)
run_eval_parallel.assert_not_called()
def test_run_eval_uses_serial_path_when_requested_workers_collapse_to_one(self):
cfg = OmegaConf.create(
{
"eval": {
"num_workers": 8,
"num_episodes": 1,
}
}
)
with mock.patch.object(
eval_vla,
"_run_eval_serial",
return_value={"mode": "serial"},
) as run_eval_serial, mock.patch.object(
eval_vla,
"_run_eval_parallel",
) as run_eval_parallel:
result = eval_vla._run_eval(cfg)
self.assertEqual(result, {"mode": "serial"})
run_eval_serial.assert_called_once_with(cfg)
run_eval_parallel.assert_not_called()
def test_run_eval_parallel_requires_headless_true(self):
cfg = OmegaConf.create(
{
"agent": {},
"eval": {
"ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 2,
"num_workers": 2,
"max_timesteps": 1,
"device": "cpu",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": False,
},
}
)
with self.assertRaisesRegex(ValueError, "headless=true"):
eval_vla._run_eval_parallel(cfg)
def test_run_eval_parallel_dispatches_to_cpu_workers_when_device_is_cpu(self):
cfg = OmegaConf.create(
{
"agent": {},
"eval": {
"ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 2,
"num_workers": 2,
"max_timesteps": 1,
"device": "cpu",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": True,
"cuda_devices": None,
},
}
)
with mock.patch.object(
eval_vla,
"_run_eval_parallel_cpu",
return_value={"mode": "cpu"},
create=True,
) as run_cpu_parallel, mock.patch.object(
eval_vla,
"_run_eval_parallel_cuda",
create=True,
) as run_cuda_parallel:
result = eval_vla._run_eval_parallel(cfg)
self.assertEqual(result, {"mode": "cpu"})
run_cpu_parallel.assert_called_once_with(cfg)
run_cuda_parallel.assert_not_called()
def test_run_eval_parallel_dispatches_to_cuda_servers_when_device_is_cuda(self):
cfg = OmegaConf.create(
{
"agent": {},
"eval": {
"ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 2,
"num_workers": 2,
"max_timesteps": 1,
"device": "cuda",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": True,
"cuda_devices": [0],
},
}
)
with mock.patch.object(
eval_vla,
"_run_eval_parallel_cpu",
create=True,
) as run_cpu_parallel, mock.patch.object(
eval_vla,
"_run_eval_parallel_cuda",
return_value={"mode": "cuda"},
create=True,
) as run_cuda_parallel:
result = eval_vla._run_eval_parallel(cfg)
self.assertEqual(result, {"mode": "cuda"})
run_cpu_parallel.assert_not_called()
run_cuda_parallel.assert_called_once_with(cfg)
def test_resolve_cuda_devices_defaults_to_single_logical_gpu(self):
cfg = OmegaConf.create(
{
"device": "cuda",
"cuda_devices": None,
}
)
self.assertEqual(eval_vla._resolve_cuda_devices(cfg), [0])
def test_resolve_cuda_devices_rejects_empty_selection(self):
cfg = OmegaConf.create(
{
"device": "cuda",
"cuda_devices": [],
}
)
with self.assertRaisesRegex(ValueError, "cuda_devices"):
eval_vla._resolve_cuda_devices(cfg)
def test_run_eval_uses_air_insert_sampler_for_socket_peg_task(self): def test_run_eval_uses_air_insert_sampler_for_socket_peg_task(self):
self.assertTrue( self.assertTrue(
hasattr(eval_vla, "sample_air_insert_socket_peg_state"), hasattr(eval_vla, "sample_air_insert_socket_peg_state"),

View File

@@ -102,10 +102,8 @@ class EvalVLARolloutArtifactsTest(unittest.TestCase):
self.assertIn('artifact_dir', eval_cfg) self.assertIn('artifact_dir', eval_cfg)
self.assertFalse(eval_cfg.save_summary_json) self.assertFalse(eval_cfg.save_summary_json)
self.assertFalse(eval_cfg.save_trajectory_npz) self.assertFalse(eval_cfg.save_trajectory_npz)
self.assertFalse(eval_cfg.save_trajectory_image)
self.assertFalse(eval_cfg.record_video) self.assertFalse(eval_cfg.record_video)
self.assertIsNone(eval_cfg.artifact_dir) self.assertIsNone(eval_cfg.artifact_dir)
self.assertIsNone(eval_cfg.trajectory_image_camera_name)
self.assertIsNone(eval_cfg.video_camera_name) self.assertIsNone(eval_cfg.video_camera_name)
self.assertEqual(eval_cfg.video_fps, 30) self.assertEqual(eval_cfg.video_fps, 30)
@@ -135,8 +133,6 @@ class EvalVLARolloutArtifactsTest(unittest.TestCase):
'artifact_dir': tmpdir, 'artifact_dir': tmpdir,
'save_summary_json': True, 'save_summary_json': True,
'save_trajectory_npz': True, 'save_trajectory_npz': True,
'save_trajectory_image': True,
'trajectory_image_camera_name': 'front',
'record_video': True, 'record_video': True,
'video_camera_name': 'front', 'video_camera_name': 'front',
'video_fps': 12, 'video_fps': 12,
@@ -180,14 +176,12 @@ class EvalVLARolloutArtifactsTest(unittest.TestCase):
trajectory_path = Path(artifacts['trajectory_npz']) trajectory_path = Path(artifacts['trajectory_npz'])
summary_path = Path(artifacts['summary_json']) summary_path = Path(artifacts['summary_json'])
video_path = Path(artifacts['video_mp4']) video_path = Path(artifacts['video_mp4'])
trajectory_image_path = Path(summary['episodes'][0]['artifact_paths']['trajectory_image'])
self.assertEqual(Path(artifacts['output_dir']), Path(tmpdir)) self.assertEqual(Path(artifacts['output_dir']), Path(tmpdir))
self.assertEqual(artifacts['video_camera_name'], 'front') self.assertEqual(artifacts['video_camera_name'], 'front')
self.assertTrue(trajectory_path.exists()) self.assertTrue(trajectory_path.exists())
self.assertTrue(summary_path.exists()) self.assertTrue(summary_path.exists())
self.assertTrue(video_path.exists()) self.assertTrue(video_path.exists())
self.assertTrue(trajectory_image_path.exists())
rollout_npz = np.load(trajectory_path) rollout_npz = np.load(trajectory_path)
np.testing.assert_array_equal(rollout_npz['episode_index'], np.array([0, 0])) np.testing.assert_array_equal(rollout_npz['episode_index'], np.array([0, 0]))
@@ -224,120 +218,267 @@ class EvalVLARolloutArtifactsTest(unittest.TestCase):
saved_summary = json.load(fh) saved_summary = json.load(fh)
self.assertEqual(saved_summary['artifacts']['trajectory_npz'], str(trajectory_path)) self.assertEqual(saved_summary['artifacts']['trajectory_npz'], str(trajectory_path))
self.assertEqual(saved_summary['artifacts']['video_mp4'], str(video_path)) self.assertEqual(saved_summary['artifacts']['video_mp4'], str(video_path))
self.assertEqual(
saved_summary['episodes'][0]['artifact_paths']['trajectory_image'],
str(trajectory_image_path),
)
self.assertEqual(saved_summary['episode_rewards'], [3.0]) self.assertEqual(saved_summary['episode_rewards'], [3.0])
self.assertAlmostEqual(summary['avg_reward'], 3.0) self.assertAlmostEqual(summary['avg_reward'], 3.0)
self.assertIn('avg_obs_read_time_ms', summary) self.assertIn('avg_obs_read_time_ms', summary)
self.assertIn('avg_env_step_time_ms', summary) self.assertIn('avg_env_step_time_ms', summary)
def test_run_eval_exports_front_trajectory_images_without_video_dependency(self): def test_run_eval_parallel_rejects_trajectory_and_video_exports(self):
actions = [ unsupported_flags = [
np.arange(16, dtype=np.float32), "record_video",
np.arange(16, dtype=np.float32) + 10.0, "save_trajectory",
np.arange(16, dtype=np.float32) + 100.0, "save_trajectory_npz",
np.arange(16, dtype=np.float32) + 110.0,
] ]
fake_agent = _FakeAgent(actions)
fake_env = _FakeEnv()
with tempfile.TemporaryDirectory() as tmpdir: for flag_name in unsupported_flags:
with self.subTest(flag_name=flag_name):
cfg = OmegaConf.create( cfg = OmegaConf.create(
{ {
'agent': {}, "agent": {},
'eval': { "eval": {
'ckpt_path': 'checkpoints/vla_model_best.pt', "ckpt_path": "checkpoints/vla_model_best.pt",
'num_episodes': 2, "num_episodes": 2,
'max_timesteps': 2, "num_workers": 2,
'device': 'cpu', "max_timesteps": 1,
'task_name': 'sim_transfer', "device": "cpu",
'camera_names': ['top', 'front'], "task_name": "sim_transfer",
'use_smoothing': True, "camera_names": ["front"],
'smooth_alpha': 0.5, "use_smoothing": False,
'verbose_action': False, "smooth_alpha": 0.3,
'headless': True, "verbose_action": False,
'artifact_dir': tmpdir, "headless": True,
'save_trajectory_image': True, "save_artifacts": True,
'record_video': False, flag_name: True,
}, },
} }
) )
trajectory_image_calls = [] with self.assertRaisesRegex(ValueError, flag_name):
eval_vla._run_eval_parallel(cfg)
def fake_save_rollout_trajectory_image( def test_run_eval_parallel_writes_merged_summary_timing_and_worker_dirs(self):
env, with tempfile.TemporaryDirectory() as tmpdir:
output_path, cfg = OmegaConf.create(
raw_actions,
camera_name,
*,
line_radius=0.004,
max_markers=1500,
):
del env, line_radius, max_markers
trajectory_image_calls.append(
{ {
'output_path': output_path, "agent": {},
'camera_name': camera_name, "eval": {
'raw_actions': [np.array(action, copy=True) for action in raw_actions], "ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 3,
"num_workers": 2,
"max_timesteps": 1,
"device": "cpu",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": True,
"artifact_dir": tmpdir,
"save_summary_json": True,
"save_timing": True,
},
} }
) )
if output_path is None:
return None def fake_run_spawn_jobs(payloads, max_workers, worker_fn):
output_path = Path(output_path) del max_workers, worker_fn
output_path.parent.mkdir(parents=True, exist_ok=True) return [
output_path.write_bytes(b'fake-png') {
return str(output_path) "episodes": [
{
"episode_index": 2,
"episode_reward": 3.0,
"episode_max_reward": 3.0,
"inference_fps": 30.0,
"control_fps": 15.0,
}
],
"_merge_state": {
"obs_read_time_ms": [3.0],
"preprocess_time_ms": [1.0],
"inference_time_ms": [2.0],
"env_step_time_ms": [4.0],
"total_time_ms": [5.0],
"model_forward_flags": [True],
},
},
{
"episodes": [
{
"episode_index": 1,
"episode_reward": 2.0,
"episode_max_reward": 2.0,
"inference_fps": 20.0,
"control_fps": 10.0,
},
{
"episode_index": 0,
"episode_reward": 1.0,
"episode_max_reward": 1.0,
"inference_fps": 10.0,
"control_fps": 5.0,
},
],
"_merge_state": {
"obs_read_time_ms": [1.0, 2.0],
"preprocess_time_ms": [1.0, 1.0],
"inference_time_ms": [2.0, 2.0],
"env_step_time_ms": [4.0, 4.0],
"total_time_ms": [5.0, 5.0],
"model_forward_flags": [False, True],
},
},
]
with mock.patch.object( with mock.patch.object(
eval_vla, eval_vla,
'load_checkpoint', "sample_transfer_pose",
return_value=(fake_agent, None), side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
np.array([0.7, 0.8, 0.9], dtype=np.float32),
],
), mock.patch.object( ), mock.patch.object(
eval_vla, eval_vla,
'make_sim_env', "_run_spawn_jobs",
return_value=fake_env, side_effect=fake_run_spawn_jobs,
), mock.patch.object( ):
eval_vla, summary = eval_vla._run_eval_parallel(cfg)
'sample_transfer_pose',
return_value=np.array([0.1, 0.2, 0.3], dtype=np.float32),
), mock.patch.object(
eval_vla,
'tqdm',
side_effect=lambda iterable, **kwargs: iterable,
), mock.patch.object(
eval_vla,
'_save_rollout_trajectory_image',
side_effect=fake_save_rollout_trajectory_image,
) as save_trajectory_image_mock, mock.patch.object(
eval_vla,
'_open_video_writer',
) as open_video_writer_mock:
summary = eval_vla._run_eval(cfg)
self.assertEqual(save_trajectory_image_mock.call_count, 2) summary_path = Path(tmpdir) / "rollout_summary.json"
open_video_writer_mock.assert_not_called() timing_path = Path(tmpdir) / "timing.json"
self.assertIsNone(summary['artifacts']['video_mp4']) worker_00_dir = Path(tmpdir) / "workers" / "worker_00"
self.assertEqual(summary['artifacts']['trajectory_image_camera_name'], 'front') worker_01_dir = Path(tmpdir) / "workers" / "worker_01"
self.assertEqual(
[call['camera_name'] for call in trajectory_image_calls], self.assertTrue(summary_path.exists())
['front', 'front'], self.assertTrue(timing_path.exists())
self.assertTrue(worker_00_dir.is_dir())
self.assertTrue(worker_01_dir.is_dir())
self.assertEqual(summary["episode_rewards"], [1.0, 2.0, 3.0])
with summary_path.open("r", encoding="utf-8") as fh:
saved_summary = json.load(fh)
with timing_path.open("r", encoding="utf-8") as fh:
saved_timing = json.load(fh)
self.assertEqual(saved_summary["episode_rewards"], [1.0, 2.0, 3.0])
self.assertEqual(saved_summary["artifact_dir"], tmpdir)
self.assertEqual(saved_timing["count"], 3)
self.assertEqual(saved_timing["model_forward_count"], 2)
def test_run_eval_parallel_cuda_writes_merged_summary_timing_and_worker_dirs(self):
with tempfile.TemporaryDirectory() as tmpdir:
cfg = OmegaConf.create(
{
"agent": {},
"eval": {
"ckpt_path": "checkpoints/vla_model_best.pt",
"num_episodes": 3,
"num_workers": 2,
"cuda_devices": [0],
"max_timesteps": 1,
"device": "cuda",
"task_name": "sim_transfer",
"camera_names": ["front"],
"use_smoothing": False,
"smooth_alpha": 0.3,
"verbose_action": False,
"headless": True,
"artifact_dir": tmpdir,
"save_summary_json": True,
"save_timing": True,
},
}
) )
first_episode_path = Path(summary['episodes'][0]['artifact_paths']['trajectory_image']) def fake_run_cuda_parallel_processes(server_payloads, worker_payloads):
second_episode_path = Path(summary['episodes'][1]['artifact_paths']['trajectory_image']) self.assertEqual(len(server_payloads), 1)
self.assertTrue(first_episode_path.exists()) self.assertEqual(server_payloads[0]["device_index"], 0)
self.assertTrue(second_episode_path.exists()) self.assertEqual([payload["server_index"] for payload in worker_payloads], [0, 0])
self.assertNotEqual(first_episode_path, second_episode_path) return [
self.assertEqual(first_episode_path.parent, Path(tmpdir)) {
self.assertEqual(second_episode_path.parent, Path(tmpdir)) "episodes": [
{
"episode_index": 2,
"episode_reward": 3.0,
"episode_max_reward": 3.0,
"inference_fps": 30.0,
"control_fps": 15.0,
}
],
"_merge_state": {
"obs_read_time_ms": [3.0],
"preprocess_time_ms": [1.0],
"inference_time_ms": [2.0],
"env_step_time_ms": [4.0],
"total_time_ms": [5.0],
"model_forward_flags": [True],
},
},
{
"episodes": [
{
"episode_index": 1,
"episode_reward": 2.0,
"episode_max_reward": 2.0,
"inference_fps": 20.0,
"control_fps": 10.0,
},
{
"episode_index": 0,
"episode_reward": 1.0,
"episode_max_reward": 1.0,
"inference_fps": 10.0,
"control_fps": 5.0,
},
],
"_merge_state": {
"obs_read_time_ms": [1.0, 2.0],
"preprocess_time_ms": [1.0, 1.0],
"inference_time_ms": [2.0, 2.0],
"env_step_time_ms": [4.0, 4.0],
"total_time_ms": [5.0, 5.0],
"model_forward_flags": [False, True],
},
},
]
np.testing.assert_array_equal(trajectory_image_calls[0]['raw_actions'][0], actions[0]) with mock.patch.object(
np.testing.assert_array_equal(trajectory_image_calls[0]['raw_actions'][1], actions[1]) eval_vla,
np.testing.assert_array_equal(trajectory_image_calls[1]['raw_actions'][0], actions[2]) "sample_transfer_pose",
np.testing.assert_array_equal(trajectory_image_calls[1]['raw_actions'][1], actions[3]) side_effect=[
np.array([0.1, 0.2, 0.3], dtype=np.float32),
np.array([0.4, 0.5, 0.6], dtype=np.float32),
np.array([0.7, 0.8, 0.9], dtype=np.float32),
],
), mock.patch.object(
eval_vla,
"_run_cuda_parallel_processes",
side_effect=fake_run_cuda_parallel_processes,
create=True,
):
summary = eval_vla._run_eval_parallel_cuda(cfg)
summary_path = Path(tmpdir) / "rollout_summary.json"
timing_path = Path(tmpdir) / "timing.json"
worker_00_dir = Path(tmpdir) / "workers" / "worker_00"
worker_01_dir = Path(tmpdir) / "workers" / "worker_01"
self.assertTrue(summary_path.exists())
self.assertTrue(timing_path.exists())
self.assertTrue(worker_00_dir.is_dir())
self.assertTrue(worker_01_dir.is_dir())
self.assertEqual(summary["episode_rewards"], [1.0, 2.0, 3.0])
with summary_path.open("r", encoding="utf-8") as fh:
saved_summary = json.load(fh)
with timing_path.open("r", encoding="utf-8") as fh:
saved_timing = json.load(fh)
self.assertEqual(saved_summary["episode_rewards"], [1.0, 2.0, 3.0])
self.assertEqual(saved_summary["artifact_dir"], tmpdir)
self.assertEqual(saved_timing["count"], 3)
self.assertEqual(saved_timing["model_forward_count"], 2)
if __name__ == '__main__': if __name__ == '__main__':

View File

@@ -158,6 +158,101 @@ class TrainVLARolloutValidationTest(unittest.TestCase):
self.assertGreater(float(cfg.train.lr), 5e-5) self.assertGreater(float(cfg.train.lr), 5e-5)
self.assertGreater(cfg.train.num_workers, 8) self.assertGreater(cfg.train.num_workers, 8)
self.assertEqual(cfg.train.rollout_val_freq_epochs, 50) self.assertEqual(cfg.train.rollout_val_freq_epochs, 50)
self.assertEqual(cfg.train.rollout_device, cfg.train.device)
self.assertIsNone(cfg.train.rollout_num_workers)
self.assertIsNone(cfg.train.rollout_cuda_devices)
def test_run_training_rollout_validation_propagates_gpu_parallel_settings(self):
cfg = OmegaConf.create(
{
'train': {
'device': 'cpu',
'batch_size': 1,
'num_workers': 0,
'val_split': 0.0,
'seed': 0,
'lr': 1e-3,
'max_steps': 2,
'log_freq': 1,
'save_freq': 1000,
'warmup_steps': 1,
'scheduler_type': 'constant',
'min_lr': 0.0,
'grad_clip': 1.0,
'weight_decay': 0.0,
'pretrained_ckpt': None,
'resume_ckpt': None,
'use_swanlab': False,
'rollout_val_freq_epochs': 2,
'rollout_num_episodes': 5,
'rollout_device': 'cuda',
'rollout_num_workers': 4,
'rollout_cuda_devices': [0, 1],
'rollout_response_timeout_s': 123.0,
'rollout_server_startup_timeout_s': 456.0,
},
'data': {
'camera_names': ['front'],
},
'agent': {
'_target_': 'fake.agent',
},
'eval': {
'ckpt_path': 'unused.pt',
'num_episodes': 99,
'max_timesteps': 1,
'device': 'cpu',
'task_name': 'sim_transfer',
'camera_names': ['front'],
'use_smoothing': False,
'smooth_alpha': 0.3,
'verbose_action': False,
'headless': False,
},
}
)
rollout_mock = mock.Mock(return_value={'avg_reward': 1.0})
def fake_instantiate(config_node, **_kwargs):
if config_node is cfg.data:
return _FakeDataset()
if config_node is cfg.agent:
return _FakeAgent()
raise AssertionError(f'unexpected instantiate config: {config_node!r}')
def fake_dataloader(_dataset, *, shuffle, **_kwargs):
del shuffle, _kwargs
return _FakeLoader(
{
'observation.front': torch.zeros(1, 3, 2, 2),
'observation.state': torch.zeros(1, 4),
'action': torch.zeros(1, 2),
'action_is_pad': torch.zeros(1, 1, dtype=torch.bool),
},
length=1,
)
with tempfile.TemporaryDirectory() as tempdir:
previous_cwd = os.getcwd()
try:
os.chdir(tempdir)
with mock.patch.object(train_vla, 'instantiate', side_effect=fake_instantiate), mock.patch.object(train_vla, 'DataLoader', side_effect=fake_dataloader), mock.patch.object(train_vla, 'build_training_optimizer', return_value=_FakeOptimizer(cfg.train.lr)), mock.patch.object(train_vla, 'get_lr_schedule_with_warmup', return_value=_FakeScheduler()), mock.patch.object(train_vla, 'tqdm', side_effect=lambda iterable, **kwargs: _FakeProgressBar(iterable)), mock.patch.object(train_vla.torch, 'save', return_value=None), mock.patch.object(eval_vla, '_run_eval', rollout_mock, create=True):
train_vla._run_training(cfg)
finally:
os.chdir(previous_cwd)
rollout_cfg = rollout_mock.call_args.args[0]
self.assertEqual(rollout_cfg.eval.device, 'cuda')
self.assertEqual(rollout_cfg.eval.num_workers, 4)
self.assertEqual(list(rollout_cfg.eval.cuda_devices), [0, 1])
self.assertEqual(float(rollout_cfg.eval.response_timeout_s), 123.0)
self.assertEqual(float(rollout_cfg.eval.server_startup_timeout_s), 456.0)
self.assertTrue(rollout_cfg.eval.headless)
self.assertEqual(rollout_cfg.eval.num_episodes, 5)
self.assertFalse(rollout_cfg.eval.record_video)
self.assertTrue(rollout_cfg.eval.save_summary_json)
self.assertTrue(rollout_cfg.eval.save_trajectory_image)
def test_training_passes_backbone_image_resize_override_to_dataset_instantiation(self): def test_training_passes_backbone_image_resize_override_to_dataset_instantiation(self):
cfg = OmegaConf.create( cfg = OmegaConf.create(