code release

This commit is contained in:
Cheng Chi
2023-03-07 16:07:15 -05:00
parent 7b8d98f0c2
commit 4bf419aa5a
364 changed files with 49460 additions and 0 deletions

View File

@@ -0,0 +1,44 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
from diffusion_policy.env.block_pushing.block_pushing_multimodal import BlockPushMultimodal
from gym.wrappers import FlattenObservation
from diffusion_policy.gym_util.multistep_wrapper import MultiStepWrapper
from diffusion_policy.gym_util.video_wrapper import VideoWrapper
def test():
    """Smoke test for BlockPushMultimodal, bare and under the wrapper stack.

    Leftover `pdb.set_trace()` breakpoints were removed: they block any
    non-interactive (CI) execution of this test.
    """
    # Build the full wrapper stack to verify it constructs without error.
    # The object is intentionally discarded afterwards.
    env = MultiStepWrapper(
        VideoWrapper(
            FlattenObservation(
                BlockPushMultimodal()
            ),
            enabled=True,
            steps_per_render=2
        ),
        n_obs_steps=2,
        n_action_steps=8,
        max_episode_steps=16
    )

    # Bare environment: reset must succeed.
    env = BlockPushMultimodal()
    obs = env.reset()

    # Flattened environment: step once and print the residual between the
    # commanded action and the observed state change.
    # NOTE(review): assumes obs[8:10] is the effector position slice —
    # confirm against BlockPushMultimodal's flattened observation layout.
    env = FlattenObservation(BlockPushMultimodal())
    obs = env.reset()
    action = env.action_space.sample()
    next_obs, reward, done, info = env.step(action)
    print(obs[8:10] + action - next_obs[8:10])

    # A few random steps with rendering.
    for i in range(3):
        obs, reward, done, info = env.step(env.action_space.sample())
        img = env.render()
    print("Done!", done)

if __name__ == '__main__':
    test()

22
tests/test_cv2_util.py Normal file
View File

@@ -0,0 +1,22 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import numpy as np
from diffusion_policy.common.cv2_util import get_image_transform
def test():
    """A 720p input frame must be transformed to a 480p output frame."""
    transform = get_image_transform((1280,720), (640,480), bgr_to_rgb=False)
    src = np.zeros((720,1280,3), dtype=np.uint8)
    dst = transform(src)
    assert dst.shape == (480,640,3)

if __name__ == '__main__':
    test()

View File

@@ -0,0 +1,82 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import cv2
import json
import time
import numpy as np
from diffusion_policy.real_world.multi_realsense import MultiRealsense
from diffusion_policy.real_world.video_recorder import VideoRecorder
def test():
    """Record ~20 seconds from all connected RealSense cameras.

    Requires RealSense hardware; intended for manual execution only.

    Fixed defects: an unused `json.load` of a machine-specific absolute
    config path (crashed on any other machine), a first `transform`
    definition that was immediately shadowed by the second, and unused
    `video_recorder`/`vis_img` locals.
    """
    from diffusion_policy.common.cv2_util import get_image_transform
    # Downscale 720p color frames to 640x480 before they are exposed.
    color_transform = get_image_transform(
        input_res=(1280,720),
        output_res=(640,480),
        bgr_to_rgb=False)
    def transform(data):
        data['color'] = color_transform(data['color'])
        return data

    with MultiRealsense(
            resolution=(1280,720),
            capture_fps=30,
            record_fps=15,
            enable_color=True,
            transform=transform,
            verbose=True
        ) as realsense:
        realsense.set_exposure(exposure=150, gain=5)
        intr = realsense.get_intrinsics()
        print(intr)

        video_path = 'data_local/test'
        # Start recording one second in the future so all cameras can align
        # on the same start timestamp.
        rec_start_time = time.time() + 1
        realsense.start_recording(video_path, start_time=rec_start_time)
        realsense.restart_put(rec_start_time)

        out = None
        while True:
            # Reuse the output buffer between gets to avoid reallocation.
            out = realsense.get(out=out)
            time.sleep(1/60)
            if time.time() > (rec_start_time + 20.0):
                break

if __name__ == "__main__":
    test()

View File

@@ -0,0 +1,126 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
from tqdm import tqdm
import numpy as np
import scipy.interpolate as si
import scipy.spatial.transform as st
from diffusion_policy.common.pose_trajectory_interpolator import (
rotation_distance,
pose_distance,
PoseTrajectoryInterpolator)
def test_rotation_distance():
    """rotation_distance must agree with an align_vectors-based reference."""
    def reference(r1: st.Rotation, r2: st.Rotation) -> float:
        # Recover the relative rotation magnitude by aligning frame axes.
        return st.Rotation.align_vectors(
            r2.as_matrix().T, r1.as_matrix().T)[0].magnitude()

    for _ in range(10000):
        r1 = st.Rotation.from_euler('xyz', np.random.uniform(-7,7,size=3))
        r2 = st.Rotation.from_euler('xyz', np.random.uniform(-7,7,size=3))
        assert abs(rotation_distance(r1, r2) - reference(r1, r2)) < 1e-7
def test_pose_trajectory_interpolator():
    """Trimming keeps the expected knot count and stays evaluable.

    The original contained an exact copy-paste duplicate of the trim(-1,4)
    assertion block and unused `times`/`poses` locals; the cases are now
    table-driven.
    """
    t = np.linspace(-1,5,100)
    interp = PoseTrajectoryInterpolator(
        [0,1,3],
        np.zeros((3,6))
    )
    # (trim range, expected number of knot times after trimming)
    cases = [
        ((-1, 4), 5),
        ((0.5, 3.5), 4),
        ((0.5, 2.5), 3),
        ((0.5, 1.5), 3),
        ((1.2, 1.5), 2),
        ((1.3, 1.3), 1),  # degenerate zero-length trim
    ]
    for (start, end), expected_len in cases:
        trimmed = interp.trim(start, end)
        assert len(trimmed.times) == expected_len
        # Evaluate over the full time range to ensure interpolation works.
        trimmed(t)
def test_add_waypoint():
    """Fuzz add_waypoint over random trajectories; it must not raise."""
    for seed in tqdm(range(10000)):
        rng = np.random.default_rng(seed)
        n = rng.integers(1, 5)
        times = np.sort(rng.uniform(0, 1, size=n))
        last_time = times[-1]
        t_insert = rng.uniform(-0.1, 1.1)
        t_curr = rng.uniform(-0.1, 1.1)
        pos_speed = rng.poisson(3) + 1e-3
        rot_speed = rng.poisson(3) + 1e-3
        poses = rng.normal(0, 3, size=(n, 6))
        target = rng.normal(0, 3, size=6)
        # Occasionally exercise the optional-argument (None) code paths.
        if rng.random() < 0.1:
            last_time = None
        if rng.random() < 0.1:
            t_curr = None
        interp = PoseTrajectoryInterpolator(times=times, poses=poses)
        interp.add_waypoint(
            pose=target,
            time=t_insert,
            max_pos_speed=pos_speed,
            max_rot_speed=rot_speed,
            curr_time=t_curr,
            last_waypoint_time=last_time
        )
def test_drive_to_waypoint():
    """Fuzz drive_to_waypoint over random trajectories; it must not raise."""
    for seed in tqdm(range(10000)):
        rng = np.random.default_rng(seed)
        n = rng.integers(1, 5)
        times = np.sort(rng.uniform(0, 1, size=n))
        t_insert = rng.uniform(-0.1, 1.1)
        t_curr = rng.uniform(-0.1, 1.1)
        pos_speed = rng.poisson(3) + 1e-3
        rot_speed = rng.poisson(3) + 1e-3
        poses = rng.normal(0, 3, size=(n, 6))
        target = rng.normal(0, 3, size=6)
        interp = PoseTrajectoryInterpolator(times=times, poses=poses)
        interp.drive_to_waypoint(
            pose=target,
            time=t_insert,
            curr_time=t_curr,
            max_pos_speed=pos_speed,
            max_rot_speed=rot_speed
        )

if __name__ == '__main__':
    test_drive_to_waypoint()

View File

@@ -0,0 +1,56 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import time
import numpy as np
from diffusion_policy.common.precise_sleep import precise_sleep, precise_wait
def test_sleep():
    """Plot the interval jitter of precise_sleep at 10 Hz (manual check)."""
    dt = 0.1
    tol = 1e-3
    stamps = []
    for _ in range(100):
        precise_sleep(dt)
        stamps.append(time.monotonic())
    deltas = np.diff(stamps)
    from matplotlib import pyplot as plt
    plt.plot(deltas)
    plt.ylim((dt-tol, dt+tol))
def test_wait():
    """Compare wall-clock error of time.sleep vs precise_wait at 10 Hz."""
    dt = 0.1
    tol = 1e-3

    # Baseline: plain time.sleep against absolute deadlines.
    sleep_errors = []
    t_start = time.monotonic()
    for i in range(1, 100):
        deadline = t_start + i * dt
        time.sleep(deadline - time.monotonic())
        sleep_errors.append(time.monotonic() - deadline)

    # Hybrid sleep/spin precise_wait against the same schedule.
    hybrid_errors = []
    t_start = time.monotonic()
    for i in range(1, 100):
        deadline = t_start + i * dt
        precise_wait(deadline)
        hybrid_errors.append(time.monotonic() - deadline)

    from matplotlib import pyplot as plt
    plt.plot(sleep_errors, label='time.sleep')
    plt.plot(hybrid_errors, label='sleep/spin hybrid')
    plt.ylim((-tol, +tol))
    plt.title('0.1 sec sleep error')
    plt.legend()

if __name__ == '__main__':
    test_sleep()

View File

@@ -0,0 +1,62 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import zarr
from diffusion_policy.common.replay_buffer import ReplayBuffer
def test():
    """Episode add/get works on both numpy and zarr ReplayBuffer backends."""
    import numpy as np

    # In-memory numpy backend.
    buff = ReplayBuffer.create_empty_numpy()
    buff.add_episode({
        'obs': np.zeros((100,10), dtype=np.float16)
    })
    buff.add_episode({
        'obs': np.ones((50,10)),
        'action': np.ones((50,2))
    })
    obs = buff.get_episode(0)

    # Zarr backend, including rechunking after data is present.
    buff = ReplayBuffer.create_empty_zarr()
    buff.add_episode({
        'obs': np.zeros((100,10), dtype=np.float16)
    })
    buff.add_episode({
        'obs': np.ones((50,10)),
        'action': np.ones((50,2))
    })
    obs = buff.get_episode(0)
    buff.set_chunks({
        'obs': (100,10),
        'action': (100,2)
    })
def test_real():
    """Round-trip a real dataset: numpy buffer -> zarr store -> copy back.

    Requires a local dataset; intended for manual execution only. A dead
    `key, group = next(iter(...))` statement (immediately overwritten by
    the loop) was removed.
    """
    import os
    dist_group = zarr.open(
        os.path.expanduser('~/dev/diffusion_policy/data/pusht/pusht_cchi_v2.zarr'), 'r')
    buff = ReplayBuffer.create_empty_numpy()
    for key, group in dist_group.items():
        buff.add_episode(group)
    out_path = os.path.expanduser('~/dev/diffusion_policy/data/test.zarr')
    out_store = zarr.DirectoryStore(out_path)
    buff.save_to_store(out_store)
    # Reload through a zarr-backed copy and exercise pop_episode.
    buff = ReplayBuffer.copy_from_path(out_path, store=zarr.MemoryStore())
    buff.pop_episode()
def test_pop():
    """Open an existing replay buffer read-write (manual inspection stub)."""
    replay_buffer = ReplayBuffer.create_from_path(
        '/home/chengchi/dev/diffusion_policy/data/pusht_cchi_v3_replay.zarr',
        mode='rw')

188
tests/test_ring_buffer.py Normal file
View File

@@ -0,0 +1,188 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import time
import numpy as np
import multiprocessing as mp
from multiprocessing.managers import SharedMemoryManager
from diffusion_policy.shared_memory.shared_memory_ring_buffer import (
SharedMemoryRingBuffer,
SharedAtomicCounter)
def test():
    """Single-process smoke test: write 30 timestamps, read the latest back."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    ring_buffer = SharedMemoryRingBuffer.create_from_examples(
        shm_manager,
        {'timestamp': np.array(0, dtype=np.float64)},
        buffer_size=128
    )
    for _ in range(30):
        sample = {
            'timestamp': np.array(time.perf_counter(), dtype=np.float64)
        }
        ring_buffer.put(sample)
        print(ring_buffer.get())
def _timestamp_worker(ring_buffer, start_event, stop_event):
    """Child process: write wall-clock timestamps until stop_event is set.

    Fix: the original set start_event *before* the first put, so the parent
    could wake from start_event.wait() and read an empty ring buffer.
    start_event is now set only after a sample is in the buffer.

    Args:
        ring_buffer: SharedMemoryRingBuffer to write into.
        start_event: set once at least one sample has been written.
        stop_event: checked each iteration; terminates the loop when set.
    """
    while not stop_event.is_set():
        ring_buffer.put({
            'timestamp': np.array(
                time.time(),
                dtype=np.float64)
        })
        start_event.set()
def test_mp():
    """Cross-process latency check: a worker writes timestamps, parent reads."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    ring_buffer = SharedMemoryRingBuffer.create_from_examples(
        shm_manager,
        {'timestamp': np.array(0, dtype=np.float64)},
        get_max_k=1,
        get_time_budget=0.01,
        put_desired_frequency=1000
    )
    start_event = mp.Event()
    stop_event = mp.Event()
    worker = mp.Process(
        target=_timestamp_worker,
        args=(ring_buffer, start_event, stop_event))
    worker.start()
    # Block until the worker signals it is running.
    start_event.wait()
    for _ in range(1000):
        latest = float(ring_buffer.get()['timestamp'])
        print('latency', time.time() - latest)
    stop_event.set()
    worker.join()
def test_get_last_k():
    """get_last_k must return the k most recent samples, oldest first."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    ring_buffer = SharedMemoryRingBuffer.create_from_examples(
        shm_manager,
        {'counter': np.array(0, dtype=np.int64)},
        buffer_size=8
    )
    from collections import deque
    k = 4
    # Reference window tracking the k most recent values.
    expected = deque(maxlen=k)
    for value in range(100):
        ring_buffer.put({
            'counter': np.array(value, dtype=np.int64)
        })
        expected.append(value)
        if value > k:
            got = ring_buffer.get_last_k(k)['counter']
            assert np.allclose(got, expected)
    print(ring_buffer.shared_arrays['counter'].get())
    final = ring_buffer.get_last_k(4)
    print(final)
def test_timing():
    """Drive puts at ~150 Hz against a 100 Hz buffer and report loop rate."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    ring_buffer = SharedMemoryRingBuffer.create_from_examples(
        shm_manager,
        {'counter': np.array(0, dtype=np.int64)},
        get_max_k=8,
        get_time_budget=0.1,
        put_desired_frequency=100
    )
    print('buffer_size', ring_buffer.buffer_size)
    dt = 1 / 150
    t_init = time.monotonic()
    for i in range(1000):
        t_start = time.monotonic()
        # Non-blocking put: overruns are acceptable at this rate.
        ring_buffer.put({
            'counter': np.array(i, dtype=np.int64)
        }, wait=False)
        if (i % 10 == 0) and (i > 0):
            result = ring_buffer.get_last_k(8)
        t_end = time.monotonic()
        # Sleep off the remainder of this cycle to hold the target rate.
        desired_t = (i + 1) * dt + t_init
        if desired_t > t_end:
            time.sleep(desired_t - t_end)
        hz = 1 / (time.monotonic() - t_start)
        print(f'{hz}Hz')
def _timestamp_image_worker(ring_buffer, img_shape, dt, start_event, stop_event):
    """Child process: push a constant image + timestamp at a fixed rate.

    An unused `t_start` local (leftover from commented-out Hz logging) was
    removed.

    Args:
        ring_buffer: SharedMemoryRingBuffer to write into.
        img_shape: shape tuple of the uint8 image to generate.
        dt: desired seconds per sample.
        start_event: set each iteration so the parent knows the loop runs.
        stop_event: checked each iteration; terminates the loop when set.
    """
    i = 0
    t_init = time.monotonic()
    image = np.ones(img_shape, dtype=np.uint8)
    while not stop_event.is_set():
        start_event.set()
        ring_buffer.put({
            'img': image,
            'timestamp': time.time(),
            'counter': i
        })
        t_end = time.monotonic()
        # Sleep off the remainder of this cycle to hold the target rate.
        desired_t = (i + 1) * dt + t_init
        if desired_t > t_end:
            time.sleep(desired_t - t_end)
        i += 1
def test_timing_mp():
    """Measure read latency of large images streamed from a child process."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    hz = 200
    img_shape = (1920,1080,3)
    ring_buffer = SharedMemoryRingBuffer.create_from_examples(
        shm_manager,
        examples={
            'img': np.zeros(img_shape, dtype=np.uint8),
            'timestamp': time.time(),
            'counter': 0
        },
        get_max_k=60,
        get_time_budget=0.02,
        put_desired_frequency=hz
    )
    start_event = mp.Event()
    stop_event = mp.Event()
    worker = mp.Process(
        target=_timestamp_image_worker,
        args=(ring_buffer, img_shape, 1/hz, start_event, stop_event))
    worker.start()
    start_event.wait()

    out = None
    t_start = time.monotonic()
    k = 1
    for _ in range(1000):
        # Yield until enough samples have accumulated.
        if ring_buffer.count < k:
            time.sleep(0)
            continue
        out = ring_buffer.get_last_k(k=k, out=out)
        latest = float(out['timestamp'][-1])
        print('latency', time.time() - latest)
    t_end = time.monotonic()
    print('Get Hz', 1/(t_end-t_start)*1000)
    stop_event.set()
    worker.join()

if __name__ == '__main__':
    # test_mp()
    test_timing_mp()

View File

@@ -0,0 +1,38 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
from diffusion_policy.env_runner.robomimic_image_runner import RobomimicImageRunner
def test():
    """Build a RobomimicImageRunner from the lift_image task config and roll it."""
    import os
    from omegaconf import OmegaConf
    cfg_path = os.path.expanduser('~/dev/diffusion_policy/diffusion_policy/config/task/lift_image.yaml')
    cfg = OmegaConf.load(cfg_path)
    cfg['n_obs_steps'] = 1
    cfg['n_action_steps'] = 1
    cfg['past_action_visible'] = False
    runner_cfg = cfg['env_runner']
    runner_cfg['n_train'] = 1
    runner_cfg['n_test'] = 1
    # _target_ is Hydra instantiation metadata, not a constructor argument.
    del runner_cfg['_target_']
    runner = RobomimicImageRunner(
        **runner_cfg,
        output_dir='/tmp/test')

    env = runner.env
    env.seed(seeds=runner.env_seeds)
    obs = env.reset()
    for _ in range(10):
        _ = env.step(env.action_space.sample())
    imgs = env.render()

if __name__ == '__main__':
    test()

View File

@@ -0,0 +1,34 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
from diffusion_policy.env_runner.robomimic_lowdim_runner import RobomimicLowdimRunner
def test():
    """Build a RobomimicLowdimRunner from the lift_lowdim task config and reset it."""
    import os
    from omegaconf import OmegaConf
    cfg_path = os.path.expanduser('~/dev/diffusion_policy/diffusion_policy/config/task/lift_lowdim.yaml')
    cfg = OmegaConf.load(cfg_path)
    cfg['n_obs_steps'] = 1
    cfg['n_action_steps'] = 1
    cfg['past_action_visible'] = False
    runner_cfg = cfg['env_runner']
    runner_cfg['n_train'] = 1
    runner_cfg['n_test'] = 0
    # _target_ is Hydra instantiation metadata, not a constructor argument.
    del runner_cfg['_target_']
    runner = RobomimicLowdimRunner(
        **runner_cfg,
        output_dir='/tmp/test')

    env = runner.env
    env.seed(seeds=runner.env_seeds)
    obs = env.reset()

if __name__ == '__main__':
    test()

View File

@@ -0,0 +1,67 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import numpy as np
from multiprocessing.managers import SharedMemoryManager
from diffusion_policy.shared_memory.shared_memory_queue import SharedMemoryQueue, Full, Empty
def test():
    """SharedMemoryQueue: empty/full errors, round-trip, FIFO order, batch ops."""
    shm_manager = SharedMemoryManager()
    shm_manager.start()
    example = {
        'cmd': 0,
        'pose': np.zeros((6,))
    }
    queue = SharedMemoryQueue.create_from_examples(
        shm_manager=shm_manager,
        examples=example,
        buffer_size=3
    )

    # get() on an empty queue must raise Empty.
    try:
        queue.get()
        raised = False
    except Empty:
        raised = True
    assert raised

    # Round-trip a single item.
    data = {
        'cmd': 1,
        'pose': np.ones((6,))
    }
    queue.put(data)
    result = queue.get()
    assert result['cmd'] == data['cmd']
    assert np.allclose(result['pose'], data['pose'])

    # Fill to capacity; the next put must raise Full.
    queue.put(data)
    queue.put(data)
    queue.put(data)
    assert queue.qsize() == 3
    try:
        queue.put(data)
        raised = False
    except Full:
        raised = True
    assert raised

    # Drain everything at once.
    result = queue.get_all()
    assert np.allclose(result['cmd'], [1,1,1])

    # FIFO order must survive interleaved put/get.
    queue.put({'cmd': 0})
    queue.put({'cmd': 1})
    queue.put({'cmd': 2})
    queue.get()
    queue.put({'cmd': 3})
    result = queue.get_k(3)
    assert np.allclose(result['cmd'], [1,2,3])
    queue.clear()

if __name__ == "__main__":
    test()

View File

@@ -0,0 +1,87 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import cv2
import json
import time
from multiprocessing.managers import SharedMemoryManager
from diffusion_policy.real_world.single_realsense import SingleRealsense
def test():
    """Live-view and record ~20 seconds from the first connected RealSense.

    Requires RealSense hardware; intended for manual execution only.

    Fixed defects: an unused `json.load` of a machine-specific absolute
    config path (crashed on any other machine) and an unused `key` local
    whose commented-out handling was removed.
    """
    serials = SingleRealsense.get_connected_devices_serial()
    serial = serials[0]

    # Optional downscale transform; pass as transform=... to SingleRealsense.
    # NOTE: at 960x540 with //3, 60fps and 30fps are indistinguishable.
    def transform(data):
        color = data['color']
        h, w, _ = color.shape
        factor = 2
        color = cv2.resize(
            color, (w//factor, h//factor), interpolation=cv2.INTER_AREA)
        data['color'] = color
        return data

    with SharedMemoryManager() as shm_manager:
        with SingleRealsense(
                shm_manager=shm_manager,
                serial_number=serial,
                resolution=(1280,720),
                capture_fps=30,
                enable_color=True,
            ) as realsense:
            # Limit OpenCV's internal thread pool — presumably to avoid
            # contention with the capture process; confirm if changing.
            cv2.setNumThreads(1)
            realsense.set_exposure(exposure=150, gain=5)
            intr = realsense.get_intrinsics()
            print(intr)

            video_path = 'data_local/test.mp4'
            # Start recording two seconds in the future.
            rec_start_time = time.time() + 2
            realsense.start_recording(video_path, start_time=rec_start_time)

            data = None
            while True:
                # Reuse the output buffer between gets to avoid reallocation.
                data = realsense.get(out=data)
                cv2.imshow('default', data['color'])
                cv2.pollKey()
                time.sleep(1/60)
                if time.time() > (rec_start_time + 20.0):
                    break

if __name__ == "__main__":
    test()

View File

@@ -0,0 +1,151 @@
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ROOT_DIR)
os.chdir(ROOT_DIR)
import numpy as np
import time
from diffusion_policy.common.timestamp_accumulator import (
get_accumulate_timestamp_idxs,
TimestampObsAccumulator,
TimestampActionAccumulator
)
def test_index():
    """get_accumulate_timestamp_idxs: monotonic, gap-free, deduplicated indices."""
    buffer = np.zeros(16)
    start_time = 0.0
    dt = 1/10
    seen_global = list()
    next_global_idx = 0

    # First chunk: 100 stamps over [0, 1].
    timestamps = np.linspace(0, 1, 100)
    local_idxs, global_idxs, next_global_idx = get_accumulate_timestamp_idxs(
        timestamps, start_time=start_time, dt=dt,
        next_global_idx=next_global_idx)
    assert local_idxs[0] == 0
    assert global_idxs[0] == 0
    buffer[global_idxs] = timestamps[local_idxs]
    seen_global.extend(global_idxs)

    # Overlapping second chunk: already-consumed stamps must not re-emit.
    timestamps = np.linspace(0.5, 1.5, 100)
    local_idxs, global_idxs, next_global_idx = get_accumulate_timestamp_idxs(
        timestamps, start_time=start_time, dt=dt,
        next_global_idx=next_global_idx)
    buffer[global_idxs] = timestamps[local_idxs]
    seen_global.extend(global_idxs)
    # Accumulated values strictly increase, global indices are contiguous.
    assert np.all(buffer[1:] > buffer[:-1])
    assert np.all(np.array(seen_global) == np.array(list(range(len(seen_global)))))

    # Start over with sparse timestamps.
    next_global_idx = 0
    timestamps = np.linspace(0, 1, 3)
    local_idxs, global_idxs, next_global_idx = get_accumulate_timestamp_idxs(
        timestamps, start_time=start_time, dt=dt,
        next_global_idx=next_global_idx)
    assert local_idxs[0] == 0
    assert local_idxs[-1] == 2

    # Numerical-robustness check: exact multiples of dt from a large epoch.
    # This becomes a problem when eps <= 1e-7.
    start_time = time.time()
    next_global_idx = 0
    timestamps = np.arange(100000) * dt + start_time
    local_idxs, global_idxs, next_global_idx = get_accumulate_timestamp_idxs(
        timestamps, start_time=start_time, dt=dt,
        next_global_idx=next_global_idx)
    assert local_idxs == global_idxs
def test_obs_accumulator():
    """TimestampObsAccumulator: downsampling, idempotent put, and fill-in."""
    # Case 1: 100 Hz data accumulated at 10 Hz -> every 10th sample kept.
    dt = 1/10
    ddt = 1/100
    n = 100
    d = 6
    start_time = time.time()
    toa = TimestampObsAccumulator(start_time, dt)
    poses = np.repeat(np.arange(n).reshape((n,1)), d, axis=1)
    timestamps = np.arange(n) * ddt + start_time
    toa.put({
        'pose': poses,
        'timestamp': timestamps
    }, timestamps)
    assert np.all(toa.data['pose'][:,0] == np.arange(10)*10)
    assert len(toa) == 10

    # Putting identical data again must not change the result.
    toa.put({
        'pose': poses,
        'timestamp': timestamps
    }, timestamps)
    assert np.all(toa.data['pose'][:,0] == np.arange(10)*10)
    assert len(toa) == 10

    # Case 2: data slower than the accumulation rate -> gaps are filled in.
    dt = 1/10
    ddt = 1/5
    n = 10
    d = 6
    start_time = time.time()
    toa = TimestampObsAccumulator(start_time, dt)
    poses = np.repeat(np.arange(n).reshape((n,1)), d, axis=1)
    timestamps = np.arange(n) * ddt + start_time
    toa.put({
        'pose': poses,
        'timestamp': timestamps
    }, timestamps)
    assert len(toa) == 1 + (n-1) * 2

    # A time-shifted repeat extends the accumulated range.
    timestamps = (np.arange(n) + 2) * ddt + start_time
    toa.put({
        'pose': poses,
        'timestamp': timestamps
    }, timestamps)
    assert len(toa) == 1 + (n-1) * 2 + 4
def test_action_accumulator():
    """TimestampActionAccumulator: overlapping puts keep a consistent timeline."""
    dt = 1/10
    n = 10
    d = 6
    start_time = time.time()
    taa = TimestampActionAccumulator(start_time, dt)
    actions = np.repeat(np.arange(n).reshape((n,1)), d, axis=1)
    timestamps = np.arange(n) * dt + start_time
    taa.put(actions, timestamps)
    assert np.all(taa.actions == actions)
    assert np.all(taa.timestamps == timestamps)

    # Re-putting earlier (shifted-back) data must not disturb the timeline.
    taa.put(actions - 5, timestamps - 0.5)
    assert np.allclose(taa.timestamps, timestamps)

    # Later data extends the timeline.
    taa.put(actions + 5, timestamps + 0.5)
    assert len(taa) == 15
    assert np.all(taa.actions[:,0] == np.arange(15))

if __name__ == '__main__':
    test_action_accumulator()