Hello,
I am recording CARLA recorder files for later replay in Docker with CARLA 0.9.15. When I replay them with the script below, vehicles are sometimes missing (even the ego vehicle of the scenario), and sometimes an extra car is pushed several meters into the air in the background and falls back down. This does not match the contents of the recorder file. I have 128 CPU cores and an A100 GPU, so this should not be a performance issue. Can anyone help?
#!/usr/bin/env python
import glob
import os
import sys
import time
import argparse
import queue
import threading
import xml.etree.ElementTree as ET

import cv2
import numpy as np

# Make the CARLA egg importable before importing carla itself.
try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass

import carla
from Bench2Drive.leaderboard.scripts.scenario_creator import print_scenario_data
# Queues feeding the background disk-writer threads.
rgb_queue = queue.Queue()
semantic_queue = queue.Queue()
depth_queue = queue.Queue()
def save_worker_rgb(save):
    while True:
        item = rgb_queue.get()
        if item is None:  # sentinel
            break
        image, cam_name = item
        out_dir = save
        os.makedirs(out_dir, exist_ok=True)
        image.save_to_disk(f"{out_dir}/{cam_name}/{image.frame:06d}.png")
        rgb_queue.task_done()
def save_worker_semantic(save):
    while True:
        item = semantic_queue.get()
        if item is None:  # sentinel
            break
        image, cam_name = item
        out_dir = os.path.join(save, cam_name)
        os.makedirs(out_dir, exist_ok=True)
        # Convert CARLA image to numpy array (H, W, 4), BGRA
        array = np.frombuffer(image.raw_data, dtype=np.uint8)
        array = array.reshape((image.height, image.width, 4))
        # Extract semantic labels from the R channel
        semantics = array[:, :, 2]  # channel 2 = Red (contains semantic class IDs)
        # Save as grayscale PNG (out_dir already includes cam_name)
        filename = f"{out_dir}/{image.frame:06d}.png"
        cv2.imwrite(filename, semantics)
        semantic_queue.task_done()
def save_worker_depth(save):
    while True:
        item = depth_queue.get()
        if item is None:  # sentinel
            break
        image, cam_name = item
        out_dir = os.path.join(save, cam_name)
        os.makedirs(out_dir, exist_ok=True)
        # Convert CARLA depth image to numpy array (H, W, 4), BGRA
        array = np.frombuffer(image.raw_data, dtype=np.uint8)
        array = array.reshape((image.height, image.width, 4))
        # First three channels are BGR and carry the 24-bit depth encoding
        depth_bgr = array[:, :, :3]
        # Decode to normalized depth in [0, 1]: (R + G*256 + B*256^2) / (256^3 - 1)
        depth_normalized = (
            depth_bgr[:, :, 2].astype(np.float32) +
            depth_bgr[:, :, 1].astype(np.float32) * 256.0 +
            depth_bgr[:, :, 0].astype(np.float32) * 256.0 * 256.0
        ) / (256.0 ** 3 - 1.0)
        # Scale to 0-255 for saving as 8-bit grayscale
        depth_img = (depth_normalized * 255.0 + 0.5).astype(np.uint8)
        # Save as grayscale PNG (out_dir already includes cam_name)
        filename = f"{out_dir}/{image.frame:06d}.png"
        cv2.imwrite(filename, depth_img)
        depth_queue.task_done()
def save_rgb_image(image, cam_name):
    rgb_queue.put((image, cam_name))

def save_semantic_image(image, cam_name):
    semantic_queue.put((image, cam_name))

def save_depth_image(image, cam_name):
    depth_queue.put((image, cam_name))
def spawn_rgb_camera(world, transform, attached_actor, fov, resolution, cam_name):
    blueprint_library = world.get_blueprint_library()
    camera_bp = blueprint_library.find('sensor.camera.rgb')
    width, height = resolution
    camera_bp.set_attribute('image_size_x', str(width))
    camera_bp.set_attribute('image_size_y', str(height))
    camera_bp.set_attribute('fov', str(fov))
    camera_bp.set_attribute('sensor_tick', '0.05')
    camera = world.spawn_actor(camera_bp, transform, attach_to=attached_actor)
    camera.listen(lambda image, name=cam_name: save_rgb_image(image, name))
    return camera

def spawn_semantic_camera(world, transform, attached_actor, fov, resolution, cam_name):
    blueprint_library = world.get_blueprint_library()
    camera_bp = blueprint_library.find('sensor.camera.semantic_segmentation')
    width, height = resolution
    camera_bp.set_attribute('image_size_x', str(width))
    camera_bp.set_attribute('image_size_y', str(height))
    camera_bp.set_attribute('fov', str(fov))
    camera_bp.set_attribute('sensor_tick', '0.05')
    camera = world.spawn_actor(camera_bp, transform, attach_to=attached_actor)
    camera.listen(lambda image, name=cam_name: save_semantic_image(image, name))
    return camera

def spawn_depth_camera(world, transform, attached_actor, fov, resolution, cam_name):
    blueprint_library = world.get_blueprint_library()
    camera_bp = blueprint_library.find('sensor.camera.depth')
    width, height = resolution
    camera_bp.set_attribute('image_size_x', str(width))
    camera_bp.set_attribute('image_size_y', str(height))
    camera_bp.set_attribute('fov', str(fov))
    camera_bp.set_attribute('sensor_tick', '0.05')
    camera = world.spawn_actor(camera_bp, transform, attach_to=attached_actor)
    camera.listen(lambda image, name=cam_name: save_depth_image(image, name))
    return camera
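
# NOTE: get_weather_from_xml() and get_night_mode() are called in main() below
# but were missing from this snippet. The two sketches here are only my
# assumptions about what they could look like (a leaderboard-style routes XML
# with a <weather> element); adjust tag and attribute names to your file.
def get_weather_from_xml(routes_path):
    # Hypothetical helper: build a carla.WeatherParameters from the first
    # <weather> element of the routes XML, copying matching attributes.
    weather = carla.WeatherParameters()
    node = ET.parse(routes_path).find('.//weather')
    if node is not None:
        for name, value in node.attrib.items():
            if hasattr(weather, name):
                setattr(weather, name, float(value))
    return weather

def get_night_mode(weather):
    # Hypothetical helper: treat a sun below the horizon as night.
    return weather.sun_altitude_angle < 0.0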
def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('--host', default='localhost')
    argparser.add_argument('--port', default=5452, type=int)
    argparser.add_argument('--start', default=0.0, type=float)
    argparser.add_argument('--duration', default=25.7, type=float)
    argparser.add_argument('--record', default="/home/carla/Accident_1334_0.log")
    argparser.add_argument('--savepath', default="/home/carla/")
    argparser.add_argument('--routes', default=None, help='path to the routes XML file (used for weather)')
    argparser.add_argument('--ignore-hero', action='store_true')
    argparser.add_argument('--move-spectator', action='store_true')
    argparser.add_argument('--follow-ego', action='store_true')
    args = argparser.parse_args()

    client = carla.Client(args.host, args.port)
    client.set_timeout(90.0)
    for i in range(40):
        try:
            world = client.get_world()
            print("Connected to CARLA world.")
            break
        except Exception as e:
            print(f"[{i}] CARLA not ready: {e}")
            time.sleep(2)
    else:
        raise RuntimeError("CARLA did not become ready in time.")
    # Find the ego vehicle's id in the recording so the replayer can follow it.
    file_info = client.show_recorder_file_info(args.record, True)
    follow_id = 0
    file_split = file_info.split("Create ")
    for data in file_split:
        if 'hero' in data:
            follow_id = int(data.split(": ")[0])
            print("Detected an ego vehicle with id '{}'".format(follow_id))
            break

    vehicles_b = world.get_actors().filter('vehicle.*')
    print("Vehicles before loading:")
    print(vehicles_b)
    for actor in world.get_actors():
        if 'vehicle' in actor.type_id or 'walker' in actor.type_id:
            actor.destroy()

    client.replay_file(args.record, args.start, args.duration, follow_id, False)
    file_info = client.show_recorder_file_info(args.record, True)
    output_txt_file = '/beegfs/work_fast/stelzer/recording_file_new.txt'
    with open(output_txt_file, 'w') as f:
        f.write(file_info)
    time.sleep(200)
    print("Replaying recording...")
    client.set_replayer_time_factor(0.0)  # pause the replay while sensors are set up
    client.set_replayer_ignore_hero(args.ignore_hero)
    client.set_replayer_ignore_spectator(not args.move_spectator)

    world = client.get_world()
    for actor in world.get_actors():
        if actor.type_id.startswith("vehicle.") or actor.type_id.startswith("walker."):
            actor.destroy()
    world.tick()

    weather = get_weather_from_xml(args.routes)
    world.set_weather(weather)

    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = 0.05  # 20 Hz
    settings.max_substep_delta_time = 0.005
    settings.max_substeps = 10
    settings.no_rendering_mode = False
    world.apply_settings(settings)
    vehicles = world.get_actors().filter('vehicle.*')
    #print(vehicles)
    if get_night_mode(weather):
        print("Night mode ON: turning on vehicle lights")
        for vehicle in vehicles:
            vehicle.set_light_state(carla.VehicleLightState(
                carla.VehicleLightState.LowBeam | carla.VehicleLightState.Position))
    else:
        print("Night mode OFF: turning off vehicle lights")
        for vehicle in vehicles:
            vehicle.set_light_state(carla.VehicleLightState.NONE)
    ego_vehicle = None
    timeout = 90
    start_time = time.time()
    while time.time() - start_time < timeout:
        #world.tick()
        actors = world.get_actors().filter('vehicle.*')
        #print(actors)
        for actor in actors:
            if actor.attributes.get('role_name') == 'hero':
                ego_vehicle = actor
                break
        if ego_vehicle:
            break
        time.sleep(0.5)
    if not ego_vehicle:
        raise RuntimeError("Ego vehicle not found.")
    print(f"Ego vehicle found: {ego_vehicle.id}")
    camera_transform_ref = carla.Transform(
        carla.Location(x=0.60, y=0.0, z=1.38),
        carla.Rotation(pitch=0.0, yaw=0.0, roll=0.0))
    camera_transform_TF = carla.Transform(
        carla.Location(x=-1.50, y=0.0, z=2.0),
        carla.Rotation(pitch=0.0, yaw=0.0, roll=0.0))
    camera_transform_IF_center = carla.Transform(
        carla.Location(x=1.3, y=0.0, z=2.3),
        carla.Rotation(pitch=0.0, yaw=0.0, roll=0.0))
    camera_transform_IF_left = carla.Transform(
        carla.Location(x=1.3, y=0.0, z=2.3),
        carla.Rotation(pitch=0.0, yaw=-60.0, roll=0.0))
    camera_transform_IF_right = carla.Transform(
        carla.Location(x=1.3, y=0.0, z=2.3),
        carla.Rotation(pitch=0.0, yaw=60.0, roll=0.0))
    print("Created transforms")
    save_thread_rgb = threading.Thread(target=lambda: save_worker_rgb(args.savepath))
    save_thread_rgb.start()
    #save_thread_semantic = threading.Thread(target=lambda: save_worker_semantic(args.savepath))
    #save_thread_semantic.start()
    #save_thread_depth = threading.Thread(target=lambda: save_worker_depth(args.savepath))
    #save_thread_depth.start()
    print("Created workers")

    cameras = []
    rgb_ref_1 = spawn_rgb_camera(world, camera_transform_ref, ego_vehicle, 120, (1200, 800), "rgb_reference_1")
    print("Created first camera")
    cameras.append(rgb_ref_1)
    print("Appended first camera")
    #rgb_TF = spawn_rgb_camera(world, camera_transform_TF, ego_vehicle, 110, (1024, 512), "rgb_transfuser")
    #print("Created second camera")
    #cameras.append(rgb_TF)
    #print("Appended second camera")
    #rgb_IF_center = spawn_rgb_camera(world, camera_transform_IF_center, ego_vehicle, 100, (800, 600), "rgb_interfuser_center")
    #cameras.append(rgb_IF_center)
    #rgb_IF_left = spawn_rgb_camera(world, camera_transform_IF_left, ego_vehicle, 100, (800, 600), "rgb_interfuser_left")
    #cameras.append(rgb_IF_left)
    #rgb_IF_right = spawn_rgb_camera(world, camera_transform_IF_right, ego_vehicle, 100, (800, 600), "rgb_interfuser_right")
    #cameras.append(rgb_IF_right)
    #semantic_TF = spawn_semantic_camera(world, camera_transform_TF, ego_vehicle, 110, (1024, 512), "semantic_transfuser")
    #cameras.append(semantic_TF)
    #depth_TF = spawn_depth_camera(world, camera_transform_TF, ego_vehicle, 110, (1024, 512), "depth_transfuser")
    #cameras.append(depth_TF)
    print("Camera spawned and recording...")

    client.set_replayer_time_factor(1.0)  # resume the replay at normal speed
    world.tick()
    try:
        reference_path = os.path.join(args.savepath, "rgb_reference_1")
        num_entries = len(os.listdir(reference_path))
        duration = (num_entries / 10) + 1
        print("Starting replay loop with manual stepping (0.05 s steps)...")
        #total_steps = int(duration / 0.05)
        total_steps = 170
        for step in range(total_steps):
            frame = world.tick()
            print(f"World ticked: frame {frame}, step {step+1}/{total_steps}")
            actors = world.get_actors().filter('vehicle.*')
            print(actors)
            # time.sleep(0.01)  # optional visualization slow-down
            time.sleep(0.2)
    except KeyboardInterrupt:
        print("Recording stopped by user.")
    finally:
        print("Cleaning up sensors.")
        for cam in cameras:
            if cam is not None:
                cam.stop()
                cam.destroy()
        # Stop worker threads with a sentinel value.
        rgb_queue.put(None)
        #semantic_queue.put(None)
        #depth_queue.put(None)
        save_thread_rgb.join()
        #save_thread_semantic.join()
        #save_thread_depth.join()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        print('\ndone.')
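
As a rough sanity check, one can count the vehicles the recorder file claims to create and compare that with the vehicles actually alive once the replay has started. The sketch below assumes a server on localhost:2000 and that show_recorder_file_info prints lines like "Create 123: vehicle.audi.tt"; the exact text format may vary between CARLA versions.

# Sanity-check sketch: recorded vehicles vs. vehicles alive after replay.
import re
import time
import carla

client = carla.Client('localhost', 2000)
client.set_timeout(30.0)

log_path = '/home/carla/Accident_1334_0.log'
info = client.show_recorder_file_info(log_path, True)
recorded_vehicles = re.findall(r'Create \d+: (vehicle\.\S+)', info)
print(f"Recording creates {len(recorded_vehicles)} vehicles: {recorded_vehicles}")

# Replay the whole file and give the replayer a moment to spawn everything.
client.replay_file(log_path, 0.0, 0.0, 0, False)
time.sleep(5.0)

live_vehicles = [a.type_id for a in client.get_world().get_actors().filter('vehicle.*')]
print(f"World contains {len(live_vehicles)} vehicles: {live_vehicles}")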