Bug fix and tuning (#62)
* Draw boundary on top of all detections

* Update launch

* Add bag name to video

* Update params for better evaluation

* Fix OBB bug

* Update params

* Tune transform
adrian-soch authored Nov 28, 2023
1 parent 1aa9a20 commit f918b4f
Showing 6 changed files with 92 additions and 17 deletions.
13 changes: 7 additions & 6 deletions Metrics/preprocessGT.py
@@ -35,16 +35,14 @@ def main(args):
write_lines(result_name, normalized_lines)

# Define a list of points as tuples, must be clockwise
-# q7_2_may10_2023_clean bag
-# roi_points = np.array([(495, 230), (900, 230), (1910, 730), (525, 900)])

-# q7_2_may10_2023_clean_short_range
+# may10 short_range
# roi_points = np.array([(658,305), (980,960), (1900,737), (1049,287)])

-# oct18_r9
+# oct18
roi_points = np.array([(5,735),(1270,645),(1500,676),(240,1060)])

-# dec7_dhd1_short
+# dec7
# roi_points = np.array([(835,480), (1050,1056), (1918,845), (1130,465)])

# Create a polygon object from the points using shapely
@@ -56,7 +54,6 @@ def main(args):
if args.image_path is not None:
# Draw roi on image
img = cv2.imread(args.image_path)
-cv2.polylines(img, [roi_points], True, (255, 0, 0), 3)

# Read the csv file using pandas
df = pd.read_csv(result_name, header=None)
@@ -85,6 +82,10 @@ def main(args):
if args.image_path is not None:
cv2.polylines(img, [points], True, colour, 2)

+if args.image_path is not None:
+    # Draw roi on image
+    cv2.polylines(img, [roi_points], True, (255, 0, 0), 3)

# Drop the rows that are in the remove list using pandas
df = df.drop(remove_indices)

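For context, a minimal standalone sketch of the ordering this change enforces in preprocessGT.py: detections are filtered against the shapely ROI polygon and drawn first, and the ROI boundary is drawn last so it stays visible on top of every box. The frame and detection list below are made-up placeholders; only the oct18 ROI coordinates come from the file above.

import cv2
import numpy as np
from shapely.geometry import Point, Polygon

# ROI corners must be listed clockwise; int32 keeps OpenCV's drawing functions happy
roi_points = np.array([(5, 735), (1270, 645), (1500, 676), (240, 1060)], dtype=np.int32)
roi_polygon = Polygon(roi_points)

img = np.zeros((1080, 1920, 3), dtype=np.uint8)   # stand-in for the camera frame
detections = [((600, 700), (80, 40)), ((900, 800), (60, 30))]  # illustrative (center, size) pairs

remove_indices = []
for i, ((x, y), (w, h)) in enumerate(detections):
    if not roi_polygon.contains(Point(x, y)):
        remove_indices.append(i)                  # drop detections outside the ROI
        continue
    cv2.rectangle(img, (x - w // 2, y - h // 2), (x + w // 2, y + h // 2), (0, 255, 0), 2)

# Draw the ROI boundary last so it is rendered on top of all detections
cv2.polylines(img, [roi_points], True, (255, 0, 0), 3)
cv2.imwrite('roi_check.png', img)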
8 changes: 7 additions & 1 deletion fusion_module/fusion_module/fusion_visualizer_node.py
@@ -23,6 +23,12 @@ def __init__(self):
self.declare_parameter('save_video', False)
save_video = self.get_parameter(
'save_video').get_parameter_value().bool_value

+self.declare_parameter('video_name', 'video')
+self.video_name = self.get_parameter(
+    'video_name').get_parameter_value().string_value

+print(self.video_name + '\n')

self.bridge = CvBridge()
self.image_sub = Subscriber(self, Image, 'image')
@@ -104,7 +110,7 @@ def callback(self, image_msg, detection_msg):
if self.video_writer is None:
fourcc = VideoWriter_fourcc(*'mp4v') # choose codec
fps = 10 # choose frame rate
-filename = 'video_{}.mp4'.format(datetime.datetime.now().strftime(
+filename = self.video_name + '_{}.mp4'.format(datetime.datetime.now().strftime(
'%Y%m%d_%H%M%S')) # choose file name with current timestamp
self.video_writer = VideoWriter(
filename, fourcc, fps, (cv_image.shape[1], cv_image.shape[0]))
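A minimal sketch of the lazy VideoWriter setup shown above, with the output filename driven by the new video_name parameter; make_writer is a hypothetical helper rather than a method of the node.

import datetime
from cv2 import VideoWriter, VideoWriter_fourcc

def make_writer(video_name, frame):
    """Create an mp4 writer named <video_name>_<timestamp>.mp4, sized to the first frame."""
    fourcc = VideoWriter_fourcc(*'mp4v')   # codec
    fps = 10                               # matches the frame rate hard-coded in the node
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = '{}_{}.mp4'.format(video_name, stamp)
    height, width = frame.shape[:2]
    return VideoWriter(filename, fourcc, fps, (width, height))

# Usage: writer = make_writer('oct18_r1_clean', first_frame); writer.write(frame); ...; writer.release()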
2 changes: 1 addition & 1 deletion lidar_pipeline/configs/dec7_config.yaml
@@ -40,7 +40,7 @@ projection_node:
# Transform from Lidar to camera frame
lidar2cam_extrinsic:
rotation: [0.0, 1.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0]
-translation: [0.00, 0.0508, 0.0502]
+translation: [-0.3, -0.32, 0.0502]

# Transform from the lidar data frame to the lidar sensor frame
# See https://github.com/ros-drivers/ros2_ouster_drivers/issues/87
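A small numpy sketch of how an extrinsic like the tuned lidar2cam_extrinsic above is applied: the rotation is stored row-major, and a lidar-frame point maps into the camera frame as p_cam = R @ p_lidar + t. The sample point is illustrative.

import numpy as np

# Row-major rotation and the tuned translation from the dec7 config above
R = np.array([0.0, 1.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0]).reshape(3, 3)
t = np.array([-0.3, -0.32, 0.0502])

def lidar_to_camera(p_lidar):
    """Map a point from the lidar frame into the camera frame."""
    return R @ np.asarray(p_lidar) + t

print(lidar_to_camera([10.0, 2.0, -1.0]))  # -> roughly [1.7, 0.68, -9.95]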
55 changes: 55 additions & 0 deletions lidar_pipeline/configs/oct18_r1_config.yaml
@@ -0,0 +1,55 @@
# These parameters are dependent on the sensor location
# Note: For converting angles use https://www.andre-gaschler.com/rotationconverter/
perception_node:
ros__parameters:

# Transformation from sensor to world frame
lidar2world_transform:
# [x,y,z] in meters
translation: [0.0, 0.0, 3.25]

# [w,x,y,z] normalized quaternion
quaternion: [0.999769, 0.0, -0.021498, 0.0]

# The crop box isolates a region of interest in the pointcloud
# all points outside this box are removed
# Use the roi_visulaizer.py tool to find new crop_box params if needed
crop_box_transform:
# [x,y,z] in meters
translation: [35.0, -0.5, 1.75]

# [w,x,y,z] normalized quaternion
quaternion: [0.9025852843498606, 0.0, 0.0, 0.43051109680829514]

# [x, y, z] in meters
size: [16.0, 75.0, 3.5]


# Use the same `lidar2world_transform` for both nodes
projection_node:
ros__parameters:

# Matrix from intrinsic camera calibration
# This day's data was recorded at 1920x1080
camera_matrix: [1199.821557, 0.0, 960.562236, 0.0, 1198.033465, 551.675808, 0.0, 0.0, 1.0]

# Transformation from sensor to world frame
lidar2world_transform:
# [x,y,z] in meters
translation: [0.0, 0.0, 3.25]

# [w,x,y,z] normalized quaternion
quaternion: [0.999769, 0.0, -0.021498, 0.0]

# Transform from Lidar to camera frame
lidar2cam_extrinsic:
rotation: [-0.0174524, 0.9998477, 0.0000000, -0.0000000, -0.0000000, -1.0000000, -0.9998477, -0.0174524, 0.0000000]
translation: [0.00, 0.0508, 0.0502]

# Transform from the lidar data frame to the lidar sensor frame
# See https://github.com/ros-drivers/ros2_ouster_drivers/issues/87
# The translation part may already be accounted for internally in the driver
lidarData2lidarSensor_extrinsic:
rotation: [-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0]
translation: [0.0, 0.0, 0.03618]
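
A small sketch of the quaternion convention used in these configs ([w, x, y, z], normalized) converted to a rotation matrix and applied as the lidar2world_transform; the helper and sample point are illustrative, not code from the pipeline.

import numpy as np

def quat_wxyz_to_matrix(q):
    """Rotation matrix from a normalized [w, x, y, z] quaternion."""
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

# lidar2world_transform from the oct18 config above: slight pitch, sensor mounted 3.25 m up
R = quat_wxyz_to_matrix([0.999769, 0.0, -0.021498, 0.0])
t = np.array([0.0, 0.0, 3.25])

p_sensor = np.array([20.0, 0.0, 0.0])  # illustrative point 20 m in front of the lidar
p_world = R @ p_sensor + t
print(p_world)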

25 changes: 19 additions & 6 deletions lidar_pipeline/launch/fusion_demo.launch.py
@@ -19,7 +19,7 @@
# Used to change playback rate of ros bag
# A9 data bags were recorded at 2.5 Hz so they need a 4x speedup
# if left as 1 then there is no rate change
-BAG_PLAY_RATE = 0.16
+BAG_PLAY_RATE = 1.0
FLIP_IMAGE = False

BAG_PLAY_LOOP = True
@@ -35,16 +35,16 @@
'''
ABS_PATH_TO_ROSBAGS = '/home/adrian/dev/bags/'

-# 10, 7, 6, 12
-BAG_SELECTOR = 7
+# 10, 7, 6, 12, 13
+BAG_SELECTOR = 13

-# Determines what kind of output you want, Video/Rviz2
+# Determines what kind of output you want, Video/Rviz2/csv_tracker_data
SAVE_OUTPUT_VIDEO = True
SAVE_CSV_FUSION_OUTPUT = True
SHOW_RVIZ = False

# Fusion Overrides
-LIDAR_RESULT_ONLY = True
+LIDAR_RESULT_ONLY = False
CAM_RESULT_ONLY = False

# Because of the yolov5 includes, it's easier to just run this directly
@@ -58,7 +58,6 @@
# FLIP_IMAGE = True
CONFIG_NAME = 'default_config.yaml'


# MARC Rooftop data without data synchronization
# Fusion and projection will not work
elif BAG_SELECTOR == 0:
Expand All @@ -77,6 +76,9 @@
elif BAG_SELECTOR == 12:
BAG_NAME = 'cleaned_bags/may10_r5_clean'
CONFIG_NAME = 'may10_config.yaml'
+elif BAG_SELECTOR == 13:
+    BAG_NAME = 'cleaned_bags/oct18_r1_clean'
+    CONFIG_NAME = 'oct18_r1_config.yaml'

# MARC Rooftop data with synchronized lidar + camera
elif BAG_SELECTOR == 2:
@@ -122,6 +124,15 @@
print('Invalid bag selection!')
exit(-1)

+# Set and validate parameters

+if SAVE_CSV_FUSION_OUTPUT:
+    BAG_PLAY_RATE = 0.16

+if LIDAR_RESULT_ONLY and CAM_RESULT_ONLY:
+    print('Error: Must use lidar or camera results for fusion.')
+    exit(ValueError)

START_BAG_DELAY = 0.0
if SAVE_OUTPUT_VIDEO:
START_BAG_DELAY = 7.5
@@ -196,13 +207,15 @@ def generate_launch_description():
]
)

+video_name = BAG_NAME.split('/')[-1]
fusion_viz = Node(
package='fusion_module',
executable='fusion_viz_node',
name='fusion_viz_node',
parameters=[
{'flip_image': FLIP_IMAGE},
{'save_video': SAVE_OUTPUT_VIDEO},
+{'video_name': video_name},
]
)

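For readers unfamiliar with how constants like BAG_PLAY_RATE, BAG_PLAY_LOOP, and START_BAG_DELAY are typically consumed, here is one plausible sketch; the actual bag-playback wiring in fusion_demo.launch.py is not shown in this diff, so treat the snippet as illustrative only.

from launch import LaunchDescription
from launch.actions import ExecuteProcess, TimerAction

# Illustrative values mirroring the constants above
ABS_PATH_TO_ROSBAGS = '/home/adrian/dev/bags/'
BAG_NAME = 'cleaned_bags/oct18_r1_clean'
BAG_PLAY_RATE = 1.0
BAG_PLAY_LOOP = True
START_BAG_DELAY = 7.5

def generate_launch_description():
    cmd = ['ros2', 'bag', 'play', ABS_PATH_TO_ROSBAGS + BAG_NAME, '--rate', str(BAG_PLAY_RATE)]
    if BAG_PLAY_LOOP:
        cmd.append('--loop')
    bag_play = ExecuteProcess(cmd=cmd, output='screen')
    # Delay playback so the pipeline nodes and video writer are up before frames arrive
    return LaunchDescription([TimerAction(period=START_BAG_DELAY, actions=[bag_play])])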
6 changes: 3 additions & 3 deletions lidar_pipeline/src/lidar_processing.cpp
@@ -277,9 +277,9 @@ vision_msgs::msg::BoundingBox3D LidarProcessing::getOrientedBoudingBox(const pcl
size.getArray3fMap() = max_pt_T.getArray3fMap() - min_pt_T.getArray3fMap();

vision_msgs::msg::BoundingBox3D bbox;
-bbox.center.position.x = centroid[X];
-bbox.center.position.y = centroid[Y];
-bbox.center.position.z = centroid[Z];
+bbox.center.position.x = center[X];
+bbox.center.position.y = center[Y];
+bbox.center.position.z = center[Z];
bbox.center.orientation.x = bboxQ.x();
bbox.center.orientation.y = bboxQ.y();
bbox.center.orientation.z = bboxQ.z();
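A compact numpy sketch of the bug this hunk fixes in getOrientedBoudingBox: the box position should be the geometric center (the midpoint of the min/max extents in the box-aligned frame, mapped back to world coordinates), not the point centroid, which drifts toward the densely sampled side of the cluster. The 2-D data and variable names are illustrative, not the C++ ones.

import numpy as np

rng = np.random.default_rng(0)
# Unevenly sampled rectangle: dense on one end, sparse on the other
dense = rng.uniform([0.0, 0.0], [1.0, 2.0], size=(400, 2))
sparse = rng.uniform([1.0, 0.0], [4.0, 2.0], size=(40, 2))
points = np.vstack([dense, sparse])

centroid = points.mean(axis=0)

# Principal axes of the cluster
_, _, Vt = np.linalg.svd(points - centroid)
aligned = (points - centroid) @ Vt.T              # coordinates in the box-aligned frame
min_pt, max_pt = aligned.min(axis=0), aligned.max(axis=0)
size = max_pt - min_pt                            # box extents along the principal axes
center = centroid + 0.5 * (min_pt + max_pt) @ Vt  # geometric box center back in world coordinates

print('centroid:  ', centroid)  # pulled toward the dense end
print('box center:', center)    # true middle of the oriented box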

