More metrics and adjustments (#61)
* Add frame for debug

* Add flip option

* Update ROI

* Update launch for metrics

* Small updates to parameters

* default uses detection from image pipeline

* fix height

* fix box

* cleanup

* default no warp

* update to track height, but not use it yet

* Update params

* Update sort

* Update sort

* fix include

* Fix diagram

* Add system_time option

* Fix name format

* Add params

* Update params

* Add bags

* Add dbscan option

* Add l-shape fitting

* Tune for better HOTA

* Updated params

* Example for full 3d track

* Add config

* update param

* Add timing code

* Add new bag
adrian-soch authored Nov 24, 2023
1 parent ca6f1f8 commit 1aa9a20
Showing 27 changed files with 598 additions and 111 deletions.
161 changes: 161 additions & 0 deletions Docs/Archive/obj_tracker_utils.py
@@ -0,0 +1,161 @@
"""
* @file obj_tracker_utils.py
* @brief Utils for 2D Object tracking
* @author Adrian Sochaniwsky (sochania@mcmaster.ca)
* @version 0.1
* @date 2023-05-25
*
* @copyright Copyright (c) 2023
"""

import numpy as np

from vision_msgs.msg import ObjectHypothesisWithPose
from vision_msgs.msg import Detection3D, Detection3DArray

from tf_transformations import quaternion_from_euler


def createDetection3DArr(tracks, header, isOBB) -> Detection3DArray:
"""Convert tracker output to message for publishing
Args:
        tracks (ndarray): if isOBB: array of the form [[x, y, x*y, w/h, angle, ...], ...]
                          otherwise: array of the form [[x1, y1, x2, y2, id], ...]
Returns:
Detection3DArray:
"""
out = Detection3DArray()
out.header = header

if isOBB:
for trk in tracks:
det = Detection3D()
result = ObjectHypothesisWithPose()
result.hypothesis.score = trk[7]
det.results.append(result)

z_size = trk[6]/2.0 + trk[2]
z_center = z_size/2.0

if z_size <= 1.4:
z_size = 1.85

y_len = np.sqrt(trk[3]*trk[4])
x_len = trk[3]/y_len

det.bbox.size.x = x_len
det.bbox.size.y = y_len
det.bbox.size.z = z_size

det.bbox.center.position.x = trk[0]
det.bbox.center.position.y = trk[1]
det.bbox.center.position.z = z_center

q = quaternion_from_euler(0, 0, trk[5])

det.bbox.center.orientation.x = q[0]
det.bbox.center.orientation.y = q[1]
det.bbox.center.orientation.z = q[2]
det.bbox.center.orientation.w = q[3]

out.detections.append(det)
else:
for trk in tracks:
det = Detection3D()
result = ObjectHypothesisWithPose()
result.hypothesis.score = trk[4]
det.results.append(result)

x_len = trk[2] - trk[0]
y_len = trk[3] - trk[1]

det.bbox.center.position.x = x_len/2.0 + trk[0]
det.bbox.center.position.y = y_len/2.0 + trk[1]
det.bbox.size.x = x_len
det.bbox.size.y = y_len

# Fixed height based on length
if det.bbox.size.x > 6 or det.bbox.size.y > 6:
det.bbox.size.z = 3.2
else:
det.bbox.size.z = 2.0
det.bbox.center.position.z = det.bbox.size.z/2.0

out.detections.append(det)
return out


def detection3DArray2Numpy(detection_list, isOBB):
"""
Convert vision_msgs/Detection3DArray to numpy array
Args:
detection_list (vision_msgs/Detection3DArray)
Returns:
        numpy_arr: if isOBB: float array of rows [x, y, area, size.y/size.x, yaw, height, z_center]
                   otherwise: float array of rows [x1, y1, x2, y2, score]
"""
size = 5
if isOBB:
size = 7

if len(detection_list) <= 0:
return np.empty((0, size))

# Pre-allocate numpy array
out_arr = np.empty(shape=(len(detection_list), size), dtype=float)

idx = 0

if isOBB:
for det in detection_list:
area = det.bbox.size.y*det.bbox.size.x
angle = euler_from_quaternion(det.bbox.center.orientation)
out_arr[idx] = [det.bbox.center.position.x, det.bbox.center.position.y,
area,
det.bbox.size.y/det.bbox.size.x,
angle,
det.bbox.size.z,
det.bbox.center.position.z]
idx += 1
else:
for det in detection_list:
half_x = det.bbox.size.x/2
half_y = det.bbox.size.y/2
out_arr[idx] = [
det.bbox.center.position.x - half_x,
det.bbox.center.position.y - half_y,
det.bbox.center.position.x + half_x,
det.bbox.center.position.y + half_y,
0.5]
idx += 1

return out_arr


def euler_from_quaternion(q):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
Note: only returns yaw about z axis
"""
# t0 = +2.0 * (q.w * q.x + q.y * q.z)
# t1 = +1.0 - 2.0 * (q.x * q.x + q.y * q.y)
# roll_x = np.atan2(t0, t1)

# t2 = +2.0 * (q.w * q.y - q.z * q.x)
# t2 = +1.0 if t2 > +1.0 else t2
# t2 = -1.0 if t2 < -1.0 else t2
# pitch_y = np.asin(t2)

t3 = +2.0 * (q.w * q.z + q.x * q.y)
t4 = +1.0 - 2.0 * (q.y * q.y + q.z * q.z)
yaw_z = np.arctan2(t3, t4)

return yaw_z # in radians
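A minimal usage sketch for the helpers above (assuming the module is importable as `obj_tracker_utils` and that a ROS 2 Python environment with `vision_msgs` and `numpy` is available; the detection values are illustrative, not taken from the repository):

```python
# Hypothetical example: convert a one-detection Detection3DArray into the
# oriented-bounding-box measurement matrix that the tracker consumes.
from vision_msgs.msg import Detection3D, Detection3DArray
from obj_tracker_utils import detection3DArray2Numpy  # assumes the module dir is on sys.path

det = Detection3D()
det.bbox.center.position.x = 10.0      # metres, illustrative values
det.bbox.center.position.y = -2.0
det.bbox.center.position.z = 0.9
det.bbox.size.x = 4.2                   # car-sized box
det.bbox.size.y = 1.8
det.bbox.size.z = 1.8
det.bbox.center.orientation.w = 1.0     # identity rotation -> yaw = 0.0

msg = Detection3DArray()
msg.detections.append(det)

meas = detection3DArray2Numpy(msg.detections, isOBB=True)
# One row per detection: [x, y, area, size.y/size.x, yaw, height, z_center]
print(meas.shape)   # (1, 7)
```

The reverse direction, `createDetection3DArr`, takes the tracker's output rows (the last column is used as the hypothesis score) and rebuilds a `Detection3DArray` with the same header for publishing.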
3 changes: 1 addition & 2 deletions Metrics/README.md
@@ -15,8 +15,7 @@ Since we are using a custom dataset with rooftop traffic data, the basic workflo
graph TD;
RawData(Raw data: \n rosbag file)-->Clean(Bag to images and PCDs, \n and removing un-synced frames: \n synced_image_cloud_2file.cpp);
Clean-->Bag(Convert images + PCDs to clean bag: \n synced_file2image_cloud_msgs.cpp);
Bag(Clean Data: \n processed rosbag file)-->Trans(Bag to images: \n data_tools convert node);
Bag-->Trans(Bag to images: \n data_tools convert node);
Trans-->Video(Images folder to video file: \n images2video.py);
Video-->GroundTruth(2D labeled ground truth: \n Supervisely video annotator JSON);
GroundTruth-->MOTChallenge(JSON to MOT Challenge format: \n supervisely_video_ann2MOT.py);
8 changes: 7 additions & 1 deletion Metrics/preprocessGT.py
@@ -39,7 +39,13 @@ def main(args):
# roi_points = np.array([(495, 230), (900, 230), (1910, 730), (525, 900)])

# q7_2_may10_2023_clean_short_range
roi_points = np.array([(658,305), (980,960), (1900,737), (1049,287)])
# roi_points = np.array([(658,305), (980,960), (1900,737), (1049,287)])

# oct18_r9
roi_points = np.array([(5,735),(1270,645),(1500,676),(240,1060)])

# dec7_dhd1_short
# roi_points = np.array([(835,480), (1050,1056), (1918,845), (1130,465)])

# Create a polygon object from the points using shapely
region_of_interest = Polygon(roi_points)
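The ROI here is a pixel-space polygon used to keep only ground-truth boxes inside the monitored region. A short sketch of how such a polygon can be applied (assuming `shapely` is installed; filtering on box centre points is an assumption for illustration, not a statement about how preprocessGT.py does it):

```python
# Hedged sketch: keep only boxes whose centre falls inside the ROI polygon.
from shapely.geometry import Point, Polygon

roi_points = [(5, 735), (1270, 645), (1500, 676), (240, 1060)]  # oct18_r9 ROI from above
region_of_interest = Polygon(roi_points)

# (x, y, w, h) boxes in pixels; values are illustrative
boxes = [(600, 800, 40, 80), (1800, 100, 40, 80)]

kept = [b for b in boxes
        if region_of_interest.contains(Point(b[0] + b[2] / 2.0, b[1] + b[3] / 2.0))]
print(kept)   # only the first box lies inside the oct18_r9 region
```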
@@ -104,7 +104,7 @@ def __init__(self):
self.curr_frame, self.prev_frame = [None], [None]

@torch.no_grad()
def update(self, im, return_image=False, detection_only=False):
def update(self, im, return_image=False, detection_only=True):
"""
Runs the detection and tracking, must be called for each image.
        Developer must ensure loop speed is sufficient for the desired output frequency
6 changes: 5 additions & 1 deletion camera_pipeline/camera_pipeline/camera_processing_node.py
@@ -58,6 +58,8 @@ def __init__(self):
# Used to convert between ROS and OpenCV images
self.br = CvBridge()

self.frame_count = 0

self.tracker = vision_track.VisionTracker()
self.get_logger().info('Vision Tracker created.')

@@ -84,8 +86,10 @@ def camera_callback(self, msg):
out_msg.header = msg.header
self.result_pub_.publish(out_msg)

self.frame_count += 1

t5 = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)
self.get_logger().info(f'Time (msec): {(t5-t1)*1000:.1f}')
self.get_logger().info(f'Frame count: {self.frame_count}, Time (msec): {(t5-t1)*1000:.1f}')

@staticmethod
def createDetection2DArr(tracks, header) -> Detection2DArray:
12 changes: 6 additions & 6 deletions data_tools/launch/file2bag.launch.py
@@ -15,18 +15,17 @@
from launch.actions import TimerAction

ABS_PATH_TO_ROSBAGS = '/home/adrian/dev/bags/cleaned_bags/'
IMAGE_FOLDER = '/home/adrian/dev/bags/may10_q7_rebag/images'
PCD_FOLDER = '/home/adrian/dev/bags/may10_q7_rebag/pcds'
IMAGE_FOLDER = '/home/adrian/dev/bags/cleaned_bags/dec7_dhd1_rebag/images'
PCD_FOLDER = '/home/adrian/dev/bags/cleaned_bags/dec7_dhd1_rebag/pcds'

current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
folder_name = ABS_PATH_TO_ROSBAGS + '/' + 'may10_q7_clean_rate_test'
folder_name = ABS_PATH_TO_ROSBAGS + '/' + 'dec7_dhd1_clean'

use_system_time = True

START_DELAY = 1.5
PUBLISH_RATE = 10.0

# ABS_PATH_TO_ROSBAGS + '/' + \
# current_time + '_' + PCD_FOLDER.split('/')[-3]


def generate_launch_description():

@@ -45,6 +44,7 @@ def generate_launch_description():
{'publish_rate': PUBLISH_RATE},
{'image_folder': IMAGE_FOLDER},
{'pointcloud_folder': PCD_FOLDER},
{'use_system_time': use_system_time},
]
)

4 changes: 3 additions & 1 deletion data_tools/src/convert2pcd.cpp
@@ -56,10 +56,12 @@ class PointCloud2ToPCDConverter : public rclcpp::Node

// Save PCD file with an increasing file name
std::stringstream ss;
ss << file_path_ << std::setfill('0') << std::setw(6) << file_count_++ << msg->header.stamp.sec << "_"
ss << file_path_ << std::setfill('0') << std::setw(6) << file_count_ << "_" << msg->header.stamp.sec << "_"
<< std::setfill('0') << std::setw(9) << msg->header.stamp.nanosec << ".pcd";
std::string file_name = ss.str();

file_count_ += 1;

pcl::io::savePCDFileBinary(file_name, *pcl_cloud);
RCLCPP_INFO(this->get_logger(), "Saved PCD file to %s", file_name.c_str());
}
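With this change each saved cloud is named `<6-digit count>_<sec>_<9-digit nanosec>.pcd`, which keeps files in capture order while preserving the original stamp. A small sketch of parsing that stamp back out (a hypothetical helper, assuming exactly this naming scheme):

```python
# Hypothetical helper: recover (sec, nanosec) from a PCD file name written by
# convert2pcd.cpp, i.e. "<count>_<sec>_<nanosec>.pcd".
from pathlib import Path

def stamp_from_pcd_name(path: str) -> tuple:
    stem = Path(path).stem                 # e.g. "000042_1700000000_123456789"
    _count, sec, nanosec = stem.split("_")
    return int(sec), int(nanosec)

print(stamp_from_pcd_name("/bags/pcds/000042_1700000000_123456789.pcd"))
# -> (1700000000, 123456789)
```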
33 changes: 22 additions & 11 deletions data_tools/src/synced_file2image_cloud_msgs.cpp
@@ -23,6 +23,9 @@ class ImagePointCloudPublisher : public rclcpp::Node
this->get_parameter("image_folder", image_folder_);
this->get_parameter("pointcloud_folder", pointcloud_folder_);

this->declare_parameter<bool>("use_system_time", false);
this->get_parameter("use_system_time", use_system_time_);

// Get the frame id for image and point cloud messages
this->declare_parameter<std::string>("frame_id", "map");
this->get_parameter("frame_id", frame_id_);
@@ -80,16 +83,23 @@
sensor_msgs::msg::PointCloud2::SharedPtr pointcloud_msg(new sensor_msgs::msg::PointCloud2);
pcl::toROSMsg(*cloud, *pointcloud_msg);

std::vector<int> image_times = get_ros_time_from_name(image_files[file_index_]);
std::vector<int> cloud_times = get_ros_time_from_name(pointcloud_files[file_index_]);

// Set the frame id and timestamp for both messages
image_msg->header.frame_id = frame_id_;
image_msg->header.stamp.sec = image_times[0];
image_msg->header.stamp.nanosec = image_times[1];
pointcloud_msg->header.frame_id = frame_id_;
pointcloud_msg->header.stamp.sec = cloud_times[0];
pointcloud_msg->header.stamp.nanosec = cloud_times[1];
image_msg->header.frame_id = frame_id_;
pointcloud_msg->header.frame_id = frame_id_;

if(true == use_system_time_) {
image_msg->header.stamp = this->now();
pointcloud_msg->header.stamp = this->now();
} else {
std::vector<int> image_times = get_ros_time_from_name(image_files[file_index_]);
std::vector<int> cloud_times = get_ros_time_from_name(pointcloud_files[file_index_]);

image_msg->header.stamp.sec = image_times[0];
image_msg->header.stamp.nanosec = image_times[1];

pointcloud_msg->header.stamp.sec = cloud_times[0];
pointcloud_msg->header.stamp.nanosec = cloud_times[1];
}

// Publish the image and point cloud messages
image_pub_->publish(*image_msg);
@@ -156,7 +166,7 @@ class ImagePointCloudPublisher : public rclcpp::Node
std::getline(ss, temp, '.');
nsec = std::stoi(temp);

return std::vector<int> {sec, nsec};
return std::vector<int> { sec, nsec };
}

rclcpp::Publisher<sensor_msgs::msg::Image>::SharedPtr image_pub_;
@@ -165,7 +175,8 @@ class ImagePointCloudPublisher : public rclcpp::Node
std::string image_folder_;
std::string pointcloud_folder_;
std::string frame_id_;
size_t file_index_ = 0;
size_t file_index_ { 0 };
bool use_system_time_ { false };
};

int main(int argc, char** argv)
20 changes: 15 additions & 5 deletions data_tools/src/synced_image_cloud_2file.cpp
@@ -25,7 +25,7 @@ using namespace message_filters;
class SyncedSubscriberNode : public rclcpp::Node
{
public:
SyncedSubscriberNode(const std::string& image_topic, const std::string& cloud_topic, const std::string& root_path)
SyncedSubscriberNode(const std::string& image_topic, const std::string& cloud_topic, const std::string& root_path, const bool flip)
: Node("synced_subscriber_node"), pcd_path_(root_path)
{
cloud_subscriber_ = std::make_shared<Subscriber<sensor_msgs::msg::PointCloud2> >(this, cloud_topic);
@@ -50,6 +50,8 @@
pcd_path_ += "/pcds/";
path = pcd_path_;
std::filesystem::create_directories(path);

flip_ = flip;
}

private:
@@ -64,7 +66,9 @@
im_prt = cv_bridge::toCvCopy(image_msg, image_msg->encoding);

        // flip image because the webcam is upside down
cv::flip(im_prt->image, im_prt->image, -1);
if(true == flip_) {
cv::flip(im_prt->image, im_prt->image, -1);
}

// Generate a file name for the image based on the current time
std::stringstream ss;
@@ -98,6 +102,7 @@
} // imageCloudCallback

int count_ { 0 };
bool flip_;
std::string img_path_, pcd_path_;
cv_bridge::CvImagePtr im_prt;

@@ -111,14 +116,19 @@
{
rclcpp::init(argc, argv);

if(argc != 4) {
if(!((argc == 4) || (argc == 5))) {
RCLCPP_ERROR(rclcpp::get_logger(
"synced_subscriber_node"), "Usage: %s <image_topic_name> <cloud_topic_name> <file_path>", argv[0]);
"synced_subscriber_node"), "Usage: %s <image_topic_name> <cloud_topic_name> <file_path> <Optional: flip_image>", argv[0]);
return 1;
}

bool flip {false};
if(argc == 5) {
flip = true;
}

// Create a ROS 2 node and pass in the image and point cloud topic names as arguments
auto node = std::make_shared<SyncedSubscriberNode>(argv[1], argv[2], argv[3]);
auto node = std::make_shared<SyncedSubscriberNode>(argv[1], argv[2], argv[3], flip);
rclcpp::spin(node);
rclcpp::shutdown();
