diff --git a/GEMstack/knowledge/calibration/calib_util.py b/GEMstack/knowledge/calibration/calib_util.py new file mode 100644 index 000000000..9b4a2618d --- /dev/null +++ b/GEMstack/knowledge/calibration/calib_util.py @@ -0,0 +1,106 @@ +#%% +import yaml +from yaml import SafeDumper +import numpy as np +import cv2 +def represent_flow_style_list(dumper, data): + return dumper.represent_sequence(yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, data, flow_style=True) +SafeDumper.add_representer(list, represent_flow_style_list) +#%% +class FlowListDumper(yaml.Dumper): + def represent_list(self, data): + return self.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True) + +def load_ex(path,mode,ref='rear_axle_center'): + with open(path) as stream: + y = yaml.safe_load(stream) + assert y['reference'] == ref + if mode == 'matrix': + ret = np.eye(4) + ret[0:3,0:3] = y['rotation'] + ret[:-1,3] = y['position'] + return ret + elif mode == 'tuple': + return np.array(y['rotation']),np.array(y['position']) + + +def save_ex(path,rotation=None,translation=None,matrix=None,ref='rear_axle_center'): + if matrix is not None: + rot = matrix[0:3,0:3] + trans = matrix[0:3,3] + save_ex(path,rot,trans,ref=ref) + return + ret = {} + ret['reference'] = ref + ret['rotation'] = rotation + ret['position'] = translation + for i in ret: + if type(ret[i]) == np.ndarray: + ret[i] = ret[i].tolist() + print(yaml.dump(ret,Dumper=SafeDumper,default_flow_style=False)) + with open(path,'w') as stream: + yaml.dump(ret,stream,Dumper=SafeDumper,default_flow_style=False) + +def load_in(path,mode='matrix',return_distort=False): + with open(path) as stream: + y = yaml.safe_load(stream) + if 'skew' not in y: y['skew'] = 0 + if 'distort' not in y: y['distort'] = [0,0,0,0,0] + if mode == 'matrix': + ret = np.zeros((3,3)) + ret[0,0],ret[1,1] = y['focal'] + ret[2,2] = 1. 
+ ret[0:2,2] = y['center'] + ret[0,1] = y['skew'] + if return_distort: + return ret,np.array(y['distort']) + else: + return ret + elif mode == 'tuple': + return {'focal':np.array(y['focal']), + 'center':np.array(y['center']), + 'skew':np.array(y['skew']), + 'distort':np.array(y['distort'])} + +from collections.abc import Iterable +def save_in(path,focal=None,center=None,skew=0,distort=[0.0]*5,matrix=None): + if matrix is not None: + focal = matrix.diagonal()[0:2] + skew = matrix[0,1] + center = matrix[0:2,2] + save_in(path,focal,center,skew,distort) + return + ret = {} + ret['focal'] = focal + ret['center'] = center + ret['skew'] = skew + assert len(distort) in [4,5] + ret['distort'] = distort + if len(ret['distort']) == 4: + ret['distort'] = list(ret['distort'])+[0.0] + for i in ret: + if type(ret[i]) == np.ndarray: + ret[i] = ret[i].tolist() + if isinstance(ret[i],Iterable): + ret[i] = [*map(float,ret[i])] + print(yaml.dump(ret,Dumper=SafeDumper,default_flow_style=False)) + with open(path,'w') as stream: + yaml.dump(ret,stream,Dumper=SafeDumper,default_flow_style=False) + +def undistort_image(image, camera_matrix, distortion_coefficients): + h, w = image.shape[:2] + newK, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, distortion_coefficients, (w,h), 1, (w,h)) + image = cv2.undistort(image, camera_matrix, distortion_coefficients, None, newK) + return image, newK + + +#%% +if __name__ == "__main__": + #%% + rot, trans = load_ex('/mnt/GEMstack/GEMstack/knowledge/calibration/gem_e4_ouster.yaml',mode='tuple') + save_ex('/tmp/test.yaml',rot,trans) + #%% + focal = [1,2,3] + center = [400,500] + save_in('/tmp/test.yaml',focal,center) + load_in('/tmp/test.yaml',mode='tuple') diff --git a/GEMstack/knowledge/calibration/gem_e4.yaml b/GEMstack/knowledge/calibration/gem_e4.yaml index d0954dc06..ef1f4f30e 100644 --- a/GEMstack/knowledge/calibration/gem_e4.yaml +++ b/GEMstack/knowledge/calibration/gem_e4.yaml @@ -4,4 +4,18 @@ rear_axle_height: 0.33 # height of rear axle center above flat ground gnss_location: [1.10,0,1.62] # meters, taken from https://github.com/hangcui1201/POLARIS_GEM_e2_Real/blob/main/vehicle_drivers/gem_gnss_control/scripts/gem_gnss_tracker_stanley_rtk.py. Note conflict with pure pursuit location? 
gnss_yaw: 0.0 # radians top_lidar: !include "gem_e4_ouster.yaml" -front_camera: !include "gem_e4_oak.yaml" +front_camera: + extrinsics: !include "gem_e4_oak.yaml" + intrinsics: !include "gem_e4_oak_in.yaml" +front_right_camera: + extrinsics: !include "gem_e4_fr.yaml" + intrinsics: !include "gem_e4_fr_in.yaml" +front_left_camera: + extrinsics: !include "gem_e4_fl.yaml" + intrinsics: !include "gem_e4_fl_in.yaml" +rear_right_camera: + extrinsics: !include "gem_e4_rr.yaml" + intrinsics: !include "gem_e4_rr_in.yaml" +rear_left_camera: + extrinsics: !include "gem_e4_rl.yaml" + intrinsics: !include "gem_e4_rl_in.yaml" diff --git a/GEMstack/knowledge/calibration/gem_e4_fl.yaml b/GEMstack/knowledge/calibration/gem_e4_fl.yaml new file mode 100644 index 000000000..9e10bc4ed --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_fl.yaml @@ -0,0 +1,5 @@ +position: [2.008491967178938, 0.9574436688609637, 1.7222845229507735] +reference: rear_axle_center +rotation: [[0.7229102844527417, -0.13938889438297952, 0.6767358840457229], [-0.6904150547378912, + -0.18396833469067211, 0.6996304053015531], [0.026977264941612008, -0.9729986577348562, + -0.22922879230680995]] diff --git a/GEMstack/knowledge/calibration/gem_e4_fl_in.yaml b/GEMstack/knowledge/calibration/gem_e4_fl_in.yaml new file mode 100644 index 000000000..427fec5c1 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_fl_in.yaml @@ -0,0 +1,5 @@ +center: [971.5122150421694, 601.7847095069886] +distort: [-0.2625420437513607, 0.1425651774165483, -0.0004946279626072071, -0.00033457504102070386, + -0.042732740327368145] +focal: [1183.2337731693713, 1182.3831532373445] +skew: 0 diff --git a/GEMstack/knowledge/calibration/gem_e4_fr.yaml b/GEMstack/knowledge/calibration/gem_e4_fr.yaml new file mode 100644 index 000000000..00a391c92 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_fr.yaml @@ -0,0 +1,5 @@ +position: [1.8861563355156226, -0.7733611068168774, 1.6793040225335112] +reference: rear_axle_center +rotation: [[-0.7168464770690616, -0.10046018208578958, 0.6899557088168523], [-0.6970911725372957, + 0.12308618950445319, -0.7063382243117325], [-0.01396515249660048, -0.9872981017750231, + -0.15826380744561577]] diff --git a/GEMstack/knowledge/calibration/gem_e4_fr_in.yaml b/GEMstack/knowledge/calibration/gem_e4_fr_in.yaml new file mode 100644 index 000000000..6ae475334 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_fr_in.yaml @@ -0,0 +1,5 @@ +center: [966.4326452411585, 608.5803255934914] +distort: [-0.2701363254469883, 0.16439325523243875, -0.001607207824773341, -7.412467081891699e-05, + -0.06199397580030171] +focal: [1176.2554468073797, 1175.1456876174707] +skew: 0 diff --git a/GEMstack/knowledge/calibration/gem_e4_oak.yaml b/GEMstack/knowledge/calibration/gem_e4_oak.yaml index cb9a6c0d0..ae8d1fce7 100644 --- a/GEMstack/knowledge/calibration/gem_e4_oak.yaml +++ b/GEMstack/knowledge/calibration/gem_e4_oak.yaml @@ -1,3 +1,5 @@ -reference: rear_axle_center # rear axle center -rotation: [[0,0,1],[-1,0,0],[0,-1,0]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated -center_position: [1.78,0,1.58] # meters, center camera, guesstimated +position: [1.8680678362969751, 0.03483728869549903, 1.6545932338230158] +reference: rear_axle_center +rotation: [[0.020064651878799838, -0.013111205776054045, 0.99971271174677], [-0.9997929081548379, + 0.0031358785499412617, 0.020107388418472497], [-0.003398609756043868, -0.9999091271454714, + -0.013045570240818732]] diff --git a/GEMstack/knowledge/calibration/gem_e4_oak_in.yaml 
b/GEMstack/knowledge/calibration/gem_e4_oak_in.yaml new file mode 100644 index 000000000..df84385be --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_oak_in.yaml @@ -0,0 +1,2 @@ +center: [573.37109375, 363.700927734375] +focal: [684.8333129882812, 684.6096801757812] diff --git a/GEMstack/knowledge/calibration/gem_e4_ouster.yaml b/GEMstack/knowledge/calibration/gem_e4_ouster.yaml index 5987373a6..47897f62a 100644 --- a/GEMstack/knowledge/calibration/gem_e4_ouster.yaml +++ b/GEMstack/knowledge/calibration/gem_e4_ouster.yaml @@ -1,3 +1,5 @@ reference: rear_axle_center # rear axle center -position: [1.10,0,2.03] # meters, calibrated by Hang's watchful eye -rotation: [[1,0,0],[0,1,0],[0,0,1]] #rotation matrix mapping lidar frame to vehicle frame \ No newline at end of file +position: [1.10, 0.03773583, 1.95320223] # meters, calibrated by Hang's watchful eye +rotation: [[0.99939639, 0.02547917, 0.023615], + [-0.02530848, 0.99965156, -0.00749882], + [-0.02379784, 0.00689664, 0.999693]] #rotation matrix mapping lidar frame to vehicle frame \ No newline at end of file diff --git a/GEMstack/knowledge/calibration/gem_e4_rl.yaml b/GEMstack/knowledge/calibration/gem_e4_rl.yaml new file mode 100644 index 000000000..786036fc8 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_rl.yaml @@ -0,0 +1,5 @@ +position: [0.0898392124024201, 0.71876803481624, 1.71024199833245] +reference: rear_axle_center +rotation: [[0.6847850928670124, 0.19803293816635642, -0.7013218462220591], [0.728026894383745, + -0.14318896257364072, 0.6704280439025839], [0.03234528777238433, -0.9696802959730001, + -0.2422269719924619]] diff --git a/GEMstack/knowledge/calibration/gem_e4_rl_in.yaml b/GEMstack/knowledge/calibration/gem_e4_rl_in.yaml new file mode 100644 index 000000000..6cfa24337 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_rl_in.yaml @@ -0,0 +1,5 @@ +center: [953.302889408274, 608.1966398872765] +distort: [-0.2522996862206216, 0.12482113115174773, -0.0005993692936397102, -0.00017949453391219192, + -0.03499498178003368] +focal: [1181.6177321982138, 1180.0783789769903] +skew: 0 diff --git a/GEMstack/knowledge/calibration/gem_e4_rr.yaml b/GEMstack/knowledge/calibration/gem_e4_rr.yaml new file mode 100644 index 000000000..8d2dfc105 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_rr.yaml @@ -0,0 +1,5 @@ +position: [0.11419591502518789, -0.6896311735924415, 1.711181163333824] +reference: rear_axle_center +rotation: [[-0.7359657309159472, 0.15986191414426415, -0.6578743127098735], [0.6768157805459531, + 0.14993386619459964, -0.7207220233709469], [-0.016578363047300385, -0.9756864271752846, + -0.21854325362408236]] diff --git a/GEMstack/knowledge/calibration/gem_e4_rr_in.yaml b/GEMstack/knowledge/calibration/gem_e4_rr_in.yaml new file mode 100644 index 000000000..31dd44239 --- /dev/null +++ b/GEMstack/knowledge/calibration/gem_e4_rr_in.yaml @@ -0,0 +1,5 @@ +center: [956.2663906909728, 569.2039945552984] +distort: [-0.25040910859151444, 0.1109210921906881, -0.00041247665414900384, 0.0008205455176671751, + -0.026395952816984845] +focal: [1162.3787554048329, 1162.855381183851] +skew: 0 diff --git a/GEMstack/knowledge/calibration/make_camera_lidar_yaml.py b/GEMstack/knowledge/calibration/make_camera_lidar_yaml.py new file mode 100755 index 000000000..ba3b098b4 --- /dev/null +++ b/GEMstack/knowledge/calibration/make_camera_lidar_yaml.py @@ -0,0 +1,48 @@ +import yaml +import numpy as np +from calib_util import load_ex, load_in + +# +# THIS FILE SHOULD BE RUN FROM ITS LOCAL DIRECTORY FOR THE PATHS TO 
WORK
+#
+
+# Destination file name
+output_file = 'gem_e4_perception_cameras.yaml'
+
+# Collect names of all sensors and their associated extrinsic/intrinsic files
+camera_files = {'front': ['gem_e4_oak.yaml', 'gem_e4_oak_in.yaml'],
+                'front_right': ['gem_e4_fr.yaml', 'gem_e4_fr_in.yaml'],
+                'front_left': ['gem_e4_fl.yaml', 'gem_e4_fl_in.yaml'],
+                'back_right': ['gem_e4_rr.yaml', 'gem_e4_rr_in.yaml'],
+                'back_left': ['gem_e4_rl.yaml', 'gem_e4_rl_in.yaml']}
+lidar_file = 'gem_e4_ouster.yaml'
+
+# Initialize variables
+output_dict = {'cameras': {}}
+T_lidar_to_vehicle = load_ex(lidar_file, 'matrix')
+
+# Collect data for all cameras
+for camera in camera_files:
+    # Load from files
+    ex_file = camera_files[camera][0]
+    in_file = camera_files[camera][1]
+    T_camera_to_vehicle = load_ex(ex_file, 'matrix')
+    K, D = load_in(in_file, 'matrix', return_distort=True)
+
+    # Calculate necessary values
+    T_lidar_to_camera = np.linalg.inv(T_camera_to_vehicle) @ T_lidar_to_vehicle
+
+    # Store in the proper format
+    camera_dict = {}
+    camera_dict['K'] = K
+    camera_dict['D'] = D
+    camera_dict['T_l2c'] = T_lidar_to_camera
+    camera_dict['T_l2v'] = T_lidar_to_vehicle
+    for key in camera_dict:
+        if type(camera_dict[key]) == np.ndarray:
+            camera_dict[key] = camera_dict[key].tolist()
+    output_dict['cameras'][camera] = camera_dict
+
+# Write to file
+with open(output_file,'w') as stream:
+    yaml.safe_dump(output_dict, stream)
\ No newline at end of file
diff --git a/GEMstack/offboard/calibration/.gitignore b/GEMstack/offboard/calibration/.gitignore
new file mode 100644
index 000000000..c20c2ab73
--- /dev/null
+++ b/GEMstack/offboard/calibration/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+
diff --git a/GEMstack/offboard/calibration/README.md b/GEMstack/offboard/calibration/README.md
new file mode 100644
index 000000000..10b81907e
--- /dev/null
+++ b/GEMstack/offboard/calibration/README.md
@@ -0,0 +1,231 @@
+## Table of Contents
+- [Pipeline](#pipeline)
+- [img2pc.py](#img2pcpy) - Camera-to-LiDAR extrinsic calibration
+- [test_transforms.py](#test_transformspy) - Manual tuning of calibrations
+- [capture_ouster_oak.py](#capture_ouster_oakpy) - Sensor data capture script
+- [camera_info.py](#camera_infopy) - Intrinsic retrieval from hardware
+- [get_intrinsic_by_chessboard.py](#get_intrinsic_by_chessboardpy) - Intrinsic calibration using chessboard
+- [get_intrinsic_by_SfM.py](#get_intrinsic_by_sfmpy) - Intrinsic calibration using SfM
+- [undistort_images.py](#undistort_imagespy) - Create rectified copies of distorted images
+---
+
+# Pipeline
+**Data collection**
+
+Use the `capture_ouster_oak.py` script to collect a series of scans combining both camera images and lidar pointclouds. The two sensors are not activated at the same time, so it is best to stay in place for a few seconds at each position to ensure you get aligned scans.
+
+Some cameras may have calibrated hardware: use the `camera_info.py` script to extract their intrinsics if needed. An output containing all zeros means the hardware is not calibrated, so you will need to calibrate it yourself.
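+
+For a quick sanity check, a captured scan pair can be loaded back directly. A minimal sketch (the file names follow the capture script's `<sensor><index>` convention; the paths here are illustrative):
+
+```python
+import cv2
+import numpy as np
+
+# Load one synchronized scan saved by capture_ouster_oak.py
+img = cv2.imread('data/oak1.png')
+pc = np.load('data/ouster1.npz')['arr_0']   # N x 3 lidar points
+pc = pc[~np.all(pc == 0, axis=1)]           # drop invalid all-zero returns
+print(img.shape, pc.shape)
+```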
+
+**Intrinsic Calibration**
+
+There are two ways to calibrate the intrinsics, depending on what data you have.
+
+To use the `get_intrinsic_by_chessboard.py` script, collect a series of images with a large chessboard using either the data collection scripts or a rosbag. Select images where the chessboard appears at different points in the camera frame, at different distances (including filling the entire frame), and at different angles. The script detects internal corners where four squares meet, so the extreme edge of the chessboard does not need to be in frame.
+
+To use the `get_intrinsic_by_SfM.py` script, prepare a set of images recorded from the same camera during a continuous movement, and follow the instructions in [get_intrinsic_by_SfM.py](#get_intrinsic_by_sfmpy).
+
+The `undistort_images.py` script can then be used to rectify a set of images using the calibrated intrinsics, either for evaluation or for use in other applications.
+
+**Extrinsic Calibration**
+
+These scripts import a package from another folder in GEMstack: as such, you may need to add it to your Python path. The scripts import modules as `GEMstack.GEMstack...`, so on Linux run `export PYTHONPATH=<PATH_TO_PARENT>:$PYTHONPATH`, replacing `<PATH_TO_PARENT>` with the absolute path to the directory containing the GEMstack repository.
+
+The `img2pc.py` file contains the main part of the extrinsic calibration process. Select a synchronized camera image and lidar pointcloud to align, ideally containing features that are easy to detect in both, such as boards or signs with corners. Alignment can be done with 4 feature pairs (which must be coplanar) or 6+ points. The first screen will ask you to select points on the image, and will close on its own once *n_features* points are selected. The second screen will ask you to select points in the point cloud, and will need to be closed manually once exactly *n_features* points are selected, or it will prompt you again. The extrinsic matrices will then be displayed, and if an *out_path* is provided they will also be saved.
+
+The `test_transforms.py` file can then be used to manually fine-tune the calculated extrinsics. Use the sliders to change the translation and rotation so that the lidar points project onto the image more accurately.
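+
+All of the tools below read and write their calibration yaml files through `GEMstack/knowledge/calibration/calib_util.py`. A minimal sketch of the round trip (the paths are illustrative):
+
+```python
+import numpy as np
+from GEMstack.GEMstack.knowledge.calibration.calib_util import load_ex, save_ex, load_in
+
+# Extrinsics: a 4x4 homogeneous transform from the sensor frame to the vehicle frame
+T_cam_to_vehicle = load_ex('gem_e4_oak.yaml', mode='matrix')
+save_ex('/tmp/oak_copy.yaml', matrix=T_cam_to_vehicle)
+
+# Intrinsics: a 3x3 camera matrix K plus distortion coefficients
+K, distort = load_in('gem_e4_oak_in.yaml', mode='matrix', return_distort=True)
+print(T_cam_to_vehicle, K, distort, sep='\n')
+```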
+
+## img2pc.py
+**Camera-to-LiDAR Extrinsic Calibration Tool**
+
+Compute camera extrinsic parameters by manually selecting corresponding features in an image and point cloud.
+
+**Note**: On the image prompt, click n points and the window closes itself. On the point cloud prompt, right click n points and close the window manually.
+
+### Usage
+```bash
+python3 img2pc.py \
+    --img_path IMG_PATH \
+    --pc_path PC_PATH \
+    --img_intrinsic_path IMG_INTRINSICS_PATH \
+    [--pc_transform_path PC_TRANSFORM_PATH] \
+    [--out_path OUT_PATH] \
+    [--n_features N_FEATURES] \
+    [--no_undistort] \
+    [--show]
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `--img_path` | Input image path | .png | Yes | - |
+| `--pc_path` | Point cloud path | .npz | Yes | - |
+| `--img_intrinsic_path` | Camera intrinsics file | .yaml | Yes | - |
+| `--pc_transform_path` | LiDAR extrinsic transform | .yaml | No | Identity |
+| `--n_features` | Manual feature points | int | No | 8 |
+| `--out_path` | Output extrinsic path | .yaml | No | None |
+| `--no_undistort` | Skip undistortion of the image | - | No | False |
+| `--show` | Visualize the reprojection | - | No | False |
+
+*Using `--no_undistort` is rarely advisable, since extrinsic solving almost always performs better after undistortion.*
+
+### Example
+```bash
+root='/mnt/GEMstack'
+python3 img2pc.py \
+    --img_path $root/data/calib1/img/fl/fl16.png \
+    --pc_path $root/data/calib1/pc/ouster16.npz \
+    --pc_transform_path $root/GEMstack/knowledge/calibration/gem_e4_ouster.yaml \
+    --img_intrinsic_path $root/GEMstack/knowledge/calibration/gem_e4_oak_in.yaml \
+    --n_features 4 \
+    --out_path $root/GEMstack/knowledge/calibration/gem_e4_oak.yaml
+```
+
+## test_transforms.py
+Script used for testing and fine-tuning extrinsics.
+
+### Usage
+```bash
+python3 test_transforms.py \
+    --img_path IMG_PATH \
+    --lidar_path LIDAR_PATH \
+    --lidar_transform_path LIDAR_TRANSFORM_PATH \
+    --camera_transform_path CAMERA_TRANSFORM_PATH \
+    --img_intrinsics_path IMG_INTRINSICS_PATH \
+    [--out_path OUT_PATH] \
+    [--undistort]
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `--img_path` | Input image path | .png | Yes | - |
+| `--lidar_path` | Point cloud path | .npz | Yes | - |
+| `--lidar_transform_path` | LiDAR extrinsic transform | .yaml | Yes | - |
+| `--camera_transform_path` | Camera extrinsic transform | .yaml | Yes | - |
+| `--img_intrinsics_path` | Camera intrinsics file | .yaml | Yes | - |
+| `--out_path` | Output extrinsic path | .yaml | No | None |
+| `--undistort` | Apply the distortion coefficients before projecting | - | No | False |
+
+### Example
+```bash
+root='/mnt/GEMstack'
+python3 test_transforms.py \
+    --img_path $root/data/fl16.png \
+    --lidar_path $root/data/ouster16.npz \
+    --lidar_transform_path $root/GEMstack/knowledge/calibration/gem_e4_ouster.yaml \
+    --camera_transform_path $root/GEMstack/knowledge/calibration/gem_e4_fl.yaml \
+    --img_intrinsics_path $root/GEMstack/knowledge/calibration/gem_e4_fl_in.yaml \
+    --out_path $root/GEMstack/knowledge/calibration/gem_e4_fl.yaml \
+    --undistort
+```
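+
+For reference, the projection both of these tools perform is a standard pinhole model. A minimal sketch (a NumPy restatement of the projection step in `test_transforms.py`, not a separate utility):
+
+```python
+import numpy as np
+
+def project(points_lidar, T_lidar_to_camera, K):
+    """Project N x 3 lidar points into pixel coordinates (u, v)."""
+    pts = np.hstack([points_lidar, np.ones((len(points_lidar), 1))])  # homogeneous
+    cam = (T_lidar_to_camera @ pts.T).T[:, :3]
+    cam = cam[cam[:, 2] > 0]                       # keep points in front of the camera
+    u = K[0, 0] * cam[:, 0] / cam[:, 2] + K[0, 2]
+    v = K[1, 1] * cam[:, 1] / cam[:, 2] + K[1, 2]
+    return u, v
+```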
+
+## capture_ouster_oak.py
+Collect synchronized data from all initialized sensors on the e4 vehicle. Requires the oak camera to be running.
+
+### Usage
+```bash
+python3 capture_ouster_oak.py [FOLDER] [START_INDEX]
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `FOLDER` | Output data folder path | directory | No | data |
+| `START_INDEX` | Start index for scans | int | No | 1 |
+
+## camera_info.py
+Read camera info from ROS publishers to capture intrinsics. If the hardware is not calibrated, the subscriber will receive all zeros. Intrinsics are saved to a file named after each camera.
+
+### Usage
+```bash
+python3 camera_info.py [FOLDER]
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `FOLDER` | Output data folder path | directory | No | data |
+
+## get_intrinsic_by_chessboard.py
+Chessboard-based Intrinsic Calibration
+
+Compute camera intrinsic parameters using multiple images of a chessboard pattern.
+
+### Usage
+```bash
+python3 get_intrinsic_by_chessboard.py \
+    --img_folder_path IMG_FOLDER_PATH \
+    [--camera_name CAMERA_NAME] \
+    [--out_path OUT_PATH] \
+    [--board_width BOARD_WIDTH] \
+    [--board_height BOARD_HEIGHT]
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `--img_folder_path` | Input image folder path | directory | Yes | - |
+| `--camera_name` | Camera prefix used to identify images | string | No | empty string |
+| `--out_path` | Output intrinsic path | .yaml | No | None |
+| `--board_width` | Chessboard width (squares - 1) | int | No | 8 |
+| `--board_height` | Chessboard height (squares - 1) | int | No | 6 |
+
+
+## get_intrinsic_by_SfM.py
+
+Compute camera intrinsic parameters using Structure-from-Motion on a sequence of images.
+
+### Usage
+```bash
+python3 get_intrinsic_by_SfM.py \
+    --input_imgs INPUT_IMGS [INPUT_IMGS ...] \
+    --out_file OUT_FILE \
+    [--workspace WORKSPACE]
+```
+### Parameters
+| Parameter | Description | Required |
+|-----------|-------------|----------|
+| `--input_imgs` | Input images (glob pattern) | Yes |
+| `--workspace` | Temporary directory path (default `/tmp/colmap_tmp`) | No |
+| `--out_file` | Output .yaml path | Yes |
+
+*Note: `--workspace` lets you reuse intermediate results when continuing or redoing a previous job, which saves running time; you can clean it up afterwards. Check [colmap](https://colmap.github.io/) for more information.*
+### Example
+```bash
+root='/mnt/GEMstack'
+python3 get_intrinsic_by_SfM.py \
+    --input_imgs data/fl/images/0000[0-8][147].png \
+    --workspace /tmp/SfM_intrinsic_fl \
+    --out_file $root/GEMstack/knowledge/calibration/camera_intrinsics2/gem_e4_fl_in.yaml
+```
+
+## undistort_images.py
+Script to remove distortion from images.
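+
+Internally this wraps OpenCV's undistortion via `calib_util.undistort_image`, which also returns the updated camera matrix. A minimal sketch of the same operation (the paths are illustrative):
+
+```python
+import cv2
+from GEMstack.GEMstack.knowledge.calibration.calib_util import load_in, undistort_image
+
+K, distort = load_in('gem_e4_fr_in.yaml', mode='matrix', return_distort=True)
+img = cv2.imread('data/fr1.png')
+rect, new_K = undistort_image(img, K, distort)  # rectified image + updated intrinsics
+cv2.imwrite('data/fr1_rect.png', rect)
+```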
+
+### Usage
+```bash
+python3 undistort_images.py \
+    --img_intrinsics_path IMG_INTRINSICS_PATH \
+    --img_folder_path IMG_FOLDER_PATH \
+    --camera_name CAMERA_NAME
+```
+
+#### Parameters
+| Parameter | Description | Format | Required | Default |
+|-----------|-------------|--------|----------|---------|
+| `--img_intrinsics_path` | Camera intrinsics file | .yaml | Yes | - |
+| `--img_folder_path` | Input image folder path | directory | Yes | - |
+| `--camera_name` | Camera prefix used to identify images | string | No | empty string |
+
+### Example
+```bash
+root='/mnt/GEMstack'
+python3 undistort_images.py \
+    --img_intrinsics_path $root/GEMstack/knowledge/calibration/gem_e4_fr_in.yaml \
+    --img_folder_path $root/data \
+    --camera_name fr
+```
+
+# Credit
+Michal Juscinski, [Renjie Sun](https://github.com/rjsun06), Dev Sakaria
+
+
diff --git a/GEMstack/offboard/calibration/camera_info.py b/GEMstack/offboard/calibration/camera_info.py
new file mode 100644
index 000000000..781d10ef4
--- /dev/null
+++ b/GEMstack/offboard/calibration/camera_info.py
@@ -0,0 +1,60 @@
+# ROS Headers
+import rospy
+from sensor_msgs.msg import Image,PointCloud2, CameraInfo
+import sensor_msgs.point_cloud2 as pc2
+import ctypes
+import struct
+import pickle
+import image_geometry
+
+import numpy as np
+import os
+import time
+from functools import partial
+from GEMstack.GEMstack.knowledge.calibration.calib_util import save_in
+
+camera_info = {"oak": None, "fr": None, "fl": None, "rr": None, "rl": None, "oak_stereo": None, "oak_right": None, "oak_left": None}
+
+def info_callback(camera, info : CameraInfo):
+    global camera_info
+    camera_info[camera] = info
+
+def get_intrinsics(folder):
+    model = image_geometry.PinholeCameraModel()
+    for camera in camera_info:
+        model.fromCameraInfo(camera_info[camera])
+        print(f"Camera: {camera}\nFocal: [{model.fx()}, {model.fy()}]\nCenter: [{model.cx()}, {model.cy()}]\nDistortion: {str(model.distortionCoeffs())}")
+        save_file = input("Save this file? (y/n) ")
+        if save_file.lower() in ('y', 'yes'):
+            print("Saving")
+            save_in(path=folder + f"/{camera}.yaml", distort=model.distortionCoeffs(), matrix=model.intrinsicMatrix())
+        else:
+            print("Not saved")
+
+def main(folder='data'):
+    rospy.init_node("capture_cam_info",disable_signals=True)
+    caminfo_sub = rospy.Subscriber("/oak/rgb/camera_info", CameraInfo, partial(info_callback, "oak"))
+    stereoinfo_sub = rospy.Subscriber("/oak/stereo/camera_info", CameraInfo, partial(info_callback, "oak_stereo"))
+    rightinfo_sub = rospy.Subscriber("/oak/right/camera_info", CameraInfo, partial(info_callback, "oak_right"))
+    leftinfo_sub = rospy.Subscriber("/oak/left/camera_info", CameraInfo, partial(info_callback, "oak_left"))
+    flinfo_sub = rospy.Subscriber("/camera_fl/arena_camera_node/camera_info", CameraInfo, partial(info_callback, "fl"))
+    frinfo_sub = rospy.Subscriber("/camera_fr/arena_camera_node/camera_info", CameraInfo, partial(info_callback, "fr"))
+    rlinfo_sub = rospy.Subscriber("/camera_rl/arena_camera_node/camera_info", CameraInfo, partial(info_callback, "rl"))
+    rrinfo_sub = rospy.Subscriber("/camera_rr/arena_camera_node/camera_info", CameraInfo, partial(info_callback, "rr"))
+
+    # Wait until every camera has published an info message
+    have_all = False
+    while not have_all:
+        have_all = True
+        for camera in camera_info:
+            if camera_info[camera] is None:
+                have_all = False
+                time.sleep(0.5)
+                break
+    get_intrinsics(folder)
+
+if __name__ == '__main__':
+    import sys
+    folder = 'data'
+    if len(sys.argv) >= 2:
+        folder = sys.argv[1]
+    main(folder)
diff --git a/GEMstack/offboard/calibration/capture_ouster_oak.py b/GEMstack/offboard/calibration/capture_ouster_oak.py
new file mode 100644
index 000000000..4e38b4c0d
--- /dev/null
+++ b/GEMstack/offboard/calibration/capture_ouster_oak.py
@@ -0,0 +1,153 @@
+# ROS Headers
+import rospy
+from sensor_msgs.msg import Image,PointCloud2,NavSatFix
+import sensor_msgs.point_cloud2 as pc2
+import ctypes
+import struct
+
+# OpenCV and cv2 bridge
+import cv2
+from cv_bridge import CvBridge
+import numpy as np
+import os
+import time
+from functools import partial
+
+camera_images = {"oak": None, "fr": None, "fl": None, "rr": None, "rl": None, "fr_rect": None, "fl_rect": None, "rr_rect": None, "rl_rect": None}
+lidar_clouds = {"ouster": None, "livox": None}
+depth_images = {"depth": None}
+gnss_locations = {"nav_fix": None}
+lidar_filetype = ".npz"
+camera_filetype = ".png"
+depth_filetype = ".tif"
+gnss_filetype = ".npy"
+bridge = CvBridge()
+
+def lidar_callback(scanner, lidar : PointCloud2):
+    global lidar_clouds
+    lidar_clouds[scanner] = lidar
+
+def camera_callback(camera, img : Image):
+    global camera_images
+    camera_images[camera] = img
+
+def depth_callback(camera, img : Image):
+    global depth_images
+    depth_images[camera] = img
+
+def gnss_callback(gnss, sat_fix : NavSatFix):
+    global gnss_locations
+    gnss_locations[gnss] = sat_fix
+
+def pc2_to_numpy(pc2_msg, want_rgb = False):
+    gen = pc2.read_points(pc2_msg, skip_nans=True)
+    if want_rgb:
+        xyzpack = np.array(list(gen),dtype=np.float32)
+        if xyzpack.shape[1] != 4:
+            raise ValueError("PointCloud2 does not have rgb data")
+        xyzrgb = np.empty((xyzpack.shape[0],6))
+        xyzrgb[:,:3] = xyzpack[:,:3]
+        for i,x in enumerate(xyzpack):
+            rgb = x[3]
+            # cast float32 to int so that bitwise operations are possible
+            s = struct.pack('>f' ,rgb)
+            value = struct.unpack('>l',s)[0]
+            # you can get back the float value by the inverse operations
+            pack = ctypes.c_uint32(value).value
+            r = (pack & 0x00FF0000)>> 16
+            g = (pack & 0x0000FF00)>> 8
+            b = (pack & 0x000000FF)
+            #r,g,b values in the 0-255 range
+            xyzrgb[i,3:] = (r,g,b)
+        return xyzrgb
+    else:
+        return np.array(list(gen),dtype=np.float32)[:,:3]
+
+def clear_scan():
+    global camera_images
+    for camera in camera_images:
+        camera_images[camera] = None
+    global lidar_clouds
+    for lidar in lidar_clouds:
+        lidar_clouds[lidar] = None
+    global depth_images
+    for camera in depth_images:
+        depth_images[camera] = None
+    global gnss_locations
+    for gnss in gnss_locations:
+        gnss_locations[gnss] = None
+
+def save_scan(folder, index):
+    print("Saving scan", index)
+
+    for lidar in lidar_clouds:
+        lidar_points = lidar_clouds[lidar]
+        if lidar_points is not None:
+            lidar_fn = os.path.join(folder, lidar + str(index) + lidar_filetype)
+            pc = pc2_to_numpy(lidar_points, want_rgb=False) # convert lidar feed to numpy
+            np.savez(lidar_fn, pc)
+
+    for camera in camera_images:
+        camera_image = camera_images[camera]
+        if camera_image is not None:
+            camera_fn = os.path.join(folder, camera + str(index) + camera_filetype)
+            cv2.imwrite(camera_fn, bridge.imgmsg_to_cv2(camera_image))
+
+    for camera in depth_images:
+        depth_image = depth_images[camera]
+        if depth_image is not None:
+            depth_fn = os.path.join(folder, camera + str(index) + depth_filetype)
+            dimage = bridge.imgmsg_to_cv2(depth_image)
+            dimage_non_nan = dimage[np.isfinite(dimage)]
+            dimage = np.nan_to_num(dimage,nan=0,posinf=0,neginf=0)
+            # map depth values (expected range 0-4000) onto the full uint16 range
+            dimage = (dimage/4000*0xffff)
+            dimage = dimage.astype(np.uint16)
+            cv2.imwrite(depth_fn, dimage)
+
+    for gnss in gnss_locations:
+        location = gnss_locations[gnss]
+        if location is not None:
+            gnss_fn = os.path.join(folder, gnss + str(index) + gnss_filetype)
+            coordinates = np.array([location.latitude, location.longitude])
+            np.save(gnss_fn, coordinates)
+
+def main(folder='data',start_index=0):
+    # Initialize node and establish subscribers
+    rospy.init_node("capture_ouster_oak",disable_signals=True)
+    ouster_sub = rospy.Subscriber("/ouster/points", PointCloud2, partial(lidar_callback, "ouster"))
+    livox_sub = rospy.Subscriber("/livox/lidar", PointCloud2, partial(lidar_callback, "livox"))
+    oak_sub = rospy.Subscriber("/oak/rgb/image_raw", Image, partial(camera_callback, "oak"))
+    cam_fl_sub = rospy.Subscriber("/camera_fl/arena_camera_node/image_raw", Image, partial(camera_callback, "fl"))
+    cam_fr_sub = rospy.Subscriber("/camera_fr/arena_camera_node/image_raw", Image, partial(camera_callback, "fr"))
+    cam_rl_sub = rospy.Subscriber("/camera_rl/arena_camera_node/image_raw", Image, partial(camera_callback, "rl"))
+    cam_rr_sub = rospy.Subscriber("/camera_rr/arena_camera_node/image_raw", Image, partial(camera_callback, "rr"))
+    cam_fl_rect_sub = rospy.Subscriber("/camera_fl/arena_camera_node/image_rect_color", Image, partial(camera_callback, "fl_rect"))
+    cam_fr_rect_sub = rospy.Subscriber("/camera_fr/arena_camera_node/image_rect_color", Image, partial(camera_callback, "fr_rect"))
+    cam_rl_rect_sub = rospy.Subscriber("/camera_rl/arena_camera_node/image_rect_color", Image, partial(camera_callback, "rl_rect"))
+    cam_rr_rect_sub = rospy.Subscriber("/camera_rr/arena_camera_node/image_rect_color", Image, partial(camera_callback, "rr_rect"))
+    depth_sub = rospy.Subscriber("/oak/stereo/image_raw", Image, partial(depth_callback, "depth"))
+    gnss_sub = rospy.Subscriber("/septentrio_gnss/navsatfix", NavSatFix, partial(gnss_callback, "nav_fix"))
+
+    # Store scans
+    index = start_index
+    print(" Storing lidar point clouds as", lidar_filetype)
+    print(" Storing color images as", camera_filetype)
+    print(" Storing depth images as", depth_filetype)
+    print(" Ctrl+C to quit")
+    while True:
+        if camera_images["oak"]:
+            cv2.imshow("result",bridge.imgmsg_to_cv2(camera_images["oak"]))
+            cv2.waitKey(1)  # let the preview window refresh
+            save_scan(folder, index)
+            clear_scan()
+            index += 1
+        time.sleep(.5)
+
+if __name__ == '__main__':
+    import sys
+    folder = 'data'
+    start_index = 1
+    if len(sys.argv) >= 2:
+        folder = sys.argv[1]
+    if len(sys.argv) >= 3:
+        start_index = int(sys.argv[2])
+    main(folder,start_index)
diff --git a/GEMstack/offboard/calibration/get_intrinsic_by_SfM.py b/GEMstack/offboard/calibration/get_intrinsic_by_SfM.py
new file mode 100644
index 000000000..9bac16540
--- /dev/null
+++ b/GEMstack/offboard/calibration/get_intrinsic_by_SfM.py
@@ -0,0 +1,116 @@
+import argparse
+import os
+import shutil
+import pycolmap
+import subprocess
+from GEMstack.GEMstack.knowledge.calibration.calib_util import save_in
+
+def run_colmap_command(args):
+    result = subprocess.run(args, capture_output=True, text=True)
+    if result.returncode != 0:
+        print(f"Command failed: {' '.join(args)}")
+        print(f"Error: {result.stderr}")
+        raise RuntimeError("COLMAP command failed")
+    return result
+
+def main(input_imgs, output_dir, out, refine=True):
+    # Setup directory structure
+    workspace_dir = os.path.join(output_dir, 'sfm_workspace')
+    image_dir = os.path.join(workspace_dir, 'images')
+    os.makedirs(image_dir, exist_ok=True)
+
+    # Copy images to workspace
+    print(f"Copying images to workspace...")
+    for path in input_imgs:
+        filename = path.split('/')[-1]
+        dst = os.path.join(image_dir, filename)
+        shutil.copy(path, dst)
+
+    # Create database path
+    database_path = os.path.join(workspace_dir, 'database.db')
+    if os.path.exists(database_path):
+        os.remove(database_path)
+
+    # Feature extraction
+    print("Extracting features...")
+    # Equivalent pycolmap API call:
+    # pycolmap.extract_features(
+    #     database_path, image_dir,
+    #     camera_mode=pycolmap.CameraMode.SINGLE,
+    #     camera_model=pycolmap.CameraModelId.OPENCV
+    # )
+    run_colmap_command([
+        "colmap", "feature_extractor",
+        "--database_path", database_path,
+        "--image_path", image_dir,
+        "--ImageReader.single_camera", "1",
+        "--ImageReader.camera_model", "OPENCV",
+        "--SiftExtraction.estimate_affine_shape", "1",
+        "--SiftExtraction.domain_size_pooling", "1"
+    ])
+
+    # Feature matching
+    print("Matching features...")
+    match_options = pycolmap.SequentialMatchingOptions()
+    match_options.overlap = 2
+    pycolmap.match_sequential(
+        database_path,
+        matching_options=match_options
+    )
+    # Run SfM reconstruction
+    mapper_options = pycolmap.IncrementalPipelineOptions()
+    if refine:
+        mapper_options.ba_refine_focal_length = 1
+        mapper_options.ba_refine_principal_point = 1
+        mapper_options.ba_refine_extra_params = 1
+    else:
+        mapper_options.ba_refine_focal_length = 0
+        mapper_options.ba_refine_principal_point = 0
+        mapper_options.ba_refine_extra_params = 0
+
+    print("Running incremental SfM...")
+    output_path = os.path.join(workspace_dir, 'sparse')
+    os.makedirs(output_path, exist_ok=True)
+    reconstructions = pycolmap.incremental_mapping(
+        database_path=database_path,
+        image_path=image_dir,
+        output_path=output_path,
+        options=mapper_options
+    )
+
+    # Process results
+    if not reconstructions:
+        print("SfM failed to reconstruct the scene!")
+        return
+
+    camera = None  # holds the last calibrated camera (single-camera mode yields one)
+    print("\nCamera calibration parameters:")
+    for idx, reconstruction in reconstructions.items():
+        print(f"\nReconstruction {idx + 1}:")
+        for camera_id, camera in reconstruction.cameras.items():
+            print(f"\nCamera ID {camera_id}:")
+            print(f"Model: {camera.model}")
+            print(f"Parameters: {camera.params}")
+            print(f"Parameters info: {camera.params_info}")
+            # print(f"Focal length: {camera.focal_length}")
+            print(f"Focal length x: {camera.focal_length_x}")
+            print(f"Focal length y: {camera.focal_length_y}")
+            print(f"Principal point x: {camera.principal_point_x}")
+            print(f"Principal point y: {camera.principal_point_y}")
+    # OPENCV model params are (fx, fy, cx, cy, k1, k2, p1, p2); pad with a
+    # fifth coefficient (k3 = 0) to match the expected 5-element distortion list
+    save_in(
+        path=out,
+        focal=[camera.focal_length_x,camera.focal_length_y],
+        center=[camera.principal_point_x,camera.principal_point_y],
+        distort=list(camera.params[4:])+[0.0],
+    )
+    print("\nCalibration complete! Results saved to:", workspace_dir)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Camera calibration using SfM')
+    parser.add_argument('--input_imgs','-i', nargs='+', help='List of input imgs', required=True)
+    parser.add_argument('--workspace','-w', type=str, required=False, default= '/tmp/colmap_tmp',
+                        help='Output directory for results')
+    parser.add_argument('--out_file','-o', type=str, required=True,
+                        help='output yaml file')
+    args = parser.parse_args()
+
+    main(args.input_imgs, args.workspace, args.out_file)
\ No newline at end of file
diff --git a/GEMstack/offboard/calibration/get_intrinsic_by_chessboard.py b/GEMstack/offboard/calibration/get_intrinsic_by_chessboard.py
new file mode 100644
index 000000000..ce1dd26b0
--- /dev/null
+++ b/GEMstack/offboard/calibration/get_intrinsic_by_chessboard.py
@@ -0,0 +1,85 @@
+import numpy as np
+import cv2 as cv
+import glob
+import os
+import argparse
+
+from GEMstack.GEMstack.knowledge.calibration.calib_util import save_in
+
+def main():
+    # Collect arguments
+    parser = argparse.ArgumentParser(description='calculate intrinsics from chessboard images',
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-f', '--img_folder_path', type=str, required=True,
+                        help='Path to folder containing PNG images')
+    parser.add_argument('-c', '--camera_name', type=str, required=False,
+                        help='Name of the camera used to identify the correct images')
+    parser.add_argument('-o', '--out_path', type=str, required=False,
+                        help='Path to output yaml file for camera intrinsics')
+    parser.add_argument('-w', '--board_width', type=int, required=False,
+                        help='Width in number of internal corners of the chessboard')
+    # note: -h is reserved for --help, so the short flag here is capitalized
+    parser.add_argument('-H', '--board_height', type=int, required=False,
+                        help='Height in number of internal corners of the chessboard')
+
+    args = parser.parse_args()
+
+    # Find image files
+    folder = args.img_folder_path
+    camera = ''
+    if args.camera_name:
+        camera = args.camera_name
+    image_files = glob.glob(os.path.join(folder, camera + '*.png'))
+
+    # Determine chessboard shape
+    b_width = 8
+    if args.board_width:
+        b_width = args.board_width
+    b_height = 6
+    if args.board_height:
+        b_height = args.board_height
+
+    # The following code is derived from https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html
+
+    # termination criteria
+    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(7,5,0)
+    objp = np.zeros((b_width * b_height,3), np.float32)
+    objp[:,:2] = np.mgrid[0:b_width,0:b_height].T.reshape(-1,2)
+
+    # Arrays to store object points and image points from all the images.
+    objpoints = [] # 3d point in real world space
+    imgpoints = [] # 2d points in image plane.
+
+    for fname in image_files:
+        img = cv.imread(fname)
+        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+
+        # Find the chess board corners
+        ret, corners = cv.findChessboardCorners(gray, (b_width, b_height), None)
+
+        # If found, add object points, image points (after refining them)
+        if ret == True:
+            objpoints.append(objp)
+
+            corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
+            imgpoints.append(corners2)
+
+            # Draw and display the corners
+            cv.drawChessboardCorners(img, (b_width,b_height), corners2, ret)
+            cv.imshow('img', img)
+            cv.waitKey(500)
+
+    cv.destroyAllWindows()
+
+    # Calibrate camera
+    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
+    print(repr(mtx))
+    print(dist[0])
+
+    if args.out_path:
+        # save the camera matrix together with the estimated distortion coefficients
+        save_in(args.out_path, matrix=mtx, distort=dist[0])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/GEMstack/offboard/calibration/img2pc.py b/GEMstack/offboard/calibration/img2pc.py
new file mode 100644
index 000000000..f62f83fae
--- /dev/null
+++ b/GEMstack/offboard/calibration/img2pc.py
@@ -0,0 +1,141 @@
+import pyvista as pv
+import argparse
+import cv2
+import numpy as np
+from GEMstack.GEMstack.knowledge.calibration.calib_util import load_ex,save_ex,load_in,save_in, undistort_image
+from transform3d import Transform
+
+def pick_n_img(img,n=4):
+    corners = [] # Reset the corners list
+    def click_event(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            corners.append((x, y))
+            cv2.circle(param, (x, y), 5, (0, 255, 0), -1)
+            cv2.imshow('Image', param)
+
+    cv2.imshow('Image', img)
+    cv2.setMouseCallback('Image', click_event, img)
+
+    while True:
+        if len(corners) == n:
+            break
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            return None
+
+    cv2.destroyAllWindows()
+
+    return corners
+
+def pick_n_pc(point_cloud,n=4):
+    points = []
+    def cb(pt,*args):
+        points.append(pt)
+    # re-open the plotter until exactly n points have been picked
+    while len(points)!=n:
+        points = []
+        cloud = pv.PolyData(point_cloud)
+        plotter = pv.Plotter(notebook=False)
+        plotter.camera.position = (-20,0,20)
+        plotter.camera.focal_point = (0,0,0)
+        plotter.add_mesh(cloud, color='lightblue', point_size=5, render_points_as_spheres=True)
+        plotter.enable_point_picking(cb)
+        plotter.show()
+    return points
+
+def pc_projection(pc,T:Transform,K,img_shape) -> np.ndarray:
+    mask = ~(np.all(pc == 0, axis=1))
+    pc = pc[mask]
+
+    pc = T @ pc
+    if pc.shape[1] == 4:
+        pc = pc[:,:-1]/pc[:,[-1]]
+
+    assert pc.shape[1] == 3
+    x,y,z = pc.T
+    u = (K[0, 0] * x / z) + K[0, 2]
+    v = (K[1, 1] * y / z) + K[1, 2]
+
+    img_h, img_w, _ = img_shape
+    valid_pts = (u >= 0) & (u < img_w) & (v >= 0) & (v < img_h)
+    return u[valid_pts],v[valid_pts]
+
+def calib(args,pc,img,K,N):
+    cpoints = np.array(pick_n_img(img,N)).astype(float)
+    print(cpoints)
+
+    lpoints = np.array(pick_n_pc(pc,N))
+    print(lpoints)
+
+    # solve the PnP problem; with N == 4 the selected points must be coplanar
+    success, rvec, tvec = cv2.solvePnP(lpoints, cpoints, K, None)
+    rot, _ = cv2.Rodrigues(rvec)
+
+    T=np.eye(4)
+    T[:3, :3] = rot
+    T[:3, 3] = tvec.flatten()
+    print(T)
+
+    v2c = T
+    print('vehicle->camera:',v2c)
+    c2v = np.linalg.inv(v2c)
+    print('camera->vehicle:',c2v)
+
+    if args.out_path is not None:
+        save_ex(args.out_path,matrix=c2v)
+    return c2v
+
+def main():
+    parser = argparse.ArgumentParser(description='register image into point cloud using manual feature selection',
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-p', '--img_path', type=str, required=True,
+                        help='Path to PNG image')
+    parser.add_argument('-l', '--pc_path', type=str, required=True,
+                        help='Path to lidar NPZ point cloud')
+    parser.add_argument('-t', '--pc_transform_path', type=str, required=False,
+                        help='Path to yaml file for lidar extrinsics (defaults to identity)')
+    parser.add_argument('-i', '--img_intrinsic_path', type=str, required=True,
+                        help='Path to yaml file for image intrinsics')
+    parser.add_argument('-o', '--out_path', type=str, required=False,
+                        help='Path to output yaml file for image extrinsics')
+    parser.add_argument('-n', '--n_features', type=int, required=False, default=8,
+                        help='Number of features to select and match')
+    parser.add_argument('-u','--no_undistort', action='store_true',
+                        help='Skip undistortion and use the raw image')
+    parser.add_argument('-s', '--show', action='store_true',
+                        help='Show projected points after calibration')
+
+
+    args = parser.parse_args()
+
+    # Load data
+    N = args.n_features
+    img = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
+    pc = np.load(args.pc_path)['arr_0']
+    pc = pc[~np.all(pc == 0, axis=1)] # remove (0,0,0)'s
+
+    if not args.no_undistort:
+        K, distort = load_in(args.img_intrinsic_path,mode='matrix',return_distort=True)
+        print('applying distortion coeffs', distort)
+        img, K = undistort_image(img, K, distort)
+    else:
+        K = load_in(args.img_intrinsic_path,mode='matrix')
+
+    # transform the lidar points into the vehicle frame before solving
+    lidar_ex = np.eye(4)
+    if args.pc_transform_path:
+        lidar_ex = load_ex(args.pc_transform_path,mode='matrix')
+    pc = (lidar_ex @ np.pad(pc,((0,0),(0,1)),constant_values=1).T).T[:, :3]
+
+    c2v = calib(args,pc,img,K,N)
+    T = np.linalg.inv(c2v)
+    print(T)
+
+    if args.show:
+        u,v = pc_projection(pc,Transform(T),K,img.shape)
+        show_img = img.copy()
+        for uu,vv in zip(u.astype(int),v.astype(int)):
+            cv2.circle(show_img, (uu, vv), radius=1, color=(0, 0, 255), thickness=-1)
+        cv2.imshow("projection", show_img)
+        cv2.waitKey(0)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/GEMstack/offboard/calibration/test_transforms.py b/GEMstack/offboard/calibration/test_transforms.py
new file mode 100644
index 000000000..671578c14
--- /dev/null
+++ b/GEMstack/offboard/calibration/test_transforms.py
@@ -0,0 +1,224 @@
+
+import numpy as np
+import cv2 as cv
+import matplotlib.pyplot as plt
+import argparse
+from scipy.spatial.transform import Rotation as R
+from matplotlib.widgets import Slider
+from GEMstack.GEMstack.knowledge.calibration.calib_util import load_ex, load_in, save_ex, undistort_image
+
+x_rot = y_rot = z_rot = x_trans = y_trans = z_trans = None
+
+def project_points(T_lidar_to_camera, lidar_homogeneous, K, shape):
+    # Transform LiDAR points to Camera Frame
+    lidar_points_camera = (T_lidar_to_camera @ lidar_homogeneous.T).T  # (N, 4)
+
+    # Extract 3D points in camera frame
+    X_c = lidar_points_camera[:, 0]
+    Y_c = lidar_points_camera[:, 1]
+    Z_c = lidar_points_camera[:, 2]  # Depth
+
+    # Avoid division by zero
+    valid = Z_c > 0
+    X_c, Y_c, Z_c = X_c[valid], Y_c[valid], Z_c[valid]
+
+    # Project points onto image plane
+    u = (K[0, 0] * X_c / Z_c) + K[0, 2]
+    v = (K[1, 1] * Y_c / Z_c) + K[1, 2]
+
+    # Filter points within image bounds
+    img_h, img_w, _ = shape
+    valid_pts = (u >= 0) & (u < img_w) & (v >= 0) & (v < img_h)
+    u, v = u[valid_pts], v[valid_pts]
+    return u, v
+
+def modify_transform(T_lidar_to_camera):
+    modified = np.eye(4)
+    rotation_mat = R.from_euler('xyz', [x_rot.val, y_rot.val, z_rot.val], degrees=True).as_matrix()
+    translation_vec = np.array([x_trans.val, y_trans.val, z_trans.val])
+    modified[:3, :3] = rotation_mat
+    modified[:3, 3] = translation_vec
+    modified = modified @ T_lidar_to_camera
+    return modified
+
+def create_sliders(fig, update_func):
+    # Create space for sliders
+    fig.subplots_adjust(bottom=0.4)
+
+    # Make sliders to control the rotation (degrees) and translation (meters)
+    global x_rot, y_rot, z_rot, x_trans, y_trans, z_trans
+    sliders = []
+    specs = [("X Rotation", -30, 30), ("Y Rotation", -30, 30), ("Z Rotation", -30, 30),
+             ("X Translation", -10, 10), ("Y Translation", -10, 10), ("Z Translation", -10, 10)]
+    for i, (label, valmin, valmax) in enumerate(specs):
+        ax = fig.add_axes([0.25, 0.3 - 0.05 * i, 0.65, 0.03])
+        slider = Slider(ax=ax, label=label, valmin=valmin, valmax=valmax, valinit=0,
+                        orientation="horizontal")
+        # Register the update function with each slider
+        slider.on_changed(update_func)
+        sliders.append(slider)
+    x_rot, y_rot, z_rot, x_trans, y_trans, z_trans = sliders
+
+def main():
+    # Collect arguments
+    parser = argparse.ArgumentParser(description='fine-tune camera extrinsics by projecting lidar points onto an image',
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-p', '--img_path', type=str, required=True,
+                        help='Path to PNG image')
+    parser.add_argument('-l', '--lidar_path', type=str, required=True,
+                        help='Path to lidar NPZ point cloud')
+    parser.add_argument('-t', '--lidar_transform_path', type=str, required=True,
+                        help='Path to lidar extrinsics')
+    parser.add_argument('-e', '--camera_transform_path', type=str, required=True,
+                        help='Path to camera extrinsics')
+    parser.add_argument('-i', '--img_intrinsics_path', type=str, required=True,
+                        help='Path to yaml file for image intrinsics')
+    parser.add_argument('-o', '--out_path', type=str, required=False,
+                        help='Path to output yaml file for camera extrinsics')
+    parser.add_argument('-u','--undistort', action='store_true',
+                        help='Undistort the image using the distortion coefficients')
+
+    args = parser.parse_args()
+
+    # Load LiDAR points
+    lidar_data = np.load(args.lidar_path)
+    lidar_points = lidar_data['arr_0'] # Ensure the correct key
+
+    # Load Camera Image
+    image = cv.imread(args.img_path)
+    image = cv.cvtColor(image, cv.COLOR_BGR2RGB) # Convert BGR to RGB
+
+    # Load Transformation Matrices
+    T_lidar_to_vehicle = load_ex(args.lidar_transform_path, 'matrix')
+    T_camera_to_vehicle = load_ex(args.camera_transform_path, 'matrix')
+    T_lidar_to_camera = np.linalg.inv(T_camera_to_vehicle) @ T_lidar_to_vehicle
+
+    # Load Camera Intrinsics
+    if args.undistort:
+        K, distortion_coefficients = load_in(args.img_intrinsics_path, return_distort=True)
+        image, K = undistort_image(image, K, distortion_coefficients)
+    else:
+        K = load_in(args.img_intrinsics_path)
+
+    # Convert LiDAR points to homogeneous coordinates
+    num_points = lidar_points.shape[0]
+    lidar_homogeneous = np.hstack((lidar_points, np.ones((num_points, 1)))) # (N, 4)
+
+    # Initialize plot
+    plt.ion()
+    fig, ax = plt.subplots()
+
+    # Project lidar points to camera frame
+    u, v = project_points(T_lidar_to_camera, lidar_homogeneous, K, image.shape)
+
+    # Plot projected points on camera image
+    ax.imshow(image)
+    dots = ax.scatter(u, v, s=2, c='cyan', alpha=0.2) # Dots for projected points
+    ax.set_title("Projected LiDAR Points on Camera Image", pad=0)
+
+    # Define update function
+    def update(val):
+        modified = modify_transform(T_lidar_to_camera)
+
+        # Update projected points
+        u, v = project_points(modified, lidar_homogeneous, K, image.shape)
+        dots.set_offsets(np.c_[u, v])
+        plt.draw()
+
+    # Generate sliders and display plot
+    create_sliders(fig, update)
+    plt.draw()
+
+    # Update and keep displaying the modified plot
+    keep_running = True
+    while keep_running:
+        try:
+            if plt.get_fignums():
+                plt.pause(0.1)
+            else:
+                keep_running = False
+        except Exception:
+            keep_running = False
+
+    # Output and write the fine-tuned camera extrinsic (camera -> vehicle)
+    modified = modify_transform(T_lidar_to_camera)
+    print(T_lidar_to_vehicle @ np.linalg.inv(modified))
+    if args.out_path:
+        save_ex(args.out_path, matrix=T_lidar_to_vehicle @ np.linalg.inv(modified))
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/GEMstack/offboard/calibration/undistort_images.py b/GEMstack/offboard/calibration/undistort_images.py
new file mode 100644
index 000000000..de0075b7f
--- /dev/null
+++ b/GEMstack/offboard/calibration/undistort_images.py
@@ -0,0 +1,39 @@
+import numpy as np
+import cv2 as cv
+import glob
+import os
+import argparse
+
+from GEMstack.GEMstack.knowledge.calibration.calib_util import load_in, undistort_image
+
+def main():
+    # Collect arguments
+    parser = argparse.ArgumentParser(description='Create copies of images with the distortion removed',
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-i', '--img_intrinsics_path', type=str, required=True,
+                        help='Path to yaml file for image intrinsics')
+    parser.add_argument('-f', '--img_folder_path', type=str, required=True,
+                        help='Path to folder containing PNG images')
+    parser.add_argument('-c', '--camera_name', type=str, required=False,
+                        help='Name of the camera used to identify the correct images')
+
+    args = parser.parse_args()
+
+    # Get camera intrinsics
+    camera_matrix, distortion_coefficients = load_in(args.img_intrinsics_path, return_distort=True)
+
+    # Find image files
+    folder = args.img_folder_path
+    camera = ''
+    if args.camera_name:
+        camera = args.camera_name
+    image_files = glob.glob(os.path.join(folder, camera + '*.png'))
+
+    for fn in image_files:
+        image = cv.imread(fn)
+        dst, _ = undistort_image(image, camera_matrix, distortion_coefficients)
+        # str.replace('', ...) would insert "_rect" between every character, so
+        # rename only the basename and fall back to a suffix when no camera prefix is given
+        dirname, basename = os.path.split(fn)
+        if camera:
+            basename = basename.replace(camera, camera + "_rect", 1)
+        else:
+            name, ext = os.path.splitext(basename)
+            basename = name + "_rect" + ext
+        cv.imwrite(os.path.join(dirname, basename), dst)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file