diff --git a/getimg.py b/getimg.py
new file mode 100644
index 0000000..50cb6d9
--- /dev/null
+++ b/getimg.py
@@ -0,0 +1,131 @@
+# Camera reference tool
+# @路怀帅 2025.12.19 10:32am
+# PS: added loading of the camera distortion K/D and a side-by-side comparison view
+
+import cv2
+import threading
+import sys
+from datetime import datetime
+import os
+import argparse
+import numpy as np
+
+# Global state
+frame = None
+running = True
+which_camera = 0
+
+# Front camera parameters
+K = np.array([[ 5.3382445956203196e+02, 0., 9.7253025945442369e+02],
+              [0., 5.3393792084343488e+02, 5.6249605531924215e+02],
+              [ 0., 0., 1. ]])
+D = np.array([ -1.5749135021037808e-02, 2.9390620422222835e-03,
+               -4.3176357910129585e-03, 1.3296605027646462e-03 ])
+
+# Fisheye camera parameters
+
+W, H = 1920, 1080
+
+
+def video_thread():
+    global frame, running
+    cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
+    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
+    # cap.set(cv2.CAP_PROP_FPS, 30)
+
+    if not cap.isOpened():
+        print("[ERROR] Cannot open camera", file=sys.stderr)
+        running = False
+        return
+
+    # Build the fisheye undistortion maps once
+    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
+
+    while running:
+        ret, f = cap.read()
+        if not ret:
+            break
+        frame = f.copy()
+
+        # Undistort the frame
+        undistorted = cv2.remap(f, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        # Stack the raw and undistorted frames side by side
+        comparison = np.hstack((f, undistorted))
+
+        scale = min(1920 / comparison.shape[1], 1080 / comparison.shape[0])
+        if scale < 1:
+            comparison = cv2.resize(comparison, (int(comparison.shape[1] * scale), int(comparison.shape[0] * scale)))
+        comparison = comparison.astype(np.uint8)
+
+        # Show the video stream fullscreen
+        text_info = f"Camera: {args.i.upper()} | Press 'q' to quit, 's' to screenshot"
+        cv2.putText(comparison, text_info, (10, 30),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
+
+        cv2.namedWindow('Video old vs new', cv2.WND_PROP_FULLSCREEN)
+        cv2.setWindowProperty('Video old vs new', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
+        cv2.imshow('Video old vs new', comparison)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            running = False
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+def input_thread():
+    global running
+    print("Commands: 's' = screenshot, 'q' = quit")
+    while running:
+        try:
+            cmd = input().strip().lower()
+            if cmd == 's':
+                if frame is not None:
+                    filename = f"./images/{args.i.lower()}.png"
+                    cv2.imwrite(filename, frame)
+                    print(f"[SSH] Saved: {os.path.abspath(filename)}")
+                else:
+                    print("[SSH] No frame available yet.")
+            elif cmd == 'q':
+                running = False
+                break
+            else:
+                print("[SSH] Unknown command. Use 's' or 'q'.")
+        except EOFError:
+            break
+
+if __name__ == "__main__":
+
+    # Parse the user's startup arguments
+    parser = argparse.ArgumentParser(description="Camera reference tool")
+
+    # Camera direction entered by the user (front back left right)
+    parser.add_argument("--i", type=str, required=True, help="camera direction (front/back/left/right)")
+
+    args = parser.parse_args()
+
+    print("Camera direction:", args.i)
+
+    if args.i == "front":
+        which_camera = 0
+    elif args.i == "back":
+        which_camera = 2
+    elif args.i == "left":
+        which_camera = 1
+    elif args.i == "right":
+        which_camera = 3
+    else:
+        print("[ERROR] Invalid camera direction. Use 'front', 'back', 'left', or 'right'.", file=sys.stderr)
+        running = False
+        exit(1)
+
+    # Start the video thread
+    vt = threading.Thread(target=video_thread, daemon=True)
+    vt.start()
+
+    # Main thread listens for SSH commands
+    input_thread()
+
+    print("[INFO] Exiting...")
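
Note: getimg.py precomputes the fisheye rectification maps once and then remaps every frame. A minimal standalone sketch of that step, assuming the same front-camera K/D as above and a saved frame at images/front.png (the output filename is illustrative):

import cv2
import numpy as np

# Same front-camera intrinsics and fisheye distortion coefficients as in getimg.py
K = np.array([[5.3382445956203196e+02, 0., 9.7253025945442369e+02],
              [0., 5.3393792084343488e+02, 5.6249605531924215e+02],
              [0., 0., 1.]])
D = np.array([-1.5749135021037808e-02, 2.9390620422222835e-03,
              -4.3176357910129585e-03, 1.3296605027646462e-03])
W, H = 1920, 1080

# Compute the undistortion maps once, then apply them with a single remap per frame
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
img = cv2.imread("images/front.png")
undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_CONSTANT)
cv2.imwrite("front_undistorted.png", np.hstack((img, undistorted)))

Computing the maps outside the capture loop keeps the per-frame cost to a single remap call.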
diff --git a/images/back.png b/images/back.png
index 1d40bd5..6094940 100644
Binary files a/images/back.png and b/images/back.png differ
diff --git a/images/front.png b/images/front.png
index 2d6b940..5ed3e93 100644
Binary files a/images/front.png and b/images/front.png differ
diff --git a/images/left.png b/images/left.png
index 66e34fb..1c7e406 100644
Binary files a/images/left.png and b/images/left.png differ
diff --git a/run_get_weight_matrices.py b/run_get_weight_matrices.py
index a38dd30..2119296 100644
--- a/run_get_weight_matrices.py
+++ b/run_get_weight_matrices.py
@@ -23,13 +23,30 @@ def main():
     birdview = BirdView()
     Gmat, Mmat = birdview.get_weights_and_masks(projected)
     birdview.update_frames(projected)
-    birdview.make_luminance_balance().stitch_all_parts()
+    # birdview.make_luminance_balance().stitch_all_parts()
+    birdview.stitch_all_parts()
     birdview.make_white_balance()
     birdview.copy_car_image()
-    ret = display_image("BirdView Result", birdview.image)
-    if ret > 0:
-        Image.fromarray((Gmat * 255).astype(np.uint8)).save("weights.png")
-        Image.fromarray(Mmat.astype(np.uint8)).save("masks.png")
+    img_small = cv2.resize(birdview.image, (320, 600))
+    cv2.imshow("BirdView Result", img_small)
+    while True:
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord("q"):
+            return -1
+        # cv2.waitKey(1)
+        # cmd = input().strip().lower()
+        # if cmd == 'q':
+        #     cv2.destroyAllWindows()
+        #     return -1
+
+
+    # birdview.copy_car_image()
+    # ret = display_image("BirdView Result", birdview.image)
+
+    # if ret > 0:
+    #     Image.fromarray((Gmat * 255).astype(np.uint8)).save("weights.png")
+    #     Image.fromarray(Mmat.astype(np.uint8)).save("masks.png")
 
 
 if __name__ == "__main__":
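
Note: this hunk comments out the weights/masks export together with the display_image helper. If that export is still wanted without the PIL dependency, a minimal sketch using cv2.imwrite (assuming, as in the removed code, Gmat holds float weights in [0, 1] and Mmat holds 0/255 masks; the helper name is hypothetical):

import cv2
import numpy as np

def save_weights_and_masks(Gmat, Mmat, weights_path="weights.png", masks_path="masks.png"):
    # Float weights in [0, 1] are rescaled to 8-bit; masks are already 0/255
    cv2.imwrite(weights_path, (Gmat * 255).astype(np.uint8))
    cv2.imwrite(masks_path, Mmat.astype(np.uint8))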
diff --git a/save_img/shot_20251211_101610.jpg b/save_img/shot_20251211_101610.jpg
deleted file mode 100644
index 8f165b1..0000000
Binary files a/save_img/shot_20251211_101610.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101614.jpg b/save_img/shot_20251211_101614.jpg
deleted file mode 100644
index ad0109f..0000000
Binary files a/save_img/shot_20251211_101614.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101702.jpg b/save_img/shot_20251211_101702.jpg
deleted file mode 100644
index 1dc1993..0000000
Binary files a/save_img/shot_20251211_101702.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101719.jpg b/save_img/shot_20251211_101719.jpg
deleted file mode 100644
index a46b540..0000000
Binary files a/save_img/shot_20251211_101719.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101727.jpg b/save_img/shot_20251211_101727.jpg
deleted file mode 100644
index e6758c4..0000000
Binary files a/save_img/shot_20251211_101727.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101740.jpg b/save_img/shot_20251211_101740.jpg
deleted file mode 100644
index 71c14dc..0000000
Binary files a/save_img/shot_20251211_101740.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101748.jpg b/save_img/shot_20251211_101748.jpg
deleted file mode 100644
index 886de2e..0000000
Binary files a/save_img/shot_20251211_101748.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101800.jpg b/save_img/shot_20251211_101800.jpg
deleted file mode 100644
index 0a14060..0000000
Binary files a/save_img/shot_20251211_101800.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_101802.jpg b/save_img/shot_20251211_101802.jpg
deleted file mode 100644
index afabe60..0000000
Binary files a/save_img/shot_20251211_101802.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103514.jpg b/save_img/shot_20251211_103514.jpg
deleted file mode 100644
index f664959..0000000
Binary files a/save_img/shot_20251211_103514.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103517.jpg b/save_img/shot_20251211_103517.jpg
deleted file mode 100644
index 31bd5d9..0000000
Binary files a/save_img/shot_20251211_103517.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103522.jpg b/save_img/shot_20251211_103522.jpg
deleted file mode 100644
index 756083c..0000000
Binary files a/save_img/shot_20251211_103522.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103527.jpg b/save_img/shot_20251211_103527.jpg
deleted file mode 100644
index 871d7ea..0000000
Binary files a/save_img/shot_20251211_103527.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103531.jpg b/save_img/shot_20251211_103531.jpg
deleted file mode 100644
index b447b4c..0000000
Binary files a/save_img/shot_20251211_103531.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103541.jpg b/save_img/shot_20251211_103541.jpg
deleted file mode 100644
index 25579b7..0000000
Binary files a/save_img/shot_20251211_103541.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103547.jpg b/save_img/shot_20251211_103547.jpg
deleted file mode 100644
index 5a03e5c..0000000
Binary files a/save_img/shot_20251211_103547.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103550.jpg b/save_img/shot_20251211_103550.jpg
deleted file mode 100644
index 2348a85..0000000
Binary files a/save_img/shot_20251211_103550.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103554.jpg b/save_img/shot_20251211_103554.jpg
deleted file mode 100644
index 5dfd797..0000000
Binary files a/save_img/shot_20251211_103554.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103558.jpg b/save_img/shot_20251211_103558.jpg
deleted file mode 100644
index 1cd936d..0000000
Binary files a/save_img/shot_20251211_103558.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103614.jpg b/save_img/shot_20251211_103614.jpg
deleted file mode 100644
index 49f70e4..0000000
Binary files a/save_img/shot_20251211_103614.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103619.jpg b/save_img/shot_20251211_103619.jpg
deleted file mode 100644
index 5f91e60..0000000
Binary files a/save_img/shot_20251211_103619.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103622.jpg b/save_img/shot_20251211_103622.jpg
deleted file mode 100644
index a920664..0000000
Binary files a/save_img/shot_20251211_103622.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103624.jpg b/save_img/shot_20251211_103624.jpg
deleted file mode 100644
index 28ee21d..0000000
Binary files a/save_img/shot_20251211_103624.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103627.jpg b/save_img/shot_20251211_103627.jpg
deleted file mode 100644
index 312e396..0000000
Binary files a/save_img/shot_20251211_103627.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103633.jpg b/save_img/shot_20251211_103633.jpg
deleted file mode 100644
index 508b140..0000000
Binary files a/save_img/shot_20251211_103633.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103637.jpg b/save_img/shot_20251211_103637.jpg
deleted file mode 100644
index fd2ed18..0000000
Binary files a/save_img/shot_20251211_103637.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103640.jpg b/save_img/shot_20251211_103640.jpg
deleted file mode 100644
index 6b8b57d..0000000
Binary files a/save_img/shot_20251211_103640.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103642.jpg b/save_img/shot_20251211_103642.jpg
deleted file mode 100644
index 64325a0..0000000
Binary files a/save_img/shot_20251211_103642.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103650.jpg b/save_img/shot_20251211_103650.jpg
deleted file mode 100644
index 0e04431..0000000
Binary files a/save_img/shot_20251211_103650.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103655.jpg b/save_img/shot_20251211_103655.jpg
deleted file mode 100644
index 4f02dd9..0000000
Binary files a/save_img/shot_20251211_103655.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103700.jpg b/save_img/shot_20251211_103700.jpg
deleted file mode 100644
index 89b6849..0000000
Binary files a/save_img/shot_20251211_103700.jpg and /dev/null differ
diff --git a/save_img/shot_20251211_103704.jpg b/save_img/shot_20251211_103704.jpg
deleted file mode 100644
index a3de10c..0000000
Binary files a/save_img/shot_20251211_103704.jpg and /dev/null differ
diff --git a/saveimg.py b/saveimg.py
index b593842..41cc0e3 100644
--- a/saveimg.py
+++ b/saveimg.py
@@ -1,7 +1,3 @@
-# Camera reference tool
-# @路怀帅 2025.12.19 10:32am
-# PS: added loading of the camera distortion K/D and a side-by-side comparison view
-
 import cv2
 import threading
 import sys
@@ -16,40 +12,59 @@ running = True
 which_camera = 0
 W, H = 1920, 1080  # camera resolution
 
-# Fisheye camera parameters
-
-# Front camera parameters
-# K = np.array([[ 5.3382445956203196e+02, 0., 9.7253025945442369e+02],
-#               [0., 5.3393792084343488e+02, 5.6249605531924215e+02],
-#               [ 0., 0., 1. ]])
-
-# D = np.array([ -1.5749135021037808e-02, 2.9390620422222835e-03,
-#                -4.3176357910129585e-03, 1.3296605027646462e-03 ])
-
-# Right camera parameters
-K = np.array([[5.3402108990030604e+02, 0., 9.2598444295282172e+02],
-              [0., 5.3455325152709827e+02, 5.7771767919091610e+02],
-              [0., 0., 1.]])
-D = np.array([-1.8724887233075402e-02, 6.4408558584901701e-03,
-              -5.2069636709412993e-03, 8.4815411645490968e-04])
-# Perspective-transform parameters
-
-# Right camera parameters
-front_proj_matrix = np.array([
-    [-4.5788125154634862e-01, -2.4847281172116515e+00, 1.1901356339453334e+03],
-    [7.0190114936088524e-02, -2.2880693827869965e+00, 1.0012772462548877e+03],
-    [1.4549566908718078e-04, -3.4392525895003334e-03, 1.0]
-])
-
-# These values are passed to set_scale_and_shift in surround_view
-scale_xy = np.array([6.99999988e-01, 8.00000012e-01])  # x, y scale factors
-shift_xy = np.array([-80., -200.])  # x, y shift (pixels)
-
+def load_camera_params(camera_name):
+    """
+    Load camera parameters from a YAML file
+    """
+    yaml_file = os.path.join("yaml", f"{camera_name}.yaml")
+
+    if not os.path.exists(yaml_file):
+        raise FileNotFoundError(f"YAML file not found: {yaml_file}")
+
+    # Read the YAML file with OpenCV's FileStorage
+    fs = cv2.FileStorage(yaml_file, cv2.FILE_STORAGE_READ)
+
+    # Camera intrinsic matrix
+    camera_matrix = fs.getNode("camera_matrix").mat()
+
+    # Distortion coefficients
+    dist_coeffs = fs.getNode("dist_coeffs").mat()
+
+    # Projection matrix
+    project_matrix = fs.getNode("project_matrix").mat()
+
+    # Scale parameters
+    scale_xy_node = fs.getNode("scale_xy")
+    if scale_xy_node.empty():
+        scale_xy = np.array([1.0, 1.0])
+    else:
+        scale_xy = scale_xy_node.mat().flatten()
+
+    # Shift parameters
+    shift_xy_node = fs.getNode("shift_xy")
+    if shift_xy_node.empty():
+        shift_xy = np.array([0.0, 0.0])
+    else:
+        shift_xy = shift_xy_node.mat().flatten()
+
+    fs.release()
+
+    return camera_matrix, dist_coeffs, project_matrix, scale_xy, shift_xy
 
 def video_thread():
     global frame, running
+
+    # Dynamically load the parameters for the selected camera
+    try:
+        K, D, front_proj_matrix, scale_xy, shift_xy = load_camera_params(args.i.lower())
+        print(f"[INFO] Loaded parameters for {args.i} camera")
+    except Exception as e:
+        print(f"[ERROR] Failed to load camera parameters: {e}", file=sys.stderr)
+        running = False
+        return
+
     cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
-    cap.set(cv2.CAP_PROP_FOURCC,cv2.VideoWriter_fourcc(*"YUYV"))
+    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
@@ -58,6 +73,7 @@ def video_thread():
         running = False
         return
 
+    # Build the modified camera matrix (applies the scale and shift)
     modified_camera_matrix = K.copy()
     modified_camera_matrix[0, 0] *= scale_xy[0]  # fx *= scale_x
     modified_camera_matrix[1, 1] *= scale_xy[1]  # fy *= scale_y
@@ -70,7 +86,6 @@ def video_thread():
     # Fisheye undistortion only (no projection)
     map3, map4 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
 
-
     while running:
         ret, f = cap.read()
         if not ret:
@@ -80,7 +95,6 @@ def video_thread():
         undistorted = cv2.remap(f, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
         undistorted2 = cv2.remap(f, map3, map4, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
 
-
         proj_image = cv2.warpPerspective(
             undistorted,
             front_proj_matrix,
@@ -89,7 +103,6 @@ def video_thread():
             borderValue=(0, 0, 0)
         )
 
-
         frame = f.copy()
         birdseye_small = cv2.resize(f, (W//2, H//2))
         undistorted2_small = cv2.resize(undistorted2, (W//2, H//2))
@@ -98,14 +111,9 @@ def video_thread():
         comparison = np.hstack((birdseye_small, undistorted2_small))
         show_video = np.vstack((comparison, proj_image))
 
-        scale = min(1920 / comparison.shape[1], 1080 / comparison.shape[0])
-        if scale < 1:
-            comparison = cv2.resize(comparison, (int(comparison.shape[1] * scale), int(comparison.shape[0] * scale)))
-        comparison = comparison.astype(np.uint8)
-
         # Show the video stream fullscreen
         text_info = f"Camera: {args.i.upper()} | Press 'q' to quit, 's' to screenshot"
-        cv2.putText(comparison, text_info, (10, 30),
+        cv2.putText(show_video, text_info, (10, 30),
                     cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
 
         cv2.namedWindow('Video old vs new', cv2.WND_PROP_FULLSCREEN)
@@ -141,12 +149,13 @@ def input_thread():
             break
 
 if __name__ == "__main__":
-
-    # Parse the user's startup arguments
-    parser = argparse.ArgumentParser(description="Example script with argparse")
+    parser = argparse.ArgumentParser(description="Camera Parameter Loading Tool")
 
     # Camera direction entered by the user (front back left right)
-    parser.add_argument("--i", type=str, required=True, help="camera direction")
+    parser.add_argument("--i", type=str, required=True,
+                        choices=["front", "back", "left", "right"],
+                        help="Camera direction (front/back/left/right)")
 
     args = parser.parse_args()
 
@@ -165,6 +174,13 @@ if __name__ == "__main__":
         running = False
         exit(1)
 
+    # Make sure the YAML directory exists
+    yaml_dir = "yaml"
+    if not os.path.exists(yaml_dir):
+        print(f"[ERROR] YAML directory not found: {yaml_dir}", file=sys.stderr)
+        print("Please ensure YAML files are in the 'yaml' directory", file=sys.stderr)
+        exit(1)
+
     # Start the video thread
     vt = threading.Thread(target=video_thread, daemon=True)
     vt.start()
@@ -172,4 +188,4 @@ if __name__ == "__main__":
     # Main thread listens for SSH commands
     input_thread()
 
-    print("[INFO] Exiting...")
+    print("[INFO] Exiting...")
\ No newline at end of file
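
Note: load_camera_params() reads the per-camera OpenCV FileStorage YAML files added later in this patch. A minimal sanity-check sketch for one camera's file, run from the repository root (the printed fields mirror what video_thread() consumes):

from saveimg import load_camera_params  # helper added above

# Load the front camera's yaml/front.yaml and inspect its contents
K, D, P, scale_xy, shift_xy = load_camera_params("front")
assert K.shape == (3, 3) and P.shape == (3, 3)  # intrinsics and projection matrix
print("camera_matrix:\n", K)
print("dist_coeffs:", D.ravel())
print("scale_xy:", scale_xy, "shift_xy:", shift_xy)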
diff --git a/surround_view/__pycache__/fisheye_camera.cpython-38.pyc b/surround_view/__pycache__/fisheye_camera.cpython-38.pyc
index 531a893..3e4866f 100644
Binary files a/surround_view/__pycache__/fisheye_camera.cpython-38.pyc and b/surround_view/__pycache__/fisheye_camera.cpython-38.pyc differ
diff --git a/surround_view/__pycache__/param_settings.cpython-38.pyc b/surround_view/__pycache__/param_settings.cpython-38.pyc
index 6135477..2a93ebc 100644
Binary files a/surround_view/__pycache__/param_settings.cpython-38.pyc and b/surround_view/__pycache__/param_settings.cpython-38.pyc differ
diff --git a/surround_view/fisheye_camera.py b/surround_view/fisheye_camera.py
index e5f9d26..20860c6 100644
--- a/surround_view/fisheye_camera.py
+++ b/surround_view/fisheye_camera.py
@@ -92,7 +92,7 @@ class FisheyeCameraModel(object):
             return cv2.transpose(image)[::-1]
 
         else:
-            return cv2.transpose(image)[::-1]
+            return cv2.transpose(image)[:, ::-1]
 
     def save_data(self):
         fs = cv2.FileStorage(self.camera_file, cv2.FILE_STORAGE_WRITE)
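
Note on the flip() fix above: for a 2-D array, transpose followed by [::-1] rotates the image 90 degrees counterclockwise, while transpose followed by [:, ::-1] rotates it 90 degrees clockwise, so the two branches now produce opposite rotations instead of the same one. A quick numpy check, using np.transpose in place of cv2.transpose:

import numpy as np

a = np.arange(6).reshape(2, 3)
ccw = np.transpose(a)[::-1]     # old else-branch: equivalent to np.rot90(a), 90 deg counterclockwise
cw = np.transpose(a)[:, ::-1]   # new else-branch: equivalent to np.rot90(a, k=-1), 90 deg clockwise
assert np.array_equal(ccw, np.rot90(a))
assert np.array_equal(cw, np.rot90(a, k=-1))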
diff --git a/surround_view/param_settings.py b/surround_view/param_settings.py
index 1050512..9c58bf8 100644
--- a/surround_view/param_settings.py
+++ b/surround_view/param_settings.py
@@ -6,22 +6,24 @@ camera_names = ["front", "back", "left", "right"]
 
 # --------------------------------------------------------------------
 # (shift_width, shift_height): how far the bird's-eye view extends beyond the calibration pattern, horizontally and vertically
-shift_w = 300
-shift_h = 300
+shift_w = 100
+shift_h = 100
+
 # size of the gap between the calibration pattern and the vehicle, horizontally and vertically
 inn_shift_w = 20
-inn_shift_h = 50
+inn_shift_h = 20
 
 # total width/height of the stitched image
-total_w = 600 + 2 * shift_w
-total_h = 1000 + 2 * shift_h
+total_w = 300 + 2 * shift_w
+total_h = 350 + 2 * shift_h
 
-# four corner points of the rectangle occupied by the vehicle
-# top-left (x_left, y_top), bottom-right (x_right, y_bottom)
-xl = shift_w + 180 + inn_shift_w
+
+# position of the vehicle within the panorama
+xl = shift_w + 45 + inn_shift_w
 xr = total_w - xl
-yt = shift_h + 200 + inn_shift_h
+print(xl, xr)
+yt = shift_h + 60 + inn_shift_h
 yb = total_h - yt
 
 # --------------------------------------------------------------------
@@ -32,29 +34,28 @@ project_shapes = {
     "right": (total_h, xl)
 }
 
-# pixel locations of the four points to be chosen.
-# you must click these pixels in the same order when running
-# the get_projection_map.py script
+# Pixel locations of the four points to be chosen.
+# You must click these pixels in the same order when running the get_projection_map.py script.
 project_keypoints = {
-    "front": [(shift_w + 120, shift_h),
-              (shift_w + 480, shift_h),
-              (shift_w + 120, shift_h + 160),
-              (shift_w + 480, shift_h + 160)],
+    "front": [(shift_w + 0, shift_h),
+              (shift_w + 300, shift_h),
+              (shift_w + 0, shift_h + 60),
+              (shift_w + 300, shift_h + 60)],
 
-    "back": [(shift_w + 120, shift_h),
-             (shift_w + 480, shift_h),
-             (shift_w + 120, shift_h + 160),
-             (shift_w + 480, shift_h + 160)],
+    "back": [(shift_w + 0, shift_h),
+             (shift_w + 300, shift_h),
+             (shift_w + 0, shift_h + 80),
+             (shift_w + 300, shift_h + 80)],
 
-    "left": [(shift_h + 280, shift_w),
-             (shift_h + 840, shift_w),
-             (shift_h + 280, shift_w + 160),
-             (shift_h + 840, shift_w + 160)],
+    "left": [(shift_h + 0, shift_w),
+             (shift_h + 350, shift_w),
+             (shift_h + 0, shift_w + 50),
+             (shift_h + 350, shift_w + 50)],
 
-    "right": [(shift_h + 160, shift_w),
-              (shift_h + 720, shift_w),
-              (shift_h + 160, shift_w + 160),
-              (shift_h + 720, shift_w + 160)]
+    "right": [(shift_h + 0, shift_w),
+              (shift_h + 350, shift_w),
+              (shift_h + 0, shift_w + 50),
+              (shift_h + 350, shift_w + 50)]
 }
 
 car_image = cv2.imread(os.path.join(os.getcwd(), "images", "car.png"))
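
For reference, the new constants shrink the stitched canvas considerably; the derived values work out as follows (same formulas as in the file above):

shift_w, shift_h = 100, 100
inn_shift_w, inn_shift_h = 20, 20
total_w = 300 + 2 * shift_w        # 500
total_h = 350 + 2 * shift_h        # 550
xl = shift_w + 45 + inn_shift_w    # 165
xr = total_w - xl                  # 335
yt = shift_h + 60 + inn_shift_h    # 180
yb = total_h - yt                  # 370
print(total_w, total_h, xl, xr, yt, yb)  # 500 550 165 335 180 370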
diff --git a/video.py b/video.py
new file mode 100644
index 0000000..f4addec
--- /dev/null
+++ b/video.py
@@ -0,0 +1,165 @@
+import cv2
+import threading
+import sys
+import os
+import argparse
+import numpy as np
+from surround_view import FisheyeCameraModel, BirdView
+import surround_view.param_settings as settings
+
+
+class MultiCameraBirdView:
+    def __init__(self):
+        self.running = True
+        self.names = settings.camera_names
+        self.yamls = [os.path.join(os.getcwd(), "yaml", name + ".yaml") for name in self.names]
+        self.camera_models = [FisheyeCameraModel(camera_file, camera_name) for camera_file, camera_name in zip(self.yamls, self.names)]
+
+        # Camera device indices
+        self.caps = []
+        self.which_cameras = {
+            "front": 0,
+            "back": 2,
+            "left": 1,
+            "right": 3
+        }
+
+        # Open the video capture devices
+        for name in self.names:
+            cap = cv2.VideoCapture(self.which_cameras[name], cv2.CAP_ANY)
+            cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
+            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
+            if not cap.isOpened():
+                print(f"[ERROR] Cannot open {name} camera", file=sys.stderr)
+                self.running = False
+                return
+            self.caps.append(cap)
+
+        # Bird's-eye view stitcher
+        self.birdview = BirdView()
+
+        # Pre-load static images once to initialize the weight matrices
+        self._initialize_weights()
+
+    def _initialize_weights(self):
+        """Initialize the weight matrices from static images"""
+        try:
+            # Load the static images used for weight initialization
+            images = [os.path.join(os.getcwd(), "images", name + ".png") for name in self.names]
+            static_frames = []
+
+            for image_file, camera_model in zip(images, self.camera_models):
+                img = cv2.imread(image_file)
+                if img is None:
+                    print(f"[ERROR] Cannot load static image: {image_file}")
+                    continue
+                img = camera_model.undistort(img)
+                img = camera_model.project(img)
+                img = camera_model.flip(img)
+                static_frames.append(img)
+
+            if len(static_frames) == 4:
+                # Initialize the weight and mask matrices
+                Gmat, Mmat = self.birdview.get_weights_and_masks(static_frames)
+                print("[INFO] Weights and masks initialized successfully")
+            else:
+                print("[WARNING] Could not load all static images for weight initialization")
+        except Exception as e:
+            print(f"[ERROR] Failed to initialize weights: {e}", file=sys.stderr)
+
+    def process_frame(self, img, camera_model):
+        """Process a single frame: undistort -> project -> flip"""
+        img = camera_model.undistort(img)
+        img = camera_model.project(img)
+        img = camera_model.flip(img)
+        return img
+
+    def run(self):
+        while self.running:
+            frames = []
+            for i, (cap, camera_model) in enumerate(zip(self.caps, self.camera_models)):
+                ret, frame = cap.read()
+                if not ret or frame is None:
+                    print(f"[ERROR] Failed to read frame from camera {self.names[i]}")
+                    continue
+                processed_frame = self.process_frame(frame, camera_model)
+                frames.append(processed_frame)
+
+            if len(frames) == 4:  # make sure every camera delivered a frame
+                try:
+                    # Update the bird's-eye view (weight matrices were initialized up front)
+                    self.birdview.update_frames(frames)
+                    self.birdview.stitch_all_parts()
+                    self.birdview.make_white_balance()
+                    self.birdview.copy_car_image()
+
+                    # Show the bird's-eye view
+                    birdview_display = cv2.resize(self.birdview.image, (750, 1000))
+                    cv2.imshow("Real-time Bird's Eye View", birdview_display)
+
+                    if cv2.waitKey(1) & 0xFF == ord('q'):
+                        self.running = False
+                        break
+                except Exception as e:
+                    print(f"[ERROR] Processing error: {e}", file=sys.stderr)
+                    import traceback
+                    traceback.print_exc()
+                    continue
+
+        # Release resources
+        for cap in self.caps:
+            cap.release()
+        cv2.destroyAllWindows()
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Real-time 4-Camera Bird's Eye View")
+    parser.add_argument("--mode", type=str, required=True,
+                        choices=["realtime", "static"],
+                        help="Mode: 'realtime' for live video, 'static' for static images")
+    args = parser.parse_args()
+
+    if args.mode == "static":
+        # Original static-image processing path
+        names = settings.camera_names
+        images = [os.path.join(os.getcwd(), "images", name + ".png") for name in names]
+        yamls = [os.path.join(os.getcwd(), "yaml", name + ".yaml") for name in names]
+        camera_models = [FisheyeCameraModel(camera_file, camera_name) for camera_file, camera_name in zip(yamls, names)]
+
+        projected = []
+        for image_file, camera in zip(images, camera_models):
+            img = cv2.imread(image_file)
+            img = camera.undistort(img)
+            img = camera.project(img)
+            img = camera.flip(img)
+            projected.append(img)
+
+        birdview = BirdView()
+        Gmat, Mmat = birdview.get_weights_and_masks(projected)  # initialize the weight matrices
+        birdview.update_frames(projected)
+        birdview.stitch_all_parts()
+        birdview.make_white_balance()
+        birdview.copy_car_image()
+        img_small = cv2.resize(birdview.image, (300, 600))
+        cv2.imshow("BirdView Result", img_small)
+
+        while True:
+            key = cv2.waitKey(1) & 0xFF
+            if key == ord("q"):
+                cv2.destroyAllWindows()
+                return -1
+
+    elif args.mode == "realtime":
+        # Real-time video processing
+        print("Starting real-time 4-camera bird's eye view...")
+        print("Press 'q' to quit")
+        multi_cam = MultiCameraBirdView()
+        if multi_cam.running:
+            multi_cam.run()
+        else:
+            print("[ERROR] Failed to initialize cameras")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
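
Note: video.py is invoked as "python video.py --mode static" or "python video.py --mode realtime". A minimal headless sketch of the same static pipeline that writes the stitched result to disk instead of opening a window (the output filename is illustrative):

import os
import cv2
from surround_view import FisheyeCameraModel, BirdView
import surround_view.param_settings as settings

projected = []
for name in settings.camera_names:
    model = FisheyeCameraModel(os.path.join("yaml", name + ".yaml"), name)
    img = cv2.imread(os.path.join("images", name + ".png"))
    # Same per-frame chain as MultiCameraBirdView.process_frame: undistort -> project -> flip
    projected.append(model.flip(model.project(model.undistort(img))))

birdview = BirdView()
birdview.get_weights_and_masks(projected)   # initialize blend weights once
birdview.update_frames(projected)
birdview.stitch_all_parts()
birdview.make_white_balance()
birdview.copy_car_image()
cv2.imwrite("birdview_static.png", birdview.image)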
diff --git a/yaml/back.yaml b/yaml/back.yaml
index 3737df3..2d22aa4 100644
--- a/yaml/back.yaml
+++ b/yaml/back.yaml
@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -9.2062464083377798e-03, -6.3620029090856196e-03,
        3.3735324773401221e-03, -1.0289639180500810e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -2.2036739433214386e-01, -5.4728752801607983e-01,
+       4.0669019104227289e+02, -2.2188976198317060e-02,
+       -5.6384233390057015e-01, 3.2888161676725548e+02,
+       -8.4090277960475698e-05, -2.2171947701663608e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -150., -100. ]
diff --git a/yaml/front.yaml b/yaml/front.yaml
index 1ee1fa9..5aff58e 100644
--- a/yaml/front.yaml
+++ b/yaml/front.yaml
@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -1.5749135021037808e-02, 2.9390620422222835e-03,
       -4.3176357910129585e-03, 1.3296605027646462e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -1.8474101274061588e-01, -5.4968582653196851e-01,
+       4.1680109927253847e+02, 9.7678250049304510e-03,
+       -4.9902821258073782e-01, 3.0304882941065989e+02,
+       4.3706314843734903e-05, -2.1601287734653030e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -150., -100. ]
diff --git a/yaml/left.yaml b/yaml/left.yaml
index 2450fc1..c277082 100644
--- a/yaml/left.yaml
+++ b/yaml/left.yaml
@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -1.1422407725638895e-02, -1.2103818796148216e-02,
       9.0774770002077006e-03, -2.8278270352926444e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -2.0080056297490056e-01, -6.1187781235143224e-01,
+       4.5434494858537062e+02, -2.9556627352318810e-03,
+       -5.0502143174738201e-01, 3.0816263946657051e+02,
+       -1.8229281242150158e-05, -2.1645728959083397e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -80., -100. ]
diff --git a/yaml/right.yaml b/yaml/right.yaml
index 4929e3f..7d6e04d 100644
--- a/yaml/right.yaml
+++ b/yaml/right.yaml
@@ -17,15 +17,14 @@ resolution: !!opencv-matrix
    cols: 1
    dt: i
    data: [ 1920, 1080 ]
-
 project_matrix: !!opencv-matrix
    rows: 3
    cols: 3
    dt: d
-   data: [ -4.5788125154634862e-01, -2.4847281172116515e+00,
-       1.1901356339453334e+03, 7.0190114936088524e-02,
-       -2.2880693827869965e+00, 1.0012772462548877e+03,
-       1.4549566908718078e-04, -3.4392525895003334e-03, 1. ]
+   data: [ -2.0592797222758674e-01, -6.8891188079230814e-01,
+       4.5964514365855666e+02, 2.1763027489573539e-02,
+       -6.3287857963783611e-01, 3.4166476009354983e+02,
+       1.3350446934563321e-04, -2.6387220529307219e-03, 1. ]
 scale_xy: !!opencv-matrix
    rows: 2
    cols: 1
@@ -35,4 +34,4 @@ shift_xy: !!opencv-matrix
    rows: 2
    cols: 1
    dt: f
-   data: [ -80., -200. ]
+   data: [ -80., -100. ]
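
Note: both load_camera_params() in saveimg.py and FisheyeCameraModel read these YAML files, so every camera file needs the nodes this patch adds (project_matrix, scale_xy, shift_xy). A minimal sketch that flags a half-updated file:

import os
import cv2

required = ["camera_matrix", "dist_coeffs", "resolution", "project_matrix", "scale_xy", "shift_xy"]
for name in ["front", "back", "left", "right"]:
    fs = cv2.FileStorage(os.path.join("yaml", name + ".yaml"), cv2.FILE_STORAGE_READ)
    missing = [key for key in required if fs.getNode(key).empty()]
    fs.release()
    print(name, "OK" if not missing else f"missing: {missing}")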