Run surround view

2025-12-19 17:08:28 +08:00
parent 5283a6f54a
commit 86df465b15
47 changed files with 485 additions and 102 deletions

getimg.py (new file, 131 lines)

@@ -0,0 +1,131 @@
# Camera reference tool
# @路怀帅 2025.12.19 10:32am
# PS: added loading of camera K/D distortion parameters and a side-by-side undistortion comparison view
import cv2
import threading
import sys
from datetime import datetime
import os
import argparse
import numpy as np

# Global variables
frame = None
running = True
which_camera = 0

# Front camera parameters
K = np.array([[5.3382445956203196e+02, 0., 9.7253025945442369e+02],
              [0., 5.3393792084343488e+02, 5.6249605531924215e+02],
              [0., 0., 1.]])
D = np.array([-1.5749135021037808e-02, 2.9390620422222835e-03,
              -4.3176357910129585e-03, 1.3296605027646462e-03])

# Load fisheye camera parameters
W, H = 1920, 1080


def video_thread():
    global frame, running
    cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    # cap.set(cv2.CAP_PROP_FPS, 30)
    if not cap.isOpened():
        print("[ERROR] Cannot open camera", file=sys.stderr)
        running = False
        return
    # Fisheye undistortion maps
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
    while running:
        ret, f = cap.read()
        if not ret:
            break
        frame = f.copy()
        # Undistort the image
        undistorted = cv2.remap(f, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        # Stack the original and the undistorted video side by side
        comparison = np.hstack((f, undistorted))
        scale = min(1920 / comparison.shape[1], 1080 / comparison.shape[0])
        if scale < 1:
            comparison = cv2.resize(comparison, (int(comparison.shape[1] * scale), int(comparison.shape[0] * scale)))
        comparison = comparison.astype(np.uint8)
        # Show the video stream full screen
        text_info = f"Camera: {args.i.upper()} | Press 'q' to quit, 's' to screenshot"
        cv2.putText(comparison, text_info, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.namedWindow('Video old vs new', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('Video old vs new', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('Video old vs new', comparison)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            running = False
            break
    cap.release()
    cv2.destroyAllWindows()


def input_thread():
    global running
    print("Commands: 's' = screenshot, 'q' = quit")
    while running:
        try:
            cmd = input().strip().lower()
            if cmd == 's':
                if frame is not None:
                    filename = f"./images/{args.i.lower()}.png"
                    cv2.imwrite(filename, frame)
                    print(f"[SSH] Saved: {os.path.abspath(filename)}")
                else:
                    print("[SSH] No frame available yet.")
            elif cmd == 'q':
                running = False
                break
            else:
                print("[SSH] Unknown command. Use 's' or 'q'.")
        except EOFError:
            break


if __name__ == "__main__":
    # Parse user arguments
    parser = argparse.ArgumentParser(description="Example script with argparse")
    # Camera direction entered by the user: front, back, left, right
    parser.add_argument("--i", type=str, required=True, help="camera direction")
    args = parser.parse_args()
    print("Camera direction:", args.i)
    if args.i == "front":
        which_camera = 0
    elif args.i == "back":
        which_camera = 2
    elif args.i == "left":
        which_camera = 1
    elif args.i == "right":
        which_camera = 3
    else:
        print("[ERROR] Invalid camera direction. Use 'front', 'back', 'left', or 'right'.", file=sys.stderr)
        running = False
        exit(1)
    # Start the video thread
    vt = threading.Thread(target=video_thread, daemon=True)
    vt.start()
    # Main thread listens for SSH input
    input_thread()
    print("[INFO] Exiting...")

3 binary image files changed (previews not shown): 1.3 MiB to 2.6 MiB, 3.7 MiB to 2.7 MiB, and 1.3 MiB to 2.8 MiB.

@@ -23,13 +23,30 @@ def main():
     birdview = BirdView()
     Gmat, Mmat = birdview.get_weights_and_masks(projected)
     birdview.update_frames(projected)
-    birdview.make_luminance_balance().stitch_all_parts()
+    # birdview.make_luminance_balance().stitch_all_parts()
+    birdview.stitch_all_parts()
     birdview.make_white_balance()
     birdview.copy_car_image()
-    ret = display_image("BirdView Result", birdview.image)
-    if ret > 0:
-        Image.fromarray((Gmat * 255).astype(np.uint8)).save("weights.png")
-        Image.fromarray(Mmat.astype(np.uint8)).save("masks.png")
+    img_samll = cv2.resize(birdview.image, (320, 600))
+    cv2.imshow("BirdView Result", img_samll)
+    while True:
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord("q"):
+            return -1
+    # cv2.waitKey(1)
+    # cmd = input().strip().lower()
+    # if cmd == 'q':
+    #     cv2.destroyAllWindows()
+    #     return -1
+    # birdview.copy_car_image()
+    # ret = display_image("BirdView Result", birdview.image)
+    # if ret > 0:
+    #     Image.fromarray((Gmat * 255).astype(np.uint8)).save("weights.png")
+    #     Image.fromarray(Mmat.astype(np.uint8)).save("masks.png")

 if __name__ == "__main__":

32 binary image files changed (previews not shown).

@@ -1,7 +1,3 @@
-# Camera reference tool
-# @路怀帅 2025.12.19 10:32am
-# PS: added loading of camera K/D distortion parameters and a side-by-side undistortion comparison view
 import cv2
 import threading
 import sys
@@ -16,40 +12,59 @@ running = True
 which_camera = 0
 W, H = 1920, 1080  # camera resolution
-# Load fisheye camera parameters
+def load_camera_params(camera_name):
+    """
+    Load camera parameters from a YAML file
+    """
+    yaml_file = os.path.join("yaml", f"{camera_name}.yaml")
-# Front camera parameters
-# K = np.array([[ 5.3382445956203196e+02, 0., 9.7253025945442369e+02],
-#               [0., 5.3393792084343488e+02, 5.6249605531924215e+02],
-#               [ 0., 0., 1. ]])
-# D = np.array([ -1.5749135021037808e-02, 2.9390620422222835e-03,
-#                -4.3176357910129585e-03, 1.3296605027646462e-03 ])
+    if not os.path.exists(yaml_file):
+        raise FileNotFoundError(f"YAML file not found: {yaml_file}")
+    # Read the YAML file with OpenCV
+    fs = cv2.FileStorage(yaml_file, cv2.FILE_STORAGE_READ)
-# Right camera parameters
-K = np.array([[5.3402108990030604e+02, 0., 9.2598444295282172e+02],
-              [0., 5.3455325152709827e+02, 5.7771767919091610e+02],
-              [0., 0., 1.]])
-D = np.array([-1.8724887233075402e-02, 6.4408558584901701e-03,
-              -5.2069636709412993e-03, 8.4815411645490968e-04])
+    # Read the camera intrinsic matrix
+    camera_matrix = fs.getNode("camera_matrix").mat()
-# Load the perspective-transform parameters
-# Right parameters
-front_proj_matrix = np.array([
-    [-4.5788125154634862e-01, -2.4847281172116515e+00, 1.1901356339453334e+03],
-    [7.0190114936088524e-02, -2.2880693827869965e+00, 1.0012772462548877e+03],
-    [1.4549566908718078e-04, -3.4392525895003334e-03, 1.0]
-])
+    # Read the distortion coefficients
+    dist_coeffs = fs.getNode("dist_coeffs").mat()
-# These parameters are used by set_scale_and_shift in surround_view
-scale_xy = np.array([6.99999988e-01, 8.00000012e-01])  # x, y scale factors
-shift_xy = np.array([-80., -200.])  # x, y shift (pixels)
+    # Read the projection matrix
+    project_matrix = fs.getNode("project_matrix").mat()
+    # Read the scale parameters
+    scale_xy_node = fs.getNode("scale_xy")
+    if scale_xy_node.empty():
+        scale_xy = np.array([1.0, 1.0])
+    else:
+        scale_xy = scale_xy_node.mat().flatten()
+    # Read the shift parameters
+    shift_xy_node = fs.getNode("shift_xy")
+    if shift_xy_node.empty():
+        shift_xy = np.array([0.0, 0.0])
+    else:
+        shift_xy = shift_xy_node.mat().flatten()
+    fs.release()
+    return camera_matrix, dist_coeffs, project_matrix, scale_xy, shift_xy
 def video_thread():
     global frame, running
+    # Dynamically load the parameters for the current camera
+    try:
+        K, D, front_proj_matrix, scale_xy, shift_xy = load_camera_params(args.i.lower())
+        print(f"[INFO] Loaded parameters for {args.i} camera")
+    except Exception as e:
+        print(f"[ERROR] Failed to load camera parameters: {e}", file=sys.stderr)
+        running = False
+        return
     cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
-    cap.set(cv2.CAP_PROP_FOURCC,cv2.VideoWriter_fourcc(*"YUYV"))
+    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
@@ -58,6 +73,7 @@ def video_thread():
         running = False
         return
+    # Build a modified camera matrix (with scale and shift applied)
     modified_camera_matrix = K.copy()
     modified_camera_matrix[0, 0] *= scale_xy[0]  # fx *= scale_x
     modified_camera_matrix[1, 1] *= scale_xy[1]  # fy *= scale_y
@@ -70,7 +86,6 @@ def video_thread():
     # Fisheye undistortion only
     map3, map4 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
-
     while running:
         ret, f = cap.read()
         if not ret:
@@ -80,7 +95,6 @@ def video_thread():
         undistorted = cv2.remap(f, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
         undistorted2 = cv2.remap(f, map3, map4, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
-
         proj_image = cv2.warpPerspective(
             undistorted,
             front_proj_matrix,
@@ -89,7 +103,6 @@ def video_thread():
             borderValue=(0, 0, 0)
         )
-
         frame = f.copy()
         birdseye_small = cv2.resize(f, (W//2, H//2))
         undistorted2_small = cv2.resize(undistorted2, (W//2, H//2))
@@ -98,14 +111,9 @@ def video_thread():
         comparison = np.hstack((birdseye_small, undistorted2_small))
         show_video = np.vstack((comparison, proj_image))
-        scale = min(1920 / comparison.shape[1], 1080 / comparison.shape[0])
-        if scale < 1:
-            comparison = cv2.resize(comparison, (int(comparison.shape[1] * scale), int(comparison.shape[0] * scale)))
-        comparison = comparison.astype(np.uint8)
         # Show the video stream full screen
         text_info = f"Camera: {args.i.upper()} | Press 'q' to quit, 's' to screenshot"
-        cv2.putText(comparison, text_info, (10, 30),
+        cv2.putText(show_video, text_info, (10, 30),
                     cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
         cv2.namedWindow('Video old vs new', cv2.WND_PROP_FULLSCREEN)
@@ -141,12 +149,13 @@ def input_thread():
             break
 if __name__ == "__main__":
     # Parse user arguments
-    parser = argparse.ArgumentParser(description="Example script with argparse")
+    parser = argparse.ArgumentParser(description="Camera Parameter Loading Tool")
     # Camera direction entered by the user: front, back, left, right
-    parser.add_argument("--i", type=str, required=True, help="camera direction")
+    parser.add_argument("--i", type=str, required=True,
+                        choices=["front", "back", "left", "right"],
+                        help="Camera direction (front/back/left/right)")
     args = parser.parse_args()
@@ -165,6 +174,13 @@ if __name__ == "__main__":
         running = False
         exit(1)
+    # Check whether the YAML directory exists
+    yaml_dir = "yaml"
+    if not os.path.exists(yaml_dir):
+        print(f"[ERROR] YAML directory not found: {yaml_dir}", file=sys.stderr)
+        print("Please ensure YAML files are in the 'yaml' directory", file=sys.stderr)
+        exit(1)
    # Start the video thread
     vt = threading.Thread(target=video_thread, daemon=True)
     vt.start()
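
For reference, the YAML layout that load_camera_params reads matches the files added later in this commit (camera_matrix, dist_coeffs, project_matrix, scale_xy, shift_xy). A minimal standalone sketch of the same read-and-project path; the camera name, image path, and the 1920x1080 output size are assumptions, not taken from the diff:

import os
import cv2
import numpy as np

fs = cv2.FileStorage(os.path.join("yaml", "front.yaml"), cv2.FILE_STORAGE_READ)  # assumed camera name
K = fs.getNode("camera_matrix").mat()
D = fs.getNode("dist_coeffs").mat()
proj = fs.getNode("project_matrix").mat()
fs.release()

# Same undistort + top-down projection steps as in video_thread()
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (1920, 1080), cv2.CV_16SC2)
img = cv2.imread(os.path.join("images", "front.png"))  # any captured frame (assumed path)
undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
top_down = cv2.warpPerspective(undistorted, proj, (1920, 1080))  # output size assumed
cv2.imwrite(os.path.join("images", "front_projected.png"), top_down)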

@@ -92,7 +92,7 @@ class FisheyeCameraModel(object):
             return cv2.transpose(image)[::-1]
         else:
-            return cv2.transpose(image)[::-1]
+            return cv2.transpose(image)[:,::-1]

     def save_data(self):
         fs = cv2.FileStorage(self.camera_file, cv2.FILE_STORAGE_WRITE)
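
A small standalone sketch (not from this repo) of what the one-character change above does: cv2.transpose(image)[::-1] is a transpose followed by a vertical flip, i.e. a 90-degree counter-clockwise rotation, while cv2.transpose(image)[:,::-1] is a transpose followed by a horizontal flip, i.e. a 90-degree clockwise rotation, so the else branch now rotates that camera view in the opposite direction.

import cv2
import numpy as np

img = np.arange(6, dtype=np.uint8).reshape(2, 3)  # toy 2x3 "image"
ccw = cv2.transpose(img)[::-1]                    # old behaviour: rotate 90 degrees counter-clockwise
cw = cv2.transpose(img)[:, ::-1]                  # new behaviour: rotate 90 degrees clockwise
assert np.array_equal(ccw, np.rot90(img, 1))
assert np.array_equal(cw, np.rot90(img, -1))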

@@ -6,22 +6,24 @@ camera_names = ["front", "back", "left", "right"]
 # --------------------------------------------------------------------
 # (shift_width, shift_height): how far the bird's-eye view extends beyond the calibration pattern horizontally and vertically
-shift_w = 300
-shift_h = 300
+shift_w = 100
+shift_h = 100
 # Horizontal and vertical gap between the calibration pattern and the vehicle
 inn_shift_w = 20
-inn_shift_h = 50
+inn_shift_h = 20
 # Total width/height of the stitched image
-total_w = 600 + 2 * shift_w
-total_h = 1000 + 2 * shift_h
+total_w = 300 + 2 * shift_w
+total_h = 350 + 2 * shift_h
-# Four corner points of the rectangle occupied by the vehicle
-# Top-left (x_left, y_top), bottom-right (x_right, y_bottom)
+# Compute the vehicle's position in the panorama
-xl = shift_w + 180 + inn_shift_w
+xl = shift_w + 45 + inn_shift_w
 xr = total_w - xl
-yt = shift_h + 200 + inn_shift_h
+print(xl, xr)
+yt = shift_h + 60 + inn_shift_h
 yb = total_h - yt
 # --------------------------------------------------------------------
@@ -32,29 +34,28 @@ project_shapes = {
     "right": (total_h, xl)
 }
-# pixel locations of the four points to be chosen.
-# you must click these pixels in the same order when running
-# the get_projection_map.py script
+# Pixel locations of the four points to be chosen.
+# When running the get_projection_map.py script, you must click these pixels in the same order.
 project_keypoints = {
-    "front": [(shift_w + 120, shift_h),
-              (shift_w + 480, shift_h),
-              (shift_w + 120, shift_h + 160),
-              (shift_w + 480, shift_h + 160)],
+    "front": [(shift_w + 0, shift_h),
+              (shift_w + 300, shift_h),
+              (shift_w + 0, shift_h + 60),
+              (shift_w + 300, shift_h + 60)],
-    "back": [(shift_w + 120, shift_h),
-             (shift_w + 480, shift_h),
-             (shift_w + 120, shift_h + 160),
-             (shift_w + 480, shift_h + 160)],
+    "back": [(shift_w + 0, shift_h),
+             (shift_w + 300, shift_h),
+             (shift_w + 0, shift_h + 80),
+             (shift_w + 300, shift_h + 80)],
-    "left": [(shift_h + 280, shift_w),
-             (shift_h + 840, shift_w),
-             (shift_h + 280, shift_w + 160),
-             (shift_h + 840, shift_w + 160)],
+    "left": [(shift_h + 0, shift_w),
+             (shift_h + 350, shift_w),
+             (shift_h + 0, shift_w + 50),
+             (shift_h + 350, shift_w + 50)],
-    "right": [(shift_h + 160, shift_w),
-              (shift_h + 720, shift_w),
-              (shift_h + 160, shift_w + 160),
-              (shift_h + 720, shift_w + 160)]
+    "right": [(shift_h + 0, shift_w),
+              (shift_h + 350, shift_w),
+              (shift_h + 0, shift_w + 50),
+              (shift_h + 350, shift_w + 50)]
 }
 car_image = cv2.imread(os.path.join(os.getcwd(), "images", "car.png"))
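
With the new constants above, the derived geometry works out as follows (a quick check using only values visible in this diff):
total_w = 300 + 2 * 100 = 500, total_h = 350 + 2 * 100 = 550;
xl = 100 + 45 + 20 = 165, xr = 500 - 165 = 335;
yt = 100 + 60 + 20 = 180, yb = 550 - 180 = 370.
So the stitched bird's-eye canvas is now 500 x 550 pixels and the car rectangle spans x in [165, 335] and y in [180, 370], compared with the previous 1200 x 1600 canvas.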

video.py (new file, 165 lines)

@@ -0,0 +1,165 @@
import cv2
import threading
import sys
import os
import argparse
import numpy as np

from surround_view import FisheyeCameraModel, BirdView
import surround_view.param_settings as settings


class MultiCameraBirdView:
    def __init__(self):
        self.running = True
        self.names = settings.camera_names
        self.yamls = [os.path.join(os.getcwd(), "yaml", name + ".yaml") for name in self.names]
        self.camera_models = [FisheyeCameraModel(camera_file, camera_name) for camera_file, camera_name in zip(self.yamls, self.names)]

        # Initialize the cameras
        self.caps = []
        self.which_cameras = {
            "front": 0,
            "back": 2,
            "left": 1,
            "right": 3
        }

        # Initialize the camera captures
        for name in self.names:
            cap = cv2.VideoCapture(self.which_cameras[name], cv2.CAP_ANY)
            cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
            if not cap.isOpened():
                print(f"[ERROR] Cannot open {name} camera", file=sys.stderr)
                self.running = False
                return
            self.caps.append(cap)

        # Initialize the bird's-eye view
        self.birdview = BirdView()
        # Load static images once up front to initialize the weight matrices
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize the weight matrices from static images"""
        try:
            # Load the static images used for weight initialization
            images = [os.path.join(os.getcwd(), "images", name + ".png") for name in self.names]
            static_frames = []
            for image_file, camera_model in zip(images, self.camera_models):
                img = cv2.imread(image_file)
                if img is None:
                    print(f"[ERROR] Cannot load static image: {image_file}")
                    continue
                img = camera_model.undistort(img)
                img = camera_model.project(img)
                img = camera_model.flip(img)
                static_frames.append(img)
            if len(static_frames) == 4:
                # Initialize the weight and mask matrices
                Gmat, Mmat = self.birdview.get_weights_and_masks(static_frames)
                print("[INFO] Weights and masks initialized successfully")
            else:
                print("[WARNING] Could not load all static images for weight initialization")
        except Exception as e:
            print(f"[ERROR] Failed to initialize weights: {e}", file=sys.stderr)

    def process_frame(self, img, camera_model):
        """Process a single frame: undistort -> project -> flip"""
        img = camera_model.undistort(img)
        img = camera_model.project(img)
        img = camera_model.flip(img)
        return img

    def run(self):
        while self.running:
            frames = []
            for i, (cap, camera_model) in enumerate(zip(self.caps, self.camera_models)):
                ret, frame = cap.read()
                if not ret or frame is None:
                    print(f"[ERROR] Failed to read frame from camera {self.names[i]}")
                    continue
                processed_frame = self.process_frame(frame, camera_model)
                frames.append(processed_frame)

            if len(frames) == 4:  # make sure every camera delivered a frame
                try:
                    # Update the bird's-eye view (the weight matrices were initialized up front)
                    self.birdview.update_frames(frames)
                    self.birdview.stitch_all_parts()
                    self.birdview.make_white_balance()
                    self.birdview.copy_car_image()

                    # Show the bird's-eye view
                    birdview_display = cv2.resize(self.birdview.image, (750, 1000))
                    cv2.imshow("Real-time Bird's Eye View", birdview_display)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.running = False
                        break
                except Exception as e:
                    print(f"[ERROR] Processing error: {e}", file=sys.stderr)
                    import traceback
                    traceback.print_exc()
                    continue

        # Release resources
        for cap in self.caps:
            cap.release()
        cv2.destroyAllWindows()


def main():
    parser = argparse.ArgumentParser(description="Real-time 4-Camera Bird's Eye View")
    parser.add_argument("--mode", type=str, required=True,
                        choices=["realtime", "static"],
                        help="Mode: 'realtime' for live video, 'static' for static images")
    args = parser.parse_args()

    if args.mode == "static":
        # Original static-image processing path
        names = settings.camera_names
        images = [os.path.join(os.getcwd(), "images", name + ".png") for name in names]
        yamls = [os.path.join(os.getcwd(), "yaml", name + ".yaml") for name in names]
        camera_models = [FisheyeCameraModel(camera_file, camera_name) for camera_file, camera_name in zip(yamls, names)]
        projected = []
        for image_file, camera in zip(images, camera_models):
            img = cv2.imread(image_file)
            img = camera.undistort(img)
            img = camera.project(img)
            img = camera.flip(img)
            projected.append(img)
        birdview = BirdView()
        Gmat, Mmat = birdview.get_weights_and_masks(projected)  # initialize the weight matrices
        birdview.update_frames(projected)
        birdview.stitch_all_parts()
        birdview.make_white_balance()
        birdview.copy_car_image()
        img_small = cv2.resize(birdview.image, (300, 600))
        cv2.imshow("BirdView Result", img_small)
        while True:
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                cv2.destroyAllWindows()
                return -1
    elif args.mode == "realtime":
        # Real-time video processing
        print("Starting real-time 4-camera bird's eye view...")
        print("Press 'q' to quit")
        multi_cam = MultiCameraBirdView()
        if multi_cam.running:
            multi_cam.run()
        else:
            print("[ERROR] Failed to initialize cameras")


if __name__ == "__main__":
    main()
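
Based on the argparse definition above, video.py would be launched as python video.py --mode static to reproduce the previous one-shot stitching from the saved images/*.png, or as python video.py --mode realtime to stitch the four live camera streams (device indices 0-3 as mapped in which_cameras); in both modes pressing 'q' in the OpenCV window exits.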

@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -9.2062464083377798e-03, -6.3620029090856196e-03,
        3.3735324773401221e-03, -1.0289639180500810e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -2.2036739433214386e-01, -5.4728752801607983e-01,
+       4.0669019104227289e+02, -2.2188976198317060e-02,
+       -5.6384233390057015e-01, 3.2888161676725548e+02,
+       -8.4090277960475698e-05, -2.2171947701663608e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -150., -100. ]

@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -1.5749135021037808e-02, 2.9390620422222835e-03,
        -4.3176357910129585e-03, 1.3296605027646462e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -1.8474101274061588e-01, -5.4968582653196851e-01,
+       4.1680109927253847e+02, 9.7678250049304510e-03,
+       -4.9902821258073782e-01, 3.0304882941065989e+02,
+       4.3706314843734903e-05, -2.1601287734653030e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -150., -100. ]

@@ -1,10 +1,5 @@
 %YAML:1.0
 ---
-resolution: !!opencv-matrix
-   rows: 2
-   cols: 1
-   dt: i
-   data: [ 1920, 1080 ]
 camera_matrix: !!opencv-matrix
    rows: 3
    cols: 3
@@ -17,3 +12,26 @@ dist_coeffs: !!opencv-matrix
    dt: d
    data: [ -1.1422407725638895e-02, -1.2103818796148216e-02,
        9.0774770002077006e-03, -2.8278270352926444e-03 ]
+resolution: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: i
+   data: [ 1920, 1080 ]
+project_matrix: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: d
+   data: [ -2.0080056297490056e-01, -6.1187781235143224e-01,
+       4.5434494858537062e+02, -2.9556627352318810e-03,
+       -5.0502143174738201e-01, 3.0816263946657051e+02,
+       -1.8229281242150158e-05, -2.1645728959083397e-03, 1. ]
+scale_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ 6.99999988e-01, 8.00000012e-01 ]
+shift_xy: !!opencv-matrix
+   rows: 2
+   cols: 1
+   dt: f
+   data: [ -80., -100. ]

@@ -17,15 +17,14 @@ resolution: !!opencv-matrix
    cols: 1
    dt: i
    data: [ 1920, 1080 ]
 project_matrix: !!opencv-matrix
    rows: 3
    cols: 3
    dt: d
-   data: [ -4.5788125154634862e-01, -2.4847281172116515e+00,
-       1.1901356339453334e+03, 7.0190114936088524e-02,
-       -2.2880693827869965e+00, 1.0012772462548877e+03,
-       1.4549566908718078e-04, -3.4392525895003334e-03, 1. ]
+   data: [ -2.0592797222758674e-01, -6.8891188079230814e-01,
+       4.5964514365855666e+02, 2.1763027489573539e-02,
+       -6.3287857963783611e-01, 3.4166476009354983e+02,
+       1.3350446934563321e-04, -2.6387220529307219e-03, 1. ]
 scale_xy: !!opencv-matrix
    rows: 2
    cols: 1
@@ -35,4 +34,4 @@ shift_xy: !!opencv-matrix
    rows: 2
    cols: 1
    dt: f
-   data: [ -80., -200. ]
+   data: [ -80., -100. ]