Backup before web work

2025-12-23 09:18:32 +08:00
parent d8b28c238b
commit 4961794bf5
26 changed files with 1124 additions and 232 deletions

@@ -30,7 +30,7 @@ W, H = 1920, 1080
 def video_thread():
     global frame, running
-    cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
+    cap = cv2.VideoCapture(which_camera, cv2.CAP_V4L2)
     cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
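
The one-line change above recurs throughout this commit: cv2.CAP_ANY lets OpenCV pick any available backend, while cv2.CAP_V4L2 pins capture to Video4Linux2, which the YUYV FOURCC and buffer settings assume. A quick sketch to verify which backend actually opened a device (device index 0 is a placeholder):

import cv2

# Hypothetical check: open device 0 with the V4L2 backend and report what we got.
cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
if cap.isOpened():
    print("backend:", cap.getBackendName())  # expect "V4L2"
    print("fourcc ok:", cap.set(cv2.CAP_PROP_FOURCC,
                                cv2.VideoWriter_fourcc(*"YUYV")))
else:
    print("device 0 did not open via V4L2")
cap.release()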

BIN  images/back copy.png (new file, 2.6 MiB)

BIN  images/car copy 2.png (new file, 43 KiB)

BIN  images/car copy.png (new file, 43 KiB)

mjpeg_streamer.py (new file, 82 lines)

@@ -0,0 +1,82 @@
# mjpeg_streamer.py
import threading
import time
from flask import Flask, Response, render_template, request, redirect, session, url_for
import cv2

# ====== New: login configuration ======
AUTO_LOGIN = None  # 👈 set to True to skip the login page
VALID_USER = {"username": "admin", "password": "admin"}

class MJPEGServer:
    def __init__(self, frame_buffer, host="0.0.0.0", port=8080):
        self.frame_buffer = frame_buffer
        self.host = host
        self.port = port
        self.app = Flask(__name__)
        self.app.secret_key = 'your-secret-key-change-in-prod'  # used for sessions
        # Routes
        self.app.add_url_rule('/', 'index', self.index)
        self.app.add_url_rule('/login', 'login', self.login, methods=['GET', 'POST'])
        self.app.add_url_rule('/logout', 'logout', self.logout)
        self.app.add_url_rule('/video_feed', 'video_feed', self.video_feed)
        # Static files (Layui) are served automatically
        self.app.static_folder = 'static'

    def is_logged_in(self):
        return session.get('logged_in', False)

    def check_auth(self):
        if AUTO_LOGIN:
            session['logged_in'] = True
            return True
        return self.is_logged_in()

    def index(self):
        if not self.check_auth():
            return redirect(url_for('login'))
        return render_template('index.html')

    def login(self):
        if request.method == 'POST':
            username = request.form['username']
            password = request.form['password']
            if username == VALID_USER['username'] and password == VALID_USER['password']:
                session['logged_in'] = True
                return redirect(url_for('index'))
            else:
                return '<script>alert("Invalid username or password");window.history.back();</script>'
        return render_template('login.html')

    def logout(self):
        session.pop('logged_in', None)
        return redirect(url_for('login'))

    def video_feed(self):
        if not self.check_auth():
            return '', 403
        return Response(self._gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    def _gen(self):
        while True:
            success, frame = self.frame_buffer.get_frame()
            if not success or frame is None:
                time.sleep(0.1)
                continue
            ret, buffer = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
            if not ret:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')

    def start(self):
        thread = threading.Thread(
            target=self.app.run,
            kwargs={'host': self.host, 'port': self.port, 'debug': False, 'use_reloader': False},
            daemon=True
        )
        thread.start()
        print(f"[MJPEG] Web server started, visit http://{self.host}:{self.port}")
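
For context, MJPEGServer only requires its frame_buffer to expose get_frame() returning (success, frame); any object satisfying that contract works. A smoke-test sketch with a static pattern instead of cameras (AUTO_LOGIN is flipped here only to skip the login form):

import numpy as np
import mjpeg_streamer
from mjpeg_streamer import MJPEGServer

mjpeg_streamer.AUTO_LOGIN = True  # skip the login form for this test only

class TestPatternBuffer:
    def get_frame(self):
        # Synthetic frame: black background with green vertical stripes.
        frame = np.zeros((360, 640, 3), dtype=np.uint8)
        frame[:, ::20] = (0, 255, 0)
        return True, frame

server = MJPEGServer(TestPatternBuffer(), host="0.0.0.0", port=8080)
server.start()  # Flask runs in a daemon thread
input("Open http://localhost:8080/video_feed then press Enter to stop\n")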

main.py

@@ -1,191 +1,403 @@
 # main.py
 import cv2
 import threading
 import sys
-from datetime import datetime
 import os
 import argparse
 import numpy as np
-
-# Global variables
-frame = None
-running = True
-which_camera = 0
-W, H = 1920, 1080  # camera resolution
-
-def load_camera_params(camera_name):
-    """
-    Load camera parameters from a YAML file
-    """
-    yaml_file = os.path.join("yaml", f"{camera_name}.yaml")
-    if not os.path.exists(yaml_file):
-        raise FileNotFoundError(f"YAML file not found: {yaml_file}")
-    # Read the YAML file with OpenCV
-    fs = cv2.FileStorage(yaml_file, cv2.FILE_STORAGE_READ)
-    # Camera intrinsic matrix
-    camera_matrix = fs.getNode("camera_matrix").mat()
-    # Distortion coefficients
-    dist_coeffs = fs.getNode("dist_coeffs").mat()
-    # Projection matrix
-    project_matrix = fs.getNode("project_matrix").mat()
-    # Scale parameters
-    scale_xy_node = fs.getNode("scale_xy")
-    if scale_xy_node.empty():
-        scale_xy = np.array([1.0, 1.0])
-    else:
-        scale_xy = scale_xy_node.mat().flatten()
-    # Shift parameters
-    shift_xy_node = fs.getNode("shift_xy")
-    if shift_xy_node.empty():
-        shift_xy = np.array([0.0, 0.0])
-    else:
-        shift_xy = shift_xy_node.mat().flatten()
-    fs.release()
-    return camera_matrix, dist_coeffs, project_matrix, scale_xy, shift_xy
-
-def video_thread():
-    global frame, running
-    # Dynamically load the parameters of the selected camera
-    try:
-        K, D, front_proj_matrix, scale_xy, shift_xy = load_camera_params(args.i.lower())
-        print(f"[INFO] Loaded parameters for {args.i} camera")
-    except Exception as e:
-        print(f"[ERROR] Failed to load camera parameters: {e}", file=sys.stderr)
-        running = False
-        return
-    cap = cv2.VideoCapture(which_camera, cv2.CAP_ANY)
-    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
-    if not cap.isOpened():
-        print("[ERROR] Cannot open camera", file=sys.stderr)
-        running = False
-        return
-    # Build a modified camera matrix (with scale and shift applied)
-    modified_camera_matrix = K.copy()
-    modified_camera_matrix[0, 0] *= scale_xy[0]  # fx *= scale_x
-    modified_camera_matrix[1, 1] *= scale_xy[1]  # fy *= scale_y
-    modified_camera_matrix[0, 2] += shift_xy[0]  # cx += shift_x
-    modified_camera_matrix[1, 2] += shift_xy[1]  # cy += shift_y
-    # Fisheye undistortion with the scale factors merged in
-    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), modified_camera_matrix, (W, H), cv2.CV_16SC2)
-    # Fisheye undistortion only
-    map3, map4 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (W, H), cv2.CV_16SC2)
-    while running:
-        ret, f = cap.read()
-        if not ret:
-            break
-        # Undistort the image
-        undistorted = cv2.remap(f, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
-        undistorted2 = cv2.remap(f, map3, map4, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
-        proj_image = cv2.warpPerspective(
-            undistorted,
-            front_proj_matrix,
-            (W, H),  # output size (same as the source resolution)
-            borderMode=cv2.BORDER_CONSTANT,
-            borderValue=(0, 0, 0)
-        )
-        frame = f.copy()
-        birdseye_small = cv2.resize(f, (W//2, H//2))
-        undistorted2_small = cv2.resize(undistorted2, (W//2, H//2))
-        # Stack the original and the undistorted video (side by side)
-        comparison = np.hstack((birdseye_small, undistorted2_small))
-        show_video = np.vstack((comparison, proj_image))
-        # Show the stream full-screen
-        text_info = f"Camera: {args.i.upper()} | Press 'q' to quit, 's' to screenshot"
-        cv2.putText(show_video, text_info, (10, 30),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
-        cv2.namedWindow('Video old vs new', cv2.WND_PROP_FULLSCREEN)
-        cv2.setWindowProperty('Video old vs new', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
-        cv2.imshow('Video old vs new', show_video)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
-            running = False
-            break
-    cap.release()
-    cv2.destroyAllWindows()
-
-def input_thread():
-    global running
-    print("SSH commands: 's' = screenshot, 'q' = quit")
-    while running:
-        try:
-            cmd = input().strip().lower()
-            if cmd == 's':
-                if frame is not None:
-                    filename = f"./images/{args.i.lower()}.png"
-                    cv2.imwrite(filename, frame)
-                    print(f"[SSH] Saved: {os.path.abspath(filename)}")
-                else:
-                    print("[SSH] No frame available yet.")
-            elif cmd == 'q':
-                running = False
-                break
-            else:
-                print("[SSH] Unknown command. Use 's' or 'q'.")
-        except EOFError:
-            break
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Camera Parameter Loading Tool")
-    # Camera direction supplied by the user: front back left right
-    parser.add_argument("--i", type=str, required=True,
-                        choices=["front", "back", "left", "right"],
-                        help="Camera direction (front/back/left/right)")
-    args = parser.parse_args()
-    print("Camera direction:", args.i)
-    if args.i == "front":
-        which_camera = 0
-    elif args.i == "back":
-        which_camera = 2
-    elif args.i == "left":
-        which_camera = 1
-    elif args.i == "right":
-        which_camera = 3
-    else:
-        print("[ERROR] Invalid camera direction. Use 'front', 'back', 'left', or 'right'.", file=sys.stderr)
-        running = False
-        exit(1)
-    # Check that the YAML directory exists
-    yaml_dir = "yaml"
-    if not os.path.exists(yaml_dir):
-        print(f"[ERROR] YAML directory not found: {yaml_dir}", file=sys.stderr)
-        print("Please ensure YAML files are in the 'yaml' directory", file=sys.stderr)
-        exit(1)
-    # Start the video thread
-    vt = threading.Thread(target=video_thread, daemon=True)
-    vt.start()
-    # The main thread listens for SSH input
-    input_thread()
-    print("[INFO] Exiting...")
+import time
+
+from surround_view import FisheyeCameraModel, BirdView
+import surround_view.param_settings as settings
+
+sys.path.append(os.path.dirname(__file__))
+from py_utils.coco_utils import COCO_test_helper
+from py_utils.rknn_executor import RKNN_model_container
+
+from mjpeg_streamer import MJPEGServer
+
+# --- new ---
+from shared_buffer import CameraFrameBuffer, DetectionResultBuffer
+
+# Enable OpenCL (Mali-G610)
+if cv2.ocl.haveOpenCL():
+    cv2.ocl.setUseOpenCL(True)
+    print("✅ OpenCL is ON — using Mali-G610 GPU for acceleration")
+else:
+    print("⚠️ OpenCL not available")
+
+# ------ YOLO configuration -----------
+YOLO_MODEL_PATH = './yolov5s-640-640.rknn'
+OBJ_THRESH = 0.6
+NMS_THRESH = 0.6
+IMG_SIZE = (640, 640)
+CLASSES = ("person",)
+ANCHORS_FILE = './model/anchors_yolov5.txt'
+with open(ANCHORS_FILE, 'r') as f:
+    values = [float(_v) for _v in f.readlines()]
+    ANCHORS = np.array(values).reshape(3, -1, 2).tolist()
+
+# ========== YOLO post-processing (unchanged) ==========
+def filter_boxes(boxes, box_confidences, box_class_probs):
+    box_confidences = box_confidences.reshape(-1)
+    class_max_score = np.max(box_class_probs, axis=-1)
+    classes = np.argmax(box_class_probs, axis=-1)
+    _class_pos = np.where(class_max_score * box_confidences >= OBJ_THRESH)
+    scores = (class_max_score * box_confidences)[_class_pos]
+    boxes = boxes[_class_pos]
+    classes = classes[_class_pos]
+    return boxes, classes, scores
+
+def nms_boxes(boxes, scores):
+    x = boxes[:, 0]; y = boxes[:, 1]; w = boxes[:, 2] - boxes[:, 0]; h = boxes[:, 3] - boxes[:, 1]
+    areas = w * h; order = scores.argsort()[::-1]
+    keep = []
+    while order.size > 0:
+        i = order[0]; keep.append(i)
+        xx1 = np.maximum(x[i], x[order[1:]]); yy1 = np.maximum(y[i], y[order[1:]])
+        xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]]); yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+        w1 = np.maximum(0.0, xx2 - xx1 + 0.00001); h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+        inter = w1 * h1
+        ovr = inter / (areas[i] + areas[order[1:]] - inter)
+        inds = np.where(ovr <= NMS_THRESH)[0]
+        order = order[inds + 1]
+    return np.array(keep)
+
+def box_process(position, anchors):
+    grid_h, grid_w = position.shape[2:4]
+    col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
+    col = col.reshape(1, 1, grid_h, grid_w); row = row.reshape(1, 1, grid_h, grid_w)
+    grid = np.concatenate((col, row), axis=1)
+    stride = np.array([IMG_SIZE[1] // grid_h, IMG_SIZE[0] // grid_w]).reshape(1, 2, 1, 1)
+    col = col.repeat(len(anchors), axis=0); row = row.repeat(len(anchors), axis=0)
+    anchors = np.array(anchors)
+    anchors = anchors.reshape(*anchors.shape, 1, 1)
+    box_xy = position[:, :2, :, :] * 2 - 0.5
+    box_wh = pow(position[:, 2:4, :, :] * 2, 2) * anchors
+    box_xy += grid; box_xy *= stride
+    box = np.concatenate((box_xy, box_wh), axis=1)
+    xyxy = np.copy(box)
+    xyxy[:, 0, :, :] = box[:, 0, :, :] - box[:, 2, :, :] / 2
+    xyxy[:, 1, :, :] = box[:, 1, :, :] - box[:, 3, :, :] / 2
+    xyxy[:, 2, :, :] = box[:, 0, :, :] + box[:, 2, :, :] / 2
+    xyxy[:, 3, :, :] = box[:, 1, :, :] + box[:, 3, :, :] / 2
+    return xyxy
+
+def post_process(input_data, anchors):
+    boxes, scores, classes_conf = [], [], []
+    input_data = [_in.reshape([len(anchors[0]), -1] + list(_in.shape[-2:])) for _in in input_data]
+    for i in range(len(input_data)):
+        boxes.append(box_process(input_data[i][:, :4, :, :], anchors[i]))
+        scores.append(input_data[i][:, 4:5, :, :])
+        classes_conf.append(input_data[i][:, 5:, :, :])
+    def sp_flatten(_in):
+        ch = _in.shape[1]; _in = _in.transpose(0, 2, 3, 1); return _in.reshape(-1, ch)
+    boxes = [sp_flatten(_v) for _v in boxes]
+    classes_conf = [sp_flatten(_v) for _v in classes_conf]
+    scores = [sp_flatten(_v) for _v in scores]
+    boxes = np.concatenate(boxes); classes_conf = np.concatenate(classes_conf); scores = np.concatenate(scores)
+    boxes, classes, scores = filter_boxes(boxes, scores, classes_conf)
+    nboxes, nclasses, nscores = [], [], []
+    for c in set(classes):
+        inds = np.where(classes == c)
+        b = boxes[inds]; c = classes[inds]; s = scores[inds]
+        keep = nms_boxes(b, s)
+        if len(keep) != 0:
+            nboxes.append(b[keep]); nclasses.append(c[keep]); nscores.append(s[keep])
+    if not nclasses: return None, None, None
+    boxes = np.concatenate(nboxes); classes = np.concatenate(nclasses); scores = np.concatenate(nscores)
+    return boxes, classes, scores
+
+def draw_detections(image, boxes, scores, classes):
+    if boxes is None: return image
+    for box, score, cl in zip(boxes, scores, classes):
+        if CLASSES[cl] != "person": continue
+        top, left, right, bottom = [int(_b) for _b in box]
+        cv2.rectangle(image, (top, left), (right, bottom), (0, 255, 0), 2)
+        label = f'person: {score:.2f}'
+        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+        cv2.rectangle(image, (top, left - h - 5), (top + w, left), (0, 255, 0), -1)
+        cv2.putText(image, label, (top, left - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
+    return image
+
+# ========== Main class ==========
+class MultiCameraBirdView:
+    def __init__(self):
+        self.running = True
+        self.names = settings.camera_names
+        self.yamls = [os.path.join(os.getcwd(), "yaml", name + ".yaml") for name in self.names]
+        self.camera_models = [
+            FisheyeCameraModel(camera_file, camera_name)
+            for camera_file, camera_name in zip(self.yamls, self.names)
+        ]
+        self.which_cameras = {"front": 0, "back": 2, "left": 1, "right": 3}
+        self.caps = []
+        print("[INFO] Initializing cameras...")
+        for name in self.names:
+            cap = cv2.VideoCapture(self.which_cameras[name], cv2.CAP_V4L2)
+            cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
+            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
+            cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
+            if not cap.isOpened():
+                print(f"[ERROR] Cannot open {name} camera", file=sys.stderr)
+                self.running = False
+                return
+            self.caps.append(cap)
+        self.birdview = BirdView()
+        self._initialize_weights()
+        # Alert state
+        self.alerts = {name: False for name in self.names}
+        self.alert_lock = threading.Lock()
+        # YOLO model
+        try:
+            self.yolo_model = RKNN_model_container(YOLO_MODEL_PATH, target='rk3588')
+            self.co_helper = COCO_test_helper(enable_letter_box=True)
+            print("[INFO] YOLO model loaded")
+        except Exception as e:
+            print(f"[ERROR] Failed to load YOLO model: {e}")
+            self.yolo_model = None
+        # Shared buffers
+        self.undistorted_buffer = CameraFrameBuffer(self.names)
+        self.detection_buffer = DetectionResultBuffer(self.names)
+        self.shared_display_buffer = SharedFrameBuffer()  # for MJPEG
+        # Current-view control
+        self.current_view = "front"
+        self.current_view_lock = threading.Lock()
+        # Start the camera threads
+        self.camera_threads = []
+        for cap, model, name in zip(self.caps, self.camera_models, self.names):
+            thread = threading.Thread(target=self.camera_reader_thread, args=(cap, model, name), daemon=True)
+            thread.start()
+            self.camera_threads.append(thread)
+        # Start the AI detection thread
+        if self.yolo_model is not None:
+            self.ai_thread = threading.Thread(target=self.ai_detection_thread, daemon=True)
+            self.ai_thread.start()
+        else:
+            self.ai_thread = None
+
+    def _initialize_weights(self):
+        try:
+            images = [os.path.join(os.getcwd(), "images", name + ".png") for name in self.names]
+            static_frames = []
+            for img_path, cam_model in zip(images, self.camera_models):
+                img = cv2.imread(img_path)
+                if img is None:
+                    img = np.zeros((1080, 1920, 3), dtype=np.uint8)
+                img = cam_model.undistort(img)
+                img = cam_model.project(img)
+                img = cam_model.flip(img)
+                static_frames.append(img)
+            if len(static_frames) == 4:
+                self.birdview.get_weights_and_masks(static_frames)
+                print("[INFO] Weight matrices initialized")
+        except Exception as e:
+            print(f"[ERROR] Weight initialization failed: {e}")
+
+    def camera_reader_thread(self, cap, model, name):
+        """Camera read + undistort thread"""
+        while self.running:
+            ret, frame = cap.read()
+            if ret:
+                undistorted = model.undistort(frame)
+                self.undistorted_buffer.update(name, undistorted)
+            else:
+                print(f"[WARN] {name} camera read failed")
+                break
+
+    def detect_persons(self, image):
+        if self.yolo_model is None:
+            return image, [], []
+        try:
+            orig_h, orig_w = image.shape[:2]
+            pad_color = (0, 0, 0)
+            img_preprocessed = self.co_helper.letter_box(
+                im=image.copy(),
+                new_shape=(IMG_SIZE[1], IMG_SIZE[0]),
+                pad_color=pad_color
+            )
+            outputs = self.yolo_model.run([np.expand_dims(img_preprocessed, 0)])
+            boxes, classes, scores = post_process(outputs, ANCHORS)
+            if boxes is not None:
+                real_boxes = self.co_helper.get_real_box(boxes)
+                person_boxes, person_scores = [], []
+                for i in range(len(real_boxes)):
+                    if classes[i] < len(CLASSES) and CLASSES[classes[i]] == "person":
+                        box = real_boxes[i].copy()
+                        box = np.clip(box, [0, 0, 0, 0], [orig_w, orig_h, orig_w, orig_h])
+                        person_boxes.append(box)
+                        person_scores.append(scores[i])
+                if person_boxes:
+                    image = draw_detections(image, np.array(person_boxes), np.array(person_scores), np.zeros(len(person_boxes), dtype=int))
+                return image, person_boxes, person_scores
+            else:
+                return image, [], []
+        except Exception as e:
+            print(f"[ERROR] YOLO detection failed: {e}")
+            return image, [], []
+
+    def ai_detection_thread(self):
+        detection_interval = 3
+        frame_count = 0
+        while self.running:
+            with self.current_view_lock:
+                view = self.current_view
+            success, frame = self.undistorted_buffer.get(view)
+            if success:
+                frame_count += 1
+                if frame_count % detection_interval == 0:
+                    img_with_det, boxes, scores = self.detect_persons(frame)
+                    self.detection_buffer.update(view, img_with_det, boxes, scores)
+                    with self.alert_lock:
+                        if boxes:
+                            self.alerts[view] = True
+                            for v in self.alerts:
+                                if v != view:
+                                    self.alerts[v] = False
+                        else:
+                            for v in self.alerts:
+                                self.alerts[v] = False
+                else:
+                    self.detection_buffer.update(view, frame, [], [])
+            time.sleep(0.001)
+
+    def overlay_alert(self, birdview_img):
+        h, w = birdview_img.shape[:2]
+        overlay = birdview_img.copy()
+        alpha = 0.2
+        red = (0, 0, 200)
+        margin_f_b = int(min(h, w) * 0.07)
+        margin_l_r = int(min(h, w) * 0.15)
+        with self.alert_lock:
+            alerts = self.alerts.copy()
+        if alerts["front"]: cv2.rectangle(overlay, (0, 0), (w, margin_f_b), red, -1)
+        if alerts["back"]: cv2.rectangle(overlay, (0, h - margin_f_b), (w, h), red, -1)
+        if alerts["left"]: cv2.rectangle(overlay, (0, 0), (margin_l_r, h), red, -1)
+        if alerts["right"]: cv2.rectangle(overlay, (w - margin_l_r, 0), (w, h), red, -1)
+        return cv2.addWeighted(birdview_img, 1 - alpha, overlay, alpha, 0)
+
+    def run(self):
+        h_display, w_display = 720, 1280
+        w_bird = w_display // 3
+        w_single = w_display - w_bird
+        while self.running:
+            # 1. Fetch the four undistorted frames → project → stitch the bird's-eye view
+            processed_frames = []
+            for name in self.names:
+                success, undist_frame = self.undistorted_buffer.get(name)
+                if success:
+                    # Use UMat to accelerate warpPerspective
+                    uimg = cv2.UMat(undist_frame)
+                    uresult = cv2.warpPerspective(
+                        uimg,
+                        self.camera_models[self.names.index(name)].project_matrix,
+                        self.camera_models[self.names.index(name)].project_shape,
+                        flags=cv2.INTER_LINEAR,
+                        borderMode=cv2.BORDER_CONSTANT
+                    )
+                    p_frame = uresult.get()
+                    p_frame = self.camera_models[self.names.index(name)].flip(p_frame)
+                    processed_frames.append(p_frame)
+                else:
+                    processed_frames.append(np.zeros((600, 800, 3), dtype=np.uint8))
+            self.birdview.update_frames(processed_frames)
+            self.birdview.stitch_all_parts()
+            self.birdview.make_white_balance()
+            self.birdview.copy_car_image()
+            # 2. Fetch the AI detection result for the current view
+            with self.current_view_lock:
+                view = self.current_view
+            det_img, _, _ = self.detection_buffer.get(view)
+            if det_img is None:
+                det_img = np.zeros((h_display, w_single, 3), dtype=np.uint8)
+            # 3. Compose the display
+            bird_resized = cv2.resize(self.overlay_alert(self.birdview.image), (w_bird, h_display))
+            single_resized = cv2.resize(det_img, (w_single, h_display))
+            display = np.hstack((bird_resized, single_resized))
+            self.shared_display_buffer.update(display)
+            cv2.namedWindow('Video', cv2.WND_PROP_FULLSCREEN)
+            cv2.setWindowProperty('Video', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
+            cv2.imshow("Video", display)
+            key = cv2.waitKey(1) & 0xFF
+            if key == ord('q'):
+                self.running = False
+            elif key == ord('1'):
+                with self.current_view_lock:
+                    self.current_view = "front"
+            elif key == ord('2'):
+                with self.current_view_lock:
+                    self.current_view = "back"
+            elif key == ord('3'):
+                with self.current_view_lock:
+                    self.current_view = "left"
+            elif key == ord('4'):
+                with self.current_view_lock:
+                    self.current_view = "right"
+            elif key == ord('0'):
+                with self.alert_lock:
+                    for k in self.alerts:
+                        self.alerts[k] = False
+        for cap in self.caps:
+            cap.release()
+        cv2.destroyAllWindows()
+
+# ========== Helper class ==========
+class SharedFrameBuffer:
+    def __init__(self):
+        self._frame = None
+        self._lock = threading.Lock()
+        self._has_frame = False
+
+    def update(self, frame: np.ndarray):
+        with self._lock:
+            self._frame = frame.copy()
+            self._has_frame = True
+
+    def get_frame(self):
+        with self._lock:
+            if self._has_frame and self._frame is not None:
+                return True, self._frame.copy()
+            else:
+                return False, None
+
+# ========== Entry point ==========
+def main():
+    print("🚀 Starting the real-time four-camera surround view system...")
+    multi_cam = MultiCameraBirdView()
+    if not multi_cam.running:
+        print("[ERROR] Camera initialization failed")
+        return
+    # Start the MJPEG stream
+    try:
+        mjpeg_server = MJPEGServer(multi_cam.shared_display_buffer, host="0.0.0.0", port=8080)
+        mjpeg_server.start()
+        print("[INFO] MJPEG stream started: http://<IP>:8080")
+    except Exception as e:
+        print(f"[WARN] Failed to start the MJPEG stream: {e}")
+    multi_cam.run()
+
+if __name__ == "__main__":
+    main()
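
Because the composed display is also pushed into shared_display_buffer, the same picture the full-screen window shows can be pulled over the network. A minimal client sketch, assuming OpenCV was built with FFmpeg support for multipart MJPEG and that /video_feed is reachable without a login session (for example with AUTO_LOGIN enabled); the URL is a placeholder:

import cv2

# Hypothetical remote viewer: the address below stands in for the device's IP.
cap = cv2.VideoCapture("http://192.168.1.50:8080/video_feed")
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("remote surround view", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()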

shared_buffer.py (new file, 40 lines)

@@ -0,0 +1,40 @@
# shared_buffer.py
import threading
import numpy as np

class CameraFrameBuffer:
    """Thread-safe buffer for (undistorted) camera frames"""
    def __init__(self, cam_names):
        self.frames = {name: None for name in cam_names}
        self.locks = {name: threading.Lock() for name in cam_names}
        self.updated = {name: False for name in cam_names}

    def update(self, name, frame):
        with self.locks[name]:
            self.frames[name] = frame.copy()
            self.updated[name] = True

    def get(self, name):
        with self.locks[name]:
            if self.updated[name] and self.frames[name] is not None:
                return True, self.frames[name].copy()
            else:
                return False, None

class DetectionResultBuffer:
    """Thread-safe buffer for detection results"""
    def __init__(self, cam_names):
        self.results = {name: (None, [], []) for name in cam_names}  # (image, boxes, scores)
        self.lock = threading.Lock()

    def update(self, name, image, boxes, scores):
        with self.lock:
            self.results[name] = (image.copy() if image is not None else None, list(boxes), list(scores))

    def get(self, name):
        with self.lock:
            img, boxes, scores = self.results[name]
            if img is not None:
                return img.copy(), list(boxes), list(scores)
            return None, [], []
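
For reference, the intended usage pattern is one writer thread per camera calling update() while readers poll get() and must tolerate (False, None) before the first frame lands. A minimal sketch with synthetic frames (the camera names match the project's):

import threading
import time
import numpy as np
from shared_buffer import CameraFrameBuffer

names = ["front", "back", "left", "right"]
buf = CameraFrameBuffer(names)

def fake_camera(name):
    # Stand-in for camera_reader_thread: pushes a few synthetic frames.
    for _ in range(3):
        buf.update(name, np.full((4, 4, 3), 128, dtype=np.uint8))
        time.sleep(0.01)

threads = [threading.Thread(target=fake_camera, args=(n,), daemon=True) for n in names]
for t in threads:
    t.start()
for t in threads:
    t.join()

for n in names:
    ok, frame = buf.get(n)
    print(n, "frame ready:", ok, None if frame is None else frame.shape)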

(Several binary files and over-long diffs not shown; one added image, 326 KiB.)

static/layui/layui.js (new file; diff suppressed because one or more lines are too long)

@@ -11,7 +11,7 @@ class CaptureThread(BaseThread):
                  device_id,
                  flip_method=0,
                  drop_if_full=True,
-                 api_preference=cv2.CAP_ANY,
+                 api_preference=cv2.CAP_V4L2,
                  resolution=None,
                  parent=None):
         """

@@ -76,10 +76,33 @@ class FisheyeCameraModel(object):
         result = cv2.remap(image, *self.undistort_maps, interpolation=cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_CONSTANT)
         return result
+    # def undistort(self, image):
+    #     uimg = cv2.UMat(image)
+    #     uresult = cv2.remap(
+    #         uimg,
+    #         *self.undistort_maps,
+    #         interpolation=cv2.INTER_LINEAR,
+    #         borderMode=cv2.BORDER_CONSTANT,
+    #         borderValue=(0, 0, 0)
+    #     )
+    #     return uresult.get()
+
+    # def project(self, image):
+    #     result = cv2.warpPerspective(image, self.project_matrix, self.project_shape)
+    #     return result
     def project(self, image):
-        result = cv2.warpPerspective(image, self.project_matrix, self.project_shape)
-        return result
+        # Convert to UMat (triggers the GPU path)
+        uimg = cv2.UMat(image)
+        uresult = cv2.warpPerspective(
+            uimg,
+            self.project_matrix,
+            self.project_shape,
+            flags=cv2.INTER_LINEAR,
+            borderMode=cv2.BORDER_CONSTANT,
+            borderValue=(0, 0, 0)
+        )
+        return uresult.get()  # back to a numpy array for downstream use

     def flip(self, image):
         if self.camera_name == "front":
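
project() now routes warpPerspective through cv2.UMat, OpenCV's Transparent API handle, so the call can run via OpenCL on the Mali GPU once cv2.ocl.setUseOpenCL(True) is in effect. Whether the round-trip (upload, warp, uresult.get()) beats the plain CPU path depends on image size and driver; a rough, self-contained timing sketch (identity homography as a stand-in for project_matrix, timings are illustrative):

import time
import cv2
import numpy as np

cv2.ocl.setUseOpenCL(True)
print("OpenCL active:", cv2.ocl.useOpenCL())

img = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
M = np.eye(3, dtype=np.float64)  # placeholder homography

def bench(fn, n=20):
    fn()  # warm up (first OpenCL call compiles kernels)
    t0 = time.perf_counter()
    for _ in range(n):
        fn()
    return (time.perf_counter() - t0) / n * 1000

cpu = bench(lambda: cv2.warpPerspective(img, M, (1920, 1080)))
uimg = cv2.UMat(img)
gpu = bench(lambda: cv2.warpPerspective(uimg, M, (1920, 1080)).get())
print(f"ndarray: {cpu:.1f} ms/frame, UMat round-trip: {gpu:.1f} ms/frame")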

@@ -8,6 +8,7 @@ def gstreamer_pipeline(cam_id=0,
                        framerate=30,
                        format="YUYV",
                        ):
+    print("11111111233333333333545656646464646")
     """
     Use libgstreamer to open csi-cameras.
     """
@@ -17,7 +18,6 @@
         "videoconvert ! "
         "video/x-raw,format=YUYV ! "  # convert to a format OpenCV can consume directly
         "appsink"
     )

@@ -1,18 +1,54 @@
 <!DOCTYPE html>
 <html>
-<head><title>Surround View System</title></head>
-<body style="background:#111;color:white;text-align:center;">
-<h2>🚗 Four cameras + bird's-eye surround view</h2>
-<div>
-    <h3>Four-way split screen</h3>
-    <img src="/video_feed_original" width="800"/>
-</div>
-<div style="margin-top:20px;">
-    <h3>Bird's Eye View</h3>
-    <img src="/video_feed_birdview" width="500"/>
-</div>
-<p>Press Ctrl+C to stop the service | user: admin / password: password123</p>
+<head>
+    <meta charset="utf-8">
+    <title>Surround View System</title>
+    <link rel="stylesheet" href="/static/layui/css/layui.css">
+    <style>
+        html, body { margin: 0; padding: 0; height: 100%; overflow: hidden; }
+        #app { display: flex; height: 100vh; }
+        #sidebar { width: 200px; background: #393D49; color: white; padding: 20px 0; }
+        #main { flex: 1; position: relative; background: black; }
+        #video { width: 100%; height: 100%; object-fit: contain; }
+        .menu-item { padding: 12px 20px; cursor: pointer; }
+        .menu-item:hover { background: #4B515D; }
+        .active { background: #009688 !important; }
+    </style>
+</head>
+<body>
+<div id="app">
+    <div id="sidebar">
+        <div class="menu-item active" data-view="all">Bird's-eye panorama</div>
+        <div class="menu-item" data-view="front">Front view</div>
+        <div class="menu-item" data-view="back">Rear view</div>
+        <div class="menu-item" data-view="left">Left view</div>
+        <div class="menu-item" data-view="right">Right view</div>
+        <hr style="border-color:#5a5e6a;margin:15px 10px">
+        <div class="menu-item" onclick="logout()">Log out</div>
+    </div>
+    <div id="main">
+        <img id="video" src="/video_feed" />
+    </div>
+</div>
+
+<script src="/static/layui/layui.js"></script>
+<script>
+    // Switch views (optional: later drive the backend's current_view via WebSocket or a REST API)
+    document.querySelectorAll('.menu-item[data-view]').forEach(item => {
+        item.addEventListener('click', function() {
+            document.querySelector('.active').classList.remove('active');
+            this.classList.add('active');
+            const view = this.getAttribute('data-view');
+            // TODO: send an AJAX request so the backend switches current_view
+            // fetch('/api/set_view?view=' + view);
+        });
+    });
+
+    function logout() {
+        if (confirm('Log out?')) {
+            window.location.href = '/logout';
+        }
+    }
+</script>
 </body>
 </html>
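
The TODO above points at a backend endpoint that does not exist in this commit. A sketch of one possible shape for it, assuming MJPEGServer grows a set_view route and an on_view_change callback wired to MultiCameraBirdView; the route name, parameter checks, and wiring are all hypothetical:

from flask import request, jsonify

# Hypothetical additions to MJPEGServer.__init__:
#   self.on_view_change = on_view_change  # callback supplied by the caller
#   self.app.add_url_rule('/api/set_view', 'set_view', self.set_view)
def set_view(self):
    view = request.args.get('view', '')
    if view not in ("all", "front", "back", "left", "right"):
        return jsonify(ok=False, error="unknown view"), 400
    if self.on_view_change is not None:
        self.on_view_change(view)  # e.g. sets MultiCameraBirdView.current_view under its lock
    return jsonify(ok=True, view=view)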

templates/login.html (new file, 36 lines)

@@ -0,0 +1,36 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>Login - Surround View System</title>
    <link rel="stylesheet" href="/static/layui/css/layui.css">
    <style>
        body { background: #f2f2f2; }
        .login-box {
            width: 400px;
            margin: 100px auto;
            padding: 30px;
            background: white;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
        }
    </style>
</head>
<body>
<div class="login-box">
    <h2 style="text-align:center">Surround View Monitoring System</h2>
    <form class="layui-form" action="/login" method="post">
        <div class="layui-form-item">
            <input type="text" name="username" required lay-verify="required" placeholder="Username" autocomplete="off" class="layui-input">
        </div>
        <div class="layui-form-item">
            <input type="password" name="password" required lay-verify="required" placeholder="Password" class="layui-input">
        </div>
        <div class="layui-form-item">
            <button class="layui-btn layui-btn-fluid" lay-submit>Log in</button>
        </div>
    </form>
</div>
<script src="/static/layui/layui.js"></script>
</body>
</html>
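
The login flow can be exercised without a browser; a sketch using requests against the default admin/admin credentials and a locally running server (host and port are placeholders):

import requests

s = requests.Session()
r = s.post("http://localhost:8080/login",
           data={"username": "admin", "password": "admin"})
print("login status:", r.status_code)
# With the session cookie set, the stream endpoint should return 200.
r = s.get("http://localhost:8080/video_feed", stream=True)
print("video_feed status:", r.status_code)
r.close()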

web.py (142 changed lines)

@@ -12,6 +12,18 @@ sys.path.append(os.path.dirname(__file__))  # make sure py_utils can be imported
 from py_utils.coco_utils import COCO_test_helper
 from py_utils.rknn_executor import RKNN_model_container  # assumes RKNN is used
+from mjpeg_streamer import MJPEGServer
+from multiprocessing import Pool, Manager
+import cv2
+
+# Enable OpenCL (Mali-G610)
+if cv2.ocl.haveOpenCL():
+    cv2.ocl.setUseOpenCL(True)
+    print("✅ OpenCL is ON — using Mali-G610 GPU for acceleration")
+else:
+    print("⚠️ OpenCL not available")
+
 # ------ YOLO configuration -----------

@@ -30,6 +42,30 @@ with open(ANCHORS_FILE, 'r') as f:
     ANCHORS = np.array(values).reshape(3, -1, 2).tolist()
+
+import threading
+import numpy as np
+
+class SharedFrameBuffer:
+    def __init__(self):
+        self._frame = None
+        self._lock = threading.Lock()
+        self._has_frame = False
+
+    def update(self, frame: np.ndarray):
+        """Called by the main thread: store the latest frame"""
+        with self._lock:
+            self._frame = frame.copy()
+            self._has_frame = True
+
+    def get_frame(self):
+        """Called by the YUYV thread: fetch the latest frame"""
+        with self._lock:
+            if self._has_frame and self._frame is not None:
+                return True, self._frame.copy()
+            else:
+                return False, None
+
 # ---------- YOLO processing functions ----------
 def filter_boxes(boxes, box_confidences, box_class_probs):
     box_confidences = box_confidences.reshape(-1)

@@ -44,6 +80,7 @@ def filter_boxes(boxes, box_confidences, box_class_probs):
     return boxes, classes, scores
+
 def nms_boxes(boxes, scores):
     x = boxes[:, 0]
     y = boxes[:, 1]

@@ -125,6 +162,7 @@ def post_process(input_data, anchors):
     boxes, classes, scores = filter_boxes(boxes, scores, classes_conf)
+
     nboxes, nclasses, nscores = [], [], []
     for c in set(classes):
         inds = np.where(classes == c)
         b = boxes[inds]

@@ -207,11 +245,12 @@ class MultiCameraBirdView:
         print("[INFO] Initializing cameras...")
         for name in self.names:
-            cap = cv2.VideoCapture(self.which_cameras[name], cv2.CAP_ANY)
+            cap = cv2.VideoCapture(self.which_cameras[name], cv2.CAP_V4L2)
             cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))
             cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
             cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
             cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # minimal buffering to cut latency
+            # cap.set(cv2.CAP_PROP_FPS, 30)  # set the frame rate to 30 FPS
             if not cap.isOpened():
                 print(f"[ERROR] Cannot open {name} camera (device {self.which_cameras[name]})", file=sys.stderr)
                 self.running = False

@@ -240,6 +279,8 @@ class MultiCameraBirdView:
             print(f"[ERROR] Failed to load YOLO model: {e}")
             self.yolo_model = None
+
+        self.shared_buffer = SharedFrameBuffer()  # 👈 new
     def overlay_alert(self, birdview_img):
         """Overlay translucent red alert regions on the bird's-eye view"""
         h, w = birdview_img.shape[:2]

@@ -265,6 +306,7 @@ class MultiCameraBirdView:
         return blended
+
     def detect_persons(self, image):
         """Detect persons in the image with the YOLO model"""
         if self.yolo_model is None:
             return image, [], []

@@ -280,7 +322,7 @@ class MultiCameraBirdView:
             new_shape=(IMG_SIZE[1], IMG_SIZE[0]),
             pad_color=pad_color
         )
-        img_preprocessed = cv2.cvtColor(img_preprocessed, cv2.COLOR_BGR2RGB)
+        # img_preprocessed = cv2.cvtColor(img_preprocessed, cv2.COLOR_BGR2RGB)
         # inference
         outputs = self.yolo_model.run([np.expand_dims(img_preprocessed, 0)])

@@ -354,7 +396,7 @@ class MultiCameraBirdView:
         return frame
     def process_frame_undistort(self, frame, model):
-        """Process once: undistort + project + flip"""
+        """Process once: undistort"""
         frame = model.undistort(frame)
         return frame

@@ -366,20 +408,18 @@ class MultiCameraBirdView:
         while self.running:
             raw_frames = {}
             processed_frames = []
-            valid = True
             for i, (cap, model, name) in enumerate(zip(self.caps, self.camera_models, self.names)):
                 ret, frame = cap.read()
-                if not ret or frame is None:
-                    print(f"[WARN] Skipping {name}")
-                    valid = False
-                    break
-                raw_frames[name] = frame.copy()
+                raw_frames[name] = frame
+                # self.shared_buffer.update(raw_frames[current_view])
                 p_frame = self.process_frame_once(frame, model)
                 processed_frames.append(p_frame)
-            if not valid or len(processed_frames) != 4:
-                continue
             # update the bird's-eye view
             self.birdview.update_frames(processed_frames)

@@ -394,6 +434,7 @@ class MultiCameraBirdView:
             )
             # run person detection on the single-view image
+
             frame_count += 1
             if frame_count % detection_interval == 0 and self.yolo_model is not None:
                 single_img, person_boxes, person_scores = self.detect_persons(single_img)

@@ -401,7 +442,7 @@ class MultiCameraBirdView:
             # trigger alerts automatically from the detections
             if person_boxes:
                 # alerts could be keyed to person position and count
-                # simple example here: any detected person triggers the current view's alert
                 self.alerts[current_view] = True
                 # reset the alerts of the other views
                 for view in self.alerts:

@@ -412,10 +453,9 @@ class MultiCameraBirdView:
                 for view in self.alerts:
                     self.alerts[view] = False
-            birdview_img = self.birdview.image.copy()
             # overlay the alert regions
-            birdview_with_alert = self.overlay_alert(birdview_img)
+            birdview_with_alert = self.overlay_alert(self.birdview.image)
             # compose the display: bird's-eye view left (1/3), single view right (2/3)
             h_display, w_display = 720, 1280

@@ -426,10 +466,13 @@ class MultiCameraBirdView:
             single_resized = cv2.resize(single_img, (w_single, h_display))
             display = np.hstack((bird_resized, single_resized))
             # add status info to the display window
-            info_text = f"View: {current_view} | Persons detected: {len(person_boxes) if 'person_boxes' in locals() else 0}"
-            cv2.putText(display, info_text, (10, 30),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
+            # info_text = f"View: {current_view} | Persons detected: {len(person_boxes) if 'person_boxes' in locals() else 0}"
+            # cv2.putText(display, info_text, (10, 30),
+            #             cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
             # full-screen display
             cv2.namedWindow('Video', cv2.WND_PROP_FULLSCREEN)

@@ -437,33 +480,33 @@ class MultiCameraBirdView:
             cv2.imshow("Video", display)
             key = cv2.waitKey(1) & 0xFF
-            if key == ord('q'):
-                self.running = False
-                break
-            elif key == ord('1'):
-                current_view = "front"
-            elif key == ord('2'):
-                current_view = "back"
-            elif key == ord('3'):
-                current_view = "left"
-            elif key == ord('4'):
-                current_view = "right"
-            # new: alert controls
-            elif key == ord('5'):
-                self.alerts["front"] = True
-            elif key == ord('6'):
-                self.alerts["back"] = True
-            elif key == ord('7'):
-                self.alerts["left"] = True
-            elif key == ord('8'):
-                self.alerts["right"] = True
-            elif key == ord('0'):
-                # clear all alerts
-                for k in self.alerts:
-                    self.alerts[k] = False
-            elif key == ord('d'):
-                # manually trigger one detection pass
-                single_img, person_boxes, person_scores = self.detect_persons(single_img)
+            # if key == ord('q'):
+            #     self.running = False
+            #     break
+            # elif key == ord('1'):
+            #     current_view = "front"
+            # elif key == ord('2'):
+            #     current_view = "back"
+            # elif key == ord('3'):
+            #     current_view = "left"
+            # elif key == ord('4'):
+            #     current_view = "right"
+            # # new: alert controls
+            # elif key == ord('5'):
+            #     self.alerts["front"] = True
+            # elif key == ord('6'):
+            #     self.alerts["back"] = True
+            # elif key == ord('7'):
+            #     self.alerts["left"] = True
+            # elif key == ord('8'):
+            #     self.alerts["right"] = True
+            # elif key == ord('0'):
+            #     # clear all alerts
+            #     for k in self.alerts:
+            #         self.alerts[k] = False
+            # elif key == ord('d'):
+            #     # manually trigger one detection pass
+            #     single_img, person_boxes, person_scores = self.detect_persons(single_img)
         for cap in self.caps:
             cap.release()

@@ -478,12 +521,21 @@ def main():
     print("  0 : clear all alerts")
     print("  d : manually trigger person detection")
     print("  q : quit")
     multi_cam = MultiCameraBirdView()
+
+    # ===== start the video stream =====
+    try:
+        from mjpeg_streamer import MJPEGServer
+        mjpeg_server = MJPEGServer(multi_cam.shared_buffer, host="0.0.0.0", port=8080)
+        mjpeg_server.start()
+    except Exception as e:
+        print(f"[WARN] Failed to start the YUYV stream: {e}")
+
     if multi_cam.running:
         multi_cam.run()
     else:
         print("[ERROR] Camera initialization failed")

 if __name__ == "__main__":
     main()