迁移:框选数据改为从 config 读取

This commit is contained in:
zqc
2026-02-27 14:39:43 +08:00
parent 16a55fadd7
commit 5670b702f9

View File

@@ -25,6 +25,9 @@ from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX # 主
from yolox.tracker.byte_tracker import BYTETracker
from utils.logger import get_logger
logger = get_logger(__name__)
# ========================= 配置区 =========================
# Kadian 模型路径与ROI可根据实际情况修改
detector_model_path = 'YOLO_Weight/prisoner_model.onnx'
@@ -38,9 +41,15 @@ RTSP_TARGET_FPS = 10.0
# 新增:告警推送频率限制(秒)
ALERT_PUSH_INTERVAL = 5.0 # 相同action 5秒内仅推送一次
# 默认 ROI 相对坐标(区域多边形)
DEFAULT_ROI_RELATIVE = [(0.47, 0.35), (0.5, 0.35), (0.7, 1.0), (0.3, 1.0)]
class TrajectoryDetector:
def __init__(self):
def __init__(self, params=None):
# 摄像头额外参数
self.params = params if params is not None else {}
# 模型加载
self.police_prisoner_detector = YOLOv8_ONNX(detector_model_path, conf_threshold=0.5, iou_threshold=0.45,
input_size=input_size)
@@ -75,12 +84,12 @@ class TrajectoryDetector:
self.frame_thresh_prisoner = int(self.TIME_THRESHOLD_PRISONER * self.fps)
self.frame_buffer_prisoner = int(self.TIME_TOLERANCE_PRISONER * self.fps)
print(f"\n超参数设置:")
print(f" FPS: {self.fps:.2f}")
print(f" 判定 'police Detected' 需累计检测: {self.frame_thresh_police}")
print(f" 警察丢失缓冲帧数: {self.frame_buffer_police}")
print(f" 判定 'prisoner Detected' 需累计检测: {self.frame_thresh_prisoner}")
print(f" 犯人丢失缓冲帧数: {self.frame_buffer_prisoner}")
logger.debug(f"\n超参数设置:")
logger.debug(f" FPS: {self.fps:.2f}")
logger.debug(f" 判定 'police Detected' 需累计检测: {self.frame_thresh_police}")
logger.debug(f" 警察丢失缓冲帧数: {self.frame_buffer_police}")
logger.debug(f" 判定 'prisoner Detected' 需累计检测: {self.frame_thresh_prisoner}")
logger.debug(f" 犯人丢失缓冲帧数: {self.frame_buffer_prisoner}")
# ==========================================
# 状态变量初始化
@@ -102,10 +111,13 @@ class TrajectoryDetector:
# =========================
# ⚠️ 改为相对坐标0-1区间按 [x, y] 格式x/y 范围 0~1
# 示例:原 (50,100) 在 960x480 分辨率下 → x=50/960≈0.052, y=100/480≈0.208
# ROI 处理:优先从 params 获取,否则使用默认值 DEFAULT_ROI_RELATIVE
roi_points = self.params.get('roi_points', DEFAULT_ROI_RELATIVE)
self.route_rois = [
{
"name": "zone", # 单一区域,犯人离开即报警
"polygon_rel": [(0.47, 0.35), (0.5, 0.35), (0.7, 1.0), (0.3, 1.0)] # 相对坐标,可自定义
"polygon_rel": roi_points # 相对坐标从params获取或使用默认值
}
]
@@ -113,7 +125,7 @@ class TrajectoryDetector:
self.width = 0
self.height = 0
print(f"相对坐标 ROI: {self.route_rois}")
logger.debug(f"相对坐标 ROI: {self.route_rois}")
# 每个犯人track_id一套状态
self.prisoner_route_state = {}
@@ -498,64 +510,73 @@ class FrameProcessorWorker(threading.Thread):
except queue.Empty:
continue
cam_id = item["camera_id"]
ts = item["timestamp"]
frame = item["frame"]
# 抽帧控制
if ts - self.last_ts.get(cam_id, 0) < target_interval:
self.raw_queue.task_done()
continue
self.last_ts[cam_id] = ts
# 获取检测器实例
if cam_id not in self.trajectory_detectors:
self.trajectory_detectors[cam_id] = TrajectoryDetector()
detector = self.trajectory_detectors[cam_id]
# 执行检测
result = detector.process_frame(frame.copy(), cam_id, ts)
result_img = result["image"]
result_type = result["alerts"]
# ========= 核心修改过滤5秒内重复的action =========
# 初始化当前摄像头的推送时间记录
if cam_id not in self.last_alert_push_time:
self.last_alert_push_time[cam_id] = {}
# 筛选出符合推送条件的action5秒内未推送过
push_actions = []
current_time = time.time()
for alert in result_type:
action = alert['action']
last_push = self.last_alert_push_time[cam_id].get(action, 0)
# 检查是否超过推送间隔
if current_time - last_push >= ALERT_PUSH_INTERVAL:
push_actions.append(action)
# 更新该action的最后推送时间
self.last_alert_push_time[cam_id][action] = current_time
# 通过 WebSocket 发送帧结果
try:
img_b64 = self._encode_image_to_base64(result_img)
except Exception as e:
print(f"[ERROR] Encode image failed: {e}")
img_b64 = None
cam_id = item["camera_id"]
ts = item["timestamp"]
frame = item["frame"]
if img_b64 is not None:
msg = {
"msg_type": "frame",
"camera_id": 1,
"timestamp": ts,
"result_type": push_actions,
"image_base64": img_b64,
}
# 抽帧控制
if ts - self.last_ts.get(cam_id, 0) < target_interval:
# self.raw_queue.task_done()
continue
self.last_ts[cam_id] = ts
# 获取检测器实例
if cam_id not in self.trajectory_detectors:
camera_config = self.cameras.get(cam_id)
params = camera_config.params if camera_config else None
self.trajectory_detectors[cam_id] = TrajectoryDetector(params)
detector = self.trajectory_detectors[cam_id]
# 执行检测
result = detector.process_frame(frame.copy(), cam_id, ts)
result_img = result["image"]
result_type = result["alerts"]
# ========= 核心修改过滤5秒内重复的action =========
# 初始化当前摄像头的推送时间记录
if cam_id not in self.last_alert_push_time:
self.last_alert_push_time[cam_id] = {}
# 筛选出符合推送条件的action5秒内未推送过
push_actions = []
current_time = time.time()
for alert in result_type:
action = alert['action']
last_push = self.last_alert_push_time[cam_id].get(action, 0)
# 检查是否超过推送间隔
if current_time - last_push >= ALERT_PUSH_INTERVAL:
push_actions.append(action)
# 更新该action的最后推送时间
self.last_alert_push_time[cam_id][action] = current_time
# 通过 WebSocket 发送帧结果
try:
self.ws_queue.put(msg, timeout=1.0)
# if push_actions and len(push_actions) > 0:
# self.ws_queue_2.put(msg, timeout=1.0)
except queue.Full:
print("[WARN] ws_send_queue full, drop frame message")
img_b64 = self._encode_image_to_base64(result_img)
except Exception as e:
logger.error(f"[ERROR] Encode image failed: {e}")
img_b64 = None
self.raw_queue.task_done()
if img_b64 is not None:
msg = {
"msg_type": "frame",
"camera_id": 1,
"timestamp": ts,
"result_type": push_actions,
"image_base64": img_b64,
}
try:
self.ws_queue.put(msg, timeout=1.0)
# if push_actions and len(push_actions) > 0:
# self.ws_queue_2.put(msg, timeout=1.0)
except queue.Full:
logger.warning("[WARN] ws_send_queue full, drop frame message")
except Exception as e:
logger.error(
f"[ERROR] Frame processing failed for camera {cam_id if 'cam_id' in locals() else 'unknown'}: {e}")
logger.exception("Exception details:") # 打印完整的堆栈跟踪
# 继续处理下一帧,不要退出循环
finally:
self.raw_queue.task_done()