Compare commits

...

19 Commits

Author SHA1 Message Date
zqc
a488ee812f 完成初版选点提取坐标 2026-04-24 10:03:46 +08:00
zqc
a38b27a78d indoor_biz增加帧回溯(未测试) 2026-04-17 10:17:31 +08:00
zqc
a327dd0339 新增base_detector,将帧回溯放入其中 2026-04-17 10:06:36 +08:00
1f00f8f3f7 删除报警相关逻辑 2026-04-14 11:20:02 +08:00
f2e2569b7c 更新跟踪框匹配逻辑 2026-04-13 20:04:16 +08:00
e7e2b86cd7 添加我的代码 2026-04-10 21:43:08 +08:00
zqc
4259774365 indoor biz增加todo 2026-04-10 15:58:48 +08:00
zqc
aa4f167840 完成视频浏览 2026-04-02 13:17:30 +08:00
zqc
68d6849120 完成final接口适配 2026-04-02 12:30:58 +08:00
zqc
bfbe69eeb5 完成获取子节点接口适配 2026-04-02 12:19:20 +08:00
zqc
1aecfba186 新增获取视频地址页面,雏形未修改 2026-04-02 12:10:20 +08:00
zqc
8598098d32 修改indoor警报 2026-04-01 17:16:41 +08:00
zqc
59ab409d52 模型、区域点、颜色从配置中读取 2026-04-01 17:00:36 +08:00
zqc
54a906211a 删除错误引用 2026-04-01 15:50:35 +08:00
zqc
c22ff36194 引入indoor biz 2026-04-01 15:50:20 +08:00
7c9055c3b2 Merge branch 'master' of https://gitea.swiftsnake.cn/yipai-tech/SupervisorAI 2026-03-31 16:28:57 +08:00
164d9e4744 指挥室新需求: 仅检查 Nobody 这一种违规 2026-03-31 16:23:13 +08:00
zqc
af8a6f7822 web page预览图最大化 2026-03-12 10:50:51 +08:00
zqc
4f398a6593 增加last_seen_time 2026-03-12 10:50:27 +08:00
13 changed files with 1992 additions and 444 deletions

62
biz/base_detector.py Normal file
View File

@@ -0,0 +1,62 @@
from collections import deque
from typing import Optional
import numpy as np
class BaseDetector:
    """
    Base class for detectors.

    Provides a shared frame-rollback cache so subclasses can retrieve a
    frame captured N seconds before an event (e.g. to attach the frame
    from just before an alert fired).
    """

    def __init__(self):
        # Rollback cache; subclasses call init_frame_buffer() when they need it.
        self._frame_buffer: Optional[deque] = None

    def init_frame_buffer(self, buffer_seconds: float, fps: float) -> None:
        """
        Initialise the frame-rollback cache.

        Args:
            buffer_seconds: how many seconds of frames to keep.
            fps: video frame rate.
        """
        # Guard: int(buffer_seconds * fps) can round down to 0 for very small
        # windows, and deque(maxlen=0) silently discards every frame. Always
        # keep at least one slot.
        maxlen = max(1, int(buffer_seconds * fps))
        self._frame_buffer = deque(maxlen=maxlen)

    def append_frame(self, frame: np.ndarray, timestamp: float) -> None:
        """
        Push the current frame into the cache (no-op if uninitialised).

        Args:
            frame: current frame image (copied, so the caller may keep drawing on it).
            timestamp: timestamp of the current frame.
        """
        if self._frame_buffer is not None:
            self._frame_buffer.append({
                'timestamp': timestamp,
                'frame': frame.copy(),
            })

    def find_target_frame(self, target_time_sec: float) -> Optional[np.ndarray]:
        """
        Return the cached frame whose timestamp is closest to the target time.

        Args:
            target_time_sec: target timestamp.

        Returns:
            The closest cached frame image, or None if the cache is empty
            (or was never initialised).
        """
        if not self._frame_buffer:
            return None
        # min() keeps the first of any ties, matching a strict "<" scan.
        closest = min(self._frame_buffer,
                      key=lambda entry: abs(entry['timestamp'] - target_time_sec))
        return closest['frame']

View File

@@ -1,16 +1,14 @@
import cv2 import cv2
import numpy as np import numpy as np
from typing import Dict, Any from typing import Dict, Any
import threading
import queue
from collections import deque
from biz.base_frame_processor import BaseFrameProcessorWorker from biz.base_frame_processor import BaseFrameProcessorWorker
from biz.base_detector import BaseDetector
# -------------------------- Kadian 检测相关导入 -------------------------- # -------------------------- Kadian 检测相关导入 --------------------------
from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX # 主检测模型(人/车/后备箱/手机) from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX # 主检测模型(人/车/后备箱/手机)
from algorithm.common.npu_yolo_pose_onnx import YOLOv8_Pose_ONNX # Pose 专用模型 # from algorithm.common.npu_yolo_pose_onnx import YOLOv8_Pose_ONNX # Pose 专用模型
from yolox.tracker.byte_tracker import BYTETracker from yolox.tracker.byte_tracker import BYTETracker
from utils.logger import get_logger from utils.logger import get_logger
@@ -55,8 +53,9 @@ PERSON_CAR_INPUT_SIZE = 640
RTSP_TARGET_FPS = 10.0 RTSP_TARGET_FPS = 10.0
# ========================= Kadian TrafficMonitor精简版专为服务设计 ========================= # ========================= Kadian TrafficMonitor精简版专为服务设计 =========================
class KadianDetector: class KadianDetector(BaseDetector):
def __init__(self, params=None): def __init__(self, params=None):
super().__init__()
# 摄像头额外参数 # 摄像头额外参数
self.params = params if params is not None else {} self.params = params if params is not None else {}
@@ -147,8 +146,8 @@ class KadianDetector:
self.nobody_frames = 0 # 累计无人在场帧数 self.nobody_frames = 0 # 累计无人在场帧数
self.only_one_frames = 0 # 累计单人在场帧数 self.only_one_frames = 0 # 累计单人在场帧数
self.max_car_frames = int((15.0 + self.TIME_TOLERANCE_CAR) * self.fps) # buffer_seconds = 15.0 + self.TIME_TOLERANCE_CAR
self.frame_buffer_ignore_untrunk = deque(maxlen=self.max_car_frames) self.init_frame_buffer(buffer_seconds, self.fps)
self.untrunk_rollback_time = 12.0 # 未检查后备箱需要回溯的时间 self.untrunk_rollback_time = 12.0 # 未检查后备箱需要回溯的时间
self.ignored_rollback_time = 12.0 # 漏检需要回溯的时间 self.ignored_rollback_time = 12.0 # 漏检需要回溯的时间
@@ -219,21 +218,6 @@ class KadianDetector:
x1, y1, x2, y2 = box x1, y1, x2, y2 = box
return x1 < px < x2 and y1 < py < y2 return x1 < px < x2 and y1 < py < y2
def find_target_frame(self, target_time_sec):
target_frame = None
min_time_diff = float('inf')
for buffered in self.frame_buffer_ignore_untrunk:
time_diff = abs(buffered['timestamp'] - target_time_sec)
if time_diff < min_time_diff:
min_time_diff = time_diff
target_frame = buffered['frame']
# 如果没找到,返回最早的帧
if target_frame is None and len(self.frame_buffer_ignore_untrunk) > 0:
target_frame = self.frame_buffer_ignore_untrunk[0]['frame']
return target_frame
def process_frame(self, frame, camera_id: int, timestamp: float) -> Dict[str, Any]: def process_frame(self, frame, camera_id: int, timestamp: float) -> Dict[str, Any]:
h, w = frame.shape[:2] h, w = frame.shape[:2]
self.width, self.height = w, h self.width, self.height = w, h
@@ -355,6 +339,7 @@ class KadianDetector:
self.car_enter_pending[tid] = { self.car_enter_pending[tid] = {
'first_seen': self.current_frame_idx, 'first_seen': self.current_frame_idx,
'last_seen': self.current_frame_idx, 'last_seen': self.current_frame_idx,
'last_seen_time': current_time_sec,
'frames_count': 1, 'frames_count': 1,
'last_box': [x1, y1, x2, y2], 'last_box': [x1, y1, x2, y2],
} }
@@ -401,11 +386,7 @@ class KadianDetector:
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
# 每帧保存到缓存(移到循环外,确保每帧只写入一次) # 每帧保存到缓存(移到循环外,确保每帧只写入一次)
self.frame_buffer_ignore_untrunk.append({ self.append_frame(frame, current_time_sec)
'frame_idx': self.current_frame_idx,
'timestamp': current_time_sec,
'frame': frame.copy(),
})
# ========================================== # ==========================================
# 关联分析: 哪个后备箱属于哪辆车? # 关联分析: 哪个后备箱属于哪辆车?

487
biz/prison/indoor_biz.py Normal file
View File

@@ -0,0 +1,487 @@
import cv2
import numpy as np
import time
# import requests
from collections import deque
from biz.base_detector import BaseDetector
from biz.base_frame_processor import BaseFrameProcessorWorker
from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX
from yolox.tracker.byte_tracker import BYTETracker
from common.constants import MODEL_ROOT_PATH
# ========================= Corridor-scene configuration =========================
DETECT_MODEL_PATH = 'YOLO_Weight/kanshousuo.onnx'  # prisoner-detection ONNX model path
INPUT_SIZE = 640  # model input size
RTSP_FPS = 10  # target FPS of the video stream
ALERT_PUSH_INTERVAL = 5  # identical alerts are pushed at most once per 5 s
# ALERT_PUSH_URL = "http://123.57.151.210:10000/picenter/websocket/test/process"
# Disappearance rule: consecutive undetected frames after a centre point
# vanishes inside a ROI before an alert fires.
# NOTE(review): the original comment said "1.0 s" but the value is 0.5 s worth
# of frames — confirm the intended duration.
ROI_LOST_FRAMES_THRESH = int(0.5 * RTSP_FPS)  # TODO: switch from frame count to elapsed time
# ===== Default ROI areas, used when config.yaml provides no door_rois =====
# Points are relative coordinates (fractions of frame width/height); colours are BGR.
DEFAULT_DOOR_ROIS = {
    "left": {
        "points": [[0.195, 0.245], [0.42, 0], [0.421, 0.185], [0.248, 0.8]],
        "color": [255, 0, 0]
    },
    "right": {
        "points": [[0.575, 0.], [0.81, 0.22], [0.78, 0.8], [0.575, 0.185]],
        "color": [255, 0, 0]
    }
}
# ==================================================================================
class PrisonerDoorDetector(BaseDetector):
    """
    Detects a prisoner's bounding-box centre disappearing inside a door ROI.

    Per-frame pipeline: draw ROIs -> YOLO detection -> ByteTrack ->
    fuse tracks with detections -> match to persistent position-based
    targets -> alert when a target's centre vanishes inside a ROI for
    ROI_LOST_FRAMES_THRESH consecutive frames.
    """

    def __init__(self, params=None):
        super().__init__()
        self.params = params or {}
        # 0. Parse ROI configuration from params; fall back to defaults.
        door_rois_config = self.params.get('door_rois', DEFAULT_DOOR_ROIS)
        self.roi_config = {}   # {roi_name: relative polygon points}
        self.roi_colors = {}   # {roi_name: BGR colour tuple}
        for door_name, door_cfg in door_rois_config.items():
            self.roi_config[door_name] = door_cfg['points']
            self.roi_colors[door_name] = tuple(door_cfg['color'])
        # 1. Detection model: per-camera override via params, else default path.
        model_path = self.params.get('model_path')
        if model_path:
            full_model_path = f"{MODEL_ROOT_PATH}/{model_path}"
        else:
            full_model_path = DETECT_MODEL_PATH
        self.detector = YOLOv8_ONNX(
            full_model_path,
            conf_threshold=0.7,  # confidence threshold; tune to model accuracy
            iou_threshold=0.4,   # NMS IOU threshold
            input_size=INPUT_SIZE
        )

        # 2. ByteTracker for single/multi-prisoner tracking in the corridor.
        class TrackerArgs:
            track_thresh = 0.65
            track_buffer = 60  # smaller buffer to limit track drift
            match_thresh = 0.5
            mot20 = False

        self.tracker = BYTETracker(TrackerArgs(), frame_rate=RTSP_FPS)

        # 3. State variables.
        # self.last_alert_time = 0.0  # last alert time (push de-duplication)
        # Per-track info: {track_id: {'is_cx_in_roi': centre inside ROI?,
        #   'lost_frames': frames missing, 'lost_roi': ROI it vanished in,
        #   'last_cxcy': last centre coordinates}}
        self.prisoner_track_info = {}
        self.frame_width = 0   # frame width (set each frame)
        self.frame_height = 0  # frame height (set each frame)
        self.roi_abs_cache = {}  # {roi_name: np.int32 polygon in pixels}
        self.entry_frame_cache = {}  # {target_id: frame at first ROI entry}
        # Position-based target management.
        self.active_targets = {}  # {target_id: {...}}
        self.next_target_id = 0
        self.position_history = {}  # {target_id: deque of centre positions}
        # Distance threshold for matching detections to existing targets.
        self.distance_threshold = 100  # pixels
        buffer_seconds = 3  # roll back at most 3 seconds
        self.init_frame_buffer(buffer_seconds, RTSP_FPS)
        self.detect_rollback_time = 0.9  # alert-frame rollback time (seconds)

    def compute_center_distance(self, box1, box2):
        """Euclidean distance between the centres of two boxes."""
        cx1 = (box1[0] + box1[2]) / 2
        cy1 = (box1[1] + box1[3]) / 2
        cx2 = (box2[0] + box2[2]) / 2
        cy2 = (box2[1] + box2[3]) / 2
        return np.sqrt((cx1 - cx2) ** 2 + (cy1 - cy2) ** 2)

    def compute_iou(self, boxA, boxB):
        """IOU between two [x1, y1, x2, y2] boxes (0.0 when disjoint)."""
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        interW = max(0, xB - xA)
        interH = max(0, yB - yA)
        interArea = interW * interH
        boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
        boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
        unionArea = boxAArea + boxBArea - interArea
        return interArea / unionArea if unionArea > 0 else 0.0

    def _get_roi_abs(self, roi_name):
        """Convert a ROI's relative points to absolute int32 pixel coordinates
        for the current frame size (OpenCV drawing requires int32)."""
        if roi_name not in self.roi_config:
            return None
        roi_rel = np.array(self.roi_config[roi_name], dtype=np.float64)
        roi_abs = roi_rel * np.array([self.frame_width, self.frame_height])
        return roi_abs.astype(np.int32)

    def is_cxcy_in_roi(self, cx, cy):
        """Return (inside?, roi_name) for a box centre point; roi_name is
        "outside" when the point is in no cached ROI."""
        for roi_name, roi_abs in self.roi_abs_cache.items():
            # pointPolygonTest >= 0 means inside or on the boundary.
            if cv2.pointPolygonTest(roi_abs, (cx, cy), False) >= 0:
                return (True, roi_name)
        return (False, "outside")

    def match_detection_to_target(self, detection_box, detection_conf):
        """
        Match a detection box to an existing active target.

        Score = 0.3 * distance score + 0.7 * IOU score, optionally blended
        with a linear-velocity position prediction.

        Returns:
            (matched_target_id, match_score); matched_target_id is None
            when no target scores above 0.3.
        """
        best_match_id = None
        best_match_score = 0
        det_center = np.array([(detection_box[0] + detection_box[2]) / 2,
                               (detection_box[1] + detection_box[3]) / 2])
        for target_id, target_info in self.active_targets.items():
            # Distance to the target's last known position.
            last_box = target_info['last_box']
            last_center = np.array([(last_box[0] + last_box[2]) / 2,
                                    (last_box[1] + last_box[3]) / 2])
            distance = np.linalg.norm(det_center - last_center)
            # IOU only counts when the target was updated very recently.
            time_since_update = time.time() - target_info['last_update_time']
            iou_score = self.compute_iou(detection_box, last_box) if time_since_update < 1.0 else 0
            # Combined score: close distance + high IOU.
            distance_score = max(0, 1 - distance / self.distance_threshold)
            match_score = 0.3 * distance_score + 0.7 * iou_score
            # Fold in a simple linear position prediction when the target is moving.
            if target_id in self.position_history and len(self.position_history[target_id]) >= 2:
                hist = list(self.position_history[target_id])
                if len(hist) >= 2:
                    velocity = hist[-1] - hist[-2]
                    predicted_pos = last_center + velocity
                    pred_distance = np.linalg.norm(det_center - predicted_pos)
                    pred_score = max(0, 1 - pred_distance / self.distance_threshold)
                    match_score = 0.7 * match_score + 0.3 * pred_score
            if match_score > best_match_score and match_score > 0.3:  # tunable threshold
                best_match_score = match_score
                best_match_id = target_id
        return best_match_id, best_match_score

    # def push_alert(self, camera_id, target_id, lost_roi, last_cxcy, timestamp, entry_frame):
    #     """Push an alert to the remote endpoint (currently disabled)."""
    #     current_time = time.time()
    #     if current_time - self.last_alert_time < ALERT_PUSH_INTERVAL:
    #         return False
    #
    #     _, frame_encoded = cv2.imencode('.jpg', entry_frame)
    #     frame_base64 = frame_encoded.tobytes()
    #
    #     alert_info = {
    #         "camera_id": camera_id,
    #         "alert_type": "prisoner_cx_disappear_in_roi",
    #         "prisoner_track_id": target_id,
    #         "disappear_roi": lost_roi,
    #         "last_cx": round(last_cxcy[0], 2),
    #         "last_cy": round(last_cxcy[1], 2),
    #         "timestamp": timestamp,
    #         "entry_frame_base64": frame_base64,
    #         "details": f"犯人框中心点在{lost_roi}区域内消失"
    #     }
    #
    #     try:
    #         requests.post(ALERT_PUSH_URL, json=alert_info, timeout=3)
    #         print(f"[报警成功] target_id={target_id}, roi={lost_roi}")
    #         self.last_alert_time = current_time
    #         return True
    #     except Exception as e:
    #         print(f"[报警失败] {str(e)}")
    #         return False

    def process_frame(self, frame, camera_id: int, timestamp: float) -> dict:
        """
        Core per-frame processing:
        1. draw the ROI areas; 2. detect + track prisoners; 3. decide whether
        each centre point is inside a ROI; 4. when a centre vanishes inside a
        ROI, count lost frames and raise an alert at the threshold.

        Returns:
            {"image": annotated frame, "alerts": list of alert dicts for this frame}
        """
        self.frame_height, self.frame_width = frame.shape[:2]
        current_frame_alerts = []  # alerts raised by this frame
        frame_copy = frame.copy()  # clean copy, cached at first ROI entry
        current_time = time.time()
        # ===== 1. Rebuild ROI absolute coordinates and draw the ROIs =====
        self.roi_abs_cache.clear()
        for roi_name in self.roi_config:
            roi_abs = self._get_roi_abs(roi_name)
            if roi_abs is None:
                continue
            self.roi_abs_cache[roi_name] = roi_abs
            # Draw the closed ROI polygon plus its name label.
            roi_draw = roi_abs.reshape((-1, 1, 2))  # OpenCV wants shape (n, 1, 2)
            color = self.roi_colors.get(roi_name, (255, 255, 255))
            cv2.polylines(frame, [roi_draw], isClosed=True, color=color, thickness=2)
            cv2.putText(frame, roi_name, (roi_abs[0][0], roi_abs[0][1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        # ===== 2. Inference: keep only prisoner detections =====
        detect_results = self.detector(frame)
        prisoner_detections = []
        if detect_results:
            for det in detect_results:
                x1, y1, x2, y2, conf, cls_id = det
                # Clamp coordinates to the image bounds.
                x1 = max(0, min(x1, self.frame_width - 1))
                y1 = max(0, min(y1, self.frame_height - 1))
                x2 = max(0, min(x2, self.frame_width - 1))
                y2 = max(0, min(y2, self.frame_height - 1))
                # cls_id == 1 presumably is the prisoner class — confirm against
                # the model's label map; also drop degenerate/tiny boxes.
                if cls_id == 1 and x2 > x1 and y2 > y1 and (x2 - x1) * (y2 - y1) > 100:
                    prisoner_detections.append([x1, y1, x2, y2, conf, cls_id])
        # ===== 3. ByteTracker update =====
        prisoner_det_boxes = np.array(
            [[x1, y1, x2, y2, conf] for x1, y1, x2, y2, conf, cls_id in prisoner_detections],
            dtype=np.float32) if prisoner_detections else np.empty((0, 5))
        if len(prisoner_det_boxes) > 0:
            track_results = self.tracker.update(
                prisoner_det_boxes,
                [self.frame_height, self.frame_width],
                [self.frame_height, self.frame_width]
            )
        else:
            track_results = []
        # ===== 4. Fuse tracker output with raw detections =====
        # 4.1 For each track, prefer its best-IOU detection box (more accurate).
        tracked_detections = {}  # {key: {'box', 'conf', 'source'}}
        used_det_indices = set()
        for track in track_results:
            track_id = track.track_id
            t_box = [float(x) for x in track.tlbr]
            # Find the best-matching detection box for this track.
            best_iou = 0.0  # minimum acceptance threshold
            best_det_idx = -1
            for det_idx, det in enumerate(prisoner_detections):
                if det_idx in used_det_indices:
                    continue
                iou = self.compute_iou(t_box, det[:4])
                if iou > best_iou:
                    best_iou = iou
                    best_det_idx = det_idx
            if best_det_idx != -1:
                # Track has a matching detection: use the detection box.
                tracked_detections[f"track_{track_id}"] = {
                    'box': prisoner_detections[best_det_idx][:4],
                    'conf': prisoner_detections[best_det_idx][4],
                    'source': 'tracked'
                }
                used_det_indices.add(best_det_idx)
            else:
                # No matching detection: keep the tracker's own box.
                tracked_detections[f"track_{track_id}"] = {
                    'box': t_box,
                    'conf': 0.5,  # assign a medium confidence
                    'source': 'track_only'
                }
        # 4.2 Detections that no track claimed.
        for det_idx, det in enumerate(prisoner_detections):
            if det_idx not in used_det_indices:
                tracked_detections[f"det_{det_idx}"] = {
                    'box': det[:4],
                    'conf': det[4],
                    'source': 'det_only'
                }
        # ===== 5. Match fused detections to persistent targets =====
        current_target_ids = set()
        matched_det_keys = set()
        for det_key, det_info in tracked_detections.items():
            det_box = det_info['box']
            det_conf = det_info['conf']
            # Box centre.
            cx = (det_box[0] + det_box[2]) / 2
            cy = (det_box[1] + det_box[3]) / 2
            # Try to match an existing target.
            matched_target_id, match_score = self.match_detection_to_target(det_box, det_conf)
            if matched_target_id is not None and match_score > 0.3:
                # Update the existing target.
                target_id = matched_target_id
                target_info = self.active_targets[target_id]
                # Record position history.
                if target_id not in self.position_history:
                    self.position_history[target_id] = deque(maxlen=10)
                self.position_history[target_id].append(np.array([cx, cy]))
                # Is the centre inside a ROI?
                is_cx_in_roi, current_roi = self.is_cxcy_in_roi(cx, cy)
                # On first ROI entry, cache the clean frame and restart the
                # lost-frame counter.
                # NOTE(review): lost_frames is reset only on first ROI entry
                # here — confirm whether it should reset on every re-match.
                if not target_info.get('in_roi', False) and is_cx_in_roi:
                    self.entry_frame_cache[target_id] = frame_copy.copy()
                    target_info['lost_frames'] = 0
                # Refresh target state.
                target_info.update({
                    'last_box': det_box,
                    'last_cxcy': (cx, cy),
                    'last_conf': det_conf,
                    'last_update_time': current_time,
                    'in_roi': is_cx_in_roi,
                    'current_roi': current_roi if is_cx_in_roi else target_info.get('current_roi', 'outside'),
                    'detection_source': det_info['source']
                })
                current_target_ids.add(target_id)
                matched_det_keys.add(det_key)
            else:
                # Create a new target.
                target_id = self.next_target_id
                self.next_target_id += 1
                is_cx_in_roi, current_roi = self.is_cxcy_in_roi(cx, cy)
                self.active_targets[target_id] = {
                    'first_seen': current_time,
                    'last_box': det_box,
                    'last_cxcy': (cx, cy),
                    'last_conf': det_conf,
                    'last_update_time': current_time,
                    'in_roi': is_cx_in_roi,
                    'current_roi': current_roi if is_cx_in_roi else 'outside',
                    'lost_frames': 0,
                    'detection_source': det_info['source']
                }
                self.position_history[target_id] = deque(maxlen=10)
                self.position_history[target_id].append(np.array([cx, cy]))
                if is_cx_in_roi:
                    self.entry_frame_cache[target_id] = frame_copy.copy()
                current_target_ids.add(target_id)
                matched_det_keys.add(det_key)
        # ===== 6. Disappearance handling and alerting =====
        for target_id in list(self.active_targets.keys()):
            target_info = self.active_targets[target_id]
            if target_id not in current_target_ids:
                # Target did not appear in this frame.
                if target_info['in_roi']:
                    # Vanished while inside a ROI: count lost frames.
                    target_info['lost_frames'] += 1
                    if target_info['lost_frames'] >= ROI_LOST_FRAMES_THRESH:
                        # Raise the alert.
                        # entry_frame = self.entry_frame_cache.get(target_id, frame_copy)
                        # self.push_alert(
                        #     camera_id=camera_id,
                        #     target_id=target_id,
                        #     lost_roi=target_info['current_roi'],
                        #     last_cxcy=target_info['last_cxcy'],
                        #     timestamp=timestamp,
                        #     entry_frame=entry_frame
                        # )
                        alert_frame = self.find_target_frame(timestamp - self.detect_rollback_time)
                        current_frame_alerts.append({
                            "time": timestamp,
                            "camera_id": camera_id,
                            "action": "Indoor Violation",
                            'image': alert_frame,
                            "prisoner_track_id": target_id,
                            "disappear_roi": target_info['current_roi'],
                            "last_cx": round(target_info['last_cxcy'][0], 2),
                            "last_cy": round(target_info['last_cxcy'][1], 2)
                        })
                        # Clean up the alerted target.
                        del self.active_targets[target_id]
                        if target_id in self.position_history:
                            del self.position_history[target_id]
                        if target_id in self.entry_frame_cache:
                            del self.entry_frame_cache[target_id]
                else:
                    # Vanished outside any ROI: clean up immediately.
                    del self.active_targets[target_id]
                    if target_id in self.position_history:
                        del self.position_history[target_id]
                    if target_id in self.entry_frame_cache:
                        del self.entry_frame_cache[target_id]
            else:
                # Target still present but possibly back outside the ROI.
                if not target_info['in_roi']:
                    target_info['lost_frames'] = 0
        # ===== 7. Purge stale targets =====
        timeout_threshold = 5.0  # drop targets not updated for 5 s
        for target_id in list(self.active_targets.keys()):
            if current_time - self.active_targets[target_id]['last_update_time'] > timeout_threshold:
                del self.active_targets[target_id]
                if target_id in self.position_history:
                    del self.position_history[target_id]
                if target_id in self.entry_frame_cache:
                    del self.entry_frame_cache[target_id]
        # ===== 8. Visualisation =====
        for target_id, target_info in self.active_targets.items():
            box = target_info['last_box']
            cx, cy = target_info['last_cxcy']
            in_roi = target_info['in_roi']
            current_roi = target_info['current_roi']
            source = target_info.get('detection_source', 'unknown')
            # Colour by ROI state.
            if in_roi:
                color = (0, 0, 255)  # red: inside a ROI
            else:
                color = (0, 255, 0)  # green: outside any ROI
            # Line thickness by detection source.
            thickness = 3 if source == 'tracked' else 2
            cv2.rectangle(frame, (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])), color, thickness)
            cv2.circle(frame, (int(cx), int(cy)), 5, color, -1)
            status = f"T{target_id}_{current_roi[:2]}"
            if source == 'det_only':
                status += "_DET"
            cv2.putText(frame, status, (int(box[0]), int(box[1]) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        # ===== 9. On-screen statistics =====
        cv2.putText(frame, f"Camera: {camera_id}", (20, self.frame_height - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.putText(frame, f"Active Targets: {len(self.active_targets)}",
                    (20, self.frame_height - 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        # Cache the annotated frame for later alert rollback.
        self.append_frame(frame, timestamp)
        return {"image": frame, "alerts": current_frame_alerts}
# ========================= 帧处理线程 =========================
class FrameProcessorWorker(BaseFrameProcessorWorker):
    """Detention-house corridor prisoner detection — enhanced tracking version."""

    # staticmethod is required: a bare function stored as a class attribute
    # becomes a bound method on instance access, so `self.DETECTOR_FACTORY(p)`
    # would pass the worker instance as `params` instead of the config dict.
    DETECTOR_FACTORY = staticmethod(lambda params: PrisonerDoorDetector(params))
    POST_TYPE = 3
    TARGET_FPS = RTSP_FPS

View File

@@ -15,114 +15,70 @@ from typing import Dict, Any, Tuple, List
from biz.base_frame_processor import BaseFrameProcessorWorker from biz.base_frame_processor import BaseFrameProcessorWorker
# -------------------------- Kadian 检测相关导入 -------------------------- # -------------------------- Kadian 检测相关导入 --------------------------
from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX # 主检测模型(人/车/后备箱/手机) from algorithm.common.npu_yolo_onnx_person_car_phone import YOLOv8_ONNX # 主检测模型(人/车/后备箱/手机)
from yolox.tracker.byte_tracker import BYTETracker from yolox.tracker.byte_tracker import BYTETracker
from rtsp_service_ws_prison import TrackerArgs
from utils.logger import get_logger from utils.logger import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
# ========================= 配置区 ========================= # ========================= 配置区 =========================
Person_Phone_Model = r'YOLO_Weight/person_phone_model.onnx' # 人和手机的检测模型
Smoke_Model = r'YOLO_Weight/smoke_model.onnx' # 抽烟检测模型
person_phone_input_size = 1280 # 模型输入尺寸,与训练时的模型一致
smoke_input_size = 1280 # 模型输入尺寸,与训练时的模型一致
# RTSP 服务配置 # RTSP 服务配置
RTSP_TARGET_FPS = 5.0 RTSP_TARGET_FPS = 10.0
# 新增:告警推送频率限制(秒) # 新增:告警推送频率限制(秒)
ALERT_PUSH_INTERVAL = 5.0 # 相同action 5秒内仅推送一次 ALERT_PUSH_INTERVAL = 5.0 # 相同action 5秒内仅推送一次
Model_Path = 'YOLO_Weight/zhihuishi.onnx'
Model_size = 640 # yolo模型尺寸
Label_Map = {
-1: 'Unknown',
0: 'Police'
}
Color_Map = {
-1: (255, 255, 255), # 白
0: (0, 255, 0) # 绿
}
NOBODY_THRESHOLD = 5.0 * RTSP_TARGET_FPS # 当屏幕中的人消失了开始计数如果累计够时长判定Nobody如果中间又检测到了人则立即清空计数
class ZhihuishiDetector: class ZhihuishiDetector:
def __init__(self, params=None): def __init__(self, params=None):
# 模型加载 # person检测模型
self.detector = YOLOv8_ONNX(Model_Path, conf_threshold=0.6, iou_threshold=0.6, input_size=Model_size)
# 人和手机检测模型
print(f"加载人和手机检测模型: {Person_Phone_Model}")
self.person_phone_detector = YOLOv8_ONNX(Person_Phone_Model, conf_threshold=0.6, iou_threshold=0.45,
input_size=person_phone_input_size)
# 抽烟检测模型
print(f"加载抽烟检测模型: {Smoke_Model}")
self.smoke_detector = YOLOv8_ONNX(Smoke_Model, conf_threshold=0.4, iou_threshold=0.65,
input_size=smoke_input_size)
# ByteTracker # ByteTracker
class TrackerArgs: class TrackerArgs:
track_thresh = 0.25 track_thresh = 0.61
track_buffer = 30 track_buffer = RTSP_TARGET_FPS * 3 # 3 秒未见该目标,则判定该目标消失,在字典中删除
match_thresh = 0.8 match_thresh = 0.8
mot20 = False mot20 = False
self.fps = RTSP_TARGET_FPS self.fps = RTSP_TARGET_FPS
self.person_phone_tracker = BYTETracker(TrackerArgs(), frame_rate=self.fps) # 当前帧的ID
self.smoke_tracker = BYTETracker(TrackerArgs(), frame_rate=self.fps) self.current_frame_idx = 0
self.person_phone_track_role = {} # 设置 ByteTrack跟踪器
self.smoke_track_role = {} self.ByteTracker = BYTETracker(TrackerArgs(), frame_rate=self.fps)
# 用来保存历史跟踪目标的字典,当目标消失了之后,就在该字典中清除该目标
self.track_role = {}
# ========================================== # 记录无人的帧数
# 超参数设置 (Hyperparameters) self.nobody_frames = 0
# ==========================================
# 1. 业务判定时间阈值
self.TIME_THRESHOLD_NOBODY = 2.0 # 无人在场判定时长
self.TIME_TOLERANCE_NOBODY = 2.0 # 人丢失缓冲时间
self.TIME_THRESHOLD_SMOKE = 1.0 # 抽烟判定时长
self.TIME_TOLERANCE_SMOKE = 0.5 # 烟丢失缓冲时间(防抖动)
self.TIME_THRESHOLD_PHONE = 1.0 # 玩手机判定时长
self.TIME_TOLERANCE_PHONE = 0.5 # 手机丢失缓冲时间(防抖动)
# 无人在场帧数阈值
self.frame_thresh_nobody = int(self.TIME_THRESHOLD_NOBODY * self.fps)
self.frame_buffer_nobody = int(self.TIME_TOLERANCE_NOBODY * self.fps)
# 抽烟检测帧数阈值
self.frame_thresh_smoke = int(self.TIME_THRESHOLD_SMOKE * self.fps)
self.frame_buffer_smoke = int(self.TIME_TOLERANCE_SMOKE * self.fps)
# 手机检测帧数阈值
self.frame_thresh_phone = int(self.TIME_THRESHOLD_PHONE * self.fps)
self.frame_buffer_phone = int(self.TIME_TOLERANCE_PHONE * self.fps)
print(f"\n超参数设置:") print(f"\n超参数设置:")
print(f" FPS: {self.fps:.2f}") print(f" FPS: {self.fps:.2f}")
print(f" 判定 'Nobody' 需连续: {self.frame_thresh_nobody}") print(f" 判定 'Nobody' 需连续: {NOBODY_THRESHOLD}")
print(f" 判定 'Smoke Detected' 需累计检测: {self.frame_thresh_smoke}")
print(f" 抽烟丢失缓冲帧数: {self.frame_buffer_smoke}")
print(f" 判定 'Phone Detected' 需累计检测: {self.frame_thresh_phone}")
print(f" 手机丢失缓冲帧数: {self.frame_buffer_phone}")
# ========================================== def compute_iou(self, boxA, boxB):
# 状态变量初始化
# ==========================================
self.current_frame_idx = 0
# 无人在场检测状态变量
self.nobody_detection_frames = 0
self.nobody_missing_frames = 0 # 连续未检测到手机的帧数
self.nobody_alert_active = False # 手机报警是否激活
# 手机检测状态变量
self.phone_detection_frames = 0 # 连续检测到手机的帧数
self.phone_missing_frames = 0 # 连续未检测到手机的帧数
self.phone_alert_active = False # 手机报警是否激活
# 抽烟检测状态变量
self.smoke_detection_frames = 0 # 连续检测到手机的帧数
self.smoke_missing_frames = 0 # 连续未检测到手机的帧数
self.smoke_alert_active = False # 手机报警是否激活
def compute_iou(self,boxA, boxB):
# box = [x1, y1, x2, y2] # box = [x1, y1, x2, y2]
xA = max(boxA[0], boxB[0]) xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1]) yA = max(boxA[1], boxB[1])
@@ -142,7 +98,7 @@ class ZhihuishiDetector:
return interArea / unionArea return interArea / unionArea
def draw_alert(self, frame, text, color=(0, 0, 255), sub_text=None, offset_y=0): def draw_alert(self, frame, text, color=(0, 0, 255), offset_y=0):
"""在右上角绘制警告文字 (支持垂直偏移,防止文字重叠)""" """在右上角绘制警告文字 (支持垂直偏移,防止文字重叠)"""
font_scale = 1.5 font_scale = 1.5
thickness = 3 thickness = 3
@@ -155,347 +111,114 @@ class ZhihuishiDetector:
cv2.rectangle(frame, (x - 10, y - text_h - 10), (x + text_w + 10, y + 10), (0, 0, 0), -1) cv2.rectangle(frame, (x - 10, y - text_h - 10), (x + text_w + 10, y + 10), (0, 0, 0), -1)
cv2.putText(frame, text, (x, y), font, font_scale, color, thickness) cv2.putText(frame, text, (x, y), font, font_scale, color, thickness)
if sub_text:
cv2.putText(frame, sub_text, (x, y + 40), font, 0.7, (200, 200, 200), 2)
def process_frame(self, frame, camera_id: int, timestamp: float) -> Dict[str, Any]: def process_frame(self, frame, camera_id: int, timestamp: float) -> Dict[str, Any]:
# =================================== 收集检测结果 ===================================
h, w = frame.shape[:2] h, w = frame.shape[:2]
self.width, self.height = w, h self.width, self.height = w, h
self.current_frame_idx += 1 self.current_frame_idx += 1
current_time_sec = timestamp # 当前时间戳
current_time_sec = timestamp # yolo 的检测结果
detect_results = self.detector(frame)
# ========= 人和手机检测 ========= detect_xyxy = [] # 存储 yolo检测出来的所有检测框的角点坐标x1, y1, x2, y2为角点坐标x1 y1为左上角x2 y2为右下角
person_phone_results = self.person_phone_detector(frame) detect_roles = [] # 存储 yolo检测出来的所有检测框的标签类别用id的形式保存
detect_bytetrack = [] # 从 yolo的检测结果中提取出来用于ByteTrack追踪检测框所需的信息保存在这里面
# ========= 抽烟检测 ========= # 累计在当前帧里每个标签类别被检测到的次数,存储格式为 类别id:次数
smoke_results = self.smoke_detector(frame) current_labels_count = {id: 0 for id in Label_Map}
person_phone_dets_xyxy = [] # ========= 存储当前帧所有警告 ==========
person_phone_dets_roles = []
person_phone_dets_for_tracker = []
smoke_dets_xyxy = []
smoke_dets_roles = []
smoke_dets_for_tracker = []
# ========= 当前帧所有警告列表(关键改动)==========
current_frame_alerts = [] # 每帧清空,重新收集 current_frame_alerts = [] # 每帧清空,重新收集
# 收集 人和手机的检测结果 # 遍历 yolo 的检测结果,对 detect_xyxy detect_roles detect_bytetrack 进行填充
if person_phone_results: if detect_results:
for det in person_phone_results: for result in detect_results: # yolo检测结果返回 x1, y1, x2, y2, conf, cls_id
x1, y1, x2, y2, conf, cls_id = det # x1, y1, x2, y2为角点坐标x1 y1为左上角x2 y2为右下角 detect_xyxy.append(result[:-2])
person_phone_dets_xyxy.append([x1, y1, x2, y2]) detect_roles.append(result[-1])
person_phone_dets_for_tracker.append([x1, y1, x2, y2, conf]) detect_bytetrack.append(result[:-1])
if cls_id == 0:
person_phone_dets_roles.append("phone")
elif cls_id == 1:
person_phone_dets_roles.append("police")
person_phone_dets = np.array(person_phone_dets_for_tracker, dtype=np.float32) if len( # 根据收集到的 detect_bytetrack 确定追踪的检测框目标
person_phone_dets_for_tracker) else np.empty((0, 5)) tracks = self.ByteTracker.update(
np.array(detect_bytetrack, dtype=np.float32) if len(detect_bytetrack) else np.empty((0, 5)),
person_phone_tracks = self.person_phone_tracker.update( # np.empty((0,5)) 表示一个 0 行、5 列 的二维空数组
person_phone_dets,
[self.height, self.width], [self.height, self.width],
[self.height, self.width] [self.height, self.width]
) )
# 收集 抽烟的检测结果 # 匹配每个跟踪目标的正确类别
if smoke_results: # 为什么要用track的结果来统计标签类别的出现次数以及绘制检测框而不是仅用yolo的检测结果来统计及绘制是因为yolo的检测结果是针对单帧而bytetrack可以实现跨帧处理bytetrack的track_id会给每个目标设置一个唯一的id
for det in smoke_results: current_track_ids = []
x1, y1, x2, y2, conf, cls_id = det for track in tracks:
smoke_dets_xyxy.append([x1, y1, x2, y2]) track_id = track.track_id
smoke_dets_for_tracker.append([x1, y1, x2, y2, conf]) current_track_ids.append(track_id)
if cls_id == 0:
smoke_dets_roles.append("smoke")
smoke_dets = np.array(smoke_dets_for_tracker, dtype=np.float32) if len( reIdentify_frame_interval = 10 # 重新匹配每个跟踪目标的类别的帧间隔
smoke_dets_for_tracker) else np.empty((0, 5)) if (current_time_sec % reIdentify_frame_interval == 0) or track_id not in self.track_role:
best_iou = 0.0
best_role = -1
smoke_tracks = self.smoke_tracker.update( track_box = list(map(float, track.tlbr))
smoke_dets,
[self.height, self.width],
[self.height, self.width]
)
# ========= 单帧统计变量 ========= for i, box in enumerate(detect_xyxy):
current_person_count = 0 iou = self.compute_iou(track_box, box)
current_phone_count = 0 if iou > best_iou:
current_smoke_count = 0 best_iou = iou
best_role = detect_roles[i]
# ========= 人和手机检测 ========= self.track_role[track_id] = best_role
for t in person_phone_tracks:
# print("t: {}".format(t))
tid = t.track_id
# cls_id = -1
# IoU 匹配角色 role = self.track_role[track_id]
# IoU匹配跟踪ID和类别
REVALIDATE_FRAME_INTERVAL = 10
if (self.current_frame_idx % REVALIDATE_FRAME_INTERVAL == 0) or (tid not in self.person_phone_track_role):
#if tid not in self.person_phone_track_role:
best_iou = 0
best_role = "unknown"
t_box = list(map(float, t.tlbr)) # [x1,y1,x2,y2] current_labels_count[role] += 1
for i, box in enumerate(person_phone_dets_xyxy): # 当 role 不等于 unknown 的时候,绘制检测框
iou_val = self.compute_iou(t_box, box) if role != -1:
if iou_val > best_iou: x1, y1, x2, y2 = map(int, track.tlbr)
best_iou = iou_val cv2.rectangle(frame, (x1, y1), (x2, y2), Color_Map[role], 2)
best_role = person_phone_dets_roles[i] cv2.putText(frame, Label_Map[role], (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, Color_Map[role], 2)
if best_iou > 0.1:
self.person_phone_track_role[tid] = best_role
else:
self.person_phone_track_role[tid] = "unknown"
role = self.person_phone_track_role.get(tid, "unknown") # 处理过期的 track_role 里的role,如果 track_role 里包含 tracks 里没有的 role ,直接删了即可
cls_id = -1 for role in list(self.track_role.keys()): # 遍历字典的时候不能直接删元素,用 list() 先复制一份 key再遍历删除才安全
if role == "phone": if role not in current_track_ids:
cls_id = 0 del self.track_role[role]
elif role == "police":
cls_id = 1
# print("tid: {}, role: {}, cls: {}".format(tid, role,cls_id))
x1, y1, x2, y2 = map(int, t.tlbr) # ========================= 业务逻辑判断 ===========================
nobody_alter_flag = False # 无人在场业务逻辑是否成立
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
color = None # Nobody 业务逻辑判断
label = None if current_labels_count[0] == 0:
self.nobody_frames += 1
if cls_id == 0: # Person if self.nobody_frames >= NOBODY_THRESHOLD:
current_phone_count += 1 nobody_alter_flag = True
color = (255, 0, 255)
label = "Phone"
elif cls_id == 1: # Phone主模型已支持
current_person_count += 1
color = (0, 0, 139)
label = "Person"
else:
color = (255, 255, 255)
label = "Unknown"
# label = f"ID:{tid} IN"
cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
# ========= 抽烟检测 =========
for t in smoke_tracks:
# print("t: {}".format(t))
tid = t.track_id
# cls_id = -1
# IoU 匹配角色
# IoU匹配跟踪ID和类别
REVALIDATE_FRAME_INTERVAL = 10
if (self.current_frame_idx % REVALIDATE_FRAME_INTERVAL == 0) or (tid not in self.smoke_track_role):
#if tid not in self.smoke_track_role:
best_iou = 0
best_role = "unknown"
t_box = list(map(float, t.tlbr)) # [x1,y1,x2,y2]
for i, box in enumerate(smoke_dets_xyxy):
iou_val = self.compute_iou(t_box, box)
if iou_val > best_iou:
best_iou = iou_val
best_role = smoke_dets_roles[i]
# self.smoke_track_role[tid] = best_role
if best_iou > 0.1:
self.smoke_track_role[tid] = best_role
else:
self.smoke_track_role[tid] = "unknown"
role = self.smoke_track_role.get(tid, "unknown")
cls_id = -1
if role == "smoke":
cls_id = 0
x1, y1, x2, y2 = map(int, t.tlbr)
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
color = None
label = None
if cls_id == 0: # 抽烟
current_smoke_count += 1
color = (255, 255, 0)
label = "Smoke"
else:
color = (255, 255, 255)
label = "Unknown"
# label = f"ID:{tid} IN"
cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
# ==========================================
# 手机检测
# ==========================================
if current_phone_count > 0:
# 检测到手机框
self.phone_detection_frames += 1
self.phone_missing_frames = 0 # 重置丢失计数器
# 当检测累计达到阈值时,激活报警
if self.phone_detection_frames >= self.frame_thresh_phone:
self.phone_alert_active = True
else: else:
# 未检测到手机框 self.nobody_frames = 0
self.phone_missing_frames += 1
# 如果之前检测到手机,重置检测计数器
if self.phone_detection_frames > 0:
# 只有在连续丢失超过缓冲帧数时才重置
if self.phone_missing_frames >= self.frame_buffer_phone:
self.phone_detection_frames = 0
self.phone_alert_active = False
else:
# 从未检测到手机,保持状态
pass
# ==========================================
# 抽烟检测
# ==========================================
if current_smoke_count > 0:
# 检测到抽烟框
self.smoke_detection_frames += 1
self.smoke_missing_frames = 0 # 重置丢失计数器
# 当检测累计达到阈值时,激活报警
if self.smoke_detection_frames >= self.frame_thresh_smoke:
self.smoke_alert_active = True
else:
# 未检测到抽烟框
self.smoke_missing_frames += 1
# 如果之前检测到抽烟,重置检测计数器
if self.smoke_detection_frames > 0:
# 只有在连续丢失超过缓冲帧数时才重置
if self.smoke_missing_frames >= self.frame_buffer_smoke:
self.smoke_detection_frames = 0
self.smoke_alert_active = False
else:
# 从未检测到抽烟,保持状态
pass
# ==========================================
# 9. 业务逻辑判定 (Only One / Nobody)
# ==========================================
status_text = ""
if current_person_count == 0:
self.nobody_detection_frames += 1
self.nobody_missing_frames = 0
if self.nobody_detection_frames >= self.frame_thresh_nobody:
self.nobody_alert_active = True
else:
self.nobody_missing_frames += 1
if self.nobody_detection_frames > 0:
if self.nobody_missing_frames >= self.frame_buffer_nobody:
self.nobody_detection_frames = 0
self.nobody_alert_active = False
else:
pass
# if current_person_count == 0:
# self.cnt_frame_nobody += 1
# else:
# self.cnt_frame_nobody = 0
# ========================================== if nobody_alter_flag:
# 10. 收集并生成结构化警告(核心改动) action_text = 'Nobody Checking'
# ==========================================
alert_offset = 0
# A. Playing Phone
if self.phone_alert_active:
duration_seconds = self.phone_detection_frames / self.fps
current_frame_alerts.append( current_frame_alerts.append(
{ {
'time': current_time_sec, 'time': current_time_sec,
'action': 'Playing Phone', 'action': action_text,
'confidence': 1.0, # 固定为1.0(规则判定)
'details': f"Detected for {duration_seconds:.1f}s"
} }
) )
self.draw_alert(frame, action_text, offset_y=0)
# A. Playing Phone
if self.smoke_alert_active:
duration_seconds = self.smoke_detection_frames / self.fps
current_frame_alerts.append(
{
'time': current_time_sec,
'action': 'Smoke',
'confidence': 1.0, # 固定为1.0(规则判定)
'details': f"Detected for {duration_seconds:.1f}s"
}
)
# D. Nobody Checking
if self.nobody_alert_active:
duration_seconds = self.nobody_detection_frames / self.fps
current_frame_alerts.append({
'time': current_time_sec,
'action': 'Nobody Checking',
'confidence': 1.0,
'details': f"Detected for {duration_seconds:.1f}s"
})
# ==========================================
# 11. 统一显示当前帧所有警告(可替换原分层显示)
# ==========================================
debug_info = f"Person: {current_person_count} | Phone: {current_phone_count} | Smoke: {current_smoke_count}"
cv2.putText(frame, debug_info, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# 统一警告显示区
alert_y_start = 150
for i, alert in enumerate(current_frame_alerts):
action = alert['action']
details = alert.get('details', '')
color = (0, 0, 255) # 默认红色警告
if action == 'Nobody Checking':
color = (255, 255, 255)
elif action == 'Smoke':
color = (0, 0, 255)
elif action == 'Playing Phone':
color = (255, 0, 0)
main_text = action
if details:
main_text += f" ({details})"
y_pos = alert_y_start + i * 50
cv2.rectangle(frame, (20, y_pos - 40), (900, y_pos + 10), (0, 0, 0), -1)
cv2.putText(frame, main_text, (30, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
return { return {
"image": frame, "image": frame,
"alerts": current_frame_alerts
"alerts":current_frame_alerts
} }
# ========================= 帧处理线程 ========================= # ========================= 帧处理线程 =========================
class FrameProcessorWorker(BaseFrameProcessorWorker): class FrameProcessorWorker(BaseFrameProcessorWorker):
"""监控室检测帧处理线程""" """监控室检测帧处理线程"""
# 子类配置 # 子类配置
DETECTOR_FACTORY = lambda params: ZhihuishiDetector(params) DETECTOR_FACTORY = lambda params: ZhihuishiDetector(params)
POST_TYPE = 2 POST_TYPE = 2

View File

@@ -3,6 +3,7 @@ from biz.prison.trajectory02_biz import FrameProcessorWorker as TrajectoryWorker
from biz.prison.supervision_room_biz import FrameProcessorWorker as SupervisionWorker from biz.prison.supervision_room_biz import FrameProcessorWorker as SupervisionWorker
from biz.prison.ab_biz import FrameProcessorWorker as AbWorker from biz.prison.ab_biz import FrameProcessorWorker as AbWorker
from biz.prison.prison_biz import FrameProcessorWorker as CorridorWorker from biz.prison.prison_biz import FrameProcessorWorker as CorridorWorker
from biz.prison.indoor_biz import FrameProcessorWorker as IndoorWorker
# ... 其他导入 # ... 其他导入
@@ -11,7 +12,8 @@ PROCESSOR_MAP = {
"trajectory": TrajectoryWorker, "trajectory": TrajectoryWorker,
"supervision_room": SupervisionWorker, "supervision_room": SupervisionWorker,
"ab": AbWorker, "ab": AbWorker,
"corridor": CorridorWorker "corridor": CorridorWorker,
"indoor": IndoorWorker
} }
def get_processor(processor_type: str): def get_processor(processor_type: str):

View File

@@ -48,22 +48,69 @@ video_clip_retention_seconds: 3600 # 视频文件
video_clip_default_segment_duration: 2 # 默认分片时长fallback video_clip_default_segment_duration: 2 # 默认分片时长fallback
service_groups: service_groups:
- name: "kadian_group" # 服务组名称 #- name: "kadian_group" # 服务组名称
# video_source_type: "hls"
# ws_host: "0.0.0.0" # WebSocket 服务地址
# ws_port: 8765 # WebSocket 服务端口
# algorithm: "checkpoint" # 算法类型
# cameras: # 该组下的摄像头列表
# - id: 8
# index: "12345"
# name: Entrance
# params:
# model_path: "Kadian_sanshijiazi.onnx"
# roi_points:
# - [0.15, 0.001]
# - [0.5, 0.001]
# - [1.0, 0.8]
# - [0.35, 1.0]
- name: "indoor_group" # 服务组名称
video_source_type: "hls" video_source_type: "hls"
ws_host: "0.0.0.0" # WebSocket 服务地址 ws_host: "0.0.0.0" # WebSocket 服务地址
ws_port: 8765 # WebSocket 服务端口 ws_port: 8765 # WebSocket 服务端口
algorithm: "checkpoint" # 算法类型 algorithm: "indoor" # 算法类型
cameras: # 该组下的摄像头列表 cameras: # 该组下的摄像头列表
- id: 8 - id: 8
index: "12345" index: "12345"
name: Entrance name: Entrance
params: params:
model_path: "Kadian_sanshijiazi.onnx" model_path: "kanshousuo.onnx"
roi_points: door_rois:
- [0.15, 0.001] left_door_1:
- [0.5, 0.001] points:
- [1.0, 0.8] - [0.195, 0.242]
- [0.35, 1.0] - [0.265, 0.17]
- [0.3, 0.63]
- [0.248, 0.8]
color: [255, 0, 0]
left_door_2:
points:
- [0.3, 0.1]
- [0.34, 0.08]
- [0.35, 0.43]
- [0.322, 0.52]
color: [0, 255, 0]
left_door_3:
points:
- [0.355, 0.06]
- [0.42, 0.0]
- [0.42, 0.18]
- [0.362, 0.36]
color: [0, 0, 255]
right_door_1:
points:
- [0.735, 0.142]
- [0.81, 0.22]
- [0.78, 0.8]
- [0.715, 0.65]
color: [255, 255, 0]
right_door_2:
points:
- [0.65, 0.06]
- [0.7, 0.09]
- [0.69, 0.5]
- [0.65, 0.4]
color: [255, 165, 0]
#- name: "prison_group" # 服务组名称 #- name: "prison_group" # 服务组名称
# video_source_type: "hls" # video_source_type: "hls"
# ws_host: "0.0.0.0" # WebSocket 服务地址 # ws_host: "0.0.0.0" # WebSocket 服务地址
@@ -91,6 +138,8 @@ alert_types:
# 监狱检测 (prison) # 监狱检测 (prison)
"prisoner": "带出犯人" "prisoner": "带出犯人"
"violation": "路线违规" "violation": "路线违规"
"Indoor Violation": "违规进入区域"
# 监控室检测 (supervision_room) # 监控室检测 (supervision_room)
"Playing Phone": "玩手机" "Playing Phone": "玩手机"

552
live_catalog/index.html Normal file
View File

@@ -0,0 +1,552 @@
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<title>Live Catalog - 树形直播目录</title>
<!-- HLS.js 用于播放 .m3u8 流 -->
<script src="/static/js/hls.min.js"></script>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background: #f0f2f5;
height: 100vh;
overflow: hidden;
}
/* 主布局 */
.app {
display: flex;
height: 100vh;
width: 100%;
}
/* 左侧树形菜单 */
.sidebar {
width: 300px;
background: #fff;
border-right: 1px solid #e8e8e8;
display: flex;
flex-direction: column;
box-shadow: 2px 0 8px rgba(0,0,0,0.02);
overflow: auto;
}
.sidebar-header {
padding: 16px 20px;
background: #fafafa;
border-bottom: 1px solid #e8e8e8;
font-weight: 600;
font-size: 16px;
color: #1f2f3d;
}
.tree-container {
flex: 1;
overflow-y: auto;
padding: 12px 0;
}
/* 树形结构样式 */
.tree {
list-style: none;
margin: 0;
padding-left: 0;
}
.tree-node {
list-style: none;
margin: 0;
padding: 0;
position: relative;
}
.tree-node-content {
display: flex;
align-items: center;
padding: 8px 12px 8px 20px;
cursor: pointer;
transition: background 0.2s;
font-size: 14px;
color: #2c3e50;
border-radius: 4px;
margin: 2px 8px;
}
.tree-node-content:hover {
background-color: #f5f7fa;
}
.tree-node-content.active {
background-color: #e6f7ff;
color: #1890ff;
font-weight: 500;
}
/* 展开/折叠图标 */
.expand-icon {
display: inline-flex;
align-items: center;
justify-content: center;
width: 20px;
height: 20px;
margin-right: 8px;
font-size: 12px;
color: #8c8c8c;
transition: transform 0.2s;
}
.expand-icon.expanded {
transform: rotate(90deg);
}
.node-name {
flex: 1;
}
/* 加载中指示器 */
.loading-icon {
display: inline-block;
width: 14px;
height: 14px;
margin-right: 8px;
border: 2px solid #ddd;
border-top-color: #1890ff;
border-radius: 50%;
animation: spin 0.6s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
/* 子节点容器 */
.children {
list-style: none;
padding-left: 28px;
margin: 0;
display: none;
}
.children.show {
display: block;
}
/* 右侧视频区域 */
.video-area {
flex: 1;
background: #141414;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
position: relative;
overflow: hidden;
}
/* 视频控制栏 */
.video-controls {
position: absolute;
top: 0;
left: 0;
right: 0;
padding: 12px 16px;
background: linear-gradient(to bottom, rgba(0,0,0,0.7), transparent);
display: flex;
align-items: center;
gap: 16px;
z-index: 10;
}
.video-controls label {
color: #fff;
font-size: 13px;
}
.video-controls select {
padding: 4px 8px;
border-radius: 4px;
border: 1px solid #444;
background: #333;
color: #fff;
font-size: 13px;
cursor: pointer;
}
/* 视频信息栏 */
.video-info {
position: absolute;
bottom: 0;
left: 0;
right: 0;
padding: 12px 16px;
background: linear-gradient(to top, rgba(0,0,0,0.8), transparent);
color: #ccc;
font-size: 12px;
z-index: 10;
font-family: monospace;
word-break: break-all;
}
.video-info .info-row {
margin: 4px 0;
}
.video-info .info-label {
color: #888;
margin-right: 8px;
}
.video-info .info-value {
color: #0f0;
}
#video-container {
width: 100%;
height: 100%;
display: flex;
align-items: center;
justify-content: center;
background: #000;
}
video {
max-width: 100%;
max-height: 100%;
width: auto;
height: auto;
background: black;
}
.placeholder {
color: #ccc;
font-size: 16px;
text-align: center;
padding: 20px;
}
/* 响应式小屏 */
@media (max-width: 768px) {
.sidebar {
width: 260px;
}
}
</style>
</head>
<body>
<div class="app">
<div class="sidebar">
<div class="sidebar-header">
📺 直播目录
</div>
<div class="tree-container">
<ul class="tree" id="tree-root">
<!-- 根节点会动态渲染到这里 -->
<li class="tree-node loading-placeholder" style="padding: 12px; text-align: center; color: #999;">加载中...</li>
</ul>
</div>
</div>
<div class="video-area">
<!-- 视频控制栏 -->
<div class="video-controls" id="video-controls" style="display: none;">
<label>码流类型:</label>
<select id="stream-type">
<option value="0">主码流(高清)</option>
<option value="1">子码流(流畅)</option>
</select>
</div>
<div id="video-container">
<div class="placeholder">👈 从左侧选择一个直播源</div>
</div>
<!-- 视频信息栏 -->
<div class="video-info" id="video-info" style="display: none;">
<div class="info-row">
<span class="info-label">摄像头ID</span>
<span class="info-value" id="info-camera-id">-</span>
</div>
<div class="info-row">
<span class="info-label">播放地址:</span>
<span class="info-value" id="info-url">-</span>
</div>
</div>
</div>
</div>
<script>
// API 基础路径(与后端同源)
const API_BASE = '/api';
// 全局状态
let currentVideoNode = null; // 当前播放的节点对象
let hls = null; // HLS 实例
let currentStreamType = 0; // 当前码流类型
// 缓存已加载的子节点数据: { parentId: [childrenNodes] }
const childrenCache = new Map();
// Fetch the top-level organization nodes; resolves to [] on any failure.
async function fetchRoots() {
    let nodes = [];
    try {
        const response = await fetch(`${API_BASE}/roots`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
        }
        nodes = await response.json();
    } catch (err) {
        console.error('获取根节点失败:', err);
    }
    return nodes;
}
// Fetch (and memoize in childrenCache) the direct children of a node.
async function fetchChildren(parentId) {
    const cached = childrenCache.get(parentId);
    if (cached !== undefined) {
        return cached;
    }
    try {
        const response = await fetch(`${API_BASE}/children/${parentId}`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
        }
        const nodes = await response.json();
        childrenCache.set(parentId, nodes);
        return nodes;
    } catch (err) {
        console.error(`获取节点 ${parentId} 的子节点失败:`, err);
        return [];
    }
}
// Ask the backend for the playable stream URL of a camera node.
// Returns the JSON payload, or null when the request fails.
async function fetchStreamUrl(nodeId, streamType = 0) {
    let payload = null;
    try {
        const response = await fetch(`${API_BASE}/stream/${nodeId}?stream_type=${streamType}`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
        }
        payload = await response.json();
    } catch (err) {
        console.error(`获取节点 ${nodeId} 的视频地址失败:`, err);
    }
    return payload;
}
// Play a leaf node's live stream in the right-hand video area.
// Resolves the stream URL via the backend, tears down any previous HLS
// instance, and wires up hls.js (or native HLS / plain <video>) playback.
async function playVideo(node, streamType = 0) {
    if (!node || !node.is_leaf) return;
    // Show a loading placeholder while the URL is being resolved
    const container = document.getElementById('video-container');
    container.innerHTML = '<div class="placeholder">📡 正在加载直播流...</div>';
    // Hide the info and control bars until playback is set up
    document.getElementById('video-controls').style.display = 'none';
    document.getElementById('video-info').style.display = 'none';
    // Resolve the stream URL
    const streamData = await fetchStreamUrl(node.id, streamType);
    if (!streamData || !streamData.url) {
        container.innerHTML = '<div class="placeholder">❌ 无法获取视频地址,请稍后重试</div>';
        return;
    }
    const streamUrl = streamData.url;
    // Destroy the previous player instance, if any
    if (hls) {
        hls.destroy();
        hls = null;
    }
    // Create a fresh <video> element
    const video = document.createElement('video');
    video.controls = true;
    video.autoplay = true;
    video.style.width = '100%';
    video.style.height = '100%';
    video.style.objectFit = 'contain';
    container.innerHTML = '';
    container.appendChild(video);
    // Decide how to play: hls.js, native HLS, or direct src
    if (Hls.isSupported() && streamUrl.includes('.m3u8')) {
        hls = new Hls();
        hls.loadSource(streamUrl);
        hls.attachMedia(video);
        hls.on(Hls.Events.MANIFEST_PARSED, () => {
            video.play().catch(e => console.warn('自动播放被阻止:', e));
        });
        hls.on(Hls.Events.ERROR, (event, data) => {
            console.error('HLS 错误:', data);
            container.innerHTML = '<div class="placeholder">⚠️ 直播流播放失败,请检查地址或网络</div>';
        });
    } else if (video.canPlayType('application/vnd.apple.mpegurl') && streamUrl.includes('.m3u8')) {
        // Native HLS support (Safari)
        video.src = streamUrl;
        video.addEventListener('loadedmetadata', () => {
            video.play().catch(e => console.warn('自动播放被阻止:', e));
        });
    } else {
        // Not HLS — play directly (e.g. mp4)
        video.src = streamUrl;
        video.addEventListener('error', () => {
            container.innerHTML = '<div class="placeholder">❌ 视频无法播放,格式可能不支持</div>';
        });
    }
    // Update and reveal the info bar
    document.getElementById('info-camera-id').textContent = streamData.cameraIndexCode;
    document.getElementById('info-url').textContent = streamUrl;
    document.getElementById('video-info').style.display = 'block';
    // Reveal the control bar
    document.getElementById('video-controls').style.display = 'flex';
    currentVideoNode = node;
}
// Render the top-level tree. Children are loaded lazily: every node <li>
// carries a .tree-node-content row plus an initially-empty .children
// container that gets populated on first expand.
async function renderTree() {
    const treeRoot = document.getElementById('tree-root');
    const rootNodes = await fetchRoots();
    if (rootNodes.length === 0) {
        treeRoot.innerHTML = '<li class="tree-node" style="padding: 12px; text-align: center; color: #999;">暂无数据</li>';
        return;
    }
    // Clear the loading placeholder and build one node per root.
    treeRoot.innerHTML = '';
    rootNodes.forEach((node) => {
        treeRoot.appendChild(createTreeNodeElement(node));
    });
}
// Build the DOM element for a single tree node (children container left empty).
function createTreeNodeElement(node) {
    const li = document.createElement('li');
    li.className = 'tree-node';
    li.dataset.id = node.id;
    li.dataset.isLeaf = node.is_leaf;
    // Content wrapper row
    const contentDiv = document.createElement('div');
    contentDiv.className = 'tree-node-content';
    if (!node.is_leaf) {
        // Branch node: add an expand icon
        const iconSpan = document.createElement('span');
        iconSpan.className = 'expand-icon';
        iconSpan.innerHTML = '▶'; // points right initially, rotated when expanded
        iconSpan.style.display = 'inline-block';
        contentDiv.appendChild(iconSpan);
    } else {
        // Leaf node: blank spacer to keep labels aligned
        const placeholder = document.createElement('span');
        placeholder.style.width = '28px';
        placeholder.style.display = 'inline-block';
        contentDiv.appendChild(placeholder);
    }
    const nameSpan = document.createElement('span');
    nameSpan.className = 'node-name';
    nameSpan.textContent = node.name;
    contentDiv.appendChild(nameSpan);
    li.appendChild(contentDiv);
    // Empty container for lazily-loaded children
    const childrenUl = document.createElement('ul');
    childrenUl.className = 'children';
    li.appendChild(childrenUl);
    // Click handler captures this node's own elements via closure
    contentDiv.addEventListener('click', (e) => {
        e.stopPropagation();
        onNodeClick(node, li, childrenUl, contentDiv);
    });
    return li;
}
// Handle a click on a tree node: play video for leaves, expand/collapse
// (with lazy child loading) for branch nodes.
async function onNodeClick(node, li, childrenUl, contentDiv) {
    // Leaf node: play its stream
    if (node.is_leaf) {
        // Highlight the selected row
        clearActiveHighlight();
        contentDiv.classList.add('active');
        await playVideo(node, currentStreamType);
        return;
    }
    // Branch node: toggle expand/collapse
    const isExpanded = childrenUl.classList.contains('show');
    if (isExpanded) {
        // Collapse
        childrenUl.classList.remove('show');
        const icon = contentDiv.querySelector('.expand-icon');
        if (icon) icon.classList.remove('expanded');
    } else {
        // Expand: load children on first open only
        if (childrenUl.children.length === 0) {
            // Spinner while the fetch is in flight
            const loadingSpan = document.createElement('span');
            loadingSpan.className = 'loading-icon';
            loadingSpan.style.marginLeft = '8px';
            contentDiv.appendChild(loadingSpan);
            const children = await fetchChildren(node.id);
            contentDiv.removeChild(loadingSpan);
            if (children && children.length) {
                for (const child of children) {
                    const childLi = createTreeNodeElement(child);
                    childrenUl.appendChild(childLi);
                }
            } else {
                // No children: show an inline "empty" hint
                const emptyMsg = document.createElement('li');
                emptyMsg.textContent = '(暂无下级)';
                emptyMsg.style.padding = '6px 12px';
                emptyMsg.style.color = '#999';
                emptyMsg.style.fontSize = '12px';
                childrenUl.appendChild(emptyMsg);
            }
        }
        // Reveal children and rotate the icon
        childrenUl.classList.add('show');
        const icon = contentDiv.querySelector('.expand-icon');
        if (icon) icon.classList.add('expanded');
    }
}
// Remove the selection highlight from every tree row.
function clearActiveHighlight() {
    const highlighted = document.querySelectorAll('.tree-node-content.active');
    for (const el of highlighted) {
        el.classList.remove('active');
    }
}
// Wire up the stream-type selector; switching it reloads the current video.
function initVideoArea() {
    const selector = document.getElementById('stream-type');
    selector.addEventListener('change', async (e) => {
        currentStreamType = parseInt(e.target.value);
        if (currentVideoNode) {
            await playVideo(currentVideoNode, currentStreamType);
        }
    });
}
// Bootstrap: build the tree, then wire up the video controls.
(async () => {
    await renderTree();
    initVideoArea();
})();
</script>
</body>
</html>

View File

@@ -0,0 +1,270 @@
import os
import urllib.parse
import socket
import json
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.hikvision_cam_utils import get_organization_list, get_final_list, get_camera_preview_url
# ========== 海康威视 API 配置 ==========
ROOT_PARENT_INDEX_CODE = "4fa15af07b6b400f94af1e35d8235c30"
def transform_org_node(item, parent_id=None):
    """Convert a Hikvision organization entry into the frontend tree-node shape.

    Args:
        item: raw dict from the Hikvision API (must contain ``indexCode``/``name``).
        parent_id: explicit parent id; falls back to ``parentIndexCode`` when falsy.

    Returns:
        A dict with id/name/parent_id/is_leaf/stream_url keys.
    """
    resolved_parent = parent_id if parent_id else item.get("parentIndexCode")
    return {
        "id": item["indexCode"],
        "name": item["name"],
        "parent_id": resolved_parent,
        "is_leaf": False,  # organization nodes can always be expanded
        "stream_url": None,
    }
def transform_camera_node(item, parent_id=None):
    """Convert a Hikvision camera entry into a leaf tree node for the frontend.

    Args:
        item: raw dict from the Hikvision API (must contain ``cameraIndexCode``/``name``).
        parent_id: id of the owning organization node (may be None).

    Returns:
        A dict with id/name/parent_id/is_leaf/stream_url keys; ``is_leaf`` is True.
    """
    return {
        "id": item["cameraIndexCode"],
        "name": item["name"],
        "parent_id": parent_id,
        "is_leaf": True,  # cameras terminate the tree
        "stream_url": None,
    }
def get_children(parent_id):
    """Return the direct children of *parent_id* as frontend tree nodes.

    Tries sub-organizations first; when the organization has none, falls back
    to its camera list (leaf nodes). Returns [] on any API error.

    Args:
        parent_id: Hikvision indexCode, or None for the configured root.
    """
    effective_parent = ROOT_PARENT_INDEX_CODE if parent_id is None else parent_id
    try:
        # Step 1: sub-organizations.
        org_resp = get_organization_list(effective_parent)
        if org_resp.get("code") != "0":
            print(f"海康威视 API 返回错误: {org_resp.get('msg')}")
            return []
        org_items = org_resp.get("data", {}).get("list", [])
        if org_items:
            return [transform_org_node(entry, effective_parent) for entry in org_items]
        # Step 2: no sub-organizations — fetch the camera list (leaves).
        print(f"组织 {effective_parent} 无下级组织,尝试获取摄像头列表...")
        cam_resp = get_final_list(effective_parent)
        if cam_resp.get("code") != "0":
            print(f"获取摄像头列表失败: {cam_resp.get('msg')}")
            return []
        cam_items = cam_resp.get("data", {}).get("list", [])
        print(f"获取到 {len(cam_items)} 个摄像头")
        return [transform_camera_node(entry, effective_parent) for entry in cam_items]
    except Exception as e:
        print(f"调用海康威视 API 失败: {e}")
        return []
def get_stream_url(node_id, stream_type=0):
    """Resolve the preview stream URL for a camera node.

    Args:
        node_id: cameraIndexCode of the camera.
        stream_type: 0 = main stream, 1 = sub stream.

    Returns:
        The URL string on success, or None when the lookup fails.
    """
    try:
        resp = get_camera_preview_url(node_id, stream_type)
        if resp.get("code") != "0":
            print(f"获取视频流地址失败: {resp.get('msg')}")
            return None
        return resp.get("data", {}).get("url")
    except Exception as e:
        print(f"调用 get_camera_preview_url 失败: {e}")
        return None
# ========== HTTP 处理器 ==========
class APIHandler(SimpleHTTPRequestHandler):
    """Request handler for the live-catalog backend.

    Routes:
        GET /api/roots          -> child nodes of the configured root organization
        GET /api/children/<id>  -> child nodes of <id> (organizations or cameras)
        GET /api/stream/<id>    -> preview URL for camera <id>
                                   (query param: stream_type, 0=main 1=sub)
        GET / or /index.html    -> the SPA entry page
        GET /<path>             -> static assets under the working directory
    """

    # Per-request socket timeout in seconds.
    timeout = 30

    # File-extension -> Content-Type map for static responses.
    MIME_TYPES = {
        '.html': 'text/html; charset=utf-8',
        '.css': 'text/css',
        '.js': 'application/javascript',
        '.json': 'application/json',
        '.png': 'image/png',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.gif': 'image/gif',
        '.svg': 'image/svg+xml',
        '.ico': 'image/x-icon',
        '.mp3': 'audio/mpeg',
        '.wav': 'audio/wav',
        '.mp4': 'video/mp4',
        '.txt': 'text/plain; charset=utf-8',
    }

    def log_message(self, format, *args):
        """Log every request with a timestamp and the client address."""
        print(f"[{self.log_date_time_string()}] {self.address_string()} - {format % args}")

    def send_json_response(self, data, status=200):
        """Send *data* as a UTF-8 JSON body with permissive CORS headers."""
        self.send_response(status)
        self.send_header('Content-type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(json.dumps(data, ensure_ascii=False).encode('utf-8'))

    def send_error_json(self, message, status=400):
        """Send an error payload of the form {"error": <message>}."""
        self.send_json_response({"error": message}, status)

    def do_GET(self):
        """Dispatch GET requests to the API routes or the static file server."""
        try:
            parsed_path = urllib.parse.urlparse(self.path)
            path = parsed_path.path
            query = parsed_path.query
            # ---- RESTful API routes ----
            if path.startswith('/api/roots'):
                # GET /api/roots
                self.send_json_response(get_children(None))
                return
            elif path.startswith('/api/children/'):
                # GET /api/children/<node_id>
                node_id = path.split('/')[-1]
                if not node_id:
                    self.send_error_json("Invalid node id", 400)
                    return
                self.send_json_response(get_children(node_id))
                return
            elif path.startswith('/api/stream/'):
                # GET /api/stream/<node_id>?stream_type=0
                node_id = path.split('/')[-1]
                if not node_id:
                    self.send_error_json("Invalid node id", 400)
                    return
                params = urllib.parse.parse_qs(query)
                try:
                    stream_type = int(params.get('stream_type', ['0'])[0])
                except ValueError:
                    # Fix: a non-numeric stream_type used to bubble up as a 500.
                    self.send_error_json("Invalid stream_type", 400)
                    return
                url = get_stream_url(node_id, stream_type)
                if url is None:
                    self.send_error_json("Stream not found or node is not a leaf", 404)
                    return
                self.send_json_response({
                    "cameraIndexCode": node_id,
                    "url": url,
                    "stream_type": stream_type
                })
                return
            # ---- static files ----
            if path == '/' or path == '/index.html':
                self.serve_file('index.html', query='api=1')
                return
            filename = path.lstrip('/')
            # Security fix: reject paths that escape the serving directory
            # (e.g. GET /../../etc/passwd) before touching the filesystem.
            base_dir = os.path.abspath(os.getcwd())
            resolved = os.path.abspath(os.path.join(base_dir, filename))
            if resolved != base_dir and not resolved.startswith(base_dir + os.sep):
                self.send_error(403, 'Forbidden')
                return
            if os.path.exists(filename):
                self.serve_static_file(filename)
            else:
                self.send_error(404, 'Not Found')
        except Exception as e:
            print(f"Error handling request: {e}")
            self.send_error(500, 'Internal Server Error')

    def serve_file(self, filename, query=None):
        """Serve an HTML file from the script's own directory.

        When *query* is given, the legacy frontend's ``apiParam`` assignment
        is rewritten in-place so old pages pick up the requested API mode.
        """
        try:
            file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
            if os.path.exists(file_path):
                self.send_response(200)
                self.send_header('Content-type', 'text/html; charset=utf-8')
                self.end_headers()
                with open(file_path, 'rb') as f:
                    content = f.read()
                if query:
                    try:
                        content = content.decode('utf-8')
                        # Rewrite the apiParam assignment for the legacy frontend.
                        content = content.replace(
                            "const apiParam = urlParams.get('api') || '1';",
                            f"const apiParam = '{query.split('=')[1]}';"
                        )
                        content = content.encode('utf-8')
                    except Exception as e:
                        print(f"Error modifying HTML content: {e}")
                self.wfile.write(content)
            else:
                # Fix: the f-string previously contained no placeholder.
                self.send_error(404, f'{filename} not found')
        except Exception as e:
            print(f"Error serving file: {e}")
            raise

    def serve_static_file(self, filename):
        """Serve a static file with a Content-Type derived from its extension.

        The file is read *before* the status line goes out, so a read failure
        can still be reported as a clean 500 (the old code had already sent
        200 + headers by the time it opened the file, corrupting the response
        on error).
        """
        try:
            ext = os.path.splitext(filename)[1].lower()
            content_type = self.MIME_TYPES.get(ext, 'application/octet-stream')
            with open(filename, 'rb') as f:
                payload = f.read()
            self.send_response(200)
            self.send_header('Content-type', content_type)
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            self.wfile.write(payload)
        except Exception as e:
            print(f"Error serving static file: {e}")
            self.send_error(500, 'Internal Server Error')
def check_port_available(port):
    """Return True when TCP *port* can be bound on all interfaces."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(('', port))
        return True
    except socket.error:
        return False
    finally:
        probe.close()
def run():
    """Start the threaded HTTP server on port 18369 and serve until Ctrl+C."""
    port = 18369
    if not check_port_available(port):
        print(f"错误: 端口 {port} 已被占用")
        return
    httpd = ThreadingHTTPServer(('', port), APIHandler)
    # Startup banner describing the available endpoints.
    print(f'Server running on http://localhost:{port}')
    print('API endpoints:')
    print(' GET /api/roots - 获取所有根节点')
    print(' GET /api/children/<id> - 获取指定节点的子节点(自动判断组织/摄像头)')
    print(' GET /api/stream/<id> - 获取视频流地址')
    print('静态文件服务: 访问 / 或 /index.html')
    print('按 Ctrl+C 停止服务器')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('\n服务器已停止')
        httpd.server_close()
if __name__ == '__main__':
run()

2
live_catalog/static/js/hls.min.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -1,6 +1,5 @@
import requests import requests
import urllib3 import urllib3
import yaml
from hikvision_openapi_signer import HikvisionOpenAPISigner from hikvision_openapi_signer import HikvisionOpenAPISigner
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -13,12 +12,12 @@ signer = HikvisionOpenAPISigner(
headers={'tagId': '0'} # 根据平台要求设置 headers={'tagId': '0'} # 根据平台要求设置
) )
def get_camera_preview_url(camera_index_code): def get_organization_list(parentIndexCode):
# 签名一个请求 # 签名一个请求
request = signer.sign( request = signer.sign(
'POST', 'POST',
'/api/video/v1/cameras/previewURLs', '/api/resource/v1/regions/subRegions',
jsons={'cameraIndexCode': camera_index_code, 'protocol': 'rtsp', 'expand': 'streamform=rtp'}, jsons={'parentIndexCode': parentIndexCode, 'treeCode': '0'},
accept='application/json' accept='application/json'
) )
method, url, headers, body = request method, url, headers, body = request
@@ -26,28 +25,28 @@ def get_camera_preview_url(camera_index_code):
response = requests.request(method, url, headers=headers, data=body, verify=False) response = requests.request(method, url, headers=headers, data=body, verify=False)
return response.json() return response.json()
# 读取 config.yaml def get_final_list(regionIndexCode):
with open('../config.yaml', 'r', encoding='utf-8') as f: # 签名一个请求
config = yaml.safe_load(f) request = signer.sign(
'POST',
'/api/resource/v1/regions/regionIndexCode/cameras',
jsons={'pageNo': 1, 'pageSize': 100, 'regionIndexCode': regionIndexCode, 'treeCode': '0'},
accept='application/json'
)
method, url, headers, body = request
# 发送请求注意离线环境下verify=False可能必要但需知安全风险
response = requests.request(method, url, headers=headers, data=body, verify=False)
return response.json()
# 遍历所有摄像头 def get_camera_preview_url(camera_index_code, stream_type = 0):
for camera in config['cameras']: # 签名一个请求
if 'index' in camera: request = signer.sign(
index = camera['index'] 'POST',
print(f"正在获取摄像头 {camera['name']} (index: {index}) 的预览地址...") '/api/video/v1/cameras/previewURLs',
result = get_camera_preview_url(index) jsons={'cameraIndexCode': camera_index_code, 'protocol': 'hls', 'streamType': stream_type, 'expand': 'transcode=1'},
print(f"API返回结果: {result}") accept='application/json'
)
# 提取 url 并更新到 config method, url, headers, body = request
if 'data' in result and 'url' in result['data']: # 发送请求
rtsp_url = result['data']['url'] response = requests.request(method, url, headers=headers, data=body, verify=False)
camera['rtsp_url'] = rtsp_url return response.json()
print(f"更新 rtsp_url: {rtsp_url}")
else:
print(f"未找到 url 在返回结果中")
# 保存更新后的 config.yaml
with open('../config.yaml', 'w', encoding='utf-8') as f:
yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
print("config.yaml 已更新")

416
web_page_2/coordinate.html Normal file
View File

@@ -0,0 +1,416 @@
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8" />
<title>坐标提取工具</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
html, body { width: 100%; height: 100%; overflow: hidden; background: #111827; color: #e5e7eb; }
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif; display: flex; }
.left-panel {
width: 320px; min-width: 320px; background: #020617; border-right: 1px solid #1f2937;
display: flex; flex-direction: column; height: 100vh;
}
.left-header {
padding: 12px 16px; border-bottom: 1px solid #1f2937; font-size: 14px; color: #9ca3af;
font-weight: 500; display: flex; align-items: center; justify-content: space-between; flex-shrink: 0;
}
.coord-list {
flex: 1; overflow-y: auto; padding: 8px;
}
.coord-group {
margin-bottom: 10px; background: #0f172a; border: 1px solid #1f2937; border-radius: 6px;
overflow: hidden;
}
.coord-group-header {
display: flex; align-items: center; justify-content: space-between;
padding: 8px 12px; border-bottom: 1px solid #1f2937; font-size: 13px;
}
.coord-group-color {
display: inline-block; width: 10px; height: 10px; border-radius: 50%; margin-right: 8px;
}
.coord-group-actions { display: flex; gap: 6px; }
.coord-group-actions button {
background: none; border: none; cursor: pointer; font-size: 13px; padding: 2px 4px;
border-radius: 3px; transition: background 0.15s;
}
.coord-group-actions .copy-btn { color: #60a5fa; }
.coord-group-actions .copy-btn:hover { background: rgba(96,165,250,0.15); }
.coord-group-actions .del-btn { color: #f87171; }
.coord-group-actions .del-btn:hover { background: rgba(248,113,113,0.15); }
.coord-group pre {
margin: 0; padding: 8px 12px; font-family: 'Courier New', monospace; font-size: 12px;
color: #d1d5db; white-space: pre; overflow-x: auto; line-height: 1.6;
}
.main-area {
flex: 1; display: flex; flex-direction: column; min-width: 0;
}
.toolbar {
padding: 10px 16px; background: #0f172a; border-bottom: 1px solid #1f2937;
display: flex; align-items: center; gap: 12px; flex-shrink: 0;
}
.upload-btn {
padding: 6px 16px; background: #3b82f6; color: #fff; border: none; border-radius: 4px;
font-size: 13px; cursor: pointer; transition: background 0.2s;
}
.upload-btn:hover { background: #2563eb; }
.toolbar-info { font-size: 12px; color: #9ca3af; }
.toolbar-hint {
margin-left: auto; font-size: 12px; color: #6b7280;
}
.toolbar-hint kbd {
padding: 1px 5px; background: #1f2937; border: 1px solid #374151;
border-radius: 3px; font-size: 11px; font-family: inherit;
}
.canvas-area {
flex: 1; position: relative; overflow: hidden; background: #000;
display: flex; align-items: center; justify-content: center;
}
#imageCanvas { cursor: crosshair; }
.upload-placeholder {
position: absolute; display: flex; flex-direction: column; align-items: center;
gap: 12px; color: #6b7280; font-size: 14px;
}
.upload-placeholder svg { opacity: 0.3; }
.toast {
position: fixed; top: 20px; right: 20px; padding: 10px 18px;
background: #10b981; color: #fff; border-radius: 6px; font-size: 13px;
opacity: 0; transition: opacity 0.3s; pointer-events: none; z-index: 999;
}
.toast.show { opacity: 1; }
#fileInput { display: none; }
</style>
</head>
<body>
<aside class="left-panel">
<div class="left-header">
<span>坐标数据</span>
<button id="copyAllBtn" class="copy-btn" style="background:none;border:none;color:#60a5fa;cursor:pointer;font-size:12px;">复制全部</button>
</div>
<div id="coordList" class="coord-list"></div>
</aside>
<div class="main-area">
<div class="toolbar">
<button class="upload-btn" id="uploadBtn">上传图片</button>
<input type="file" id="fileInput" accept=".jpg,.jpeg,.png" />
<span class="toolbar-info" id="imageInfo"></span>
<span class="toolbar-hint">
<kbd>点击</kbd> 标记点 &nbsp;
<kbd>Backspace</kbd> 撤销 &nbsp;
<kbd>Enter</kbd> 完成当前组
</span>
</div>
<div class="canvas-area" id="canvasArea">
<canvas id="imageCanvas"></canvas>
<div class="upload-placeholder" id="placeholder">
<svg width="64" height="64" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5">
<rect x="3" y="3" width="18" height="18" rx="2" ry="2"/>
<circle cx="8.5" cy="8.5" r="1.5"/>
<polyline points="21 15 16 10 5 21"/>
</svg>
<span>点击「上传图片」开始</span>
</div>
</div>
</div>
<div class="toast" id="toast"></div>
<script>
// Palette cycled per group index so consecutive groups are visually distinct.
const GROUP_COLORS = [
  '#ef4444', '#3b82f6', '#10b981', '#f59e0b', '#8b5cf6',
  '#ec4899', '#06b6d4', '#f97316', '#6366f1', '#14b8a6'
];
// All point coordinates are stored normalized to the image: 0..1 on each axis.
let groups = []; // completed groups: [{points: [[x,y], ...]}, ...]
let currentPoints = []; // points of the group currently being edited
let image = null; // HTMLImageElement of the uploaded picture (null until upload)
let canvasScale = 1; // image-pixel -> canvas-pixel scale factor (set by fitCanvas)
let canvasOffsetX = 0; // left offset of the drawn image inside the canvas
let canvasOffsetY = 0; // top offset of the drawn image inside the canvas
// Cached DOM references used throughout the script.
const canvas = document.getElementById('imageCanvas');
const ctx = canvas.getContext('2d');
const canvasArea = document.getElementById('canvasArea');
const placeholder = document.getElementById('placeholder');
const fileInput = document.getElementById('fileInput');
const imageInfo = document.getElementById('imageInfo');
const coordList = document.getElementById('coordList');
const toast = document.getElementById('toast');
// --- Upload ---
// --- Upload ---
document.getElementById('uploadBtn').addEventListener('click', () => fileInput.click());
// Load a user-selected image (JPG/PNG only), then reset all coordinate groups.
fileInput.addEventListener('change', (e) => {
  const file = e.target.files[0];
  if (!file) return;
  // Fix: clear the input so selecting the SAME file again re-fires 'change';
  // without this, re-uploading the current file silently does nothing.
  fileInput.value = '';
  const ext = file.name.toLowerCase().split('.').pop();
  if (!['jpg', 'jpeg', 'png'].includes(ext)) {
    showToast('仅支持 JPG/PNG 格式', true);
    return;
  }
  const reader = new FileReader();
  reader.onload = (ev) => {
    image = new Image();
    image.onload = () => {
      imageInfo.textContent = `${image.width} x ${image.height}`;
      placeholder.style.display = 'none';
      fitCanvas();
      redraw();
    };
    image.src = ev.target.result;
  };
  reader.readAsDataURL(file);
  // Reset groups on new image
  groups = [];
  currentPoints = [];
  renderCoordList();
});
// --- Canvas sizing ---
// Size the canvas to the visible area and compute the letterboxed placement
// of the image (scale + centering offsets) used by toCanvas/toNorm.
function fitCanvas() {
  if (!image) return;
  const viewW = canvasArea.clientWidth;
  const viewH = canvasArea.clientHeight;
  const imageRatio = image.width / image.height;
  const viewRatio = viewW / viewH;
  // Image wider than the viewport -> pin width; otherwise pin height.
  const pinWidth = imageRatio > viewRatio;
  const drawW = pinWidth ? viewW : viewH * imageRatio;
  const drawH = pinWidth ? viewW / imageRatio : viewH;
  canvasScale = drawW / image.width;
  canvasOffsetX = (viewW - drawW) / 2;
  canvasOffsetY = (viewH - drawH) / 2;
  canvas.width = viewW;
  canvas.height = viewH;
  canvas.style.width = viewW + 'px';
  canvas.style.height = viewH + 'px';
}
window.addEventListener('resize', () => { fitCanvas(); redraw(); });
// --- Drawing ---
// Repaint everything: background image, completed groups, then the group
// currently being edited (drawn last, with its editing indicator).
function redraw() {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  if (!image) return;
  ctx.drawImage(
    image,
    canvasOffsetX, canvasOffsetY,
    image.width * canvasScale, image.height * canvasScale
  );
  for (const [idx, group] of groups.entries()) {
    drawGroup(group.points, GROUP_COLORS[idx % GROUP_COLORS.length]);
  }
  if (currentPoints.length) {
    drawGroup(currentPoints, GROUP_COLORS[groups.length % GROUP_COLORS.length], true);
  }
}
// Render one group: connecting segments, numbered vertex dots, and (for the
// group still being edited) a dashed halo around its last point.
function drawGroup(points, color, isCurrent) {
  if (points.length === 0) return;
  // Connecting segments between consecutive points.
  if (points.length > 1) {
    ctx.beginPath();
    ctx.strokeStyle = color;
    ctx.lineWidth = 2;
    const start = toCanvas(points[0]);
    ctx.moveTo(start.x, start.y);
    for (const pt of points.slice(1)) {
      const p = toCanvas(pt);
      ctx.lineTo(p.x, p.y);
    }
    ctx.stroke();
  }
  // Vertex dots with 1-based index labels.
  points.forEach((pt, idx) => {
    const p = toCanvas(pt);
    ctx.beginPath();
    ctx.arc(p.x, p.y, 5, 0, Math.PI * 2);
    ctx.fillStyle = color;
    ctx.fill();
    ctx.strokeStyle = '#fff';
    ctx.lineWidth = 1.5;
    ctx.stroke();
    ctx.fillStyle = '#fff';
    ctx.font = '11px sans-serif';
    ctx.fillText(String(idx + 1), p.x + 7, p.y - 5);
  });
  // Dashed ring marks the active end of the group being edited.
  if (isCurrent) {
    const last = toCanvas(points[points.length - 1]);
    ctx.beginPath();
    ctx.arc(last.x, last.y, 10, 0, Math.PI * 2);
    ctx.strokeStyle = color;
    ctx.lineWidth = 1.5;
    ctx.setLineDash([3, 3]);
    ctx.stroke();
    ctx.setLineDash([]);
  }
}
// Map a normalized [x, y] point (0..1 relative to the image) to canvas pixels.
function toCanvas(pt) {
  const [nx, ny] = pt;
  return {
    x: nx * image.width * canvasScale + canvasOffsetX,
    y: ny * image.height * canvasScale + canvasOffsetY
  };
}
// Map canvas-pixel coordinates back to normalized image coordinates,
// rounded to 3 decimal places for compact YAML output.
function toNorm(cx, cy) {
  const relX = (cx - canvasOffsetX) / (image.width * canvasScale);
  const relY = (cy - canvasOffsetY) / (image.height * canvasScale);
  return [Math.round(relX * 1000) / 1000, Math.round(relY * 1000) / 1000];
}
// --- Click ---
// A click inside the drawn image appends a normalized point to the current group.
canvas.addEventListener('click', (e) => {
  if (!image) return;
  const rect = canvas.getBoundingClientRect();
  const clickX = e.clientX - rect.left;
  const clickY = e.clientY - rect.top;
  // Reject clicks on the letterbox padding around the image.
  const relX = (clickX - canvasOffsetX) / (image.width * canvasScale);
  const relY = (clickY - canvasOffsetY) / (image.height * canvasScale);
  if (relX < 0 || relX > 1 || relY < 0 || relY > 1) return;
  currentPoints.push(toNorm(clickX, clickY));
  redraw();
  renderCoordList();
});
// --- Keyboard ---
// Keyboard shortcuts: Enter finalizes the current group, Backspace undoes
// the most recent point of the group being edited.
document.addEventListener('keydown', (e) => {
  switch (e.key) {
    case 'Enter': {
      if (currentPoints.length === 0) return;
      groups.push({ points: [...currentPoints] });
      currentPoints = [];
      redraw();
      renderCoordList();
      showToast(`第 ${groups.length} 组坐标已完成`);
      break;
    }
    case 'Backspace': {
      if (currentPoints.length > 0) {
        currentPoints.pop();
        redraw();
        renderCoordList();
      }
      break;
    }
  }
});
// --- Coord list ---
// Rebuild the left-panel coordinate list from scratch: one card per completed
// group (with copy/delete buttons), plus a highlighted card for the group
// currently being edited. Called after every mutation of groups/currentPoints.
function renderCoordList() {
  coordList.innerHTML = '';
  // Empty state: no completed groups and nothing being edited.
  if (groups.length === 0 && currentPoints.length === 0) {
    coordList.innerHTML = '<div style="padding:16px;color:#4b5563;font-size:13px;text-align:center;">暂无坐标数据</div>';
    return;
  }
  // One card per completed group, colored to match its canvas rendering.
  groups.forEach((group, gi) => {
    const color = GROUP_COLORS[gi % GROUP_COLORS.length];
    const div = document.createElement('div');
    div.className = 'coord-group';
    div.innerHTML = `
      <div class="coord-group-header">
        <span><span class="coord-group-color" style="background:${color}"></span>第 ${gi + 1} 组 (${group.points.length} 点)</span>
        <div class="coord-group-actions">
          <button class="copy-btn" data-group="${gi}" title="复制">复制</button>
          <button class="del-btn" data-group="${gi}" title="删除">删除</button>
        </div>
      </div>
      <pre>${formatYAML(group.points)}</pre>
    `;
    coordList.appendChild(div);
  });
  // Current editing group (no copy/delete actions until it is finalized).
  if (currentPoints.length > 0) {
    const gi = groups.length;
    const color = GROUP_COLORS[gi % GROUP_COLORS.length];
    const div = document.createElement('div');
    div.className = 'coord-group';
    div.style.borderColor = color;
    div.innerHTML = `
      <div class="coord-group-header">
        <span><span class="coord-group-color" style="background:${color}"></span>第 ${gi + 1} 组 (编辑中, ${currentPoints.length} 点)</span>
      </div>
      <pre>${formatYAML(currentPoints)}</pre>
    `;
    coordList.appendChild(div);
  }
  // Bind copy/delete (listeners are recreated each render along with the DOM).
  coordList.querySelectorAll('.copy-btn[data-group]').forEach(btn => {
    btn.addEventListener('click', () => {
      const gi = parseInt(btn.dataset.group);
      copyText(formatYAML(groups[gi].points));
      showToast('已复制到剪贴板');
    });
  });
  coordList.querySelectorAll('.del-btn[data-group]').forEach(btn => {
    btn.addEventListener('click', () => {
      const gi = parseInt(btn.dataset.group);
      groups.splice(gi, 1);
      redraw();
      renderCoordList();
    });
  });
}
function formatYAML(points) {
return points.map(pt => `- [${pt[0]}, ${pt[1]}]`).join('\n');
}
// --- Copy all ---
// Copy every completed group to the clipboard, numbered and blank-line separated.
document.getElementById('copyAllBtn').addEventListener('click', () => {
  if (groups.length === 0) { showToast('暂无数据', true); return; }
  const chunks = groups.map((g, i) => `# 第 ${i + 1}\n${formatYAML(g.points)}`);
  copyText(chunks.join('\n\n'));
  showToast('已复制全部坐标');
});
// --- Utils ---
// Copy text via the async Clipboard API; fall back to a hidden textarea +
// execCommand for contexts where the API is unavailable or rejected.
function copyText(text) {
  navigator.clipboard.writeText(text).catch(() => {
    const helper = document.createElement('textarea');
    helper.value = text;
    document.body.appendChild(helper);
    helper.select();
    document.execCommand('copy');
    document.body.removeChild(helper);
  });
}
// Timer handle so a new toast restarts (rather than stacks) the hide delay.
let toastTimer = null;
// Show a transient notification; red background when isError is truthy.
function showToast(msg, isError) {
  clearTimeout(toastTimer);
  toast.textContent = msg;
  toast.style.background = isError ? '#ef4444' : '#10b981';
  toast.classList.add('show');
  toastTimer = setTimeout(() => toast.classList.remove('show'), 1800);
}
// Initial render
renderCoordList();
</script>
</body>
</html>

View File

@@ -48,6 +48,8 @@ class APIHandler(SimpleHTTPRequestHandler):
elif path == '/' or path == '/index.html': elif path == '/' or path == '/index.html':
# 默认访问使用 api=1 # 默认访问使用 api=1
self.serve_file('index.html', query='api=1') self.serve_file('index.html', query='api=1')
elif path == '/coords' or path == '/coordinate.html':
self.serve_file('coordinate.html')
else: else:
# 处理静态文件请求 # 处理静态文件请求
# 移除开头的 / # 移除开头的 /
@@ -130,6 +132,7 @@ def run():
httpd = ThreadingHTTPServer(server_address, APIHandler) httpd = ThreadingHTTPServer(server_address, APIHandler)
print(f'Server running on http://localhost:{port}') print(f'Server running on http://localhost:{port}')
print(f'支持的接口: /, /api/1, /api/2, /api/3, /api/4, /api/5, /api/6, /api/7, /api/11-16') print(f'支持的接口: /, /api/1, /api/2, /api/3, /api/4, /api/5, /api/6, /api/7, /api/11-16')
print(f'坐标提取工具: /coords')
print('按 Ctrl+C 停止服务器') print('按 Ctrl+C 停止服务器')
try: try:

View File

@@ -138,8 +138,9 @@
min-width: 0; min-width: 0;
} }
#liveImage { #liveImage {
max-width: 100%; width: 100%;
max-height: 100%; height: 100%;
object-fit: contain;
background: #000; background: #000;
} }
.status-bar { .status-bar {
@@ -244,7 +245,7 @@
const WS_PORT = config.port; const WS_PORT = config.port;
const WS_HOST = '29.1.70.11'; const WS_HOST = '29.1.70.11';
<!-- const WS_HOST = '127.0.0.1';--> <!-- const WS_HOST = '127.0.0.1'; -->
const liveImage = document.getElementById('liveImage'); const liveImage = document.getElementById('liveImage');
const statusBar = document.getElementById('status'); const statusBar = document.getElementById('status');
@@ -320,7 +321,8 @@
'Playing Phone': '玩手机', 'Playing Phone': '玩手机',
'Unvaild Uniform!!': '违规着装', 'Unvaild Uniform!!': '违规着装',
'Unchecked Trunk': '未检查后备箱', 'Unchecked Trunk': '未检查后备箱',
'Ignore': '漏检' 'Ignore': '漏检',
'Indoor Violation': '违规进入区域'
}; };
tag.textContent = actionMap[action] || action; tag.textContent = actionMap[action] || action;
title.appendChild(tag); title.appendChild(tag);