增加一些耗时操作打印

This commit is contained in:
zqc
2026-02-03 13:33:41 +08:00
parent 7f47735e0d
commit f04a8c0773
2 changed files with 33 additions and 5 deletions

View File

@@ -246,6 +246,7 @@ class YOLOv8_Pose_ONNX:
# -------------------------------------------------
def __call__(self, frame):
    """Run pose inference on one frame.

    Preprocesses the input image, executes the ONNX session (the
    time-consuming step flagged for profiling in this commit), and
    decodes the raw prediction back into image coordinates.

    Args:
        frame: input image array; only its height/width (``shape[:2]``)
            are needed for postprocessing.

    Returns:
        Whatever ``self.postprocess`` produces for this frame.
    """
    blob = self.preprocess(frame)
    # Model inference — the expensive call worth timing.
    raw_pred = self.session.run(None, {self.input_name: blob})[0]
    original_hw = frame.shape[:2]
    return self.postprocess(raw_pred, original_hw)

View File

@@ -227,17 +227,27 @@ class KadianDetector:
self.width, self.height = w, h
self.current_frame_idx += 1
# 性能计时开始
# total_start = time.time()
# ========= 每帧动态获取正确的 ROIint32=========
roi_points_int32 = self._get_roi_points(w, h) # shape: (4, 2), dtype: int32
roi_points_draw = roi_points_int32.reshape((-1, 1, 2)) # shape: (4, 1, 2) 用于绘制
current_time_sec = timestamp
# ========= 骨骼检测 =========
# pose_start = time.time()
# 耗时操作
pose_results = self.pose_detector(frame)
# pose_time = (time.time() - pose_start) * 1000
# ========= 主检测 =========
# detect_start = time.time()
# 耗时操作
detections = self.detector(frame)
# detect_time = (time.time() - detect_start) * 1000
dets_xyxy = []
dets_roles = []
@@ -272,7 +282,8 @@ class KadianDetector:
[self.height, self.width]
)
# logger.debug("tracks: {}".format(tracks))
# 绘制骨骼
# ========= 绘制骨骼 =========
frame = YOLOv8_Pose_ONNX.draw_keypoints(frame, pose_results)
# ========= 绘制 ROI =========
cv2.polylines(frame, [roi_points_draw], isClosed=True, color=(255, 0, 0), thickness=3)
@@ -666,9 +677,16 @@ class KadianDetector:
self.draw_alert(frame, alert_text, (0, 0, 255), offset_y=alert_offset)
alert_offset += 100
# # ========= 性能统计和输出 =========
# total_time = (time.time() - total_start) * 1000
# logger.info(f"[PERF_DETAIL] Camera {camera_id} - ProcessFrame Total: {total_time:.1f}ms | "
# f"PoseDetect: {pose_time:.1f}ms | "
# f"MainDetect: {detect_time:.1f}ms | "
# )
return {
"image": frame,
"alerts":current_frame_alerts
}
@@ -717,7 +735,9 @@ class FrameProcessorWorker(threading.Thread):
detector = self.kadian_detectors[cam_id]
# 执行检测
# detect_start = time.time()
result = detector.process_frame(frame.copy(), cam_id, ts)
# detect_time = (time.time() - detect_start) * 1000
result_img = result["image"]
result_type = result["alerts"]
@@ -741,11 +761,13 @@ class FrameProcessorWorker(threading.Thread):
self.last_alert_push_time[cam_id][action] = current_time
# 通过 WebSocket 发送帧结果
# encode_start = time.time()
try:
img_b64 = self._encode_base64(result_img)
except Exception as e:
logger.error(f"[ERROR] Encode image failed: {e}")
img_b64 = None
# encode_time = (time.time() - encode_start) * 1000
if img_b64 is not None:
# 将abnormal_actions对象数组转换为字符串数组
@@ -766,9 +788,14 @@ class FrameProcessorWorker(threading.Thread):
except queue.Full:
logger.warning("[WARN] ws_send_queue full, drop frame message")
# # 打印关键操作的耗时
# total_time = detect_time + encode_time
# logger.info(f"[PERF] Camera {cam_id} - Total: {total_time:.1f}ms | "
# f"Detect: {detect_time:.1f}ms | "
# f"Encode: {encode_time:.1f}ms | ")
self.raw_queue.task_done()
except Exception as e:
logger.error(f"[ERROR] Frame processing failed for camera {cam_id if 'cam_id' in locals() else 'unknown'}: {e}")
logger.exception("Exception details:") # 打印完整的堆栈跟踪
# 继续处理下一帧,不要退出循环
finally:
self.raw_queue.task_done()