基于YOLOv11的图书馆占座检测系统 深度学习YOLOv11模型如何训练图书馆占位检测数据集 YOLO数据集 目标检测算法
"""Training script (train.py) for a YOLOv11-based library seat-occupancy detector.

Dataset summary (from the accompanying write-up):
    * 2568 images, annotated in both YOLO (.txt) and VOC (.xml) formats.
    * 2 classes: ``chair`` (7434 boxes) and ``person`` (3016 boxes).
    * Trained with ``yolov11n.pt`` for 30 epochs; reported mAP@0.5 = 0.983
      (chair AP 0.976, person AP 0.989).
    * The companion GUI is a PyQt5 application (see main_window.py).

Expected dataset layout::

    library_dataset/
    ├── images/
    │   ├── train/
    │   └── val/
    ├── labels/
    │   ├── train/
    │   └── val/
    └── data.yaml

``data.yaml`` contents::

    path: ./library_dataset   # dataset root directory
    train: images/train       # training images
    val: images/val           # validation images
    nc: 2                     # number of classes
    names:
      0: chair
      1: person
"""
from ultralytics import YOLO


def train_library_model():
    """Train YOLOv11n on the library seat-occupancy dataset.

    Outputs (weights, curves, logs) are written under
    ``runs/train/library_occupancy_v11n/``; the best checkpoint lands at
    ``weights/best.pt``.
    """
    # 1. Load the YOLOv11n pretrained weights.
    model = YOLO("yolov11n.pt")

    # 2. Launch training.
    results = model.train(
        data="library_dataset/data.yaml",  # dataset config file path
        epochs=30,                         # number of training epochs
        imgsz=640,                         # input image size
        batch=16,                          # batch size; tune to GPU memory
        name="library_occupancy_v11n",     # run name
        project="runs/train",              # output project directory
        exist_ok=True,                     # allow overwriting a previous run
        patience=5,                        # early-stopping patience
        device=0,                          # GPU 0; set to "cpu" if no GPU
    )

    print("训练完成")
    print(f"最佳模型保存在:{results.save_dir}/weights/best.pt")


if __name__ == "__main__":
    train_library_model()
"""Qt GUI (main_window.py) for the YOLOv11 library seat-occupancy detector.

Built with PyQt5 and OpenCV. Features:
    * image/video file detection and real-time webcam detection
    * per-detection class / confidence / bounding-box display
    * detection statistics and a results table

Usage:
    1. ``pip install ultralytics PyQt5 opencv-python torch``
    2. Train with train.py; weights are saved to
       ``runs/train/library_occupancy_v11n/weights/best.pt``.
    3. Make sure the ``YOLO(...)`` path below points at your ``best.pt``,
       then run this file to launch the GUI.
"""
import sys
import os
import time

import cv2
import torch
from PyQt5.QtWidgets import (
    QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
    QLabel, QPushButton, QFileDialog, QComboBox, QTableWidget,
    QTableWidgetItem, QMessageBox,
)
from PyQt5.QtGui import QPixmap, QImage, QFont
from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal
from ultralytics import YOLO


# --- Detection worker thread ---
class DetectionThread(QThread):
    """Runs YOLO inference on a video source off the UI thread."""

    # Signals used to push results back to the UI thread.
    frame_processed = pyqtSignal(QImage, list, float)
    # NOTE(review): this shadows the built-in QThread.finished signal;
    # it works because run() emits it explicitly, but renaming it would
    # be safer — confirm before refactoring.
    finished = pyqtSignal()

    def __init__(self, model, source=0):
        super().__init__()
        self.model = model
        self.source = source  # 0 for webcam, or a file path
        self.running = True

    def run(self):
        cap = cv2.VideoCapture(self.source)
        while self.running and cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            start_time = time.time()
            # Run inference on the current frame.
            results = self.model(frame)
            infer_time = time.time() - start_time

            # Parse results into plain dicts for the UI.
            detections = []
            annotated_frame = results[0].plot()  # frame with boxes drawn
            for box in results[0].boxes:
                cls_id = int(box.cls[0])
                conf = float(box.conf[0])
                xyxy = box.xyxy[0].tolist()
                detections.append({
                    "class": self.model.names[cls_id],
                    "confidence": conf,
                    "coordinates": xyxy,
                })

            # Convert color format (BGR -> RGB) for Qt display.
            rgb_image = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w
            qt_image = QImage(rgb_image.data, w, h, bytes_per_line,
                              QImage.Format_RGB888)
            # Hand the frame and detections to the UI thread.
            self.frame_processed.emit(qt_image, detections, infer_time)
        cap.release()
        self.finished.emit()

    def stop(self):
        """Request the loop to stop and wait for the thread to exit."""
        self.running = False
        self.wait()


# --- Main window ---
class MainWindow(QMainWindow):
    """Main application window: video display, controls, results table."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("基于YOLOv11的图书馆占座检测")
        self.setGeometry(100, 100, 1200, 800)
        # Load the trained model — make sure this path is correct.
        self.model = YOLO("runs/train/library_occupancy_v11n/weights/best.pt")
        self.init_ui()
        self.detection_thread = None

    def init_ui(self):
        """Build the widget tree: image area (left), controls (right), table."""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QHBoxLayout(central_widget)

        # --- Left: image display area ---
        left_layout = QVBoxLayout()
        self.image_label = QLabel("等待输入...")
        self.image_label.setAlignment(Qt.AlignCenter)
        self.image_label.setMinimumSize(640, 480)
        self.image_label.setStyleSheet("QLabel { background-color : lightgray; }")
        left_layout.addWidget(self.image_label)

        # --- Right: control panel ---
        right_layout = QVBoxLayout()

        # File import section
        file_group = QLabel("文件导入")
        file_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(file_group)
        self.file_path_label = QLabel("d(DST1037)/qtUI/1.jpg")
        right_layout.addWidget(self.file_path_label)
        self.btn_select_video = QPushButton("请选择视频文件")
        self.btn_select_video.clicked.connect(self.select_video)
        right_layout.addWidget(self.btn_select_video)
        self.btn_webcam = QPushButton("摄像头未开启")
        self.btn_webcam.clicked.connect(self.toggle_webcam)
        right_layout.addWidget(self.btn_webcam)

        # Detection result summary section
        result_group = QLabel("检测结果")
        result_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(result_group)
        self.time_label = QLabel("用时: 0.000s")
        self.count_label = QLabel("目标数目: 0")
        self.target_combo = QComboBox()
        self.target_combo.addItems(["全部", "chair", "person"])
        self.conf_label = QLabel("置信度: 0.00%")
        right_layout.addWidget(self.time_label)
        right_layout.addWidget(self.count_label)
        right_layout.addWidget(self.target_combo)
        right_layout.addWidget(self.conf_label)

        # Target position section
        position_group = QLabel("目标位置")
        position_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(position_group)
        self.xmin_label = QLabel("xmin: 0")
        self.ymin_label = QLabel("ymin: 0")
        self.xmax_label = QLabel("xmax: 0")
        self.ymax_label = QLabel("ymax: 0")
        right_layout.addWidget(self.xmin_label)
        right_layout.addWidget(self.ymin_label)
        right_layout.addWidget(self.xmax_label)
        right_layout.addWidget(self.ymax_label)

        # Action buttons
        action_group = QLabel("操作")
        action_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(action_group)
        self.btn_save = QPushButton("保存")  # no handler wired yet
        self.btn_exit = QPushButton("退出")
        self.btn_exit.clicked.connect(self.close)
        btn_layout = QHBoxLayout()
        btn_layout.addWidget(self.btn_save)
        btn_layout.addWidget(self.btn_exit)
        right_layout.addLayout(btn_layout)

        # --- Detection results table ---
        self.table = QTableWidget()
        self.table.setColumnCount(5)
        self.table.setHorizontalHeaderLabels(
            ["序号", "文件路径", "类别", "置信度", "坐标位置"])

        main_layout.addLayout(left_layout, 70)
        main_layout.addLayout(right_layout, 30)
        main_layout.addWidget(self.table)

    def select_video(self):
        """Open a file dialog and start detection on the chosen video."""
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择视频文件", "", "Video Files (*.mp4 *.avi *.mov)")
        if file_path:
            self.file_path_label.setText(file_path)
            self.start_detection(file_path)

    def toggle_webcam(self):
        """Start or stop webcam detection, updating the button label."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
            self.btn_webcam.setText("摄像头未开启")
        else:
            self.start_detection(0)
            self.btn_webcam.setText("摄像头已开启")

    def start_detection(self, source):
        """Stop any running thread and start a new one on ``source``."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
        self.detection_thread = DetectionThread(self.model, source)
        self.detection_thread.frame_processed.connect(self.update_frame)
        self.detection_thread.finished.connect(self.on_detection_finished)
        self.detection_thread.start()

    def update_frame(self, qt_image, detections, infer_time):
        """Slot: refresh image, stats labels, and results table."""
        # Update the displayed image.
        pixmap = QPixmap.fromImage(qt_image)
        self.image_label.setPixmap(
            pixmap.scaled(self.image_label.size(), Qt.KeepAspectRatio))

        # Update summary labels.
        self.time_label.setText(f"用时:{infer_time:.3f}s")
        self.count_label.setText(f"目标数目:{len(detections)}")

        # Confidence label shows the mean confidence over all detections.
        if detections:
            avg_conf = sum(d["confidence"] for d in detections) / len(detections)
            self.conf_label.setText(f"置信度:{avg_conf:.2%}")
        else:
            self.conf_label.setText("置信度: 0.00%")

        # Update the table, one row per detection.
        self.table.setRowCount(len(detections))
        for i, det in enumerate(detections):
            self.table.setItem(i, 0, QTableWidgetItem(str(i + 1)))
            self.table.setItem(i, 1, QTableWidgetItem(self.file_path_label.text()))
            self.table.setItem(i, 2, QTableWidgetItem(det["class"]))
            self.table.setItem(i, 3, QTableWidgetItem(f"{det['confidence']:.2%}"))
            coords = [int(x) for x in det["coordinates"]]
            self.table.setItem(i, 4, QTableWidgetItem(str(coords)))
            # Show the position of the first detection in the side panel.
            if i == 0:
                self.xmin_label.setText(f"xmin:{coords[0]}")
                self.ymin_label.setText(f"ymin:{coords[1]}")
                self.xmax_label.setText(f"xmax:{coords[2]}")
                self.ymax_label.setText(f"ymax:{coords[3]}")

    def on_detection_finished(self):
        """Slot: thread ended (video over or webcam stopped)."""
        self.btn_webcam.setText("摄像头未开启")
        QMessageBox.information(self, "提示", "视频播放完毕或摄像头已停止。")

    def closeEvent(self, event):
        """Ensure the worker thread is stopped before the window closes."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
        event.accept()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())