I. Competition Background
City Governance Track: As urbanization accelerates, city management faces unprecedented challenges. Problems such as illegal street-side vending, piled-up garbage, and unlicensed mobile vendors place ever higher demands on city administration. This track focuses on the intelligent detection of urban violations: participants are asked to develop efficient and reliable computer-vision algorithms that improve the accuracy of violation detection, reduce the reliance on heavy manual inspection, and raise both the effectiveness and the efficiency of detection, pushing city governance in a more efficient, intelligent, and civilized direction and creating a safe, harmonious, and sustainable living environment for residents.
II. Competition Task
The preliminary task is to detect urban violations in a given set of city-management surveillance videos. Violations mainly include overflowing garbage bins, illegally parked motor vehicles, and illegally parked non-motor vehicles. Participants must analyze the videos, mark the violations, and report the time and location at which each violation occurs.
The preliminary round provides city-management surveillance videos together with the corresponding violation annotations. Violations include overflowing garbage bins, illegally parked motor vehicles, illegally parked non-motor vehicles, and so on.
The videos are in mp4 format and the annotations are json files; each video has exactly one corresponding json file.
Each json file records the violations detected in every frame and contains the following fields (a sample record is shown after the list):
- frame_id: the number of the frame in which the violation appears
- event_id: the ID of the violation event
- category: the violation category
- bbox: the coordinates of the detected violation's bounding box, in [xmin, ymin, xmax, ymax] form
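For illustration, a single annotation record (what `train_anno[0]` returns in the reading step below) might look like this; the frame_id and event_id values here are made up, while the bbox reuses the example drawn later in this walkthrough:
{'frame_id': 20, 'event_id': 1, 'category': '机动车违停', 'bbox': [746, 494, 988, 786]}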
III. Task 1: Running the Baseline
1. Importing Libraries and Downloading the Dataset
!/opt/miniconda/bin/pip install opencv-python pandas matplotlib ultralytics
import os, sys
import cv2, glob, json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
!apt install zip unzip -y
!apt install unar -y
!wget "https://comp-public-prod.obs.cn-east-3.myhuaweicloud.com/dataset/2024/%E8%AE%AD%E7%BB%83%E9%9B%86%28%E6%9C%89%E6%A0%87%E6%B3%A8%E7%AC%AC%E4%B8%80%E6%89%B9%29.zip?AccessKeyId=583AINLNMLDRFK7CC1YM&Expires=1739168844&Signature=9iONBSJORCS8UNr2m/VZnc7yYno%3D" -O 训练集\(有标注第一批\).zip
!unar -q 训练集\(有标注第一批\).zip
!wget "https://comp-public-prod.obs.cn-east-3.myhuaweicloud.com/dataset/2024/%E6%B5%8B%E8%AF%95%E9%9B%86.zip?AccessKeyId=583AINLNMLDRFK7CC1YM&Expires=1739168909&Signature=CRsB54VqOtrzIdUHC3ay0l2ZGNw%3D" -O 测试集.zip
!unar -q 测试集.zip
Besides importing the main libraries, this code downloads the dataset provided by the competition from the URLs above.
2. Reading the Data
Reading the annotated dataset
train_anno = json.load(open('训练集(有标注第一批)/标注/45.json', encoding='utf-8'))
train_anno[0], len(train_anno)
Loading the downloaded json file gives us its first annotation object and the total number of annotation objects.
pd.read_json('训练集(有标注第一批)/标注/45.json')
Using the pandas library imported earlier, this opens the specified json file, parses its content, and converts it into a tabular DataFrame.
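To get a quick feel for the annotations, the DataFrame can be inspected further; a small exploratory sketch one can add here:
anno_df = pd.read_json('训练集(有标注第一批)/标注/45.json')
print(anno_df.shape)                       # number of annotation rows and columns
print(anno_df['category'].value_counts())  # how often each violation category occurs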
Reading the video dataset
video_path = '训练集(有标注第一批)/视频/45.mp4'
cap = cv2.VideoCapture(video_path)
while True:
    # Read the next frame
    ret, frame = cap.read()
    if not ret:
        break
    break  # only the first frame is needed here, so exit immediately
frame.shape  # (height, width, channels) of the frame
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total number of frames in the video
bbox = [746, 494, 988, 786]
pt1 = (bbox[0], bbox[1])
pt2 = (bbox[2], bbox[3])
color = (0, 255, 0)
thickness = 2  # line thickness
cv2.rectangle(frame, pt1, pt2, color, thickness)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
plt.imshow(frame)
This draws a green rectangle on the image frame and converts it from BGR (OpenCV's default channel order) to RGB so it displays with correct colors in matplotlib.
3. Data Conversion
os.makedirs('yolo-dataset/train', exist_ok=True)
os.makedirs('yolo-dataset/val', exist_ok=True)
dir_path = os.path.abspath('./') + '/'
# Adjust the path below to match your own directory layout
with open('yolo-dataset/yolo.yaml', 'w', encoding='utf-8') as up:
    up.write(f'''
path: {dir_path}yolo-dataset/
train: train/
val: val/

names:
    0: 非机动车违停
    1: 机动车违停
    2: 垃圾桶满溢
    3: 违法经营
''')
This writes the dataset paths and category names for the YOLO object-detection model, so that the model can read and process the data correctly.
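To confirm the config file is well-formed, it can be parsed back with PyYAML (installed as a dependency of ultralytics); a quick sanity check one can add, not part of the original baseline:
import yaml
with open('yolo-dataset/yolo.yaml', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)
print(cfg['path'], cfg['names'])  # expect the dataset root and the four class names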
train_annos = glob.glob('训练集(有标注第一批)/标注/*.json')
train_videos = glob.glob('训练集(有标注第一批)/视频/*.mp4')
train_annos.sort()
train_videos.sort()
category_labels = ["非机动车违停", "机动车违停", "垃圾桶满溢", "违法经营"]
Collecting and sorting the paths of all annotation and video files guarantees that each video is processed together with its corresponding annotation during the conversion below.
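Because the pairing below relies purely on sort order, it is worth verifying that each annotation file really matches its video by file name; a minimal check, assuming both sides use the same numeric base names:
for anno_path, video_path in zip(train_annos, train_videos):
    anno_name = os.path.basename(anno_path)[:-5]    # strip '.json'
    video_name = os.path.basename(video_path)[:-4]  # strip '.mp4'
    assert anno_name == video_name, f'mismatched pair: {anno_path} vs {video_path}'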
# Use the first five videos as the training split
for anno_path, video_path in zip(train_annos[:5], train_videos[:5]):
    print(video_path)
    anno_df = pd.read_json(anno_path)
    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        img_height, img_width = frame.shape[:2]
        # Annotations belonging to the current frame
        frame_anno = anno_df[anno_df['frame_id'] == frame_idx]
        cv2.imwrite('./yolo-dataset/train/' + anno_path.split('/')[-1][:-5] + '_' + str(frame_idx) + '.jpg', frame)
        if len(frame_anno) != 0:
            with open('./yolo-dataset/train/' + anno_path.split('/')[-1][:-5] + '_' + str(frame_idx) + '.txt', 'w') as up:
                for category, bbox in zip(frame_anno['category'].values, frame_anno['bbox'].values):
                    category_idx = category_labels.index(category)
                    x_min, y_min, x_max, y_max = bbox
                    # Convert to YOLO format: normalized center coordinates plus width/height
                    x_center = (x_min + x_max) / 2 / img_width
                    y_center = (y_min + y_max) / 2 / img_height
                    width = (x_max - x_min) / img_width
                    height = (y_max - y_min) / img_height
                    if x_center > 1:  # debug guard: flag boxes that fall outside the frame
                        print(bbox)
                    up.write(f'{category_idx} {x_center} {y_center} {width} {height}\n')
        frame_idx += 1
This converts the video frames and their annotations into YOLO format so that a YOLO model can be trained on them later.
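As a concrete check of the coordinate conversion, here is the arithmetic for the example bbox from earlier, assuming a hypothetical 1920x1080 frame:
img_width, img_height = 1920, 1080               # hypothetical frame size
x_min, y_min, x_max, y_max = 746, 494, 988, 786  # example bbox from the drawing step
x_center = (x_min + x_max) / 2 / img_width       # 867 / 1920 ≈ 0.4516
y_center = (y_min + y_max) / 2 / img_height      # 640 / 1080 ≈ 0.5926
width = (x_max - x_min) / img_width              # 242 / 1920 ≈ 0.1260
height = (y_max - y_min) / img_height            # 292 / 1080 ≈ 0.2704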
# Same conversion, applied to the last three videos to build the validation split
for anno_path, video_path in zip(train_annos[-3:], train_videos[-3:]):
    print(video_path)
    anno_df = pd.read_json(anno_path)
    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        img_height, img_width = frame.shape[:2]
        frame_anno = anno_df[anno_df['frame_id'] == frame_idx]
        cv2.imwrite('./yolo-dataset/val/' + anno_path.split('/')[-1][:-5] + '_' + str(frame_idx) + '.jpg', frame)
        if len(frame_anno) != 0:
            with open('./yolo-dataset/val/' + anno_path.split('/')[-1][:-5] + '_' + str(frame_idx) + '.txt', 'w') as up:
                for category, bbox in zip(frame_anno['category'].values, frame_anno['bbox'].values):
                    category_idx = category_labels.index(category)
                    x_min, y_min, x_max, y_max = bbox
                    x_center = (x_min + x_max) / 2 / img_width
                    y_center = (y_min + y_max) / 2 / img_height
                    width = (x_max - x_min) / img_width
                    height = (y_max - y_min) / img_height
                    up.write(f'{category_idx} {x_center} {y_center} {width} {height}\n')
        frame_idx += 1
This generates the validation data for the YOLO model: it saves the video frames as images and creates a matching annotation text file for each frame, containing the class index and normalized bounding-box coordinates of every annotated object in that frame.
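Before moving on to training, it is sensible to confirm that images and label files were actually written; a small check (the exact counts depend on the videos processed):
print(len(glob.glob('yolo-dataset/train/*.jpg')), len(glob.glob('yolo-dataset/train/*.txt')))
print(len(glob.glob('yolo-dataset/val/*.jpg')), len(glob.glob('yolo-dataset/val/*.txt')))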
4. Training the YOLO Model
!wget http://mirror.coggle.club/yolo/yolov8n-v8.2.0.pt -O yolov8n.pt
!mkdir -p ~/.config/Ultralytics/
!wget http://mirror.coggle.club/yolo/Arial.ttf -O ~/.config/Ultralytics/Arial.ttf
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # train on the first GPU
import warnings
warnings.filterwarnings('ignore')
from ultralytics import YOLO
model = YOLO("yolov8n.pt")
# Note: imgsz should be a multiple of 32; ultralytics rounds 1080 up automatically with a warning
results = model.train(data="yolo-dataset/yolo.yaml", epochs=2, imgsz=1080, batch=16)
Training log
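Once training finishes, the best checkpoint (saved by ultralytics under runs/detect/train/weights/best.pt by default) can be evaluated on the validation split; a minimal sketch using the standard ultralytics API:
from ultralytics import YOLO
model = YOLO("runs/detect/train/weights/best.pt")
metrics = model.val()     # evaluates on the val split declared in yolo.yaml
print(metrics.box.map50)  # mAP@0.5 averaged over all classes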
category_labels = ["非机动车违停", "机动车违停", "垃圾桶满溢", "违法经营"]
os.makedirs('result', exist_ok=True)
from ultralytics import YOLO
model = YOLO("runs/detect/train/weights/best.pt")
import glob
# Run inference on every test video and write one submission json per video
for path in glob.glob('测试集/*.mp4'):
    submit_json = []
    results = model(path, conf=0.05, imgsz=1080, verbose=False)
    for idx, result in enumerate(results):
        boxes = result.boxes  # Boxes object for bounding box outputs
        masks = result.masks  # Masks object for segmentation masks outputs
        keypoints = result.keypoints  # Keypoints object for pose outputs
        probs = result.probs  # Probs object for classification outputs
        obb = result.obb  # Oriented boxes object for OBB outputs
        if len(boxes.cls) == 0:
            continue
        xyxy = boxes.xyxy.data.cpu().numpy().round()  # box corners, [xmin, ymin, xmax, ymax]
        cls = boxes.cls.data.cpu().numpy().round()    # predicted class indices
        conf = boxes.conf.data.cpu().numpy()          # confidence scores
        for i, (ci, xy, confi) in enumerate(zip(cls, xyxy, conf)):
            submit_json.append(
                {
                    'frame_id': idx,
                    'event_id': i + 1,
                    'category': category_labels[int(ci)],
                    'bbox': [int(x) for x in xy],
                    'confidence': float(confi)
                }
            )
    with open('./result/' + path.split('/')[-1][:-4] + '.json', 'w', encoding='utf-8') as up:
        json.dump(submit_json, up, indent=4, ensure_ascii=False)
Result output
!\rm -rf result/.ipynb_checkpoints/
!\rm -f result.zip
!zip -r result.zip result/
IV. Submitting Results
Submit result.zip on the official competition website; the score will be returned a few minutes later.