Processing steps
- First, put the XML and JPG files to be processed into the same folder.
- Modify the constants defined in the code below (see the example after this list). The main ones are:
  TRAIN_RATIO: fraction of the data used for the training set
  SRC_DIR: source folder path that holds the XML and JPG files
  TAR_DIR: target folder where the results are generated
- Run the script.
- The generated COCO-format JSON files are placed in the annotations directory.
- The generated YOLO-format TXT files are placed in the labels folder.
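For example, a run that keeps 90% of the images for training could set the constants as follows. This is only a sketch; the paths below are hypothetical placeholders, not values from the original script.
TRAIN_RATIO = 0.9               # 90% train / 10% val split
SRC_DIR = "/data/voc_source"    # folder containing both the .xml and .jpg files
TAR_DIR = "/data/my_dataset"    # folder where annotations/, images/, labels/ are created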
Code
import sys
import os
import glob
import json
import shutil
import numpy as np
import xml.etree.ElementTree as ET
import random
START_BOUNDING_BOX_ID = 1
TRAIN_RATIO = 0.8  # split the dataset into train and val; TRAIN_RATIO is the fraction used for training
TRAIN_DIR = "train"  # name of the train split
VAL_DIR = "val"  # name of the val split
DATASETS_NAME = "my_data"  # name of this dataset
SRC_DIR = "images"  # folder holding the xml files and jpg images; note that xml and jpg must be in the same folder
TAR_DIR = "."  # target folder path, defaults to the current directory
classes = {}  # categories; if only_care_pre_define_categories = False this can be empty, categories found in the annotations are added automatically
pre_define_categories = {}  # predefined categories, example: pre_define_categories = {'a1': 1, 'a3': 2, 'a6': 3, 'a9': 4, "a10": 5}
only_care_pre_define_categories = False  # if True, only objects whose category appears in pre_define_categories are processed
def get(root, name):
    return root.findall(name)
def get_and_check(root, name, length):
    vars = root.findall(name)
    if len(vars) == 0:
        raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))
    if length > 0 and len(vars) != length:
        raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' % (name, length, len(vars)))
    if length == 1:
        vars = vars[0]
    return vars
def convert(xml_list, json_file, txt_path):
    categories = pre_define_categories
    all_categories = classes
    json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
    bnd_id = START_BOUNDING_BOX_ID
    for index, line in enumerate(xml_list):
        # print("Processing %s" % (line))
        xml_f = line
        tree = ET.parse(xml_f)
        root = tree.getroot()
        filename = os.path.basename(xml_f)[:-4] + ".jpg"
        image_id = 20190000001 + index
        size = get_and_check(root, 'size', 1)
        width = int(get_and_check(size, 'width', 1).text)
        height = int(get_and_check(size, 'height', 1).text)
        image = {'file_name': filename, 'height': height, 'width': width, 'id': image_id}
        json_dict['images'].append(image)
        txt_file_path = txt_path + "/" + os.path.basename(xml_f)[:-4] + ".txt"
        txt_dict = {"class_id": [], "x": [], "y": [], "w": [], "h": []}
        # Currently we do not support segmentation
        # segmented = get_and_check(root, 'segmented', 1).text
        # assert segmented == '0'
        for obj in get(root, 'object'):
            category = get_and_check(obj, 'name', 1).text
            if category in all_categories:
                all_categories[category] += 1
            else:
                all_categories[category] = 1
            if category not in categories:
                if only_care_pre_define_categories:
                    continue
                new_id = len(categories) + 1
                print("[warning] category '{}' not in 'pre_define_categories'({}), create new id: {} automatically"
                      .format(category, pre_define_categories, new_id))
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, 'bndbox', 1)
            xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
            ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
            xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
            ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
            assert (xmax > xmin), "xmax <= xmin, {}".format(line)
            assert (ymax > ymin), "ymax <= ymin, {}".format(line)
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            # collect the YOLO-format label (normalized center x, center y, width, height)
            txt_dict["class_id"].append(category_id)
            txt_dict["x"].append((xmin + xmax) / 2 / width)
            txt_dict["y"].append((ymin + ymax) / 2 / height)
            txt_dict["w"].append((xmax - xmin) / width)
            txt_dict["h"].append((ymax - ymin) / height)
            # add the COCO-format annotation to the json
            ann = {'area': o_width * o_height,
                   'iscrowd': 0,
                   'image_id': image_id,
                   'bbox': [xmin, ymin, o_width, o_height],
                   'category_id': category_id, 'id': bnd_id, 'ignore': 0,
                   'segmentation': []
                   }
            json_dict['annotations'].append(ann)
            bnd_id = bnd_id + 1
        # write the YOLO txt file for this image
        with open(txt_file_path, "w") as fp:
            txt_str = ""
            for i in range(len(txt_dict["x"])):
                txt_str += str(txt_dict["class_id"][i]) + " " + \
                           str(txt_dict["x"][i]) + " " + \
                           str(txt_dict["y"][i]) + " " + \
                           str(txt_dict["w"][i]) + " " + \
                           str(txt_dict["h"][i]) + "\n"
            fp.write(txt_str)
    for cate, cid in categories.items():
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    with open(json_file, 'w') as json_fp:
        json_fp.write(json.dumps(json_dict))
def make_dir(tar_dir):
    dir_list = ["/annotations",
                "/images/{}".format(TRAIN_DIR),
                "/images/{}".format(VAL_DIR),
                "/labels/{}".format(TRAIN_DIR),
                "/labels/{}".format(VAL_DIR)]
    for dir_path in dir_list:
        path = tar_dir + dir_path
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
if __name__ == '__main__':
    # create the output folders
    make_dir(TAR_DIR)
    # randomly split the images
    xml_list = glob.glob(SRC_DIR + "/*.xml")
    total_num = len(xml_list)
    train_num = int(total_num * TRAIN_RATIO)  # take TRAIN_RATIO of the files for training
    val_num = total_num - train_num
    train_list = random.sample(xml_list, train_num)
    val_list = list(set(xml_list) - set(train_list))
    # convert to COCO json files
    save_json_train = TAR_DIR + "/annotations/instances_{}.json".format(TRAIN_DIR)
    save_json_val = TAR_DIR + "/annotations/instances_{}.json".format(VAL_DIR)
    save_txt_train = TAR_DIR + "/labels/{}".format(TRAIN_DIR)
    save_txt_val = TAR_DIR + "/labels/{}".format(VAL_DIR)
    convert(train_list, save_json_train, save_txt_train)
    convert(val_list, save_json_val, save_txt_val)
    # write train.txt
    train_txt_path = TAR_DIR + "/train.txt"
    train_img_path = TAR_DIR + "/images/{}/".format(TRAIN_DIR)
    with open(train_txt_path, "w") as f1:
        for xml in train_list:
            img = xml[:-4] + ".jpg"
            shutil.copyfile(img, train_img_path + os.path.basename(img))
            f1.write(os.path.abspath(train_img_path + os.path.basename(img)) + "\n")
    # write valid.txt
    valid_txt_path = TAR_DIR + "/valid.txt"
    valid_img_path = TAR_DIR + "/images/{}/".format(VAL_DIR)
    with open(valid_txt_path, "w") as f2:
        for xml in val_list:
            img = xml[:-4] + ".jpg"
            shutil.copyfile(img, valid_img_path + os.path.basename(img))
            f2.write(os.path.abspath(valid_img_path + os.path.basename(img)) + "\n")
    # write the .names file
    names_txt_path = TAR_DIR + "/" + DATASETS_NAME + ".names"
    with open(names_txt_path, "w") as f3:
        for category in classes:
            f3.write(category + "\n")
    # write dataset.data
    data_txt_path = TAR_DIR + "/" + DATASETS_NAME + ".data"
    with open(data_txt_path, "w") as f4:
        text = "classes=" + str(len(classes)) + "\n"
        text += "train=" + os.path.abspath(train_txt_path) + "\n"
        text += "valid=" + os.path.abspath(valid_txt_path) + "\n"
        text += "names=" + os.path.abspath(names_txt_path) + "\n"
        f4.write(text)
    print("-------------------------------")
    print("train number:", train_num)
    print("val number:", val_num)
References
https://blog.csdn.net/weixin_43878078/article/details/120578830