Preface
I needed the DAGM2007 dataset for object detection, but the format of the official release differs from the mainstream COCO and VOC formats, and writing a new Dataset class for it felt like too much trouble. So this post collects a few scripts that convert the dataset between formats.
If anything in the code below is sloppy or wrong, corrections are very welcome!
Note: the COCO2VOC and VOC2COCO code in this post is meant for grayscale images only; to convert the actual COCO or Pascal VOC datasets you would need to modify it.
I also drew on this article, which explains things really clearly: VOC格式与COCO格式详解
1. Downloading the DAGM2007 dataset
The DAGM2007 dataset contains ten class folders and is widely used to evaluate object-detection networks. In the original competition the first six classes served as development sets and the last four as competition sets; in the public release each class folder contains both a Train and a Test split. The download link is below: after you fill in your email address, a download link is sent to that mailbox automatically. DAGM2007下载地址 The page sometimes refuses to load; refreshing a few times usually gets you in. If it really will not open, check other blog posts, as someone has uploaded the dataset to Baidu Netdisk.
2. DAGM2COCO
The rough logic is:
1. Read the required information for each image from the original dataset.
2. Write that information into the new dataset in COCO format.
3. Copy the images over to the new dataset.
The new dataset ends up with the following layout (a skeleton of the annotation files follows the tree):
- DAGM2007-COCO
  - annotations
    - instances_train.json
    - instances_val.json
  - train
  - val
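For orientation, this is roughly the skeleton the script writes into instances_train.json and instances_val.json; the concrete values (image size, box coordinates) are made up for illustration:

{
  "info": {"year": 2007, "contributor": "..."},
  "images": [{"id": 0, "width": 512, "height": 512, "file_name": "00000000.PNG"}],
  "annotations": [{"id": 0, "image_id": 0, "category_id": 1,
                   "bbox": [120, 80, 60, 40], "iscrowd": 0, "area": 2400}],
  "categories": [{"id": 1, "name": "class1", "supercategory": "defect"}]
}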
Code
Because file names repeat across the class folders of DAGM2007, the images are renamed here with zero-padded ids (image_id 7 becomes 00000007.PNG, for example). obj_list holds the ten class names.
import os
import cv2
import json
import shutil

# The ten DAGM2007 class names; category ids 1..10 are assigned in this order.
obj_list = ['class1', 'class2', 'class3', 'class4', 'class5',
            'class6', 'class7', 'class8', 'class9', 'class10']


def get_infor(path, mode, new_dataset_path):
    """Collect paths and ids for all defective samples of one split and copy
    the renamed images into the new dataset."""
    information = []
    dataset_cla = [cla for cla in os.listdir(path) if os.path.isdir(os.path.join(path, cla))]
    i = 0
    for cla in dataset_cla:
        # The archives extract to a doubled folder, e.g. Class1/Class1/Train.
        cla_path = os.path.join(path, cla, cla)
        with open(os.path.join(cla_path, mode, 'Label', 'Labels.txt'), 'r') as f:
            lines = f.read().split('\n')
        for line in lines:
            if len(line) <= 1:
                continue
            infor = line.split('\t')
            # Field 1 flags defective samples; only those have a defect mask.
            if int(infor[1]) == 1:
                img = {}
                img['image_path'] = os.path.join(cla_path, mode, infor[0] + '.PNG')
                img['category_id'] = int(cla.replace('Class', ''))  # 'Class7' -> 7
                img['label_path'] = os.path.join(cla_path, mode, 'Label', infor[4])
                img['image_id'] = i
                information.append(img)
                i += 1
    # Rename with zero-padded ids because the original names repeat across classes.
    subset = 'train' if mode == 'Train' else 'val'
    for img in information:
        new_name = f"{img['image_id']:08d}.PNG"
        img['file_name'] = new_name
        shutil.copy(img['image_path'], os.path.join(new_dataset_path, subset, new_name))
    return information


def get_bbox(label_path):
    """Return one [x, y, w, h] box per connected defect region in the mask."""
    annotations = []
    img = cv2.imread(label_path)[:, :, 0]
    contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        annotations.append([x, y, w, h])
    return annotations


def infor2coco(infor, new_path, mode):
    coco = {}
    images = []
    annotations = []
    categories = []
    global_annotation_id = 0
    coco['info'] = {
        'year': 2007,
        'contributor': 'https://hci.iwr.uni-heidelberg.de/content/weakly-supervised-learning-industrial-optical-inspection'
    }
    for img_infor in infor:
        image = {}
        image['id'] = img_infor['image_id']
        img = cv2.imread(img_infor['image_path'])[:, :, 0]
        # shape is (height, width); DAGM images happen to be square, but keep the order right.
        image['height'] = img.shape[0]
        image['width'] = img.shape[1]
        image['file_name'] = img_infor['file_name']
        images.append(image)
        # Build a fresh dict per box: reusing one dict across the loop would
        # leave every appended annotation pointing at the same (last) box.
        for bbox in get_bbox(img_infor['label_path']):
            annotation = {}
            annotation['id'] = global_annotation_id
            annotation['image_id'] = img_infor['image_id']
            annotation['category_id'] = img_infor['category_id']
            annotation['bbox'] = bbox
            annotation['iscrowd'] = 0
            annotation['area'] = bbox[2] * bbox[3]
            annotations.append(annotation)
            global_annotation_id += 1
    coco['images'] = images
    coco['annotations'] = annotations
    for i, cla in enumerate(obj_list):
        categories.append({'id': i + 1, 'name': cla, 'supercategory': 'defect'})
    coco['categories'] = categories
    file_name = f'{new_path}/annotations/instances_{mode}.json'
    if os.path.exists(file_name):
        os.remove(file_name)
    with open(file_name, 'w') as f:
        json.dump(coco, f)


def main():
    path = '/DAGM2007'
    new_path = '/DAGM2007-COCO'
    # Create the target layout before anything is copied or written into it.
    for sub in ('annotations', 'train', 'val'):
        os.makedirs(os.path.join(new_path, sub), exist_ok=True)
    infor = get_infor(path, 'Train', new_dataset_path=new_path)
    infor2coco(infor, new_path, 'train')
    val_infor = get_infor(path, 'Test', new_dataset_path=new_path)
    infor2coco(val_infor, new_path, 'val')


if __name__ == '__main__':
    main()
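For reference, get_infor only relies on three of the tab-separated fields in each line of Labels.txt: field 0 is the image file stem, field 1 is the defect flag (1 means the sample has a defect mask) and field 4 is the mask's file name. A hypothetical line for a defective sample would look roughly like this; the two middle fields are left as placeholders because the script never reads them:

0001	1	...	...	0001_label.PNG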
3. DAGM2VOC
Someone has already written this one; here is the link: DAGM2VOC
4. COCO2VOC
This part follows another blogger's approach; here is the link: COCO2VOC
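For orientation, the converter writes the standard Pascal VOC layout shown below. Train and val images share a single JPEGImages folder, which is why write_xml offsets the val file names; the ImageSets/Main split lists carry the actual train/val assignment, and each COCO [x, y, w, h] box is converted to VOC's [xmin, ymin, xmax, ymax] along the way.
- DAGM2007-VOC
  - Annotations
  - JPEGImages
  - ImageSets
    - Main
      - train.txt
      - val.txt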
Code
from pycocotools.coco import COCO
import os
import shutil
from lxml import etree, objectify
from tqdm import tqdm


def write_xml(root, annotation, ori_root, t):
    # Offset the val image ids so train and val names cannot collide once
    # they share a single JPEGImages folder.
    filename = annotation["filename"] if t == 'train' else str(int(annotation["filename"].split('.')[0]) + 10000000) + '.PNG'
    annot_save_path = os.path.join(root, 'Annotations', filename.replace('PNG', 'xml'))
    img_save_path = os.path.join(root, 'JPEGImages', filename)
    shutil.copy(os.path.join(ori_root, t, annotation["filename"]), img_save_path)
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder(root),
        E.filename(filename),
        E.size(
            E.width(annotation['size'][0]),
            E.height(annotation['size'][1]),
            E.depth(annotation['size'][2])
        ),
        E.segmented(0)
    )
    for obj in annotation['objects']:
        anno_tree.append(E.object(
            E.name(obj['name']),
            E.pose('Unspecified'),
            E.truncated(0),
            E.difficult(0),
            E.bndbox(
                E.xmin(obj['bbox'][0]),
                E.ymin(obj['bbox'][1]),
                E.xmax(obj['bbox'][2]),
                E.ymax(obj['bbox'][3])
            )
        ))
    etree.ElementTree(anno_tree).write(annot_save_path, pretty_print=True)
    return filename


def trans_c2v(coco, index, classes, voc_root, coco_root, t):
    img_info = coco.loadImgs(index)[0]
    annots = coco.loadAnns(coco.getAnnIds(imgIds=index))
    annotation = dict()
    annotation['filename'] = img_info['file_name']
    annotation['size'] = [img_info['width'], img_info['height'], 3]
    objects = []
    for annot in annots:
        obj = dict()
        obj['name'] = classes[annot['category_id']]
        # COCO stores [x, y, w, h]; VOC wants [xmin, ymin, xmax, ymax].
        obj['bbox'] = [annot['bbox'][0],
                       annot['bbox'][1],
                       annot['bbox'][0] + annot['bbox'][2],
                       annot['bbox'][1] + annot['bbox'][3]]
        objects.append(obj)
    annotation['objects'] = objects
    filename = write_xml(voc_root, annotation, coco_root, t)
    return filename


def main(coco_root, voc_root):
    voc_imgs_path = os.path.join(voc_root, 'JPEGImages')
    voc_annots_path = os.path.join(voc_root, 'Annotations')
    voc_set_path = os.path.join(voc_root, 'ImageSets', 'Main')
    # Start from a clean target tree.
    if os.path.exists(voc_root):
        shutil.rmtree(voc_root)
    os.makedirs(voc_imgs_path)
    os.makedirs(voc_annots_path)
    os.makedirs(voc_set_path)
    train_txt = []
    val_txt = []
    train_coco = COCO(os.path.join(coco_root, 'annotations', 'instances_train.json'))
    # Map category ids to names once; train and val share the same categories.
    classes = {c['id']: c['name'] for c in train_coco.dataset['categories']}
    for ids in tqdm(train_coco.getImgIds()):
        train_txt.append(trans_c2v(train_coco, ids, classes, voc_root, coco_root, 'train'))
    val_coco = COCO(os.path.join(coco_root, 'annotations', 'instances_val.json'))
    for ids in tqdm(val_coco.getImgIds()):
        val_txt.append(trans_c2v(val_coco, ids, classes, voc_root, coco_root, 'val'))
    # The split lists hold one file stem per line, without extensions.
    with open(os.path.join(voc_set_path, 'train.txt'), 'w') as f:
        f.write('\n'.join(name.split('.')[0] for name in train_txt))
    with open(os.path.join(voc_set_path, 'val.txt'), 'w') as f:
        f.write('\n'.join(name.split('.')[0] for name in val_txt))


if __name__ == '__main__':
    coco_root = '/DAGM2007-COCO'
    voc_root = '/DAGM2007-VOC'
    main(coco_root, voc_root)
Test code
from pycocotools.coco import COCO
import os
import cv2

root = '/DAGM2007-COCO'
set_name = 'train'
dagm_coco = COCO(os.path.join(root, 'annotations', 'instances_' + set_name + '.json'))
image_ids = dagm_coco.getImgIds()
for index in image_ids:
    image_info = dagm_coco.loadImgs(index)[0]
    annots = dagm_coco.loadAnns(dagm_coco.getAnnIds(imgIds=index))
    image_path = os.path.join(root, set_name, image_info['file_name'])
    img = cv2.imread(image_path)
    # COCO boxes are [x, y, w, h]; convert to corner points for drawing.
    for annot in annots:
        x, y, w, h = annot['bbox']
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 1)
    cv2.imshow(image_info['file_name'], img)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
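In the same spirit, here is a minimal sketch to spot-check the VOC conversion by parsing one generated annotation back with lxml; the file name 00000000.xml is only an example:

from lxml import etree

# Parse one of the XML files written by COCO2VOC (example file name).
tree = etree.parse('/DAGM2007-VOC/Annotations/00000000.xml')
print(tree.findtext('filename'))
for obj in tree.iter('object'):
    # Each object carries a class name and an [xmin, ymin, xmax, ymax] box.
    name = obj.findtext('name')
    box = [obj.findtext('bndbox/' + k) for k in ('xmin', 'ymin', 'xmax', 'ymax')]
    print(name, box)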