Usage Guide
Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
Confirm a GPU is connected
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('No GPU device found')
print('Found GPU at: {}'.format(device_name))
Check the GPU
!/opt/bin/nvidia-smi
Install the OpenMMLab dependencies
!pip install openmim
!mim install mmdet
Check that the dependencies were installed correctly
from mmcv.runner import checkpoint
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
print("载入成功!")
Switch the working directory
import os
os.chdir("/content/drive/MyDrive/mmdetection")
os.getcwd()
Set your own classes
Edit mmdet/core/evaluation/class_names.py so that coco_classes() returns your own class names.
Edit mmdet/datasets/coco.py and change CLASSES = (...) in CocoDataset to your own classes.
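For reference, a minimal sketch of the two edits, assuming the 11 classes used in the config further below; the exact layout of these files can differ slightly between MMDetection 2.x versions:
# mmdet/core/evaluation/class_names.py: make coco_classes() return your own labels
def coco_classes():
    return [
        'Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC', 'Hyaline',
        'Granular', 'UA', 'YEAST', 'Renal'
    ]

# mmdet/datasets/coco.py: inside the CocoDataset class definition
CLASSES = ('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC', 'Hyaline',
           'Granular', 'UA', 'YEAST', 'Renal')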
Recompile
!python setup.py install
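To confirm the rebuilt package picked up the new class names, a quick sanity check (this assumes get_classes is exported from mmdet.core, as in MMDetection 2.x):
# Should print the 11 custom class names, not the 80 default COCO ones
from mmdet.core import get_classes
print(get_classes('coco'))
print(len(get_classes('coco')))  # expect 11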
My yolact_r50_1x8_coco.py config file is as follows:
_base_ = '../_base_/default_runtime.py'
img_size = 550
checkpoint_config = dict(interval=5)
model = dict(
    type='YOLACT',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=False,
        zero_init_residual=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5,
        upsample_cfg=dict(mode='bilinear')),
    bbox_head=dict(
        type='YOLACTHead',
        num_classes=11,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True),
    mask_head=dict(
        type='YOLACTProtonet',
        in_channels=256,
        num_protos=32,
        num_classes=11,
        max_masks_to_train=100,
        loss_mask_weight=6.125),
    segm_head=dict(
        type='YOLACTSegmHead',
        num_classes=11,
        in_channels=256,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        iou_thr=0.5,
        top_k=200,
        max_per_img=100))
dataset_type = 'CocoDataset'
classes = ('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC', 'Hyaline',
           'Granular', 'UA', 'YEAST', 'Renal')
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(img_size, img_size),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        classes=classes,
        type=dataset_type,
        ann_file=data_root + 'annotations/train325.json',
        img_prefix=data_root + 'train_img/',
        pipeline=train_pipeline),
    val=dict(
        classes=classes,
        type=dataset_type,
        ann_file=data_root + 'annotations/test134.json',
        img_prefix=data_root + 'test_img/',
        pipeline=test_pipeline),
    test=dict(
        classes=classes,
        type=dataset_type,
        ann_file=data_root + 'annotations/test134.json',
        img_prefix=data_root + 'test_img/',
        pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.1,
    step=[20, 42, 49, 52])
runner = dict(type='EpochBasedRunner', max_epochs=20)
cudnn_benchmark = True
evaluation = dict(metric=['bbox', 'segm'])
Run the training script
!python tools/train.py --auto-resume pig_work_dir/yolact/yolact_r50_1x8_coco.py
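With --auto-resume, an interrupted run picks up the latest checkpoint in the work dir the next time you launch. The 2.x train script also accepts an explicit checkpoint if you prefer to resume from a specific file; a sketch, with the checkpoint path being an assumption based on the work dir used later in this guide:
!python tools/train.py pig_work_dir/yolact/yolact_r50_1x8_coco.py \
    --resume-from work_dirs/yolact_r50_1x8_coco/latest.pth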
Test code
from mmcv.runner import checkpoint
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
config = '/content/drive/MyDrive/mmdetection/work_dirs/yolact_r50_1x8_coco/yolact_r50_1x8_coco.py'
checkpoint = '/content/drive/MyDrive/mmdetection/work_dirs/yolact_r50_1x8_coco/latest.pth'
model = init_detector(config, checkpoint, device='cuda:0')
img = '/content/drive/MyDrive/mmdetection/data/coco/test_img/bd000005.jpg'
result = inference_detector(model, img)
show_result_pyplot(model, img, result, score_thr=0.3)
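Beyond visualizing single images, the same checkpoint can be scored on the whole test set with MMDetection's test script. A sketch, assuming the 2.x tools/test.py interface and the config/checkpoint paths from above:
# Evaluate bbox and mask AP on the test set defined in the config
!python tools/test.py \
    /content/drive/MyDrive/mmdetection/work_dirs/yolact_r50_1x8_coco/yolact_r50_1x8_coco.py \
    /content/drive/MyDrive/mmdetection/work_dirs/yolact_r50_1x8_coco/latest.pth \
    --eval bbox segm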
Prevent Colab from disconnecting (paste this code into the browser console)
function ClickConnect(){
    console.log("Working");
    document
        .querySelector("#top-toolbar > colab-connect-button")
        .shadowRoot
        .querySelector("#connect")
        .click()
}
var id = setInterval(ClickConnect, 5 * 60000)
Errors
Error 1
AssertionError: The num_classes (3) in Shared2FCBBoxHead of MMDataParallel does not matches the length of CLASSES 80) in CocoDataset
Solution
Add the class information to the config file:
data = dict(
    train=dict(
        classes=('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC',
                 'Hyaline', 'Granular', 'UA', 'YEAST', 'Renal'),  # your own classes
        type=dataset_type,
        ...
    ))
Specifically:
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        classes=('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC',
                 'Hyaline', 'Granular', 'UA', 'YEAST', 'Renal'),
        type=dataset_type,
        ann_file=data_root + 'annotations/train325.json',
        img_prefix=data_root + 'train_img/',
        pipeline=train_pipeline),
    val=dict(
        classes=('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC',
                 'Hyaline', 'Granular', 'UA', 'YEAST', 'Renal'),
        type=dataset_type,
        ann_file=data_root + 'annotations/test134.json',
        img_prefix=data_root + 'test_img/',
        pipeline=test_pipeline),
    test=dict(
        classes=('Squamous', 'WBC', 'Urothelial', 'UKA', 'CaOX', 'RBC',
                 'Hyaline', 'Granular', 'UA', 'YEAST', 'Renal'),
        type=dataset_type,
        ann_file=data_root + 'annotations/test134.json',
        img_prefix=data_root + 'test_img/',
        pipeline=test_pipeline))
Finally, thanks for following along, everyone!