首页 Paddle框架 帖子详情
整理yolov3/yolov3-tiny训练代码
收藏
快速回复
Paddle框架 问答深度学习 2747 10
整理yolov3/yolov3-tiny训练代码
收藏
快速回复
Paddle框架 问答深度学习 2747 10

整理的yolov3/yolov3-tiny训练代码

 

# -*- coding: UTF-8 -*-
"""
训练常基于dark-net的YOLOv3网络,目标检测
"""
#训练Yolo-v3模型的配置项,目前暂时没有预训练模型。可以控制是否启用tiny版本,tiny版本体积小,适合部署在移动设备。如果不熟悉,请不要随便更改图片的尺寸和anchors的尺寸,两者相互关联
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import uuid
import numpy as np
import time
import six
import math
import random
import paddle
import paddle.fluid as fluid
import logging
import xml.etree.ElementTree
import codecs
import json
import paddle
import paddle.fluid as fluid
import codecs


from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from PIL import Image, ImageEnhance, ImageDraw

# Module-level logger handle; populated by init_log_config().
logger = None

# Training configuration for YOLOv3 / YOLOv3-tiny. No pretrained model is
# provided for now. "use_tiny" selects the tiny variant, which is small and
# suited to mobile deployment. Do not change the input image size or the
# anchor sizes casually -- the two are tied to each other.
train_parameters = {
    "data_dir": "",
    "file_list": "train.txt",
    "class_dim": -1,            # number of classes; filled in by init_train_parameters()
    "label_dict": {},           # label name -> integer id; filled in by init_train_parameters()
    "image_count": -1,          # number of training samples; filled in by init_train_parameters()
    "continue_train": False,    # whether to load the previous run's weights and keep training
    "pretrained": False,
    "pretrained_model_dir": "./pretrained-model",
    "save_model_dir": "./yolo-model",
    "model_prefix": "yolo-v3",
    "use_tiny": True,           # whether to use the trimmed-down tiny model
    "max_box_num": 20,          # maximum number of objects in one image
    "num_epochs": 120,
    "train_batch_size": 5,      # for full YOLOv3 the batch cannot be large or memory blows up
    "use_gpu": True,
    "yolo_cfg": {
        "input_size": [3, 608, 608],
        "anchors": [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326],
        "anchor_mask": [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    },
    "yolo_tiny_cfg": {
        "input_size": [3, 256, 256],
        "anchors": [6, 8, 13, 15, 22, 34, 48, 50, 81, 100, 205, 191],
        "anchor_mask": [[3, 4, 5], [0, 1, 2]]
    },
    "ignore_thresh": 0.7,
    "mean_rgb": [127.5, 127.5, 127.5],
    "mode": "train",
    "multi_data_reader_count": 4,
    "apply_distort": True,
    "valid_thresh": 0.01,
    "nms_thresh": 0.45,
    "image_distort_strategy": {
        "expand_prob": 0.5,
        "expand_max_ratio": 4,
        "hue_prob": 0.5,
        "hue_delta": 18,
        "contrast_prob": 0.5,
        "contrast_delta": 0.5,
        "saturation_prob": 0.5,
        "saturation_delta": 0.5,
        "brightness_prob": 0.5,
        "brightness_delta": 0.125
    },
    "rsm_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.05, 0.01],
    },
    "momentum_strategy": {
        "learning_rate": 0.1,
        "decay_steps": 2 ** 7,
        "decay_rate": 0.8
    },
    "early_stop": {
        "sample_frequency": 50,
        "successive_limit": 3,
        "min_loss": 2.5,
        "min_curr_map": 0.84
    }
}
# Two model classes follow: YOLOv3 and YOLOv3-tiny. The helper get_yolo()
# below selects one of them based on configuration.
class YOLOv3(object):
    """Full YOLOv3 detector built as a paddle.fluid graph.

    net() assembles a DarkNet-style backbone plus one detection head per
    anchor-mask group and returns the raw prediction feature maps.
    """

    def __init__(self, class_num, anchors, anchor_mask):
        # class_num: number of object categories
        # anchors: flat list [w0, h0, w1, h1, ...] of anchor sizes
        # anchor_mask: per-output-scale index groups into `anchors`
        self.outputs = []
        self.downsample_ratio = 1
        self.anchor_mask = anchor_mask
        self.anchors = anchors
        self.class_num = class_num

        self.yolo_anchors = []
        self.yolo_classes = []
        # Expand each mask group into its flat (w, h) anchor list.
        for mask_pair in self.anchor_mask:
            mask_anchors = []
            for mask in mask_pair:
                mask_anchors.append(self.anchors[2 * mask])
                mask_anchors.append(self.anchors[2 * mask + 1])
            self.yolo_anchors.append(mask_anchors)
            self.yolo_classes.append(class_num)

    def name(self):
        return 'YOLOv3'

    def get_anchors(self):
        return self.anchors

    def get_anchor_mask(self):
        return self.anchor_mask

    def get_class_num(self):
        return self.class_num

    def get_downsample_ratio(self):
        return self.downsample_ratio

    def get_yolo_anchors(self):
        return self.yolo_anchors

    def get_yolo_classes(self):
        return self.yolo_classes

    def conv_bn(self,
                input,
                num_filters,
                filter_size,
                stride,
                padding,
                use_cudnn=True):
        """Conv2D -> BatchNorm -> LeakyReLU(0.1) building block."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02)),
            bias_attr=False)

        # Batch-norm parameters must not be regularized, so an explicit
        # zero-coefficient L2 term masks them out.
        # Using leaky inside batch_norm would force the default alpha=0.02;
        # to use a custom slope it must be applied separately, as below.
        out = fluid.layers.batch_norm(
            input=conv, act=None,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02), regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), regularizer=L2Decay(0.)))
        out = fluid.layers.leaky_relu(out, 0.1)
        return out

    def downsample(self, input, num_filters, filter_size=3, stride=2, padding=1):
        # Each stride-2 conv doubles the cumulative downsample ratio.
        self.downsample_ratio *= 2
        return self.conv_bn(input,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding)

    def basicblock(self, input, num_filters):
        # Residual unit: 1x1 reduce -> 3x3 expand + identity shortcut.
        conv1 = self.conv_bn(input, num_filters, filter_size=1, stride=1, padding=0)
        conv2 = self.conv_bn(conv1, num_filters * 2, filter_size=3, stride=1, padding=1)
        out = fluid.layers.elementwise_add(x=input, y=conv2, act=None)
        return out

    def layer_warp(self, input, num_filters, count):
        # Stack of `count` residual units at one resolution.
        res_out = self.basicblock(input, num_filters)
        for j in range(1, count):
            res_out = self.basicblock(res_out, num_filters)
        return res_out

    def upsample(self, input, scale=2):
        # Compute the dynamic upsample output shape from the input tensor.
        shape_nchw = fluid.layers.shape(input)
        shape_hw = fluid.layers.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
        shape_hw.stop_gradient = True
        in_shape = fluid.layers.cast(shape_hw, dtype='int32')
        out_shape = in_shape * scale
        out_shape.stop_gradient = True

        # Resize by actual_shape so variable input sizes are handled.
        out = fluid.layers.resize_nearest(
            input=input,
            scale=scale,
            actual_shape=out_shape)
        return out

    def yolo_detection_block(self, input, num_filters):
        """Detection head: alternating 1x1/3x3 convs; returns (route, tip)."""
        assert num_filters % 2 == 0, "num_filters {} cannot be divided by 2".format(num_filters)
        conv = input
        for j in range(2):
            conv = self.conv_bn(conv, num_filters, filter_size=1, stride=1, padding=0)
            conv = self.conv_bn(conv, num_filters * 2, filter_size=3, stride=1, padding=1)
        route = self.conv_bn(conv, num_filters, filter_size=1, stride=1, padding=0)
        tip = self.conv_bn(route, num_filters * 2, filter_size=3, stride=1, padding=1)
        return route, tip

    def net(self, img):
        """Assemble the full graph; returns one raw prediction map per scale."""
        # darknet backbone: residual-unit counts per stage
        stages = [1, 2, 8, 8, 4]
        assert len(self.anchor_mask) <= len(stages), "anchor masks can't bigger than downsample times"
        # input assumed square (e.g. 608x608 per config)
        conv1 = self.conv_bn(img, num_filters=32, filter_size=3, stride=1, padding=1)
        downsample_ = self.downsample(conv1, conv1.shape[1] * 2)
        blocks = []

        for i, stage_count in enumerate(stages):
            block = self.layer_warp(downsample_, 32 * (2 ** i), stage_count)
            blocks.append(block)
            if i < len(stages) - 1:
                downsample_ = self.downsample(block, block.shape[1] * 2)
        blocks = blocks[-1:-4:-1]  # take the last three stages, reversed, for the cross-scale links below

        # yolo detector heads
        for i, block in enumerate(blocks):
            # cross-scale link: concat the upsampled route with this stage
            # (`route` is defined by the previous iteration when i > 0)
            if i > 0:
                block = fluid.layers.concat(input=[route, block], axis=1)
            route, tip = self.yolo_detection_block(block, num_filters=512 // (2 ** i))
            block_out = fluid.layers.conv2d(
                input=tip,
                num_filters=len(self.anchor_mask[i]) * (self.class_num + 5),  # 5 elements represent x|y|h|w|score
                filter_size=1,
                stride=1,
                padding=0,
                act=None,
                param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02)),
                bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), regularizer=L2Decay(0.)))
            self.outputs.append(block_out)
            # upsample the route features so the next (finer) scale can concat them
            if i < len(blocks) - 1:
                route = self.conv_bn(route, 256 // (2 ** i), filter_size=1, stride=1, padding=0)
                route = self.upsample(route)

        return self.outputs


class YOLOv3Tiny(object):
    """YOLOv3-tiny detector: a small max-pool backbone with two detection scales."""

    def __init__(self, class_num, anchors, anchor_mask):
        # class_num: number of object categories
        # anchors: flat list [w0, h0, w1, h1, ...] of anchor sizes
        # anchor_mask: per-output-scale index groups into `anchors`
        self.outputs = []
        self.downsample_ratio = 1
        self.anchor_mask = anchor_mask
        self.anchors = anchors
        self.class_num = class_num

        self.yolo_anchors = []
        self.yolo_classes = []
        # Expand each mask group into its flat (w, h) anchor list.
        for mask_pair in self.anchor_mask:
            mask_anchors = []
            for mask in mask_pair:
                mask_anchors.append(self.anchors[2 * mask])
                mask_anchors.append(self.anchors[2 * mask + 1])
            self.yolo_anchors.append(mask_anchors)
            self.yolo_classes.append(class_num)

    def name(self):
        return 'YOLOv3-tiny'

    def get_anchors(self):
        return self.anchors

    def get_anchor_mask(self):
        return self.anchor_mask

    def get_class_num(self):
        return self.class_num

    def get_downsample_ratio(self):
        return self.downsample_ratio

    def get_yolo_anchors(self):
        return self.yolo_anchors

    def get_yolo_classes(self):
        return self.yolo_classes

    def conv_bn(self,
                input,
                num_filters,
                filter_size,
                stride,
                padding,
                num_groups=1,
                use_cudnn=True):
        """Conv2D -> BatchNorm(relu) building block; num_groups enables depthwise conv."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            act=None,
            groups=num_groups,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02)),
            bias_attr=False)

        # Batch-norm parameters must not be regularized, so an explicit
        # zero-coefficient L2 term masks them out.
        out = fluid.layers.batch_norm(
            input=conv, act='relu',
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02), regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), regularizer=L2Decay(0.)))

        return out

    def depthwise_conv_bn(self, input, filter_size=3, stride=1, padding=1):
        # Depthwise conv: groups == channels, one filter per input channel.
        num_filters = input.shape[1]
        return self.conv_bn(input,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding,
                            num_groups=num_filters)

    def downsample(self, input, pool_size=2, pool_stride=2):
        # Tiny variant downsamples with max-pooling instead of strided convs.
        self.downsample_ratio *= 2
        return fluid.layers.pool2d(input=input, pool_type='max', pool_size=pool_size,
                                   pool_stride=pool_stride)

    def basicblock(self, input, num_filters):
        # 3x3 conv followed by a 2x2 max-pool downsample.
        conv1 = self.conv_bn(input, num_filters, filter_size=3, stride=1, padding=1)
        out = self.downsample(conv1)
        return out

    def upsample(self, input, scale=2):
        # Compute the dynamic upsample output shape from the input tensor.
        shape_nchw = fluid.layers.shape(input)
        shape_hw = fluid.layers.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
        shape_hw.stop_gradient = True
        in_shape = fluid.layers.cast(shape_hw, dtype='int32')
        out_shape = in_shape * scale
        out_shape.stop_gradient = True

        # Resize by actual_shape so variable input sizes are handled.
        out = fluid.layers.resize_nearest(
            input=input,
            scale=scale,
            actual_shape=out_shape)
        return out

    def yolo_detection_block(self, input, num_filters):
        """Detection head: 1x1 route conv plus 3x3 tip conv."""
        route = self.conv_bn(input, num_filters, filter_size=1, stride=1, padding=0)
        tip = self.conv_bn(route, num_filters * 2, filter_size=3, stride=1, padding=1)
        return route, tip

    def net(self, img):
        """Assemble the tiny graph; returns one raw prediction map per scale."""
        # darknet-tiny backbone: filter counts per stage
        stages = [16, 32, 64, 128, 256, 512]
        assert len(self.anchor_mask) <= len(stages), "anchor masks can't bigger than downsample times"
        # input assumed square (e.g. 256x256 per config)
        tmp = img
        blocks = []
        for i, stage_count in enumerate(stages):
            if i == len(stages) - 1:
                block = self.conv_bn(tmp, stage_count, filter_size=3, stride=1, padding=1)
                blocks.append(block)
                # NOTE(review): the two depthwise results below are overwritten
                # without being appended, and each call reads blocks[-1] again --
                # only the final 1x1 conv actually contributes to blocks.
                # Confirm whether chaining block -> block was intended.
                block = self.depthwise_conv_bn(blocks[-1])
                block = self.depthwise_conv_bn(blocks[-1])
                block = self.conv_bn(blocks[-1], stage_count * 2, filter_size=1, stride=1, padding=0)
                blocks.append(block)
            else:
                tmp = self.basicblock(tmp, stage_count)
                blocks.append(tmp)

        # Two scales: the deepest feature map and the stage-3 map.
        blocks = [blocks[-1], blocks[3]]

        # yolo detector heads
        for i, block in enumerate(blocks):
            # cross-scale link: concat the upsampled route with this stage
            # (`route` is defined by the previous iteration when i > 0)
            if i > 0:
                block = fluid.layers.concat(input=[route, block], axis=1)
            if i < 1:
                route, tip = self.yolo_detection_block(block, num_filters=256 // (2 ** i))
            else:
                tip = self.conv_bn(block, num_filters=256, filter_size=3, stride=1, padding=1)
            block_out = fluid.layers.conv2d(
                input=tip,
                num_filters=len(self.anchor_mask[i]) * (self.class_num + 5),  # 5 elements represent x|y|h|w|score
                filter_size=1,
                stride=1,
                padding=0,
                act=None,
                param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02)),
                bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), regularizer=L2Decay(0.)))
            self.outputs.append(block_out)
            # upsample the route features so the next (finer) scale can concat them
            if i < len(blocks) - 1:
                route = self.conv_bn(route, 128 // (2 ** i), filter_size=1, stride=1, padding=0)
                route = self.upsample(route)

        return self.outputs


def get_yolo(is_tiny, class_num, anchors, anchor_mask):
    """Factory: return a YOLOv3Tiny or full YOLOv3 model, per configuration."""
    model_cls = YOLOv3Tiny if is_tiny else YOLOv3
    return model_cls(class_num, anchors, anchor_mask)
# Convenience initializers for the training parameters and the logger.
def init_train_parameters():
    """Fill in the derived training parameters: label dict, class count, image count."""
    data_dir = train_parameters['data_dir']
    label_path = os.path.join(data_dir, "label_list")
    with codecs.open(label_path, encoding='utf-8') as label_file:
        label_names = [ln.strip() for ln in label_file]
    for idx, name in enumerate(label_names):
        train_parameters['label_dict'][name] = idx
    train_parameters['class_dim'] = len(label_names)

    list_path = os.path.join(data_dir, train_parameters['file_list'])
    with codecs.open(list_path, encoding='utf-8') as list_file:
        train_parameters['image_count'] = len(list_file.readlines())


def init_log_config():
    """Configure the module-level logger: INFO to console, DEBUG to logs/train.log."""
    global logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    log_dir = os.path.join(os.getcwd(), 'logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    formatter = logging.Formatter(
        "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    file_handler = logging.FileHandler(os.path.join(log_dir, 'train.log'), mode='w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
# Image-augmentation helper functions follow.
def box_to_center_relative(box, img_height, img_width):
    """
    Convert a COCO-style box [x1, y1, w, h] to center form
    [center_x, center_y, w, h], normalized by the image size to [0, 1].
    Coordinates are first clamped to the image bounds (pixel convention
    with the -1 on the far edge).
    """
    assert len(box) == 4, "box should be a len(4) list or tuple"
    x, y, w, h = box

    left = max(x, 0)
    top = max(y, 0)
    right = min(x + w - 1, img_width - 1)
    bottom = min(y + h - 1, img_height - 1)

    center_x = (left + right) / 2 / img_width
    center_y = (top + bottom) / 2 / img_height
    rel_w = (right - left) / img_width
    rel_h = (bottom - top) / img_height

    return np.array([center_x, center_y, rel_w, rel_h])


def resize_img(img, sampled_labels, input_size):
    """Resize *img* to the configured square input; labels are accepted but unused."""
    new_size = (input_size[1], input_size[2])
    return img.resize(new_size, Image.BILINEAR)


def box_iou_xywh(box1, box2):
    """Elementwise IoU between box arrays given as [center_x, center_y, w, h]
    (pixel convention: widths count both endpoints, hence the +1 terms)."""
    assert box1.shape[-1] == 4, "Box1 shape[-1] should be 4."
    assert box2.shape[-1] == 4, "Box2 shape[-1] should be 4."

    a_min_x, a_max_x = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
    a_min_y, a_max_y = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
    b_min_x, b_max_x = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
    b_min_y, b_max_y = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2

    # Overlap extents, clamped at zero when the boxes are disjoint.
    inter_w = np.clip(np.minimum(a_max_x, b_max_x) - np.maximum(a_min_x, b_min_x) + 1, 0, None)
    inter_h = np.clip(np.minimum(a_max_y, b_max_y) - np.maximum(a_min_y, b_min_y) + 1, 0, None)

    inter_area = inter_w * inter_h
    area_a = (a_max_x - a_min_x + 1) * (a_max_y - a_min_y + 1)
    area_b = (b_max_x - b_min_x + 1) * (b_max_y - b_min_y + 1)

    return inter_area / (area_a + area_b - inter_area)


def box_crop(boxes, labels, crop, img_shape):
    """Clip relative-xywh *boxes* to the pixel crop (x, y, w, h).

    Boxes whose center falls outside the crop, or that collapse after
    clipping, are zeroed (along with their labels). Returns the surviving
    boxes re-normalized to the crop size, the masked labels, and the count
    of kept boxes.
    """
    crop_x, crop_y, crop_w, crop_h = map(float, crop)
    im_w, im_h = map(float, img_shape)

    boxes = boxes.copy()
    # relative center-xywh -> absolute corner xyxy
    half_w = boxes[:, 2] / 2
    half_h = boxes[:, 3] / 2
    abs_x1 = (boxes[:, 0] - half_w) * im_w
    abs_x2 = (boxes[:, 0] + half_w) * im_w
    abs_y1 = (boxes[:, 1] - half_h) * im_h
    abs_y2 = (boxes[:, 1] + half_h) * im_h
    boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] = abs_x1, abs_y1, abs_x2, abs_y2

    crop_box = np.array([crop_x, crop_y, crop_x + crop_w, crop_y + crop_h])
    centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
    mask = np.logical_and(crop_box[:2] <= centers, centers <= crop_box[2:]).all(axis=1)

    # Clip to the crop window and shift into crop-local coordinates.
    boxes[:, :2] = np.maximum(boxes[:, :2], crop_box[:2])
    boxes[:, 2:] = np.minimum(boxes[:, 2:], crop_box[2:])
    boxes[:, :2] -= crop_box[:2]
    boxes[:, 2:] -= crop_box[:2]

    mask = np.logical_and(mask, (boxes[:, :2] < boxes[:, 2:]).all(axis=1))
    boxes = boxes * np.expand_dims(mask.astype('float32'), axis=1)
    labels = labels * mask.astype('float32')

    # crop-local xyxy -> relative center-xywh w.r.t. the crop size
    new_cx = (boxes[:, 0] + boxes[:, 2]) / 2 / crop_w
    new_w = (boxes[:, 2] - boxes[:, 0]) / crop_w
    new_cy = (boxes[:, 1] + boxes[:, 3]) / 2 / crop_h
    new_h = (boxes[:, 3] - boxes[:, 1]) / crop_h
    boxes[:, 0], boxes[:, 2] = new_cx, new_w
    boxes[:, 1], boxes[:, 3] = new_cy, new_h

    return boxes, labels, mask.sum()


def random_brightness(img):
    """Randomly jitter brightness per the configured distort strategy."""
    strategy = train_parameters['image_distort_strategy']
    if np.random.uniform(0, 1) < strategy['brightness_prob']:
        span = strategy['brightness_delta']
        factor = np.random.uniform(-span, span) + 1
        img = ImageEnhance.Brightness(img).enhance(factor)
    return img


def random_contrast(img):
    """Randomly jitter contrast per the configured distort strategy."""
    strategy = train_parameters['image_distort_strategy']
    if np.random.uniform(0, 1) < strategy['contrast_prob']:
        span = strategy['contrast_delta']
        factor = np.random.uniform(-span, span) + 1
        img = ImageEnhance.Contrast(img).enhance(factor)
    return img


def random_saturation(img):
    """Randomly jitter saturation (PIL Color) per the configured distort strategy."""
    strategy = train_parameters['image_distort_strategy']
    if np.random.uniform(0, 1) < strategy['saturation_prob']:
        span = strategy['saturation_delta']
        factor = np.random.uniform(-span, span) + 1
        img = ImageEnhance.Color(img).enhance(factor)
    return img


def random_hue(img):
    """Randomly shift hue by rotating the H channel in HSV space."""
    strategy = train_parameters['image_distort_strategy']
    if np.random.uniform(0, 1) < strategy['hue_prob']:
        span = strategy['hue_delta']
        offset = np.random.uniform(-span, span)
        hsv = np.array(img.convert('HSV'))
        hsv[:, :, 0] = hsv[:, :, 0] + offset
        img = Image.fromarray(hsv, mode='HSV').convert('RGB')
    return img


def distort_image(img):
    """Apply the photometric distortions in one of two random orders."""
    if np.random.uniform(0, 1) > 0.5:
        pipeline = (random_brightness, random_contrast, random_saturation, random_hue)
    else:
        pipeline = (random_brightness, random_saturation, random_hue, random_contrast)
    for op in pipeline:
        img = op(img)
    return img


def random_crop(img, boxes, labels, scales=(0.3, 1.0), max_ratio=2.0, constraints=None, max_trial=50):
    """Randomly crop *img*, keeping only boxes whose centers fall inside the crop.

    Runs 40% of the time (and only when there are boxes). Candidate crops are
    sampled per IoU constraint; one candidate is then picked at random, and the
    first that still contains at least one box is applied (the crop is resized
    back to the original image size). Falls through to the unchanged inputs.

    FIX: `scales` was a mutable default list (shared across calls); it is now a
    tuple. Also, `random.randrange(0)` raised ValueError whenever a candidate
    crop spanned the full width/height; the offset now falls back to 0.
    """
    if random.random() > 0.6:
        return img, boxes, labels
    if len(boxes) == 0:
        return img, boxes, labels

    if not constraints:
        # (min_iou, max_iou) every box must satisfy for a candidate crop
        constraints = [
            (0.1, 1.0),
            (0.3, 1.0),
            (0.5, 1.0),
            (0.7, 1.0),
            (0.9, 1.0),
            (0.0, 1.0)]

    w, h = img.size
    crops = [(0, 0, w, h)]
    for min_iou, max_iou in constraints:
        for _ in range(max_trial):
            scale = random.uniform(scales[0], scales[1])
            aspect_ratio = random.uniform(max(1 / max_ratio, scale * scale),
                                          min(max_ratio, 1 / scale / scale))
            crop_h = int(h * scale / np.sqrt(aspect_ratio))
            crop_w = int(w * scale * np.sqrt(aspect_ratio))
            # randrange(0) raises ValueError; use offset 0 for full-size crops
            crop_x = random.randrange(w - crop_w) if crop_w < w else 0
            crop_y = random.randrange(h - crop_h) if crop_h < h else 0
            crop_box = np.array([[
                (crop_x + crop_w / 2.0) / w,
                (crop_y + crop_h / 2.0) / h,
                crop_w / float(w),
                crop_h / float(h)
            ]])

            iou = box_iou_xywh(crop_box, boxes)
            if min_iou <= iou.min() and max_iou >= iou.max():
                crops.append((crop_x, crop_y, crop_w, crop_h))
                break

    # Try candidate crops in random order; keep the first that retains a box.
    while crops:
        crop = crops.pop(np.random.randint(0, len(crops)))
        crop_boxes, crop_labels, box_num = box_crop(boxes, labels, crop, (w, h))
        if box_num < 1:
            continue
        img = img.crop((crop[0], crop[1], crop[0] + crop[2],
                        crop[1] + crop[3])).resize(img.size, Image.LANCZOS)
        return img, crop_boxes, crop_labels
    return img, boxes, labels


def random_expand(img, gtboxes, keep_ratio=True):
    """Randomly paste the image onto a larger mean-colored canvas and rescale
    the ground-truth boxes accordingly.

    NOTE(review): expansion is skipped when the draw falls *below* expand_prob,
    i.e. it actually happens with probability 1 - expand_prob. This looks
    inverted but is symmetric at the default 0.5 -- confirm before changing.
    """
    if np.random.uniform(0, 1) < train_parameters['image_distort_strategy']['expand_prob']:
        return img, gtboxes

    max_ratio = train_parameters['image_distort_strategy']['expand_max_ratio']
    w, h = img.size
    channels = 3
    ratio_x = random.uniform(1, max_ratio)
    ratio_y = ratio_x if keep_ratio else random.uniform(1, max_ratio)
    ow = int(w * ratio_x)
    oh = int(h * ratio_y)
    off_x = random.randint(0, ow - w)
    off_y = random.randint(0, oh - h)

    # Canvas filled with the dataset mean color, original image pasted in.
    canvas = np.zeros((oh, ow, channels), np.uint8)
    for ch in range(channels):
        canvas[:, :, ch] = train_parameters['mean_rgb'][ch]
    canvas[off_y: off_y + h, off_x: off_x + w, :] = img

    gtboxes[:, 0] = ((gtboxes[:, 0] * w) + off_x) / float(ow)
    gtboxes[:, 1] = ((gtboxes[:, 1] * h) + off_y) / float(oh)
    gtboxes[:, 2] = gtboxes[:, 2] / ratio_x
    gtboxes[:, 3] = gtboxes[:, 3] / ratio_y

    return Image.fromarray(canvas), gtboxes


def preprocess(img, bbox_labels, input_size, mode):
    """Augment (train mode only), resize, and normalize one sample.

    Returns (CHW float32 image scaled by 0.007843 after mean subtraction,
    label array with columns [label, x, y, w, h, difficult]).
    """
    sample_labels = np.array(bbox_labels)
    if mode == 'train':
        if train_parameters['apply_distort']:
            img = distort_image(img)
        img, gtboxes = random_expand(img, sample_labels[:, 1:5])
        img, gtboxes, gtlabels = random_crop(img, gtboxes, sample_labels[:, 0])
        sample_labels[:, 0] = gtlabels
        sample_labels[:, 1:5] = gtboxes
    img = resize_img(img, sample_labels, input_size)
    arr = np.array(img).astype('float32')
    arr -= train_parameters['mean_rgb']
    arr = arr.transpose((2, 0, 1))  # HWC -> CHW
    arr *= 0.007843
    return arr, sample_labels
# Custom data reader. To feed your own data, adapt the marked section below to
# your annotation format (currently: "image_path label_path" lines + VOC XML).
def custom_reader(file_list, data_dir, input_size, mode):
    """Return a sample generator over *file_list*.

    In 'train'/'eval' mode each sample is (img_chw, boxes, labels, difficults)
    with boxes padded/truncated to max_box_num; in 'test' mode, PIL images.
    """
    def reader():
        np.random.shuffle(file_list)
        for line in file_list:
            if mode == 'train' or mode == 'eval':
                ###################### the part below likely needs customizing ############################
                image_path, label_path = line.split()
                image_path = os.path.join(data_dir, image_path)
                label_path = os.path.join(data_dir, label_path)
                img = Image.open(image_path)
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                im_width, im_height = img.size
                # layout: label | xmin | ymin | xmax | ymax | difficult
                bbox_labels = []
                root = xml.etree.ElementTree.parse(label_path).getroot()
                for object in root.findall('object'):
                    bbox_sample = []
                    # start from 1
                    bbox_sample.append(float(train_parameters['label_dict'][object.find('name').text]))
                    bbox = object.find('bndbox')
                    difficult = float(object.find('difficult').text)
                    # corner coordinates normalized to [0, 1] by image size
                    bbox_sample.append(float(bbox.find('xmin').text) / im_width)
                    bbox_sample.append(float(bbox.find('ymin').text) / im_height)
                    bbox_sample.append(float(bbox.find('xmax').text) / im_width)
                    bbox_sample.append(float(bbox.find('ymax').text) / im_height)
                    bbox_sample.append(difficult)
                    bbox_labels.append(bbox_sample)
                ###################### end of the customizable part ############################
                if len(bbox_labels) == 0: continue
                img, sample_labels = preprocess(img, bbox_labels, input_size, mode)
                # sample_labels = np.array(sample_labels)
                if len(sample_labels) == 0: continue
                boxes = sample_labels[:, 1:5]
                lbls = sample_labels[:, 0].astype('int32')
                difficults = sample_labels[:, -1].astype('int32')
                max_box_num = train_parameters['max_box_num']
                cope_size = max_box_num if len(boxes) >= max_box_num else len(boxes)
                # Pad (or truncate) every sample to a fixed max_box_num boxes.
                ret_boxes = np.zeros((max_box_num, 4), dtype=np.float32)
                ret_lbls = np.zeros((max_box_num), dtype=np.int32)
                ret_difficults= np.zeros((max_box_num), dtype=np.int32)
                ret_boxes[0: cope_size] = boxes[0: cope_size]
                ret_lbls[0: cope_size] = lbls[0: cope_size]
                ret_difficults[0: cope_size] = difficults[0: cope_size]

                yield img, ret_boxes, ret_lbls, ret_difficults
            elif mode == 'test':
                img_path = os.path.join(line)
                yield Image.open(img_path)

    return reader
# Asynchronous (multiprocess) data reading; the optimizer definitions and the
# program/loss construction follow below.
def multi_process_custom_reader(file_path, data_dir, num_workers, input_size, mode):
    """Split the sample list into ~num_workers chunks, wrap each in a batched
    custom_reader, and merge them with a multiprocess reader.

    FIX: the chunk size was computed as math.ceil(len(images) // num_workers);
    `//` already floors, so ceil was a no-op and the split could yield more
    than num_workers readers. True division restores the intended ceiling;
    max(..., 1) guards against a zero slice step for very short lists.
    """
    list_path = os.path.join(data_dir, file_path)
    with open(list_path) as f:
        images = [line.strip() for line in f]
    chunk_size = max(int(math.ceil(len(images) / num_workers)), 1)
    readers = []
    for start in range(0, len(images), chunk_size):
        chunk = images[start: start + chunk_size]
        readers.append(paddle.batch(custom_reader(chunk, data_dir, input_size, mode),
                                    batch_size=train_parameters['train_batch_size']))
    return paddle.reader.multiprocess_reader(readers, False)


def create_eval_reader(file_path, data_dir, input_size, mode):
    """Single-process batched reader over the sample list, for evaluation."""
    list_path = os.path.join(data_dir, file_path)
    with open(list_path) as f:
        samples = [ln.strip() for ln in f]
    return paddle.batch(custom_reader(samples, data_dir, input_size, mode),
                        batch_size=train_parameters['train_batch_size'],
                        drop_last=True)


def optimizer_momentum_setting():
    """Momentum optimizer with an exponentially decayed learning rate."""
    cfg = train_parameters['momentum_strategy']
    lr = fluid.layers.exponential_decay(
        learning_rate=cfg['learning_rate'],
        decay_steps=cfg['decay_steps'],
        decay_rate=cfg['decay_rate'])
    return fluid.optimizer.MomentumOptimizer(learning_rate=lr, momentum=0.1)


def optimizer_rms_setting():
    """RMSProp optimizer with a piecewise-constant LR dropped at the configured epochs."""
    cfg = train_parameters['rsm_strategy']
    steps_per_epoch = train_parameters["image_count"] // train_parameters["train_batch_size"]
    base_lr = cfg['learning_rate']

    boundaries = [epoch * steps_per_epoch for epoch in cfg["lr_epochs"]]
    values = [scale * base_lr for scale in cfg["lr_decay"]]

    return fluid.optimizer.RMSProp(
        learning_rate=fluid.layers.piecewise_decay(boundaries, values),
        regularization=fluid.regularizer.L2Decay(0.00005))


def build_train_program_with_async_reader(main_prog, startup_prog):
    """Build the training graph with an async py_reader.

    Wires a multiprocess reader into a py_reader, builds the selected YOLO
    model, attaches one yolov3_loss per output scale, and minimizes their sum
    with RMSProp. Returns (data_reader, total_loss).
    """
    max_box_num = train_parameters['max_box_num']
    ues_tiny = train_parameters['use_tiny']
    yolo_config = train_parameters['yolo_tiny_cfg'] if ues_tiny else train_parameters['yolo_cfg']
    with fluid.program_guard(main_prog, startup_prog):
        img = fluid.layers.data(name='img', shape=yolo_config['input_size'], dtype='float32')
        gt_box = fluid.layers.data(name='gt_box', shape=[max_box_num, 4], dtype='float32', lod_level=0)
        gt_label = fluid.layers.data(name='gt_label', shape=[max_box_num], dtype='int32', lod_level=0)
        difficult = fluid.layers.data(name='difficult', shape=[max_box_num], dtype='int32', lod_level=0)
        data_reader = fluid.layers.create_py_reader_by_data(capacity=train_parameters['train_batch_size'],
                                                            feed_list=[img, gt_box, gt_label, difficult],
                                                            name='train')
        multi_reader = multi_process_custom_reader(train_parameters['file_list'],
                                                   train_parameters['data_dir'],
                                                   train_parameters['multi_data_reader_count'],
                                                   yolo_config['input_size'],
                                                   'train')
        data_reader.decorate_paddle_reader(multi_reader)
        with fluid.unique_name.guard():
            img, gt_box, gt_label, difficult = fluid.layers.read_file(data_reader)
            model = get_yolo(ues_tiny, train_parameters['class_dim'], yolo_config['anchors'], yolo_config['anchor_mask'])
            outputs = model.net(img)
        losses = []
        downsample_ratio = model.get_downsample_ratio()
        with fluid.unique_name.guard('train'):
            # One loss per output scale; the ratio halves per (finer) scale.
            for i, out in enumerate(outputs):
                logger.info("{0} downsample_ratio: {1} output:{2}".format(i, downsample_ratio, out))
                loss = fluid.layers.yolov3_loss(
                    x=out,
                    gt_box=gt_box,
                    gt_label=gt_label,
                    anchors=model.get_anchors(),
                    anchor_mask=model.get_anchor_mask()[i],
                    class_num=model.get_class_num(),
                    ignore_thresh=train_parameters['ignore_thresh'],
                    downsample_ratio=downsample_ratio)
                losses.append(fluid.layers.reduce_mean(loss))
                downsample_ratio //= 2
            loss = sum(losses)
            optimizer = optimizer_rms_setting()
            optimizer.minimize(loss)
        return data_reader, loss


def build_eval_program_with_feeder(main_prog, startup_prog, place):
    """Build the evaluation graph with a feed-based reader.

    Returns (feeder, reader, outputs, gt_box, gt_label, difficult); the
    ground-truth inputs use LoD tensors (variable box count per image).
    """
    ues_tiny = train_parameters['use_tiny']
    yolo_config = train_parameters['yolo_tiny_cfg'] if ues_tiny else train_parameters['yolo_cfg']
    with fluid.program_guard(main_prog, startup_prog):
        img = fluid.layers.data(name='img', shape=yolo_config['input_size'], dtype='float32')
        gt_box = fluid.layers.data(name='gt_box', shape=[4], dtype='float32', lod_level=1)
        gt_label = fluid.layers.data(name='gt_label', shape=[1], dtype='int32', lod_level=1)
        difficult = fluid.layers.data(name='difficult', shape=[1], dtype='int32', lod_level=1)
        feeder = fluid.DataFeeder(feed_list=[img, gt_box, gt_label, difficult], place=place, program=main_prog)
        reader = create_eval_reader(train_parameters['file_list'], train_parameters['data_dir'],
                                    yolo_config['input_size'], 'eval')
        with fluid.unique_name.guard():
            model = get_yolo(ues_tiny, train_parameters['class_dim'], yolo_config['anchors'], yolo_config['anchor_mask'])
            outputs = model.net(img)
    return feeder, reader, outputs, gt_box, gt_label, difficult
# Restore previously saved parameters, if any.
def load_pretrained_params(exe, program):
    """Restore weights: full persistables when resuming a run, otherwise any
    matching variables from the pretrained-model directory."""
    save_dir = train_parameters['save_model_dir']
    if train_parameters['continue_train'] and os.path.exists(save_dir):
        logger.info('load param from retrain model')
        fluid.io.load_persistables(executor=exe,
                                   dirname=save_dir,
                                   main_program=program)
        return

    pretrain_dir = train_parameters['pretrained_model_dir']
    if train_parameters['pretrained'] and os.path.exists(pretrain_dir):
        logger.info('load param from pretrained model')
        # Only load variables that have a saved file of the same name.
        fluid.io.load_vars(exe, pretrain_dir, main_program=program,
                           predicate=lambda var: os.path.exists(os.path.join(pretrain_dir, var.name)))
# Main training entry point.
def train():
    """Top-level training loop: build programs, restore params, run epochs,
    periodically checkpoint, and save the final persistables."""
    init_log_config()
    init_train_parameters()
    logger.info("start train YOLOv3, train params:%s", str(train_parameters))

    logger.info("create place, use gpu:" + str(train_parameters['use_gpu']))
    place = fluid.CUDAPlace(0) if train_parameters['use_gpu'] else fluid.CPUPlace()

    logger.info("build network and program")
    train_program = fluid.Program()
    start_program = fluid.Program()
    eval_program = fluid.Program()
    # NOTE(review): duplicate assignment -- this re-creates start_program,
    # discarding the one built two lines above; train and eval then share
    # this second startup program.
    start_program = fluid.Program()
    train_reader, loss = build_train_program_with_async_reader(train_program, start_program)
    eval_feeder, eval_reader, outputs, gt_box, gt_label, difficult = build_eval_program_with_feeder(eval_program, start_program, place)
    eval_program = eval_program.clone(for_test=True)

    logger.info("build executor and init params")
    exe = fluid.Executor(place)
    exe.run(start_program)
    train_fetch_list = [loss.name]
    eval_fetch_list = [v.name for v in outputs]
    load_pretrained_params(exe, train_program)

    # Early-stop configuration. NOTE(review): several of these values
    # (map/loss thresholds, eval fetch list) are read but never used by the
    # loop below -- the mAP-based early stop is not wired in.
    stop_strategy = train_parameters['early_stop']
    successive_limit = stop_strategy['successive_limit']
    sample_freq = stop_strategy['sample_frequency']
    min_curr_map = stop_strategy['min_curr_map']
    min_loss = stop_strategy['min_loss']
    stop_train = False
    successive_count = 0
    total_batch_count = 0
    valid_thresh = train_parameters['valid_thresh']
    nms_thresh = train_parameters['nms_thresh']
    for pass_id in range(train_parameters["num_epochs"]):
        logger.info("current pass: %d, start read image", pass_id)
        batch_id = 0
        train_reader.start()
        try:
            while True:
                t1 = time.time()
                loss = exe.run(train_program, fetch_list=train_fetch_list)
                period = time.time() - t1
                loss = np.mean(np.array(loss))
                batch_id += 1
                total_batch_count += 1

                if batch_id % 10 == 0:
                    logger.info(
                        "Pass {0}, trainbatch {1}, loss {2} time {3}".format(pass_id, batch_id, loss, "%2.2f sec" % period))
                # Simple periodic checkpointing; replace with a finer-grained
                # save policy if needed.
                if total_batch_count % 100 == 0:
                    logger.info("temp save {0} batch train result".format(total_batch_count))
                    fluid.io.save_persistables(dirname=train_parameters['save_model_dir'],
                                               main_program=train_program,
                                               executor=exe)
        except fluid.core.EOFException:
            # The py_reader signals end-of-epoch via EOFException; reset it
            # so the next pass can start() again.
            train_reader.reset()

    logger.info("training till last epcho, end training")
    fluid.io.save_persistables(dirname=train_parameters['save_model_dir'], main_program=train_program, executor=exe)
# Model freezing: export an inference-only model from saved persistables.


def freeze_model():
    """Rebuild the network, load trained persistables from ./yolo-model, and
    save a frozen inference model to ./freeze_model."""
    init_train_parameters()
    print(train_parameters['class_dim'])
    path = "./yolo-model"
    exe = fluid.Executor(fluid.CPUPlace())

    ues_tiny = train_parameters['use_tiny']
    yolo_config = train_parameters['yolo_tiny_cfg'] if ues_tiny else train_parameters['yolo_cfg']
    model = get_yolo(ues_tiny,train_parameters['class_dim'], yolo_config['anchors'], yolo_config['anchor_mask'])
    image = fluid.layers.data(name='image', shape=yolo_config['input_size'], dtype='float32')
    pred = model.net(image)

    # Clone for_test strips training-only ops before export.
    freeze_program = fluid.default_main_program()
    fluid.io.load_persistables(exe, path, freeze_program)
    freeze_program = freeze_program.clone(for_test=True)

    fluid.io.save_inference_model("./freeze_model", ['image'], pred, exe, freeze_program)


if __name__ == '__main__':
    # Entry point: run training; switch to freeze_model() to export instead.
    train()
    #freeze_model()

本代码分为两部分:训练部分和固化部分。分别调用 train() 或 freeze_model() 这两个函数即可。

 

4
收藏
回复
全部评论(10)
时间顺序
peng4554
#2 回复于2020-05

如果对大家有用请点个赞  谢谢

0
回复
AIStudio810258
#3 回复于2020-05

大哥,请用代码格式发代码~~

0
回复
AIStudio810258
#4 回复于2020-05

值得借鉴,感谢~~

0
回复
Gao
#5 回复于2020-05

非常不错

0
回复
peng4554
#6 回复于2020-05

好的  好的  

0
回复
peng4554
#7 回复于2020-05

很少写帖子   以后注意会一下

0
回复
k
kis幻龙
#8 回复于2020-12

这格式,搞毛啊

0
回复
AIStudio810260
#9 回复于2020-12

赞赞赞

0
回复
AIStudio810260
#10 回复于2020-12

大佬能到AI Studio上建个项目嘛?这边不支持代码高亮和格式……

https://aistudio.baidu.com/aistudio/projectoverview/public

0
回复
金龙鱼
#11 回复于2020-12

确实,这格式……

0
回复
需求/bug反馈?一键提issue告诉我们
发现bug?如果您知道修复办法,欢迎提pr直接参与建设飞桨~
在@后输入用户全名并按空格结束,可艾特全站任一用户