貌似global_block中有一个名为“learning_rate”的LoDTensor,
可以试试这个方法,看能不能获取
lr_tensor = fluid.global_scope().find_var('learning_rate').get_tensor()
out = exe.run( ..., fetch_list=[..., lr_tensor], ...)
# Piecewise (step) learning-rate schedule: `values` must have len(boundaries) + 1
# entries — lr is values[0] before boundaries[0], values[1] in [160, 180), and
# values[2] from 180 onwards.
boundaries =[160,180]
# NOTE(review): values[1] == values[2] (both 0.0001), so the boundary at 180 is a
# no-op — confirm whether the last entry was meant to be 0.00001.
values = [0.001,0.0001,0.0001]
# NOTE(review): fluid.layers.piecewise_decay is the static-graph schedule API; if
# this trains in dygraph mode (as the loop below suggests), the matching API is
# fluid.dygraph.PiecewiseDecay — confirm which execution mode is in use.
opt = fluid.optimizer.Momentum(learning_rate=fluid.layers.piecewise_decay(boundaries, values), momentum=0.9)
# Training loop (dygraph style: backward -> minimize -> clear_gradients).
# Fixes vs. the pasted snippet:
#  * the mangled one-line tail is re-formatted into separate statements;
#  * epoch_loss is initialized at the start of each epoch (it was accumulated
#    without ever being defined);
#  * the failing fluid.global_scope().find_var('learning_rate') lookup (the
#    scope variable is empty in dygraph mode) is replaced by computing the
#    current lr directly from the piecewise schedule.
for epoch in range(MAX_EPOCH):
    epoch_loss = 0.0
    # Current lr from the schedule: number of boundaries already passed picks
    # the entry of `values`.  NOTE(review): this assumes `boundaries` are epoch
    # indices, not global iteration counts — confirm against the schedule setup.
    lr = values[sum(epoch >= b for b in boundaries)]
    for i, data in enumerate(train_loader()):
        img, gt_boxes, gt_labels, img_scale = data
        # Every ground-truth box gets weight 1.0 (no per-box confidence).
        gt_scores = np.ones(gt_labels.shape).astype('float32')
        gt_scores = to_variable(gt_scores)
        img = to_variable(img)
        gt_boxes = to_variable(gt_boxes)
        gt_labels = to_variable(gt_labels)
        outputs = model(img)
        loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,
                              anchors=ANCHORS,
                              anchor_masks=ANCHOR_MASKS,
                              ignore_thresh=IGNORE_THRESH,
                              use_label_smooth=False)
        loss.backward()
        opt.minimize(loss)
        model.clear_gradients()
        epoch_loss = epoch_loss + loss.numpy()
        if i % 20 == 0:
            timestring = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            print('{}[TRAIN]epoch {}, iter {}, output loss: {}, lr: {}'.format(
                timestring, epoch, i, loss.numpy(), lr))
您这个使用的是动态图模式, 提供的方法也是可以的,但是是在静态图模式下使用的,不适用于您的场景
您可以尝试用以下方法打印:
lr = opt.current_step_lr()
print(lr)
我这个是想在训练中打印每步的实时学习率,图像是静态图,按您说的还是报错
“ lr = opt.current_step_lr()
AttributeError: 'MomentumOptimizer' object has no attribute 'current_step_lr'”
分段衰减学习率,如何打印出训练过程中的实时学习率?
# Step-decay schedule definition: with boundaries [160, 180], `values` needs
# exactly three entries (one more than the number of boundaries).
boundaries =[160,180]
# NOTE(review): the last two values are identical, making the 180 boundary
# ineffective — verify the intended final learning rate (0.00001?).
values = [0.001,0.0001,0.0001]
# Momentum optimizer driven by the piecewise schedule.
# NOTE(review): fluid.layers.piecewise_decay targets static-graph programs;
# confirm the execution mode — dygraph would need fluid.dygraph.PiecewiseDecay.
opt = fluid.optimizer.Momentum(learning_rate=fluid.layers.piecewise_decay(boundaries, values), momentum=0.9)