Adding a CBAM attention module causes an optimizer error
Paddle Framework · Q&A · Deep Learning

I added a CBAM attention module to a VGG convolution block, and the optimizer now throws an error. The CBAM module code is as follows:

import paddle.fluid as fluid

def cbam(ipt, num_channels, reduction):
    # --- Channel attention ---
    # NOTE: the original used pool_size=1, which is a per-pixel no-op;
    # CBAM pools over the whole spatial extent, i.e. global pooling.
    avg_pool = fluid.layers.pool2d(input=ipt, pool_type='avg', global_pooling=True)
    max_pool = fluid.layers.pool2d(input=ipt, pool_type='max', global_pooling=True)
    # Bottleneck MLP implemented with 1x1 convolutions (one per branch)
    avg = fluid.layers.conv2d(input=avg_pool, num_filters=num_channels // reduction,
                              filter_size=1, stride=1, padding=0, act='relu')
    avg = fluid.layers.conv2d(input=avg, num_filters=num_channels,
                              filter_size=1, stride=1, padding=0, act=None)
    mx = fluid.layers.conv2d(input=max_pool, num_filters=num_channels // reduction,
                             filter_size=1, stride=1, padding=0, act='relu')
    mx = fluid.layers.conv2d(input=mx, num_filters=num_channels,
                             filter_size=1, stride=1, padding=0, act=None)
    out1 = fluid.layers.sigmoid(avg + mx)  # channel attention map, N x C x 1 x 1

    # --- Spatial attention ---
    ipt2 = ipt * out1  # apply channel attention (broadcasts over H and W)
    avg = fluid.layers.reduce_mean(ipt2, dim=1, keep_dim=True)  # N x 1 x H x W
    mx = fluid.layers.reduce_max(ipt2, dim=1, keep_dim=True)    # N x 1 x H x W
    out2 = fluid.layers.concat([avg, mx], axis=1)               # N x 2 x H x W
    out2 = fluid.layers.conv2d(input=out2, num_filters=1, filter_size=7,
                               stride=1, padding=3, act=None)
    out2 = fluid.layers.sigmoid(out2)  # spatial attention map, N x 1 x H x W
    return ipt2 * out2
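For reference, here is a minimal sketch of how such a module is typically wired into a VGG-style convolution block under the fluid static-graph API. The vgg_block helper and its parameters are hypothetical, for illustration only; they are not from the original post:

import paddle.fluid as fluid

def vgg_block(ipt, num_filters, groups, reduction=16):
    # Hypothetical VGG-style block: stacked 3x3 convs, CBAM, then 2x2 max-pool.
    conv = ipt
    for _ in range(groups):
        conv = fluid.layers.conv2d(input=conv, num_filters=num_filters,
                                   filter_size=3, padding=1, act='relu')
    # Attach CBAM to the block's output feature map before downsampling
    conv = cbam(conv, num_channels=num_filters, reduction=reduction)
    return fluid.layers.pool2d(input=conv, pool_size=2, pool_stride=2, pool_type='max')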

All comments (1)
我是黄茂云
#2 · Replied 2020-10

Error message:
375 #optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
376 #optimizer = fluid.optimizer.AdagradOptimizer(0.001)
--> 377 opts = optimizer.minimize(avg_cost)
378 DATADIR = '/home/aistudio/dataset/train'
379 test_path = 'dataset/val'
in minimize(self, loss, startup_program, parameter_list, no_grad_set)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in __impl__(func, *args, **kwargs)
201 def __impl__(func, *args, **kwargs):
202 with _switch_tracer_mode_guard_(is_train=False):
--> 203 return func(*args, **kwargs)
204
205 return __impl__(func)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in minimize(self, loss, startup_program, parameter_list, no_grad_set)
832 startup_program=startup_program,
833 parameter_list=parameter_list,
--> 834 no_grad_set=no_grad_set)
835
836 optimize_ops = self.apply_optimize(
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in backward(self, loss, startup_program, parameter_list, no_grad_set, callbacks)
675 with program_guard(program, startup_program):
676 params_grads = append_backward(loss, parameter_list,
--> 677 act_no_grad_set, callbacks)
678 # Note: since we can't use all_reduce_op now,
679 # dgc_op should be the last op of one grad.
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/backward.py in append_backward(loss, parameter_list, no_grad_set, callbacks, checkpoints)
1417
1418 program.current_block_idx = current_block_idx
-> 1419 program._sync_with_cpp()
1420
1421 if parameter_list is not None:
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in _sync_with_cpp(self)
4555 self.blocks.append(Block(self, block_idx))
4556 for block in self.blocks:
-> 4557 block._sync_with_cpp()
4558
4559 def _copy_param_info_from(self, other):
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in _sync_with_cpp(self)
2744 ops_in_python_index += 1
2745
-> 2746 assert len(self.ops) == len(ops_in_cpp)
2747 for index in range(len(self.ops)):
2748 assert self.ops[index].desc == ops_in_cpp[index]
AssertionError: 
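The failing assertion, len(self.ops) == len(ops_in_cpp), checks that the Python-side op list of a block matches the underlying C++ program desc. One common way to trip it (an assumption, not confirmed by the post) is appending ops outside the program that minimize() later syncs, e.g. building part of the network against a different program or outside a program_guard. A minimal sketch of the usual static-graph setup, with everything built under one guard; build_vgg and the input shapes are hypothetical placeholders:

import paddle.fluid as fluid

train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
    # Build the entire forward graph (including every cbam call), the loss,
    # and the optimizer inside one program_guard so the Python-side op list
    # stays in sync with the underlying C++ program desc.
    image = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    predict = build_vgg(image)  # hypothetical builder that calls cbam internally
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
    opts = optimizer.minimize(avg_cost)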
