License plate recognition runtime error
AI Studio Platform Usage · Q&A · Notebook project (830 views, 6 replies)

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>
      1 with fluid.dygraph.guard():
----> 2     model=MyDNN()  # instantiate the model
      3     model.train()  # training mode
      4     opt=fluid.optimizer.SGDOptimizer(learning_rate=train_parameters['learning_strategy']['lr'], parameter_list=model.parameters())  # optimizer: SGD (stochastic gradient descent), learning rate 0.001
      5     epochs_num=train_parameters['num_epochs']  # number of epochs

<ipython-input> in __init__(self)
      6     def __init__(self):
      7         super(MyDNN,self).__init__()
----> 8         self.conv1=Conv2D(1,64,3,padding=1)
      9         self.batch1=BatchNorm(64,act="relu")
     10         self.conv2=Conv2D(64,128,3,padding=1)

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/nn.py in __init__(self, num_channels, num_filters, filter_size, stride, padding, dilation, groups, param_attr, bias_attr, use_cudnn, act, dtype)
    215             shape=filter_shape,
    216             dtype=self._dtype,
--> 217             default_initializer=_get_default_param_initializer())
    218 
    219         self.bias = self.create_parameter(

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in create_parameter(self, shape, attr, dtype, is_bias, default_initializer)
    260             temp_attr = None
    261         return self._helper.create_parameter(temp_attr, shape, dtype, is_bias,
--> 262                                              default_initializer)
    263 
    264     # TODO: Add more parameter list when we need them

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layer_helper_base.py in create_parameter(self, attr, shape, dtype, is_bias, default_initializer, stop_gradient, type)
    345                 type=type,
    346                 stop_gradient=stop_gradient,
--> 347                 **attr._to_kwargs(with_initializer=True))
    348         else:
    349             self.startup_program.global_block().create_parameter(

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in create_parameter(self, *args, **kwargs)
   2568             pass
   2569         else:
-> 2570             initializer(param, self)
   2571         param.stop_gradient = False
   2572         return param

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/initializer.py in __call__(self, var, block)
    340                 "use_mkldnn": False
    341             },
--> 342             stop_gradient=True)
    343 
    344         if var.dtype == VarDesc.VarType.FP16:

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in _prepend_op(self, *args, **kwargs)
   2668                 kwargs.get("outputs", {}), attrs
   2669                 if attrs else {},
-> 2670                 kwargs.get("stop_gradient", False))
   2671         else:
   2672             op_desc = self.desc._prepend_op()

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/tracer.py in trace_op(self, type, inputs, outputs, attrs, stop_gradient)
     41         self.trace(type, inputs, outputs, attrs,
     42                    framework._current_expected_place(), self._train_mode and
---> 43                    not stop_gradient)
     44 
     45     def train_mode(self):

RuntimeError: parallel_for failed: unspecified launch failure

All replies (6)
mar836924042
#2, replied 2020-07

The error points to the line self.conv1=Conv2D(1,64,3,padding=1). Did you import directly from the paddle.fluid.dygraph library? It would be best to post the relevant part of your code; it is hard to give concrete suggestions from the traceback alone.

夜神月之娃哈哈
#3, replied 2020-07

# Define the DNN network
class MyDNN(fluid.dygraph.Layer):
    '''
    DNN network
    '''
    def __init__(self):
        super(MyDNN,self).__init__()
        self.conv1=Conv2D(1,64,3,padding=1)
        self.batch1=BatchNorm(64,act="relu")
        self.conv2=Conv2D(64,128,3,padding=1)
        self.batch2=BatchNorm(128,act="relu")
        
        self.pool1=Pool2D(3, 'max',pool_stride=2,pool_padding=1)#5x5

        self.conv3=Conv2D(128,256,3,padding=1)
        self.batch3=BatchNorm(256,act="relu")
        self.conv4=Conv2D(256,512,3,padding=1)
        self.batch4=BatchNorm(512,act="relu")

        self.pool2=Pool2D(3,"max",pool_stride=2)#3x3


        self.conv5=Conv2D(512,512,3,padding=1)
        self.batch5=BatchNorm(512,act="relu")
        self.conv6=Conv2D(512,1024,3,padding=1)
        self.batch6=BatchNorm(1024,act="relu")

        self.pool3=Pool2D(3,"max",pool_stride=2)#1x1

        self.conv7=Conv2D(1024,512,1)
        self.batch7=BatchNorm(512,act="relu")
        self.conv8=Conv2D(512,128,1)
        self.batch8=BatchNorm(128,act="relu")

        self.fc=Linear(128,1)

        print("come in ")
        
    def forward(self,input):        # forward defines the computation the network actually runs
        '''Forward pass'''
        y=self.conv1(input)
        y=self.batch1(y)
        y=self.conv2(y)
        y=self.batch2(y)
        y=self.pool1(y)
        y=self.conv3(y)
        y=self.batch3(y)
        y=self.conv4(y)
        y=self.batch4(y)
        y=self.pool2(y)
        y=self.conv5(y)
        y=self.batch5(y)
        y=self.conv6(y)
        y=self.batch6(y)
        y=self.pool3(y)
        y=self.conv7(y)
        y=self.batch7(y)
        y=self.conv8(y)
        y=self.batch8(y)
        y=fluid.layers.reshape(y,shape=[-1,128])
        y=self.fc(y)
        return y
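The class above assumes the layer names were imported earlier in the notebook. A minimal sketch of those imports, inferred from the Paddle 1.x fluid.dygraph paths in the traceback (not shown in the original post):

import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D, Pool2D, BatchNorm, Linear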

夜神月之娃哈哈
#4, replied 2020-07
(quoting #2)

I have posted the code above. I tried it locally and it does not throw this error.

AIStudio810258
#5, replied 2020-08

In dygraph (dynamic graph) mode, every operation on fluid, including the model declaration, has to be written under with fluid.dygraph.guard().
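A minimal sketch of that pattern, following the cell shown in the traceback (the 0.001 learning rate is taken from the original comment, and train_parameters is replaced by a literal here):

import paddle.fluid as fluid

with fluid.dygraph.guard():                    # every dygraph/fluid operation goes inside this context
    model = MyDNN()                            # model instantiation under the guard
    model.train()                              # switch to training mode
    opt = fluid.optimizer.SGDOptimizer(
        learning_rate=0.001,                   # assumed value, per the original comment
        parameter_list=model.parameters())     # bind the model's parameters to the optimizer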

AIStudio810258
#6, replied 2020-08

This applies especially to the model declaration: you can also write it outside the guard, but it will raise an error as soon as the first Conv2D or Linear runs.
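To illustrate this point (a sketch, assuming the MyDNN class from #3):

import paddle.fluid as fluid

# model = MyDNN()    # constructed outside the guard: per this reply, the first
                     # Conv2D/Linear created in __init__ is where the error appears

with fluid.dygraph.guard():
    model = MyDNN()  # constructed inside the guard: layer parameters are created normally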

lianzhang132
#7, replied 2020-08

So the code just wasn't following the convention?
