Error when converting dynamic graph to static graph
I get an error when saving during dynamic-to-static graph conversion: ValueError: The feeded_var_names[2]: 'generated_tensor_31632' doesn't exist in pruned inference program. Please check whether 'generated_tensor_31632' is a valid feed_var name, or remove it from feeded_var_names if 'generated_tensor_31632' is not involved in the target_vars calculation.
Does anyone know how to fix this?
Below is the official DeepFM network code:
#%%
import math

import paddle


class DeepFM(paddle.nn.Layer):
    def __init__(self, args):
        super(DeepFM, self).__init__()
        self.args = args
        # Scale used when initializing parameters
        self.init_value_ = 0.1
        # FM layer
        self.fm = FM(args)
        # DNN layer
        self.dnn = DNN(args)

    @paddle.jit.to_static
    def forward(self, raw_feat_idx, raw_feat_value, label):
        # NOTE: label is accepted here but never used below; this matters
        # for the save error discussed in this thread.
        # Reshape the inputs
        feat_idx = paddle.reshape(raw_feat_idx,
                                  [-1, 1])  # (None * num_field) * 1
        feat_value = paddle.reshape(
            raw_feat_value,
            [-1, self.args.num_field, 1])  # None * num_field * 1
        # FM forward pass
        y_first_order, y_second_order, feat_embeddings = self.fm(feat_idx,
                                                                 feat_value)
        # DNN forward pass
        y_dnn = self.dnn(feat_embeddings)
        # Combine FM and DNN into the overall prediction
        predict = paddle.nn.functional.sigmoid(y_first_order + y_second_order +
                                               y_dnn)
        return predict
class FM(paddle.nn.Layer):
    # The trainable parameters are created in __init__().
    def __init__(self, args):
        super(FM, self).__init__()
        self.args = args
        self.init_value_ = 0.1
        # Create the embedding-layer parameters;
        # init_value_ sets the scale of parameter initialization
        self.embedding_w = paddle.nn.Embedding(
            self.args.num_feat + 1,
            1,
            padding_idx=0,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0, std=self.init_value_),
                regularizer=paddle.regularizer.L1Decay(self.args.reg)))
        self.embedding = paddle.nn.Embedding(
            self.args.num_feat + 1,
            self.args.embedding_size,
            padding_idx=0,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=self.init_value_ /
                    math.sqrt(float(self.args.embedding_size)))))

    # forward() describes the forward computation.
    @paddle.jit.to_static
    def forward(self, feat_idx, feat_value):
        # -------------------- first order term --------------------
        first_weights_re = self.embedding_w(feat_idx)
        first_weights = paddle.reshape(
            first_weights_re,
            shape=[-1, self.args.num_field, 1])  # None * num_field * 1
        # w * x
        y_first_order = paddle.sum(first_weights * feat_value, 1)
        # -------------------- second order term --------------------
        feat_embeddings_re = self.embedding(feat_idx)
        feat_embeddings = paddle.reshape(
            feat_embeddings_re,
            shape=[-1, self.args.num_field, self.args.embedding_size
                   ])  # None * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # None * num_field * embedding_size
        # sum_square part: square of the sum
        summed_features_emb = paddle.sum(feat_embeddings,
                                         1)  # None * embedding_size
        summed_features_emb_square = paddle.square(
            summed_features_emb)  # None * embedding_size
        # square_sum part: sum of the squares
        squared_features_emb = paddle.square(
            feat_embeddings)  # None * num_field * embedding_size
        squared_sum_features_emb = paddle.sum(squared_features_emb,
                                              1)  # None * embedding_size
        # Half of (square of sum minus sum of squares) equals the pairwise
        # second-order interactions (see the quick check after the classes)
        y_second_order = 0.5 * paddle.sum(
            summed_features_emb_square - squared_sum_features_emb,
            1,
            keepdim=True)  # None * 1
        # DNN and FM share the embedding parameters, so the embeddings are
        # returned as the input to the DNN layer
        return y_first_order, y_second_order, feat_embeddings
class DNN(paddle.nn.Layer):
    def __init__(self, args):
        super(DNN, self).__init__()
        self.args = args
        self.init_value_ = 0.1
        # Layer sizes, activations, and weight-init scales per layer
        sizes = [self.args.num_field * self.args.embedding_size
                 ] + self.args.layer_sizes + [1]
        acts = [self.args.act
                for _ in range(len(self.args.layer_sizes))] + [None]
        w_scales = [
            self.init_value_ / math.sqrt(float(10))
            for _ in range(len(self.args.layer_sizes))
        ] + [self.init_value_]
        self._layers = []
        # Create one fully connected layer (plus optional activation)
        # per entry in sizes
        for i in range(len(self.args.layer_sizes) + 1):
            linear = paddle.nn.Linear(
                in_features=sizes[i],
                out_features=sizes[i + 1],
                weight_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.TruncatedNormal(
                        mean=0.0, std=w_scales[i])),
                bias_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.TruncatedNormal(
                        mean=0.0, std=self.init_value_)))
            # linear = getattr(paddle.nn.functional, acts[i])(linear) if acts[i] else linear
            self.add_sublayer('linear_%d' % i, linear)
            self._layers.append(linear)
            if acts[i] == 'relu':
                act = paddle.nn.ReLU()
                self.add_sublayer('act_%d' % i, act)
                self._layers.append(act)

    @paddle.jit.to_static
    def forward(self, feat_embeddings):
        # The input embeddings are the parameters shared with the FM layer
        y_dnn = paddle.reshape(
            feat_embeddings,
            [-1, self.args.num_field * self.args.embedding_size])
        for n_layer in self._layers:
            y_dnn = n_layer(y_dnn)
        return y_dnn
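The sum-square trick in FM.forward relies on the identity 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) = sum_{i<j} <e_i, e_j>: squaring the sum produces every pairwise product twice plus the squared terms, so subtracting the squares and halving leaves exactly the second-order interactions. A quick numerical check of that identity with toy shapes (not from the original post):

#%%
paddle.seed(0)
e = paddle.randn([4, 8])  # 4 fields, embedding size 8 (toy values)
# 0.5 * ((square of the sum) - (sum of the squares)), reduced over all dims
lhs = 0.5 * ((e.sum(axis=0) ** 2) - (e ** 2).sum(axis=0)).sum()
# Explicit sum of pairwise inner products <e_i, e_j> for i < j
rhs = sum((e[i] * e[j]).sum() for i in range(4) for j in range(i + 1, 4))
assert paddle.allclose(lhs, rhs)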
#%%
# Import the model definition
# from network_aistudio import DeepFM

# Specify the execution device; CPU is the default here, but the GPU line
# below is recommended.
place = paddle.CPUPlace()
# place = paddle.CUDAPlace(0)  # use GPU 0
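# NOTE: this post never defines args. A hypothetical minimal stand-in so
# the cell runs on its own; every attribute name below is read by the
# classes above, but the values are illustrative, not the official ones.
from types import SimpleNamespace
args = SimpleNamespace(
    num_feat=1000000,             # number of distinct feature ids
    num_field=39,                 # number of feature fields
    embedding_size=10,            # embedding dimension
    layer_sizes=[400, 400, 400],  # DNN hidden-layer widths
    act='relu',                   # DNN activation
    reg=1e-4)                     # regularization coefficient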
deepfm = DeepFM(args)  # build the model from args

# Build an Adam optimizer from args
optimizer = paddle.optimizer.Adam(
    parameters=deepfm.parameters(),
    weight_decay=paddle.regularizer.L2Decay(args.reg))
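For context, a hypothetical single training step with toy tensors, showing how the pieces above fit together; the choice of log_loss and the random shapes are assumptions, not from the original post:

#%%
batch = 8  # toy batch size
raw_feat_idx = paddle.randint(1, args.num_feat + 1, [batch, args.num_field])
raw_feat_value = paddle.rand([batch, args.num_field])
label = paddle.randint(0, 2, [batch, 1]).astype('float32')

predict = deepfm(raw_feat_idx, raw_feat_value, label)  # None * 1, in (0, 1)
loss = paddle.nn.functional.log_loss(predict, label).mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()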
Does the error occur during saving? Are you saving with paddle.jit.save?
Thanks for the reply. Yes, the error occurs when saving with paddle.jit.save; the error message is the one at the top of this post.
Did you ever solve this?
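A likely cause, judging from the error message: forward takes three inputs (raw_feat_idx, raw_feat_value, label), but label is never used to compute predict. When paddle.jit.save prunes the program down to the returned output, the placeholder traced for label (here 'generated_tensor_31632', feed index 2) is no longer part of the pruned inference program, which is exactly what the ValueError reports. A minimal sketch of one possible fix, assuming a Paddle version whose paddle.jit.save supports pruning inputs via input_spec (the shapes, dtypes, and save path below are assumptions, not from the original post):

import paddle
from paddle.static import InputSpec

# Describe only the inputs that predict actually depends on; leaving
# label out of input_spec lets the save step prune it instead of trying
# to feed a tensor the pruned program no longer contains.
paddle.jit.save(
    deepfm,
    './deepfm_inference',  # hypothetical save path
    input_spec=[
        InputSpec(shape=[None, args.num_field], dtype='int64',
                  name='raw_feat_idx'),
        InputSpec(shape=[None, args.num_field], dtype='float32',
                  name='raw_feat_value'),
    ])

Alternatively, drop label from forward's signature entirely and compute the loss outside the network. It may also help to keep @paddle.jit.to_static only on the outermost DeepFM.forward and remove it from FM.forward and DNN.forward, since only the entry point needs to be converted.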