[operator < lookup_table_v2 > error]

   1) PaddlePaddle version: 1.6
   2) CPU/GPU: CPU
   3) OS: Ubuntu 18.04
   4) Python version: 3.7.4

Python code:

import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets

IS_SPARSE = True
BATCH_SIZE = 256
PASS_NUM = 20

def get_usr_combined_features():
    """network definition for user part"""
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
    uid = fluid.data(name='user_id', shape=[None], dtype='int64')
    usr_emb = fluid.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)
    usr_fc = layers.fc(input=usr_emb, size=32)
    USR_GENDER_DICT_SIZE = 2
    usr_gender_id = fluid.data(name='gender_id', shape=[None], dtype='int64')
    usr_gender_emb = fluid.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)
    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = fluid.data(name='age_id', shape=[None], dtype="int64")
    usr_age_emb = fluid.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')
    usr_age_fc = layers.fc(input=usr_age_emb, size=16)
    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = fluid.data(name='job_id', shape=[None], dtype="int64")
    usr_job_emb = fluid.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)
    usr_job_fc = layers.fc(input=usr_job_emb, size=16)
    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
    """network definition for item(movie) part"""
    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
    mov_id = fluid.data(name='movie_id', shape=[None], dtype='int64')
    mov_emb = fluid.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)
    mov_fc = layers.fc(input=mov_emb, size=32)
    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
    category_id = fluid.data(
        name='category_id', shape=[None], dtype='int64', lod_level=1)
    mov_categories_emb = fluid.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")
    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
    mov_title_id = fluid.data(
        name='movie_title', shape=[None], dtype='int64', lod_level=1)
    mov_title_emb = fluid.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")
    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
    # print(concat_embed)
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features


def inference_program():
    """the combined network"""
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    print("get_usr_combined_features:", usr_combined_features)
    print("get_mov_combined_features:", mov_combined_features)

    # place = fluid.CPUPlace()
    # exe = fluid.Executor(place)
    # exe.run(fluid.default_startup_program())
    # results = exe.run(fluid.default_main_program(),
    #                   fetch_list=[b], return_numpy=False)
    # # results[0].set_lod([[0, 1, 200]])
    # print("The data of the result: {}.".format(np.array(results[0])))

    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    return scale_infer


def test():
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
    b = fluid.layers.create_tensor(dtype="float32")
    fluid.layers.assign(a, b)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    results = exe.run(fluid.default_main_program(),
                      fetch_list=[b], return_numpy=False)
    results[0].set_lod([[0, 1, 3]])
    lod_tensor = results[0]
    print("The LoD of the result: {}.".format(lod_tensor.lod()))
    print("The array : {}.".format(np.array(lod_tensor)))

When I run test() or inference_program() on its own, it runs successfully.

But when I run both together:

test()
inference_program()

I get the following error:
Cache file /root/.cache/paddle/dataset/movielens/movielens%2Fml-1m.zip not found, downloading https://[==================================================]
get_usr_combined_features: name: "fc_4.tmp_2"
type {
  type: LOD_TENSOR
  lod_tensor {
    tensor {
      data_type: FP32
      dims: -1
      dims: 200
    }
    lod_level: 0
  }
}
persistable: false

get_mov_combined_features: name: "fc_6.tmp_2"
type {
  type: LOD_TENSOR
  lod_tensor {
    tensor {
      data_type: FP32
      dims: -1
      dims: 200
    }
    lod_level: 0
  }
}
persistable: false

/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/executor.py:774: UserWarning: The following exception is not an EOF exception.
"The following exception is not an EOF exception.")
Traceback (most recent call last):
File "recommender.py", line 138, in <module>
test()
File "recommender.py", line 128, in test
fetch_list=[b], return_numpy=False)
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/executor.py", line 775, in run
six.reraise(*sys.exc_info())
File "/data/workspace/conda3/lib/python3.7/site-packages/six.py", line 693, in reraise
raise value
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/executor.py", line 770, in run
use_program_cache=use_program_cache)
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/executor.py", line 817, in _run_impl
use_program_cache=use_program_cache)
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/executor.py", line 894, in _run_program
fetch_var_name)
paddle.fluid.core_avx.EnforceNotMet:


C++ Call Stacks (More useful to developers):

0 std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
2 paddle::framework::Tensor::check_memory_size() const
3 long const* paddle::framework::Tensor::data() const
4 paddle::operators::LookupTableV2Kernel::Compute(paddle::framework::ExecutionContext const&) const
5 std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::LookupTableV2Kernel, paddle::operators::LookupTableV2Kernel >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
6 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&, paddle::framework::RuntimeContext*) const
7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&) const
8 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_, boost::detail::variant::void_> const&)
9 paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool)
10 paddle::framework::Executor::Run(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::vector<std::string, std::allocator<std::string> > const&, bool)


Python Call Stacks (More useful to users):

File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/framework.py", line 2459, in append_op
attrs=kwargs.get("attrs", None))
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args, **kwargs)
File "/data/workspace/conda3/lib/python3.7/site-packages/paddle/fluid/input.py", line 268, in embedding
'padding_idx': padding_idx
File "recommender.py", line 22, in get_usr_combined_features
is_sparse=IS_SPARSE)
File "recommender.py", line 93, in inference_program
usr_combined_features = get_usr_combined_features()
File "recommender.py", line 137, in <module>
inference_program()


Error Message Summary:

PaddleCheckError: holder_ should not be null
Tensor holds no memory. Call Tensor::mutable_data first. at [/paddle/paddle/fluid/framework/tensor.cc:23]
[operator < lookup_table_v2 > error]

Comments (4)
AIStudio796316 · #2 · 2019-11

How can I run

test()
inference_program()

together in the same script?
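
From the traceback, the failing lookup_table_v2 op was appended by inference_program() (recommender.py line 137), but the exception is raised inside test() at line 138: both functions add their ops to fluid.default_main_program(), so test()'s exe.run also executes the embedding ops whose int64 inputs (user_id, gender_id, ...) were never fed, hence "Tensor holds no memory". One way to let the two coexist, in line with the program_guard approach that appears later in this thread, is to build each graph in its own fluid.Program. A minimal sketch of the test() side only (my suggestion, not a confirmed fix):

import numpy as np
import paddle.fluid as fluid

def run_test_isolated():
    # Build the assign/fetch graph in its own Program so that running it never
    # touches ops added elsewhere (e.g. by inference_program()).
    test_main = fluid.Program()
    test_startup = fluid.Program()
    with fluid.program_guard(main_program=test_main, startup_program=test_startup):
        a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
        b = fluid.layers.create_tensor(dtype="float32")
        fluid.layers.assign(a, b)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(test_startup)
    results = exe.run(test_main, fetch_list=[b], return_numpy=False)
    print(np.array(results[0]))

inference_program() can be isolated the same way; the key point is that exe.run must be given the Program that the fetched Variable was actually added to.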

AIStudio796316 · #5 · 2019-11

Thanks, that problem is solved now.

BTW, what I actually want is the feature-vector data of usr_combined_features, i.e. to convert the Variable into a list:

import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets

IS_SPARSE = True
BATCH_SIZE = 256
PASS_NUM = 20

def get_usr_combined_features():
    """network definition for user part"""
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
    uid = fluid.data(name='user_id', shape=[None], dtype='int64')
    usr_emb = fluid.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)
    usr_fc = layers.fc(input=usr_emb, size=32)
    USR_GENDER_DICT_SIZE = 2
    usr_gender_id = fluid.data(name='gender_id', shape=[None], dtype='int64')
    usr_gender_emb = fluid.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)
    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = fluid.data(name='age_id', shape=[None], dtype="int64")
    usr_age_emb = fluid.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')
    usr_age_fc = layers.fc(input=usr_age_emb, size=16)
    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = fluid.data(name='job_id', shape=[None], dtype="int64")
    usr_job_emb = fluid.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)
    usr_job_fc = layers.fc(input=usr_job_emb, size=16)
    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
    """network definition for item(movie) part"""
    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
    mov_id = fluid.data(name='movie_id', shape=[None], dtype='int64')
    mov_emb = fluid.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)
    mov_fc = layers.fc(input=mov_emb, size=32)
    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
    category_id = fluid.data(
        name='category_id', shape=[None], dtype='int64', lod_level=1)
    mov_categories_emb = fluid.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")
    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
    mov_title_id = fluid.data(
        name='movie_title', shape=[None], dtype='int64', lod_level=1)
    mov_title_emb = fluid.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")
    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
    # print(concat_embed)
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features

def inference_program():
    """the combined network"""
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    print("get_usr_combined_features:", usr_combined_features)
    print("get_mov_combined_features:", mov_combined_features)

    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program=main_program, startup_program=startup_program):
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        results = exe.run(main_program,
                          fetch_list=[usr_combined_features], return_numpy=False)
        # results[0].set_lod([[0, 1, 3]])
        # lod_tensor = results[0]
        # print("The LoD of the result: {}.".format(lod_tensor.lod()))
        print("The array : {}.".format(np.array(results[0])))

    # inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    # scale_infer = layers.scale(x=inference, scale=5.0)

    # return scale_infer

def test():
    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program=main_program, startup_program=startup_program):
        a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
        b = fluid.layers.create_tensor(dtype="float32")
        fluid.layers.assign(a, b)
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        results = exe.run(main_program,
                          fetch_list=[b], return_numpy=False)
        results[0].set_lod([[0, 1, 3]])
        lod_tensor = results[0]
        print("The LoD of the result: {}.".format(lod_tensor.lod()))
        print("The array : {}.".format(np.array(lod_tensor)))

if __name__ == '__main__':
    # test()
    inference_program()

But I get the following error:

Error Message Summary:

PaddleCheckError: Cannot find fetch variable in scope, fetch_var_name is fc_4.tmp_2 at [/paddle/paddle/fluid/operators/controlflow/fetch_op.cc:38]
[operator < fetch > error]

How should I change the code so that I can get the feature vectors directly as a list/array?
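
The "Cannot find fetch variable" error is most likely because the network is built before entering program_guard, so its ops and the fc_4.tmp_2 Variable live in fluid.default_main_program(), while exe.run is handed a brand-new, empty main_program. A minimal sketch of one way to get the vectors (my own, assuming the Paddle 1.6 fluid APIs used above; the toy ids are made up just to show the feed format): build the user network inside program_guard, feed every fluid.data input, run that same Program, and fetch usr_combined_features. With the default return_numpy=True the result is a numpy array, and .tolist() gives a plain Python list.

import numpy as np
import paddle.fluid as fluid

main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
    # The layers are appended to main_program here, so they can be fetched from it.
    usr_combined_features = get_usr_combined_features()

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)

# Toy int64 ids of shape [batch]; real ids would come from paddle.dataset.movielens.
feed = {
    'user_id': np.array([1, 2], dtype='int64'),
    'gender_id': np.array([0, 1], dtype='int64'),
    'age_id': np.array([2, 3], dtype='int64'),
    'job_id': np.array([4, 5], dtype='int64'),
}
results = exe.run(main_program, feed=feed, fetch_list=[usr_combined_features])
print(results[0].shape)        # (2, 200)
print(results[0].tolist()[0])  # one 200-dim feature vector as a Python list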

AIStudio796316 · #6 · 2019-11

I solved it by modifying https://github.com/PaddlePaddle/book/blob/develop/05.recommender_system/train.py: I added usr_combined_features and mov_combined_features as two extra outputs of the model, and then, when running the model, used the Executor to obtain the user feature vectors and the movie feature vectors.
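
The sketch below is only my paraphrase of that approach, based on the book's train.py; main_program, usr_combined_features, mov_combined_features and the trained parameters are assumed to already exist as in that (modified) script. The point is simply that, once the two feature Variables are outputs of the network, they can be appended to fetch_list:

import paddle
import paddle.fluid as fluid

# feed_order as used in the book's train.py; it matches the fields yielded by the
# MovieLens reader.
feed_order = ['user_id', 'gender_id', 'age_id', 'job_id',
              'movie_id', 'category_id', 'movie_title', 'score']
feed_vars = [main_program.global_block().var(name) for name in feed_order]

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(feed_list=feed_vars, place=place)
exe = fluid.Executor(place)

reader = paddle.batch(paddle.dataset.movielens.test(), batch_size=8)
for batch in reader():
    usr_vec, mov_vec = exe.run(
        main_program,
        feed=feeder.feed(batch),
        fetch_list=[usr_combined_features, mov_combined_features])
    print(usr_vec.shape, mov_vec.shape)  # (batch_size, 200) for each
    break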

aistudio_2 · #9 · 2021-05

mark
