import argparse

import numpy as np
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor


def main():
    # Set up the AnalysisConfig
    config = set_config()

    # Create the PaddlePredictor
    predictor = create_paddle_predictor(config)

    # Get the input names
    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_tensor(input_names[0])

    # Set the input
    fake_input = np.random.randn(1, 3, 318, 318).astype("float32")
    input_tensor.reshape([1, 3, 318, 318])
    input_tensor.copy_from_cpu(fake_input)

    # Run the predictor
    predictor.zero_copy_run()

    # Get the output
    output_names = predictor.get_output_names()
    output_tensor = predictor.get_output_tensor(output_names[0])
    output_data = output_tensor.copy_to_cpu()  # numpy.ndarray


def set_config():
    config = AnalysisConfig(
        "../inference_model/faster_rcnn_dcn_r50_vd_fpn_3x_server_side/__model__",
        "../inference_model/faster_rcnn_dcn_r50_vd_fpn_3x_server_side/__params__")
    config.disable_gpu()
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)
    return config


if __name__ == "__main__":
    main()
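For reference, the feeding step above only fills the first name returned by get_input_names(); if the exported graph declares more feed inputs (the Python call stack below shows im_info flowing into generate_proposals), any tensor that is never fed stays unallocated. A minimal sketch that feeds every declared input, reusing the same predictor object and assuming placeholder names and shapes for the extra inputs:

import numpy as np

input_names = predictor.get_input_names()
print("declared inputs:", input_names)  # e.g. ['image', 'im_info'] -- actual names depend on export_model.py

for name in input_names:
    tensor = predictor.get_input_tensor(name)
    if name == "image":
        data = np.random.randn(1, 3, 318, 318).astype("float32")
    else:
        # placeholder [height, width, scale] row per image; an assumption, not taken from the export
        data = np.array([[318.0, 318.0, 1.0]], dtype=np.float32)
    tensor.reshape(list(data.shape))
    tensor.copy_from_cpu(data)

predictor.zero_copy_run()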
Here is the error log:
Traceback (most recent call last):
File "test_zero_copy.py", line 41, in
main()
File "test_zero_copy.py", line 24, in main
predictor.zero_copy_run()
paddle.fluid.core_avx.EnforceNotMet:
--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
0 std::string paddle::platform::GetTraceBackString(std::string const&, char const*, int)
1 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
2 paddle::framework::Tensor::check_memory_size() const
3 paddle::framework::Tensor::Slice(long, long) const
4 paddle::operators::GenerateProposalsKernel::Compute(paddle::framework::ExecutionContext const&) const
5 std::_Function_handler, paddle::operators::GenerateProposalsKernel >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
6 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const
7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
8 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
9 paddle::framework::NaiveExecutor::Run()
10 paddle::AnalysisPredictor::ZeroCopyRun()
------------------------------------------
Python Call Stacks (More useful to users):
------------------------------------------
File "/usr/local/lib/python3.6/dist-packages/paddle/fluid/framework.py", line 2610, in append_op
attrs=kwargs.get("attrs", None))
File "/usr/local/lib/python3.6/dist-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/paddle/fluid/layers/detection.py", line 2846, in generate_proposals
'RpnRoisLod': rpn_rois_lod
File "/root/PaddleDetection/ppdet/core/workspace.py", line 150, in partial_apply
return op(*args, **kwargs_)
File "/root/PaddleDetection/ppdet/modeling/anchor_heads/rpn_head.py", line 438, in _get_single_proposals
variances=self.anchor_var)
File "/root/PaddleDetection/ppdet/modeling/anchor_heads/rpn_head.py", line 462, in get_proposals
fpn_feat, im_info, lvl, mode)
File "/root/PaddleDetection/ppdet/modeling/architectures/faster_rcnn.py", line 100, in build
rois = self.rpn_head.get_proposals(body_feats, im_info, mode=mode)
File "/root/PaddleDetection/ppdet/modeling/architectures/faster_rcnn.py", line 248, in test
return self.build(feed_vars, 'test')
File "tools/export_model.py", line 197, in main
test_fetches = model.test(feed_vars)
File "tools/export_model.py", line 216, in
main()
----------------------
Error Message Summary:
----------------------
Error: Tensor holds no memory. Call Tensor::mutable_data first.
[Hint: holder_ should not be null.] at (/paddle/paddle/fluid/framework/tensor.cc:23)
[operator < generate_proposals > error]
The "Error: Tensor holds no memory" message probably means it ran out of memory. The model is fairly large, so you could try running it on AIStudio and see.
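If the out-of-memory reading is right, it may be worth shrinking the predictor's footprint before moving to another machine. A sketch of memory-related AnalysisConfig switches (these methods exist on AnalysisConfig, but whether they are enough for a model this size is untested here):

from paddle.fluid.core import AnalysisConfig

def set_config():
    config = AnalysisConfig(
        "../inference_model/faster_rcnn_dcn_r50_vd_fpn_3x_server_side/__model__",
        "../inference_model/faster_rcnn_dcn_r50_vd_fpn_3x_server_side/__params__")
    config.disable_gpu()
    config.enable_memory_optim()                 # let the analysis pass reuse tensor buffers
    config.set_cpu_math_library_num_threads(1)   # fewer math-library threads, less scratch memory
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)
    return config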