首页 Paddle Inference 帖子详情
是否支持多线程报错?
收藏
快速回复
Paddle Inference 其他学习资料 640 0
是否支持多线程报错?
收藏
快速回复
Paddle Inference 其他学习资料 640 0

//编码 utf8转gb2312 防止中文路径错误
// Convert a UTF-8 encoded string to the system ANSI code page (GB2312 on
// Simplified-Chinese Windows) so that Chinese file paths open correctly.
//
// Fixes vs. original: the intermediate WCHAR buffer and the result buffer
// were raw new[] allocations and the result buffer was never freed (leak);
// the const_cast on data.c_str() was unnecessary (the APIs take const input).
static string utf8_to_gb2312(string data)
{
	// UTF-8 -> UTF-16: first query the required length in WCHARs
	// (includes the terminating NUL because cbMultiByte is -1).
	int wlen = MultiByteToWideChar(CP_UTF8, 0, data.c_str(), -1, NULL, 0);
	std::wstring wide(wlen, L'\0');
	MultiByteToWideChar(CP_UTF8, 0, data.c_str(), -1, &wide[0], wlen);

	// UTF-16 -> ANSI (CP_ACP, i.e. GB2312 on zh-CN systems).
	int alen = WideCharToMultiByte(CP_ACP, 0, wide.c_str(), -1, NULL, 0, NULL, NULL);
	std::string out(alen, '\0');
	WideCharToMultiByte(CP_ACP, 0, wide.c_str(), -1, &out[0], alen, NULL, NULL);

	// The converted data includes the trailing NUL; strip it from the string.
	while (!out.empty() && out.back() == '\0')
		out.pop_back();
	return out;
}

//ANSI转UTF8
// Convert a string from the system ANSI code page (936 / GB2312 on
// Simplified-Chinese Windows) to UTF-8.
//
// Fixes vs. original: the result buffer (szBuffer) was never freed (leak);
// it was also declared TCHAR* but allocated as CHAR[], which breaks under
// UNICODE builds; the const_cast on data.c_str() was unnecessary.
string ANSItoUTF8(string data)
{
	// ANSI -> UTF-16: query the required buffer size first (cbMultiByte
	// of -1 makes the count include the terminating NUL).
	int wlen = MultiByteToWideChar(CP_ACP, 0, data.c_str(), -1, NULL, 0);
	std::wstring wide(wlen, L'\0');
	MultiByteToWideChar(CP_ACP, 0, data.c_str(), -1, &wide[0], wlen);

	// UTF-16 -> UTF-8.
	int u8len = WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), -1, NULL, 0, NULL, NULL);
	std::string out(u8len, '\0');
	WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), -1, &out[0], u8len, NULL, NULL);

	// Strip the trailing NUL that the -1 length produced.
	while (!out.empty() && out.back() == '\0')
		out.pop_back();
	return out;
}

// Global OCR engine objects, created by ocr_InitEx() and used by both
// main() and the pub_main() worker threads.
// NOTE(review): these predictors are shared by all threads without any
// synchronization — presumably not thread-safe in Paddle Inference;
// confirm against the Paddle docs (this looks like the cause of the
// reported Conv InvalidArgumentError under concurrency).
OCRConfig* g_config = NULL;       // parsed configuration file
DBDetector* g_det = NULL;         // text detector
Classifier* g_cls = NULL;         // optional angle classifier (stays NULL when disabled)
CRNNRecognizer* g_rec = NULL;     // text recognizer

int ocr_InitEx(const char* config)
{
g_config = new OCRConfig(config);
//g_config.PrintConfigInfo();

g_det = new DBDetector(g_config->det_model_dir, g_config->use_gpu, g_config->gpu_id,
g_config->gpu_mem, g_config->cpu_math_library_num_threads,
g_config->use_mkldnn, g_config->max_side_len, g_config->det_db_thresh,
g_config->det_db_box_thresh, g_config->det_db_unclip_ratio,
g_config->use_polygon_score, g_config->visualize,
g_config->use_tensorrt, g_config->use_fp16);

if (g_config->use_angle_cls == true)
{
g_cls = new Classifier(g_config->cls_model_dir, g_config->use_gpu, g_config->gpu_id,
g_config->gpu_mem, g_config->cpu_math_library_num_threads,
g_config->use_mkldnn, g_config->cls_thresh,
g_config->use_tensorrt, g_config->use_fp16);
}

g_rec = new CRNNRecognizer(g_config->rec_model_dir, g_config->use_gpu, g_config->gpu_id,
g_config->gpu_mem, g_config->cpu_math_library_num_threads,
g_config->use_mkldnn, g_config->char_list_file,
g_config->use_tensorrt, g_config->use_fp16);
return 0;
}

// Thread entry point: runs the detect -> (classify) -> recognize pipeline
// on the cv::Mat passed via `param`. Always returns 0.
//
// NOTE(review): all threads share the global g_det/g_cls/g_rec predictors
// with no synchronization. Paddle Inference predictors are presumably not
// thread-safe — concurrent Run() calls corrupting predictor state matches
// the reported Conv "channels" InvalidArgumentError. Each thread should
// own its own predictor (one per thread, or cloned) — confirm against the
// Paddle Inference multi-thread docs.
//
// Fixes vs. original: restored the template arguments that the forum
// stripped from the vector declarations, and fixed the printf format
// specifiers (%d was paired with DWORD and size_t arguments, which is
// undefined behavior).
DWORD WINAPI pub_main(void* param)
{
	printf("pub_main:%lu\n", ::GetCurrentThreadId());  // DWORD is unsigned long
	cv::Mat* srcimg = (cv::Mat*)param;

	// Detection output: one quadrilateral per text region, each a list of
	// four [x, y] points.
	std::vector<std::vector<std::vector<int>>> boxes;
	g_det->Run(*srcimg, boxes);

	// Recognition (with optional angle classification via g_cls).
	std::vector<std::string> str_res;
	g_rec->Run(boxes, *srcimg, g_cls, str_res);
	for (size_t i = 0; i < str_res.size(); i++)
	{
		printf("Txt=%zu/%zu [%s]\n", i, str_res.size(), str_res[i].c_str());
	}

	return 0;
}

// Entry point: loads the config and test image, runs one timed OCR pass on
// the main thread, then spawns five worker threads that each run the same
// pipeline on the shared image.
//
// Fixes vs. original:
//  - restored template arguments stripped by the forum (vector<...>,
//    duration_cast<...>);
//  - THREAD_RESUME is not a valid CreateThread creation flag; the
//    documented values are 0 (run immediately) or CREATE_SUSPENDED;
//  - thread handles are now closed (they were leaked);
//  - rfind result is checked against npos before substr;
//  - printf %d with size_t arguments replaced by %zu.
int main(int argc, char **argv)
{
	if (argc < 3) {
		std::cerr << "[ERROR] usage: " << argv[0]
			<< " configure_filepath image_path\n";
		getchar();
		exit(1);
	}

	// Switch the working directory to the executable's folder so the
	// relative model paths in the config file resolve correctly.
	char strModule[MAX_PATH];
	::GetModuleFileName(NULL, strModule, MAX_PATH - 1);
	std::string StrExe = strModule;
	std::string::size_type index = StrExe.rfind('\\');
	if (index == std::string::npos)
		index = 0;  // defensive: no path separator in the module path
	std::string strPath = StrExe.substr(0, index);
	strPath.append("\\");
	SetCurrentDirectory(strPath.c_str());

	printf("%s\n", strPath.c_str());

	std::string img_path(argv[2]);
	cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR);

	if (!srcimg.data)
	{
		std::cerr << "[ERROR] image read failed! image path: " << img_path << "\n";
		exit(1);
	}

	ocr_InitEx(argv[1]);

	// Single-threaded warm-up pass, timed end-to-end.
	auto start = std::chrono::system_clock::now();
	std::vector<std::vector<std::vector<int>>> boxes;
	g_det->Run(srcimg, boxes);

	std::vector<std::string> str_res;
	g_rec->Run(boxes, srcimg, g_cls, str_res);

	for (size_t i = 0; i < str_res.size(); i++)
	{
		printf("Txt=%zu/%zu [%s]\n", i, str_res.size(), str_res[i].c_str());
	}

	auto end = std::chrono::system_clock::now();
	auto duration =
		std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << "Cost "
		<< double(duration.count()) *
		std::chrono::microseconds::period::num /
		std::chrono::microseconds::period::den
		<< "s" << std::endl;

	// Spawn the worker threads. Creation flags must be 0 (start running)
	// or CREATE_SUSPENDED.
	// NOTE(review): every thread calls Run() on the same global predictors
	// concurrently — presumably not supported by Paddle Inference (matches
	// the reported Conv error); give each thread its own predictor.
	const int kThreadCount = 5;
	HANDLE handles[kThreadCount] = {};
	for (int t = 0; t < kThreadCount; ++t)
	{
		DWORD threadID = 0;
		handles[t] = CreateThread(NULL, 0, &pub_main, &srcimg, 0, &threadID);
	}

	getchar();

	// Release the kernel thread objects (previously leaked).
	for (int t = 0; t < kThreadCount; ++t)
	{
		if (handles[t])
			CloseHandle(handles[t]);
	}

	delete g_det;
	delete g_rec;
	delete g_config;
	if (g_cls) delete g_cls;

	return 0;
}

 

请问,不支持多线程吗?上面的代码运行报错

 

pub_main:15172
pub_main:38284


--------------------------------------
C++ Traceback (most recent call last):
--------------------------------------
Not support stack backtrace yet.

----------------------
Error Message Summary:
----------------------
InvalidArgumentError: The number of input's channels should be equal to filter's channels * groups for Op(Conv). But received: the input's channels is 16, the input's shape is [1, 16, 240, 136]; the filter's channels is 32, the filter's shape is [16, 32, 1, 1]; the groups is 1, the data_format is NCHW. The error may come from wrong data_format setting.
[Hint: Expected input_channels == filter_dims[1] * groups, but received input_channels:16 != filter_dims[1] * groups:32.] (at C:/home/workspace/Paddle_release/paddle/fluid/operators/conv_op.cc:100)

 

配值表:

# model load config
use_gpu 0
gpu_id 0
gpu_mem 4000
cpu_math_library_num_threads 50
use_mkldnn 1

# det config
max_side_len 960
det_db_thresh 0.3
det_db_box_thresh 0.3
det_db_unclip_ratio 1.6
use_polygon_score 1
det_model_dir ./inference/det/

# cls config
use_angle_cls 0
cls_model_dir ./inference/cls/
cls_thresh 0.9

# rec config
rec_model_dir ./inference/rec/
char_list_file ./ppocr_keys_v1.txt

# show the detection results
visualize 0

# use_tensorrt
use_tensorrt 0
use_fp16 0

 

0
收藏
回复
需求/bug反馈?一键提issue告诉我们
发现bug?如果您知道修复办法,欢迎提pr直接参与建设飞桨~
在@后输入用户全名并按空格结束,可艾特全站任一用户