io.batch error: 'generator' object is not callable
This looks like a problem in the code. I modified your original code as follows:
import paddle.reader as reader
import paddle.fluid as fl
import time

def reader_creator_10(x):
    print(x)
    # return the inner iterator function itself (a callable),
    # not a generator object
    def iterator():
        for i in range(10):
            yield i
    return iterator

def mapper(x):
    return x + 1

orders = (True, False)
thread_num = 2   # (1, 2, 4, 8, 16)
buffer_size = 2  # (1, 2, 4, 8, 16)
user_reader = reader.xmap_readers(mapper, reader_creator_10(5), thread_num, buffer_size, True)
shuffled_reader = fl.io.shuffle(reader, 5)  # left over from the original code; never used below
GPU_flag = False
place = fl.CUDAPlace(0) if GPU_flag else fl.CPUPlace()
batch_size = 5
batch_reader = fl.io.batch(user_reader, batch_size=batch_size)

with fl.dygraph.guard(place):
    for data in batch_reader():
        print(data)
This runs as expected.
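The essential change is that reader_creator_10(5) now returns the inner iterator function, so xmap_readers is handed a callable that it can invoke inside its worker thread. A minimal, Paddle-free sketch of that contract (the consume helper below is purely illustrative, not a Paddle API):

def reader_creator_10(x):
    print(x)
    # return the callable itself; every call to iterator() yields a fresh sequence
    def iterator():
        for i in range(10):
            yield i
    return iterator

def consume(reader):
    # stand-in for what paddle's reader decorators do internally: they call reader() themselves
    return [item for item in reader()]

print(consume(reader_creator_10(5)))   # prints 5, then [0, 1, ..., 9]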
Following the official usage of batch, on Paddle 1.6, my code is as follows:
import paddle.reader as reader
import paddle.fluid as fl
import time

def reader_creator_10(x):
    print(x)
    for i in range(10):
        yield i

def mapper(x):
    return x + 1

orders = (True, False)
thread_num = 2   # (1, 2, 4, 8, 16)
buffer_size = 2  # (1, 2, 4, 8, 16)
user_reader = reader.xmap_readers(mapper, reader_creator_10(5), thread_num, buffer_size, True)
shuffled_reader = fl.io.shuffle(reader, 5)  # shuffled_reader is never used below
GPU_flag = False
place = fl.CUDAPlace(0) if GPU_flag else fl.CPUPlace()
batch_size = 5
batch_reader = fl.io.batch(user_reader, batch_size=batch_size)

with fl.dygraph.guard(place):
    for data in batch_reader():
        print(data)
Running this raises the following error:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/opt/anaconda3/envs/paddle_env/lib/python3.7/threading.py", line 926, in _bootstrap_inner
    self.run()
  File "/opt/anaconda3/envs/paddle_env/lib/python3.7/threading.py", line 870, in run
    self._target(*self._args, **self._kwargs)
  File "/opt/anaconda3/envs/paddle_env/lib/python3.7/site-packages/paddle/reader/decorator.py", line 445, in order_read_worker
    for i in reader():
TypeError: 'generator' object is not callable
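The last two frames show the cause: order_read_worker calls reader(), but because reader_creator_10 contains a yield, reader_creator_10(5) is already a generator object, and a generator object cannot be called. The same error can be reproduced without Paddle:

def gen_func():
    yield 1

g = gen_func()   # calling a generator function returns a generator object
g()              # TypeError: 'generator' object is not callable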
### However, if I modify it and remove the parameter from reader_creator_10, there is no error
import paddle.reader as reader
import paddle.fluid as fl
import time

def reader_creator_10():
    print(5)
    for i in range(10):
        yield i

def mapper(x):
    return x + 1

orders = (True, False)
thread_num = 2   # (1, 2, 4, 8, 16)
buffer_size = 2  # (1, 2, 4, 8, 16)
user_reader = reader.xmap_readers(mapper, reader_creator_10, thread_num, buffer_size, True)
shuffled_reader = fl.io.shuffle(reader, 5)  # shuffled_reader is never used below
GPU_flag = False
place = fl.CUDAPlace(0) if GPU_flag else fl.CPUPlace()
batch_size = 5
batch_reader = fl.io.batch(user_reader, batch_size=batch_size)

with fl.dygraph.guard(place):
    for data in batch_reader():
        print(data)
The output is:
5
[1, 2, 3, 4, 5]
[6, 7, 8, 9, 10]
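This version works because reader_creator_10 itself, a plain function and therefore callable, is what gets passed to xmap_readers, but the parameter x is lost. If the parameter is needed, one alternative sketch (not the only fix, and equivalent in spirit to the closure shown in the reply above) is functools.partial, which binds the argument while keeping the result callable:

import functools

def reader_creator_10(x):
    print(x)
    for i in range(10):
        yield i

# partial(...) is itself callable; each call runs reader_creator_10(5) and returns a fresh generator
creator = functools.partial(reader_creator_10, 5)

for i in creator():
    print(i)

# it can then be passed wherever a reader creator is expected, e.g.:
# user_reader = reader.xmap_readers(mapper, creator, thread_num, buffer_size, True)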