ValueError: (InvalidArgument) when using ERNIE-GEN, encountered while debugging a public project's code

Execution reached:

loss, _, __ = model(
    attn_ids,
    sent_ids=tgt_sids,
    pos_ids=tgt_pids,
    attn_bias=mask_attn_2_srctgtattn,
    past_cache=(past_cache_k, past_cache_v),
    tgt_labels=tgt_labels,
    tgt_pos=paddle.nonzero(attn_ids == attn_id))

and it raised:
Traceback (most recent call last):
File "J:/***/ERNIE-GEN/erniePoetry.py", line 289, in
loss, _, __ = model(
File "D:\ProgramData\Anaconda3\lib\site-packages\paddle\fluid\dygraph\layers.py", line 902, in __call__
outputs = self.forward(*inputs, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\paddlenlp\transformers\ernie_gen\modeling.py", line 610, in forward
loss = F.cross_entropy(
File "D:\ProgramData\Anaconda3\lib\site-packages\paddle\nn\functional\loss.py", line 1389, in cross_entropy
_, out = core.ops.softmax_with_cross_entropy(
ValueError: (InvalidArgument) Tensor holds the wrong type, it holds int, but desires to be int64_t.
[Hint: Expected valid == true, but received valid:0 != true:1.] (at C:\home\workspace\Paddle_release\paddle/fluid/framework/tensor_impl.h:33)
[operator < softmax_with_cross_entropy > error]


The source code comes from the project "Generating Poetry with the PaddleNLP Pretrained Model ERNIE-GEN":
https://aistudio.baidu.com/aistudio/projectdetail/1339888?channelType=0&channel=0
Apart from commenting out paddle.set_device('gpu'), the code is unchanged from the original.
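
The failing op is softmax_with_cross_entropy, which requires int64 labels. Below is a minimal sketch of the suspected cause (an assumption on my part, not something the traceback proves): on Windows, numpy arrays built from Python ints default to int32, so tgt_labels reaches F.cross_entropy as an int32 tensor.

import numpy as np
import paddle
import paddle.nn.functional as F

logits = paddle.randn([4, 10])
labels = paddle.to_tensor(np.array([1, 2, 3, 4]))  # int32 on Windows, int64 on Linux
# F.cross_entropy(logits, labels)                  # int32 labels raise the same InvalidArgument error
print(F.cross_entropy(logits, paddle.cast(labels, 'int64')))  # casting to int64 succeeds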

All comments (2)
welwin · #2 · Replied 2021-08

Additional details: the paddle version is 2.1.2 and the model is ErnieForGeneration.from_pretrained("ernie-1.0"). The full relevant code follows:

import paddle
print(paddle.__version__)
import paddlenlp
from paddlenlp.transformers import ErnieForGeneration  # the paste had "as EFG" here; corrected per #3 below

# paddle.set_device('gpu')  # raised: ValueError: The device should not be 'gpu', since PaddlePaddle is not compiled with CUDA
model = ErnieForGeneration.from_pretrained("ernie-1.0")
# Load the saved model as follows for incremental training
path = "J:\\***\\ERNIE\\"
# topath: a helper for handling the backslash issue in directory strings. When is it worth writing such a function? When nothing more important is pending.
init_checkpoint = path + "ernie_1.0_poetry.pdparams"
model_state = paddle.load(init_checkpoint)
model.set_state_dict(model_state)
# The data come from the 3 million lines of Tang and Song poetry open-sourced in
# chinese-poetry. The dataset takes the first two lines of each poem as model
# input and the rest as output, with the special character "\t" separating input
# from output. To keep the tokenizer from splitting multi-character words, the
# special character "\002" is inserted between characters as a separator (it may
# render as a space or another placeholder glyph).
# PaddleNLP has this dataset built in; it loads with one call.
from paddlenlp.datasets import load_dataset
train_dataset, dev_dataset = load_dataset('poetry', splits=('train', 'dev'), lazy=False)

# Example
print(train_dataset[0]['tokens'])
print(train_dataset[0]['labels'])

from copy import deepcopy
import numpy as np  # needed by convert_example and the mask code below
from paddlenlp.transformers import ErnieTokenizer

tokenizer = ErnieTokenizer.from_pretrained("ernie-1.0")
# ERNIE-GEN fills an [ATTN] token into each prediction slot; ERNIE 1.0 has no
# such token, so we use [MASK] as the filler
attn_id = tokenizer.vocab['[MASK]']
tgt_type_id = 1

# Set the maximum input and output lengths
max_encode_len = 24
max_decode_len = 72

def convert_example(example):
    """Convert an example into the features the model needs."""
    encoded_src = tokenizer.encode(
        example['tokens'], max_seq_len=max_encode_len, pad_to_max_seq_len=False)
    src_ids, src_sids = encoded_src["input_ids"], encoded_src["token_type_ids"]
    src_pids = np.arange(len(src_ids))

    encoded_tgt = tokenizer.encode(
        example['labels'],
        max_seq_len=max_decode_len,
        pad_to_max_seq_len=False)
    tgt_ids, tgt_sids = encoded_tgt["input_ids"], encoded_tgt["token_type_ids"]
    # Cast explicitly to int64: softmax_with_cross_entropy requires int64 labels,
    # and on Windows np.array/np.arange default to int32 -- the likely cause of
    # the InvalidArgument error quoted above.
    tgt_ids = np.array(tgt_ids, dtype="int64")
    tgt_sids = np.array(tgt_sids, dtype="int64") + tgt_type_id
    tgt_pids = np.arange(len(tgt_ids), dtype="int64") + len(src_ids)

    attn_ids = np.ones_like(tgt_ids) * attn_id
    tgt_labels = tgt_ids

    return (src_ids, src_pids, src_sids, tgt_ids, tgt_pids, tgt_sids,
            attn_ids, tgt_labels)

# Apply the preprocessing to both datasets
train_dataset = train_dataset.map(convert_example)
dev_dataset = dev_dataset.map(convert_example)

# Data preprocessing
'''
The stage above turns the raw data into a format the model can read.
ERNIE-GEN takes BERT-style input, so a tokenizer is needed to map plain text to ids.
PaddleNLP ships ErnieTokenizer; calling its encode method directly yields the
input_ids and token_type_ids.
'''
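
# Quick sanity check of the encode interface described above (illustrative only;
# the sample string is hypothetical, with "\002" as the per-character separator):
_enc = tokenizer.encode("床\002前\002明\002月\002光", max_seq_len=max_encode_len)
print(_enc["input_ids"])       # [CLS] id, five character ids, [SEP] id
print(_enc["token_type_ids"])  # all zeros for a single segment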

# Next, batch the data and build the extra attention-mask matrices ERNIE-GEN needs
from paddle.io import DataLoader
from paddlenlp.data import Stack, Tuple, Pad


def gen_mask(batch_ids, mask_type='bidi', query_len=None, pad_value=0):
    if query_len is None:
        query_len = batch_ids.shape[1]
    if mask_type != 'empty':
        mask = (batch_ids != pad_value).astype(np.float32)
        mask = np.tile(np.expand_dims(mask, 1), [1, query_len, 1])
        if mask_type == 'causal':
            assert query_len == batch_ids.shape[1]
            mask = np.tril(mask)
        elif mask_type == 'causal_without_diag':
            assert query_len == batch_ids.shape[1]
            mask = np.tril(mask, -1)
        elif mask_type == 'diag':
            assert query_len == batch_ids.shape[1]
            mask = np.stack([np.diag(np.diag(m)) for m in mask], 0)
    else:
        mask = np.zeros_like(batch_ids).astype(np.float32)
        mask = np.tile(np.expand_dims(mask, 1), [1, query_len, 1])
    return mask
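
# Toy illustration of gen_mask (illustrative only): one sequence of length 4
# whose last position is padding (pad_value=0).
_ids = np.array([[1, 2, 3, 0]])
print(gen_mask(_ids, 'bidi')[0])    # every query row attends to the 3 non-pad keys
print(gen_mask(_ids, 'causal')[0])  # lower-triangular version of the above
print(gen_mask(_ids, 'empty')[0])   # all zeros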


def after_padding(args):
    '''
    attention mask:
    ***   src, tgt, attn
    src    00,  01,  02
    tgt    10,  11,  12
    attn   20,  21,  22

    ***    s1, s2 | t1, t2, t3 | attn1, attn2, attn3
    s1      1,  1 |  0,  0,  0 |     0,     0,     0
    s2      1,  1 |  0,  0,  0 |     0,     0,     0
    -
    t1      1,  1 |  1,  0,  0 |     0,     0,     0
    t2      1,  1 |  1,  1,  0 |     0,     0,     0
    t3      1,  1 |  1,  1,  1 |     0,     0,     0
    -
    attn1   1,  1 |  0,  0,  0 |     1,     0,     0
    attn2   1,  1 |  1,  0,  0 |     0,     1,     0
    attn3   1,  1 |  1,  1,  0 |     0,     0,     1

    For details, see Fig. 3 of https://arxiv.org/abs/2001.11314
    '''
    src_ids, src_pids, src_sids, tgt_ids, tgt_pids, tgt_sids, attn_ids, tgt_labels = args
    src_len = src_ids.shape[1]
    tgt_len = tgt_ids.shape[1]
    mask_00 = gen_mask(src_ids, 'bidi', query_len=src_len)
    mask_01 = gen_mask(tgt_ids, 'empty', query_len=src_len)
    mask_02 = gen_mask(attn_ids, 'empty', query_len=src_len)

    mask_10 = gen_mask(src_ids, 'bidi', query_len=tgt_len)
    mask_11 = gen_mask(tgt_ids, 'causal', query_len=tgt_len)
    mask_12 = gen_mask(attn_ids, 'empty', query_len=tgt_len)

    mask_20 = gen_mask(src_ids, 'bidi', query_len=tgt_len)
    mask_21 = gen_mask(tgt_ids, 'causal_without_diag', query_len=tgt_len)
    mask_22 = gen_mask(attn_ids, 'diag', query_len=tgt_len)

    mask_src_2_src = mask_00
    mask_tgt_2_srctgt = np.concatenate([mask_10, mask_11], 2)
    mask_attn_2_srctgtattn = np.concatenate([mask_20, mask_21, mask_22], 2)

    raw_tgt_labels = deepcopy(tgt_labels)
    tgt_labels = tgt_labels[np.where(tgt_labels != 0)]
    return (src_ids, src_sids, src_pids, tgt_ids, tgt_sids, tgt_pids, attn_ids,
            mask_src_2_src, mask_tgt_2_srctgt, mask_attn_2_srctgtattn,
            tgt_labels, raw_tgt_labels)

# fn pads the ids at each position of the samples returned by convert_example;
# after_padding then builds the attention-mask matrices
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # src_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # src_pids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # src_sids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # tgt_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # tgt_pids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # tgt_sids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # attn_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # tgt_labels
): after_padding(fn(samples))
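
# End-to-end check of the padding pipeline (illustrative only): feed two mapped
# samples through batchify_fn and inspect the mask shapes it produces.
_batch = batchify_fn([train_dataset[0], train_dataset[1]])
print(_batch[7].shape)  # mask_src_2_src:         (2, src_len, src_len)
print(_batch[8].shape)  # mask_tgt_2_srctgt:      (2, tgt_len, src_len + tgt_len)
print(_batch[9].shape)  # mask_attn_2_srctgtattn: (2, tgt_len, src_len + 2 * tgt_len)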

batch_size = 48

train_data_loader = DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True,
    collate_fn=batchify_fn,
    return_list=True)

dev_data_loader = DataLoader(
    dataset=dev_dataset,
    batch_size=batch_size,
    shuffle=False,
    collate_fn=batchify_fn,
    return_list=True)

# Optimizer
# Create the optimizer here, with a learning rate that first rises and then
# decays so the model converges better.
import paddle.nn as nn

num_epochs = 1
learning_rate = 2e-5
warmup_proportion = 0.1
weight_decay = 0.1

max_steps = len(train_data_loader) * num_epochs
lr_scheduler = paddle.optimizer.lr.LambdaDecay(
    learning_rate,
    lambda current_step, num_warmup_steps=max_steps * warmup_proportion,
    num_training_steps=max_steps: float(current_step) / float(
        max(1, num_warmup_steps))
    if current_step < num_warmup_steps else max(
        0.0,
        float(num_training_steps - current_step) / float(
            max(1, num_training_steps - num_warmup_steps))))
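
# Illustrative probe of the schedule above: the multiplier rises linearly to 1.0
# over the warmup steps, then decays linearly to 0. Computed by hand so the real
# scheduler's state is untouched.
_warmup = max_steps * warmup_proportion
for _s in (0, _warmup / 2, _warmup, max_steps / 2, max_steps):
    _m = (float(_s) / float(max(1, _warmup)) if _s < _warmup else
          max(0.0, float(max_steps - _s) / float(max(1, max_steps - _warmup))))
    print("step %6d -> lr %.2e" % (_s, learning_rate * _m))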

optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    grad_clip=nn.ClipGradByGlobalNorm(1.0),
    apply_decay_param_fun=lambda x: x in [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ])
# Start training
# With everything in place, we can feed data to the model and keep updating its
# parameters. PaddleNLP's logger object prints timestamped logs during training.
import os
import time

from paddlenlp.utils.log import logger


global_step = 1
logging_steps = 100
save_steps = 1000
output_dir = "save_dir"
tic_train = time.time()
for epoch in range(num_epochs):
    for step, batch in enumerate(train_data_loader, start=1):
        (src_ids, src_sids, src_pids, tgt_ids, tgt_sids, tgt_pids, attn_ids,
         mask_src_2_src, mask_tgt_2_srctgt, mask_attn_2_srctgtattn,
         tgt_labels, _) = batch
        # import pdb; pdb.set_trace()
        _, __, info = model(
            src_ids,
            sent_ids=src_sids,
            pos_ids=src_pids,
            attn_bias=mask_src_2_src,
            encode_only=True)
        cached_k, cached_v = info['caches']
        _, __, info = model(
            tgt_ids,
            sent_ids=tgt_sids,
            pos_ids=tgt_pids,
            attn_bias=mask_tgt_2_srctgt,
            past_cache=(cached_k, cached_v),
            encode_only=True)
        cached_k2, cached_v2 = info['caches']
        past_cache_k = [
            paddle.concat([k, k2], 1) for k, k2 in zip(cached_k, cached_k2)
        ]
        past_cache_v = [
            paddle.concat([v, v2], 1) for v, v2 in zip(cached_v, cached_v2)
        ]
        loss, _, __ = model(
            attn_ids,
            sent_ids=tgt_sids,
            pos_ids=tgt_pids,
            attn_bias=mask_attn_2_srctgtattn,
            past_cache=(past_cache_k, past_cache_v),
            tgt_labels=tgt_labels,
            tgt_pos=paddle.nonzero(attn_ids == attn_id))
        # ^ This is the call that raised:
        # ValueError: (InvalidArgument) Tensor holds the wrong type, it holds int,
        # but desires to be int64_t. [operator < softmax_with_cross_entropy > error]
        loss = loss.mean()
        if global_step % logging_steps == 0:
            logger.info(
                "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s, lr: %.3e"
                % (global_step, epoch, step, loss.numpy(), logging_steps /
                   (time.time() - tic_train), lr_scheduler.get_lr()))
            tic_train = time.time()

        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_grad()
        if global_step % save_steps == 0:
            # Use a separate variable so output_dir is not nested one level
            # deeper on every save
            save_dir = os.path.join(output_dir, "model_%d" % global_step)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            model.save_pretrained(save_dir)
            tokenizer.save_pretrained(save_dir)

        global_step += 1

# Decoding logic
# ERNIE-GEN predicts by infilling generation, which we implement for decoding here.
# We decode with greedy search; for beam search, see the official example.
def gen_bias(encoder_inputs, decoder_inputs, step):
    decoder_bsz, decoder_seqlen = decoder_inputs.shape[:2]
    encoder_bsz, encoder_seqlen = encoder_inputs.shape[:2]
    attn_bias = paddle.reshape(
        paddle.arange(
            0, decoder_seqlen, 1, dtype='float32') + 1, [1, -1, 1])
    decoder_bias = paddle.cast(
        (paddle.matmul(
            attn_bias, 1. / attn_bias, transpose_y=True) >= 1.),
        'float32')  # [1, decoderlen, decoderlen]
    encoder_bias = paddle.unsqueeze(
        paddle.cast(paddle.ones_like(encoder_inputs), 'float32'),
        [1])  # [bsz, 1, encoderlen]
    encoder_bias = paddle.expand(
        encoder_bias, [encoder_bsz, decoder_seqlen,
                       encoder_seqlen])  # [bsz, decoderlen, encoderlen]
    decoder_bias = paddle.expand(
        decoder_bias, [decoder_bsz, decoder_seqlen,
                       decoder_seqlen])  # [bsz, decoderlen, decoderlen]
    if step > 0:
        bias = paddle.concat([
            encoder_bias, paddle.ones([decoder_bsz, decoder_seqlen, step],
                                      'float32'), decoder_bias
        ], -1)
    else:
        bias = paddle.concat([encoder_bias, decoder_bias], -1)
    return bias
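
# Shape check for gen_bias (illustrative only): batch of 2, encoder length 5,
# a 2-token decoder input (last token + [ATTN]), plus `step` cached tokens.
_enc = paddle.ones([2, 5], dtype='int64')
_dec = paddle.ones([2, 2], dtype='int64')
print(gen_bias(_enc, _dec, step=0).shape)  # [2, 2, 7]
print(gen_bias(_enc, _dec, step=3).shape)  # [2, 2, 10]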


@paddle.no_grad()
def greedy_search_infilling(model,
                            q_ids,
                            q_sids,
                            sos_id,
                            eos_id,
                            attn_id,
                            pad_id,
                            unk_id,
                            vocab_size,
                            max_encode_len=640,
                            max_decode_len=100,
                            tgt_type_id=3):
    _, logits, info = model(q_ids, q_sids)
    d_batch, d_seqlen = q_ids.shape
    seqlen = paddle.sum(paddle.cast(q_ids != 0, 'int64'), 1, keepdim=True)
    has_stopped = np.zeros([d_batch], dtype=bool)  # plain bool: np.bool is deprecated in newer NumPy
    gen_seq_len = np.zeros([d_batch], dtype=np.int64)
    output_ids = []

    past_cache = info['caches']

    cls_ids = paddle.ones([d_batch], dtype='int64') * sos_id
    attn_ids = paddle.ones([d_batch], dtype='int64') * attn_id
    ids = paddle.stack([cls_ids, attn_ids], -1)
    for step in range(max_decode_len):
        bias = gen_bias(q_ids, ids, step)
        pos_ids = paddle.to_tensor(
            np.tile(np.array([[step, step + 1]], dtype=np.int64), [d_batch, 1]))
        pos_ids += seqlen
        _, logits, info = model(
            ids,
            paddle.ones_like(ids) * tgt_type_id,
            pos_ids=pos_ids,
            attn_bias=bias,
            past_cache=past_cache)

        if logits.shape[-1] > vocab_size:
            logits[:, :, vocab_size:] = 0
        logits[:, :, pad_id] = 0
        logits[:, :, unk_id] = 0
        logits[:, :, attn_id] = 0

        gen_ids = paddle.argmax(logits, -1)

        past_cached_k, past_cached_v = past_cache
        cached_k, cached_v = info['caches']
        cached_k = [
            paddle.concat([pk, k[:, :1, :]], 1)
            for pk, k in zip(past_cached_k, cached_k)
        ]  # concat cached
        cached_v = [
            paddle.concat([pv, v[:, :1, :]], 1)
            for pv, v in zip(past_cached_v, cached_v)
        ]
        past_cache = (cached_k, cached_v)

        gen_ids = gen_ids[:, 1]
        ids = paddle.stack([gen_ids, attn_ids], 1)

        gen_ids = gen_ids.numpy()
        has_stopped |= (gen_ids == eos_id).astype(bool)
        gen_seq_len += (1 - has_stopped.astype(np.int64))
        output_ids.append(gen_ids.tolist())
        if has_stopped.all():
            break
    output_ids = np.array(output_ids).transpose([1, 0])
    return output_ids
# Run evaluation
# Evaluation decodes the dev set, then scores the predictions to measure model
# quality. paddlenlp.metrics provides metrics such as Rouge1 and Rouge2; here we
# use Rouge1.
from tqdm import tqdm

from paddlenlp.metrics import Rouge1


rouge1 = Rouge1()

vocab = tokenizer.vocab
eos_id = vocab[tokenizer.sep_token]
sos_id = vocab[tokenizer.cls_token]
pad_id = vocab[tokenizer.pad_token]
unk_id = vocab[tokenizer.unk_token]
vocab_size = len(vocab)

evaluated_sentences_ids = []
reference_sentences_ids = []

logger.info("Evaluating...")
model.eval()
for data in tqdm(dev_data_loader):
    (src_ids, src_sids, src_pids, _, _, _, _, _, _, _, _,
     raw_tgt_labels) = data  # never use the target during inference
    output_ids = greedy_search_infilling(
        model,
        src_ids,
        src_sids,
        eos_id=eos_id,
        sos_id=sos_id,
        attn_id=attn_id,
        pad_id=pad_id,
        unk_id=unk_id,
        vocab_size=vocab_size,
        max_decode_len=max_decode_len,
        max_encode_len=max_encode_len,
        tgt_type_id=tgt_type_id)

    for ids in output_ids.tolist():
        if eos_id in ids:
            ids = ids[:ids.index(eos_id)]
        evaluated_sentences_ids.append(ids)

    for ids in raw_tgt_labels.numpy().tolist():
        ids = ids[1:ids.index(eos_id)]
        reference_sentences_ids.append(ids)

score = rouge1.score(evaluated_sentences_ids, reference_sentences_ids)

logger.info("Rouge-1: %.5f" % (score * 100))

# Prediction results
# For generation tasks, the metric alone does not fully reflect model quality,
# so we inspect the predictions directly.
evaluated_sentences = []
reference_sentences = []
for ids in reference_sentences_ids[:5]:
    reference_sentences.append(''.join(vocab.to_tokens(ids)))
for ids in evaluated_sentences_ids[:5]:
    evaluated_sentences.append(''.join(vocab.to_tokens(ids)))
logger.info(reference_sentences)
logger.info(evaluated_sentences)

welwin · #3 · Replied 2021-08

In my original paste, the import read from paddlenlp.transformers import ErnieForGeneration as EFG; it should be from paddlenlp.transformers import ErnieForGeneration. I slipped when restoring the code.
