我在使用 HAPI 2.0 训练图像识别模型,多卡训练脚本运行时报错(卡住不动),求助
代码文件
import paddle
import numpy as np
import paddle.distributed as dist
# NOTE(review): a bare attribute access is a no-op in a plain script (it only
# echoes the version in a notebook cell); use print(paddle.__version__) if the
# version should actually be logged.
paddle.__version__
# Global training configuration shared by the data pipeline and the fit loop.
train_parameters = {
"train_dataset_size": 50000, # number of training images
"batch_size": 128,
"epoch_num":20,
"learning_rate":0.0005,
"eta_min":0.00001,
"decay_rate":0.02, # L2 regularization coefficient: increase when overfitting, decrease when underfitting; with a sensible learning rate, try growing it 2-10x step by step
"tta_num":10 # number of augmented copies / prediction passes per image for test-time augmentation (TTA)
}
import paddle.vision.transforms as T

# Training-time augmentation pipeline.  Colour jitter / rotation / crop
# strength should stay close to the test-set distribution (avoid extremes);
# the tensor conversion (HWC ndarray -> CHW tensor) must stay the LAST step.
_train_steps = [
    T.ColorJitter(0.125, 0.4, 0.4, 0.08),  # brightness, contrast, saturation, hue
    T.Pad(2),
    T.RandomRotation(5),  # a small rotation range is enough for real-world photos
    T.RandomResizedCrop(224, scale=(0.7901, 0.8920), ratio=(1, 1)),  # crop then upsample to the 224px input size of the pretrained model
    T.RandomHorizontalFlip(0.5),  # horizontal flip suits most object photos
    T.Normalize(mean=[127.5, 127.5, 127.5],
                std=[127.5, 127.5, 127.5],
                data_format='HWC'),  # map raw pixel values to roughly [-1, 1]
    T.ToTensor(),  # keep last: converts HWC to CHW
]
transform1 = T.Compose(_train_steps)

# Deterministic pipeline intended for evaluation: resize only, no randomness.
_eval_steps = [
    T.Resize(size=224),  # upsample to the 224px resolution used in training
    T.Normalize(mean=[127.5, 127.5, 127.5],
                std=[127.5, 127.5, 127.5],
                data_format='HWC'),
    T.ToTensor(),  # keep last: converts HWC to CHW
]
transform2 = T.Compose(_eval_steps)
# Training dataset with random augmentation.
train_dataset = paddle.vision.datasets.Cifar100(mode='train', transform=transform1)
# Validation dataset: use the deterministic pipeline.  The original code
# passed transform1 here, so validation metrics were computed on randomly
# augmented images and transform2 was dead code; evaluation should be
# reproducible, so use transform2.
eval_dataset = paddle.vision.datasets.Cifar100(mode='test', transform=transform2)
# ResNet-101 backbone with pretrained weights, 100-way classification head.
network = paddle.vision.models.resnet101(num_classes=100, pretrained=True)
model = paddle.Model(network)
# Piecewise LR decay at boundaries 5/10/15; the first and last values are
# taken from the shared config so the schedule and train_parameters cannot
# drift apart (numerically identical to the original hard-coded values).
model.prepare(
    paddle.optimizer.Adam(
        learning_rate=paddle.optimizer.lr.PiecewiseDecay(
            boundaries=[5, 10, 15],
            values=[train_parameters['learning_rate'], 0.0001, 0.00002,
                    train_parameters['eta_min']]),
        parameters=model.parameters(),
        weight_decay=paddle.regularizer.L2Decay(train_parameters['decay_rate'])),
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy(topk=(1, 5)))  # report top-1 and top-5 accuracy
def train():
    """Run the fit loop with VisualDL logging and per-epoch checkpoints.

    Uses the module-level ``model``, ``train_dataset``, ``eval_dataset`` and
    ``train_parameters`` defined above.
    """
    callbacks = [paddle.callbacks.VisualDL(log_dir='visual_log')]
    model.fit(
        train_dataset,
        eval_dataset,
        epochs=train_parameters['epoch_num'],
        batch_size=train_parameters['batch_size'],
        shuffle=True,         # reshuffle samples once per epoch
        verbose=1,
        save_dir='./model/',  # checkpoint output directory
        save_freq=1,          # checkpoint every epoch
        callbacks=callbacks)
if __name__ == '__main__':
    # dist.spawn launches one worker process per visible GPU, and each worker
    # runs train() itself.  The original code additionally called train()
    # directly afterwards, which kicked off a second, single-process training
    # run in the parent — that duplicate call is removed here.
    # NOTE(review): the GPUs to use must be visible to this process before
    # spawning, e.g. set CUDA_VISIBLE_DEVICES in the environment (or pass
    # nprocs= to dist.spawn) — confirm against the job's launch setup.
    dist.spawn(train)
报错内容
[INFO]: current net device: eth0, ip: 172.28.0.45
[INFO]: paddle job envs:
POD_IP=job-4497f66885752d5b308e6b530a0b3148-trainer-0.job-4497f66885752d5b308e6b530a0b3148
PADDLE_PORT=12345
PADDLE_TRAINER_ID=0
PADDLE_TRAINERS_NUM=1
PADDLE_USE_CUDA=1
NCCL_SOCKET_IFNAME=eth0
PADDLE_IS_LOCAL=1
OUTPUT_PATH=/root/paddlejob/workspace/output
LOCAL_LOG_PATH=/root/paddlejob/workspace/log
LOCAL_MOUNT_PATH=/mnt/code_20210317140640,/mnt/datasets_20210317140640
JOB_ID=job-4497f66885752d5b308e6b530a0b3148
TRAINING_ROLE=TRAINER
[INFO]: user command: python -u train.py
[INFO]: start trainer
~/paddlejob/workspace/code /mnt
Cache file /root/.cache/paddle/dataset/cifar/cifar-100-python.tar.gz not found, downloading https://dataset.bj.bcebos.com/cifar/cifar-100-python.tar.gz
Begin to download
Download finished
W0317 14:06:50.588541 222 device_context.cc:362] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.1, Runtime API Version: 10.1
W0317 14:06:50.592641 222 device_context.cc:372] device: 0, cuDNN Version: 7.6.
2021-03-17 14:07:00,819 - INFO - unique_endpoints {'job-4497f66885752d5b308e6b530a0b3148-trainer-0.job-4497f66885752d5b308e6b530a0b3148:12345'}
没有看到你设置可用的GPU,比如四卡的话,在 Python 里应当写:
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"(注意:os.system("export ...") 是在子 shell 中执行的,不会影响当前 Python 进程的环境变量,所以不能用它来设置)
大佬,这个我看的是,不设置这个是不是默认4卡?
https://aistudio.baidu.com/aistudio/projectdetail/1222066看一下这个就懂了