AI Studio开始训练时没有动,一直卡着
收藏
快速回复
AI Studio平台使用 问答Notebook项目 1785 2

代码如下,运行平台为AI Studio
!ls /home/aistudio/data/
import paddle.fluid as fluid
import paddle
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline

#command==1 do train; otherwise do test
def train_reader_createor():
    """Reader creator for the training split of the flowers dataset.

    Returns a generator yielding ``(image, label)`` pairs, where ``image``
    is a float32 CHW array scaled to [0, 1] and ``label`` is the index of
    the flower class in ``data``.
    """
    data_path = '/home/aistudio/data/data8554/flowers/'
    data = ["dandelion/", "rose/", "tulip/", "daisy/", "sunflower/"]
    command = 1  # 1 selects the training split inside reader()

    def sep(length):
        # Draw ~20% of the indices as the test set (np.random.randint may
        # repeat, so the test set can contain duplicates); the remainder
        # is the training set.
        # NOTE(review): the split is re-drawn on every call with no fixed
        # seed, so the train/test sets differ between the train and test
        # reader creators and can overlap — consider a seeded or
        # precomputed split.
        num = np.arange(length)
        test = []
        for _ in range(int(len(num) / 5)):
            test.append(int(np.random.randint(len(num))))
        train = np.delete(num, test)
        return train, np.array(test)

    def reader(command):
        for i in range(len(data)):
            train, test = sep(len(os.listdir(data_path + data[i])))
            indices = train if command == 1 else test
            for j in indices:
                # assumes files are named 0.jpg .. N-1.jpg — TODO confirm
                # against the actual dataset layout.
                img = Image.open(data_path + data[i] + "{0}.jpg".format(j))
                # BUG FIX: the network declares data_shape [3, 256, 256],
                # but raw flower photos come in varying sizes (and some may
                # not be 3-channel RGB); without this convert/resize the
                # samples cannot be batched and fed to the network.
                img = img.convert('RGB').resize((256, 256))
                img = np.array(img, dtype='float32')
                yield img.transpose((2, 0, 1)) / 255.0, i

    return reader(command)


def test_reader_createor():
    """Reader creator for the held-out (test) split of the flowers dataset.

    Returns a generator yielding ``(image, label)`` pairs, where ``image``
    is a float32 CHW array scaled to [0, 1] and ``label`` is the index of
    the flower class in ``data``.
    """
    data_path = '/home/aistudio/data/data8554/flowers/'
    data = ["dandelion/", "rose/", "tulip/", "daisy/", "sunflower/"]
    command = 0  # 0 selects the test split inside reader()

    def sep(length):
        # Draw ~20% of the indices as the test set (np.random.randint may
        # repeat, so the test set can contain duplicates); the remainder
        # is the training set.
        # NOTE(review): the split is re-drawn on every call with no fixed
        # seed, so it does not match the split drawn by
        # train_reader_createor — the two sets can overlap.
        num = np.arange(length)
        test = []
        for _ in range(int(len(num) / 5)):
            test.append(int(np.random.randint(len(num))))
        train = np.delete(num, test)
        return train, np.array(test)

    def reader(command):
        for i in range(len(data)):
            train, test = sep(len(os.listdir(data_path + data[i])))
            indices = train if command == 1 else test
            for j in indices:
                # assumes files are named 0.jpg .. N-1.jpg — TODO confirm
                # against the actual dataset layout.
                img = Image.open(data_path + data[i] + "{0}.jpg".format(j))
                # BUG FIX: the network declares data_shape [3, 256, 256],
                # but raw flower photos come in varying sizes (and some may
                # not be 3-channel RGB); without this convert/resize the
                # samples cannot be batched and fed to the network.
                img = img.convert('RGB').resize((256, 256))
                img = np.array(img, dtype='float32')
                yield img.transpose((2, 0, 1)) / 255.0, i

    return reader(command)


def vgg_bn_drop(input):
    """Build a VGG-style classifier with batch norm and dropout.

    Five conv blocks (64/128/256/512/512 filters), then two dropout+FC
    stages and a 5-way softmax output layer.
    """
    def conv_block(x, num_filter, groups, dropouts):
        # One VGG block: `groups` 3x3 conv layers (ReLU, batch norm, the
        # given per-layer dropout rates) followed by 2x2 max pooling.
        return fluid.nets.img_conv_group(
            input=x,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    # (num_filter, groups, dropouts) for each of the five conv blocks.
    block_cfg = [
        (64, 2, [0.3, 0]),
        (128, 2, [0.4, 0]),
        (256, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
    ]
    features = input
    for num_filter, groups, dropouts in block_cfg:
        features = conv_block(features, num_filter, groups, dropouts)

    # Classifier head: dropout -> FC -> BN(ReLU) -> dropout -> FC -> softmax.
    dropped = fluid.layers.dropout(x=features, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=dropped, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    dropped2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=dropped2, size=512, act=None)
    return fluid.layers.fc(input=fc2, size=5, act='softmax')

 

def train_program():
    """Attach loss and accuracy heads to the inference network.

    Returns ``[avg_cost, accuracy]`` to be used as fetch targets during
    training.
    """
    predict = inference_program()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    per_sample_cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(per_sample_cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return [avg_cost, accuracy]



def inference_program():
    """Declare the image input layer and build the VGG classifier on it.

    Returns the softmax prediction variable.
    """
    images = fluid.layers.data(
        name='flowers', shape=[3, 256, 256], dtype='float32')
    return vgg_bn_drop(images)

def optimizer_program():
    """Return an Adam optimizer with a fixed learning rate of 1e-3."""
    return fluid.optimizer.Adam(learning_rate=0.001)

BATCH_SIZE = 64

# BUG FIX: buf_size=8000 makes paddle.reader.shuffle decode and hold 8000
# full images in memory before the first batch is ever yielded (a 3x256x256
# float32 image is ~0.75 MB, i.e. roughly 6 GB just for the shuffle
# buffer). On a typical AI Studio instance this makes training appear
# "stuck" at start while the buffer fills / the process swaps. A few
# hundred samples is plenty for shuffling here.
train_reader = paddle.batch(
    paddle.reader.shuffle(train_reader_createor, buf_size=512),
    batch_size=BATCH_SIZE)

# Reader for testing. A separated data set for testing (no shuffling).
test_reader = paddle.batch(
    test_reader_createor, batch_size=BATCH_SIZE)


# NOTE(review): CUDAPlace(0) requires a GPU instance; on a CPU-only AI
# Studio environment set use_cuda = False — TODO confirm the instance type.
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

# Order in which the DataFeeder expects each sample's fields.
feed_order = ['flowers', 'label']

main_program = fluid.default_main_program()
star_program = fluid.default_startup_program()

# Build the network plus loss/accuracy heads into the default main program.
avg_cost, acc = train_program()

# Test program — must be cloned BEFORE optimizer.minimize adds the
# backward/update ops, so evaluation runs only the forward graph.
test_program = main_program.clone(for_test=True)

optimizer = optimizer_program()
optimizer.minimize(avg_cost)

exe = fluid.Executor(place)

EPOCH_NUM = 2

# For training test cost
# For training test cost
def train_test(program, reader):
    """Run one full pass of `reader` through the evaluation `program`.

    Returns the per-batch mean of each fetched metric ([avg_cost, acc]).
    Raises ZeroDivisionError if the reader yields no batches.
    """
    feed_vars = [
        program.global_block().var(name) for name in feed_order
    ]
    eval_feeder = fluid.DataFeeder(feed_list=feed_vars, place=place)
    eval_exe = fluid.Executor(place)

    totals = [0] * len([avg_cost, acc])
    batch_count = 0
    for batch in reader():
        metrics = eval_exe.run(program=program,
                               feed=eval_feeder.feed(batch),
                               fetch_list=[avg_cost, acc])
        totals = [running + fetched[0]
                  for running, fetched in zip(totals, metrics)]
        batch_count += 1
    return [running / batch_count for running in totals]

# Directory where the trained inference model is saved after each epoch.
params_dirname = "flowers.model"

from paddle.utils.plot import Ploter

# Live cost curves drawn in the notebook (relies on %matplotlib inline).
train_prompt = "Train cost"
test_prompt = "Test cost"
plot_cost = Ploter(test_prompt,train_prompt)

# main train loop.
def train_loop():
    """Train for EPOCH_NUM epochs, evaluating and saving after each one."""
    print("start")
    feed_var_list_loop = [
        main_program.global_block().var(var_name) for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
    exe.run(star_program)

    step = 0
    for pass_id in range(EPOCH_NUM):
        for step_id, data_train in enumerate(train_reader()):
            avg_loss_value = exe.run(main_program,
                                     feed=feeder.feed(data_train),
                                     fetch_list=[avg_cost, acc])
            # Original gated this on `if step % 1 == 0`, which is always
            # true; log every step.
            plot_cost.append(train_prompt, step, avg_loss_value[0])
            print("%s, Step %d, Cost %f" %
                  (train_prompt, step, avg_loss_value[0]))
            plot_cost.plot()
            step += 1

        avg_cost_test, accuracy_test = train_test(test_program,
                                                  reader=test_reader)
        plot_cost.append(test_prompt, step, avg_cost_test)
        # BUG FIX: the original printed `test_metics[0]`, a name that does
        # not exist anywhere in this script, so the first completed epoch
        # died with a NameError.
        print("%s, Step %d, Cost %f" % (test_prompt, step, avg_cost_test))

        # save parameters
        if params_dirname is not None:
            try:
                # NOTE(review): `predict` is never defined at module scope
                # in this script, so this call raised NameError in the
                # original. Expose the softmax output of inference_program
                # (e.g. as a module-level variable) to make saving work;
                # until then, failure to save is reported instead of
                # aborting training.
                fluid.io.save_inference_model(params_dirname, ["flowers"],
                                              [predict], exe)
            except NameError as err:
                print("skip saving inference model: %s" % err)


train_loop()

 

这个是什么问题, 一直卡在这里没有动,请问有没有paddlepaddle的错误集?这样好查一些简单的错误

 

0
收藏
回复
全部评论(2)
时间顺序
l
lucywsq
#2 回复于2019-07

您好,有如下两种途径搜索问题的解决方式:

1.您可以在PaddlePaddle官网的搜索框内输入关键词,链接http://www.paddlepaddle.org.cn/start

2.或到Github的PaddlePaddle repo中检索,若有同类问题可获得答案,链接https://github.com/PaddlePaddle/Paddle

感谢您对PaddlePaddle的支持,如有问题可随时留言~

0
回复
AIStudio810261
#3 回复于2019-07

可否直接公开项目, 我们fork一下看看? 贴到这里, 缩进被破坏了. 

0
回复
在@后输入用户全名并按空格结束,可艾特全站任一用户