代码如下:
# Dataset root mounted by the PaddlePaddle AI Studio cluster job.
datasets_prefix = '/root/paddlejob/workspace/train_data/datasets/'
# NOTE(review): placeholder, not a real path — the Chinese text says
# "obtain the real dataset file path by copying the path"; confirm before use.
train_datasets = '通过路径拷贝获取真实数据集文件路径 '
# Directory where job outputs (models, logs) are written.
output_dir = "/root/paddlejob/workspace/output"
import paddle,os,random,cv2,zipfile
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Pool2D
from paddle.fluid.dygraph import Conv2D
import numpy as np
import matplotlib
# Training hyper-parameters.
batch_size = 32  # samples per batch; reset to 1 for the eval pass in main()
nums_epoch = 1   # number of training epochs
# Source archives: training images+labels, and the test images.
zip_src = '/root/paddlejob/workspace/train_data/datasets/data77571/train_and_label.zip'
zip_src1 = '/root/paddlejob/workspace/train_data/datasets/data77571/img_test.zip'
# Extraction targets (current working directory).
dst_dir = './'
dst_dir1 = './'
class Transform(object):
    """Paired image/label preprocessing hook.

    The resize step is currently disabled (commented out), so calling an
    instance is a no-op that returns its arguments unchanged; ``size`` is
    kept for when resizing is re-enabled.
    """

    def __init__(self, size=256):
        # Target square edge length for the (disabled) resize.
        self.size = size

    def __call__(self, input, label):
        # Resizing intentionally disabled; pass both arrays through untouched.
        # input = cv2.resize(input, (self.size, self.size), interpolation=cv2.INTER_LINEAR)
        # label = cv2.resize(label, (self.size, self.size), interpolation=cv2.INTER_LINEAR)
        return input, label
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0    # most recent value seen
        self.sum = 0    # weighted running total
        self.count = 0  # total weight accumulated so far
        self.avg = 0    # running mean = sum / count

    def update(self, val, n=1):
        """Record *val* observed with weight *n* and refresh the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class Basic_model(fluid.dygraph.Layer):
    """Minimal segmentation head: one 1x1 convolution mapping 3-channel
    NCHW input to ``num_classes`` score maps at the same spatial size."""

    def __init__(self, num_classes=4):
        super(Basic_model, self).__init__()
        # A pool + upsample path exists but was disabled by the author:
        # self.pool = Pool2D(pool_size=2, pool_stride=2)
        self.conv = Conv2D(num_channels=3, num_filters=num_classes, filter_size=1)

    def forward(self, inputs, label=None):
        # ``label`` is accepted for interface compatibility but unused; the
        # accuracy branch was commented out in the original implementation.
        # x = self.pool(inputs)
        # x = fluid.layers.interpolate(x, out_shape=inputs.shape[2::])
        return self.conv(inputs)
class Basic_dataloader(object):
    """Sample generator yielding (image, label) pairs for segmentation.

    Args:
        datas: list of ``(image_path, label_path)`` tuples.
        transform: optional callable ``(image, label) -> (image, label)``.
        shuffle: shuffle sample order once, at construction time.
    """

    def __init__(self, datas, transform=None, shuffle=True):
        # BUG FIX: copy before shuffling so the caller's list is not mutated.
        self.datas = list(datas)
        self.transform = transform
        self.shuffle = shuffle
        if shuffle:
            random.shuffle(self.datas)

    def preprocess(self, data, label):
        """Apply the optional transform and give the label a channel axis."""
        if self.transform:
            data, label = self.transform(data, label)
        # (H, W) -> (H, W, 1) so labels match the NHWC image layout.
        label = label[:, :, np.newaxis]
        return data, label

    def __len__(self):
        return len(self.datas)

    def __call__(self):
        for data_path, label_path in self.datas:
            # OpenCV loads BGR; convert to RGB float32 for the network.
            data = cv2.imread(data_path, cv2.IMREAD_COLOR)
            data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB).astype('float32')
            # Grayscale label image holds per-pixel class ids.
            label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE).astype('int64')
            data, label = self.preprocess(data, label)
            yield data, label
def unzip_file(zip_src, dst_dir):
    """Extract every member of *zip_src* into *dst_dir*.

    Prints 'success' on completion, or the original warning when
    *zip_src* is not a valid zip archive.
    """
    if zipfile.is_zipfile(zip_src):
        # BUG FIX: the original never closed the archive handle; the
        # context manager guarantees it is released.
        with zipfile.ZipFile(zip_src, 'r') as fz:
            fz.extractall(dst_dir)
        print('success')
    else:
        print('This is not a zip file !!!')
def Basic_SegLoss(preds, labels, ignore_index=255):
    """Masked softmax cross-entropy loss for segmentation.

    Args:
        preds: logits in NHWC layout (channel-last, as transposed by train()).
        labels: integer class map with a trailing channel axis.
        ignore_index: label value excluded from the loss.

    Returns:
        Scalar mean loss over the non-ignored pixels.
    """
    eps = np.array([10e-8], dtype='float32')
    # 1.0 where the pixel participates in the loss, 0.0 where ignored.
    mask = labels != ignore_index
    mask = fluid.layers.cast(mask, 'float32')
    # BUG FIX: the original hard-coded ignore_index=255 here, silently
    # ignoring the function's ``ignore_index`` argument.
    loss = fluid.layers.softmax_with_cross_entropy(preds, labels,
                                                   ignore_index=ignore_index)
    loss = loss * mask
    # Normalize by the fraction of valid pixels; eps guards divide-by-zero
    # when every pixel in the batch is ignored.
    avg_loss = fluid.layers.mean(loss) / (fluid.layers.mean(mask) + to_variable(eps))
    return avg_loss
def train(dataloader, model, epoch, total_batch):
    """Run one training epoch and return the mean per-sample batch loss.

    Args:
        dataloader: yields (image, label) batches with images in NHWC layout.
        model: dygraph Layer producing NCHW logits.
        epoch: current epoch number (unused here; kept for the caller).
        total_batch: batches per epoch (unused here; kept for the caller).
    """
    model.train()
    train_loss_meter = AverageMeter()
    criterion = Basic_SegLoss
    # BUG FIX: the optimizer was re-created inside the batch loop, which
    # discarded Adam's moment estimates on every step. Build it once.
    optimizer = fluid.optimizer.Adam(learning_rate=0.01,
                                     parameter_list=model.parameters())
    print("train start!")
    for image, label in dataloader():
        # NHWC -> NCHW for the convolution.
        image = fluid.layers.transpose(image, (0, 3, 1, 2))
        pred = model(image)
        # Back to NHWC so logits align with the (H, W, 1) labels.
        pred = fluid.layers.transpose(pred, (0, 2, 3, 1))
        loss = criterion(pred, label)
        loss.backward()
        optimizer.minimize(loss)
        model.clear_gradients()
        n = image.shape[0]  # batch size, used as the loss weight
        train_loss_meter.update(loss.numpy()[0], n)
    return train_loss_meter.avg
def main():
    """Unpack the datasets, train the basic model, then run a naive eval pass."""
    global batch_size
    # Create target directories (idempotent, unlike shelling out to mkdir).
    os.makedirs("img_train", exist_ok=True)
    os.makedirs("lab_train", exist_ok=True)
    os.makedirs("img_testA", exist_ok=True)
    unzip_file(zip_src, dst_dir)
    unzip_file(zip_src1, dst_dir1)
    datas = []
    datatest = []
    image_base = 'img_train'  # training images
    annos_base = 'lab_train'  # training labels
    test_base = 'img_testA'   # test images
    # File ids (basename without extension) for the train and test sets.
    ids_ = [v.split('.')[0] for v in os.listdir(image_base)]
    ids_0 = [v.split('.')[0] for v in os.listdir(test_base)]
    # Pair every training image with its label map.
    for id_ in ids_:
        img_pt0 = os.path.join(image_base, '{}.jpg'.format(id_))
        img_pt1 = os.path.join(annos_base, '{}.png'.format(id_))
        datas.append((img_pt0.replace('/home/aistudio', ''), img_pt1.replace('/home/aistudio', '')))
    # Test images have no labels; reuse the first training label as a
    # placeholder so the loader interface stays uniform. (The original also
    # built an ``img_pt3`` path from a stale loop variable and never used it.)
    for id in ids_0:
        img_pt2 = os.path.join(test_base, '{}.jpg'.format(id))
        datatest.append((img_pt2.replace('/home/aistudio', ''), datas[0][1]))
    # Show counts and a few sample entries.
    print('total:', len(datas))
    print(datas[0][0])
    print(datas[0][1])
    print(datas[10][:])
    print('test total:', len(datatest))
    place = paddle.fluid.CUDAPlace(0)
    transform = Transform(256)
    with fluid.dygraph.guard(place):
        basic_dataloader = Basic_dataloader(datas, transform=transform)
        dataloader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True)
        dataloader.set_sample_generator(basic_dataloader, batch_size=batch_size, places=place)
        total_batch = int(len(basic_dataloader)) / batch_size
        model = Basic_model()
        for epoch in range(1, nums_epoch + 1):
            print("epoch:", epoch)
            train_loss = train(dataloader, model, epoch, total_batch)
            print("train_loss:", train_loss)
        # Evaluate one image at a time.
        batch_size = 1
        test_basic_dataloader = Basic_dataloader(datatest, transform=transform)
        test_dataloader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True)
        test_dataloader.set_sample_generator(test_basic_dataloader, batch_size=batch_size, places=place)
        model.eval()
        print('eval start!')
        for data, label in test_dataloader():
            img = to_variable(data)
            # NHWC -> NCHW into the model, then back to NHWC.
            img = fluid.layers.transpose(img, (0, 3, 1, 2))
            prediction = model(img)
            prediction = fluid.layers.transpose(prediction, (0, 2, 3, 1))
            print(prediction.shape)
            # BUG FIX: drop the batch axis with a fluid op (np.squeeze on a
            # dygraph Variable is unreliable) before the per-pixel softmax.
            prediction = fluid.layers.squeeze(prediction, axes=[0])
            prediction = fluid.layers.softmax(prediction)
            # BUG FIX: the original ``rgbPic = [256, 256]`` was a 2-element
            # list, so ``rgbPic[idx][idy] = ...`` raised at runtime; allocate
            # a real HxW class map, 255 meaning "no confident class".
            rgbPic = np.full((prediction.shape[0], prediction.shape[1]), 255, dtype='int64')
            for idx in range(prediction.shape[0]):
                for idy in range(prediction.shape[1]):
                    # BUG FIX: ``range(prediction,shape[2])`` was a comma
                    # typo that would raise NameError.
                    for i in range(prediction.shape[2]):
                        if prediction[idx, idy, i] > 0.5:
                            rgbPic[idx, idy] = i
            print(rgbPic)
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
运行10分钟后的日志
只有我遇到过这种情况吗,有的时候这样,有的时候正常,今天又跑了半个多小时,没反应,既不运行也不报错
我遇到过,好像除了重启没有太好的办法……
谢谢坑神