Eye disease recognition test: AttributeError: 'numpy.ndarray' object has no attribute

Here is the eye disease recognition code:

from PIL import Image
import cv2
import os
import random
import paddle
import numpy as np
from matplotlib import pyplot as plt
from paddle.nn import Conv2D, MaxPool2D, Linear, Dropout
import paddle.nn.functional as F

DATADIR = r'F:\pythonProject2\yanji\PALM-Training400\PALM-Training400'
# Filenames starting with N are normal fundus images; those starting with P are pathological
file1 = 'N0012.jpg'
file2 = 'P0095.jpg'

# Read the images
img1 = Image.open(os.path.join(DATADIR, file1))
img1 = np.array(img1)
img2 = Image.open(os.path.join(DATADIR, file2))
img2 = np.array(img2)


# Preprocess the image data read from disk
def transform_img(img):
    # Resize the image to 224x224
    img = cv2.resize(img, (224, 224))
    # The image is read in [H, W, C] layout;
    # transpose it to [C, H, W]
    img = np.transpose(img, (2, 0, 1))
    img = img.astype('float32')
    # Rescale the pixel values to the range [-1.0, 1.0]
    img = img / 255.
    img = img * 2.0 - 1.0
    return img
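
# (Illustrative check, not part of the original post: transform_img should map any
# cv2.imread result to a (3, 224, 224) float32 array with values in [-1.0, 1.0].)
# out = transform_img(cv2.imread(os.path.join(DATADIR, file1)))
# print(out.shape, out.dtype, out.min(), out.max())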


# Define the training-set data loader
def data_loader(datadir, batch_size=10, mode='train'):
    # List the files under datadir; every file will be read
    filenames = os.listdir(datadir)

    def reader():
        if mode == 'train':
            # Shuffle the data order during training
            random.shuffle(filenames)
        batch_imgs = []
        batch_labels = []
        for name in filenames:
            filepath = os.path.join(datadir, name)
            img = cv2.imread(filepath)
            img = transform_img(img)
            if name[0] == 'H' or name[0] == 'N':
                # Filenames starting with H are high myopia, N normal vision;
                # neither is pathological, so both are negative samples with label 0
                label = 0
            elif name[0] == 'P':
                # Filenames starting with P are pathological myopia: positive samples, label 1
                label = 1
            else:
                raise ValueError('Unexpected file name')
            # Append each sample to the batch lists as it is read
            batch_imgs.append(img)
            batch_labels.append(label)
            if len(batch_imgs) == batch_size:
                # When the list length reaches batch_size, treat the collected
                # data as one mini-batch and yield it from the generator
                imgs_array = np.array(batch_imgs).astype('float32')
                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
                yield imgs_array, labels_array
                batch_imgs = []
                batch_labels = []

        if len(batch_imgs) > 0:
            # Pack the leftover samples (fewer than batch_size) into one final mini-batch
            imgs_array = np.array(batch_imgs).astype('float32')
            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
            yield imgs_array, labels_array

    return reader


# Define the validation-set data loader
def valid_data_loader(datadir, csvfile, batch_size=10, mode='valid'):
    # The training loader derives labels from filenames; the validation loader
    # reads each image's label from csvfile instead.
    # Inspect the extracted validation label file to see what it contains.
    # Each line is one sample: the first column is the image id, the second
    # the filename, the third the label; the fourth and fifth columns are the
    # Fovea coordinates, which are irrelevant to the classification task.
    # ID,imgName,Label,Fovea_X,Fovea_Y
    # 1,V0001.jpg,0,1157.74,1019.87
    # 2,V0002.jpg,1,1285.82,1080.47
    # Open the csvfile holding the validation labels and read its contents
    filelists = open(csvfile).readlines()


    def reader():
        batch_imgs = []
        batch_labels = []
        for line in filelists[1:]:
            line = line.strip().split(',')
            name = line[1]
            label = int(float(line[2]))
            # Load the image by filename and preprocess it
            filepath = os.path.join(datadir, name)
            img = cv2.imread(filepath)
            img = transform_img(img)
            # Append each sample to the batch lists as it is read
            batch_imgs.append(img)
            batch_labels.append(label)
            if len(batch_imgs) == batch_size:
                # When the list length reaches batch_size, treat the collected
                # data as one mini-batch and yield it from the generator
                imgs_array = np.array(batch_imgs).astype('float32')
                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
                yield imgs_array, labels_array
                batch_imgs = []
                batch_labels = []

        if len(batch_imgs) > 0:
            # Pack the leftover samples (fewer than batch_size) into one final mini-batch
            imgs_array = np.array(batch_imgs).astype('float32')
            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
            yield imgs_array, labels_array

    return reader


train_loader = data_loader(DATADIR,
                           batch_size=10, mode='train')
data_reader = train_loader()
data = next(data_reader)
# print(data[0].shape, data[1].shape)

eval_loader = data_loader(DATADIR,
                          batch_size=10, mode='eval')
data_reader = eval_loader()
data = next(data_reader)
# print(data[0].shape, data[1].shape)


DATADIR2 = r'F:\pythonProject2\yanji\PALM-Validation400'
CSVFILE = r'F:\pythonProject2\yanji\PALM-Validation-GT\labels.csv'
# Number of training epochs
EPOCH_NUM = 5


# Define the training loop
def train_pm(model, optimizer):
    # Train on GPU 0
    # use_gpu = True
    # paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')

    print('start training ... ')
    model.train()
    # Create the data readers: one for training, one for validation
    train_loader = data_loader(DATADIR, batch_size=10, mode='train')
    valid_loader = valid_data_loader(DATADIR2, CSVFILE)
    for epoch in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_loader()):
            x_data, y_data = data
            img = paddle.to_tensor(x_data)
            label = paddle.to_tensor(y_data)
            # Run the forward pass to get predictions
            logits = model(img)
            loss = F.binary_cross_entropy_with_logits(logits, label)
            avg_loss = paddle.mean(loss)

            if batch_id % 20 == 0:
                print("epoch: {}, batch_id: {}, loss is: {:.4f}".format(epoch, batch_id, float(avg_loss.numpy())))
            # Backpropagate, update the weights, and clear the gradients
            avg_loss.backward()
            optimizer.step()
            optimizer.clear_grad()

        model.eval()
        accuracies = []
        losses = []
        for batch_id, data in enumerate(valid_loader()):
            x_data, y_data = data
            img = paddle.to_tensor(x_data)
            label = paddle.to_tensor(y_data)
            # Run the forward pass to get predictions
            logits = model(img)
            # Binary classification: the sigmoid output is split into two classes at 0.5
            # Compute sigmoid probabilities for prediction; compute the loss from the logits
            pred = F.sigmoid(logits)
            loss = F.binary_cross_entropy_with_logits(logits, label)
            # Probability of the negative class: 1.0 - pred
            pred2 = pred * (-1.0) + 1.0
            # Concatenate the two class probabilities along axis 1
            pred = paddle.concat([pred2, pred], axis=1)
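            # e.g. pred = [[0.8], [0.3]] -> pred2 = [[0.2], [0.7]]; after the concat,
            # pred = [[0.2, 0.8], [0.7, 0.3]]: column 0 is P(label=0), column 1 is P(label=1)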
            acc = paddle.metric.accuracy(pred, paddle.cast(label, dtype='int64'))

            accuracies.append(acc.numpy())
            losses.append(loss.numpy())
        print("[validation] accuracy/loss: {:.4f}/{:.4f}".format(np.mean(accuracies), np.mean(losses)))
        model.train()

        paddle.save(model.state_dict(), 'palm.pdparams')
        paddle.save(optimizer.state_dict(), 'palm.pdopt')


def evaluation(model, params_file_path):
    # Evaluate on GPU 0
    # use_gpu = True
    # paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')

    print('start evaluation .......')

    # Load the model parameters
    model_state_dict = paddle.load(params_file_path)
    model.load_dict(model_state_dict)

    model.eval()
    eval_loader = data_loader(DATADIR,
                              batch_size=10, mode='eval')

    acc_set = []
    avg_loss_set = []
    for batch_id, data in enumerate(eval_loader()):
        x_data, y_data = data
        img = paddle.to_tensor(x_data)
        label = paddle.to_tensor(y_data)
        y_data = y_data.astype(np.int64)
        label_64 = paddle.to_tensor(y_data)
        # Compute predictions and accuracy
        prediction, acc = model(img, label_64)
        # Compute the loss
        loss = F.binary_cross_entropy_with_logits(prediction, label)
        avg_loss = paddle.mean(loss)
        acc_set.append(float(acc.numpy()))
        avg_loss_set.append(float(avg_loss.numpy()))
    # Average accuracy over the batches
    acc_val_mean = np.array(acc_set).mean()
    avg_loss_val_mean = np.array(avg_loss_set).mean()

    print('loss={:.4f}, acc={:.4f}'.format(avg_loss_val_mean, acc_val_mean))


# Define the LeNet network
class LeNet(paddle.nn.Layer):
    def __init__(self, num_classes=1):
        super(LeNet, self).__init__()

        # Convolution/pooling blocks: each conv layer uses a Sigmoid activation, followed by 2x2 max pooling
        self.conv1 = Conv2D(in_channels=3, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        # Third convolutional layer
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        # Fully connected layers; the first one has 64 output neurons
        self.fc1 = Linear(in_features=300000, out_features=64)
        # The second FC layer's output size is the number of class labels
        self.fc2 = Linear(in_features=64, out_features=num_classes)

    # Forward pass of the network
    def forward(self, x, label=None):
        x = self.conv1(x)
        x = F.sigmoid(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = F.sigmoid(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = F.sigmoid(x)
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc1(x)
        x = F.sigmoid(x)
        x = self.fc2(x)
        if label is not None:
            acc = paddle.metric.accuracy(input=x, label=label)
            return x, acc
        else:
            return x
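
# (Shape trace added for clarity: why fc1 needs in_features=300000 for a [3, 224, 224] input)
# conv1 5x5: 224 - 5 + 1 = 220  -> [6, 220, 220]
# pool1 2x2: 220 / 2     = 110  -> [6, 110, 110]
# conv2 5x5: 110 - 5 + 1 = 106  -> [16, 106, 106]
# pool2 2x2: 106 / 2     = 53   -> [16, 53, 53]
# conv3 4x4: 53 - 4 + 1  = 50   -> [120, 50, 50]
# flatten:   120 * 50 * 50 = 300000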


# Create the model
# # Launch the training process
# opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameters=model.parameters())
# train_pm(model, optimizer=opt)
# evaluation(model, params_file_path="palm.pdparams")

def load_image(img_path):
    # Read the image from img_path and convert it to grayscale
    im = Image.open(img_path).convert('L')
    # im = im.resize((28, 28), Image.ANTIALIAS)
    im = cv2.resize(im, (224, 224))
    im = np.array(im).reshape(1, -1).astype(np.float32)
    # Normalize the image to match the dataset's value range
    im = im / 127.5 - 1
    return im
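
# (Note added for clarity: load_image returns a flattened grayscale array of shape
# [1, 224*224], while LeNet.forward above expects input shaped [N, 3, 224, 224];
# cv2.resize also operates on numpy arrays, not PIL Image objects.)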


Here is the model test code:

model = LeNet(num_classes=1)
# model_state_dict = paddle.load('palm.pdparams')
img_path = r'F:\pythonProject2\yanji\PALM-Validation400\V0001.jpg'
# model.set_state_dict(model_state_dict)
model.eval()
im = cv2.imread(img_path)
# tensor_img = transform_img(im)
tensor_img = load_image(im)
tensor_img = np.expand_dims(im)
result = model(paddle.to_tensor(im))
lab = F.sigmoid(result)
if lab.numpy()<0.5:
    lab = 0
else:
    lab = 1
print(lab)
print("本次预测的结果是", lab)

Running it produces the following error:

Traceback (most recent call last):
  File "F:\python3.9\lib\site-packages\PIL\Image.py", line 2916, in open
    fp.seek(0)
AttributeError: 'numpy.ndarray' object has no attribute 'seek'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "F:\pythonProject2\main.py", line 304, in <module>
    tensor_img = load_image(im)
  File "F:\pythonProject2\main.py", line 287, in load_image
    im = Image.open(img_path).convert('L')
  File "F:\python3.9\lib\site-packages\PIL\Image.py", line 2918, in open
    fp = io.BytesIO(fp.read())
AttributeError: 'numpy.ndarray' object has no attribute 'read'

Could anyone tell me what is causing this, and how to fix it?
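
For context when reading the traceback: load_image calls Image.open(img_path), which expects a file path or file-like object, but the test code passes it im, the numpy array returned by cv2.imread. A minimal sketch of a test block that reuses the training-time preprocessing instead (an illustration pieced together from the code above, not a verified fix):

model = LeNet(num_classes=1)
model.eval()
im = cv2.imread(img_path)                        # ndarray in [H, W, C] layout
tensor_img = transform_img(im)                   # [3, 224, 224] float32 in [-1, 1]
tensor_img = np.expand_dims(tensor_img, axis=0)  # np.expand_dims needs an explicit axis
result = model(paddle.to_tensor(tensor_img))     # feed the preprocessed batch, not im
lab = 0 if F.sigmoid(result).numpy() < 0.5 else 1
print("Predicted label:", lab)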

All comments (2)
dragon_learner
#2 · Replied 2021-09

The traceback shows the failure happening inside PIL's Image.py, so it may be a version-incompatibility issue; you could try a different Python 3 version, or update the Pillow (PIL) library.

李长安
#3 · Replied 2021-12

Has this been resolved?
