Homework 2-1
Challenge problem: extend the housing-price regressor to a two-layer fully connected network, with a hand-written forward and backward pass, trained by mini-batch SGD. The full solution:
import matplotlib.pyplot as plt
import numpy as np


def load_data():
    # Load the raw data from file
    datafile = './work/housing.data'
    data = np.fromfile(datafile, sep=' ')
    feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
                     'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
    feature_num = len(feature_names)
    # Reshape the flat array into shape [N, 14]
    data = data.reshape([data.shape[0] // feature_num, feature_num])
    # Split into training and test sets: 80% for training, 20% for testing.
    # The two sets must not overlap.
    ratio = 0.8
    offset = int(data.shape[0] * ratio)
    training_data = data[:offset]
    # Compute max, min, and mean over the training set only
    maximums = training_data.max(axis=0)
    minimums = training_data.min(axis=0)
    avgs = training_data.sum(axis=0) / training_data.shape[0]
    # Normalize every column using the training-set statistics
    for i in range(feature_num):
        data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
    # Apply the train/test split
    training_data = data[:offset]
    test_data = data[offset:]
    return training_data, test_data


class Network_two_layer(object):
    def __init__(self, num_of_weights):
        np.random.seed(0)
        # First (hidden) layer: 13 inputs -> 13 units
        self.w0 = np.random.randn(num_of_weights, 13)
        self.b0 = np.zeros((1, 13))
        # Second (output) layer: 13 units -> 1 output
        self.w1 = np.random.randn(13, 1)
        self.b1 = 0.

    def forward(self, x):
        # Note: there is no nonlinear activation between the layers, so this
        # two-layer network is mathematically equivalent to a single linear layer.
        x1 = np.dot(x, self.w0) + self.b0
        z = np.dot(x1, self.w1) + self.b1
        self.x1 = x1  # cache the hidden activations for backpropagation
        return z

    def loss(self, z, y):
        # Mean squared error
        error = z - y
        num_samples = error.shape[0]
        cost = np.sum(error * error) / num_samples
        return cost

    def gradient(self, x, y):
        # Backpropagation. The constant factor 2 from d/dz mean((z-y)^2) is
        # dropped throughout; it only rescales the gradients and is absorbed
        # into the learning rate.
        z = self.forward(x)
        N = x.shape[0]
        gradient_x1 = 1. / N * np.dot(z - y, self.w1.T)
        self.gradient_w1 = 1. / N * np.dot(self.x1.T, z - y)
        self.gradient_b1 = 1. / N * np.sum(z - y)
        self.gradient_w0 = np.dot(x.T, gradient_x1)
        self.gradient_b0 = np.sum(gradient_x1, axis=0)[np.newaxis, :]

    def update(self, eta=0.01):
        self.w1 = self.w1 - eta * self.gradient_w1
        self.b1 = self.b1 - eta * self.gradient_b1
        self.w0 = self.w0 - eta * self.gradient_w0
        self.b0 = self.b0 - eta * self.gradient_b0

    def train(self, training_data, num_epoches, batch_size=10, eta=0.01):
        n = len(training_data)
        losses = []
        for epoch_id in range(num_epoches):
            # Shuffle the training data before each epoch, then take
            # batch_size rows at a time
            np.random.shuffle(training_data)
            mini_batches = [training_data[k:k + batch_size]
                            for k in range(0, n, batch_size)]
            for iter_id, mini_batch in enumerate(mini_batches):
                x = mini_batch[:, :-1]
                y = mini_batch[:, -1:]
                a = self.forward(x)
                loss = self.loss(a, y)
                self.gradient(x, y)
                self.update(eta)
                losses.append(loss)
                print('Epoch {:3d} / iter {:3d}, loss = {:.4f}'.format(
                    epoch_id, iter_id, loss))
        return losses


# Load the data
train_data, test_data = load_data()
# Create the network
net = Network_two_layer(13)
# Train
losses = net.train(train_data, num_epoches=50, batch_size=100, eta=0.1)

# Plot the loss curve
plot_x = np.arange(len(losses))
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.show()
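The script splits off a 20% test set but never uses it. A minimal follow-up sketch (my addition, not part of the original submission) that scores the trained net on that held-out split, in the same normalized units as the training loss:

# Evaluate on the held-out 20% split ('net' and 'test_data' come from
# the script above; the loss is in normalized units)
test_x = test_data[:, :-1]
test_y = test_data[:, -1:]
test_loss = net.loss(net.forward(test_x), test_y)
print('Test loss = {:.4f}'.format(test_loss))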
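Because the backward pass in gradient() is derived by hand, a finite-difference check is cheap insurance that the algebra is right. Below is a minimal sketch (my addition, assuming the class and train_data above are in scope); since the analytic gradients drop the constant factor 2 from d/dw mean((z-y)^2), they should match half of the numerical gradient:

def numeric_grad_w1(net, x, y, eps=1e-6):
    # Central-difference estimate of d(loss)/d(w1), one entry at a time
    grad = np.zeros_like(net.w1)
    for i in range(net.w1.shape[0]):
        for j in range(net.w1.shape[1]):
            old = net.w1[i, j]
            net.w1[i, j] = old + eps
            loss_plus = net.loss(net.forward(x), y)
            net.w1[i, j] = old - eps
            loss_minus = net.loss(net.forward(x), y)
            net.w1[i, j] = old
            grad[i, j] = (loss_plus - loss_minus) / (2 * eps)
    return grad

check_net = Network_two_layer(13)
xs, ys = train_data[:16, :-1], train_data[:16, -1:]
check_net.gradient(xs, ys)  # fills check_net.gradient_w1
diff = np.max(np.abs(check_net.gradient_w1 - 0.5 * numeric_grad_w1(check_net, xs, ys)))
print('max |analytic - 0.5 * numeric| = {:.2e}'.format(diff))  # should be close to 0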