1. The sigmoid(x) function is used as the activation: f(x) = 1/(1 + e^(-x)),
whose derivative satisfies f'(x) = f(x)(1 - f(x)).
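A quick numerical sanity check of this identity (a minimal sketch; sigmoid_deriv is just an illustrative helper name, and sigmoid mirrors the definition used in the BP code below) compares it against a central finite difference:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_deriv(x):
    # analytic derivative via the identity f'(x) = f(x) * (1 - f(x))
    s = sigmoid(x)
    return s * (1 - s)

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
h = 1e-6
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)   # central finite difference
print(np.max(np.abs(numeric - sigmoid_deriv(x))))        # prints a value close to zero: the two agree

The factors res*(1-res) and out_in[i]*(1-out_in[i]) in the weight-update lines of the BP function below are exactly this derivative, evaluated at the output neuron and at the hidden neurons.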
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def BP(data_tr, data_te, maxiter=1000):
    data_tr, data_te = np.array(data_tr), np.array(data_te)

    net_in = np.array([0.0, 0, -1])        # input-layer vector: two features plus the constant -1 for the threshold
    out_in = np.array([0.0, 0, 0, 0, -1])  # input to the output layer, i.e. the hidden-layer outputs plus the threshold term

    w_mid = np.random.rand(3, 4)   # hidden-layer neurons' weights & thresholds
    w_out = np.random.rand(5)      # output-layer neuron's weights & threshold

    delta_w_out = np.zeros([5])    # corrections to the output-layer weights & threshold
    delta_w_mid = np.zeros([3, 4]) # corrections to the hidden-layer weights & thresholds

    yita = 1.75                    # η: learning rate
    Err = np.zeros([maxiter])      # mean error over all samples after each iteration

    # 1. number of passes over the whole training set
    for it in range(maxiter):

        # error of each individual sample
        err = np.zeros([len(data_tr)])

        # 2. one pass over the training set
        for j in range(len(data_tr)):
            net_in[:2] = data_tr[j, :2]   # store the first two attribute values of the current sample
            real = data_tr[j, 2]          # its target value

            # 3. train on the current sample
            for i in range(4):
                out_in[i] = sigmoid(sum(net_in * w_mid[:, i]))  # compute the output-layer inputs (hidden outputs)
            res = sigmoid(sum(out_in * w_out))                  # network output for this sample

            err[j] = abs(real - res)

            # -- adjust the output-layer weights and threshold first
            delta_w_out = yita * res * (1 - res) * (real - res) * out_in  # weight adjustment
            delta_w_out[4] = -yita * res * (1 - res) * (real - res)       # threshold adjustment
            w_out = w_out + delta_w_out

            # -- then adjust the hidden-layer weights and thresholds
            for i in range(4):
                # weight adjustment
                delta_w_mid[:, i] = yita * out_in[i] * (1 - out_in[i]) * w_out[i] * res * (1 - res) * (real - res) * net_in
                # threshold adjustment
                delta_w_mid[2, i] = -yita * out_in[i] * (1 - out_in[i]) * w_out[i] * res * (1 - res) * (real - res)
            w_mid = w_mid + delta_w_mid

        Err[it] = err.mean()

    plt.plot(Err)   # training-error curve over the iterations
    plt.show()

    # store the prediction error of each test sample
    err_te = np.zeros([len(data_te)])

    # predict the len(data_te) test samples
    for j in range(len(data_te)):
        net_in[:2] = data_te[j, :2]   # features of the current test sample
        real = data_te[j, 2]          # true result

        # forward pass: multiply net_in with each column of w_mid, then apply the output layer
        for i in range(4):
            out_in[i] = sigmoid(sum(net_in * w_mid[:, i]))
        res = sigmoid(sum(out_in * w_out))

        err_te[j] = abs(real - res)

    plt.plot(err_te)  # prediction-error curve over the test samples
    plt.show()
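The listing above only defines BP; how the training and test sets are loaded is not shown here (pandas is presumably imported for that). A minimal way to exercise the function is to pass it a small synthetic two-input dataset such as XOR; the row layout [feature1, feature2, target] is an assumption read off from how data_tr is indexed inside BP:

# Hypothetical driver code: the XOR dataset and the call below are illustrative,
# not taken from the post. Each row is [x1, x2, target], matching
# data_tr[j, :2] and data_tr[j, 2] inside BP.
xor_data = [[0, 0, 0],
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 0]]

# Train and test on the same four patterns, just to see the two error curves.
BP(xor_data, xor_data, maxiter=1000)

Any array-like with the same three-column layout (two features followed by a target) can be passed in the same way.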
