(1) 初始化: $\eta$, $\alpha$, pass=0, max, me; (2) 随机地在 $[-0.3, 0.3]$ 范围内给全部权值和神经元的阈值 $w_{ij}^{(n)}$ 赋初始值; (3) 判断第 2 层至第 M 层各个神经元输入端连接权值是否满足条件: 满足则转 (4), 否则减小直到满足; (4) $P = 0$, $E_n$, pass = pass + 1
X 矩阵为: X=[[1,1,0,0,0,0,0],[0,1,1,0,1,1,1],[0,0,1,1,1,1,1],[1,0,1,1,0,1,0],[1,0,0,1,1,1,1],[1,1,0,1,1,1,1],[0,0,1,1,1,0,0],[1,1,1,1,1,1,1],[1,0,1,1,1,1,1]]; Y 标签为: Y=[1,0,1,0,1,0,1,0,1]
# -*- coding:utf-8 -*-
#!/usr/bin/python
import numpy as np
import math
class BP():
    """A minimal fully-connected network with one hidden layer, trained by
    per-sample (stochastic) gradient descent — classic back-propagation.

    Fixes over the original:
      * weights are initialised uniformly at random in [-0.3, 0.3) instead of
        all zeros (zero init cannot break symmetry between hidden units);
      * the output-weight update descended the gradient (the original used
        ``+=`` on the positive gradient, i.e. gradient *ascent*);
      * the hidden-layer delta is chained through ``self.outputWeight`` rather
        than through the last loop iteration's weight *update*.
    """

    def __init__(self, hidden_n, output_n, learningrate, epoch):
        '''Store BP hyper-parameters; weights are allocated lazily in train().'''
        self.hidden_n = hidden_n          # hidden units (one extra added for bias)
        self.output_n = output_n          # output units
        self.hideWeight = None            # (inputN+1) x (hidden_n+1) matrix
        self.outputWeight = None          # (hidden_n+1) x output_n matrix
        self.learningrate = learningrate  # SGD step size
        self.inputN = None                # input width including bias column
        self.hideOutput = None            # cached hidden activations (last forward pass)
        self.output = None                # cached output activations (last forward pass)
        self.loss = None                  # last per-sample loss
        self.epoch = epoch                # maximum number of passes over the data
        self.limitloss = 0.01             # per-sample loss below which no update is made

    def initWeight(self, n, m, fill=None):
        '''Return an n x m weight matrix.

        With ``fill=None`` (the default) entries are drawn uniformly from
        [-0.3, 0.3), as the classic BP recipe requires; a numeric ``fill``
        yields a constant matrix (kept for backward compatibility with the
        old ``fill=0.0`` signature).
        '''
        if fill is not None:
            return np.full((n, m), float(fill))
        return np.random.uniform(-0.3, 0.3, (n, m))

    def sigmoid(self, x):
        '''Logistic activation 1 / (1 + e^-x).'''
        return 1.0 / (1.0 + np.exp(-x))

    def linear(self, x):
        '''Identity activation (unused by forward pass; kept for API compatibility).'''
        return x

    def sigmoidDerivative(self, x):
        '''Sigmoid derivative expressed in terms of its OUTPUT x: x * (1 - x).'''
        return x - x ** 2

    def initBp(self, inputN):
        '''Allocate randomly-initialised weight matrices for ``inputN`` raw inputs
        (a bias column is appended, hence the +1s).'''
        self.inputN = inputN + 1
        self.hideWeight = self.initWeight(self.inputN, self.hidden_n + 1)
        self.outputWeight = self.initWeight(self.hidden_n + 1, self.output_n)

    def forwardPropagation(self, X):
        '''Forward pass for one bias-augmented sample X; caches both layers'
        activations for use by lossFun/backPropagation.'''
        self.hideOutput = self.sigmoid(np.dot(X, self.hideWeight))
        self.output = self.sigmoid(np.dot(self.hideOutput, self.outputWeight))

    def lossFun(self, Y):
        '''Squared-error loss 0.5 * ||Y - output||^2 against the cached output.'''
        self.loss = 0.5 * np.sum((Y - self.output) ** 2)
        return self.loss

    def backPropagation(self, X, Y):
        '''One gradient-DESCENT step for the sample (X, Y) using the activations
        cached by forwardPropagation.'''
        # delta at the output layer: dE/dnet_out, shape (output_n,)
        outputDelta = (self.output - Y) * self.sigmoidDerivative(self.output)
        # delta at the hidden layer: chain through the (pre-update) output
        # weights, shape (hidden_n+1,)
        hiddenDelta = np.dot(self.outputWeight, outputDelta) \
            * self.sigmoidDerivative(self.hideOutput)
        # weight updates are outer products of layer input and layer delta
        self.outputWeight -= self.learningrate * np.outer(self.hideOutput, outputDelta)
        self.hideWeight -= self.learningrate * np.outer(X, hiddenDelta)

    def train(self, X, Y):
        '''Train for up to self.epoch passes over (X, Y) with per-sample SGD.

        X: (samples, features) array; Y: (samples, output_n) targets (a 1-D Y
        is reshaped to a column). Samples whose loss is already below
        self.limitloss are skipped rather than aborting the whole epoch.
        '''
        inputN = X.shape[1]
        samplesN = X.shape[0]
        X = np.c_[X, np.ones(samplesN)]            # append bias column
        Y = np.asarray(Y).reshape(samplesN, -1)    # tolerate 1-D label vectors
        self.initBp(inputN)
        for _ in range(self.epoch):
            for one in range(samplesN):
                x, y = X[one, :], Y[one, :]
                self.forwardPropagation(x)
                if self.lossFun(y) > self.limitloss:
                    self.backPropagation(x, y)

    def predict(self, X):
        '''Forward-propagate each row of X; print each output (as before) and
        return the list of output arrays (backward-compatible addition —
        the original returned None).'''
        samplesN = X.shape[0]
        X = np.c_[X, np.ones(samplesN)]  # append bias column, matching train()
        results = []
        for one in range(samplesN):
            self.forwardPropagation(X[one, :])
            print(self.output)
            results.append(self.output.copy())
        return results
# ---- demo: train the BP network on 7-segment digit patterns ----
X = [
    [1, 1, 0, 0, 0, 0, 0],
    [0, 1, 1, 0, 1, 1, 1],
    [0, 0, 1, 1, 1, 1, 1],
    [1, 0, 1, 1, 0, 1, 0],
    [1, 0, 0, 1, 1, 1, 1],
    [1, 1, 0, 1, 1, 1, 1],
    [0, 0, 1, 1, 1, 0, 0],
    [1, 1, 1, 1, 1, 1, 1],
    [1, 0, 1, 1, 1, 1, 1],
]
Y = [[1], [0], [1], [0], [1], [0], [1], [0], [1]]
xtest = [[1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 1]]
print(X, "\n", Y)
XTrain, YTrain = np.array(X), np.array(Y)
xtest = np.array(xtest)
print(XTrain.shape[1])
print(XTrain)
# hyper-parameters: hidden units, output units, learning rate, training epochs
hidden_n, output_n, learningrate, epoch = 3, 1, 0.5, 1000
newbp = BP(hidden_n, output_n, learningrate, epoch)
newbp.train(XTrain, YTrain)
newbp.predict(xtest)