# 全新二层神经结构建立，用Python就够了

import numpy as np

def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x)); maps reals to (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)

（第0层，即L=0），输入层神经元数量=3

（第1层，即L=1），隐藏层神经元数量=5

（第2层，即L=2），输出层神经元数量=1

def setParameters(X, Y, hidden_size):
    """He-initialise weights and zero biases for a 1-hidden-layer network.

    X is (input_size, m), Y is (output_size, m) — layer sizes are read from
    their first axes.  Returns {'W1', 'W2', 'b1', 'b2'} with shapes
    (hidden, input), (output, hidden), (hidden, 1), (output, 1).
    """
    np.random.seed(3)  # fixed seed so every run starts from the same weights
    n_in = X.shape[0]
    n_out = Y.shape[0]
    # NOTE: the draw order (W1 first, then W2) must stay fixed to reproduce
    # the original initial values under the same seed.
    W1 = np.random.randn(hidden_size, n_in) * np.sqrt(2 / n_in)
    b1 = np.zeros((hidden_size, 1))
    W2 = np.random.randn(n_out, hidden_size) * np.sqrt(2 / hidden_size)
    b2 = np.zeros((n_out, 1))
    return {'W1': W1, 'W2': W2, 'b1': b1, 'b2': b2}

# Python implementation

# NOTE(review): the two expressions below are illustration leftovers from the
# article (He initialisation vs. small-constant initialisation of W2).  They
# reference `output_size` / `hidden_size`, which are not defined at module
# scope, so this file fails if executed top-to-bottom — presumably they were
# meant as a comparison snippet only; confirm before running.
np.random.randn(output_size, hidden_size)*np.sqrt(2/hidden_size)

np.random.randn(output_size, hidden_size)*0.01

def forwardPropagation(X, params):
    """One forward pass: tanh hidden layer, sigmoid output.

    X is (input_size, m); params holds W1, b1, W2, b2.  Returns (y, cache)
    where cache stores Z1, A1, Z2 and y for reuse during backpropagation.
    """
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2
    y = sigmoid(Z2)
    return y, {'Z1': Z1, 'Z2': Z2, 'A1': A1, 'y': y}

def cost(predict, actual):
    """Binary cross-entropy summed over outputs, averaged over the m examples.

    `predict` and `actual` are (output_size, m); entries of `predict` must lie
    strictly in (0, 1) or the logs diverge.
    """
    m = actual.shape[1]
    log_likelihood = actual * np.log(predict) + (1 - actual) * np.log(1 - predict)
    return np.squeeze(-np.sum(log_likelihood) / m)

def backPropagation(X, Y, params, cache):
    """Gradients of the cross-entropy loss w.r.t. W1, b1, W2, b2.

    Uses the sigmoid+cross-entropy shortcut dZ2 = y - Y and the tanh
    derivative 1 - A1**2 for the hidden layer.  All gradients are averaged
    over the m examples (columns of X).
    """
    m = X.shape[1]
    inv_m = 1.0 / m
    dZ2 = cache['y'] - Y
    dW2 = inv_m * (dZ2 @ cache['A1'].T)
    db2 = inv_m * dZ2.sum(axis=1, keepdims=True)
    dZ1 = (params['W2'].T @ dZ2) * (1 - cache['A1'] ** 2)
    dW1 = inv_m * (dZ1 @ X.T)
    db1 = inv_m * dZ1.sum(axis=1, keepdims=True)
    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}

def backPropagation(X, Y, params, cache) 中的 params 和 cache 是什么？params 是模型参数（权值和偏差）；cache 是前向传播中储存的中间数值（Z1、A1、Z2、y），保存它们就是为了在反向传播中复用。

def updateParameters(gradients, params, learning_rate = 1.2):
    """One gradient-descent step; returns a fresh parameter dict.

    For each parameter P, the matching gradient is gradients['dP'];
    the inputs are not mutated.
    """
    def step(name):
        # new value = old value minus learning_rate times its gradient
        return params[name] - learning_rate * gradients['d' + name]

    return {'W1': step('W1'), 'W2': step('W2'), 'b1': step('b1'), 'b2': step('b2')}

def fit(X, Y, learning_rate, hidden_size, number_of_iterations = 5000):
    """Train the 2-layer network with full-batch gradient descent.

    X is (input_size, m), Y is (output_size, m).  hidden_size is a
    hyperparameter fixed before training.  Returns (params, cost_): the
    learned parameters and the training cost recorded at every iteration.
    """
    params = setParameters(X, Y, hidden_size)
    cost_ = []
    for _ in range(number_of_iterations):
        prediction, cache = forwardPropagation(X, params)
        cost_.append(cost(prediction, Y))
        gradients = backPropagation(X, Y, params, cache)
        params = updateParameters(gradients, params, learning_rate)
    return params, cost_

hidden_size 指隐藏层中神经元的数量。由于它需要在学习开始前设定，它是一个超参数。return params, cost_ 中的 params 是训练得到的参数，cost_ 为每次迭代记录的成本。

import sklearn.datasets

# Two interleaving half-moon clusters: 500 points with binary labels.
# BUG FIX: the original pasted both assignments onto one line
# ("...noise=.2)X, Y = X.T..."), which is a syntax error.
X, Y = sklearn.datasets.make_moons(n_samples=500, noise=.2)
# Transpose to column-per-example layout: X -> (2, 500), Y -> (1, 500).
X, Y = X.T, Y.reshape(1, Y.shape[0])

X为输入值，Y为实际输出值。

# Train: learning_rate=0.3, 5 hidden neurons, 5000 iterations.
params, cost_ = fit(X, Y, 0.3, 5, 5000)

import matplotlib.pyplot as plt

# The curve should decrease monotonically (modulo noise) as training converges.
plt.plot(cost_)

# Observed costs reported in the article (first vs. last iteration):
first_cost = 0.7383781203733911

last_cost = 0.06791109327547613

import numpy as np

def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x)); maps reals to (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)

def setParameters(X, Y, hidden_size):
    """He-initialise weights and zero biases for a 1-hidden-layer network.

    X is (input_size, m), Y is (output_size, m) — layer sizes are read from
    their first axes.  Returns {'W1', 'W2', 'b1', 'b2'} with shapes
    (hidden, input), (output, hidden), (hidden, 1), (output, 1).
    """
    np.random.seed(3)  # fixed seed so every run starts from the same weights
    n_in = X.shape[0]
    n_out = Y.shape[0]
    # NOTE: the draw order (W1 first, then W2) must stay fixed to reproduce
    # the original initial values under the same seed.
    W1 = np.random.randn(hidden_size, n_in) * np.sqrt(2 / n_in)
    b1 = np.zeros((hidden_size, 1))
    W2 = np.random.randn(n_out, hidden_size) * np.sqrt(2 / hidden_size)
    b2 = np.zeros((n_out, 1))
    return {'W1': W1, 'W2': W2, 'b1': b1, 'b2': b2}

def forwardPropagation(X, params):
    """One forward pass: tanh hidden layer, sigmoid output.

    X is (input_size, m); params holds W1, b1, W2, b2.  Returns (y, cache)
    where cache stores Z1, A1, Z2 and y for reuse during backpropagation.
    """
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2
    y = sigmoid(Z2)
    return y, {'Z1': Z1, 'Z2': Z2, 'A1': A1, 'y': y}

def cost(predict, actual):
    """Binary cross-entropy summed over outputs, averaged over the m examples.

    `predict` and `actual` are (output_size, m); entries of `predict` must lie
    strictly in (0, 1) or the logs diverge.
    """
    m = actual.shape[1]
    log_likelihood = actual * np.log(predict) + (1 - actual) * np.log(1 - predict)
    return np.squeeze(-np.sum(log_likelihood) / m)

def backPropagation(X, Y, params, cache):
    """Gradients of the cross-entropy loss w.r.t. W1, b1, W2, b2.

    Uses the sigmoid+cross-entropy shortcut dZ2 = y - Y and the tanh
    derivative 1 - A1**2 for the hidden layer.  All gradients are averaged
    over the m examples (columns of X).
    """
    m = X.shape[1]
    inv_m = 1.0 / m
    dZ2 = cache['y'] - Y
    dW2 = inv_m * (dZ2 @ cache['A1'].T)
    db2 = inv_m * dZ2.sum(axis=1, keepdims=True)
    dZ1 = (params['W2'].T @ dZ2) * (1 - cache['A1'] ** 2)
    dW1 = inv_m * (dZ1 @ X.T)
    db1 = inv_m * dZ1.sum(axis=1, keepdims=True)
    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}

def updateParameters(gradients, params, learning_rate = 1.2):
    """One gradient-descent step; returns a fresh parameter dict.

    For each parameter P, the matching gradient is gradients['dP'];
    the inputs are not mutated.
    """
    def step(name):
        # new value = old value minus learning_rate times its gradient
        return params[name] - learning_rate * gradients['d' + name]

    return {'W1': step('W1'), 'W2': step('W2'), 'b1': step('b1'), 'b2': step('b2')}

def fit(X, Y, learning_rate, hidden_size, number_of_iterations = 5000):
    """Train the 2-layer network with full-batch gradient descent.

    X is (input_size, m), Y is (output_size, m).  hidden_size is a
    hyperparameter fixed before training.  Returns (params, cost_): the
    learned parameters and the training cost recorded at every iteration.
    """
    params = setParameters(X, Y, hidden_size)
    cost_ = []
    for _ in range(number_of_iterations):
        prediction, cache = forwardPropagation(X, params)
        cost_.append(cost(prediction, Y))
        gradients = backPropagation(X, Y, params, cache)
        params = updateParameters(gradients, params, learning_rate)
    return params, cost_

# Testing the code

import sklearn.datasets

# Two interleaving half-moon clusters: 500 points with binary labels.
X, Y = sklearn.datasets.make_moons(n_samples=500, noise=.2)

# Transpose to column-per-example layout: X -> (2, 500), Y -> (1, 500).
X, Y = X.T, Y.reshape(1, Y.shape[0])

# Train: learning_rate=0.3, 5 hidden neurons, 5000 iterations.
params, cost_ = fit(X, Y, 0.3, 5, 5000)

import matplotlib.pyplot as plt

# The cost curve should decrease as training converges.
plt.plot(cost_)

89 篇文章19 人订阅

0 条评论

## 相关文章

### 机器视觉与Tesseract介绍

1.可以通过 apt-get 安装:命令：sudo apt-get install tesseract-ocr

9540

13720

### mac 安装TortoiseHg

Clone TortoiseHg repository using Mercurial:

13820

### 离线安装mongodb

sudo mv mongodb-linux-x86_64-ubuntu1604-3.4.0 /usr/local/mongodb

20020

10020

### 通过nginx+uwsgi部署django应用在ubuntu

1.安装python3-pip -------------sudo apt-get install python3-pip 2.安装虚拟环境和虚拟环境管理包...

10610

### Docker 生产SSH服务的镜像

7.当前这个容器 root 用户目录下建立.ssh目录，复制需要的公钥到 authorized_keys 文件

14510

### Mac中搭建Kubernetes

Kubernetes是Google和RadHat公司共同主导的开源容器编排项目，功能非常强大，也非常的火热和流行，但同时里面也有很多的概念和名词需要我们去学习和...

28320

10310

22630