Python机器学习的练习四：多元逻辑回归

```import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

data
{'X': array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]),
'__globals__': [],
'__header__': 'MATLAB 5.0 MAT-file, Platform: GLNXA64, Created on: Sun Oct 16 13:09:09 2011',
'__version__': '1.0',
'y': array([[10],
[10],
[10],
...,
[9],
[9],
[9]], dtype=uint8)}```

```data['X'].shape, data['y'].shape  # 5000 samples of 400 features; y is a column vector

> ((5000L,400L), (5000L,1L))```

def sigmoid(z):
    """Logistic (sigmoid) function: maps any real z into (0, 1)."""
    return 1 / (1 + np.exp(-z))

def cost(theta, X, y, learningRate):
    """Regularized logistic-regression cost (cross-entropy + L2 penalty).

    theta: (n,) parameter vector (theta[0] is the intercept weight).
    X: (m, n) design matrix whose first column is all ones.
    y: (m, 1) vector of 0/1 labels.
    learningRate: regularization strength (lambda).
    Returns the scalar regularized cost.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # BUG FIX: the penalty scales by lambda / (2 * m).  The original wrote
    # learningRate / 2 * len(X), which *multiplies* by m instead of dividing.
    # theta[:, 0] (the intercept) is deliberately excluded from the penalty.
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg

def gradient_with_loop(theta, X, y, learningRate):
    """Regularized gradient of the logistic cost, one parameter at a time.

    Same arguments as `cost`.  Returns a flat (n,) array of partial
    derivatives.  The source text was truncated: `grad` was never
    initialized, the `if i == 0:` branch was empty, and the `return` was
    missing -- all restored here.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)

    error = sigmoid(X * theta.T) - y

    for i in range(parameters):
        term = np.multiply(error, X[:, i])

        if i == 0:
            # the intercept gradient carries no regularization term
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:, i])

    return grad

def gradient(theta, X, y, learningRate):
    """Vectorized regularized gradient of the logistic cost.

    Same arguments as `cost`.  Returns a flat (n,) array, matching what
    scipy.optimize.minimize expects from `jac`.  The source text was
    truncated after the comment: the intercept fix-up and the `return`
    were missing -- restored here.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parameters = int(theta.ravel().shape[1])
    error = sigmoid(X * theta.T) - y

    grad = ((X.T * error) / len(X)).T + ((learningRate / len(X)) * theta)

    # intercept gradient is not regularized -- overwrite slot 0 with the
    # plain (unpenalized) partial derivative
    grad[0, 0] = np.sum(np.multiply(error, X[:, 0])) / len(X)

    return np.array(grad).ravel()

from scipy.optimize import minimize

def one_vs_all(X, y, num_labels, learning_rate):
    """Train one regularized logistic classifier per label (one-vs-all).

    X: (rows, params) feature matrix WITHOUT the intercept column.
    y: (rows, 1) labels in 1..num_labels.
    learning_rate: regularization strength passed through to cost/gradient.
    Returns all_theta, a (num_labels, params + 1) array; row i-1 holds the
    fitted parameters of the classifier for label i.
    """
    rows = X.shape[0]
    params = X.shape[1]

    # k X (n + 1) array for the parameters of each of the k classifiers
    all_theta = np.zeros((num_labels, params + 1))

    # insert a column of ones at the beginning for the intercept term
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # labels are 1-indexed instead of 0-indexed
    for i in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        # binary target for this classifier: 1 for label i, 0 otherwise
        y_i = np.array([1 if label == i else 0 for label in y])
        y_i = np.reshape(y_i, (rows, 1))

        # minimize the objective function
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[i - 1, :] = fmin.x

    return all_theta

# Sanity-check all the array shapes before training: build the
# intercept-augmented X, a zero theta, and one one-vs-all target vector.
rows = data['X'].shape[0]
params = data['X'].shape[1]

all_theta = np.zeros((10, params + 1))

X = np.insert(data['X'], 0, values=np.ones(rows), axis=1)

theta = np.zeros(params + 1)

# labels run 1..10 (see np.unique below), so `label == 0` never matches;
# this cell only verifies shapes, y_0 ends up all zeros
y_0 = np.array([1 if label == 0 else 0 for label in data['y']])
y_0 = np.reshape(y_0, (rows, 1))

X.shape, y_0.shape, theta.shape, all_theta.shape
# -> ((5000, 401), (5000, 1), (401,), (10, 401))

```np.unique(data['y'])  # labels are 1..10, no 0 -- presumably digit 0 is stored as label 10

> array([1, 2, 3, 4, 5, 6, 7, 8, 9,10], dtype=uint8)```

```all_theta= one_vs_all(data['X'], data['y'],10,1)  # train 10 one-vs-all classifiers, lambda = 1
all_theta
array([[-5.79312170e+00,  0.00000000e+00,  0.00000000e+00, ...,
1.22140973e-02,  2.88611969e-07,  0.00000000e+00],
[-4.91685285e+00,  0.00000000e+00,  0.00000000e+00, ...,
2.40449128e-01, -1.08488270e-02,  0.00000000e+00],
[-8.56840371e+00,  0.00000000e+00,  0.00000000e+00, ...,
-2.59241796e-04, -1.12756844e-06,  0.00000000e+00],
...,
[-1.32641613e+01,  0.00000000e+00,  0.00000000e+00, ...,
-5.63659404e+00,  6.50939114e-01,  0.00000000e+00],
[-8.55392716e+00,  0.00000000e+00,  0.00000000e+00, ...,
-2.01206880e-01,  9.61930149e-03,  0.00000000e+00],
[-1.29807876e+01,  0.00000000e+00,  0.00000000e+00, ...,
2.60651472e-04,  4.22693052e-05,  0.00000000e+00]])```

def predict_all(X, all_theta):
    """For every row of X, return the 1-indexed label whose one-vs-all
    classifier reports the highest probability.

    X: (rows, params) feature matrix without the intercept column.
    all_theta: (num_labels, params + 1) trained parameter rows.
    Returns a (rows, 1) matrix of predicted labels in 1..num_labels.
    """
    num_rows = X.shape[0]

    # augment with the intercept column of ones, exactly as in training,
    # and switch to matrix types so `*` means matrix multiplication
    X_aug = np.matrix(np.insert(X, 0, values=np.ones(num_rows), axis=1))
    theta_mat = np.matrix(all_theta)

    # (rows x num_labels) matrix of per-class probabilities
    h = sigmoid(X_aug * theta_mat.T)

    # argmax is 0-based while the labels are 1-based, hence the +1 shift
    return np.argmax(h, axis=1) + 1

# Evaluate training-set accuracy of the fitted one-vs-all classifiers.
y_pred = predict_all(data['X'], all_theta)
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, data['y'])]
# float() keeps the division from truncating under Python 2
accuracy = sum(map(int, correct)) / float(len(correct))
# parenthesized print runs under both Python 2 and Python 3
print('accuracy = {0}%'.format(accuracy * 100))
# -> accuracy = 97.58%

http://www.johnwittenauer.net/machine-learning-exercises-in-python-part-4/

0 条评论

相关文章

3397

1452

851

1.1K7

1013

4455

53511

5205

TensorFlow官方力推、GitHub爆款项目：用Attention模型自动生成图像字幕

【新智元导读】近期，TensorFlow官方推文推荐了一款十分有趣的项目——用Attention模型生成图像字幕。而该项目在GitHub社区也收获了近十万“点赞...

1872

2307