# [编程经验] TensorFlow实现非线性支持向量机

1. 风险最小化

2. VC维，Vapnik-Chervonenkis dimension

3. 结构风险最小化

4. 松弛变量

KKT条件：

```# coding: utf-8
import numpy as np
import tensorflow as tf
from sklearn import datasets

# Graph inputs (TF1 placeholders), fed per batch at session run time.
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)  # batch of 2-D feature rows
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)  # labels as a column vector; values are +1/-1 (see get_data)
prediction = tf.placeholder(shape=[None, 2], dtype=tf.float32)  # points to classify when evaluating accuracy

def get_data():
    """Load the iris data set for binary classification.

    Returns:
        features: float array of shape (150, 2) holding sepal length
            (column 0) and petal width (column 3) of each sample.
        labels: int array of shape (150,) with +1 for class setosa
            (target == 0) and -1 for every other class.
    """
    iris = datasets.load_iris()
    samples = iris.data
    features = np.column_stack((samples[:, 0], samples[:, 3]))
    labels = np.where(iris.target == 0, 1, -1)
    return features, labels

def Gaussian_kernel(gamma):
    """Build the RBF (Gaussian) kernel matrix over the training batch.

    Computes K[i, j] = exp(gamma * |d(i, j)|) where d(i, j) is the squared
    Euclidean distance ||x_i - x_j||^2.  With negative gamma (the caller
    passes -25.0) this is the standard RBF kernel
    exp(-|gamma| * ||x_i - x_j||^2).

    Args:
        gamma: Kernel coefficient; expected to be negative.

    Returns:
        A [batch, batch] float32 tensor of kernel values over `x_data`.
    """
    g = tf.constant(gamma)
    # Squared pairwise distances via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2.
    # Bug fix: the original kept only the 2*a.b cross term and dropped both
    # squared-norm terms, so the result was not a distance at all.  The
    # corrected form matches the cross-kernel computed in non_linea_svm.
    sq_norms = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    cross = tf.matmul(x_data, tf.transpose(x_data))
    sq_dists = tf.add(
        tf.subtract(sq_norms, tf.multiply(2., cross)),
        tf.transpose(sq_norms))
    # abs() guards against tiny negative values from floating-point error.
    out = tf.exp(tf.multiply(g, tf.abs(sq_dists)))
    return out

def non_linea_svm(my_kernel, batch_size, prediction, gamma=-25.0,
                  learning_rate=0.01):
    """Build the dual-form SVM loss, batch accuracy, and training op.

    Args:
        my_kernel: [batch, batch] kernel matrix over the training batch.
        batch_size: Number of samples per batch (size of the dual variables).
        prediction: Placeholder holding the points to classify.
        gamma: RBF coefficient for the train/predict cross kernel.
        learning_rate: Gradient-descent step size.  New parameter; defaults
            to the previously hard-coded 0.01, so existing callers are
            unaffected.

    Returns:
        Tuple (loss, accuracy, train_step).
    """
    # Dual variables (Lagrange multipliers), one per batch sample.
    b = tf.Variable(tf.random_normal(shape=[1, batch_size]))
    # First term of the dual objective: sum of the multipliers.
    first_term = tf.reduce_sum(b)
    # Pairwise products b_i * b_j and y_i * y_j.
    b_vec_cross = tf.matmul(tf.transpose(b), b)
    y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
    # Second term: sum_ij b_i b_j y_i y_j K(x_i, x_j).
    second_term = tf.reduce_sum(tf.multiply(
        my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
    # Maximizing (first - second) <=> minimizing its negation.
    loss = tf.negative(tf.subtract(first_term, second_term))

    # Cross kernel between training points and prediction points:
    # ||a - p||^2 = ||a||^2 - 2 a.p + ||p||^2.
    rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    rB = tf.reshape(tf.reduce_sum(tf.square(prediction), 1), [-1, 1])
    stra = tf.subtract(
        rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction))))
    pred_sq_dist = tf.add(stra, tf.transpose(rB))
    pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))

    # Decision values, thresholded at their mean to yield +/-1 labels.
    prediction_output = tf.matmul(
        tf.multiply(tf.transpose(y_target), b), pred_kernel)
    prediction = tf.sign(
        prediction_output - tf.reduce_mean(prediction_output))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)),
                tf.float32))

    my_opt = tf.train.GradientDescentOptimizer(learning_rate)
    train_step = my_opt.minimize(loss)
    return loss, accuracy, train_step

def train_svm(sess, batch_size):
    """Train the non-linear SVM for 300 mini-batch steps.

    Bug fixed: the original rebuilt the model graph, re-ran the variable
    initializer, and reloaded the data set inside the loop on every
    iteration.  Re-initializing reset the dual variables each step, so no
    learning ever accumulated, and the graph grew without bound.  The
    graph is now built and initialized exactly once, before the loop.

    Args:
        sess: An open tf.Session used for all runs.
        batch_size: Number of samples drawn per training step.
    """
    kernel = Gaussian_kernel(-25.0)
    x_vals, y_vals = get_data()
    loss, accuracy, train_step = non_linea_svm(
        kernel, batch_size=batch_size, prediction=prediction)
    sess.run(tf.global_variables_initializer())

    loss_vec = []
    batch_accuracy = []
    for i in range(300):
        # Sample a random mini-batch (with replacement).
        rand_index = np.random.choice(len(x_vals), size=batch_size)
        rand_x = x_vals[rand_index]
        # Labels reshaped to a column vector to match y_target.
        rand_y = np.transpose([y_vals[rand_index]])
        feed = {x_data: rand_x, y_target: rand_y}
        sess.run(train_step, feed_dict=feed)

        temp_loss = sess.run(loss, feed_dict=feed)
        loss_vec.append(temp_loss)

        # Accuracy is measured on the same batch it was trained on.
        acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x,
                                                 y_target: rand_y,
                                                 prediction: rand_x})
        batch_accuracy.append(acc_temp)

        # Report at steps 1, 101, 201 (original cadence preserved).
        if (i + 1) % 100 == 1:
            print('Loss = ' + str(temp_loss))

def main(_):
    """Entry point invoked by tf.app.run(): trains the SVM in a managed session."""
    with tf.Session() as session:
        train_svm(session, batch_size=30)

# TF1-style script entry: tf.app.run() parses command-line flags, then calls main.
if __name__ == "__main__":
    tf.app.run()```

1. Pattern Recognition and Machine Learning

2. Machine Learning: A Probabilistic Perspective

3. Introduction to Machine Learning

4. https://www.zhihu.com/question/21883548

68 篇文章，30 人订阅

0 条评论

## 相关文章

3725

2415

844

1450

### 【机器学习】CS229课程笔记notes2翻译-Part IV生成学习算法

到目前为止，我们主要谈论建模p(y|x;θ)的学习算法，给定x的y的条件分布。例如，logistic回归建模p(y|x;θ)为hθ(x)=g(θTx...

2526

1903

1863

### Adobe 写实深度摄影风格迁移，局部仿射解决画面扭曲

【新智元导读】康奈尔大学和 Adobe 团队的这项图像风格迁移研究，解决了神经网络风格迁移中由于参考图像风格夸张而产生的的输出图像“扭曲”的问题，在各种场景下得...

3205

3266

1404