# 简单的TensorFlow分类教程

linear data

moon data

saturn data

def generate_Saturn_data(N=100):
    """Generate a 2-class "Saturn" toy dataset around center (0.5, 0.5).

    Class 1 is a noisy ring (radius ~0.3-0.5), class 2 is a noisy inner
    disk (radius < ~0.2), so the two classes are not linearly separable.

    Args:
        N: number of samples per class.

    Returns:
        (x1, y1, x2, y2): coordinate arrays of shape (N,) for the ring
        class (x1, y1) and the disk class (x2, y2).
    """
    # NOTE: original code used an undefined name `PI`; fixed to np.pi.
    # Evenly spaced angles with a small random phase jitter.
    theta = np.linspace(0, 2 * np.pi, N) + np.pi * (np.random.rand(N)) / 100
    a = 0.5  # ring/disk center x
    b = 0.5  # ring/disk center y
    # Outer ring: radius 0.4 +/- 0.1, plus small per-coordinate noise.
    r1 = 0.4 + 2 * (np.random.rand(N) - 0.5) / 10
    x1 = a + r1 * np.cos(theta) + (np.random.rand(N) - 0.5) / 50
    y1 = b + r1 * np.sin(theta) + (np.random.rand(N) - 0.5) / 50
    # Inner disk: radius uniform in [0, 0.2), plus small noise.
    r2 = 0.2 * np.random.rand(N)
    x2 = a + r2 * np.cos(theta) + (np.random.rand(N) - 0.5) / 50
    y2 = b + r2 * np.sin(theta) + (np.random.rand(N) - 0.5) / 50

    return x1, y1, x2, y2

gen_data()方法负责把上面生成的模拟数据组装成训练数据集和测试数据集，每个样本的标注采用了TensorFlow支持的One-Hot编码格式。例如，第一种类别的数据样本(x1, y1)的标注应该是(1, 0)，而第二种类别的数据样本(x2, y2)的标注应该是(0, 1)。为了满足随机梯度下降的特点，在gen_data()方法内部还对数据进行了重新随机排列，同时也考虑到了每个类别的样本数量。

# Linear (softmax-regression) classifier: 2 input features -> 2 one-hot classes.
# Start from a clean graph so re-running the cell does not accumulate nodes.
tf.reset_default_graph()

# Feed targets: a batch of 2-D samples and their one-hot labels.
x = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='samples')
y = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='labels')

# Trainable parameters, zero-initialized (fine for a single linear layer).
W = tf.Variable(tf.zeros(shape=(2, 2)), name='weight')
b = tf.Variable(tf.zeros(shape=(2)), name='bias')

# Class probabilities: softmax over the affine transform x @ W + b.
pred = tf.nn.softmax(tf.matmul(x, W) + b, name='pred')

accuracy1

loss曲线：

cost1

accuracy2

cost2

# --- Training setup for the linear model ---
# for train
# Loss: mean squared error between one-hot labels and softmax outputs.
cost = tf.reduce_mean(tf.square(y - pred))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# for test
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accurary = tf.reduce_mean(tf.cast(correct_pred, tf.float32))  # (sic) name kept as in the rest of the tutorial

init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    # TensorBoard monitoring: scalars for cost/accuracy, histogram for weights.
    tf.summary.scalar('cost', cost)
    tf.summary.histogram('weight', W)
    tf.summary.scalar('accurary', accurary)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./log/linear_model/train', sess.graph)
    test_writer = tf.summary.FileWriter('./log/linear_model/test', sess.graph)

    sess.run(init_op)
    # data_linear is built by gen_data() elsewhere in this tutorial.
    x_train, y_train = data_linear['train_set']
    x_test, y_test = data_linear['test_set']
    num_samples = len(x_train)
    for epoch in range(epochs):
        steps = int(num_samples / batch_size)
        # Reshuffle each epoch so mini-batches differ between epochs (SGD).
        indices = np.random.permutation(num_samples)
        x_train_ = x_train[indices]
        y_train_ = y_train[indices]
        for step in range(steps):
            start = step * batch_size
            end = start + batch_size
            x_ = x_train_[start:end, :]
            y_ = y_train_[start:end, :]
            summary, _, c = sess.run([merged, train, cost], feed_dict={x: x_, y: y_})
            train_writer.add_summary(summary)
        # Evaluate on the held-out test set every 100 epochs.
        if epoch % 100 == 99:
            summary, acc = sess.run([merged, accurary], feed_dict={x: x_test, y: y_test})
            test_writer.add_summary(summary, epoch)
            print("Epoch:{:5d}, Accurary:{:.2f}".format(epoch, acc))
    print('W:', W.eval())
    print('b:', b.eval())
    train_writer.close()
    test_writer.close()
    print("Training Finished!")
    # Persist the trained variables so the model can be restored later.
    save_path = saver.save(sess, './log/linear_model/linear_model.ckpt')

print('model saved in path: ', save_path)

1、实例化tf.summary.FileWriter()

2、把需要监控的参数加入到队列中，标量用tf.summary.scalar，张量用tf.summary.histogram

3、合并所有监控的结点到graph上，建立依赖关系merged = tf.summary.merge_all()

4、在训练过程中用sess.run()运行merged结点得到summary，并通过FileWriter.add_summary()写入日志文件

5、在terminal中启动tensorboard tensorboard --logdir=...

linear

WB

# 3-hidden-layer fully connected network: 2 -> 32 -> 8 -> 2,
# tanh activations on hidden layers, softmax on the output.
x = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='samples')
y = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='labels')
# Weights are drawn from a normal distribution (zeros would make all hidden
# units identical); biases start at zero.
W1 = tf.Variable(tf.random_normal(shape=(2, 32), mean=0.0, stddev=1), name='weight1')
b1 = tf.Variable(tf.zeros(shape=[32]), name='bias1')
W2 = tf.Variable(tf.random_normal(shape=(32, 8)), name='weight2')
b2 = tf.Variable(tf.zeros(shape=[8]), name='bias2')
W3 = tf.Variable(tf.random_normal(shape=(8, 2)), name='weight3')
b3 = tf.Variable(tf.zeros(shape=[2]), name='bias3')

z = tf.matmul(x, W1) + b1
layer1 = tf.tanh(z, name='layer1')
z = tf.matmul(layer1, W2) + b2
layer2 = tf.tanh(z, name='layer2')
out = tf.matmul(layer2, W3) + b3
pred = tf.nn.softmax(out, name='pred')

# for train
# Cross-entropy on the raw logits (more numerically stable than
# taking log of the softmax output).
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=out))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# for test
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))

accurary = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

moon

# 4-hidden-layer fully connected network: 2 -> 3 -> 6 -> 9 -> 2,
# tanh activations on hidden layers, softmax on the output.
x = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='samples')
y = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='labels')
W1 = tf.Variable(tf.random_normal(shape=(2, 3), mean=0.0, stddev=1), name='weight1')
b1 = tf.Variable(tf.zeros(shape=(3)), name='bias1')
W2 = tf.Variable(tf.random_normal(shape=(3, 6)), name='weight2')
b2 = tf.Variable(tf.zeros(shape=(6)), name='bias2')
W3 = tf.Variable(tf.random_normal(shape=(6, 9)), name='weight3')
b3 = tf.Variable(tf.zeros(shape=(9)), name='bias3')
W4 = tf.Variable(tf.random_normal(shape=(9, 2)), name='weight4')
b4 = tf.Variable(tf.zeros(shape=(2)), name='bias4')

z = tf.matmul(x, W1) + b1
# layer1 = tf.nn.relu(z, name='layer1')  # alternative activation that was tried
layer1 = tf.tanh(z, name='layer1')
z = tf.matmul(layer1, W2) + b2
layer2 = tf.tanh(z, name='layer2')
z = tf.matmul(layer2, W3) + b3
layer3 = tf.tanh(z, name='layer3')
out = tf.matmul(layer3, W4) + b4
pred = tf.nn.softmax(out, name='pred')

# for train
# Cross-entropy computed from the logits for numerical stability.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=out))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# for test
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))

accurary = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

saturn2

saturn1

Simple end-to-end TensorFlow examples Implementing a Neural Network from Scratch in Python – An Introduction Implementing a Neural Network from Scratch in Python - Source Code

0 条评论

• ### TensorFlow Tutorial-1

1、Why TensorFlow? 网上有关介绍太多了，我就不多说了，这里主要注重使用。 ? Intro.PNG ? github.PNG 2、Programi...

• ### 存储Tensorflow训练网络的参数

训练一个神经网络的目的是啥？不就是有朝一日让它有用武之地吗？可是，在别处使用训练好的网络，得先把网络的参数（就是那些variables）保存下来，怎么保存呢？其...

• ### TensorFlow从0到1 | 第十二章：TensorFlow构建3层NN玩转MNIST

上一篇 11 74行Python实现手写体数字识别展示了74行Python代码完成MNIST手写体数字识别，识别率轻松达到95%。这算不上一个好成绩，不过我并不...

• ### win10 tensorflow笔记2 MNIST机器学习入门

这里跟官方有两处不同 1：第1行代码原文是import input_data这里的input_data是无法直接导入的。需要给出具体路径from tensor...

• ### TensorFlow2.X学习笔记(1)--TensorFlow核心概念

TensorFlow™ 是一个采用 数据流图（data flow graphs），用于数值计算的开源软件库。节点（Nodes）在图中表示数学操作，图中的线（e...

• ### TensorFlow2.X学习笔记(3)--TensorFlow低阶API之张量

TensorFlow提供的方法比numpy更全面，运算速度更快，如果需要的话，还可以使用GPU进行加速。

• ### TensorFlow2.X学习笔记(4)--TensorFlow低阶API之AutoGraph相关研究

而Autograph机制可以将动态图转换成静态计算图，兼收执行效率和编码效率之利。