TensorBoard: Tensor and Graph Visualization

# -*- coding=utf-8 -*-
# @author: 陈水平
# @date: 2017-02-09
# @description: implement a softmax regression model on MNIST handwritten digits
# @ref: http://yann.lecun.com/exdb/mnist/

import gzip
import struct
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import tensorflow as tf

# MNIST data is stored in binary format;
# the following two utility functions transform it into numpy ndarray objects
def read_image(file_name):
    with gzip.open(file_name, 'rb') as f:
        buf = f.read()
        index = 0
        magic, images, rows, columns = struct.unpack_from('>IIII', buf, index)
        index += struct.calcsize('>IIII')

        image_size = '>' + str(images * rows * columns) + 'B'
        ims = struct.unpack_from(image_size, buf, index)

        im_array = np.array(ims).reshape(images, rows, columns)
        return im_array

def read_label(file_name):
    with gzip.open(file_name, 'rb') as f:
        buf = f.read()
        index = 0
        magic, labels = struct.unpack_from('>II', buf, index)
        index += struct.calcsize('>II')

        label_size = '>' + str(labels) + 'B'
        labels = struct.unpack_from(label_size, buf, index)

        label_array = np.array(labels)
        return label_array

print("Start processing MNIST handwritten digits data...")
train_x_data = read_image("MNIST_data/train-images-idx3-ubyte.gz")
train_x_data = train_x_data.reshape(train_x_data.shape[0], -1).astype(np.float32)
train_y_data = read_label("MNIST_data/train-labels-idx1-ubyte.gz")
test_x_data = read_image("MNIST_data/t10k-images-idx3-ubyte.gz")
test_x_data = test_x_data.reshape(test_x_data.shape[0], -1).astype(np.float32)
test_y_data = read_label("MNIST_data/t10k-labels-idx1-ubyte.gz")

# Scale pixel values to [0, 1]
train_x_minmax = train_x_data / 255.0
test_x_minmax = test_x_data / 255.0

# Alternatively, you can use the MNIST reader utility provided by tensorflow
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
# train_x_minmax = mnist.train.images
# train_y_data = mnist.train.labels
# test_x_minmax = mnist.test.images
# test_y_data = mnist.test.labels

# We evaluate the softmax regression model with sklearn first
eval_sklearn = False
if eval_sklearn:
    print("Start evaluating softmax regression model by sklearn...")
    reg = LogisticRegression(solver="lbfgs", multi_class="multinomial")
    reg.fit(train_x_minmax, train_y_data)
    np.savetxt('coef_softmax_sklearn.txt', reg.coef_, fmt='%.6f')  # Save coefficients to a text file
    test_y_predict = reg.predict(test_x_minmax)
    print("Accuracy of test set: %f" % accuracy_score(test_y_data, test_y_predict))

eval_tensorflow = True
batch_gradient = False

# Attach mean/stddev/max/min scalar summaries and a histogram summary to a tensor
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

if eval_tensorflow:
    print("Start evaluating softmax regression model by tensorflow...")
    # reformat y into one-hot encoding style
    lb = preprocessing.LabelBinarizer()
    lb.fit(train_y_data)
    train_y_data_trans = lb.transform(train_y_data)
    test_y_data_trans = lb.transform(test_y_data)

    x = tf.placeholder(tf.float32, [None, 784])
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]))
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]))
        variable_summaries(b)
    with tf.name_scope('Wx_plus_b'):
        V = tf.matmul(x, W) + b
        tf.summary.histogram('pre_activations', V)
    with tf.name_scope('softmax'):
        y = tf.nn.softmax(V)
        tf.summary.histogram('activations', y)

    y_ = tf.placeholder(tf.float32, [None, 10])

    with tf.name_scope('cross_entropy'):
        # Note: taking the log of the softmax output directly is numerically unstable;
        # tf.nn.softmax_cross_entropy_with_logits is preferred in practice.
        loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
        tf.summary.scalar('cross_entropy', loss)

    with tf.name_scope('train'):
        optimizer = tf.train.GradientDescentOptimizer(0.5)
        train = optimizer.minimize(loss)

    with tf.name_scope('evaluate'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', accuracy)

    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('log/train', sess.graph)
    test_writer = tf.summary.FileWriter('log/test')

    if batch_gradient:
        for step in range(300):
            sess.run(train, feed_dict={x: train_x_minmax, y_: train_y_data_trans})
            if step % 10 == 0:
                print("Batch Gradient Descent processing step %d" % step)
        print("Finally we got the estimated results; that took quite a long time...")
    else:
        for step in range(1000):
            if step % 10 == 0:
                summary, acc = sess.run([merged, accuracy], feed_dict={x: test_x_minmax, y_: test_y_data_trans})
                test_writer.add_summary(summary, step)
                print("Stochastic Gradient Descent processing step %d accuracy=%.2f" % (step, acc))
            else:
                sample_index = np.random.choice(train_x_minmax.shape[0], 100)
                batch_xs = train_x_minmax[sample_index, :]
                batch_ys = train_y_data_trans[sample_index, :]
                summary, _ = sess.run([merged, train], feed_dict={x: batch_xs, y_: batch_ys})
                train_writer.add_summary(summary, step)

    np.savetxt('coef_softmax_tf.txt', np.transpose(sess.run(W)), fmt='%.6f')  # Save coefficients to a text file
    print("Accuracy of test set: %f" % sess.run(accuracy, feed_dict={x: test_x_minmax, y_: test_y_data_trans}))
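Once the script has run, the two FileWriters leave event files under log/train and log/test. Pointing TensorBoard at the parent directory, e.g. tensorboard --logdir=log, and opening the default address http://localhost:6006 shows the two runs side by side: the test run's accuracy curve appears in the Scalars tab, and the name-scoped model in the Graphs tab.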

The main changes are:
  • Summary: all the statistics you want TensorBoard to display.
  • tf.name_scope(): adds hierarchy to the tensors in the Graph. TensorBoard renders the graph following the hierarchy specified in the code; initially only the top level is drawn, and clicking a node expands it to reveal the details of the next level.
  • tf.summary.scalar(): records a scalar statistic.
  • tf.summary.histogram(): records a Tensor of any shape and tracks the distribution of its values.
  • tf.summary.merge_all(): adds an op that runs every summary op, so you do not have to execute each summary op by hand.
  • tf.summary.FileWriter: writes summaries to disk. It requires a storage path logdir; if a Graph object is also passed in, the Graph Visualization will display Tensor Shape Information. After running the summary op, pass the result to the add_summary() method; a minimal sketch of this workflow follows the list.
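To make the workflow concrete, here is a minimal sketch that strings these calls together, independent of the MNIST model above. It assumes TensorFlow 1.x; the placeholder name noisy_value and the log directory log/demo are made up for illustration.

import numpy as np
import tensorflow as tf

with tf.name_scope('demo'):
    # Hypothetical scalar metric; any rank-0 float tensor works here.
    noisy_value = tf.placeholder(tf.float32, shape=[], name='noisy_value')
    tf.summary.scalar('noisy_value', noisy_value)

merged = tf.summary.merge_all()  # one op that evaluates every registered summary

with tf.Session() as sess:
    # Passing sess.graph is what enables the Graph Visualization tab.
    writer = tf.summary.FileWriter('log/demo', sess.graph)
    for step in range(100):
        summary = sess.run(merged, feed_dict={noisy_value: np.random.randn()})
        writer.add_summary(summary, step)  # the step becomes the x-axis value
    writer.close()

Because merge_all() collects every summary registered in the default graph, each sess.run(merged, ...) yields one serialized Summary protobuf, and add_summary() stamps it with the step that TensorBoard uses as the x-axis.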
