# Imports for the notebook: TensorFlow (graph/session API), NumPy for the
# noise injection, and matplotlib for plotting reconstructions.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
/anaconda3/envs/py35/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
  return f(*args, **kwds)
/anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
/anaconda3/envs/py35/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
  return f(*args, **kwds)
# Record the TensorFlow version the notebook was executed with
# (output below shows 1.10.0 — the tf.contrib MNIST helpers still exist there).
tf.__version__
'1.10.0'
from tensorflow.examples.tutorials.mnist import input_data

# Download/extract MNIST into ./MNIST_data. one_hot=False keeps labels as
# integer class ids — the labels are unused here, the autoencoder trains on
# the images only. (read_data_sets is deprecated in TF 1.x; kept because the
# notebook targets TF 1.10.)
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
WARNING:tensorflow:From :2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /anaconda3/envs/py35/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /anaconda3/envs/py35/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-images-idx3-ubyte.gz
WARNING:tensorflow:From /anaconda3/envs/py35/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /anaconda3/envs/py35/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
# Denoising-autoencoder training loop: each input batch is corrupted with
# additive Gaussian noise (amplitude `scale`) while the clean images are the
# reconstruction targets, so the network learns to remove the noise.
# NOTE(review): relies on `epochs`, `batch_size`, `scale`, `sess`, `loss`,
# `optimizer`, `inputs`, and `targets` being defined in earlier cells not
# shown here — confirm against the full notebook.
for i in range(epochs):
    epoch_loss = []
    # num_examples // batch_size full batches per epoch; the remainder
    # (55000 % batch_size images) is simply not seen this epoch.
    for x in range(mnist.train.num_examples // batch_size):
        batch = mnist.train.next_batch(batch_size)
        # batch[0] are the images; batch[1] (labels) are unused.
        batch_cost, _ = sess.run(
            [loss, optimizer],
            feed_dict={
                inputs: batch[0] + scale * np.random.normal(size=batch[0].shape),
                targets: batch[0],
            })
        epoch_loss.append(batch_cost)
    # Report the mean batch loss for the epoch.
    print("Epoch: {}/{}".format(i + 1, epochs),
          "Training loss: {:.4f}".format(sum(epoch_loss) / len(epoch_loss)))
Epoch: 1/100 Training loss: 4122.6152
Epoch: 2/100 Training loss: 3175.5261
Epoch: 3/100 Training loss: 2949.5105
Epoch: 4/100 Training loss: 2785.7994
Epoch: 5/100 Training loss: 2674.1762
Epoch: 6/100 Training loss: 2587.7063
Epoch: 7/100 Training loss: 2494.3316
Epoch: 8/100 Training loss: 2412.4359
Epoch: 9/100 Training loss: 2370.8783
Epoch: 10/100 Training loss: 2347.2669
Epoch: 11/100 Training loss: 2323.5621
Epoch: 12/100 Training loss: 2314.9729
Epoch: 13/100 Training loss: 2303.4539
Epoch: 14/100 Training loss: 2298.0133
Epoch: 15/100 Training loss: 2292.6705
Epoch: 16/100 Training loss: 2274.9722
Epoch: 17/100 Training loss: 2270.7350
Epoch: 18/100 Training loss: 2259.5915
Epoch: 19/100 Training loss: 2253.6038
Epoch: 20/100 Training loss: 2247.7472
Epoch: 21/100 Training loss: 2248.0725
Epoch: 22/100 Training loss: 2240.8025
Epoch: 23/100 Training loss: 2238.4714
Epoch: 24/100 Training loss: 2236.3363
Epoch: 25/100 Training loss: 2233.4593
Epoch: 26/100 Training loss: 2227.6890
Epoch: 27/100 Training loss: 2233.3891
Epoch: 28/100 Training loss: 2228.8210
Epoch: 29/100 Training loss: 2232.7463
Epoch: 30/100 Training loss: 2224.1135
Epoch: 31/100 Training loss: 2220.9270
Epoch: 32/100 Training loss: 2227.0465
Epoch: 33/100 Training loss: 2222.6370
Epoch: 34/100 Training loss: 2213.9390
Epoch: 35/100 Training loss: 2223.2059
Epoch: 36/100 Training loss: 2219.7526
Epoch: 37/100 Training loss: 2216.6488
Epoch: 38/100 Training loss: 2218.4137
Epoch: 39/100 Training loss: 2215.9420
Epoch: 40/100 Training loss: 2218.2955
Epoch: 41/100 Training loss: 2216.0744
Epoch: 42/100 Training loss: 2215.5212
Epoch: 43/100 Training loss: 2214.1895
Epoch: 44/100 Training loss: 2213.8964
Epoch: 45/100 Training loss: 2214.0576
Epoch: 46/100 Training loss: 2214.3030
Epoch: 47/100 Training loss: 2216.1043
Epoch: 48/100 Training loss: 2215.5620
Epoch: 49/100 Training loss: 2218.0237
Epoch: 50/100 Training loss: 2205.8529
Epoch: 51/100 Training loss: 2211.3229
Epoch: 52/100 Training loss: 2211.8879
Epoch: 53/100 Training loss: 2211.6150
Epoch: 54/100 Training loss: 2212.2388
Epoch: 55/100 Training loss: 2214.1972
Epoch: 56/100 Training loss: 2209.8900
Epoch: 57/100 Training loss: 2206.5484
Epoch: 58/100 Training loss: 2210.9500
Epoch: 59/100 Training loss: 2213.4333
Epoch: 60/100 Training loss: 2209.0800
Epoch: 61/100 Training loss: 2206.9485
Epoch: 62/100 Training loss: 2214.8776
Epoch: 63/100 Training loss: 2207.4935
Epoch: 64/100 Training loss: 2206.9996
Epoch: 65/100 Training loss: 2211.6217
Epoch: 66/100 Training loss: 2214.4954
Epoch: 67/100 Training loss: 2208.1563
Epoch: 68/100 Training loss: 2209.4227
Epoch: 69/100 Training loss: 2210.2614
Epoch: 70/100 Training loss: 2206.8853
Epoch: 71/100 Training loss: 2211.4665
Epoch: 72/100 Training loss: 2213.6993
Epoch: 73/100 Training loss: 2208.0797
Epoch: 74/100 Training loss: 2207.9999
Epoch: 75/100 Training loss: 2211.5181
Epoch: 76/100 Training loss: 2205.4149
Epoch: 77/100 Training loss: 2211.1890
Epoch: 78/100 Training loss: 2201.1221
Epoch: 79/100 Training loss: 2209.5938
Epoch: 80/100 Training loss: 2206.1002
Epoch: 81/100 Training loss: 2205.2419
Epoch: 82/100 Training loss: 2206.9217
Epoch: 83/100 Training loss: 2209.1512
Epoch: 84/100 Training loss: 2204.2429
Epoch: 85/100 Training loss: 2205.7824
Epoch: 86/100 Training loss: 2208.6107
Epoch: 87/100 Training loss: 2205.6961
Epoch: 88/100 Training loss: 2204.8022
Epoch: 89/100 Training loss: 2206.0642
Epoch: 90/100 Training loss: 2204.2363
Epoch: 91/100 Training loss: 2201.9248
Epoch: 92/100 Training loss: 2204.6733
Epoch: 93/100 Training loss: 2206.9835
Epoch: 94/100 Training loss: 2204.4014
Epoch: 95/100 Training loss: 2202.2799
Epoch: 96/100 Training loss: 2209.2330
Epoch: 97/100 Training loss: 2206.6406
Epoch: 98/100 Training loss: 2201.3019
Epoch: 99/100 Training loss: 2203.5554
Epoch: 100/100 Training loss: 2201.1172