python – LeNet CNN using TensorFlow

Can someone help me find out where I'm going wrong? I can't figure it out.
My validation accuracy is 0.000 even after 100 epochs, and the cost looks wrong at first glance as well.

Notebook COLAB LINK

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline



from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", reshape=False)

X_train, Y_train = mnist.train.images, mnist.train.labels
X_validate, Y_validate = mnist.validation.images, mnist.validation.labels
X_test, Y_test = mnist.test.images, mnist.test.labels

print ("X_train shape:", X_train.shape)
print ("Y_train shape:", Y_train.shape)
print ("X_validate shape:", X_validate.shape)
print ("Y_validate shape:", Y_validate.shape)
print ("X_test shape:", X_test.shape)
print ("Y_test Shape:", Y_test.shape)


X_train = np.pad(X_train, ((0,0), (2,2), (2,2), (0,0)), 'constant', constant_values=0)
X_validate = np.pad(X_validate, ((0,0), (2,2), (2,2), (0,0)), 'constant', constant_values=0)
X_test = np.pad(X_test, ((0,0), (2,2), (2,2), (0,0)), 'constant', constant_values=0)

print ("X_train shape:", X_train.shape)
print ("Y_train shape:", Y_train.shape)
print ("X_validate shape:", X_validate.shape)
print ("Y_validate shape:", Y_validate.shape)
print ("X_test shape:", X_test.shape)
print ("Y_test Shape:", Y_test.shape)



X = tf.placeholder(tf.float32, (None, 32, 32, 1))
Y = tf.placeholder(tf.int32, (None))

output_y = tf.one_hot(Y, 1)


W1 = tf.Variable(tf.truncated_normal(shape=(5,5,1,6), mean=0, stddev=0.1))
W2 = tf.Variable(tf.truncated_normal(shape=(5,5,6,16), mean=0, stddev=0.1))

B1 = tf.Variable(tf.zeros(6))
B2 = tf.Variable(tf.zeros(16))


def feed_forward(X):
    Z1 = tf.nn.bias_add(tf.nn.conv2d(X, W1, strides=[1,1,1,1], padding='VALID'), B1)
    A1 = tf.nn.relu(Z1)
    print(Z1.shape)
    print(A1.shape)

    P1 = tf.nn.avg_pool(A1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    print(P1.shape)

    Z2 = tf.nn.bias_add(tf.nn.conv2d(P1, W2, strides=[1,1,1,1], padding='VALID'), B2)
    A2 = tf.nn.relu(Z2)
    print(Z2.shape)
    print(A2.shape)

    P2 = tf.nn.avg_pool(A2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    print(P2.shape)

    F = tf.contrib.layers.flatten(P2)
    print(F.shape)

    FC1 = tf.contrib.layers.fully_connected(F, 120, activation_fn=tf.nn.relu)
    FC2 = tf.contrib.layers.fully_connected(FC1, 84, activation_fn=tf.nn.relu)
    out = tf.contrib.layers.fully_connected(FC2, 10, activation_fn=tf.nn.relu)
    print(FC1.shape)
    print(FC2.shape)
    print(out.shape)

    return out
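
For reference, these are the layer shapes I expect the print statements above to show for 32x32x1 inputs with VALID padding (my own calculation, not copied from the notebook output):

# Expected shapes per layer (my calculation, not printed output):
# Z1/A1: (?, 28, 28, 6)   conv 5x5, VALID, on 32x32 input
# P1:    (?, 14, 14, 6)   2x2 average pooling, stride 2
# Z2/A2: (?, 10, 10, 16)  conv 5x5, VALID
# P2:    (?, 5, 5, 16)    2x2 average pooling, stride 2
# F:     (?, 400)         flatten of 5*5*16
# FC1:   (?, 120), FC2: (?, 84), out: (?, 10)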





model_op = feed_forward(X)

learning_rate = 0.001
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=output_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_operation = optimizer.minimize(loss_operation)
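
(As far as I understand, softmax_cross_entropy_with_logits applies the softmax itself, so model_op is passed in as raw logits. Just for reference, TensorFlow also has a sparse variant that takes the integer labels in Y directly instead of a one-hot tensor; I am not using it above, only noting it:)

# Sparse variant, for reference only (not used in the code above):
# cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_op, labels=Y)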



BATCH_SIZE = 128
predicted_op = tf.argmax(model_op, 1)
real_op = tf.argmax(output_y, 1)

correct_op = tf.equal(predicted_op, real_op)
accuracy_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))



def evaluate(x_data, y_data):

    num_example = len(x_data)
    total_accuracy = 0

    sess = tf.get_default_session()
    for index in range(0, num_example, BATCH_SIZE):

        batch_x, batch_y = x_data[index:index+BATCH_SIZE], y_data[index:index+BATCH_SIZE]
        accuracy = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})

        total_accuracy += (accuracy * len(batch_x))

    return total_accuracy / num_example
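
(The idea behind evaluate is to average the per-batch accuracy weighted by batch size, so the shorter last batch does not skew the result. It has to be called while a session is open, which is how I use it in the training loop below:)

# Intended usage, inside an open tf.Session with variables initialized:
# val_acc = evaluate(X_validate, Y_validate)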



from sklearn.utils import shuffle

print("Training...")
saver = tf.train.Saver()

init = tf.global_variables_initializer()

with tf.Session() as sess:

    sess.run(init)

    EPOCHS = 10
    NUM_SAMPLE = len(X_train)
    costs = []

    for epoch in range(EPOCHS):
        X_train, Y_train = shuffle(X_train, Y_train)

        epoch_cost = 0
        for index in range(0, NUM_SAMPLE, BATCH_SIZE):

            batch_x, batch_y = X_train[index:index+BATCH_SIZE], Y_train[index:index+BATCH_SIZE]
            temp_cost = sess.run(loss_operation, feed_dict={X: batch_x, Y: batch_y})
            epoch_cost += temp_cost

        costs.append(epoch_cost)

        print("Costs in epoch %i: %f" % (epoch, costs[-1]))
        if epoch % 5 == 0:
            validation_acc = evaluate(X_validate, Y_validate)
            print("Validation accuracy at epoch %i: %f" % (epoch, validation_acc))
            print()

    saver.save(sess, './lenet')
    print("Model saved!!!")

Thanks in advance.