Machine Learning – visualize and understand CNN with Mathematica

The famous 2013 article by Zeiler and Fergus, "Visualizing and Understanding Convolutional Networks", suggests a method to understand the behavior of a CNN by using one (or more) DeConv networks in conjunction with the original CNN.

The DeConv networks use a set of unpooling and deconvolutional layers to reconstruct the features in the input image that are responsible for activating a particular feature map in a given layer.

These, however, use "max location switches" to undo the max pooling step: if I understand correctly, this amounts to pairing each pooling layer with an argmax operation that records the positions from which the pooled maxima originated.

Unfortunately, PoolingLayer does not accept argmax as a pooling function option.

Is it possible to circumvent this restriction and implement the "max location switches"? Or is there another technique applicable in Mathematica to produce a visualization similar to the one proposed by Zeiler and Fergus, in order to understand which features activate a given layer?

[ Politics ] Open Question: Now that there is no Trump / Russian Collusion, what about the credibility of CNN, MSNBC, Adam Schiff, Eric Swalwell and other fools?

[ Politics ] Open Question: Now that there is no Trump / Russian Collusion, what about the credibility of CNN, MSNBC, Adam Schiff, Eric Swalwell and other fools? ,

How did CNN know that there would be a raid on Roger Stone's house? They even filmed the FBI raid !!?

Normally, the grand jury will meet on Friday. This week they met on Thursday. This indicated to CNN that there would likely be unusual activity in connection with an indictment.

So they staked out the houses of the people most likely on Mueller's list, and Roger Stone was one of them.

It's called "journalism." It takes thought, time and money. Maybe Fox News should practice journalism if they want to get those scoops.

,

python – LeNet CNN uses Tensorflow

Can someone help me find out where I'm going wrong? I can't figure it out.
My validation accuracy is 0.000 even after 100 epochs, and the cost barely changes from the start.

Notebook COLAB LINK

# --- Imports and MNIST data loading -----------------------------------------
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline  # notebook magic; has no effect in a plain .py script


from tensorflow.examples.tutorials.mnist import input_data

# reshape=False keeps each image as a 28x28x1 array instead of a flat
# 784-vector, which is what the convolutional layers below expect.
mnist = input_data.read_data_sets("MNIST_data/", reshape=False)

X_train, Y_train = mnist.train.images, mnist.train.labels
X_validate, Y_validate = mnist.validation.images, mnist.validation.labels
X_test, Y_test = mnist.test.images, mnist.test.labels

print("X_train shape:", X_train.shape)
print("Y_train shape:", Y_train.shape)
print("X_validate shape:", X_validate.shape)
print("Y_validate shape:", Y_validate.shape)
print("X_test shape:", X_test.shape)
print("Y_test shape:", Y_test.shape)


# Zero-pad the 28x28 images to 32x32, the input size LeNet was designed for.
X_train = np.pad(X_train, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=0)
X_validate = np.pad(X_validate, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=0)
X_test = np.pad(X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=0)

print("X_train shape:", X_train.shape)
print("Y_train shape:", Y_train.shape)
print("X_validate shape:", X_validate.shape)
print("Y_validate shape:", Y_validate.shape)
print("X_test shape:", X_test.shape)
print("Y_test shape:", Y_test.shape)



# --- Graph definition: placeholders, weights, forward pass, training ops ----
X = tf.placeholder(tf.float32, (None, 32, 32, 1))
Y = tf.placeholder(tf.int32, (None,))

# BUG in the original: tf.one_hot(Y, 1) encoded a single class, which makes
# the loss meaningless. MNIST has 10 classes.
output_y = tf.one_hot(Y, 10)


W1 = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=0, stddev=.1))
W2 = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=0, stddev=.1))

B1 = tf.Variable(tf.zeros(6))
B2 = tf.Variable(tf.zeros(16))


def feed_forward(X):
    """LeNet-style forward pass: two conv + avg-pool stages, then three
    fully connected layers. Returns the 10-way logits tensor."""
    Z1 = tf.nn.bias_add(tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='VALID'), B1)
    A1 = tf.nn.relu(Z1)

    P1 = tf.nn.avg_pool(A1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    Z2 = tf.nn.bias_add(tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='VALID'), B2)
    A2 = tf.nn.relu(Z2)

    P2 = tf.nn.avg_pool(A2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    F = tf.contrib.layers.flatten(P2)

    FC1 = tf.contrib.layers.fully_connected(F, 120, activation_fn=tf.nn.relu)
    FC2 = tf.contrib.layers.fully_connected(FC1, 84, activation_fn=tf.nn.relu)
    # The output layer must emit raw logits (activation_fn=None): applying
    # ReLU here (as in the original) zeroes out negative logits and breaks
    # softmax cross-entropy training.
    out = tf.contrib.layers.fully_connected(FC2, 10, activation_fn=None)

    # BUG in the original: a bare `return` returned None, so every op built
    # on the model output downstream was built on None.
    return out


model_op = feed_forward(X)

learning_rate = 0.001
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=output_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_operation = optimizer.minimize(loss_operation)


BATCH_SIZE = 128
predicted_op = tf.argmax(model_op, 1)
real_op = tf.argmax(output_y, 1)

correct_op = tf.equal(predicted_op, real_op)
# precision_op is the mean classification accuracy over a fed batch.
precision_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))



def eval(x_data, y_data):
    # NOTE(review): this name shadows the built-in `eval`; kept so the
    # training loop's call site still works.
    """Return classification accuracy over x_data/y_data, computed in
    mini-batches so large sets fit in memory.

    Uses the default TF session; weights the last (possibly short) batch
    by its actual size.
    """
    num_example = len(x_data)
    total_accuracy = 0

    sess = tf.get_default_session()
    for index in range(0, num_example, BATCH_SIZE):
        batch_x = x_data[index:index + BATCH_SIZE]
        batch_y = y_data[index:index + BATCH_SIZE]
        accuracy = sess.run(precision_op, feed_dict={X: batch_x, Y: batch_y})

        total_accuracy += accuracy * len(batch_x)

    return total_accuracy / num_example



from sklearn.utils import shuffle

print("Training ...")
saver = tf.train.Saver()

init = tf.global_variables_initializer()

with tf.Session() as sess:

    sess.run(init)

    EPOCHS = 10
    NUM_SAMPLE = len(X_train)
    costs = []

    for epoch in range(EPOCHS):
        # Reshuffle each epoch so batches differ between passes.
        X_train, Y_train = shuffle(X_train, Y_train)

        epoch_cost = 0
        for index in range(0, NUM_SAMPLE, BATCH_SIZE):
            batch_x = X_train[index:index + BATCH_SIZE]
            batch_y = Y_train[index:index + BATCH_SIZE]
            # BUG in the original: only loss_operation was run, so the
            # optimizer never executed and the weights never updated --
            # which is why validation accuracy stayed at 0.000.
            _, temp_cost = sess.run([training_operation, loss_operation],
                                    feed_dict={X: batch_x, Y: batch_y})
            epoch_cost += temp_cost

        costs.append(epoch_cost)

        print("Cost in epoch %i: %f" % (epoch, costs[-1]))
        if epoch % 5 == 0:
            validation_acc = eval(X_validate, Y_validate)
            print("Validation accuracy of epoch %i: %f" % (epoch, validation_acc))
        print()

    saver.save(sess, './lenet')
    print("Model saved!!!")

thank you in advance

Is the call of the President of the United States a liar? As Kellyanne Conway proposed on CNN?

It depends on whether it is true that Trump tells lies. When Trump was first elected, NPR went into detail on what exactly a "lie" is. Making a false statement can be a lie or not. A lie is when a person purposely makes a false statement in order to deceive someone in order to obtain a secondary gain. That's a lie. But if someone spits out false information without knowing that it is wrong or doing so for a secondary gain, that is not a lie. It's just wrong information instead.
For example, if Obama said that he would carry out his campaign in all 57 states, that is a false statement, but it is not a lie because Obama is misinformed about how many states the US actually has.
Cheers.

,

Python – Improving the accuracy of a Keras sound classification CNN

I'm trying to build a convolution network with Keras (Theano Backend), but I can not achieve accuracy over 33% when training in three classes. I would appreciate someone reading the code and helping me to improve the accuracy.

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import os
from matplotlib.image import imread
import pandas as pd

print("imports successful")

train_size = 3000
test_size = 750

batch_size = 10
num_classes = 3
epochs = 5

img_rows, img_cols = 1170, 580


def _load_class_images(folder, prefix, count):
    """Load `count` grayscale images named '<prefix> <i>.jpg' from `folder`.

    Only the first channel is kept (the original img[:, :, 0] slice).
    """
    images = []
    for i in range(count):
        img = imread(os.path.join(folder, "%s %d.jpg" % (prefix, i)))
        images.append(img[:, :, 0])
    return images


# Training data: 1000 images per class. The original break/continue loop
# filled Gravel first, then Water, then Road; that order is preserved here.
train_folder = "C:/Users/aeshon/Desktop/Data/DataAndLabels/CroppedTrainData"
TrainArray = []
for prefix in ("Gravel", "Water", "Road"):
    TrainArray.extend(_load_class_images(train_folder, prefix, 1000))
print("Train Array Synthesis Complete")
x_train = np.asarray(TrainArray)
del TrainArray  # free the Python list once the ndarray copy exists
print("Array Deleted!")

# Test data: 250 images per class, same class order.
test_folder = "C:/Users/aeshon/Desktop/Data/DataAndLabels/CroppedTestData"
TestArray = []
for prefix in ("Gravel", "Water", "Road"):
    TestArray.extend(_load_class_images(test_folder, prefix, 250))
print("Test array synthesis completed")
x_test = np.asarray(TestArray)
del TestArray
print("Array deleted!")

# Add the trailing channel axis that Conv2D expects.
x_train = x_train.reshape(train_size, 1170, 580, 1)
x_test = x_test.reshape(test_size, 1170, 580, 1)

print("x_train shape:", x_train.shape)

# NOTE(review): pixel values are never normalized; scaling inputs to [0, 1]
# is the usual first fix when accuracy sticks at chance level (~33% for
# three classes), as reported in the training log below.

TrainLabels = np.asarray(pd.read_csv('C:/Users/aeshon/Desktop/Data/DataAndLabels/TrainingLabelsCompressed.csv'))
y_train = keras.utils.to_categorical(TrainLabels.astype(int), num_classes)

TestLabels = np.asarray(pd.read_csv('C:/Users/aeshon/Desktop/Data/DataAndLabels/TestingLabelsCompressed.csv'))
y_test = keras.utils.to_categorical(TestLabels.astype(int), num_classes)
print("label formatting completed!")

print('Data configuration successful! Continue to model composition')

model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(1170, 580, 1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

print("model compiled!")

model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Persist architecture as JSON and weights as HDF5.
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("C:/Users/aeshon/Desktop/model.h5")
print("Saved model to disk")

Here you can see the training loop edition (5 epochs)

Train with 3000 samples, validate with 750 samples
Epoch 1/5
3000/3000 [==============================] - 10321s 3s / step - Loss: 10.5418 - acc: 0.3457 - val_loss: 10.7454 - val_acc: 0.3333
Epoch 2/5
3000/3000 [==============================] - 10165s 3s / step - Loss: 10.7615 - acc: 0.3323 - val_loss: 10.7454 - val_acc: 0.3333
Epoch 3/5
3000/3000 [==============================] - 10256s 3s / step - Loss: 10.5681 - acc: 0.3443 - val_loss: 10.7454 - val_acc: 0.3333
Epoch 4/5
3000/3000 [==============================] - 10591s 4s / step - Loss: 10.8213 - acc: 0.3283 - val_loss: 10.7454 - val_acc: 0.3333
Epoch 5/5
3000/3000 [==============================] - 10750s 4s / step - Loss: 10,7400 - acc: 0,3337 - val_loss: 10,7454 - val_acc: 0,3333
750/750 [==============================] - 367s 489ms / step
Test loss: 10.745396969795227
Test accuracy: 0.33333333333333333

I know this post is mainly code, but my question is a bit open. I've looked at other posts, including those, but I already use categorical cross entropy and softmax activation. All comments help!

Python – Flower Type Detection CNN (Tensorflow)

I've created a CNN for image recognition (flower types – 5 classes) and am now considering model parameter changes to improve accuracy. My model (five 3*3 conv + 2*2 max pooling layers) achieves ~59% accuracy on the test set over 20 epochs, and is outperformed by this Keras implementation with a single conv + max pooling layer (https://www.kaggle.com/jagadeeshkotra/flower-recognition-classification-with-keras). What factors could explain the relative underperformance of my model compared to the one-layer implementation? Code below.

I also implemented the 1-layer Keras implementation in Vanilla Tensorflow with the same model parameters (but AdamOptimizer instead of AdaDeltaOptimizer) and achieved an accuracy of ~ 44%, so the issues are related.

Record: https://www.kaggle.com/alxmamaev/flowers-recognition

'''
Multi-class classification of flower varieties with a CNN.
'''
import os
import numpy as np
import tensorflow as tf

# image_folders_dir is the location of folders containing images of
# different flower types, one folder per class.
image_folders_dir = 'C:/Users/squir/Dropbox/ML Projects/Kaggle/Flower Recognition/Flowers'

IMG_SIZE = 128  # shrink each image to this height and width
num_classes = 5
batch_size = 32
lr = 0.01  # learning rate
dropout_keep_rate = 0.95
epochs = 20  # number of complete passes over the data

MODEL_NAME = 'flowers-{}-{}.model'.format(lr, 'conv-basic')

print('Load existing preprocessed data for training (L) or preprocess data (P)?')
decision1 = input()
if decision1 == 'P':
    from preprocessing import create_data
    train_data, test_data = create_data(image_folders_dir, IMG_SIZE)
elif decision1 == 'L':
    if os.path.exists('train_data.npy'):
        train_data = np.load('train_data.npy')
        test_data = np.load('test_data.npy')
    else:
        raise Exception('There is no preprocessed data in the path, please preprocess some.')
else:
    raise Exception('Please try again and enter L or P')

# Each record is [image_data, class_label]:
#   image_data  = IMG_SIZE x IMG_SIZE x 3 array (BGR order from cv2.imread
#                 -- presumably; confirm against the preprocessing module)
#   class_label = one-hot vector of length num_classes
# 4321 records total (3889 train, 432 test), shuffled at random.
train_data_imgs = [item[0] for item in train_data]
train_data_lbls = [item[1] for item in train_data]
test_data_imgs = [item[0] for item in test_data]
test_data_lbls = [item[1] for item in test_data]

# Build the arrays the model consumes.
X_train = np.array(train_data_imgs).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
Y_train = train_data_lbls
x_valid = np.array(test_data_imgs).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
y_valid = test_data_lbls

# NOTE(review): inputs are not normalized; worth confirming whether scaling
# to [0, 1] was intended before training.

'''model execution (training or loading)'''
x = tf.placeholder('float', [None, IMG_SIZE, IMG_SIZE, 3])
y = tf.placeholder('float')


def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output keeps the
    spatial size of the input)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def maxpool2D(x):
    """2x2 max pooling with stride 2, so pooling windows do not overlap."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def conv_NN_model(x, num_classes, img_size, keep_rate):
    """Five conv + max-pool stages, one fully connected layer with dropout,
    and a final linear layer producing `num_classes` logits.

    3x3 kernels; input depth 3 (RGB image); output depth doubles each stage.
    """
    weights = {'W_conv1': tf.Variable(tf.random_normal([3, 3, 3, 32])),
               'W_conv2': tf.Variable(tf.random_normal([3, 3, 32, 64])),
               'W_conv3': tf.Variable(tf.random_normal([3, 3, 64, 128])),
               'W_conv4': tf.Variable(tf.random_normal([3, 3, 128, 256])),
               # After five 2x2 poolings, 128 -> 4, so the conv output
               # flattens to 4 * 4 * 512 features.
               'W_conv5': tf.Variable(tf.random_normal([3, 3, 256, 512])),
               'W_fc': tf.Variable(tf.random_normal([4 * 4 * 512, 1024])),
               'out': tf.Variable(tf.random_normal([1024, num_classes]))}

    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_conv3': tf.Variable(tf.random_normal([128])),
              'b_conv4': tf.Variable(tf.random_normal([256])),
              'b_conv5': tf.Variable(tf.random_normal([512])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([num_classes]))}

    x = tf.reshape(x, shape=[-1, img_size, img_size, 3])

    # DEFECT in the original: the conv biases were declared but never added,
    # and no non-linearity followed the convolutions -- five linear convs
    # collapse into one, a plausible cause of the underperformance versus
    # the one-layer Keras model. Bias + ReLU restored here.
    conv1 = maxpool2D(tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1']))
    conv2 = maxpool2D(tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2']))
    conv3 = maxpool2D(tf.nn.relu(conv2d(conv2, weights['W_conv3']) + biases['b_conv3']))
    conv4 = maxpool2D(tf.nn.relu(conv2d(conv3, weights['W_conv4']) + biases['b_conv4']))
    conv5 = maxpool2D(tf.nn.relu(conv2d(conv4, weights['W_conv5']) + biases['b_conv5']))

    fc = tf.reshape(conv5, [-1, 4 * 4 * 512])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])

    fc = tf.nn.dropout(fc, keep_prob=keep_rate)

    # Raw logits; softmax is applied by the loss function.
    output = tf.matmul(fc, weights['out']) + biases['out']

    return output


def next_batch(num, data, labels):
    '''
    Return a total of `num` random samples and their matching labels.

    Samples are drawn without replacement by shuffling the index range and
    taking the first `num` positions, so data/label pairing is preserved.
    '''
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)


def train_neural_network(x, num_epochs, train_data, batch_size, train_imgs, train_lbls, test_imgs, test_lbls):
    """Build the model, train it with Adam on random mini-batches, and
    print the held-out accuracy at the end."""
    prediction = conv_NN_model(x=x,
                               num_classes=num_classes,
                               img_size=IMG_SIZE,
                               keep_rate=dropout_keep_rate)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))

    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(num_epochs):
            epoch_loss = 0
            for _ in range(int(len(train_data) / batch_size)):
                epoch_x, epoch_y = next_batch(batch_size, train_imgs, train_lbls)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

            print('Epoch', epoch, 'completed out of', num_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_imgs, y: test_lbls}))


train_neural_network(x=x,
                     num_epochs=epochs,
                     train_data=train_data,
                     batch_size=batch_size,
                     train_imgs=train_data_imgs,
                     train_lbls=train_data_lbls,
                     test_imgs=test_data_imgs,
                     test_lbls=test_data_lbls)



'''Preprocessing module.

Data arrives in 5 folders, one per class, ~800 images per folder.
Task: create two .npy files of image data and labels with approximately
equal distribution of flower types. Train = 90%, test = 10%.
Each record is a 2-element list: [0] = image data, [1] = one-hot class label.
'''
import os
import cv2
import numpy as np
from tqdm import tqdm
from random import shuffle


def create_data(folder_dir, img_size):
    """Read every class folder under `folder_dir`, resize each image to
    img_size x img_size, shuffle, split into train/test, save both as .npy
    files, and return (training_data, testing_data)."""
    data = []  # complete data set with one-hot labels
    for fold in os.listdir(folder_dir):
        # One-hot encode the class label from the folder name.
        if fold == 'daisy':
            label = [1, 0, 0, 0, 0]
        elif fold == 'dandelion':
            label = [0, 1, 0, 0, 0]
        elif fold == 'rose':
            label = [0, 0, 1, 0, 0]
        elif fold == 'sunflower':
            label = [0, 0, 0, 1, 0]
        elif fold == 'tulip':
            label = [0, 0, 0, 0, 1]
        else:
            # DEFECT in the original: an unknown folder silently reused the
            # previous label (or raised NameError on the first iteration).
            continue
        for img in tqdm(os.listdir(os.path.join(folder_dir, str(fold)))):
            path = os.path.join(folder_dir, fold, img)
            img = cv2.resize(cv2.imread(path, 1), (img_size, img_size))
            data.append([np.array(img), label])
    shuffle(data)
    training_data = data[:4105]
    testing_data = data[4105:]
    np.save('train_data.npy', training_data)
    np.save('test_data.npy', testing_data)
    return training_data, testing_data