This is the 16th day of my participation in the August More Text Challenge. For details, see: August More Text Challenge

Session Control

A Session is what TensorFlow uses to execute the computation graph and return its outputs. Call session.run() to get the result you want.

import tensorflow as tf
# session Session control

tf.compat.v1.disable_eager_execution()   # Ensure that sess.run() works properly
matrix1 = tf.constant([[3, 3]])   # Create two matrices
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)    # matrix multiplication --> np.dot(m1, m2)
# Method 1
sess = tf.compat.v1.Session()
result = sess.run(product)
print(result)   # [[12]] the result of the matrix multiplication
sess.close()

# Method 2
# with tf.compat.v1.Session() as sess:
#     result = sess.run(product)
#     print(result)

Variable definition

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
state = tf.Variable(0,name='counter')  # Variable variables
# print(state.name) # result=counter:0
one = tf.constant(1)  # plus constant

new_value = tf.add(state,one)
update = tf.compat.v1.assign(state, new_value)   # Update the value of the variable

init = tf.compat.v1.global_variables_initializer()    # Create an op that initializes (activates) all variables
with tf.compat.v1.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)  # Update the value in this function every time
        print(sess.run(state))  # state has to be run inside the session, otherwise it will not be printed
        # 1
        # 2
        # 3

Placeholder control input

import tensorflow as tf

# input1 = tf.compat.v1.placeholder(tf.float32, [2, 2])  # a placeholder can also be given an explicit [2, 2] shape
tf.compat.v1.disable_eager_execution()
input1 = tf.compat.v1.placeholder(tf.float32)
input2 = tf.compat.v1.placeholder(tf.float32)

output = tf.compat.v1.multiply(input1, input2)  # multiplication
with tf.compat.v1.Session() as sess:
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))  # 14
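A placeholder can also be constrained to a fixed shape, as the commented-out line above hints. A minimal, self-contained sketch assuming a 2x2 matrix (the name input3 is just for illustration):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
input3 = tf.compat.v1.placeholder(tf.float32, [2, 2])  # only 2x2 tensors may be fed here
double = input3 * 2
with tf.compat.v1.Session() as sess:
    print(sess.run(double, feed_dict={input3: [[1., 2.], [3., 4.]]}))  # [[2. 4.] [6. 8.]]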

Activation_Function (activation function)

An activation function adds a nonlinear factor, so the network can solve problems that a purely linear model cannot.
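A minimal sketch of what that nonlinear factor looks like, using tf.nn.relu and tf.nn.sigmoid as example activation functions (the input values are chosen just for illustration):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
z = tf.constant([[-2.0, -0.5, 0.0, 0.5, 2.0]])
with tf.compat.v1.Session() as sess:
    print(sess.run(tf.nn.relu(z)))     # [[0.  0.  0.  0.5 2. ]]  negative values are clipped to 0
    print(sess.run(tf.nn.sigmoid(z)))  # every value is squashed into (0, 1)

The full network below uses tf.nn.relu for its hidden layer.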

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

tf.compat.v1.disable_eager_execution()

def add_layer(inputs, in_size, out_size, n_layer, activation_function):
    # Add a neural layer (hidden layer); stacking these builds up the network
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # Define a matrix, randomly define parameters, initial values
            tf.summary.histogram(layer_name+'/weights',Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
            tf.summary.histogram(layer_name+'/biases',biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs*Weights + biases
        if activation_function is None:
            outputs = Wx_plus_b  # This is a linear equation, so there is no need to add a nonlinear activation function
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name+'/outputs',outputs)
        return outputs


x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)  # 300 evenly spaced points in [-1, 1]; np.newaxis turns the vector into a column matrix
# x_data is now a [300, 1] matrix (300 rows, 1 column), used as the input layer
# x_data=tf.cast(tf.float32,x_data)
noise = np.random.normal(0, 0.05, x_data.shape)  # Manually add noise with mean 0 and standard deviation 0.05
# noise=tf.cast(tf.float32,noise)
y_data = np.square(x_data) - 0.5 + noise
# y_data=tf.cast(tf.float32,y_data)

with tf.name_scope('input'):  # input layer
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')  # placeholder for the input data; None means any number of samples can be passed in
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')  # still a matrix: the number of rows is unknown, 1 column

l1 = add_layer(xs, 1, 10, 1, tf.nn.relu)
# the first hidden layer, with weights of shape [1, 10]
prediction = add_layer(l1, 10, 1, 2, None)
# the output layer, with weights of shape [10, 1], producing one output per sample

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))  # mean squared error
    tf.summary.scalar('loss', loss)


with tf.name_scope('train'):
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)
    # Use gradient descent with a learning rate of 0.1 to minimize the loss

init = tf.compat.v1.global_variables_initializer()

# Random gradient descent
fig = plt.figure()  # First create a figure
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)  # Generate the original image
plt.ion()
plt.show()


with tf.compat.v1.Session() as sess:
    writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # one training step on the full data set
        if i % 50 == 0:
            # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            try:
                ax.lines.remove(lines[0])  # remove the previously drawn line before redrawing
            except Exception:  # there is no line yet on the first iteration
                pass
            prediction_value = sess.run(prediction, feed_dict={xs: x_data, ys: y_data})
            lines = ax.plot(x_data, prediction_value, 'r-', lw=5)  # plot the predicted curve
            plt.pause(0.1)

# loss_function is getting smaller and smaller, so it's always learning to reduce errors
# 1.9184123
# 0.053955305
# 0.03053456
# 0.017190851
# 0.010993273
# 0.008209449
# 0.0067526144
# 0.0058726957
# 0.005269445
# 0.00477808
# 0.0044394922
# 0.0041766805
# 0.0039696493
# 0.003815
# 0.0036952242
# 0.0036034652
# 0.0035240129
# 0.0034543637
# 0.0033897285
# 0.0033306282

# Tips: Error may be reported when space is running out

TensorBoard

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


tf.compat.v1.disable_eager_execution()


def add_layer(inputs, in_size, out_size, n_layer, activation_function):  # Add a neural layer; stacking these builds up the network
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # Define a matrix, randomly define parameters, initial values
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs*Weights + biases
        if activation_function is None:
            outputs = Wx_plus_b  # This is a linear equation, so there is no need to add a nonlinear activation function
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs



x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)  # 300 evenly spaced points in [-1, 1]; np.newaxis turns the vector into a column matrix
# x_data is now a [300, 1] matrix (300 rows, 1 column), used as the input layer
# x_data=tf.cast(tf.float32,x_data)
noise = np.random.normal(0, 0.05, x_data.shape)  # Manually add noise with mean 0 and standard deviation 0.05
# noise=tf.cast(tf.float32,noise)
y_data = np.square(x_data) - 0.5 + noise
# y_data=tf.cast(tf.float32,y_data)

with tf.name_scope('input'):  # input layer
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')  # placeholder for the input data; None means any number of samples can be passed in
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')  # still a matrix: the number of rows is unknown, 1 column

l1 = add_layer(xs, 1, 10, 1, tf.nn.relu)
# the first hidden layer, with weights of shape [1, 10]
prediction = add_layer(l1, 10, 1, 2, None)
# the output layer, with weights of shape [10, 1], producing one output per sample

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))  # mean squared error
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)
    # Use gradient descent with a learning rate of 0.1 to minimize the loss

init = tf.compat.v1.global_variables_initializer()

with tf.compat.v1.Session() as sess:
    writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    merged = tf.compat.v1.summary.merge_all()
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # one training step on the full data set
        if i % 50 == 0:
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})  # run only the merged summary op; train_step already ran above
            writer.add_summary(result, i)

# Tips: Error may be reported when space is running out
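After running this script, the event files land in the logs/ directory. To view the graph, the histograms and the loss curve, launch TensorBoard from the command line with `tensorboard --logdir logs` and open the address it prints (typically http://localhost:6006).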

Classification (classifier)

Take recognizing handwritten digits (MNIST) as an example.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)


def add_layer(inputs, in_size, out_size, activation_function):  # Add a neural layer
    Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # Define a weight matrix with random initial values
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
    Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs*Weights + biases
    if activation_function is None:
        outputs = Wx_plus_b  # This is a linear equation, so there is no need to add a nonlinear activation function
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))  # compare the predicted class with the true class
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result


tf.compat.v1.disable_eager_execution()
xs = tf.compat.v1.placeholder(tf.float32, [None, 784])  # 28*28 pixels flattened
ys = tf.compat.v1.placeholder(tf.float32, [None, 10])

prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.math.log(prediction), axis=1))  # Cross entropy loss function

train_step = tf.compat.v1.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.initialize_all_variables())
for i in range(1000):
    batch_xs, batch_ys = mnist_data.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})  # Learn 1000 times
    if i % 50 == 0:
        print(compute_accuracy(mnist_data.test.images, mnist_data.test.labels))

sess.close()

OverFitting (overfitting)

Performing well on the training set but poorly on the test set; for example, strong within your own circle but weak everywhere else.

Solve overfitting

1. Increase the amount of data. With enough data covering enough cases, overfitting is reduced.

2. Use regularization. For y = W*x + b, add a penalty on the weights W to the cost:

L1 regularization:


cost = (W*x - Realy)^2 + abs(W)

L2 regularization:


cost = (W*x - Realy)^2 + W^2

L3 → the cube of the weights, and so on for higher powers.

Dropout regularization: randomly discard neurons during training so the network does not come to rely on any particular one (see the sketch below).
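A minimal sketch, in the same compat.v1 style as the rest of this post, of what an L2 penalty and dropout look like in code (the layer sizes, the 0.01 penalty weight and the keep_prob values are illustrative, not from the original):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
xs = tf.compat.v1.placeholder(tf.float32, [None, 1])
ys = tf.compat.v1.placeholder(tf.float32, [None, 1])
keep_prob = tf.compat.v1.placeholder(tf.float32)  # probability of keeping a neuron

W = tf.Variable(tf.random.normal([1, 1]))
b = tf.Variable(tf.zeros([1]))
hidden = tf.compat.v1.nn.dropout(tf.matmul(xs, W) + b, keep_prob=keep_prob)  # randomly drop units while training

mse = tf.reduce_mean(tf.square(ys - hidden))      # the (W*x - Realy)^2 term
l2_penalty = 0.01 * tf.reduce_sum(tf.square(W))   # the + W^2 term; L1 would use tf.abs(W) instead
loss = mse + l2_penalty
train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)
# At training time feed keep_prob=0.5 (for example); at test time feed keep_prob=1.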

CNN convolutional neural networks

Convolution compresses an RGB image: the length and width are squeezed smaller while the height (the channel depth) increases.

Finally, the thickened feature map is flattened and fed into a classifier.
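A minimal sketch of that shrinking/thickening process, using plain Python to track the shapes the network below produces (assuming SAME padding and 2x2 pooling with stride 2):

# Shape bookkeeping for the MNIST network below.
h = w = 28
depth = 1
for out_depth in (32, 64):      # two conv + pool blocks
    depth = out_depth           # SAME-padded convolution keeps h and w; only the depth changes
    h, w = h // 2, w // 2       # 2x2 max pooling with stride 2 halves h and w
    print(h, w, depth)          # 14 14 32, then 7 7 64
print(h * w * depth)            # 7*7*64 = 3136, the input size of the first fully connected layer

The full network below follows exactly this progression.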

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

graph = tf.compat.v1.get_default_graph()

tf.compat.v1.disable_eager_execution()
xs = tf.compat.v1.placeholder(tf.float32, [None, 784])  # 28*28 pixels flattened
ys = tf.compat.v1.placeholder(tf.float32, [None, 10])
keep_prob = tf.compat.v1.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])  # 28*28 pixels, black and white, so the number of channels is 1
# print(x_image.shape)  # [n_samples, 28, 28, 1]



def add_layer(inputs, in_size, out_size, activation_function):  # Add a neural layer
    Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # Define a weight matrix with random initial values
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
    Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs*Weights + biases
    if activation_function is None:
        outputs = Wx_plus_b  # This is a linear equation, so there is no need to add a nonlinear activation function
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))  # compare the predicted class with the true class
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result


# Initialize weight bias
def weight_variable(shape):
    initial = tf.compat.v1.truncated_normal(shape, stddev=0.1)  # stddev Standard deviation
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.compat.v1.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):  # strides = [batch_size, width, height, channels]
    # [1, x_movement, y_movement, 1]
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')  # stride 1 in both spatial directions


def max_pool_2x2(x):
    # [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 2x2 max pooling with stride 2


## conv1 layer
W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5*5, in_size 1 (input channel), out_size 32 (output depth)
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28*28*32
h_pool1 = max_pool_2x2(h_conv1)  # output size 14*14*32 (with SAME padding, output size = input size / stride)

## conv2 layer
W_conv2 = weight_variable([5, 5, 32, 64])  # the feature map gets thicker (deeper)
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14*14*64
h_pool2 = max_pool_2x2(h_conv2)  # output size 7*7*64

## func1 layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])  # a fully connected layer with 1024 neurons
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Flattens the original shape of h_pool2 from the shape of [n_samples,7,7,64]---->[n_samples,7*7*64]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
f_fc1_drop = tf.compat.v1.nn.dropout(h_fc1, keep_prob)  # dropout; keep_prob is the probability of keeping a neuron

## func2 layer
W_fc2 = weight_variable([1024, 10])  # map the 1024 neurons to the 10 classes
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.nn.relu(tf.matmul(f_fc1_drop, W_fc2) + b_fc2))
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.math.log(prediction), axis=1))  # Cross entropy loss function

train_step = tf.compat.v1.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Configure the GPU so that it does not run out of memory
gpu_no = '0'  # or '1'
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_no
# Define TensorFlow configuration
config = tf.compat.v1.ConfigProto()
# Configure GPU memory allocation mode, increase on demand, is critical
config.gpu_options.allow_growth = True
# Configure the ratio of video memory available
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# Pass config as parameter when creating session
sess = tf.compat.v1.InteractiveSession(config=config)

sess.run(tf.compat.v1.initialize_all_variables())
for i in range(1000):
    batch_xs, batch_ys = mnist_data.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist_data.test.images, mnist_data.test.labels))

sess.close()


Saver (save and restore)

import tensorflow as tf
import numpy as np

tf.compat.v1.disable_eager_execution()
# W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
# init = tf.compat.v1.initialize_all_variables()
# saver = tf.compat.v1.train.Saver()
# with tf.compat.v1.Session() as sess:
#     sess.run(init)
#     save_path = saver.save(sess, "my_net/save_net.ckpt")
#     print("Save to path:", save_path)

W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name='weights')
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name='biases')

saver = tf.compat.v1.train.Saver()
with tf.compat.v1.Session() as sess:
    saver.restore(sess,"my_net/save_net.ckpt")
    print("weights:",sess.run(W))
    print("biases:",sess.run(b))
# weights: [[1. 2. 3.]
#           [3. 4. 5.]]
# biases: [[1. 2. 3.]]

RNN recurrent neural network

Enhanced neural network

Each output is a summary of everything that came before: the network remembers what it has already seen.

In other words, states build up over time: processed in time order, each output carries the memory of the previous steps.

During backpropagation through the recurrence, if the error signal is repeatedly multiplied by a factor less than 1 it keeps shrinking; this is called gradient vanishing.

If the factor is greater than 1, the error keeps accumulating step after step, which results in gradient explosion.

Either way, by the time you propagate back through many steps, the signal no longer reflects the earliest states.
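A minimal numeric sketch of why this happens, multiplying an error signal by the same factor over 50 time steps (the factors 0.9 and 1.1 are just illustrative):

factor_small, factor_large = 0.9, 1.1
steps = 50
print(factor_small ** steps)  # about 0.005: the gradient all but vanishes
print(factor_large ** steps)  # about 117: the gradient explodes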

To solve these problems:

LSTM model (Long Short-Term Memory):

Gate units control whether the input at a given time step should be written down or set aside.

A. Write B. Read C. Forget

The gates keep side storylines from derailing the main storyline, which in the end decides whether the outcome is HappyEnd or BadEnd.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

graph = tf.compat.v1.get_default_graph()
tf.compat.v1.disable_eager_execution()

lr = 0.001
training_iters = 10000
batch_size = 128
display_step = 10

n_input = 28  # MNIST data input
n_step = 28  # number of time steps
n_hidden_unis = 128  # Number of hidden layer neurons
n_classes = 10  # MNIST classes (digits 0-9)

x = tf.compat.v1.placeholder(tf.float32, [None, n_step, n_input])  # 28*28
y = tf.compat.v1.placeholder(tf.float32, [None, n_classes])

keep_prob = tf.compat.v1.placeholder(tf.float32)

weights = {
    'in': tf.Variable(tf.random.normal([n_input, n_hidden_unis])),  # (28,128)
    'out': tf.Variable(tf.random.normal([n_hidden_unis, n_classes]))  # (128,10)
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_unis, ])),  # (128,)
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))  # (10,)
}


def RNN(X, weights, biases):
    # Hide the structure entered by the layer
    # X (128 batch,28 step,28 Input)
    # ==> (128* 28,28 input)
    X = tf.reshape(X, [-1, n_input])
    # X_in ==> (128batch * 28 steps,128 hidden)
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # X_in ==> (128Batch, 28steps, 128 hidden)
    X_in = tf.reshape(X_in, [-1, n_step, n_hidden_unis])

    # cell
    lstm_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(n_hidden_unis, forget_bias=1.0, state_is_tuple=True)
    # the LSTM state is a tuple: (c_state, m_state)
    _init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
    outputs, states = tf.compat.v1.nn.dynamic_rnn(lstm_cell, X_in, initial_state=_init_state, time_major=False)
    # Outputs is a list
    results = tf.matmul(states[1], weights['out']) + biases['out']  # states[1] is m_state (the side storyline / output); states[0] is c_state (the main storyline)
    return results


prediction = RNN(x, weights, biases)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))  # Cross entropy loss function
train_op = tf.compat.v1.train.AdamOptimizer(lr).minimize(cross_entropy)

correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Configure the GPU so that it does not run out of memory
gpu_no = '0'  # or '1'
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_no
# Define TensorFlow configuration
config = tf.compat.v1.ConfigProto()
# Configure GPU memory allocation mode, increase on demand, is critical
config.gpu_options.allow_growth = True
# Configure the ratio of video memory available
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# Pass config as parameter when creating session
sess = tf.compat.v1.InteractiveSession(config=config)
sess.run(tf.compat.v1.initialize_all_variables())
step = 0
while step * batch_size < training_iters:
    batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
    batch_xs = batch_xs.reshape([batch_size, n_step, n_input])  # 28*28
    sess.run([train_op], feed_dict={
        x: batch_xs,
        y: batch_ys
    })
    if step % 20 == 0:
        print(sess.run(accuracy, feed_dict={
            x: batch_xs,
            y: batch_ys
        }))
    step += 1
sess.close()

Autoencoder (unsupervised learning)

An autoencoder compresses the input and then decompresses (reconstructs) it. It is called unsupervised learning because it only needs the input data itself; compressing first also keeps the amount of information to process from slowing everything down.

In general, you only need to encode the input data X and then decode it back; no labels are required.
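A minimal sketch in the same compat.v1 style, with hypothetical sizes (784 -> 128 -> 784) and random stand-in data, just to show the encode/decode idea:

import tensorflow as tf
import numpy as np

tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, [None, 784])  # only the input X is needed, no labels

# encode: compress 784 -> 128
W_enc = tf.Variable(tf.random.normal([784, 128], stddev=0.1))
b_enc = tf.Variable(tf.zeros([128]))
code = tf.nn.relu(tf.matmul(x, W_enc) + b_enc)

# decode: reconstruct 128 -> 784
W_dec = tf.Variable(tf.random.normal([128, 784], stddev=0.1))
b_dec = tf.Variable(tf.zeros([784]))
recon = tf.nn.sigmoid(tf.matmul(code, W_dec) + b_dec)

loss = tf.reduce_mean(tf.square(recon - x))  # compare the reconstruction with the input itself
train_step = tf.compat.v1.train.AdamOptimizer(0.001).minimize(loss)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    batch = np.random.rand(32, 784).astype(np.float32)  # stand-in data for illustration
    for _ in range(100):
        _, l = sess.run([train_step, loss], feed_dict={x: batch})
    print(l)  # the reconstruction error should shrink as training proceeds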

The above is the knowledge I have learned about TF at the current stage.