0
6
0
1. 云栖社区>
2. 专知>
3. 博客>
4. 正文

## 深度学习笔记8：利用Tensorflow搭建神经网络

# Loading the dataset (SIGNS dataset from the deeplearning.ai course).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Flatten each image into a column vector: (m, h, w, c) -> (h*w*c, m).
# NOTE(review): assumes images are 64x64x3 = 12288 features — confirm with load_dataset.
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

# Normalize pixel values to [0, 1].
X_train = X_train_flatten / 255.
X_test = X_test_flatten / 255.

# Convert training and test labels to one-hot matrices (6 classes).
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)

print("number of training examples = " + str(X_train.shape[1]))
print("number of test examples = " + str(X_test.shape[1]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))

LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

- 定义网络结构
- 初始化模型参数
- 执行前向计算 / 计算当前损失 / 执行反向传播 / 权值更新

def create_placeholders(n_x, n_y):
    """Create TF1 placeholders for the inputs and labels.

    Args:
        n_x: number of input features per example (rows of X).
        n_y: number of output classes (rows of Y).

    Returns:
        X: float32 placeholder of shape (n_x, None).
        Y: float32 placeholder of shape (n_y, None).
    """
    # The second dimension is None so any mini-batch size can be fed.
    X = tf.placeholder(tf.float32, shape=(n_x, None), name='X')
    Y = tf.placeholder(tf.float32, shape=(n_y, None), name='Y')
    return X, Y

def initialize_parameters():
    """Initialize the weights and biases for the 3-layer network.

    Layer sizes: 12288 -> 25 -> 12 -> 6.

    Returns:
        dict with keys "W1", "b1", "W2", "b2", "W3", "b3" mapping to
        tf.Variable tensors.
    """
    # Fixed seed so the Xavier initialization is reproducible.
    tf.set_random_seed(1)
    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    return parameters

def forward_propagation(X, parameters):
    """Implements the forward propagation for the model:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX.

    The softmax itself is NOT applied here; Z3 (the logits) is returned
    and the softmax is folded into the cost computation.

    Args:
        X: input placeholder of shape (12288, m).
        parameters: dict with "W1".."b3" as built by initialize_parameters.

    Returns:
        Z3: the output-layer linear unit, shape (6, m).
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)
    A2 = tf.nn.relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)
    return Z3
###### 计算损失函数

`Tensorflow` 中损失函数的计算要比手动搭建时方便很多，一行代码即可搞定：

def compute_cost(Z3, Y):
    """Compute the softmax cross-entropy cost.

    Args:
        Z3: logits from forward_propagation, shape (6, m).
        Y: one-hot labels placeholder, same shape as Z3.

    Returns:
        Scalar tensor: mean cross-entropy over the batch.
    """
    # softmax_cross_entropy_with_logits expects shape (m, num_classes),
    # so transpose from the (num_classes, m) layout used elsewhere.
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    return cost
###### 代码整合：执行反向传播和权值更新

def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """Train the 3-layer LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX net.

    Args:
        X_train: training inputs, shape (n_x, m_train).
        Y_train: one-hot training labels, shape (n_y, m_train).
        X_test: test inputs, shape (n_x, m_test).
        Y_test: one-hot test labels, shape (n_y, m_test).
        learning_rate: step size for gradient descent.
        num_epochs: number of passes over the training set.
        minibatch_size: examples per mini-batch.
        print_cost: if True, print the cost every 100 epochs and plot it.

    Returns:
        parameters: dict of trained parameter values (numpy arrays).
    """
    ops.reset_default_graph()       # rebuild the graph cleanly on re-runs
    tf.set_random_seed(1)
    seed = 3                        # seed for mini-batch shuffling
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []

    # Build the computation graph.
    X, Y = create_placeholders(n_x, n_y)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    # Backpropagation: plain gradient descent on the cost.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(num_epochs):
            epoch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            # New seed each epoch so the shuffling differs between epochs.
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={X: minibatch_X, Y: minibatch_Y})
                epoch_cost += minibatch_cost / num_minibatches

            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # Plot the training cost curve.
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Materialize the trained parameters as numpy arrays.
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Accuracy: predicted class is the argmax over the logits' class axis.
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

    return parameters

# Train the network on the loaded dataset and keep the learned parameters.
parameters = model(X_train, Y_train, X_test, Y_test)

●  `Tensorflow` 语法中两个基本的对象类是 Tensor 和 Operator.

●  `Tensorflow` 执行计算的基本步骤为

●  创建计算图（张量、变量和占位符变量等）
●  创建会话
●  初始化会话
●  在计算图中执行会话

+ 关注