I am trying to train a neural network with multiple hidden layers on the notMNIST dataset. It runs fine with a single hidden layer, but as soon as I add more hidden layers the loss becomes NaN. Here is the code I am using:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range

batch_size = 128
num_hidden = 1024
num_hidden2 = 300
num_hidden3 = 50
SEED = 1234567
keep_prob = 0.5

graph1 = tf.Graph()
with graph1.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden]))
    biases1 = tf.Variable(tf.zeros([num_hidden]))
    weights2 = tf.Variable(tf.truncated_normal([num_hidden, num_hidden2]))
    biases2 = tf.Variable(tf.zeros([num_hidden2]))
    weights3 = tf.Variable(tf.truncated_normal([num_hidden2, num_hidden3]))
    biases3 = tf.Variable(tf.zeros([num_hidden3]))
    weights4 = tf.Variable(tf.truncated_normal([num_hidden3, num_labels]))
    biases4 = tf.Variable(tf.zeros([num_labels]))

    # Training computation.
    l1 = tf.matmul(tf_train_dataset, weights1) + biases1
    h1 = tf.nn.relu(l1)
    h1 = tf.nn.dropout(h1, 0.5, seed=SEED)

    l2 = tf.matmul(h1, weights2) + biases2
    h2 = tf.nn.relu(l2)
    h2 = tf.nn.dropout(h2, 0.5, seed=SEED)

    l3 = tf.matmul(h2, weights3) + biases3
    h3 = tf.nn.relu(l3)
    h3 = tf.nn.dropout(h3, 0.5, seed=SEED)

    logits = tf.matmul(h3, weights4) + biases4
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(biases1) +
                    tf.nn.l2_loss(weights2) + tf.nn.l2_loss(biases2) +
                    tf.nn.l2_loss(weights3) + tf.nn.l2_loss(biases3) +
                    tf.nn.l2_loss(weights4) + tf.nn.l2_loss(biases4))
    # Add the regularization term to the loss.
    loss += 5e-4 * regularizers

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)

    v_l1 = tf.matmul(tf_valid_dataset, weights1) + biases1
    v_h1 = tf.nn.relu(v_l1)
    v_l2 = tf.matmul(v_h1, weights2) + biases2
    v_h2 = tf.nn.relu(v_l2)
    v_l3 = tf.matmul(v_h2, weights3) + biases3
    v_h3 = tf.nn.relu(v_l3)
    v_logits = tf.matmul(v_h3, weights4) + biases4
    valid_prediction = tf.nn.softmax(v_logits)

    t_l1 = tf.matmul(tf_test_dataset, weights1) + biases1
    t_h1 = tf.nn.relu(t_l1)
    t_l2 = tf.matmul(t_h1, weights2) + biases2
    t_h2 = tf.nn.relu(t_l2)
    t_l3 = tf.matmul(t_h2, weights3) + biases3
    t_h3 = tf.nn.relu(t_l3)
    t_logits = tf.matmul(t_h3, weights4) + biases4
    test_prediction = tf.nn.softmax(t_logits)

num_steps = 3001

with tf.Session(graph=graph1) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
This is the output I get:
Initialized
Minibatch loss at step 0: 48759.078125
Minibatch accuracy: 10.2%
Validation accuracy: 10.0%
Minibatch loss at step 500: nan
Minibatch accuracy: 9.4%
Validation accuracy: 10.0%
Minibatch loss at step 1000: nan
Minibatch accuracy: 8.6%
Validation accuracy: 10.0%
Minibatch loss at step 1500: nan
Minibatch accuracy: 11.7%
Validation accuracy: 10.0%
Minibatch loss at step 2000: nan
Minibatch accuracy: 6.2%
Validation accuracy: 10.0%
Minibatch loss at step 2500: nan
Minibatch accuracy: 10.2%
Validation accuracy: 10.0%
Minibatch loss at step 3000: nan
Minibatch accuracy: 7.8%
Validation accuracy: 10.0%
Test accuracy: 10.0%
Answer:
Try lowering the standard deviation of the weights. tf.truncated_normal defaults to stddev=1.0, which is far too large here: with 784 inputs per unit in the first layer, the initial pre-activations and logits are enormous (note the initial minibatch loss of 48759), and with a learning rate of 0.5 the updates blow the weights up until the loss turns to nan. This worked for me.
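As a minimal sketch, assuming the rest of the graph stays exactly as in the question, you could pass an explicit stddev to each tf.truncated_normal call. Scaling by each layer's fan-in (He-style initialization, a common choice for ReLU networks, and my own suggestion rather than something the answer above prescribes) would look like this:

import numpy as np
import tensorflow as tf

# Same variables as in the question, but with an explicit fan-in-scaled
# standard deviation instead of the default stddev=1.0.
weights1 = tf.Variable(tf.truncated_normal(
    [image_size * image_size, num_hidden],
    stddev=np.sqrt(2.0 / (image_size * image_size))))
weights2 = tf.Variable(tf.truncated_normal(
    [num_hidden, num_hidden2], stddev=np.sqrt(2.0 / num_hidden)))
weights3 = tf.Variable(tf.truncated_normal(
    [num_hidden2, num_hidden3], stddev=np.sqrt(2.0 / num_hidden2)))
weights4 = tf.Variable(tf.truncated_normal(
    [num_hidden3, num_labels], stddev=np.sqrt(2.0 / num_hidden3)))

For the first layer this gives a stddev of about 0.05. A fixed small constant such as stddev=0.1 for every layer also avoids the nan in practice; the key point is simply to keep it well below the default of 1.0.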