I am training a custom neural network and want its output to always stay between two arbitrary constants: [lower_bound, upper_bound]. What are the best practices for encoding this constraint in the loss function?
Below is a minimal working example that builds and trains a neural network on generated data. It also adds an arbitrary constraint to the loss being optimized, namely that the output should lie in [lower_bound, upper_bound] = [-0.5, 0.75]. My attempt is relatively crude: I find every instance where a prediction falls outside the bounds and simply assign those entries a large loss value (and zero when the prediction is within the given bounds):
lower_bound = -0.5 # preset lower bound on the output
upper_bound = 0.75 # preset upper bound on the output
cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
cond_v1_2 = tf.less(self.v1_pred[:,0], lower_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))
Is there a better way, or a better loss function, to encode this constraint when training the network? Perhaps a smoother loss that is easier for the optimizer to handle, and/or changes to my code itself? Any comments or further ideas on how to penalize/train the network in the code below, given the output bounds, would be much appreciated.
import numpy as np
import tensorflow as tf
end_it = 1000 # number of training iterations
frac_train = 1.0 # fraction of the data randomly sampled to create the training set
frac_sample_train = 0.01 # fraction of the training set randomly sampled for each training batch
layers = [2, 20, 20, 20, 1]
# Generate training data
len_data = 10000
x_x = np.array([np.linspace(0.,1.,len_data)])
x_y = np.array([np.linspace(0.,1.,len_data)])
y_true = np.array([np.linspace(-0.2,0.2,len_data)])
N_train = int(frac_train*len_data)
idx = np.random.choice(len_data, N_train, replace=False)
x_train = x_x.T[idx,:]
y_train = x_y.T[idx,:]
v1_train = y_true.T[idx,:]
sample_batch_size = int(frac_sample_train*N_train)
np.random.seed(1234)
tf.set_random_seed(1234)
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
tf.logging.set_verbosity(tf.logging.ERROR)
class NeuralNet:
    def __init__(self, x, y, v1, layers):
        X = np.concatenate([x, y], 1)
        self.lb = X.min(0)
        self.ub = X.max(0)
        self.X = X
        self.x = X[:,0:1]
        self.y = X[:,1:2]
        self.v1 = v1
        self.layers = layers
        self.weights_v1, self.biases_v1 = self.initialize_NN(layers)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=False, log_device_placement=False))
        self.x_tf = tf.placeholder(tf.float32, shape=[None, self.x.shape[1]])
        self.y_tf = tf.placeholder(tf.float32, shape=[None, self.y.shape[1]])
        self.v1_tf = tf.placeholder(tf.float32, shape=[None, self.v1.shape[1]])
        self.v1_pred = self.net(self.x_tf, self.y_tf)
        lower_bound = -0.5 # preset lower bound on the output
        upper_bound = 0.75 # preset upper bound on the output
        cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
        cond_v1_2 = tf.less(self.v1_pred[:,0], lower_bound*tf.ones(tf.shape(self.v1_pred[:,0])))
        self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
        self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
        self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))
        self.loss_data = tf.reduce_mean(tf.square(self.v1_tf - self.v1_pred))
        self.loss = self.loss_cond + self.loss_data
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, var_list=self.weights_v1+self.biases_v1, method = 'L-BFGS-B', options = {'maxiter': 50, 'maxfun': 50000, 'maxcor': 50, 'maxls': 50, 'ftol' : 1.0 * np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam_v1 = self.optimizer_Adam.minimize(self.loss, var_list=self.weights_v1+self.biases_v1)
        init = tf.global_variables_initializer()
        self.sess.run(init)
    def initialize_NN(self, layers):
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b)
        return weights, biases
    def xavier_init(self, size):
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
    def neural_net(self, X, weights, biases):
        num_layers = len(weights) + 1
        H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
        for l in range(0,num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)
        return Y
    def net(self, x, y):
        v1_out = self.neural_net(tf.concat([x,y], 1), self.weights_v1, self.biases_v1)
        v1 = v1_out[:,0:1]
        return v1
    def callback(self, loss):
        global Nfeval
        print(str(Nfeval)+' - Loss in loop: %.3e' % (loss))
        Nfeval += 1
    def fetch_minibatch(self, x_in, y_in, v1_in, N_train_sample):
        idx_batch = np.random.choice(len(x_in), N_train_sample, replace=False)
        x_batch = x_in[idx_batch,:]
        y_batch = y_in[idx_batch,:]
        v1_batch = v1_in[idx_batch,:]
        return x_batch, y_batch, v1_batch
    def train(self, end_it):
        it = 0
        while it < end_it:
            x_res_batch, y_res_batch, v1_res_batch = self.fetch_minibatch(self.x, self.y, self.v1, sample_batch_size) # fetch a residual mini-batch
            tf_dict = {self.x_tf: x_res_batch, self.y_tf: y_res_batch, self.v1_tf: v1_res_batch}
            self.sess.run(self.train_op_Adam_v1, tf_dict)
            self.optimizer.minimize(self.sess, feed_dict = tf_dict, fetches = [self.loss], loss_callback = self.callback)
            it = it + 1
    def predict(self, x_star, y_star):
        tf_dict = {self.x_tf: x_star, self.y_tf: y_star}
        v1_star = self.sess.run(self.v1_pred, tf_dict)
        return v1_star
model = NeuralNet(x_train, y_train, v1_train, layers)
Nfeval = 1
model.train(end_it)
Answer:
In my opinion, the best way to enforce this constraint is through the output activation function. We can use tf.nn.sigmoid as a basis, which is bounded between [0, 1], and shift and scale it slightly.
def bounded_output(x, lower, upper):
    scale = upper - lower
    return scale * tf.nn.sigmoid(x) + lower
In your case, call it with lower=-0.5 and upper=0.75. This shifts the sigmoid so that its lowest output is -0.5, and scales it so that its range is 0.75 + 0.5 = 1.25, which puts the upper limit at 0.75. Adding this output activation at the last layer of the network means the output can never leave this range.
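For example, in the neural_net method from the question, the only change would be to wrap the final linear layer (a minimal sketch, assuming the [-0.5, 0.75] bounds from the question are hard-coded there):

# Same forward pass as in the question, but the last layer is passed
# through bounded_output so every prediction lies in [-0.5, 0.75].
def neural_net(self, X, weights, biases):
    num_layers = len(weights) + 1
    H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
    for l in range(0,num_layers-2):
        W = weights[l]
        b = biases[l]
        H = tf.tanh(tf.add(tf.matmul(H, W), b))
    W = weights[-1]
    b = biases[-1]
    # bounded activation instead of a raw linear output
    Y = bounded_output(tf.add(tf.matmul(H, W), b), lower=-0.5, upper=0.75)
    return Y

With the bound enforced by the architecture itself, the loss_cond penalty term can in principle be dropped and only loss_data optimized.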
One caveat: this can produce poor gradients, because the function saturates as it approaches the limits. So if your network produces outputs near those limits, the gradients will be small and learning may slow down.
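If that saturation is a concern, the smoother penalty you asked about is another option: keep the linear output and replace the hard 100000.0 step with a hinge-style term that is zero inside the bounds and grows quadratically outside them, so the optimizer always gets a gradient pointing back toward the allowed range. A rough sketch (the default weight of 100.0 is just a placeholder to tune):

# Quadratic hinge penalty: zero for predictions inside [lower, upper],
# weighted squared distance to the nearest bound outside it.
def bound_penalty(pred, lower, upper, weight=100.0):
    over = tf.nn.relu(pred - upper)    # amount above the upper bound, 0 if inside
    under = tf.nn.relu(lower - pred)   # amount below the lower bound, 0 if inside
    return weight * tf.reduce_mean(tf.square(over) + tf.square(under))

You could use it as self.loss_cond = bound_penalty(self.v1_pred, -0.5, 0.75) in place of the tf.where construction. Keep in mind that a penalty only discourages violations; unlike the output activation, it does not guarantee that predictions stay inside the bounds.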