I am trying to build a logistic regression model by hand in TensorFlow, but GradientTape returns NoneType gradients.
import tensorflow as tf          # imports implied by the snippet
import matplotlib.pyplot as plt

class LogisticRegressionTF:
    def __init__(self, dim):  # dim = X_train.shape[0]
        tf.random.set_seed(1)
        weight_init = tf.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform", seed=1)
        zeros_init = tf.zeros_initializer()
        self.W = tf.Variable(zeros_init([dim, 1]), trainable=True, name="W")
        self.b = tf.Variable(zeros_init([1]), trainable=True, name="b")

    def sigmoid(self, z):
        x = tf.Variable(z, trainable=True, dtype=tf.float32, name='x')
        sigmoid = tf.sigmoid(x)
        result = sigmoid
        return result

    def predict(self, x):
        x = tf.cast(x, dtype=tf.float32)
        h = tf.sigmoid(tf.add(tf.matmul(tf.transpose(self.W), x), self.b))
        return h

    def loss(self, logits, labels):
        z = tf.Variable(logits, trainable=False, dtype=tf.float32, name='z')
        y = tf.Variable(labels, trainable=False, dtype=tf.float32, name='y')
        m = tf.cast(tf.size(z), dtype=tf.float32)
        cost = tf.divide(tf.reduce_sum(y*tf.math.log(z) + (1-y)*tf.math.log(1-z)), -m)
        return cost

    def fit(self, X_train, Y_train, lr_rate=0.01, epochs=1000):
        costs = []
        optimizer = tf.optimizers.SGD(learning_rate=lr_rate)
        for i in range(epochs):
            current_loss = self.loss(self.predict(X_train), Y_train)
            print(current_loss)
            with tf.GradientTape() as t:
                t.watch([self.W, self.b])
                currt_loss = self.loss(self.predict(X_train), Y_train)
                print(currt_loss)
            grads = t.gradient(currt_loss, [self.W, self.b])
            print(grads)
            #optimizer.apply_gradients(zip(grads, [self.W, self.b]))
            self.W.assign_sub(lr_rate * grads[0])
            self.b.assign_sub(lr_rate * grads[1])
            if i % 100 == 0:
                print('Epoch %2d: , loss=%2.5f' % (i, current_loss))
            costs.append(current_loss)
        plt.plot(costs)
        plt.ylim(0, 50)
        plt.ylabel('Cost J')
        plt.xlabel('Iterations')

log_reg = LogisticRegressionTF(train_set_x.shape[0])
log_reg.fit(train_set_x, train_set_y)
This raises a TypeError, because the gradients come back as None:
tf.Tensor(0.6931474, shape=(), dtype=float32)
tf.Tensor(0.6931474, shape=(), dtype=float32)
[None, None]
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-192-024668d532b0> in <module>()
      1 log_reg = LogisticRegressionTF(train_set_x.shape[0])
----> 2 log_reg.fit(train_set_x, train_set_y)

<ipython-input-191-4fef932eb231> in fit(self, X_train, Y_train, lr_rate, epochs)
     40             print(grads)
     41             #optimizer.apply_gradients(zip(grads,[self.W, self.b]))
---> 42             self.W.assign_sub(lr_rate * grads[0])
     43             self.b.assign_sub(lr_rate * grads[1])
     44             if(i %100 == 0):

TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
My hypothesis function is tf.sigmoid(tf.add(tf.matmul(tf.transpose(self.W), x), self.b)).
I defined the cost function manually as tf.divide(tf.reduce_sum(y*tf.math.log(z) + (1-y)*tf.math.log(1-z)), -m), where m is the number of training examples.
As a sanity check, the loss it returns is tf.Tensor(0.6931474, shape=(), dtype=float32), which is ln(2), exactly what you would expect with zero-initialized weights (every prediction is 0.5).
I also tried t.watch(), but it made no difference; the gradients still come back as [None, None].
train_set_y.dtype is dtype('int64')
train_set_x.dtype is dtype('float64')
train_set_x.shape is (12288, 209)
train_set_y.shape is (1, 209)
type(train_set_x) is numpy.ndarray
What am I doing wrong?
Thanks
Answer:
In my environment TensorFlow runs eagerly, i.e. Eager Execution is enabled. You can check this with tf.executing_eagerly(), which returns True when Eager Execution is on.
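A trivial check, runnable as-is under TF 2.x:

import tensorflow as tf

print(tf.executing_eagerly())  # prints True when Eager Execution is enabled (the default in TF 2.x)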
The problem is in the loss(self, logits, labels) method.
logits should not be wrapped in a tf.Variable(...). It should simply be z = logits, treating logits as a Tensor rather than a tf.Variable object: constructing a new tf.Variable from the logits copies their value and severs the tape's record of how they were computed from self.W and self.b, which is exactly why the gradients come back as None.
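Here is a minimal sketch of that failure mode, with toy values unrelated to the original model: as soon as an intermediate result is rewrapped in a tf.Variable, the tape loses the path back to the watched variable.

import tensorflow as tf

w = tf.Variable(2.0)
with tf.GradientTape() as t:
    y = w * 3.0                          # tracked: depends on w
    z = tf.Variable(y, trainable=False)  # copies y's value; the link to w is cut here
    loss = z * z
print(t.gradient(loss, w))               # None -- no recorded path from loss back to w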
I also replaced the tf.divide call with plain eager-style operator arithmetic (though that was not necessary).
Before:
def loss(self, logits, labels):
    z = tf.Variable(logits, trainable=False, dtype=tf.float32, name='z')
    y = tf.Variable(labels, trainable=False, dtype=tf.float32, name='y')
    m = tf.cast(tf.size(z), dtype=tf.float32)
    cost = tf.divide(tf.reduce_sum(y*tf.math.log(z) + (1-y)*tf.math.log(1-z)), -m)
    return cost
After:
def loss(self, logits, labels):
    z = logits
    y = tf.constant(labels, dtype=tf.float32, name='y')
    m = tf.cast(tf.size(z), dtype=tf.float32)
    cost = (-1/m)*tf.reduce_sum(y*tf.math.log(z) + (1-y)*tf.math.log(1-z))
    return cost
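With this change the tape can follow self.W and self.b through predict() into loss(), so t.gradient returns real tensors (the explicit t.watch() is also redundant, since trainable variables are watched automatically). A quick sanity check, assuming the class above with the fixed loss method; the random arrays here are just stand-ins for train_set_x and train_set_y:

import numpy as np
import tensorflow as tf

X = np.random.rand(4, 10)                  # 4 features, 10 examples, same layout as train_set_x
Y = np.random.randint(0, 2, size=(1, 10))  # binary labels, same layout as train_set_y

model = LogisticRegressionTF(X.shape[0])
with tf.GradientTape() as t:
    current_loss = model.loss(model.predict(X), Y)
grads = t.gradient(current_loss, [model.W, model.b])
print([g is not None for g in grads])      # [True, True]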