The following code contains an LSTM model with a callback that stops training once the training accuracy reaches 90%.
It works fine when I use tensorflow-gpu==1.14.
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Bidirectional, Masking, LSTM
from keras_self_attention import SeqSelfAttention


def duo_bi_LSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                      batch_size=68, units=128, learning_rate=0.005,
                      epochs=20, dropout=0.2, recurrent_dropout=0.2,
                      optimizer='Adam'):

    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if (logs.get('acc') > 0.90):
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()

    adamopt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    RMSopt = tf.keras.optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)

    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(LSTM(units, dropout=dropout,
                                 recurrent_dropout=recurrent_dropout,
                                 return_sequences=True)))
    model.add(Bidirectional(LSTM(units, dropout=dropout,
                                 recurrent_dropout=recurrent_dropout)))
    model.add(Dense(num_classes, activation='softmax'))

    # opt_select is defined elsewhere in my code; it maps the optimizer name to adamopt / RMSopt
    opt = opt_select(optimizer)
    model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    yhat = model.predict(X_test)  # predictions for the test set

    return history, yhat


X_train = np.random.rand(700, 50, 34)
y_train = np.random.choice([0, 1], 700)
X_test = np.random.rand(100, 50, 34)
y_test = np.random.choice([0, 1], 100)

batch_size = 217
epochs = 600
dropout = 0.6
Rdropout = 0.7
learning_rate = 0.00001
optimizer = 'RMS'
loss = 'categorical_crossentropy'
num_classes = y_train.shape[1]

duo_bi_LSTM_his, yhat = duo_bi_LSTM_model(X_train, y_train, X_test, y_test,
                                          loss=loss, num_classes=num_classes,
                                          batch_size=batch_size, units=32,
                                          learning_rate=learning_rate, epochs=epochs,
                                          dropout=0.5, recurrent_dropout=Rdropout,
                                          optimizer=optimizer)
```
After updating TensorFlow to 2.2, the following error appears:
```
~/Speech/Feature_engineering/LXRmodels.py in on_epoch_end(self, epoch, logs)
    103     class myCallback(tf.keras.callbacks.Callback):
    104         def on_epoch_end(self, epoch, logs={}):
--> 105             if (logs.get('acc') > 0.90):
    106                 print("\nReached 90% accuracy so cancelling training!")
    107                 self.model.stop_training = True

TypeError: '>' not supported between instances of 'NoneType' and 'float'
```
Is this caused by the TensorFlow update? How should I modify the code?
Answer:
You need to replace `logs.get('acc')` with `logs.get('accuracy')`.

In TF 2.x, compiling with `metrics=['accuracy']` records the metric in the `logs` dict under the key `'accuracy'` (and `'val_accuracy'` for validation data), whereas TF 1.x Keras used `'acc'`. As a result, `logs.get('acc')` returns `None` under TF 2.2, and comparing `None` with `0.90` raises the `TypeError` shown above.
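As a minimal sketch (not your original code), the corrected callback could look like this; the fallback to the old `'acc'` key is an optional extra so the same callback keeps working on TF 1.x as well:

```python
import tensorflow as tf

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # TF 2.x logs the metric as 'accuracy'; older Keras/TF 1.x used 'acc'.
        acc = logs.get('accuracy', logs.get('acc'))
        if acc is not None and acc > 0.90:
            print("\nReached 90% accuracy so cancelling training!")
            self.model.stop_training = True
```

Using `logs=None` with an explicit fallback also avoids the mutable default argument (`logs={}`) in the original signature.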