ValueError: Dimensions must be equal, but are 96 and 256, on a TensorFlow TPU

I am trying to build an MNIST GAN that runs on a TPU. I copied the GAN code from here.

I then made some changes so the code would run on a TPU, following this tutorial on the TensorFlow website that shows how to use TPUs.

However, it does not work and raises an error. Here is my code:

# -*- coding: utf-8 -*-
"""Untitled13.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1gbHDaCeFUCGDkkNgAPjGFQIDvZ5NxVfr
"""

# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x

import tensorflow as tf
import numpy as np

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))

strategy = tf.distribute.TPUStrategy(resolver)

import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time

from IPython import display

(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()

train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # Normalize the images to [-1, 1]

BUFFER_SIZE = 60000
BATCH_SIZE = 256

# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model

def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model

# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16

# You will reuse this seed overtime (so it's easier)
# to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])

def generate_and_save_images(model, epoch, test_input):
  # Notice `training` is set to False.
  # This is so all layers run in inference mode (batchnorm).
  predictions = model(test_input, training=False)

  fig = plt.figure(figsize=(4, 4))

  for i in range(predictions.shape[0]):
      plt.subplot(4, 4, i+1)
      plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
      plt.axis('off')

  plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
  plt.show()

def train(dataset, epochs):
  for epoch in range(epochs):
    start = time.time()

    for image_batch in (dataset):
      strategy.run(train_step, args=(image_batch,))

    # Produce images for the GIF as you go
    display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epoch + 1,
                             seed)

    # Save the model every 15 epochs
    if (epoch + 1) % 15 == 0:
      checkpoint.save(file_prefix = checkpoint_prefix)

    print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

  # Generate after the final epoch
  display.clear_output(wait=True)
  generate_and_save_images(generator,
                           epochs,
                           seed)

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
      generated_images = generator(noise, training=True)

      real_output = discriminator(images, training=True)
      fake_output = discriminator(generated_images, training=True)
      fake_output_0 = discriminator(generated_images, training=True)

      gen_loss = generator_loss(fake_output_0)
      disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

with strategy.scope():
  generator = make_generator_model()
  generator_optimizer = tf.keras.optimizers.Adam(1e-4)

  discriminator = make_discriminator_model()
  discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

  checkpoint_dir = './training_checkpoints'
  checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
  checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                   discriminator_optimizer=discriminator_optimizer,
                                   generator=generator,
                                   discriminator=discriminator)

  train(train_dataset, EPOCHS)

The final part of the output is (I am working in Colab and don't want to copy the output of every cell, so I am not showing all of it):

ValueError: Dimensions must be equal, but are 96 and 256 for '{{node add}} = AddV2[T=DT_FLOAT](binary_crossentropy/weighted_loss/Mul, binary_crossentropy_1/weighted_loss/Mul)' with input shapes: [96], [256].

Answer:

The training data has 60,000 examples. If you split them into batches of size 256, the last batch is a smaller one of size 60000 % 256 = 96. Unless you drop that remainder, Keras still treats it as a regular batch. So in train_step, for this batch of 96 images, real_output will have shape (96, 1) while fake_output will have shape (256, 1), because the noise is always sampled with BATCH_SIZE = 256. Since you set reduction to None in the cross_entropy loss, the per-example shape is preserved: real_loss has shape (96,) and fake_loss has shape (256,), and adding these two tensors is exactly what raises the error.
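Here is a minimal sketch (not part of the original answer) that reproduces the shape mismatch outside the training loop; the shapes are taken from the question's BATCH_SIZE = 256 and the 60,000-image MNIST training set:

import tensorflow as tf

print(60000 % 256)  # 96 -> size of the final, smaller batch

cross_entropy = tf.keras.losses.BinaryCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

real_output = tf.zeros([96, 1])   # discriminator output for the last real batch
fake_output = tf.zeros([256, 1])  # discriminator output for 256 generated images

real_loss = cross_entropy(tf.ones_like(real_output), real_output)    # shape (96,)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)   # shape (256,)
total_loss = real_loss + fake_loss  # fails: shapes (96,) and (256,) cannot be added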

You can fix this in either of the following ways:

# Leave the reduction argument at its default, i.e. 'auto'/'sum_over_batch_size'
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
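As a quick check of why this works (a sketch added here, not from the original answer): with the default reduction, each call to the loss returns a scalar, so the two losses can always be added no matter how the real and fake batch sizes differ.

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
real_loss = cross_entropy(tf.ones([96, 1]), tf.zeros([96, 1]))     # scalar
fake_loss = cross_entropy(tf.zeros([256, 1]), tf.zeros([256, 1]))  # scalar
total_loss = real_loss + fake_loss  # scalar + scalar, no shape error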

or

# Drop the leftover batch
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
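As a side note (not part of the original answer): drop_remainder=True is arguably the better fit on a TPU, since XLA-compiled code prefers static batch shapes, and it also keeps the real batch size equal to the BATCH_SIZE = 256 that train_step uses to sample noise.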

