as_list() is not defined on an unknown TensorShape

**Update** (after implementing the suggestion from the answer below): my code now looks like this:

    from __future__ import absolute_import, division, print_function, unicode_literals
    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    import PIL as pil
    from tensorflow import feature_column
    from tensorflow_core.python.platform.flags import FLAGS

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


    def print_type(name, x):
        print(" {} type = {}".format(name, type(x)))


    def _bytes_feature(value):
        """Returns a bytes_list from a string / byte."""
        if isinstance(value, type(tf.constant(0))):
            value = value.numpy()  # BytesList won't unpack a string from an EagerTensor.
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


    def _float_feature(value):
        """Returns a float_list from a float / double."""
        if not isinstance(value, np.ndarray):
            value = [value]
        return tf.train.Feature(float_list=tf.train.FloatList(value=value))


    def _int64_feature(value):
        """Returns an int64_list from a bool / enum / int / uint."""
        if not isinstance(value, np.ndarray):
            value = [value]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


    def encode_example(arr, label):
        shape = arr.shape
        feature = {
            'height': _int64_feature(shape[0]),
            'width': _int64_feature(shape[1]),
            'label': _int64_feature(label),
            'image_raw': _bytes_feature(arr.flatten().tobytes()),
        }
        # print("encode type {}".format(type(feature['image_raw'])))
        return tf.train.Example(features=tf.train.Features(feature=feature))


    def decode_example(serialized_example):
        # Create a dictionary describing the features.
        image_feature_description = {
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'label': tf.io.FixedLenFeature([], tf.int64),
            'image_raw': tf.io.FixedLenFeature([], tf.string),
        }
        example = tf.io.parse_single_example(serialized_example, image_feature_description)
        return example


    def map_example(height, width, image_raw, label):
        # Assumes little-endian decoding; pass little_endian=False for big-endian.
        image_data = tf.io.decode_raw(image_raw, tf.uint8)
        image_data = tf.reshape(image_data, [height, width])
        return image_data, label


    def make_dataset(partition):
        files = tf.data.Dataset.list_files("images_" + partition + "*.tfrecord")
        dataset = tf.data.TFRecordDataset(files)
        # dataset = dataset.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
        dataset = dataset.map(decode_example)
        dataset = dataset.map(
            lambda x: map_example(x['height'], x['width'], x['image_raw'], x['label']))
        # dataset = dataset.batch(batch_size=FLAGS.batch_size)
        return dataset


    def write_examples_to_record_file(file_name, x, y):
        with tf.io.TFRecordWriter(file_name) as writer:
            for i in range(len(x)):
                tf_example = encode_example(x[i], y[i])
                writer.write(tf_example.SerializeToString())


    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, y_train, x_test, y_test = x_train[:100], y_train[:100], x_test[:100], y_test[:100]
    # x_train, x_test = x_train.astype(np.int8), x_test.astype(np.int8)
    write_examples_to_record_file('images_train.tfrecord', x_train, y_train)
    # write_examples_to_record_file('images_test.tfrecord', x_test, y_test)

    train_dataset = make_dataset("train")
    # test_dataset = make_dataset("test")

    it = iter(train_dataset)
    r = next(it)
    print_type("r", r)
    (x, y) = r
    print_type("X", x)
    print_type("Y", y)
    print("x is", x)
    print("y is", y)
    print("x shape is", x.shape())
    print("y shape is", y.shape())
    # print(next(it))
    # for r, label in next(it):
    #     print(repr(r))
    #     print("feature shape = {}".format(r.shape.as_list()))
    #     print("label shape = {}".format(label.shape.as_list()))

    # feature_column = [tf.feature_column.numeric_column(key='image', shape=(28,28))]
    # feature_layer = tf.keras.layers.DenseFeatures(feature_column)
    #
    # it = iter(train_dataset)

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=[28, 28]),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(train_dataset, epochs=5)
    # model.evaluate(test_dataset, verbose=2)

But now the error is different, and I think it points at the cause of the original error: there is a problem getting the shapes of the x and y components of the tuple returned by next():

    2019-11-19 11:11:54.540221: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_100.dll'; dlerror: cudart64_100.dll not found
    2019-11-19 11:11:56.763955: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
    2019-11-19 11:11:56.764410: E tensorflow/stream_executor/cuda/cuda_driver.cc:318] failed call to cuInit: UNKNOWN ERROR (303)
    2019-11-19 11:11:56.767167: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: szclu-dvcasa027
    2019-11-19 11:11:56.767572: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: szclu-dvcasa027
    2019-11-19 11:11:56.768026: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
    WARNING:tensorflow:Entity <function decode_example at 0x000002A67F5EE438> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: No module named 'tensorflow_core.estimator'
    WARNING:tensorflow:Entity <function make_dataset.<locals>.<lambda> at 0x000002A67F61C0D8> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: No module named 'tensorflow_core.estimator'
     r type = <class 'tuple'>
     X type = <class 'tensorflow.python.framework.ops.EagerTensor'>
     Y type = <class 'tensorflow.python.framework.ops.EagerTensor'>
    x is tf.Tensor(
    [[  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   3  18  18  18 126 136 175  26 166 255 247 127   0   0   0   0]
     [  0   0   0   0   0   0   0   0  30  36  94 154 170 253 253 253 253 253 225 172 253 242 195  64   0   0   0   0]
     [  0   0   0   0   0   0   0  49 238 253 253 253 253 253 253 253 253 251  93  82  82  56  39   0   0   0   0   0]
     [  0   0   0   0   0   0   0  18 219 253 253 253 253 253 198 182 247 241   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0  80 156 107 253 253 205  11   0  43 154   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0  14   1 154 253  90   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0 139 253 190   2   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0  11 190 253  70   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0  35 241 225 160 108   1   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0  81 240 253 253 119  25   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0  45 186 253 253 150  27   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0  16  93 252 253 187   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0 249 253 249  64   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0  46 130 183 253 253 207   2   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0  39 148 229 253 253 253 250 182   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0  24 114 221 253 253 253 253 201  78   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0  23  66 213 253 253 253 253 198  81   2   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0  18 171 219 253 253 253 253 195  80   9   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0  55 172 226 253 253 253 253 244 133  11   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0 136 253 253 253 212 135 132  16   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]
     [  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0]], shape=(28, 28), dtype=uint8)
    y is tf.Tensor(5, shape=(), dtype=int64)
    Traceback (most recent call last):
      File "<input>", line 1, in <module>
      File "C:\Users\me\.IntelliJIdea2019.3\config\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
        pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
      File "C:\Users\me\.IntelliJIdea2019.3\config\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
        exec(compile(contents+"\n", file, 'exec'), glob, loc)
      File "C:/data/projects/cludeeplearning/train_model_so.py", line 108, in <module>
        print("x shape is", x.shape())
    TypeError: 'TensorShape' object is not callable
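
(For reference, the TypeError at the very end of this log seems to come from calling `shape` with parentheses; on an `EagerTensor`, `shape` is a property rather than a method, so a small sketch along these lines, reusing the same `train_dataset` defined above, should print the shapes without raising:)

    # Minimal sketch: inspect one (image, label) pair from the dataset.
    # `shape` is a property, not a callable; as_list() converts it to a plain Python list.
    it = iter(train_dataset)
    x, y = next(it)
    print("x shape is", x.shape)                   # e.g. (28, 28)
    print("x shape as list:", x.shape.as_list())   # e.g. [28, 28]
    print("y shape is", y.shape)                   # e.g. ()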

=============================================================================

I am experimenting with TFRecords because eventually we will have large amounts of data stored across multiple files, which may not all fit into memory at once.

The basic idea of this proof of concept is to load the MNIST dataset, save it to a TFRecord file in tf.Example format, then load it back from the TFRecord with a TFRecordDataset and train a model on it.

Here is my code sample:

    from __future__ import absolute_import, division, print_function, unicode_literals
    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    import PIL as pil
    from tensorflow import feature_column
    from tensorflow_core.python.platform.flags import FLAGS

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


    def _bytes_feature(value):
        """Returns a bytes_list from a string / byte."""
        if isinstance(value, type(tf.constant(0))):
            value = value.numpy()  # BytesList won't unpack a string from an EagerTensor.
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


    def _float_feature(value):
        """Returns a float_list from a float / double."""
        if not isinstance(value, np.ndarray):
            value = [value]
        return tf.train.Feature(float_list=tf.train.FloatList(value=value))


    def _int64_feature(value):
        """Returns an int64_list from a bool / enum / int / uint."""
        if not isinstance(value, np.ndarray):
            value = [value]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


    def encode_example(arr, label):
        shape = arr.shape
        feature = {
            'height': _int64_feature(shape[0]),
            'width': _int64_feature(shape[1]),
            'label': _int64_feature(label),
            'image_raw': _bytes_feature(arr.flatten().tobytes()),
        }
        # print("encode type {}".format(type(feature['image_raw'])))
        return tf.train.Example(features=tf.train.Features(feature=feature))


    def decode_example(serialized_example):
        # Create a dictionary describing the features.
        image_feature_description = {
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'label': tf.io.FixedLenFeature([], tf.int64),
            'image_raw': tf.io.FixedLenFeature([], tf.string),
        }
        example = tf.io.parse_single_example(serialized_example, image_feature_description)
        return example


    def map_example(height, width, image_raw, label):
        image_data = np.frombuffer(image_raw.numpy(), dtype=np.dtype('int64'))
        image_data = tf.reshape(image_data, [height.numpy(), width.numpy()])
        # image_data.set_shape([28,28])
        label = tf.constant([label.numpy()], tf.int64)
        return image_data, label


    def make_dataset(partition):
        files = tf.data.Dataset.list_files("images_" + partition + "*.tfrecord")
        dataset = tf.data.TFRecordDataset(files)
        # dataset = dataset.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
        dataset = dataset.map(decode_example)
        dataset = dataset.map(
            lambda x: tf.py_function(func=map_example,
                                     inp=[x['height'], x['width'], x['image_raw'], x['label']],
                                     Tout=(tf.int64, tf.int64)))
        # dataset = dataset.batch(batch_size=FLAGS.batch_size)
        return dataset


    def write_examples_to_record_file(file_name, x, y):
        with tf.io.TFRecordWriter(file_name) as writer:
            for i in range(len(x)):
                tf_example = encode_example(x[i], y[i])
                writer.write(tf_example.SerializeToString())


    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, y_train, x_test, y_test = x_train[:100], y_train[:100], x_test[:100], y_test[:100]
    x_train, x_test = x_train / 255.0, x_test / 255.0
    write_examples_to_record_file('images_train.tfrecord', x_train, y_train)
    # write_examples_to_record_file('images_test.tfrecord', x_test, y_test)

    train_dataset = make_dataset("train")
    # test_dataset = make_dataset("test")

    for r in train_dataset.take(1):
        print(r[0].shape.as_list())
        print(r[1])

    # feature_column = [tf.feature_column.numeric_column(key='image', shape=(28,28))]
    # feature_layer = tf.keras.layers.DenseFeatures(feature_column)
    #
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=[28, 28, 1]),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(train_dataset, epochs=5)
    # model.evaluate(test_dataset, verbose=2)

Before training can get going I get the following error. Any idea why it is not working?

    2019-11-19 06:31:21.067987: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_100.dll'; dlerror: cudart64_100.dll not found
    2019-11-19 06:31:23.315270: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
    2019-11-19 06:31:23.315617: E tensorflow/stream_executor/cuda/cuda_driver.cc:318] failed call to cuInit: UNKNOWN ERROR (303)
    2019-11-19 06:31:23.320751: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: szclu-dvcasa027
    2019-11-19 06:31:23.321132: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: szclu-dvcasa027
    2019-11-19 06:31:23.321927: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
    WARNING:tensorflow:Entity <function decode_example at 0x00000250EE94A1F8> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: No module named 'tensorflow_core.estimator'
    WARNING:tensorflow:Entity <function make_dataset.<locals>.<lambda> at 0x00000250EE94A438> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: No module named 'tensorflow_core.estimator'
    feature shape = [28, 28]
    label shape = [1]
    Epoch 1/5
    Traceback (most recent call last):
      File "C:/data/projects/cludeeplearning/train_model.py", line 110, in <module>
        model.fit(train_dataset,  epochs=5)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 728, in fit
          1/Unknown - 0s 13ms/step
          1/Unknown - 0s 13ms/step
        use_multiprocessing=use_multiprocessing)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 324, in fit
        total_epochs=epochs)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 123, in run_one_epoch
        batch_outs = execution_function(iterator)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 86, in execution_function
        distributed_function(input_fn))
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 457, in __call__
        result = self._call(*args, **kwds)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 503, in _call
        self._initialize(args, kwds, add_initializers_to=initializer_map)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 408, in _initialize
        *args, **kwds))
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\function.py", line 1848, in _get_concrete_function_internal_garbage_collected
        graph_function, _, _ = self._maybe_define_function(args, kwargs)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\function.py", line 2150, in _maybe_define_function
        graph_function = self._create_graph_function(args, kwargs)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\function.py", line 2041, in _create_graph_function
        capture_by_value=self._capture_by_value),
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 915, in func_graph_from_py_func
        func_outputs = python_func(*func_args, **func_kwargs)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 358, in wrapped_fn
        return weak_wrapped_fn().__wrapped__(*args, **kwds)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 66, in distributed_function
        model, input_iterator, mode)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 112, in _prepare_feed_values
        inputs, targets, sample_weights = _get_input_from_iterator(inputs)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 149, in _get_input_from_iterator
        distribution_strategy_context.get_strategy(), x, y, sample_weights)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\distribute\distributed_training_utils.py", line 308, in validate_distributed_dataset_inputs
        x_values_list = validate_per_replica_inputs(distribution_strategy, x)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\distribute\distributed_training_utils.py", line 356, in validate_per_replica_inputs
        validate_all_tensor_shapes(x, x_values)
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\keras\distribute\distributed_training_utils.py", line 373, in validate_all_tensor_shapes
        x_shape = x_values[0].shape.as_list()
      File "C:\Users\me\AppData\Local\Continuum\anaconda3\envs\deeplearning\lib\site-packages\tensorflow_core\python\framework\tensor_shape.py", line 1171, in as_list
        raise ValueError("as_list() is not defined on an unknown TensorShape.")
    ValueError: as_list() is not defined on an unknown TensorShape.

    Process finished with exit code 1

Any help is appreciated. Thanks.


Answer:

It is hard to pinpoint the problem without a self-contained example to test against, but I think it may come from the use of tf.py_function, which can lose the tensors' shape information. You don't really need it here, though; I think you can define map_example and make_dataset equivalently as follows:

    def map_example(height, width, image_raw, label):
        image_data = tf.io.decode_raw(image_raw, tf.uint8)
        image_data = tf.reshape(image_data, [1, height, width])
        return image_data, label


    def make_dataset(partition):
        files = tf.data.Dataset.list_files("images_" + partition + "*.tfrecord")
        dataset = tf.data.TFRecordDataset(files)
        # dataset = dataset.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
        dataset = dataset.map(decode_example)
        dataset = dataset.map(
            lambda x: map_example(x['height'], x['width'], x['image_raw'], x['label']))
        # dataset = dataset.batch(batch_size=FLAGS.batch_size)
        return dataset
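
If for some reason you did need to keep tf.py_function, one common workaround (just a rough sketch assuming fixed 28x28 MNIST images, not something from your original code) is to re-declare the static shapes on its outputs with set_shape. py_function returns tensors whose shape is unknown to the graph, and that unknown shape is exactly what model.fit() later trips over when it calls shape.as_list():

    def make_dataset_with_py_function(partition):
        files = tf.data.Dataset.list_files("images_" + partition + "*.tfrecord")
        dataset = tf.data.TFRecordDataset(files)
        dataset = dataset.map(decode_example)

        def _map(x):
            image, label = tf.py_function(
                func=map_example,
                inp=[x['height'], x['width'], x['image_raw'], x['label']],
                Tout=(tf.uint8, tf.int64))
            # py_function outputs have unknown static shapes; declare them so
            # downstream code (e.g. Keras) can call shape.as_list().
            image.set_shape([1, 28, 28])  # matches the [1, height, width] reshape above
            label.set_shape([])
            return image, label

        return dataset.map(_map)

Either way, you can sanity-check the result by printing train_dataset.element_spec (TF 2.x); the shapes should come back fully defined rather than unknown.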
