Tensor is not an element of this graph

I am running into this error:

ValueError: Tensor Tensor("Placeholder:0", shape=(1, 1), dtype=int32) is not an element of this graph.

The code runs fine without the with tf.Graph().as_default(): line. However, I need to call M.sample(...) many times, and after each session.close() the memory is never released. There is probably a memory leak somewhere, but I cannot tell where.

I want to restore a pretrained neural network, set it as the default graph, and run it against that default graph many times (say 10,000), without the graph growing on each call.

The code is as follows:

import os
from six.moves import cPickle   # assumed imports; the original snippet omitted them
import tensorflow as tf

def SessionOpener(save):
    grph = tf.get_default_graph()
    sess = tf.Session(graph=grph)
    ckpt = tf.train.get_checkpoint_state(save)
    saver = tf.train.import_meta_graph('./predictor/save/model.ckpt.meta')
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        tf.global_variables_initializer().run(session=sess)
    return sess

def LoadPredictor(save):
    with open(os.path.join(save, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(save, 'words_vocab.pkl'), 'rb') as f:
        words, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    return model, words, vocab

if __name__ == '__main__':
    Save = './save'
    M, W, V = LoadPredictor(Save)
    Sess = SessionOpener(Save)
    word = M.sample(Sess, W, V, 1, str(123), 2, 1, 4)
    Sess.close()
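As a quick sanity check (a sketch using the M and Sess objects from the snippet above), the placeholder built by Model and the graph the session runs on turn out to be different objects:

# The placeholders live in the graph created inside Model.__init__,
# while the session was opened on the default graph:
print(M.input_data.graph is Sess.graph)              # False
print(M.input_data.graph is tf.get_default_graph())  # False as well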

The model is as follows:

import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn, legacy_seq2seq   # assumed imports; omitted in the original

class Model():
    def __init__(self, args, infer=False):
        with tf.Graph().as_default():
            self.args = args
            if infer:
                args.batch_size = 1
                args.seq_length = 1
            if args.model == 'rnn':
                cell_fn = rnn.BasicRNNCell
            elif args.model == 'gru':
                cell_fn = rnn.GRUCell
            elif args.model == 'lstm':
                cell_fn = rnn.BasicLSTMCell
            else:
                raise Exception("model type not supported: {}".format(args.model))

            cells = []
            for _ in range(args.num_layers):
                cell = cell_fn(args.rnn_size)
                cells.append(cell)
            self.cell = cell = rnn.MultiRNNCell(cells)

            self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
            self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
            self.initial_state = cell.zero_state(args.batch_size, tf.float32)
            self.batch_pointer = tf.Variable(0, name="batch_pointer", trainable=False, dtype=tf.int32)
            self.inc_batch_pointer_op = tf.assign(self.batch_pointer, self.batch_pointer + 1)
            self.epoch_pointer = tf.Variable(0, name="epoch_pointer", trainable=False)
            self.batch_time = tf.Variable(0.0, name="batch_time", trainable=False)
            tf.summary.scalar("time_batch", self.batch_time)

            def variable_summaries(var):
                """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
                with tf.name_scope('summaries'):
                    mean = tf.reduce_mean(var)
                    tf.summary.scalar('mean', mean)
                    tf.summary.scalar('max', tf.reduce_max(var))
                    tf.summary.scalar('min', tf.reduce_min(var))

            with tf.variable_scope('rnnlm'):
                softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
                variable_summaries(softmax_w)
                softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
                variable_summaries(softmax_b)
                with tf.device("/cpu:0"):
                    embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
                    inputs = tf.split(tf.nn.embedding_lookup(embedding, self.input_data), args.seq_length, 1)
                    inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

            def loop(prev, _):
                prev = tf.matmul(prev, softmax_w) + softmax_b
                prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
                return tf.nn.embedding_lookup(embedding, prev_symbol)

            outputs, last_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, cell,
                    loop_function=loop if infer else None, scope='rnnlm')
            output = tf.reshape(tf.concat(outputs, 1), [-1, args.rnn_size])
            self.logits = tf.matmul(output, softmax_w) + softmax_b
            self.probs = tf.nn.softmax(self.logits)
            loss = legacy_seq2seq.sequence_loss_by_example([self.logits],
                    [tf.reshape(self.targets, [-1])],
                    [tf.ones([args.batch_size * args.seq_length])],
                    args.vocab_size)
            self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
            tf.summary.scalar("cost", self.cost)
            self.final_state = last_state
            self.lr = tf.Variable(0.0, trainable=False)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                    args.grad_clip)
            optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))

    def sample(self, sess, words, vocab, num=200, prime='first all', sampling_type=1, pick=0, width=4):
        def weighted_pick(weights):
            t = np.cumsum(weights)
            s = np.sum(weights)
            return int(np.searchsorted(t, np.random.rand(1) * s))

        ret = ''
        if pick == 1:
            state = sess.run(self.cell.zero_state(1, tf.float32))
            if not len(prime) or prime == ' ':
                prime = random.choice(list(vocab.keys()))
            for word in prime.split()[:-1]:
                x = np.zeros((1, 1))
                x[0, 0] = vocab.get(word, 0)
                feed = {self.input_data: x, self.initial_state: state}
                [state] = sess.run([self.final_state], feed)
            ret = prime
            word = prime.split()[-1]
            for n in range(num):
                x = np.zeros((1, 1))
                x[0, 0] = vocab.get(word, 0)
                feed = {self.input_data: x, self.initial_state: state}
                [probs, state] = sess.run([self.probs, self.final_state], feed)
                p = probs[0]
                if sampling_type == 0:
                    sample = np.argmax(p)
                elif sampling_type == 2:
                    if word == '\n':
                        sample = weighted_pick(p)
                    else:
                        sample = np.argmax(p)
                else:  # sampling_type == 1, default
                    sample = weighted_pick(p)
                ret = words[sample]
        return ret
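The with tf.Graph().as_default(): block in __init__ seems to be the relevant detail: every op created inside it belongs to a fresh graph, and that graph stops being the default the moment the block exits. A minimal illustration of that behavior:

import tensorflow as tf

with tf.Graph().as_default():
    x = tf.placeholder(tf.int32, [1, 1])    # x lives in the temporary graph

# Outside the block, the default graph is a different object again,
# so a session opened on it can neither feed nor fetch x:
print(x.graph is tf.get_default_graph())     # False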

The output is as follows:

Traceback (most recent call last):
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.2.1-GPU-PY352/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 942, in _run
    allow_operation=False)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.2.1-GPU-PY352/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2584, in as_graph_element
    return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  File "/rcg/software/Linux/Ubuntu/16.04/amd64/TOOLS/TENSORFLOW/1.2.1-GPU-PY352/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2663, in _as_graph_element_locked
    raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("Placeholder:0", shape=(1, 1), dtype=int32) is not an element of this graph.

Answer:
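The placeholders are created inside the with tf.Graph().as_default(): block in Model.__init__, so they live in a temporary graph, while SessionOpener opens the session on the default graph (into which import_meta_graph loads a second, independent copy of the network). Feeding self.input_data to a session whose graph does not contain it raises exactly this ValueError. One way to fix it is to make the model remember its own graph and open the session on that graph; the graph can then be finalized, so repeated sample calls cannot keep adding ops, which is what makes a graph grow and look like a memory leak (session.close() releases the session's resources but never the graph itself). A sketch of this approach (the self.graph attribute, the extra graph parameter, and self.zero_state are illustrative additions, not part of the original code):

class Model():
    def __init__(self, args, infer=False):
        self.graph = tf.Graph()              # keep a handle to the graph the ops live in
        with self.graph.as_default():
            # ... build the network exactly as in the code above ...
            # Create the zero state once here, so sample() does not
            # add new ops to the graph on every call:
            self.zero_state = self.cell.zero_state(1, tf.float32)

def SessionOpener(save, graph):
    sess = tf.Session(graph=graph)           # open the session on the model's graph
    with graph.as_default():
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(save)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
    return sess

if __name__ == '__main__':
    Save = './save'
    M, W, V = LoadPredictor(Save)
    Sess = SessionOpener(Save, M.graph)
    M.graph.finalize()                       # adding ops now raises instead of silently leaking
    for _ in range(10000):
        word = M.sample(Sess, W, V, 1, str(123), 2, 1, 4)
    Sess.close()

Two other details worth checking, assuming the checkpoint was written by this same model code: import_meta_graph becomes unnecessary once the Model class rebuilds the network in code, and running tf.global_variables_initializer() after saver.restore overwrites the restored weights with fresh random values. Inside sample, state = sess.run(self.zero_state) should replace the per-call self.cell.zero_state(1, tf.float32), which otherwise adds new ops on every call and is the likely source of the growth; with the graph finalized, any remaining per-call op creation fails loudly instead of leaking.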

