This is my code. How can I expose the model as a REST API endpoint? I have already built the training set and saved the model. Can someone help me with the API part? I have tried, but without success.
```python
import random
import numpy as np
import tensorflow as tf
import tflearn
import nltk
# `words`, `classes`, `documents` and `stemmer` are built earlier in the script (not shown)

training = []
output = []
# create an empty array for our output
output_empty = [0] * len(classes)

# training set: a bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag-of-words array
    for w in words:
        bag.append(1) if w in pattern_words else bag.append(0)
    # output is a '0' for each tag and '1' for the current tag
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])

# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training)

# create train and test lists
train_x = list(training[:, 0])
train_y = list(training[:, 1])

tf.reset_default_graph()
# build the neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

# define the model and set up TensorBoard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# start training (apply gradient descent)
model.fit(train_x, train_y, n_epoch=4000, batch_size=8, show_metric=True)

# save the model
model.save('model.tflearn')

# save all of our data structures
import pickle
pickle.dump({'words': words, 'classes': classes, 'train_x': train_x, 'train_y': train_y},
            open("training_data", "wb"))

# restore all of our data structures
data = pickle.load(open("training_data", "rb"))
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']

# import our chatbot intents file
import json
with open('D:\\android\\ad.json') as json_data:
    intents = json.load(json_data)

def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

# return a bag-of-words array: 0 or 1 for each known word present in the sentence
def bow(sentence, words, show_details=False):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)

ERROR_THRESHOLD = 0.25

# classify the input
def classify(sentence):
    # generate probabilities from the model
    results = model.predict([bow(sentence, words)])[0]
    # filter out predictions below the threshold
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], r[1]))
    # return a list of (intent, probability) tuples
    return return_list

def response(sentence, userID='123', show_details=False):
    results = classify(sentence)
    # if we have a classification, find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in intents['intents']:
                # find the tag matching the first result
                if i['tag'] == results[0][0]:
                    # print a random response from the intent
                    return print(random.choice(i['response']))
            # no intent matched this result; drop it so the loop can terminate
            results.pop(0)
```
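For reference, a quick sanity check of these helpers once the model is trained (the input sentence is just an example):

```python
# classify returns (intent, probability) tuples above the threshold;
# response prints a random reply from the matched intent
print(classify("hello"))
response("hello")
```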
Answer:
There are several ways to do this. You could use a server framework such as Flask or Django; I will show a simple example using Flask. (Note that this is only an abstract prototype.)
Create the model class
```python
# import libraries
class Model():
    def __init__(self):
        self.model = load()

    def inference(self, inputs):
        return self.model.predict(inputs)
```
Note that this is only a prototype; functions such as `load()` are left for you to implement.
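For the tflearn model from the question, a minimal sketch of `load()` could look like the following. It assumes the `training_data` pickle and the `model.tflearn` checkpoint saved above, and rebuilds the exact same network before restoring the weights, since tflearn can only load a checkpoint into a matching graph:

```python
import pickle
import tensorflow as tf
import tflearn

def load():
    # restore the data structures saved during training
    data = pickle.load(open("training_data", "rb"))
    train_x, train_y = data['train_x'], data['train_y']

    # rebuild the same network architecture that was trained
    tf.reset_default_graph()
    net = tflearn.input_data(shape=[None, len(train_x[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
    net = tflearn.regression(net)

    # restore the trained weights into the rebuilt graph
    model = tflearn.DNN(net)
    model.load('model.tflearn')
    return model
```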
Create the REST endpoint
```python
from flask import Flask, request, jsonify
from model import Model

app = Flask(__name__)
model = Model()

@app.route("/inference", methods=["POST"])
def inference():
    data = request.get_json()
    results = model.inference(data["inputs"])
    # NumPy arrays are not JSON serializable; convert with .tolist() if necessary
    return jsonify({"result": results})
```
You can then test the endpoint with curl, or send POST requests to it with axios or fetch. **Don't forget to enable CORS if you call the endpoint from a different origin.** Thanks.
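As a rough sketch, assuming the app above runs on localhost:5000 and the payload shape matches what `Model.inference` expects (the `"inputs"` value here is hypothetical), the endpoint can be exercised from Python with `requests`, and CORS can be enabled with the flask-cors package:

```python
# enable CORS (pip install flask-cors); add this next to the app definition
from flask_cors import CORS
CORS(app)

# test the endpoint from Python; the payload is a hypothetical example
import requests

resp = requests.post("http://localhost:5000/inference",
                     json={"inputs": "Hello there"})
print(resp.json())
```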