
Chinese Chatbot (Part 1)

Training a Chinese chatbot with Keras and seq2seq

I. Data Preprocessing

1. Define the preprocessing parameters (file: data_util.py)

    # maximum number of question/answer pairs to use
    num_samples = 100000
    # corpus paths: one utterance per line, questions and answers aligned by line number
    question_path = 'question.txt'
    answer_path = 'answer.txt'
    # sequence/vocabulary statistics, filled in by get_vocab_dict()
    max_encoder_seq_length = None
    max_decoder_seq_length = None
    num_encoder_tokens = None
    num_decoder_tokens = None

2. Load the training data X and Y

    def get_xy_data():
        # read questions (encoder inputs), one per line
        with open(question_path, 'r', encoding='utf-8') as f:
            input_texts = f.read().split('\n')
        input_texts = input_texts[: min(num_samples, len(input_texts) - 1)]
        # read answers (decoder texts); '\t' marks sequence start, '\n' marks end
        with open(answer_path, 'r', encoding='utf-8') as f:
            target_texts = ['\t' + line + '\n' for line in f.read().split('\n')]
        # truncate to the same length so question/answer pairs stay aligned
        target_texts = target_texts[: len(input_texts)]
        return input_texts, target_texts
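
The corpus format is implied rather than documented: one utterance per line, with line N of question.txt answered by line N of answer.txt. For smoke-testing the pipeline, a toy corpus is enough; the file contents below are made up for illustration:

    # Hypothetical toy corpus, only to exercise the pipeline end to end;
    # real training needs a much larger aligned question/answer corpus.
    with open('question.txt', 'w', encoding='utf-8') as f:
        f.write('你好\n今天天气怎么样\n你叫什么名字')
    with open('answer.txt', 'w', encoding='utf-8') as f:
        f.write('你好呀\n今天天气不错\n我是一个机器人')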

3. The input data must be vectorized, so first build character vocabularies from X and Y

    def get_vocab_dict(X, Y):
        global max_encoder_seq_length, max_decoder_seq_length
        global num_encoder_tokens, num_decoder_tokens
        input_texts = X
        target_texts = Y
        # collect the set of distinct characters on each side
        input_characters = set()
        target_characters = set()
        for line in input_texts:
            for char in line:
                input_characters.add(char)
        for line in target_texts:
            for char in line:
                target_characters.add(char)
        input_characters = sorted(input_characters)
        target_characters = sorted(target_characters)
        num_encoder_tokens = len(input_characters)
        num_decoder_tokens = len(target_characters)
        max_encoder_seq_length = max(len(txt) for txt in input_texts)
        max_decoder_seq_length = max(len(txt) for txt in target_texts)
        print('Number of samples:', len(input_texts))
        print('Number of unique input tokens:', num_encoder_tokens)
        print('Number of unique output tokens:', num_decoder_tokens)
        print('Max sequence length for inputs:', max_encoder_seq_length)
        print('Max sequence length for outputs:', max_decoder_seq_length)
        # char -> index lookup tables
        input_token_index = dict((char, i) for i, char in enumerate(input_characters))
        target_token_index = dict((char, i) for i, char in enumerate(target_characters))
        return input_token_index, target_token_index
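
A quick usage sketch, assuming question.txt and answer.txt are in place (the printed entries depend entirely on your corpus):

    X, Y = get_xy_data()
    input_token_index, target_token_index = get_vocab_dict(X, Y)
    # characters are sorted by code point, so '\t' and '\n' come first,
    # e.g. [('\t', 0), ('\n', 1), ...]
    print(list(target_token_index.items())[:5])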

4. Build reverse vocabularies, used at prediction time to turn vectors back into readable characters

    def get_rev_dict(input_token_index, target_token_index):
        # index -> char lookups, the inverse of the dictionaries above
        reverse_input_char_index = dict(
            (i, char) for char, i in input_token_index.items())
        reverse_target_char_index = dict(
            (i, char) for char, i in target_token_index.items())
        return reverse_input_char_index, reverse_target_char_index
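
Usage is symmetric to the forward dictionaries; a minimal round-trip check:

    reverse_input_char_index, reverse_target_char_index = get_rev_dict(
        input_token_index, target_token_index)
    idx = target_token_index['\t']
    assert reverse_target_char_index[idx] == '\t'  # index maps back to the character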

II. Training

1. Define parameters

    from keras.models import Model
    from keras.layers import Input, LSTM, Dense
    import numpy as np
    import data_util
    from data_util import get_vocab_dict, get_xy_data

    # hyperparameters
    batch_size = 32
    epochs = 100
    latent_dim = 256  # dimensionality of the LSTM hidden state
    # filled in by data_deal()
    input_texts = []
    target_texts = []
    input_token_index = []
    target_token_index = []
    encoder_input_data = None
    decoder_input_data = None
    decoder_target_data = None

2. Call the preprocessing module data_util.py to get the training data and vocabularies

    def data_deal():
        global encoder_input_data, decoder_input_data, decoder_target_data
        global input_texts, target_texts, input_token_index, target_token_index
        input_texts, target_texts = get_xy_data()
        input_token_index, target_token_index = get_vocab_dict(input_texts, target_texts)
        # each sentence is one-hot encoded as a 2-D matrix (time steps x vocab size),
        # so the whole corpus becomes a 3-D tensor (samples x time steps x vocab size)
        encoder_input_data = np.zeros(
            (len(input_texts), data_util.max_encoder_seq_length, len(input_token_index)),
            dtype='float32')
        decoder_input_data = np.zeros(
            (len(input_texts), data_util.max_decoder_seq_length, len(target_token_index)),
            dtype='float32')
        decoder_target_data = np.zeros(
            (len(input_texts), data_util.max_decoder_seq_length, len(target_token_index)),
            dtype='float32')
        for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
            for t, char in enumerate(input_text):
                encoder_input_data[i, t, input_token_index[char]] = 1
            for t, char in enumerate(target_text):
                decoder_input_data[i, t, target_token_index[char]] = 1
                # the target is the decoder input shifted one step to the left
                if t > 0:
                    decoder_target_data[i, t - 1, target_token_index[char]] = 1
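
The one-step offset between decoder input and decoder target is what implements teacher forcing: at step t the decoder is fed the gold character t and trained to predict character t+1. A small sanity check, assuming data_deal() has been run (the three tensors can be large for a big corpus):

    data_deal()
    # decoder_target_data is decoder_input_data shifted one step to the left
    assert (decoder_target_data[:, :-1, :] == decoder_input_data[:, 1:, :]).all()
    print(encoder_input_data.shape)  # (samples, max_encoder_seq_length, num_encoder_tokens)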

3. Build the seq2seq model

    def build_model():
        global input_token_index, target_token_index
        # training encoder: consume the whole input sequence and keep
        # only its final hidden and cell states
        encoder_inputs = Input(shape=(None, len(input_token_index)))
        encoder = LSTM(latent_dim, return_state=True)
        encoder_outputs, state_h, state_c = encoder(encoder_inputs)
        encoder_states = [state_h, state_c]
        # training decoder: teacher forcing, initialized with the encoder states
        decoder_inputs = Input(shape=(None, len(target_token_index)))
        decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
        decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                             initial_state=encoder_states)
        decoder_dense = Dense(len(target_token_index), activation='softmax')
        decoder_outputs = decoder_dense(decoder_outputs)
        model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        # encoder used when predicting new sequences
        encoder_model = Model(encoder_inputs, encoder_states)
        # decoder used when predicting new sequences: one step at a time,
        # fed its own previous states
        decoder_state_input_h = Input(shape=(latent_dim,))
        decoder_state_input_c = Input(shape=(latent_dim,))
        decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
        decoder_outputs, state_h, state_c = decoder_lstm(
            decoder_inputs, initial_state=decoder_states_inputs)
        decoder_states = [state_h, state_c]
        decoder_outputs = decoder_dense(decoder_outputs)
        decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                              [decoder_outputs] + decoder_states)
        return model, encoder_model, decoder_model
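
The three returned models share the same LSTM and Dense layer objects, so training the combined model also updates the weights used by the inference encoder and decoder; no separate training pass is needed. A quick way to inspect the graphs, once data_deal() has populated the vocabularies:

    model, encoder_model, decoder_model = build_model()
    model.summary()          # combined encoder-decoder graph used for training
    encoder_model.summary()  # encoder alone: input sequence -> [state_h, state_c]
    decoder_model.summary()  # one decode step, given the previous states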

4. Train the model and save it

    # train and save
    if __name__ == "__main__":
        data_deal()
        model, encoder_model, decoder_model = build_model()
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
        model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_split=0.2)
        model.save('model.h5')
        encoder_model.save('encoder_model.h5')
        decoder_model.save('decoder_model.h5')
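
Character-level training for 100 epochs can take hours, so a checkpoint callback guards against losing a long run. This is an optional addition, not part of the original script, using the standard keras.callbacks.ModelCheckpoint:

    from keras.callbacks import ModelCheckpoint

    # keep the best weights seen so far, judged by validation loss
    checkpoint = ModelCheckpoint('model-checkpoint.h5',
                                 monitor='val_loss', save_best_only=True)
    model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
              batch_size=batch_size,
              epochs=epochs,
              validation_split=0.2,
              callbacks=[checkpoint])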

III. Prediction

1. Define parameters

    from keras.models import load_model
    import numpy as np
    import data_util
    from data_util import get_vocab_dict, get_xy_data, get_rev_dict

    # vocabularies, rebuilt from the corpus before inference
    input_texts = []
    target_texts = []
    input_token_index = []
    target_token_index = []

2. Run inference

    # inference
    def decoder_sequence(input_seq):
        # encode the input question into the LSTM state vectors
        states_value = encoder_model.predict(input_seq)
        # seed the decoder with the start character '\t'
        target_seq = np.zeros((1, 1, data_util.num_decoder_tokens))
        target_seq[0, 0, target_token_index['\t']] = 1
        # sampling loop (batch size 1): generate one character per step
        stop_condition = False
        decoded_sentence = ''
        while not stop_condition:
            output_tokens, h, c = decoder_model.predict(
                [target_seq] + states_value)
            # greedy sampling: take the most probable next character
            sampled_token_index = np.argmax(output_tokens[0, -1, :])
            sampled_char = reverse_target_char_index[sampled_token_index]
            decoded_sentence += sampled_char
            # stop at the end character '\n' or when the answer gets too long
            if (sampled_char == '\n' or
                    len(decoded_sentence) > data_util.max_decoder_seq_length):
                stop_condition = True
            # update the target sequence to predict the next token
            target_seq = np.zeros((1, 1, data_util.num_decoder_tokens))
            target_seq[0, 0, sampled_token_index] = 1
            # update the states
            states_value = [h, c]
        return decoded_sentence

    def predict_ans(question):
        # one-hot encode the question exactly as the training data was encoded
        input_seq = np.zeros(
            (1, data_util.max_encoder_seq_length, data_util.num_encoder_tokens),
            dtype='float32')
        for t, char in enumerate(question):
            input_seq[0, t, input_token_index[char]] = 1
        return decoder_sequence(input_seq)

    if __name__ == "__main__":
        input_texts, target_texts = get_xy_data()
        input_token_index, target_token_index = get_vocab_dict(input_texts, target_texts)
        reverse_input_char_index, reverse_target_char_index = get_rev_dict(
            input_token_index, target_token_index)
        encoder_model = load_model('encoder_model.h5')
        decoder_model = load_model('decoder_model.h5')
        print('Decoded sentence:', predict_ans('这是个傻子'))
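
Building on predict_ans, a minimal interactive loop might look like the sketch below. The filtering and truncation lines are additions: predict_ans raises a KeyError on characters absent from the training vocabulary and an IndexError on questions longer than max_encoder_seq_length, so both cases are guarded here:

    while True:
        question = input('You: ')
        if not question:
            break
        # drop unseen characters and clip over-long questions
        question = ''.join(ch for ch in question if ch in input_token_index)
        question = question[:data_util.max_encoder_seq_length]
        print('Bot:', predict_ans(question).strip())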

GitHub: project link
