当前位置:   article > 正文

基于lstm与cnn的文本分类_lstm-cnn文本分类

lstm-cnn文本分类

主要内容

本文主要任务是基于文本信息进行用户评价分类,分为两类(即正面情绪和负面情绪),数据样例如下:

项目目录与地址

本文使用的数据有

停用词表(hit_stopwords.txt)来源:

停顿词项目目录预览 - stopwords - GitCode

data目录下的所有数据来源:

项目首页 - chinese_text_cnn - GitCode

所有项目代码地址:

text_classificationWithLSTM: 基于lstm与cnn的文本分类 (gitee.com)

一:数据预处理data_set.py

首先对所获取的数据进行停用词处理:利用 hit_stopwords.txt 清洗掉停用词,并删除去掉停用词后只剩空格或无效符号的样本,最后生成训练模型所需要的 train.txt 和 test.txt。

  1. import pandas as pd
  2. import jieba
  3. # 数据读取
  4. def load_tsv(file_path):
  5. data = pd.read_csv(file_path, sep='\t')
  6. data_x = data.iloc[:, -1]
  7. data_y = data.iloc[:, 1]
  8. return data_x, data_y
# Build the stop-word list from HIT's stop-word file, one token per line.
with open('./hit_stopwords.txt', 'r', encoding='UTF8') as f:
    stop_words = [word.strip() for word in f.readlines()]
print('Successfully')
  12. def drop_stopword(datas):
  13. for data in datas:
  14. for word in data:
  15. if word in stop_words:
  16. data.remove(word)
  17. return datas
  18. def save_data(datax, path):
  19. with open(path, 'w', encoding="UTF8") as f:
  20. for lines in datax:
  21. for i, line in enumerate(lines):
  22. f.write(str(line))
  23. # 如果不是最后一行,就添加一个逗号
  24. if i != len(lines) - 1:
  25. f.write(',')
  26. f.write('\n')
  27. if __name__ == '__main__':
  28. train_x, train_y = load_tsv("./data/train.tsv")
  29. test_x, test_y = load_tsv("./data/test.tsv")
  30. train_x = [list(jieba.cut(x)) for x in train_x]
  31. test_x = [list(jieba.cut(x)) for x in test_x]
  32. train_x = drop_stopword(train_x)
  33. test_x = drop_stopword(test_x)
  34. save_data(train_x, './train.txt')
  35. save_data(test_x, './test.txt')
  36. print('Successfully')

二:lstm模型训练

  1. import pandas as pd
  2. import torch
  3. from torch import nn
  4. import jieba
  5. from gensim.models import Word2Vec
  6. import numpy as np
  7. from data_set import load_tsv
  8. from torch.utils.data import DataLoader, TensorDataset
  9. # 数据读取
  10. def load_txt(path):
  11. with open(path, 'r', encoding='utf-8') as f:
  12. data = [[line.strip()] for line in f.readlines()]
  13. return data
# Load the pre-processed token files produced by data_set.py.
train_x = load_txt('train.txt')
test_x = load_txt('test.txt')
# Merge both splits into one flat corpus for Word2Vec training.
train = train_x + test_x
X_all = [i for x in train for i in x]
# Only the labels are needed here; the text was already cleaned.
_, train_y = load_tsv("./data/train.tsv")
_, test_y = load_tsv("./data/test.tsv")
# Train the Word2Vec embedding model on the corpus.
# NOTE(review): X_all holds whole comma-joined line strings, so gensim
# iterates each "sentence" character by character — confirm this is intended.
word2vec_model = Word2Vec(sentences=X_all, vector_size=100, window=5, min_count=1, workers=4)
  22. # 将文本转换为Word2Vec向量表示
  23. def text_to_vector(text):
  24. vector = [word2vec_model.wv[word] for word in text if word in word2vec_model.wv] # 将每个词转换为 Word2Vec 向量
  25. return sum(vector) / len(vector) if vector else [0] * word2vec_model.vector_size # 计算平均向量
# Each sample becomes a length-1 "sequence" holding one averaged vector.
X_train_w2v = [[text_to_vector(text)] for line in train_x for text in line]
X_test_w2v = [[text_to_vector(text)] for line in test_x for text in line]
# Convert the word vectors to PyTorch tensors via float32 numpy arrays.
X_train_array = np.array(X_train_w2v, dtype=np.float32)
X_train_tensor = torch.Tensor(X_train_array)
X_test_array = np.array(X_test_w2v, dtype=np.float32)
X_test_tensor = torch.Tensor(X_test_array)
# Wrap features and labels in DataLoaders for mini-batch processing.
train_dataset = TensorDataset(X_train_tensor, torch.LongTensor(train_y))
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataset = TensorDataset(X_test_tensor, torch.LongTensor(test_y))
# NOTE(review): shuffling the test loader does not affect accuracy but is unnecessary.
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
  38. # 定义LSTM模型
  39. class LSTMModel(nn.Module):
  40. def __init__(self, input_size, hidden_size, output_size):
  41. super(LSTMModel, self).__init__()
  42. self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
  43. self.fc = nn.Linear(hidden_size, output_size)
  44. def forward(self, x):
  45. lstm_out, _ = self.lstm(x)
  46. output = self.fc(lstm_out[:, -1, :]) # 取序列的最后一个输出
  47. return output
# Instantiate the model.
input_size = word2vec_model.vector_size
hidden_size = 50  # width of the LSTM hidden state
output_size = 2  # two classes: positive / negative sentiment
model = LSTMModel(input_size, hidden_size, output_size)
# Loss function and optimiser.
criterion = nn.CrossEntropyLoss()  # cross-entropy for classification
optimizer = torch.optim.Adam(model.parameters(), lr=0.0002)  # Adam optimiser
  56. if __name__ == "__main__":
  57. # 训练模型
  58. num_epochs = 100 # 迭代次数
  59. log_interval = 100 # 每隔100个批次输出一次日志
  60. loss_min = 100
  61. for epoch in range(num_epochs):
  62. model.train() # 设置模型为训练模式
  63. for batch_idx, (data, target) in enumerate(train_loader):
  64. outputs = model(data) # 模型前向传播
  65. loss = criterion(outputs, target) # 计算损失
  66. optimizer.zero_grad() # 梯度清零
  67. loss.backward() # 反向传播
  68. optimizer.step() # 更新参数
  69. if batch_idx % log_interval == 0:
  70. print('Epoch [{}/{}], Batch [{}/{}], Loss: {:.4f}'.format(
  71. epoch + 1, num_epochs, batch_idx, len(train_loader), loss.item()))
  72. # 保存最佳模型
  73. if loss.item() < loss_min:
  74. loss_min = loss.item()
  75. torch.save(model, 'lstm_model.pth')
  76. # 模型评估
  77. with torch.no_grad():
  78. model.eval()
  79. correct = 0
  80. total = 0
  81. for data, target in test_loader:
  82. outputs = model(data)
  83. _, predicted = torch.max(outputs.data, 1)
  84. total += target.size(0)
  85. correct += (predicted == target).sum().item()
  86. accuracy = correct / total
  87. print('Test Accuracy: {:.2%}'.format(accuracy))

运行完截图

三:cnn模型训练

  1. import pandas as pd
  2. import torch
  3. from torch import nn
  4. import jieba
  5. from gensim.models import Word2Vec
  6. import numpy as np
  7. from data_set import load_tsv
  8. from torch.utils.data import DataLoader, TensorDataset
  9. # 数据读取
  10. def load_txt(path):
  11. with open(path, 'r', encoding='utf-8') as f:
  12. data = [[line.strip()] for line in f.readlines()]
  13. return data
# Load the pre-processed token files produced by data_set.py.
train_x = load_txt('train.txt')
test_x = load_txt('test.txt')
# Merge both splits into one flat corpus for Word2Vec training.
train = train_x + test_x
X_all = [i for x in train for i in x]
# Only the labels are needed here; the text was already cleaned.
_, train_y = load_tsv("./data/train.tsv")
_, test_y = load_tsv("./data/test.tsv")
# Train the Word2Vec embedding model on the corpus.
# NOTE(review): X_all holds whole comma-joined line strings, so gensim
# iterates each "sentence" character by character — confirm this is intended.
word2vec_model = Word2Vec(sentences=X_all, vector_size=100, window=5, min_count=1, workers=4)
  22. # 将文本转换为Word2Vec向量表示
  23. def text_to_vector(text):
  24. vector = [word2vec_model.wv[word] for word in text if word in word2vec_model.wv]
  25. return sum(vector) / len(vector) if vector else [0] * word2vec_model.vector_size
# Each sample becomes a length-1 "sequence" holding one averaged vector.
X_train_w2v = [[text_to_vector(text)] for line in train_x for text in line]
X_test_w2v = [[text_to_vector(text)] for line in test_x for text in line]
# Convert the word vectors to PyTorch tensors via float32 numpy arrays.
X_train_array = np.array(X_train_w2v, dtype=np.float32)
X_train_tensor = torch.Tensor(X_train_array)
X_test_array = np.array(X_test_w2v, dtype=np.float32)
X_test_tensor = torch.Tensor(X_test_array)
# Wrap features and labels in DataLoaders for mini-batch processing.
train_dataset = TensorDataset(X_train_tensor, torch.LongTensor(train_y))
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataset = TensorDataset(X_test_tensor, torch.LongTensor(test_y))
# NOTE(review): shuffling the test loader does not affect accuracy but is unnecessary.
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
  38. # 定义cnn模型
  39. class CNNModel(nn.Module):
  40. def __init__(self, input_size, output_size):
  41. super(CNNModel, self).__init__()
  42. self.conv1 = nn.Conv1d(input_size, 32, kernel_size=3, padding=1) # 第一个一维卷积层
  43. self.conv2 = nn.Conv1d(32, 64, kernel_size=3, padding=1) # 第二个一维卷积层
  44. self.fc = nn.Linear(64, output_size) # 全连接层
  45. def forward(self, x):
  46. x = x.permute(0, 2, 1) # # Conv1d期望输入格式为(batch_size, channels, sequence_length)
  47. x = torch.relu(self.conv1(x)) # 第一个卷积层的激活函数
  48. x = torch.relu(self.conv2(x)) # 第二个卷积层的激活函数
  49. x = torch.max_pool1d(x, kernel_size=x.size(2)) # 全局最大池化
  50. x = x.squeeze(2) # 移除最后一个维度
  51. x = self.fc(x) # 全连接层
  52. return x
# Instantiate the CNN model, loss function and optimiser.
input_size = word2vec_model.vector_size  # input width = Word2Vec vector size
output_size = 2  # two classes: positive / negative sentiment
cnn_model = CNNModel(input_size, output_size)  # create the CNN model
criterion = nn.CrossEntropyLoss()  # cross-entropy for classification
optimizer = torch.optim.Adam(cnn_model.parameters(), lr=0.0002)  # Adam optimiser
  59. if __name__ == "__main__":
  60. # 训练和评估
  61. num_epochs = 100 # 迭代次数
  62. log_interval = 100 # 日志打印间隔
  63. loss_min = 100 # 最小损失值
  64. for epoch in range(num_epochs):
  65. cnn_model.train() # 设置模型为训练模式
  66. for batch_idx, (data, target) in enumerate(train_loader):
  67. outputs = cnn_model(data) # 模型前向传播
  68. loss = criterion(outputs, target) # 计算损失
  69. optimizer.zero_grad() # 梯度清零
  70. loss.backward() # 反向传播
  71. optimizer.step() # 更新参数
  72. if batch_idx % log_interval == 0:
  73. print('Epoch [{}/{}], Batch [{}/{}], Loss: {:.4f}'.format(
  74. epoch + 1, num_epochs, batch_idx, len(train_loader), loss.item()))
  75. if loss.item() < loss_min:
  76. loss_min = loss.item()
  77. torch.save(cnn_model, 'cnn_model.pth')
  78. # 评估
  79. with torch.no_grad():
  80. cnn_model.eval()
  81. correct = 0
  82. total = 0
  83. for data, target in test_loader:
  84. outputs = cnn_model(data)
  85. _, predicted = torch.max(outputs.data, 1)
  86. total += target.size(0)
  87. correct += (predicted == target).sum().item()
  88. accuracy = correct / total
  89. print('测试准确率(CNN模型):{:.2%}'.format(accuracy))

运行完截图:

四:测试模型

  1. import torch
  2. import jieba
  3. from gensim.models import Word2Vec
  4. import numpy as np
  5. from lstm import LSTMModel
  6. from cnn import CNNModel
  7. # 数据读取
  8. def load_txt(path):
  9. with open(path, 'r', encoding='utf-8') as f:
  10. data = [[line.strip()] for line in f.readlines()]
  11. return data
  12. # 去停用词
  13. def drop_stopword(datas):
  14. # 用于预处理文本数据
  15. with open('./hit_stopwords.txt', 'r', encoding='UTF8') as f:
  16. stop_words = [word.strip() for word in f.readlines()]
  17. datas = [x for x in datas if x not in stop_words]
  18. return datas
  19. def preprocess_text(text):
  20. text = list(jieba.cut(text))
  21. text = drop_stopword(text)
  22. return text
  23. # 将文本转换为Word2Vec向量表示
  24. def text_to_vector(text):
  25. train_x = load_txt('train.txt')
  26. test_x = load_txt('test.txt')
  27. train = train_x + test_x
  28. X_all = [i for x in train for i in x]
  29. # 训练Word2Vec模型
  30. word2vec_model = Word2Vec(sentences=X_all, vector_size=100, window=5, min_count=1, workers=4)
  31. vector = [word2vec_model.wv[word] for word in text if word in word2vec_model.wv]
  32. return sum(vector) / len(vector) if vector else [0] * word2vec_model.vector_size
  33. if __name__ == '__main__':
  34. user_input = input("Select model:\n1.lstm_model.pth\n2.cnn_model.pth\n")
  35. if user_input=="1":
  36. modelName="lstm_model.pth"
  37. elif user_input=="2":
  38. modelName="cnn_model.pth"
  39. else:
  40. print("no model name is "+user_input)
  41. exit(0)
  42. # input_text = "这个车完全就是垃圾,又热又耗油"
  43. input_text = "回头率还可以,无框门,上档次"
  44. label = {1: "正面情绪", 0: "负面情绪"}
  45. model = torch.load(modelName)
  46. # 预处理输入数据
  47. input_data = preprocess_text(input_text)
  48. # 确保输入词向量与模型维度和数据类型相同
  49. input_data = [[text_to_vector(input_data)]]
  50. input_arry = np.array(input_data, dtype=np.float32)
  51. input_tensor = torch.Tensor(input_arry)
  52. # 将输入数据传入模型
  53. with torch.no_grad():
  54. output = model(input_tensor)
  55. predicted_class = label[torch.argmax(output).item()]
  56. print(f"predicted_text:{input_text}")
  57. print(f"模型预测的类别: {predicted_class}")

测试截图:

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/黑客灵魂/article/detail/1015687
推荐阅读
相关标签
  

闽ICP备14008679号