在Python中使用torchtext.vocab.Vectors()加载和使用中文词向量表进行文本分类
发布时间:2024-01-09 09:54:37
torchtext.vocab.Vectors()是torchtext库中的一个类,用于加载和使用预训练的词向量表。它可以加载不同形式的词向量表,比如Word2Vec、GloVe等,并提供了一些方法来获取词向量。
在中文文本分类任务中使用中文词向量表,可以先将词向量表加载到torchtext.vocab.Vectors()中,然后使用该对象来构建词汇表,并获取词向量。
下面是一个使用中文词向量表进行中文文本分类的例子:
import torch
import torch.nn as nn
import torch.optim as optim
import torchtext
from torchtext.data import Field, LabelField, TabularDataset, BucketIterator
from torchtext.vocab import Vectors
# Select the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the pretrained Chinese word-vector table.
# NOTE(review): 'path_to_word_vectors' is a placeholder — point it at a real
# word2vec/GloVe-format file before running.
word_vectors = Vectors(name='path_to_word_vectors')

# Field: sequential, lower-cased text; include_lengths=True makes each
# batch.text a (padded_tensor, lengths) pair.  LabelField holds the class ids.
TEXT = Field(sequential=True, lower=True, include_lengths=True)
LABEL = LabelField()

# Load the train/validation/test splits from CSV files.
# NOTE(review): rows are mapped positionally to (text, label) — confirm the
# column order of the actual CSV files.
train_data, valid_data, test_data = TabularDataset.splits(
    path='path_to_dataset',
    train='train.csv',
    validation='valid.csv',
    test='test.csv',
    format='csv',
    fields=[('text', TEXT), ('label', LABEL)]
)

# Build vocabularies from the training split only; the pretrained vectors are
# attached to TEXT.vocab.vectors.
TEXT.build_vocab(train_data, vectors=word_vectors)
LABEL.build_vocab(train_data)
# Model definition
class TextClassifier(nn.Module):
    """Bag-of-embeddings text classifier: EmbeddingBag -> ReLU -> Linear.

    Fixes over the original version:
    * the offsets passed to EmbeddingBag were missing the leading 0 (the
      start of the first bag), so the wrong number of bags was produced;
    * ``sparse=True`` was dropped — sparse gradients are rejected by
      ``optim.Adam`` (used later in this script) at ``optimizer.step()``;
    * the padded 2D ``(seq_len, batch)`` tensor produced by torchtext's
      ``Field(include_lengths=True)`` is now flattened into the 1D
      concatenated form that EmbeddingBag-with-offsets requires.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, batch_size, num_layers=1):
        super().__init__()
        # Dense gradients so optim.Adam can update this layer.
        self.embedding = nn.EmbeddingBag(vocab_size, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        # Kept for backward compatibility of the constructor signature;
        # neither attribute is used by forward().
        self.batch_size = batch_size
        self.num_layers = num_layers

    def forward(self, text, text_lengths):
        """Compute unnormalised class scores for a batch.

        Args:
            text: 1D tensor of concatenated token ids, or the padded
                (seq_len, batch) tensor torchtext yields with
                include_lengths=True.
            text_lengths: 1D tensor with the true length of each example.

        Returns:
            Tensor of shape (batch, output_dim).
        """
        if text.dim() == 2:
            # Strip padding: keep only the first text_lengths[i] tokens of
            # column i, concatenated into one flat id sequence.
            text = torch.cat([text[:int(n), i] for i, n in enumerate(text_lengths)])
        # Bag i starts at sum(lengths[:i]); the first offset must be 0.
        offsets = torch.cat([
            text_lengths.new_zeros(1),
            torch.cumsum(text_lengths, dim=0)[:-1],
        ])
        embedded = self.embedding(text, offsets=offsets)
        hidden = torch.relu(self.fc1(embedded))
        return self.fc2(hidden)
# Model hyper-parameters
vocab_size = len(TEXT.vocab)   # includes the <unk>/<pad> tokens added by build_vocab
embedding_dim = 300            # must match the dimensionality of the loaded vectors — TODO confirm
hidden_dim = 100
output_dim = len(LABEL.vocab)  # one score per label class
batch_size = 64

# Instantiate the classifier on the selected device.
# NOTE(review): the pretrained vectors attached to TEXT.vocab are never copied
# into the model (e.g. model.embedding.weight.data.copy_(TEXT.vocab.vectors)),
# so the embedding layer trains from random initialisation — verify intent.
model = TextClassifier(vocab_size, embedding_dim, hidden_dim, output_dim, batch_size).to(device)

# Optimiser and loss.
# NOTE(review): if the embedding layer is built with sparse=True, optim.Adam
# raises on its sparse gradients — optim.SparseAdam would be needed for it.
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss().to(device)

# Bucketed iterators group similarly sized examples to minimise padding.
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=batch_size,
    sort_within_batch=True,           # sort each batch's examples by length
    sort_key=lambda x: len(x.text),   # bucket by text length
    device=device
)
# Training loop for a single epoch
def train(model, iterator, optimizer, criterion):
    """Run one training epoch and return (mean loss, mean accuracy) over batches."""
    model.train()
    running_loss = 0.0
    running_acc = 0.0
    for batch in iterator:
        optimizer.zero_grad()
        text, lengths = batch.text
        labels = batch.label.to(device)
        scores = model(text.to(device), lengths.to(device))
        loss = criterion(scores, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_acc += binary_accuracy(scores, labels).item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches
# Evaluation loop (no gradient tracking)
def evaluate(model, iterator, criterion):
    """Evaluate the model on an iterator and return (mean loss, mean accuracy)."""
    model.eval()
    running_loss = 0.0
    running_acc = 0.0
    with torch.no_grad():
        for batch in iterator:
            text, lengths = batch.text
            labels = batch.label.to(device)
            scores = model(text.to(device), lengths.to(device))
            running_loss += criterion(scores, labels).item()
            running_acc += binary_accuracy(scores, labels).item()
    n_batches = len(iterator)
    return running_loss / n_batches, running_acc / n_batches
# Accuracy metric (multi-class, despite the legacy name kept for callers)
def binary_accuracy(preds, y):
    """Return the fraction of rows whose argmax class equals the target label."""
    predicted_classes = preds.argmax(dim=1)
    return (predicted_classes == y).float().mean()
# Train for a fixed number of epochs, checkpointing the best model by
# validation loss.
N_EPOCHS = 10
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    # Save a checkpoint whenever validation loss improves.
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'best_model.pt')
    print(f'Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')

# Reload the best checkpoint before the final evaluation.
model.load_state_dict(torch.load('best_model.pt'))

# Report performance on the held-out test set.
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
上述代码中,首先使用torchtext.vocab.Vectors()加载中文词向量表,然后使用Field和LabelField定义处理文本和标签的方式。接着使用TabularDataset加载数据集,并根据训练数据构建词汇表。定义了一个简单的文本分类模型TextClassifier,使用EmbeddingBag作为输入层,接着经过全连接层进行分类。接下来使用BucketIterator创建数据迭代器。定义了训练函数train和评估函数evaluate,并在每个epoch中训练和评估模型。最后输出测试集上的损失和准确率。
