
Text Classification and Sentiment Analysis with MXNet

Published: 2023-12-16 02:23:52

Text classification and sentiment analysis with MXNet are implemented by building deep learning models. In this tutorial, we use MXNet's Gluon API to walk through both tasks.

Text classification:

The goal of text classification is to assign a given piece of text to one of a set of predefined categories. Below is example code for text classification with MXNet:

import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon import nn
from mxnet.gluon.data import DataLoader, ArrayDataset
from collections import Counter
import gluonnlp  # MXNet's built-in vision datasets do not include IMDB; GluonNLP provides it
import nltk  # RegexpTokenizer needs no extra downloads

# Prepare the data
MAX_LEN = 200  # pad/truncate every review to a fixed length so batches stack cleanly

def load_data():
    train_dataset = gluonnlp.data.IMDB(segment='train')
    test_dataset = gluonnlp.data.IMDB(segment='test')

    train_text = [sample[0] for sample in train_dataset]
    test_text = [sample[0] for sample in test_dataset]
    # IMDB scores range from 1 to 10; treat scores above 5 as positive
    train_label = [1 if sample[1] > 5 else 0 for sample in train_dataset]
    test_label = [1 if sample[1] > 5 else 0 for sample in test_dataset]

    # Tokenize the raw text
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
    train_text = [tokenizer.tokenize(sentence.lower()) for sentence in train_text]
    test_text = [tokenizer.tokenize(sentence.lower()) for sentence in test_text]

    # Build the vocabulary from the training split, reserving index 0 for padding
    counter = Counter(word for sent in train_text for word in sent)
    num_of_words = len(counter)
    word2idx = {word: idx + 1 for idx, word in enumerate(counter.keys())}

    # Convert each sentence to a fixed-length index sequence
    def encode(sent):
        ids = [word2idx[word] for word in sent if word in word2idx][:MAX_LEN]
        return ids + [0] * (MAX_LEN - len(ids))

    train_text = [encode(sent) for sent in train_text]
    test_text = [encode(sent) for sent in test_text]

    return train_text, train_label, test_text, test_label, num_of_words, word2idx

train_text, train_label, test_text, test_label, num_of_words, word2idx = load_data()

# Define the model: embedding, average pooling over the sequence, then a dense classifier
class TextClassifier(nn.Block):
    def __init__(self, vocab_size, num_embed, num_hidden, num_classes, **kwargs):
        super(TextClassifier, self).__init__(**kwargs)
        # +1 because index 0 is reserved for padding
        self.embed = nn.Embedding(input_dim=vocab_size + 1, output_dim=num_embed)
        self.hidden = nn.Dense(num_hidden, activation='relu')
        self.output = nn.Dense(num_classes)

    def forward(self, text):
        embedded = self.embed(text)     # (batch, seq_len, num_embed)
        pooled = embedded.mean(axis=1)  # average the word embeddings
        hidden = self.hidden(pooled)
        return self.output(hidden)
    
num_embed = 100
num_hidden = 100
num_classes = 2
model = TextClassifier(num_of_words, num_embed, num_hidden, num_classes)

# Train the model: fixed-length sequences stack cleanly into NDArray batches
batch_size = 64
train_data = ArrayDataset(nd.array(train_text), nd.array(train_label))
test_data = ArrayDataset(nd.array(test_text), nd.array(test_label))
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size)

ctx = mx.cpu()
model.initialize(ctx=ctx)
trainer = gluon.Trainer(model.collect_params(), 'adam')
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()

num_epochs = 5
for epoch in range(num_epochs):
    epoch_loss = 0
    for batch_text, batch_label in train_loader:
        batch_text = batch_text.as_in_context(ctx)
        batch_label = batch_label.as_in_context(ctx)
        
        with mx.autograd.record():
            output = model(batch_text)
            loss = loss_fn(output, batch_label)
        loss.backward()
        trainer.step(batch_text.shape[0])  # the last batch may be smaller than batch_size
        epoch_loss += loss.mean().asscalar()
        
    print("Epoch {}, Loss: {}".format(epoch+1, epoch_loss / len(train_loader)))

# Evaluate on the test set
num_correct = 0
num_total = 0
for batch_text, batch_label in test_loader:
    batch_text = batch_text.as_in_context(ctx)
    batch_label = batch_label.as_in_context(ctx)
    
    output = model(batch_text)
    predictions = nd.argmax(output, axis=1)
    num_correct += nd.sum(predictions == batch_label).asscalar()
    num_total += len(batch_text)
    
accuracy = num_correct / num_total
print("Test Accuracy: {:.2%}".format(accuracy))

Sentiment analysis:

The goal of sentiment analysis is to determine the sentiment polarity of a given text, for example positive, negative, or neutral. Below is example code for sentiment analysis with MXNet:

import mxnet as mx
from mxnet import nd
from mxnet.gluon import Block, nn, rnn
from mxnet.gluon.data import DataLoader, ArrayDataset
from collections import Counter
import gluonnlp  # IMDB again comes from GluonNLP, not mxnet.gluon.data.vision
import nltk

# Prepare the data. Raw strings cannot feed an Embedding layer, so the reviews
# are numericalized with the same tokenize / pad scheme as in the classification example.
MAX_LEN = 200

def load_data():
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')

    def prepare(segment):
        data = gluonnlp.data.IMDB(segment=segment)
        text = [tokenizer.tokenize(sample[0].lower()) for sample in data]
        label = [1 if sample[1] > 5 else 0 for sample in data]  # scores above 5 are positive
        return text, label

    train_tok, train_label = prepare('train')
    test_tok, test_label = prepare('test')

    counter = Counter(word for sent in train_tok for word in sent)
    word2idx = {word: idx + 1 for idx, word in enumerate(counter)}  # 0 is the padding index

    def encode(sent):
        ids = [word2idx[word] for word in sent if word in word2idx][:MAX_LEN]
        return ids + [0] * (MAX_LEN - len(ids))

    train_text = [encode(sent) for sent in train_tok]
    test_text = [encode(sent) for sent in test_tok]
    return train_text, train_label, test_text, test_label, len(word2idx)

train_text, train_label, test_text, test_label, vocab_size = load_data()

# Define the model: embedding -> LSTM encoder -> dense classifier.
# Note that the LSTM lives in mxnet.gluon.rnn, not mxnet.gluon.nn.
class SentimentClassifier(Block):
    def __init__(self, vocab_size, embed_size, num_hidden, num_classes, **kwargs):
        super(SentimentClassifier, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size + 1, embed_size)  # +1 for the padding index
        # layout='NTC' matches the (batch, seq_len, channels) shape of the embedding output
        self.encoder = rnn.LSTM(num_hidden, layout='NTC')
        self.output = nn.Dense(num_classes)

    def forward(self, text):
        embedded = self.embedding(text)  # (batch, seq_len, embed_size)
        encoded = self.encoder(embedded) # (batch, seq_len, num_hidden)
        # classify from the hidden state at the last time step
        return self.output(encoded[:, -1, :])
    
embed_size = 100
num_hidden = 100
num_classes = 2
# vocab_size comes from load_data, so the embedding covers every index
model = SentimentClassifier(vocab_size, embed_size, num_hidden, num_classes)

# Train the model
batch_size = 64
train_data = ArrayDataset(nd.array(train_text), nd.array(train_label))
test_data = ArrayDataset(nd.array(test_text), nd.array(test_label))
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size)

ctx = mx.cpu()
model.initialize(ctx=ctx)
trainer = mx.gluon.Trainer(model.collect_params(), 'adam')
loss_fn = mx.gluon.loss.SoftmaxCrossEntropyLoss()

num_epochs = 5
for epoch in range(num_epochs):
    epoch_loss = 0
    for batch_text, batch_label in train_loader:
        batch_text = batch_text.as_in_context(ctx)
        batch_label = batch_label.as_in_context(ctx)
        
        with mx.autograd.record():
            output = model(batch_text)
            loss = loss_fn(output, batch_label)
        loss.backward()
        trainer.step(batch_text.shape[0])
        epoch_loss += loss.mean().asscalar()
        
    print("Epoch {}, Loss: {}".format(epoch+1, epoch_loss / len(train_loader)))

# Evaluate on the test set
num_correct = 0
num_total = 0
for batch_text, batch_label in test_loader:
    batch_text = batch_text.as_in_context(ctx)
    batch_label = batch_label.as_in_context(ctx)
    
    output = model(batch_text)
    predictions = output.argmax(axis=1)
    num_correct += (predictions == batch_label).sum().asscalar()
    num_total += batch_text.shape[0]
    
accuracy = num_correct / num_total
print("Test Accuracy: {:.2%}".format(accuracy))

In the code snippets above, we first load the training and test data from the IMDB dataset. We then define a model for each task (TextClassifier and SentimentClassifier) and train it in a loop that draws batches from a DataLoader. Finally, we evaluate each model's performance on the test data.

With the steps above, you can carry out text classification and sentiment analysis with MXNet. Note that data preparation differs from dataset to dataset and task to task, so you will need to adapt it to the dataset and task you are working with. A minimal sketch for custom data follows.
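
For instance, if your data is a plain in-memory list of (text, label) pairs rather than IMDB, the preprocessing might look like the following (the names and toy data are illustrative):

from collections import Counter
import nltk

# Illustrative sketch: numericalize a custom list of (text, label) pairs
samples = [("great product, works well", 1), ("arrived broken", 0)]  # toy data

tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
tokenized = [tokenizer.tokenize(text.lower()) for text, _ in samples]
labels = [label for _, label in samples]

counter = Counter(word for sent in tokenized for word in sent)
word2idx = {word: idx + 1 for idx, word in enumerate(counter)}  # 0 reserved for padding

MAX_LEN = 16
def encode(sent):
    ids = [word2idx[word] for word in sent if word in word2idx][:MAX_LEN]
    return ids + [0] * (MAX_LEN - len(ids))

encoded = [encode(sent) for sent in tokenized]  # ready for an Embedding layer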