使用python中的nets.cifarnet模块构建卷积神经网络进行图像分类任务
发布时间:2023-12-27 19:28:08
在Python中,可以使用PyTorch(torch与torchvision包)构建一个卷积神经网络进行图像分类任务。需要注意的是,torchvision本身并不内置名为CIFARNet的模块——下文我们将自行定义一个CIFARNet网络,并在CIFAR-10数据集上对其进行训练和测试。
首先,我们需要导入所需的包和模块:
import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler
接下来,我们需要定义数据转换、加载数据集、定义模型和训练模型的函数。
# 数据转换
# Training transform: data augmentation (random crop + horizontal flip)
# followed by tensor conversion and per-channel normalization.
transform = transforms.Compose(
    [transforms.RandomCrop(32, padding=4),
     transforms.RandomHorizontalFlip(),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Evaluation transform: NO augmentation — the test set must be scored on
# the unmodified images, otherwise reported accuracy is distorted.
transform_test = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Load CIFAR-10 (downloads to ./data on first use).
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128,
                                         shuffle=False, num_workers=2)
# Human-readable labels for the 10 CIFAR-10 classes, in label-index order.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# 定义模型
class CIFARNet(nn.Module):
    """A small VGG-style CNN for 32x32 CIFAR-10 images (10 classes).

    Three convolutional stages, each ending in 2x2 max-pooling
    (spatial size 32 -> 16 -> 8 -> 4), followed by two fully
    connected layers producing the class logits.
    """

    def __init__(self):
        super(CIFARNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        # After three poolings a 32x32 input is 4x4 with 256 channels.
        self.fc1 = nn.Linear(256 * 4 * 4, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class logits."""
        x = nn.functional.relu(self.conv1(x))
        x = nn.functional.max_pool2d(x, 2, 2)
        x = nn.functional.relu(self.conv2(x))
        x = nn.functional.relu(self.conv3(x))
        x = nn.functional.max_pool2d(x, 2, 2)
        x = nn.functional.relu(self.conv4(x))
        x = nn.functional.relu(self.conv5(x))
        x = nn.functional.max_pool2d(x, 2, 2)
        # Flatten per sample. Using size(0) instead of view(-1, 256*4*4)
        # guarantees the batch dimension is preserved; the original form
        # would silently fold a wrong-sized input into a bogus batch.
        x = x.view(x.size(0), -1)
        x = nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# 定义训练函数
def train(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` on the module-level `trainloader` for `num_epochs` epochs.

    Args:
        model: the network whose parameters are being optimized.
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: optimizer bound to model.parameters().
        scheduler: learning-rate scheduler, stepped once per epoch.
        num_epochs: number of full passes over the training set.
    """
    for epoch in range(num_epochs):
        model.train()  # ensure dropout/batch-norm layers are in train mode
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 200 == 199:  # print the average loss every 200 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0
        # Step the LR scheduler AFTER the epoch's optimizer steps.
        # The original called it at the top of the epoch, which skips the
        # initial learning rate (PyTorch >= 1.1 requires this ordering).
        scheduler.step()
# 定义测试函数
def test(model):
    """Evaluate `model` on the module-level `testloader` and print accuracy.

    Args:
        model: the trained network to evaluate.
    """
    model.eval()  # disable dropout / freeze batch-norm statistics
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in testloader:
            images, labels = data
            outputs = model(images)
            # Predicted class = index of the largest logit per sample.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))
最后,我们使用定义的函数来训练和测试模型:
# Instantiate the network.
cifarnet = CIFARNet()

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(cifarnet.parameters(), lr=0.1, momentum=0.9)

# Learning-rate decay: multiply the LR by 0.1 every 10 epochs.
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# Train the model, then evaluate it on the held-out test set.
train(cifarnet, criterion, optimizer, scheduler)
test(cifarnet)
在这个例子中,我们加载了CIFAR-10数据集,并自行定义了名为CIFARNet的卷积神经网络模型。然后,我们使用训练函数对模型进行训练,并使用测试函数在测试集上评估模型的准确率。
