PyTorch in Practice: CIFAR-10 Classification with ResNet50/101/152
Environment: PyCharm (IDE), PyTorch 0.4, Python 3.6.
To fit the 32×32 images in CIFAR-10, the ResNet is modified while keeping every layer's channel count unchanged: the (k=7, s=2, p=3) convolution in conv1 becomes (3, 1, 1); the max pool in conv2 changes from (3, 2, 1) to (3, 1, 1); and the average pool before the fc layer is adjusted to conv5's output size (the 4×4 output of that stage is pooled to 1×1).
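A quick way to verify these changes is to trace the spatial size through the network and run a dummy batch (a minimal sanity check, assuming the ResNet50 constructor defined in the full code below):

# 32x32 input: conv1 (3x3, s=1) and its max pool (s=1) keep 32x32;
# layer1 (s=1) -> 32x32, layer2 (s=2) -> 16x16, layer3 (s=2) -> 8x8, layer4 (s=2) -> 4x4;
# avg_pool2d(x, 4) then reduces 4x4 to 1x1 before the fc layer.
import torch
net = ResNet50()
out = net(torch.randn(1, 3, 32, 32))
print(out.size())  # expected: torch.Size([1, 10])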
TensorBoard output of the loss curve and the train/test accuracy curves is added at the end.
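The SummaryWriter below writes to the scalar directory, so the curves can be viewed in a browser by launching TensorBoard (assuming tensorboardX and a TensorBoard installation):

tensorboard --logdir scalar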
The learning-rate decay code is below; step_size and gamma can be tuned, and other decay policies can be substituted (PyTorch provides six schedulers).
scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.2)
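For example, two of the alternatives (the milestones, gamma, and T_max values here are illustrative, not tuned for this task):

# Drop the lr by 10x at fixed epochs:
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.1)
# Or anneal it smoothly over all epochs:
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)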
Change the following line to use ResNet50, ResNet101, or ResNet152:
model = ResNet50().to(device)
The full code follows:
import os
import torch
import torchvision
from torch import nn, optim
from torch.optim import lr_scheduler
from torchvision import transforms
from tensorboardX import SummaryWriter
import torch.nn.functional as F
__all__ = ['ResNet50', 'ResNet101','ResNet152']
def Conv1(in_planes, places, stride=1):
    # Stem adapted for CIFAR-10: 3x3/s1 conv instead of the original 7x7/s2,
    # and the max pool keeps stride 1, so the 32x32 resolution is preserved.
    return nn.Sequential(
        nn.Conv2d(in_channels=in_planes, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(places),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
    )
class Bottleneck(nn.Module):
    # Standard 1x1 -> 3x3 -> 1x1 bottleneck; the output has places * expansion channels.
    def __init__(self, in_places, places, stride=1, downsampling=False, expansion=4):
        super(Bottleneck, self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places * self.expansion),
        )
        if self.downsampling:
            # 1x1 projection so the shortcut matches the main path's shape.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(places * self.expansion)
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.bottleneck(x)
        if self.downsampling:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    def __init__(self, blocks, num_classes=10, expansion=4):
        super(ResNet, self).__init__()
        self.expansion = expansion
        self.conv1 = Conv1(in_planes=3, places=64)
        self.layer1 = self.make_layer(in_places=64, places=64, block=blocks[0], stride=1)
        self.layer2 = self.make_layer(in_places=256, places=128, block=blocks[1], stride=2)
        self.layer3 = self.make_layer(in_places=512, places=256, block=blocks[2], stride=2)
        self.layer4 = self.make_layer(in_places=1024, places=512, block=blocks[3], stride=2)
        self.fc = nn.Linear(2048, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layer(self, in_places, places, block, stride):
        # The first block of each stage downsamples (when stride > 1) and projects
        # the shortcut; the remaining blocks keep the shape.
        layers = []
        layers.append(Bottleneck(in_places, places, stride, downsampling=True))
        for i in range(1, block):
            layers.append(Bottleneck(places * self.expansion, places))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = F.avg_pool2d(x, 4)  # conv5 output is 4x4 for 32x32 inputs, pooled to 1x1
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def ResNet50():
    return ResNet([3, 4, 6, 3])

def ResNet101():
    return ResNet([3, 4, 23, 3])

def ResNet152():
    return ResNet([3, 8, 36, 3])
def train_accuracy():
    model.eval()  # use running BatchNorm statistics during evaluation
    correct = 0
    total = 0
    with torch.no_grad():
        for data in trainloader:
            img, labels = data
            img, labels = img.to(device), labels.to(device)
            out = model(img)
            _, pred = torch.max(out.data, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
    print('Accuracy of the network on the train images: %d %%' % (100 * correct / total))
    return 100.0 * correct / total
def test_accuracy():
    model.eval()  # use running BatchNorm statistics during evaluation
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            img, labels = data
            img, labels = img.to(device), labels.to(device)
            out = model(img)
            _, pred = torch.max(out.data, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
    return 100.0 * correct / total
def train():
    # Loss and optimizer: mini-batch momentum SGD with L2 regularization (weight decay).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    # optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=LR, betas=(0.9, 0.99))
    scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.2)
    # scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    global_step = 0
    # Training loop
    for epoch in range(num_epoches):
        model.train()  # back to training mode after the accuracy passes
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            global_step += 1
            img, labels = data
            img, labels = img.to(device), labels.to(device)
            optimizer.zero_grad()
            out = model(img)
            loss = criterion(out, labels)
            loss.backward()
            writer.add_scalar('scalar/loss', loss.item(), global_step)
            optimizer.step()
            running_loss += loss.item()
            # CrossEntropyLoss already averages over the batch, so the running
            # average is running_loss / (i + 1), not divided by batchSize again.
            print('epoch: %d\t batch: %d\t lr: %g\t loss: %.6f' % (
                epoch + 1, i + 1, scheduler.get_lr()[0], running_loss / (i + 1)))
        scheduler.step()  # decay the learning rate once per epoch, not per batch
        writer.add_scalar('scalar/train_accuracy', train_accuracy(), epoch + 1)
        writer.add_scalar('scalar/test_accuracy', test_accuracy(), epoch + 1)
        print('\n')
        torch.save(model, './model.pkl')
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # pad by 4, then randomly crop back to 32x32
    transforms.RandomHorizontalFlip(0.5),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
modelPath = './model.pkl'
batchSize = 64
LR = 0.1
num_epoches = 200
writer = SummaryWriter(log_dir='scalar')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
trainset = torchvision.datasets.CIFAR10(root='./Cifar-10', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchSize, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./Cifar-10', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batchSize, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
model = ResNet50().to(device)
if __name__ == '__main__':
    # Resume from a saved model if one exists.
    if os.path.exists(modelPath):
        print('model exists')
        model = torch.load(modelPath)
        print('model loaded')
    else:
        print('model does not exist')
    print('Training starts')
    train()
    writer.close()
    print('Training finished')
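After training, the saved model.pkl can be used for inference on its own; a minimal sketch, assuming the device and classes definitions from the script above (the random tensor stands in for a transform_test-normalized CIFAR-10 image):

model = torch.load('./model.pkl')
model.eval()
img = torch.randn(1, 3, 32, 32).to(device)  # stand-in for a real, normalized image
with torch.no_grad():
    out = model(img)
    _, pred = torch.max(out, 1)
print('predicted class:', classes[pred.item()])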