Getting Started with PyTorch: Simple Image Classification
I. CNN Image Classification
PyTorch Version: 1.0.0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("PyTorch Version: ",torch.__version__)
(1) First, define a simple ConvNet-based neural network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1 input channel, 20 filters, 5x5 kernels
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)          # 10 digit classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)                 # flatten the 4x4x50 feature maps
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)         # log-probabilities, to pair with F.nll_loss
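The 4*4*50 comes from the shapes: a 28x28 MNIST image becomes 24x24 after conv1 (5x5 kernel, stride 1, no padding), 12x12 after the first 2x2 max-pool, 8x8 after conv2, and 4x4 after the second pool, with 50 channels. A quick hypothetical sanity check, not part of the original post:

net = Net()
dummy = torch.zeros(2, 1, 28, 28)  # (batch, channels, height, width)
print(net(dummy).shape)            # torch.Size([2, 10])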
The NLL loss is defined as:
$$\ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \qquad l_n = -w_{y_n}\, x_{n, y_n}, \qquad w_c = \text{weight}[c] \cdot \mathbb{1}\{c \neq \text{ignore\_index}\}$$
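Here x holds the log-probabilities produced by log_softmax and y the target class indices, so the loss is just the negative log-probability of the correct class, averaged over the batch by default. A minimal illustrative check against F.nll_loss (assuming the imports above):

log_probs = F.log_softmax(torch.randn(4, 10), dim=1)   # 4 samples, 10 classes
targets = torch.tensor([3, 0, 9, 1])
manual = -log_probs[torch.arange(4), targets].mean()   # mean of -x_{n, y_n}
assert torch.allclose(manual, F.nll_loss(log_probs, targets))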
def train(model, device, train_loader, optimizer, epoch, log_interval=100):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print("Train Epoch: {} [{}/{} ({:0f}%)]\tLoss: {:.6f}".format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()
            ))
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
torch.manual_seed(53113)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
batch_size = test_batch_size = 32
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
epochs = 2
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)

save_model = True
if save_model:
    torch.save(model.state_dict(), "mnist_cnn.pt")
Result:
Train Epoch: 1 [0/60000 (0.000000%)] Loss: 2.297938
Train Epoch: 1 [3200/60000 (5.333333%)] Loss: 0.567845
Train Epoch: 1 [6400/60000 (10.666667%)] Loss: 0.206370
Train Epoch: 1 [9600/60000 (16.000000%)] Loss: 0.094653
Train Epoch: 1 [12800/60000 (21.333333%)] Loss: 0.180530
Train Epoch: 1 [16000/60000 (26.666667%)] Loss: 0.041645
Train Epoch: 1 [19200/60000 (32.000000%)] Loss: 0.135092
Train Epoch: 1 [22400/60000 (37.333333%)] Loss: 0.054001
Train Epoch: 1 [25600/60000 (42.666667%)] Loss: 0.111863
Train Epoch: 1 [28800/60000 (48.000000%)] Loss: 0.059039
Train Epoch: 1 [32000/60000 (53.333333%)] Loss: 0.089227
Train Epoch: 1 [35200/60000 (58.666667%)] Loss: 0.186015
Train Epoch: 1 [38400/60000 (64.000000%)] Loss: 0.093208
Train Epoch: 1 [41600/60000 (69.333333%)] Loss: 0.077090
Train Epoch: 1 [44800/60000 (74.666667%)] Loss: 0.038075
Train Epoch: 1 [48000/60000 (80.000000%)] Loss: 0.036247
Train Epoch: 1 [51200/60000 (85.333333%)] Loss: 0.052358
Train Epoch: 1 [54400/60000 (90.666667%)] Loss: 0.013201
Train Epoch: 1 [57600/60000 (96.000000%)] Loss: 0.036660
Test set: Average loss: 0.0644, Accuracy: 9802/10000 (98%)
Train Epoch: 2 [0/60000 (0.000000%)] Loss: 0.054402
Train Epoch: 2 [3200/60000 (5.333333%)] Loss: 0.032239
Train Epoch: 2 [6400/60000 (10.666667%)] Loss: 0.092350
Train Epoch: 2 [9600/60000 (16.000000%)] Loss: 0.058544
Train Epoch: 2 [12800/60000 (21.333333%)] Loss: 0.029762
Train Epoch: 2 [16000/60000 (26.666667%)] Loss: 0.012521
Train Epoch: 2 [19200/60000 (32.000000%)] Loss: 0.101891
Train Epoch: 2 [22400/60000 (37.333333%)] Loss: 0.127773
Train Epoch: 2 [25600/60000 (42.666667%)] Loss: 0.009259
Train Epoch: 2 [28800/60000 (48.000000%)] Loss: 0.013482
Train Epoch: 2 [32000/60000 (53.333333%)] Loss: 0.039676
Train Epoch: 2 [35200/60000 (58.666667%)] Loss: 0.016707
Train Epoch: 2 [38400/60000 (64.000000%)] Loss: 0.168691
Train Epoch: 2 [41600/60000 (69.333333%)] Loss: 0.056318
Train Epoch: 2 [44800/60000 (74.666667%)] Loss: 0.008174
Train Epoch: 2 [48000/60000 (80.000000%)] Loss: 0.075149
Train Epoch: 2 [51200/60000 (85.333333%)] Loss: 0.205798
Train Epoch: 2 [54400/60000 (90.666667%)] Loss: 0.019762
Train Epoch: 2 [57600/60000 (96.000000%)] Loss: 0.012056
Test set: Average loss: 0.0464, Accuracy: 9850/10000 (98%)
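To reuse the saved weights later, a minimal loading sketch (assuming the Net class above is in scope; the filename matches the torch.save call above):

model = Net().to(device)
model.load_state_dict(torch.load("mnist_cnn.pt", map_location=device))
model.eval()  # switch to evaluation mode before inference

The same network and training loop can then be reused unchanged on FashionMNIST: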
torch.manual_seed(53113)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
batch_size = test_batch_size = 32
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST('./fashion_mnist_data', train=True, download=True,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.1307,), (0.3081,))
                          ])),
    batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST('./fashion_mnist_data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
epochs = 2
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)

save_model = True
if save_model:
    torch.save(model.state_dict(), "fashion_mnist_cnn.pt")
Result:
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz
Processing...
Done!
Train Epoch: 1 [0/60000 (0.000000%)] Loss: 2.279603
Train Epoch: 1 [3200/60000 (5.333333%)] Loss: 0.962251
Train Epoch: 1 [6400/60000 (10.666667%)] Loss: 1.019635
Train Epoch: 1 [9600/60000 (16.000000%)] Loss: 0.544330
Train Epoch: 1 [12800/60000 (21.333333%)] Loss: 0.629807
Train Epoch: 1 [16000/60000 (26.666667%)] Loss: 0.514437
Train Epoch: 1 [19200/60000 (32.000000%)] Loss: 0.555741
Train Epoch: 1 [22400/60000 (37.333333%)] Loss: 0.528186
Train Epoch: 1 [25600/60000 (42.666667%)] Loss: 0.656440
Train Epoch: 1 [28800/60000 (48.000000%)] Loss: 0.294654
Train Epoch: 1 [32000/60000 (53.333333%)] Loss: 0.293626
Train Epoch: 1 [35200/60000 (58.666667%)] Loss: 0.227645
Train Epoch: 1 [38400/60000 (64.000000%)] Loss: 0.473842
Train Epoch: 1 [41600/60000 (69.333333%)] Loss: 0.724678
Train Epoch: 1 [44800/60000 (74.666667%)] Loss: 0.519580
Train Epoch: 1 [48000/60000 (80.000000%)] Loss: 0.465854
Train Epoch: 1 [51200/60000 (85.333333%)] Loss: 0.378200
Train Epoch: 1 [54400/60000 (90.666667%)] Loss: 0.503832
Train Epoch: 1 [57600/60000 (96.000000%)] Loss: 0.616502
Test set: Average loss: 0.4365, Accuracy: 8425/10000 (84%)
Train Epoch: 2 [0/60000 (0.000000%)] Loss: 0.385171
Train Epoch: 2 [3200/60000 (5.333333%)] Loss: 0.329045
Train Epoch: 2 [6400/60000 (10.666667%)] Loss: 0.308792
Train Epoch: 2 [9600/60000 (16.000000%)] Loss: 0.360471
Train Epoch: 2 [12800/60000 (21.333333%)] Loss: 0.445865
Train Epoch: 2 [16000/60000 (26.666667%)] Loss: 0.357145
Train Epoch: 2 [19200/60000 (32.000000%)] Loss: 0.376523
Train Epoch: 2 [22400/60000 (37.333333%)] Loss: 0.389735
Train Epoch: 2 [25600/60000 (42.666667%)] Loss: 0.308655
Train Epoch: 2 [28800/60000 (48.000000%)] Loss: 0.352300
Train Epoch: 2 [32000/60000 (53.333333%)] Loss: 0.499613
Train Epoch: 2 [35200/60000 (58.666667%)] Loss: 0.282398
Train Epoch: 2 [38400/60000 (64.000000%)] Loss: 0.330232
Train Epoch: 2 [41600/60000 (69.333333%)] Loss: 0.430427
Train Epoch: 2 [44800/60000 (74.666667%)] Loss: 0.406084
Train Epoch: 2 [48000/60000 (80.000000%)] Loss: 0.443538
Train Epoch: 2 [51200/60000 (85.333333%)] Loss: 0.348947
Train Epoch: 2 [54400/60000 (90.666667%)] Loss: 0.424920
Train Epoch: 2 [57600/60000 (96.000000%)] Loss: 0.231494
Test set: Average loss: 0.3742, Accuracy: 8652/10000 (87%)
(2) Transfer Learning with CNN Models
When training a new image classification task, we usually do not start from a randomly initialized model; instead, we use a _pretrained_ model to speed up training, most commonly a model pretrained on ImageNet.
This is a form of transfer learning. Two approaches are commonly used:
① Fine tuning: start from a pretrained model, change part of its architecture, and then continue training all of the model's parameters.
② Feature extraction: keep the pretrained model's parameters fixed and update only the parameters of the layers we replaced. It is called feature extraction because the pretrained CNN is treated as a fixed feature extractor, and the extracted features are used to complete our training task.
The basic steps for building and training a transfer learning model (see the sketch after this list):
① Initialize the pretrained model
② Replace the final output layer so that it matches the number of target classes
③ Define an optimizer to update the parameters
④ Train the model
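A minimal sketch of steps ① and ② under both strategies (hypothetical code mirroring the initialize_model function defined later in this section; it assumes the torch.nn import from part I):

from torchvision import models

model = models.resnet18(pretrained=True)        # ① start from a pretrained model
for param in model.parameters():                # feature extraction: freeze all
    param.requires_grad = False                 # pretrained weights (skip this
                                                # loop when fine tuning)
model.fc = nn.Linear(model.fc.in_features, 2)   # ② new 2-class head; its parameters
                                                # have requires_grad=True by default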
(3) Data
We will use the hymenoptera_data dataset.
It contains two classes of images, bees and ants, already arranged in the format readable by ImageFolder (https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder). We only need to set data_dir to the dataset's root directory and set model_name to the pretrained model we want to use, one of: [resnet, alexnet, vgg, squeezenet, densenet, inception].
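For reference, ImageFolder expects one subdirectory per class under each split, so the dataset root looks roughly like this:

hymenoptera_data/
    train/
        ants/  ...jpg
        bees/  ...jpg
    val/
        ants/  ...jpg
        bees/  ...jpg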
The other parameters are:
- num_classes: the number of classes in the dataset
- batch_size
- num_epochs
- feature_extract: whether to train with fine tuning or feature extraction. If feature_extract = False, the whole model is updated during training; if feature_extract = True, only the final (replaced) layer's parameters are updated.
# Imports used by this section: time/copy for train_model below,
# os for paths, matplotlib/numpy for display and plotting, torchvision models
import os
import time
import copy
import numpy as np
import matplotlib.pyplot as plt
from torchvision import models

# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
data_dir = "./hymenoptera_data"
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "resnet"
# Number of classes in the dataset
num_classes = 2
# Batch size for training (change depending on how much memory you have)
batch_size = 32
# Number of epochs to train for
num_epochs = 15
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = True
def train_model(model, dataloaders, criterion, optimizer, num_epochs=5):
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs-1))
        print("-"*10)
        for phase in ["train", "val"]:
            running_loss = 0.
            running_corrects = 0.
            if phase == "train":
                model.train()
            else:
                model.eval()
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                with torch.autograd.set_grad_enabled(phase=="train"):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                if phase == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds.view(-1) == labels.view(-1)).item()
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects / len(dataloaders[phase].dataset)
            print("{} Loss: {} Acc: {}".format(phase, epoch_loss, epoch_acc))
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == "val":
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    print("Training complete in {}m {}s".format(time_elapsed // 60, time_elapsed % 60))
    print("Best val Acc: {}".format(best_acc))
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
# it = iter(dataloaders_dict["train"])
# inputs, labels = next(it)
# for inputs, labels in dataloaders_dict["train"]:
# print(labels.size())
def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    if model_name == "resnet":
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)  # replace the 1000-way ImageNet head
        input_size = 224
        return model_ft, input_size
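initialize_model only implements the resnet branch here. The same pattern extends to the other listed models; a hedged sketch for AlexNet, written as a separate hypothetical helper (torchvision's AlexNet keeps its final Linear layer at classifier[6]):

def initialize_alexnet(num_classes, feature_extract, use_pretrained=True):
    model_ft = models.alexnet(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    num_ftrs = model_ft.classifier[6].in_features  # 4096
    model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
    return model_ft, 224  # AlexNet also expects 224x224 inputs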
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
print(model_ft)
Result (note the final fc layer now maps 512 features to our 2 classes):
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AvgPool2d(kernel_size=7, stride=1, padding=0)
(fc): Linear(in_features=512, out_features=2, bias=True)
)
(4) Loading the data
all_imgs = datasets.ImageFolder(os.path.join(data_dir, "train"), transforms.Compose([
    transforms.RandomResizedCrop(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
]))
loader = torch.utils.data.DataLoader(all_imgs, batch_size=batch_size, shuffle=True, num_workers=4)
img = next(iter(loader))[0]
unloader = transforms.ToPILImage()  # reconvert into PIL image
plt.ion()

def imshow(tensor, title=None):
    image = tensor.cpu().clone()  # we clone the tensor to not do changes on it
    image = image.squeeze(0)      # remove the fake batch dimension
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

plt.figure()
imshow(img[31], title='Image')
data_transforms = {
    "train": transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    "val": transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
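As a quick check, the split sizes are the denominators behind the Acc values printed during training (an illustrative line, not in the original):

print({x: len(image_datasets[x]) for x in ['train', 'val']})  # here: {'train': 244, 'val': 153}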
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, ohist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs)
Result:
Initializing Datasets and Dataloaders...
Params to learn:
fc.weight
fc.bias
Epoch 0/14
----------
train Loss: 0.2623850886450439 Acc: 0.8975409836065574
val Loss: 0.22199168762350394 Acc: 0.9215686274509803
Epoch 1/14
----------
train Loss: 0.20775875546893136 Acc: 0.9262295081967213
val Loss: 0.21329789413930544 Acc: 0.9215686274509803
Epoch 2/14
----------
train Loss: 0.24463887243974405 Acc: 0.9098360655737705
val Loss: 0.2308054333613589 Acc: 0.9215686274509803
Epoch 3/14
----------
train Loss: 0.2108444703406975 Acc: 0.930327868852459
val Loss: 0.20637644174831365 Acc: 0.954248366013072
Epoch 4/14
----------
train Loss: 0.22102872954040279 Acc: 0.9221311475409836
val Loss: 0.19902625017695957 Acc: 0.9281045751633987
Epoch 5/14
----------
train Loss: 0.22044393127081824 Acc: 0.9221311475409836
val Loss: 0.2212505256818011 Acc: 0.9281045751633987
Epoch 6/14
----------
train Loss: 0.1636357441788814 Acc: 0.9467213114754098
val Loss: 0.1969745449380937 Acc: 0.934640522875817
Epoch 7/14
----------
train Loss: 0.1707800094221459 Acc: 0.9385245901639344
val Loss: 0.20569930824578977 Acc: 0.934640522875817
Epoch 8/14
----------
train Loss: 0.18224841185280535 Acc: 0.9344262295081968
val Loss: 0.192565394480244 Acc: 0.9411764705882353
Epoch 9/14
----------
train Loss: 0.17762072372143387 Acc: 0.9385245901639344
val Loss: 0.19549715163466197 Acc: 0.9411764705882353
Epoch 10/14
----------
train Loss: 0.19314993575948183 Acc: 0.9180327868852459
val Loss: 0.2000840900380627 Acc: 0.934640522875817
Epoch 11/14
----------
train Loss: 0.21551114418467537 Acc: 0.9057377049180327
val Loss: 0.18960770005299374 Acc: 0.934640522875817
Epoch 12/14
----------
train Loss: 0.1847396502729322 Acc: 0.9426229508196722
val Loss: 0.1871058808432685 Acc: 0.9411764705882353
Epoch 13/14
----------
train Loss: 0.17342406132670699 Acc: 0.9508196721311475
val Loss: 0.20636656588199093 Acc: 0.9215686274509803
Epoch 14/14
----------
train Loss: 0.16013679030488748 Acc: 0.9508196721311475
val Loss: 0.18491691759988374 Acc: 0.9411764705882353
Training complete in 0.0m 14.700076580047607s
Best val Acc: 0.954248366013072
# Initialize the non-pretrained version of the model used for this run
scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
scratch_model = scratch_model.to(device)
scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
scratch_criterion = nn.CrossEntropyLoss()
_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs)
Result:
Epoch 0/14
----------
train Loss: 0.7185551504619786 Acc: 0.4426229508196721
val Loss: 0.6956208067781785 Acc: 0.45751633986928103
Epoch 1/14
----------
train Loss: 0.6852761008700387 Acc: 0.5778688524590164
val Loss: 0.6626271987273022 Acc: 0.6601307189542484
Epoch 2/14
----------
train Loss: 0.6603062289660094 Acc: 0.5942622950819673
val Loss: 0.6489538297154545 Acc: 0.5816993464052288
Epoch 3/14
----------
train Loss: 0.6203305486772881 Acc: 0.639344262295082
val Loss: 0.6013184107986151 Acc: 0.673202614379085
Epoch 4/14
----------
train Loss: 0.5989709232674271 Acc: 0.6680327868852459
val Loss: 0.5929347966231552 Acc: 0.6993464052287581
Epoch 5/14
----------
train Loss: 0.5821619336722327 Acc: 0.6557377049180327
val Loss: 0.5804777059679717 Acc: 0.6928104575163399
Epoch 6/14
----------
train Loss: 0.6114685896967278 Acc: 0.6270491803278688
val Loss: 0.5674225290616354 Acc: 0.7189542483660131
Epoch 7/14
----------
train Loss: 0.5681056575696977 Acc: 0.6680327868852459
val Loss: 0.5602688086188696 Acc: 0.7189542483660131
Epoch 8/14
----------
train Loss: 0.5701596453541615 Acc: 0.7090163934426229
val Loss: 0.5554519264526616 Acc: 0.7450980392156863
Epoch 9/14
----------
train Loss: 0.5476810380083615 Acc: 0.7254098360655737
val Loss: 0.5805927063125411 Acc: 0.7189542483660131
Epoch 10/14
----------
train Loss: 0.5508710468401674 Acc: 0.6926229508196722
val Loss: 0.5859468777974447 Acc: 0.7058823529411765
Epoch 11/14
----------
train Loss: 0.5344281519045595 Acc: 0.7172131147540983
val Loss: 0.5640550851821899 Acc: 0.7058823529411765
Epoch 12/14
----------
train Loss: 0.5125471890949812 Acc: 0.7295081967213115
val Loss: 0.5665123891207128 Acc: 0.7058823529411765
Epoch 13/14
----------
train Loss: 0.496260079204059 Acc: 0.7254098360655737
val Loss: 0.5820710787586137 Acc: 0.7058823529411765
Epoch 14/14
----------
train Loss: 0.49067981907578767 Acc: 0.7704918032786885
val Loss: 0.5722863315756804 Acc: 0.7058823529411765
Training complete in 0.0m 18.418847799301147s
Best val Acc: 0.7450980392156863
# Plot the training curves of validation accuracy vs. number
# of training epochs for the transfer learning method and
# the model trained from scratch
# ohist = []
# shist = []
# ohist = [h.cpu().numpy() for h in ohist]
# shist = [h.cpu().numpy() for h in scratch_hist]
plt.title("Validation Accuracy vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Validation Accuracy")
plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
plt.plot(range(1,num_epochs+1),scratch_hist,label="Scratch")
plt.ylim((0,1.))
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.show()
Result: a plot of validation accuracy versus training epochs; the pretrained model stays around 0.92-0.95 from the first epoch, while the model trained from scratch climbs only to about 0.71-0.75.