
Getting Started with PyTorch Image Classification Competitions


Overview of Classification Competitions

Image classification competitions require relatively little code and are easy to enter, which makes them a good first competition for beginners. Competitions are usually split into a preliminary round and a final round: in the preliminary round you generally just submit a CSV of predictions, while in the final round you may need to deploy your model in a Docker environment on the organizer's server and run inference on the server-side test images.

Competition Data

Competition link: AI研习社 is well suited to beginner competitions; it hosts many different contests and shared code that are worth studying. For the data download, I will use its 102-flower classification competition as the example and walk through the competition workflow.

Data Loading

The data looks like this: train holds the training images, train.csv stores the label for each image in train, and test contains images only, with no labels. The goal is to train a model on the train data, run it on test to produce test.csv, and submit that file for scoring.

train_df = pd.read_csv('./54_data/train.csv')  # filenames and labels of the training images
train_df['filename'] = train_df['filename'].apply(lambda x: './54_data/train/{0}'.format(x))  # turn each filename into a full path

class MyDataset(Dataset):
    def __init__(self, df, transform):
        self.df = df
        self.transform = transform

    def __getitem__(self, index):
        img = Image.open(self.df['filename'].iloc[index]).convert('RGB')  # load the image as RGB
        img = self.transform(img)  # apply the augmentation / normalization pipeline
        return img, torch.from_numpy(np.array(self.df['label'].iloc[index]))

    def __len__(self):
        return len(self.df)


train_transform = transforms.Compose([
    transforms.RandomRotation(15),
    transforms.Resize([300, 300]),
    transforms.RandomVerticalFlip(),
    #FixedRotation([0, 90, 180, -90]),
    transforms.RandomRotation(90),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
val_transform = transforms.Compose([
    transforms.Resize([300, 300]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_data = MyDataset(train_df, train_transform)
trainloader = DataLoader(train_data, batch_size=16 , shuffle=True)

This part of the code reads the data and wraps it in an iterable DataLoader, which makes processing convenient. There are many details hidden here; for example, the MyDataset class overrides PyTorch's Dataset. In this post I only sketch the competition workflow; the details are explained elsewhere on my blog: data walkthrough.
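
The script here trains on all of train.csv and defines val_transform without ever using it. If you want a local validation set to monitor overfitting, a minimal sketch could look like this (scikit-learn's train_test_split is my addition and is not imported in the original code):

from sklearn.model_selection import train_test_split

# hold out 10% of the labelled images for validation (hypothetical split, not in the original script)
train_part, val_part = train_test_split(train_df, test_size=0.1, stratify=train_df['label'], random_state=42)

train_data = MyDataset(train_part.reset_index(drop=True), train_transform)
val_data = MyDataset(val_part.reset_index(drop=True), val_transform)
trainloader = DataLoader(train_data, batch_size=16, shuffle=True)
valloader = DataLoader(val_data, batch_size=16, shuffle=False)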

Building the Model

In competitions you naturally lean on existing libraries such as timm or mmclassification; here we use timm and its pretrained models. If you want to reuse code from the other sections, just copy it into your own script and call it directly.

import timm
net = timm.create_model("resnet18", pretrained=True, num_classes=102).cuda()


import timm
model_list=timm.list_models("eff*",pretrained=True)
print(model_list)

That single line is enough to build the model. You can use timm.list_models to list every available model, or pass a wildcard pattern to search for specific architectures.
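
Before training, it can be worth a quick sanity check that the model accepts your input size and produces 102 logits. A minimal sketch (the dummy tensor and the default_cfg lookup are my additions, not part of the original post):

import torch
import timm

net = timm.create_model("efficientnet_b2", pretrained=True, num_classes=102)
print(net.default_cfg['input_size'])     # the resolution the pretrained weights were trained at
with torch.no_grad():
    dummy = torch.randn(1, 3, 300, 300)  # same size as the Resize([300, 300]) transform
    print(net(dummy).shape)              # should be torch.Size([1, 102])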

Training

model_name="efficientnet_b2"
epoch_num=22
net=timm.create_model(model_name,pretrained=True,num_classes=102).cuda()
train_data = MyDataset(train_df, train_transform)
trainloader = DataLoader(train_data, batch_size=16 , shuffle=True)
criterion=nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(),lr=0.02)
#optimizer = torch.optim.Adam(net.parameters(), lr=0.0000001)
#lr_scheduler = CosineLRScheduler(optimizer, t_initial=0.02, lr_min=0.000004)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
PATH = './'+model_name  # prefix for the saved checkpoints (same as in the full code below)
pre_acc = 0
def get_cur_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']
if __name__ == '__main__':
    correct = 0
    total = 0
    for epoch in range(epoch_num):
        print("========== epoch: [{}/{}] ==========".format(epoch + 1, epoch_num))
        for i, (inputs, labels) in tqdm(enumerate(trainloader)):
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
            train_acc = 100.0 * correct / total
            optimizer.zero_grad()  # clear the accumulated gradients
            loss.backward()  # backpropagate
            optimizer.step()  # update the parameters with the current learning rate
            if i % 10 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} train acc: {:.6f}  lr : {:.6f}'.format(epoch+1, i * len(inputs),
                len(trainloader.dataset),100. * i / len(trainloader),loss.item(),train_acc,get_cur_lr(optimizer)))
        if epoch % 1 ==0:
            torch.save({'epoch': epoch,
                        'model_state_dict': net.state_dict(),
                           'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss
            },PATH+"_"+str(epoch+1)+".pth")
        
        lr_scheduler.step()

The code above is the basic routine: define the network, choose the optimizer and the learning-rate schedule, train, and save the weights after each epoch.
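
Because each checkpoint stores the epoch, the model weights, the optimizer state, and the loss, an interrupted run can be resumed. A minimal sketch, assuming net and optimizer are constructed exactly as in the training script above (the checkpoint filename is just an example that matches the naming scheme used when saving):

import torch

checkpoint = torch.load('./efficientnet_b2_5.pth')             # checkpoint written after epoch 5
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1                          # continue training from the next epoch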

Full Code

import torchvision.transforms as transforms
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.optim as optim
from PIL import Image
from tqdm import tqdm
import torch.nn.functional as F
import torch
import random
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import timm
from timm.scheduler.cosine_lr import CosineLRScheduler
train_df = pd.read_csv('./54_data/train.csv')  # note: adjust this path to where your dataset lives
#print(train_df)
#print(train_df['filename'][0])
train_df['filename'] = train_df['filename'].apply(lambda x: './54_data/train/{0}'.format(x))
# val_df = pd.read_csv('./54_data/val.csv')
# val_df['filename'] = val_df['filename'].apply(lambda x: './54_data/train/{0}'.format(x))


class MyDataset(Dataset):
    def __init__(self, df, transform):
        self.df = df
        self.transform = transform

    def __getitem__(self, index):
        img = Image.open(self.df['filename'].iloc[index]).convert('RGB')
        img = self.transform(img)
        return img, torch.from_numpy(np.array(self.df['label'].iloc[index]))

    def __len__(self):
        return len(self.df)


train_transform = transforms.Compose([
    transforms.RandomRotation(15),
    transforms.Resize([300, 300]),
    transforms.RandomVerticalFlip(),
    #FixedRotation([0, 90, 180, -90]),
    transforms.RandomRotation(90),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
val_transform = transforms.Compose([
    transforms.Resize([300, 300]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
model_name="efficientnet_b2"
epoch_num=22
net=timm.create_model(model_name,pretrained=True,num_classes=102).cuda()
train_data = MyDataset(train_df, train_transform)
trainloader = DataLoader(train_data, batch_size=16 , shuffle=True)
criterion=nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(),lr=0.02)
#optimizer = torch.optim.Adam(net.parameters(), lr=0.0000001)
#lr_scheduler = CosineLRScheduler(optimizer, t_initial=0.02, lr_min=0.000004)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
PATH = './'+model_name  # prefix for the saved checkpoints; they are written to the current directory
pre_acc = 0
def get_cur_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']
if __name__ == '__main__':
    correct = 0
    total = 0
    for epoch in range(epoch_num):
        print("========== epoch: [{}/{}] ==========".format(epoch + 1, epoch_num))
        for i, (inputs, labels) in tqdm(enumerate(trainloader)):
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
            train_acc = 100.0 * correct / total
            optimizer.zero_grad()  
            loss.backward()  
            optimizer.step()  
            if i % 10 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} train acc: {:.6f}  lr : {:.6f}'.format(epoch+1, i * len(inputs),
                len(trainloader.dataset),100. * i / len(trainloader),loss.item(),train_acc,get_cur_lr(optimizer)))
        if epoch % 1 ==0:
            torch.save({'epoch': epoch,
                        'model_state_dict': net.state_dict(),
                           'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss
            },PATH+"_"+str(epoch+1)+".pth")
        
        lr_scheduler.step()

To run this code you need the dataset in the right place and the required packages installed, such as the timm library. Once that is done the training runs successfully.
For lack of time I only trained for 5 epochs. A single model easily reaches a score of about 98, and ensembling around five models can apparently get close to 100.

Prediction

import torch.optim as optim
import torchvision.transforms as transforms
import tqdm
import torch
import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import timm

model_name="efficientnet_b2"
net=timm.create_model(model_name,pretrained=False,num_classes=102).cuda()
PATH = './efficientnet_b2_5.pth'
checkpoint = torch.load(PATH)
net.load_state_dict(checkpoint['model_state_dict'])
test='./54_data/test'
data_len = len(os.listdir('./54_data/test'))
test_path_list = ['{}/{}.jpg'.format(test, x) for x in range(0, data_len)]
test_data = np.array(test_path_list)

class MyDataset(Dataset):
    def __init__(self, df, transform, mode='train'):
        self.df = df
        self.transform = transform
        self.mode = mode

    def __getitem__(self, index):
        if self.mode == 'train':
            img = Image.open(self.df['filename'].iloc[index]).convert('RGB')
            img = self.transform(img)
            return img, torch.from_numpy(np.array(self.df['label'].iloc[index]))
        else:
            img = Image.open(self.df[index]).convert('RGB')
            img = self.transform(img)
            return img, torch.from_numpy(np.array(0))

    def __len__(self):
        return len(self.df)

test_transform = transforms.Compose([
    transforms.Resize([300, 300]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # match the normalization used during training
])

test_dataset = MyDataset(test_data, test_transform,'test')
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
net.eval()
pred_list = []
with torch.no_grad():
    for batch_x, (img,label) in enumerate(test_loader):
        print(batch_x)
        img = img.cuda()
        # compute output
        probs = net(img)
        preds = torch.argmax(probs, dim=1)
        pred_list += [p.item() for p in preds]
submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
submission.to_csv('submission.csv', index=False, header=False)

Submitting Results

Competition link
This was only five epochs of training, but it covers the full competition workflow from start to finish.
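
One easy way to squeeze out a few more points, as mentioned above, is to ensemble several checkpoints by averaging their softmax probabilities. A minimal sketch that reuses net and test_loader from the prediction script; the checkpoint list is hypothetical (here, three epochs of the same model):

import torch
import torch.nn.functional as F

# hypothetical list of saved checkpoints to average over
ckpt_paths = ['./efficientnet_b2_3.pth', './efficientnet_b2_4.pth', './efficientnet_b2_5.pth']

all_probs = None
with torch.no_grad():
    for path in ckpt_paths:
        net.load_state_dict(torch.load(path)['model_state_dict'])
        net.eval()
        probs = []
        for img, label in test_loader:
            probs.append(F.softmax(net(img.cuda()), dim=1).cpu())
        probs = torch.cat(probs)                               # [num_test_images, 102]
        all_probs = probs if all_probs is None else all_probs + probs
pred_list = all_probs.argmax(dim=1).tolist()                   # class with the highest averaged probability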

Summary

This covers only a small slice of classification competitions. There are many score-boosting strategies worth trying, such as TTA, pseudo-labeling, and model ensembling, and plenty of data augmentations as well; timm ships with most of them ready to use, and switching models is a one-line change. A competition is about reaching the best result in the shortest time with as few attempts as possible; that is how you get a good placing.
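
As a concrete example of TTA, you can average the prediction over the original image and its horizontal flip at inference time. A minimal sketch, again assuming net and test_loader from the prediction script:

import torch
import torch.nn.functional as F

net.eval()
pred_list = []
with torch.no_grad():
    for img, label in test_loader:
        img = img.cuda()
        flipped = torch.flip(img, dims=[3])          # horizontal flip along the width axis
        probs = F.softmax(net(img), dim=1) + F.softmax(net(flipped), dim=1)
        pred_list += probs.argmax(dim=1).tolist()    # prediction averaged over the two views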

Source: https://blog.csdn.net/cp1314971/article/details/122377437