Implementing Deep Learning Code
Linear Model
import torch
import torch.nn as nn
import math
# Compute one SGD update for the linear regression parameters by hand.
def Cal_SGD_Linear(x, pred, label, lr, k, bias=0):
    g_k, g_b = 0, 0
    for (idx, item) in enumerate(pred):
        g_k += (item - label[idx]) * x[idx]
        g_b += (item - label[idx])
    # The gradient is the derivative of the loss with respect to each model
    # parameter, evaluated at the current parameter values. (The original
    # reused the weight gradient for the bias; the bias gets its own here.)
    g_k = (2 * g_k) / len(x)
    g_b = (2 * g_b) / len(x)
    print(k - lr * g_k)
    return {'k': k - lr * g_k, 'bias': 0 if bias == 0 else bias - lr * g_b}
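# Sanity check (an added sketch, not from the original post): the
# hand-derived gradient above should match what autograd computes for the
# same one-weight MSE objective.
def _check_manual_gradient():
    x = torch.tensor([[1.0], [2.0], [3.0]])
    y = 2 * x
    w = torch.tensor(1.5, requires_grad=True)
    loss = ((w * x - y) ** 2).mean()
    loss.backward()
    g_manual = (2 * ((w.detach() * x - y) * x).sum()) / len(x)
    print(w.grad, g_manual)  # the two values should agree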
def Cal_MSE(pred, label):
    loss = 0
    for (idx, item) in enumerate(pred):
        loss += math.pow(item - label[idx], 2)
    # loss / len(pred) is the MSE (mean squared error);
    # math.sqrt(loss / len(pred)) would give the RMSE (root mean squared error).
    return loss / len(pred)
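# For reference (added): the same MSE and RMSE fall out of plain tensor
# operations, which is handy for cross-checking the loop above.
def _mse_rmse_with_tensors(pred, label):
    mse = torch.mean((pred - label) ** 2)  # MSE
    rmse = torch.sqrt(mse)                 # RMSE
    return mse.item(), rmse.item()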
def gen_line_data(len_data):
    x = torch.linspace(10, 110, len_data)
    x = torch.unsqueeze(x, dim=1)
    y = 2 * x + torch.rand(x.size())
    return {'x': x, 'y': y}
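# Quick shape check (added for illustration): unsqueeze turns the 1-D
# linspace into a column vector, matching nn.Linear's (N, in_features) input.
sample = gen_line_data(4)
print(sample['x'].shape, sample['y'].shape)  # torch.Size([4, 1]) for both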
class LineRegressionNet(nn.Module):
    def __init__(self):
        super().__init__()
        # A single weight and no bias: the model is y = k * x.
        self.liner = nn.Linear(1, 1, bias=False)

    def forward(self, x):
        out = self.liner(x)
        return out
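# With bias=False the whole model is one scalar weight, equivalent to
# y = k * x; it can be inspected directly (an added illustration):
print(LineRegressionNet().state_dict()['liner.weight'])  # randomly initialised k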
class line_model():
    def __init__(self, lr, epoches):
        self.lr = lr
        self.epoches = epoches
        self.init_model()

    def init_model(self):
        self.model = LineRegressionNet()
        self.optimiser = torch.optim.SGD(self.model.parameters(), lr=self.lr)
        self.loss_fn = torch.nn.MSELoss()

    def train_model(self, data, model_save_path="model.ck"):
        x = data['x']
        y = data['y']
        model = self.model
        for th in range(self.epoches):
            # Shuffle x and y with one shared permutation so the pairs stay
            # aligned (the original shuffled two tensors with random.shuffle,
            # which can corrupt tensor data and only stayed paired by seed).
            perm = torch.randperm(len(x))
            x, y = x[perm], y[perm]
            model.zero_grad()
            outputs = model(x)
            loss = self.loss_fn(outputs, y)
            loss.backward()
            self.optimiser.step()
        self.model_save_path = model_save_path
        torch.save(model.state_dict(), model_save_path)

    def test_model(self, data):
        x = data['x']
        y = data['y']
        self.model.load_state_dict(torch.load(self.model_save_path))
        self.model.eval()
        with torch.no_grad():
            pred = self.model(x)
        print(x, pred)
train_data = gen_line_data(10)
test_data = gen_line_data(5)
learning_rate = 0.0001
liner_model = line_model(learning_rate, 100)
liner_model.train_model(train_data)
liner_model.test_model(test_data)
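# Optional check (added): gen_line_data samples from y = 2x plus noise, so
# the trained weight should land close to 2.
print(liner_model.model.state_dict()['liner.weight'])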
'''
Alternative manual training loop, kept for reference: it prints the current
weight and compares the optimizer's update against the hand-computed one
from Cal_SGD_Linear.

loss_function = torch.nn.MSELoss()
optimizer = torch.optim.Adam(liner_model.parameters(), lr=learning_rate)
optimizer = torch.optim.Adagrad(liner_model.parameters(), lr=learning_rate)
optimizer = torch.optim.SGD(liner_model.parameters(), lr=learning_rate)  # stochastic gradient descent
x = data['x']
y = data['y']
for i in range(10):
    optimizer.zero_grad()  # clear the gradients from the previous step
    outputs = liner_model(x)
    # Cal_MSE(outputs, y)
    loss = loss_function(outputs, y)  # forward pass
    pp = liner_model.state_dict()
    print('liner.weight', pp['liner.weight'])
    Cal_SGD_Linear(x, outputs, y, learning_rate, pp['liner.weight'][0])
    loss.backward()  # backpropagation
    optimizer.step()  # optimizer updates the parameters
pp = liner_model.state_dict()
# test_data = torch.unsqueeze(torch.linspace(100, 200, 10), dim=1)
# print(test_data, liner_model(test_data))
'''
CNN Model
import torch
import torch.nn as nn
def gen_image_data(len_data):
    # The original post reused the 1-D line data here, which a Conv2d cannot
    # consume; random single-channel 28x28 "images" with class labels 0-9
    # are substituted so the example actually runs (sizes are assumptions).
    x = torch.rand(len_data, 1, 28, 28)
    y = torch.randint(0, 10, (len_data,))
    return {'x': x, 'y': y}
class CnnNet(nn.Module):
    def __init__(self):
        super().__init__()  # required before registering submodules
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # After layer1, a 28x28 input becomes 16 feature maps of 14x14.
        self.fc = nn.Linear(16 * 14 * 14, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = out.view(out.size(0), -1)  # flatten before the linear head
        out = self.fc(out)
        return out
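# Shape walk-through (added sketch): Conv2d with kernel 5 and padding 2
# preserves the spatial size, MaxPool2d halves it to 14x14, and the flatten
# feeds 16 * 14 * 14 features to the linear head.
dummy = torch.rand(1, 1, 28, 28)
print(CnnNet()(dummy).shape)  # torch.Size([1, 10])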
data = gen_image_data(10)
cnn_model = CnnNet()
learning_rate = 0.02
# Ten-class classification, so cross-entropy replaces the MSE loss the
# original copied over from the regression example.
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate)
x = data['x']
y = data['y']
# Forward pass
outputs = cnn_model(x)
loss = loss_function(outputs, y)
# Backward pass and parameter update
optimizer.zero_grad()  # clear the gradients from the previous step
loss.backward()        # backpropagation
optimizer.step()       # optimizer updates the parameters
test_data = torch.rand(5, 1, 28, 28)
print(cnn_model(test_data).argmax(dim=1))  # predicted class for each test image
Source: https://www.cnblogs.com/lhx9527/p/16158043.html