
Optimization Algorithms


Gradient descent and stochastic gradient descent:
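With the linear model forward(x) = x * w, the code below minimizes the mean squared error. Batch gradient descent averages the gradient over all samples before each step, while SGD takes a step after every single sample; the formulas the code implements are

$$\mathrm{cost}(w)=\frac{1}{N}\sum_{n=1}^{N}(x_n w-y_n)^2,\qquad \frac{\partial\,\mathrm{cost}}{\partial w}=\frac{1}{N}\sum_{n=1}^{N}2x_n(x_n w-y_n)$$

$$\mathrm{loss}_n(w)=(x_n w-y_n)^2,\qquad \frac{\partial\,\mathrm{loss}_n}{\partial w}=2x_n(x_n w-y_n),\qquad w\leftarrow w-\eta\cdot\mathrm{grad}$$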

import matplotlib.pyplot as plt
x_data = [5,6,7,8.5,9,10,11.5,12]
y_data = [1,2,8,4,5,6.5,7.5,8]

w = 1  # initial weight

def forward(x):
    return x * w

# MSE cost averaged over the whole dataset
def cost(xs,ys):
    cost = 0
    for x,y in zip(xs,ys):
        y_pred = forward(x)
        cost += (y-y_pred)**2
    return cost/len(xs)

# squared-error loss for a single sample (used by SGD)
def SGD_loss(x, y):
    y_pred = forward(x)
    return (y_pred - y)**2

# analytic gradient of the single-sample loss: d/dw (x*w - y)^2 = 2*x*(x*w - y)
def SGD_gradient(x, y):
    return 2*x*(x*w - y)

# gradient of the MSE cost, averaged over the whole dataset
def gradient(xs, ys):
    grad = 0
    for x,y in zip(xs,ys):
        grad += 2*x*(x*w-y)
    return grad/len(xs)

# simple helper to plot y against x
def draw(x, y):
    fig = plt.figure(num=1, figsize=(4, 4))
    ax = fig.add_subplot(111)
    ax.plot(x,y)
    plt.show()

# Batch gradient descent (uncomment to run instead of the SGD loop below)
# epoch_lis = []
# loss_lis = []
# learning_rate = 0.012
#
# for epoch in range(100):
#     cost_val = cost(x_data,y_data)
#     grad_val = gradient(x_data,y_data)
#     w -= learning_rate*grad_val
#     print("Epoch = {} w = {} loss = {} ".format(epoch,w,cost_val))
#     epoch_lis.append(epoch)
#     loss_lis.append(cost_val)
# print(forward(4))
# draw(epoch_lis,loss_lis)
# draw(x_data,y_data)


l_lis = []
learning_rate = 0.009
# Stochastic gradient descent: update w once per sample
for epoch in range(10):
    for x,y in zip(x_data,y_data):
        grad = SGD_gradient(x,y)
        w -= learning_rate*grad
        print(" x:{}  y:{}   grad:{}".format(x,y,grad))
        l = SGD_loss(x,y)
        print("loss: ",l)
        l_lis.append(l)

X = list(range(len(l_lis)))
draw(X,l_lis)
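
The same per-sample SGD loop can also be written with PyTorch autograd, so that the gradient 2*x*(x*w - y) is filled in by loss.backward() instead of the hand-written SGD_gradient. A minimal sketch, assuming the same data and learning rate as above:

import torch

x_data = [5, 6, 7, 8.5, 9, 10, 11.5, 12]
y_data = [1, 2, 8, 4, 5, 6.5, 7.5, 8]

w = torch.tensor([1.0], requires_grad=True)  # initial weight, tracked by autograd

def forward(x):
    return x * w

def loss(x, y):
    return (forward(x) - y) ** 2

learning_rate = 0.009
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)                         # forward pass builds the graph
        l.backward()                           # autograd puts dl/dw into w.grad
        w.data -= learning_rate * w.grad.data  # update the weight outside the graph
        w.grad.data.zero_()                    # reset the gradient before the next sample
    print("Epoch = {} w = {:.4f} loss = {:.4f}".format(epoch, w.item(), l.item()))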

 

Source: https://www.cnblogs.com/MrMKG/p/16660115.html