Deep Learning
Author: Internet
Implementing a Neural Network in Python
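A three-layer feedforward network built from scratch with numpy and scipy: initialisation, training (a forward pass followed by error backpropagation), and querying.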
import numpy
import scipy.special

class neuralNetwork:
    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # learning rate
        self.lr = learningrate
        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j, where the link is from node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        # self.wih = (numpy.random.rand(self.hnodes, self.inodes) - 0.5)
        # self.who = (numpy.random.rand(self.onodes, self.hnodes) - 0.5)
        # weights are sampled from a normal distribution with mean 0 and a standard
        # deviation of 1/sqrt(number of incoming links), i.e. pow(n, -0.5)
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # activation function is the sigmoid function
        self.activation_function = lambda x: scipy.special.expit(x)
    # new code 1
    # train the neural network
    # Training has two parts. First, compute the output for the given sample;
    # if the weights were already optimal, this alone would yield the prediction.
    # Second, compare the output with the target output and use the error to guide the weight updates.
    # train() takes the training inputs and their target labels
    def train(self, inputs_list, targets_list):
        # convert inputs list into 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T
        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)
        # convert targets list into 2d array
        targets = numpy.array(targets_list, ndmin=2).T
        # error is (target - final_outputs)
        output_errors = targets - final_outputs
        # errors_hidden = weights^T_hidden_output . errors_output
        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # update the weights for the links between the hidden and output layers:
        # delta_w_jk = lr * E_k * sigmoid(o_k) * (1 - sigmoid(o_k)) . o_j^T
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
                                        numpy.transpose(hidden_outputs))
        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                        numpy.transpose(inputs))
    # query the neural network
    # Takes an input and returns the network's output. The input-layer signals
    # pass through the hidden layer and emerge from the output layer; as signals
    # enter the hidden and output nodes they are moderated by the link weights,
    # then squashed by the sigmoid activation function.
    def query(self, inputs_list):
        # convert inputs list into 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T
        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
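A minimal usage sketch (not from the original post; the network size, learning rate, and sample values below are made up for illustration):

n = neuralNetwork(inputnodes=3, hiddennodes=3, outputnodes=3, learningrate=0.3)
# one hypothetical training sample: inputs scaled into (0, 1),
# targets kept off the sigmoid asymptotes at 0.01 / 0.99
n.train([0.9, 0.5, 0.1], [0.99, 0.01, 0.01])
print(n.query([0.9, 0.5, 0.1]))  # 3x1 column vector of sigmoid outputs

In practice train() is called once per sample over many epochs; each call nudges the weights by the learning rate times the backpropagated error.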
Chinese Herbal Medicine Recognition
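The herb classifier below is a VGG-16-style network built with PaddlePaddle: five convolution-plus-pooling blocks (2, 2, 3, 3, 3 stacked convolutions) followed by three fully connected layers.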
import paddle

class ConvPool(paddle.nn.Layer):
    '''Convolution + pooling block'''
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 pool_size,
                 pool_stride,
                 groups,
                 conv_stride=1,
                 conv_padding=1,
                 ):
        super(ConvPool, self).__init__()
        for i in range(groups):
            self.add_sublayer(  # add a sub-layer instance
                'bb_%d' % i,
                paddle.nn.Conv2D(  # convolution layer
                    in_channels=num_channels,  # number of input channels
                    out_channels=num_filters,  # number of filters
                    kernel_size=filter_size,   # filter size
                    stride=conv_stride,        # stride
                    padding=conv_padding,      # padding
                )
            )
            self.add_sublayer(
                'relu%d' % i,
                paddle.nn.ReLU()
            )
            num_channels = num_filters
        self.add_sublayer(
            'Maxpool',
            paddle.nn.MaxPool2D(
                kernel_size=pool_size,  # pooling kernel size
                stride=pool_stride      # pooling stride
            )
        )

    def forward(self, inputs):
        x = inputs
        for prefix, sub_layer in self.named_children():
            # print(prefix, sub_layer)
            x = sub_layer(x)
        return x
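A quick shape check for the block (illustrative, not in the original): with 3x3 convolutions at stride 1 and padding 1 the spatial size is preserved, and the 2x2 max-pool then halves it.

block = ConvPool(3, 64, 3, 2, 2, 2)  # same arguments as convpool01 below
x = paddle.randn([8, 3, 224, 224])   # hypothetical input batch
print(block(x).shape)                # [8, 64, 112, 112]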
class VGGNet(paddle.nn.Layer):
    def __init__(self):
        super(VGGNet, self).__init__()
        self.convpool01 = ConvPool(
            3, 64, 3, 2, 2, 2)  # in channels, filters, filter size, pool size, pool stride, number of stacked convs
        self.convpool02 = ConvPool(
            64, 128, 3, 2, 2, 2)
        self.convpool03 = ConvPool(
            128, 256, 3, 2, 2, 3)
        self.convpool04 = ConvPool(
            256, 512, 3, 2, 2, 3)
        self.convpool05 = ConvPool(
            512, 512, 3, 2, 2, 3)
        self.pool_5_shape = 512 * 7 * 7
        self.fc01 = paddle.nn.Linear(self.pool_5_shape, 4096)
        self.fc02 = paddle.nn.Linear(4096, 4096)
        # train_parameters['class_dim'] (the number of classes) is defined elsewhere in the original post
        self.fc03 = paddle.nn.Linear(4096, train_parameters['class_dim'])

    def forward(self, inputs, label=None):
        """Forward pass"""
        # print('input_shape:', inputs.shape)    # [8, 3, 224, 224]
        out = self.convpool01(inputs)
        # print('convpool01_shape:', out.shape)  # [8, 64, 112, 112]
        out = self.convpool02(out)
        # print('convpool02_shape:', out.shape)  # [8, 128, 56, 56]
        out = self.convpool03(out)
        # print('convpool03_shape:', out.shape)  # [8, 256, 28, 28]
        out = self.convpool04(out)
        # print('convpool04_shape:', out.shape)  # [8, 512, 14, 14]
        out = self.convpool05(out)
        # print('convpool05_shape:', out.shape)  # [8, 512, 7, 7]
        out = paddle.reshape(out, shape=[-1, 512 * 7 * 7])
        out = self.fc01(out)
        out = self.fc02(out)
        out = self.fc03(out)
        if label is not None:
            acc = paddle.metric.accuracy(input=out, label=label)
            return out, acc
        else:
            return out
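An end-to-end sketch, assuming a hypothetical train_parameters dict (the original post defines it elsewhere; 5 classes and the random batch are placeholders):

train_parameters = {'class_dim': 5}  # assumption: 5 herb classes

model = VGGNet()
images = paddle.randn([8, 3, 224, 224])      # dummy image batch
labels = paddle.randint(0, 5, shape=[8, 1])  # dummy integer labels, shape [N, 1]
logits, acc = model(images, labels)
print(logits.shape, float(acc))              # [8, 5] and the batch accuracy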
Source: https://www.cnblogs.com/hhxxlx/p/14818065.html