
TensorFlow 2.0 -- Keras in Practice


import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
# StandardScaler from sklearn is used for data standardization
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
# Optionally print each module's version, which makes the setup easier to reproduce
# for module in mpl, np, pd, sklearn, tf, keras:
#     print(module.__name__, module.__version__)

# Load the data
# Fashion-MNIST ships with Keras
fashion_mnist = keras.datasets.fashion_mnist
# Split into training and test sets
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
# Carve a validation set out of the training data
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
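
For orientation, a small sketch (added here, not in the original post) to inspect the splits and the 10 Fashion-MNIST class names:

# Fashion-MNIST class names, indexed by label value
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(x_train.shape, y_train.shape)   # (55000, 28, 28) (55000,)
print(x_valid.shape, y_valid.shape)   # (5000, 28, 28) (5000,)
print(x_test.shape, y_test.shape)     # (10000, 28, 28) (10000,)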

# Standardize the data so gradient descent converges faster and the model performs better.
# Fit the scaler on the training set only, then reuse it (transform) on the test and validation sets.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
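
As a quick sanity check (an added sketch), the standardized training set should have mean close to 0 and standard deviation close to 1; the validation and test sets only approximate this because they reuse the training statistics:

print(np.mean(x_train_scaled), np.std(x_train_scaled))   # ~0.0, ~1.0
print(np.mean(x_valid_scaled), np.std(x_valid_scaled))   # close to, but not exactly, 0 and 1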

# Build the model
# Create an empty Sequential model
model = keras.models.Sequential()
# Flatten each 28x28 matrix into a vector, since fully connected layers are used here
model.add(keras.layers.Flatten(input_shape=[28, 28]))
# Use a for loop to stack a reasonably deep network
for _ in range(10):
    # selu adds self-normalization on top of relu; Dense is a fully connected layer
    model.add(keras.layers.Dense(50, activation='selu'))
    # Add a batch-normalization layer
    model.add(keras.layers.BatchNormalization())
    # Add a dropout layer to sparsify the network. This is mainly to show how Keras stacks
    # different layer types; tune it yourself if you need higher accuracy.
    # AlphaDropout preserves the mean and variance of its inputs, so it does not break the normalization.
    model.add(keras.layers.AlphaDropout(rate=0.3))
model.add(keras.layers.Dense(10, activation='softmax'))

# Compile the model: choose the loss function, the optimizer, and the metrics to monitor
model.compile(optimizer='sgd',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Inspect the model, i.e. the layers it contains
# print(model.summary())
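
For reference, model.summary() also reports the parameter count per layer; with the layers above, the first Dense layer holds 28*28*50 weights plus 50 biases = 39,250 parameters, each later Dense(50) layer holds 50*50 + 50 = 2,550, and the output layer holds 50*10 + 10 = 510.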

# Set up callbacks (model checkpointing, TensorBoard visualization, etc.).
# To inspect the logs, run `tensorboard --logdir=logdir_path` in a terminal and open the reported port in a browser.
logdir = os.path.join('dnn-selu-bn-dropout-callbacks')
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.join(logdir, 'fashion-mnist.h5')
callbacks = [
    keras.callbacks.TensorBoard(logdir),                         # write TensorBoard log files
    keras.callbacks.ModelCheckpoint(output_model_file,
                                    save_best_only=True),        # save the best model seen so far
    keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),   # stop early when validation loss stops improving
]

# Train the model
history = model.fit(x_train_scaled, y_train,
                    epochs=20,                                   # one epoch = one full pass over the training data
                    validation_data=(x_valid_scaled, y_valid),   # validation set evaluated after each epoch
                    callbacks=callbacks)                         # pass in the callbacks defined above
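
The returned history object records the per-epoch metrics that the plotting helper below uses; a quick sketch of what it contains (key names assume TF 2.x defaults):

# history.history is a dict of per-epoch values, e.g.
# dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
print(history.history.keys())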


# Plot the learning curves
def plot_learning_curves(history):
    # Plot every metric recorded in history.history on one figure
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()

plot_learning_curves(history)

# Evaluate on the test set
# model.evaluate(x_test_scaled, y_test)
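
Going one step further, the best checkpoint written by ModelCheckpoint can be reloaded and used for predictions; a minimal sketch (best_model and the argmax decoding are illustrative additions, not from the original post):

# Reload the best model saved by the ModelCheckpoint callback
best_model = keras.models.load_model(output_model_file)
print(best_model.evaluate(x_test_scaled, y_test))
# predict() returns per-class probabilities; take the argmax to get predicted labels
pred_labels = np.argmax(best_model.predict(x_test_scaled), axis=1)
print(pred_labels[:10], y_test[:10])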




Source: https://www.cnblogs.com/peiziming/p/13179091.html