TensorFlow Keras Regression Model
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
from sklearn.datasets import fetch_california_housing
## Data collection
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)
import pprint
pprint.pprint(housing.data[0:5])
pprint.pprint(housing.target[0:5])
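# A quick inspection sketch (not in the original): view the raw data as a table.
# housing.feature_names comes from the sklearn Bunch returned above; the
# "target" column name is just an illustrative choice.
df = pd.DataFrame(housing.data, columns=housing.feature_names)
df["target"] = housing.target
print(df.head())
print(df.describe())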
from sklearn.model_selection import train_test_split
# Split off the test set
x_train_all, x_test, y_train_all, y_test = train_test_split(
housing.data, housing.target,random_state=7,test_size=0.25)
# test_size controls the fraction held out for the test set; 0.25 is the default
# Split the remaining data into training and validation sets
x_train, x_valid, y_train, y_valid = train_test_split(
x_train_all, y_train_all,random_state=11)
print(x_valid.shape, y_valid.shape)  # validation set
print(x_train.shape, y_train.shape)  # training set
print(x_test.shape, y_test.shape)    # test set
print(np.max(x_train), np.min(x_train))  # print the extreme values in the training set
## Standardization: x = (x - u(mean)) / std(standard deviation) gives features
# with mean 0 and standard deviation 1
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()  # create the scaler object
# Training set
# (Notes carried over from the classification example: convert to float32 first;
#  x_train: [None, 28, 28] -> [None, 784] -> then reshape back with reshape(-1, 28, 28))
# fit_transform: fit records the training set's mean and std so they can be reused
# later on the validation and test sets
x_train_scaled = scaler.fit_transform(x_train)
# Validation set: use transform only, reusing the training set statistics
x_valid_scaled = scaler.transform(x_valid)
# Test set: use transform only
x_test_scaled = scaler.transform(x_test)
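# A minimal check (not in the original) that standardization applies x = (x - mean) / std
# with the statistics learned from the training set; scaler.mean_ and scaler.scale_ are
# standard sklearn StandardScaler attributes.
manual_scaled = (x_train - scaler.mean_) / scaler.scale_
print(np.allclose(manual_scaled, x_train_scaled))  # expected: True
print(x_train_scaled.mean(axis=0))  # per-feature means, roughly 0
print(x_train_scaled.std(axis=0))   # per-feature stds, roughly 1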
# tf.keras.models.Sequential()
"""
model = keras.models.Sequential()#建立对象
model.add(keras.layers.Flatten(input_shape=[28,28]))#展平输入
#把二维矩阵展成28*28的一维向量
model.add(keras.layers.Dense(300,activation="relu"))
# 全链接层 activation 是**函数
# :让下一层的所有单元一一的与上一层连接
model.add(keras.layers.Dense(100,activation="relu"))
model.add(keras.layers.Dense(10,activation="softmax"))
"""
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu",
                       input_shape=x_train.shape[1:]),  # shape[1:] is (8,), the 8 input features
    keras.layers.Dense(1),  # the output is a single number, so the last layer has one unit
])
model.summary()
model.compile(loss="mean_squared_error",  # loss function: mean squared error
              optimizer="sgd",  # optimizer: the method used to adjust the model's weights
              )
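# An alternative compile call (illustration only, not in the original): the "sgd"
# string can be replaced with an explicit optimizer object to control the learning
# rate; the 0.001 value here is an assumption, not from the source.
# model.compile(loss="mean_squared_error",
#               optimizer=keras.optimizers.SGD(learning_rate=0.001))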
# relu: y = max(0, x); output x when x > 0, otherwise output 0
# softmax: turns a vector into a probability distribution, x = [x1, x2, x3]
# y = [e^x1/sum, e^x2/sum, e^x3/sum],
# sum = e^x1 + e^x2 + e^x3
# reason for sparse_categorical_crossentropy: y is an integer class index that
# needs to be one-hot encoded into a vector
# if y is already a one-hot vector, use categorical_crossentropy directly
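# A small numpy illustration (not in the original) of the notes above: softmax turns
# a vector into a probability distribution, and a sparse label is an integer class id
# corresponding to a one-hot vector; keras.utils.to_categorical is a standard helper.
x_vec = np.array([1.0, 2.0, 3.0])
probs = np.exp(x_vec) / np.sum(np.exp(x_vec))  # e^xi / (e^x1 + e^x2 + e^x3)
print(probs, probs.sum())                       # the probabilities sum to 1
print(keras.utils.to_categorical([2], num_classes=10))  # one-hot vector for label 2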
# The model is compiled above; next, set up the training callbacks
callbacks = [
    keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
    # min_delta: threshold for how much the monitored value must improve between epochs
    # patience: stop training after this many consecutive epochs whose improvement
    # falls below min_delta
]
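# A more explicit version of the callback (sketch only, same behavior as above plus
# restoring the best weights; monitor="val_loss" is the Keras default and
# restore_best_weights is a standard EarlyStopping argument):
# callbacks = [
#     keras.callbacks.EarlyStopping(monitor="val_loss", patience=5,
#                                   min_delta=1e-3, restore_best_weights=True),
# ]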
# Start training
history = model.fit(x_train_scaled, y_train, epochs=100,  # up to 100 passes over the training set
                    validation_data=(x_valid_scaled, y_valid),  # validation set
                    callbacks=callbacks)
def plot_learning_curves(history):
    # convert history.history (a dict of per-epoch metrics) to a DataFrame and plot it
    pd.DataFrame(history.history).plot(figsize=(8, 5))  # figure size (8, 5)
    plt.grid(True)  # show the grid
    plt.gca().set_ylim(0, 1)  # set the y-axis range
    plt.show()
plot_learning_curves(history)
# model.evaluate(x_test_scaled, y_test)  # evaluate on the test set
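# A usage sketch (not in the original): evaluate on the scaled test set and predict
# a few samples; model.evaluate and model.predict are standard Keras calls.
test_mse = model.evaluate(x_test_scaled, y_test)
print("test MSE:", test_mse)
print(model.predict(x_test_scaled[:3]))  # predicted values for the first 3 test samples
print(y_test[:3])                        # the corresponding ground-truth targets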
Changes compared with the classification model:
1. The loss function becomes loss="mean_squared_error" (mean squared error).
2. The last layer has only one unit, because the result is a single number.