Fashion-MNIST contains images from ten classes. Each class has 6,000 images in the training set and 1,000 in the test set, so the training and test sets contain 60,000 and 10,000 images in total, respectively. The test set is used to evaluate the model's performance.
Every input image is 28 pixels high and 28 pixels wide, and all images are grayscale. The ten classes in Fashion-MNIST are T-shirt/top, trouser, pullover, dress, coat, sandal, shirt, sneaker, bag, and ankle boot.
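Since the labels in the dataset are plain integers 0 through 9, it helps to keep a human-readable mapping at hand. The list below is a small convenience sketch that follows the label order documented for Fashion-MNIST and is reused in the optional snippets later on:

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']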
import numpy as np
from tensorflow import keras
import tensorflow as tf
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

# Restrict TensorFlow to the CPU (optional; remove these two lines to use a GPU if available).
cpu = tf.config.list_physical_devices("CPU")
tf.config.set_visible_devices(cpu)
print(tf.config.list_logical_devices())
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
# Hold out the first 5,000 training images as a validation set.
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
# Standardize pixel values (fit on the training set only) and reshape to (N, 28, 28, 1) for Conv2D.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(55000, -1)).reshape(-1, 28, 28, 1)
x_valid_scaled = scaler.transform(x_valid.astype(np.float32).reshape(5000, -1)).reshape(-1, 28, 28, 1)
x_test_scaled = scaler.transform(x_test.astype(np.float32).reshape(10000, -1)).reshape(-1, 28, 28, 1)
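As a quick sanity check (optional, not part of the original pipeline), one can display a raw training image together with its label using the matplotlib import from above and the class_names list defined earlier:

plt.imshow(x_train[0], cmap='binary')   # show the unscaled 28x28 grayscale image
plt.title(class_names[y_train[0]])      # map the integer label to its name
plt.axis('off')
plt.show()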
model = keras.models.Sequential()
# Convolution layer; filters is the number of output channels.
# input_shape is (height, width, channels); the batch dimension is implicit.
model.add(keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                              activation='relu', input_shape=(28, 28, 1)))  # -> (28, 28, 64)
# Pooling; max pooling is the most common choice and halves the spatial dimensions.
model.add(keras.layers.MaxPool2D())  # -> (14, 14, 64)
# Convolution
model.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))  # -> (14, 14, 32)
# Pooling
model.add(keras.layers.MaxPool2D())  # -> (7, 7, 32)
# Convolution
model.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))  # -> (7, 7, 32)
# Pooling
model.add(keras.layers.MaxPool2D())  # -> (3, 3, 32)
# The convolutional output is 4-D (batch, height, width, channels); Flatten turns it into 2-D for the dense layers.
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation='relu'))
model.add(keras.layers.Dense(256, activation = 'relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train_scaled, y_train, epochs=10, validation_data=(x_valid_scaled, y_valid))
model.evaluate(x_test_scaled, y_test) # [0.32453039288520813, 0.906000018119812]
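A common way to inspect how training went (optional, using the pandas and matplotlib imports already present) is to plot the metrics recorded by fit(); with this compile configuration Keras stores them under keys such as 'loss', 'accuracy', 'val_loss', and 'val_accuracy':

pd.DataFrame(history.history).plot(figsize=(8, 5))  # loss/accuracy curves per epoch
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()

The second network below deepens the architecture by stacking two convolutional layers before each pooling step.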
model = keras.models.Sequential()
# Convolution block 1: two convolutions before pooling.
# input_shape is (height, width, channels); the batch dimension is implicit.
model.add(keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                              activation='relu', input_shape=(28, 28, 1)))  # -> (28, 28, 64)
model.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))  # -> (28, 28, 32)
# Pooling; max pooling is the most common choice.
model.add(keras.layers.MaxPool2D())  # -> (14, 14, 32)
# Convolution block 2
model.add(keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))  # -> (14, 14, 64)
model.add(keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))  # -> (14, 14, 64)
# Pooling
model.add(keras.layers.MaxPool2D())  # -> (7, 7, 64)
# Convolution block 3
model.add(keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))  # -> (7, 7, 128)
model.add(keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))  # -> (7, 7, 128)
# Pooling
model.add(keras.layers.MaxPool2D())  # -> (3, 3, 128)
# The convolutional output is 4-D (batch, height, width, channels); Flatten turns it into 2-D for the dense layers.
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation='relu'))
model.add(keras.layers.Dense(256, activation = 'relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train_scaled, y_train, epochs=10, validation_data=(x_valid_scaled, y_valid))
model.evaluate(x_test_scaled, y_test) # [0.3228122293949127, 0.9052000045776367]
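To see the deeper model in action on individual samples, a small illustrative sketch (assuming the class_names list defined earlier) converts the softmax outputs back to class names:

probs = model.predict(x_test_scaled[:5])   # shape (5, 10): one probability vector per image
pred_labels = np.argmax(probs, axis=1)     # index of the most likely class
print("predicted:", [class_names[i] for i in pred_labels])
print("actual:   ", [class_names[i] for i in y_test[:5]])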