from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import utils
import numpy as np
# Load the data
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# List of class names
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Flatten the 28x28 images into 784-element vectors
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
# Normalize pixel values to the [0, 1] range
x_train = x_train / 255
x_test = x_test / 255
# One-hot encode the labels
y_train = utils.to_categorical(y_train, 10)
y_test = utils.to_categorical(y_test, 10)
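# e.g. label 9 ('Ankle boot') becomes [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]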
# Create a sequential model
model = Sequential()
# Add the network layers
model.add(Dense(800, input_dim=784, activation="relu"))  # Hidden layer: 800 neurons, ReLU
model.add(Dense(10, activation="softmax"))  # Output layer: 10 neurons, one per class
# Compile the model
model.compile(loss="categorical_crossentropy",
              optimizer="SGD",
              metrics=["accuracy"])
model.summary()
# Train the network
history = model.fit(x_train, y_train,
                    batch_size=200,
                    epochs=100,
                    validation_split=0.2,
                    verbose=1)
# Evaluate the trained network on the test data
scores = model.evaluate(x_test, y_test, verbose=1)
print("Accuracy on the test data, percent:", round(scores[1] * 100, 4))
Accuracy on the test data, percent: 86.87
Train on 48000 samples, validate on 12000 samples
Epoch 1/100
48000/48000 [==============================] - 2s 37us/sample - loss: 1.1875 - acc: 0.6657 - val_loss: 0.8279 - val_acc: 0.7548
Epoch 2/100
48000/48000 [==============================] - 1s 21us/sample - loss: 0.7540 - acc: 0.7721 - val_loss: 0.6858 - val_acc: 0.7850
Epoch 3/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.6573 - acc: 0.7947 - val_loss: 0.6244 - val_acc: 0.8001
Epoch 4/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.6061 - acc: 0.8084 - val_loss: 0.5850 - val_acc: 0.8112
Epoch 5/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.5729 - acc: 0.8158 - val_loss: 0.5609 - val_acc: 0.8153
Epoch 6/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.5491 - acc: 0.8212 - val_loss: 0.5447 - val_acc: 0.8175
Epoch 7/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.5304 - acc: 0.8255 - val_loss: 0.5242 - val_acc: 0.8238
Epoch 8/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.5159 - acc: 0.8289 - val_loss: 0.5133 - val_acc: 0.8267
Epoch 9/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.5037 - acc: 0.8328 - val_loss: 0.5026 - val_acc: 0.8287
Epoch 10/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.4933 - acc: 0.8355 - val_loss: 0.4942 - val_acc: 0.8294
...
Epoch 90/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.3249 - acc: 0.8881 - val_loss: 0.3607 - val_acc: 0.8731
Epoch 91/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3244 - acc: 0.8871 - val_loss: 0.3592 - val_acc: 0.8745
Epoch 92/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3228 - acc: 0.8884 - val_loss: 0.3581 - val_acc: 0.8756
Epoch 93/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3222 - acc: 0.8890 - val_loss: 0.3612 - val_acc: 0.8743
Epoch 94/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.3210 - acc: 0.8889 - val_loss: 0.3588 - val_acc: 0.8743
Epoch 95/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.3203 - acc: 0.8888 - val_loss: 0.3577 - val_acc: 0.8742
Epoch 96/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3192 - acc: 0.8896 - val_loss: 0.3585 - val_acc: 0.8739
Epoch 97/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3183 - acc: 0.8899 - val_loss: 0.3566 - val_acc: 0.8764
Epoch 98/100
48000/48000 [==============================] - 1s 20us/sample - loss: 0.3175 - acc: 0.8899 - val_loss: 0.3574 - val_acc: 0.8740
Epoch 99/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.3171 - acc: 0.8906 - val_loss: 0.3569 - val_acc: 0.8758
Epoch 100/100
48000/48000 [==============================] - 1s 19us/sample - loss: 0.3160 - acc: 0.8905 - val_loss: 0.3532 - val_acc: 0.8768
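Once trained, the model can be used for inference. Below is a minimal sketch (not part of the original listing) that classifies the first test image with the standard Keras predict call and looks up the human-readable name in the classes list defined above:
# Predict class probabilities for the first test image
predictions = model.predict(x_test[:1])
# The index of the highest probability is the predicted class
predicted_class = np.argmax(predictions[0])
print("Predicted:", classes[predicted_class])
# y_test is one-hot encoded, so argmax recovers the true label
print("Actual:", classes[np.argmax(y_test[0])])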
To change how long the network trains, adjust the epochs argument of fit:
history = model.fit(x_train, y_train,
                    batch_size=200,
                    epochs=100,  # Number of epochs
                    validation_split=0.2,
                    verbose=1)
The batch_size argument controls how many samples are processed before each weight update:
history = model.fit(x_train, y_train,
                    batch_size=200,  # Mini-batch size
                    epochs=100,
                    validation_split=0.2,
                    verbose=1)
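The history object returned by fit stores the per-epoch metrics, so the training curves can be visualized. A sketch using matplotlib follows (matplotlib is an assumption, it is not imported in the original listing; note the metric key is 'acc' in the TensorFlow version that produced the log above, but 'accuracy' in newer releases):
import matplotlib.pyplot as plt

# Plot training and validation accuracy per epoch
plt.plot(history.history['acc'], label='Training accuracy')
plt.plot(history.history['val_acc'], label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()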
To experiment with the capacity of the network, change the number of neurons in the hidden layer (XXX is a placeholder for the value you want to try):
model.add(Dense(XXX, input_dim=784, activation="relu"))
You can also make the network deeper by adding another hidden layer:
model.add(Dense(800, input_dim=784, activation="relu"))
model.add(Dense(600, activation="relu"))  # New hidden layer
model.add(Dense(10, activation="softmax"))
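Putting the pieces together, the deeper variant would be defined and compiled as below. This is a sketch: the 800 and 600 layer sizes are simply the values from the snippets above, not tuned results.
# Deeper model: two hidden layers instead of one
model = Sequential()
model.add(Dense(800, input_dim=784, activation="relu"))
model.add(Dense(600, activation="relu"))  # New hidden layer
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy",
              optimizer="SGD",
              metrics=["accuracy"])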