Prepare the data
library(keras3)

# Model / data parameters
num_classes <- 10
input_shape <- c(28, 28, 1)

# Load the data and split it between train and test sets
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()
# Scale images to the [0, 1] range
x_train <- x_train / 255
x_test <- x_test / 255
# Make sure images have shape (28, 28, 1)
x_train <- op_expand_dims(x_train, -1)
x_test <- op_expand_dims(x_test, -1)
dim(x_train)
## [1] 60000    28    28     1
dim(x_test)
## [1] 10000    28    28     1
# Convert class vectors to binary class matrices
y_train <- to_categorical(y_train, num_classes)
y_test <- to_categorical(y_test, num_classes)
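A quick sanity check on the encoding: each label is now a one-hot row, so the label matrices have one row per image and one column per class (expected dimensions are noted in the comments):
# After one-hot encoding, the labels are matrices with one row per
# image and one column per class
dim(y_train)  # expected: 60000 10
dim(y_test)   # expected: 10000 10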
Build the model
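The model is a small convnet ending in a softmax classification head. The definition below is consistent with the summary that follows: the 3x3 kernels and 2x2 pools are pinned down by the printed output shapes and parameter counts, while the relu/softmax activations and the 0.5 dropout rate are assumptions (the conventional choices for this architecture; they do not affect the parameter counts).
model <- keras_model_sequential(input_shape = input_shape) |>
  # Two conv/pool blocks (3x3 kernels and 2x2 pools follow from the summary)
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") |>
  layer_max_pooling_2d(pool_size = c(2, 2)) |>
  # Flatten to a 1600-long vector, regularize, then classify
  layer_flatten() |>
  layer_dropout(rate = 0.5) |>  # rate = 0.5 is an assumption
  layer_dense(units = num_classes, activation = "softmax")

summary(model)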
## Model: "sequential"
## ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
## ┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
## ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
## │ conv2d (Conv2D)                 │ (None, 26, 26, 32)     │           320 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ max_pooling2d (MaxPooling2D)    │ (None, 13, 13, 32)     │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ conv2d_1 (Conv2D)               │ (None, 11, 11, 64)     │        18,496 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ max_pooling2d_1 (MaxPooling2D)  │ (None, 5, 5, 64)       │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ flatten (Flatten)               │ (None, 1600)           │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ dropout (Dropout)               │ (None, 1600)           │             0 │
## ├─────────────────────────────────┼────────────────────────┼───────────────┤
## │ dense (Dense)                   │ (None, 10)             │        16,010 │
## └─────────────────────────────────┴────────────────────────┴───────────────┘
## Total params: 34,826 (136.04 KB)
## Trainable params: 34,826 (136.04 KB)
## Non-trainable params: 0 (0.00 B)
Train the model
batch_size <- 128
epochs <- 15
model |> compile(
  loss = "categorical_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)

model |> fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.1
)
## Epoch 1/15
## 422/422 - 5s - 13ms/step - accuracy: 0.8894 - loss: 0.3635 - val_accuracy: 0.9787 - val_loss: 0.0792
## Epoch 2/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9666 - loss: 0.1111 - val_accuracy: 0.9852 - val_loss: 0.0549
## Epoch 3/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9743 - loss: 0.0825 - val_accuracy: 0.9880 - val_loss: 0.0440
## Epoch 4/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9785 - loss: 0.0695 - val_accuracy: 0.9897 - val_loss: 0.0398
## Epoch 5/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9803 - loss: 0.0626 - val_accuracy: 0.9903 - val_loss: 0.0351
## Epoch 6/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9824 - loss: 0.0559 - val_accuracy: 0.9910 - val_loss: 0.0329
## Epoch 7/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9834 - loss: 0.0501 - val_accuracy: 0.9918 - val_loss: 0.0309
## Epoch 8/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9850 - loss: 0.0478 - val_accuracy: 0.9923 - val_loss: 0.0309
## Epoch 9/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9860 - loss: 0.0445 - val_accuracy: 0.9918 - val_loss: 0.0302
## Epoch 10/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9866 - loss: 0.0439 - val_accuracy: 0.9913 - val_loss: 0.0294
## Epoch 11/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9874 - loss: 0.0391 - val_accuracy: 0.9918 - val_loss: 0.0305
## Epoch 12/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9873 - loss: 0.0372 - val_accuracy: 0.9925 - val_loss: 0.0292
## Epoch 13/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9886 - loss: 0.0347 - val_accuracy: 0.9918 - val_loss: 0.0291
## Epoch 14/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9891 - loss: 0.0343 - val_accuracy: 0.9920 - val_loss: 0.0281
## Epoch 15/15
## 422/422 - 1s - 2ms/step - accuracy: 0.9894 - loss: 0.0320 - val_accuracy: 0.9920 - val_loss: 0.0280
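Note that fit() also returns a history object with the per-epoch metrics. Capturing it allows plotting the learning curves afterwards, as in the sketch below (the variable name history is illustrative; calling fit() again would continue training the already-fitted model rather than starting fresh):
# Capture the training history to inspect the learning curves later
history <- model |> fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.1
)
plot(history)  # loss and accuracy per epoch, for training and validation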
Evaluate the trained model
score <- model |> evaluate(x_test, y_test, verbose = 0)
score
## $accuracy
## [1] 0.9915
##
## $loss
## [1] 0.02418377
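Beyond aggregate metrics, the fitted model can generate per-image predictions. A minimal sketch, assuming the model and test data from above: predict() returns a matrix of class probabilities with one row per image, and column i corresponds to digit i - 1.
# Class probabilities for each test image: one row per image, one
# column per digit (0-9)
probs <- model |> predict(x_test)

# The predicted digit is the column with the highest probability
# (max.col() gives 1-based column indices, hence the - 1)
predicted <- max.col(probs) - 1
head(predicted)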