autograph()
Here is a full MNIST training loop implemented in R using tfautograph (originally adapted from the upstream TensorFlow tf.function guide).
library(magrittr)
library(purrr, warn.conflicts = FALSE)
library(tensorflow)
library(tfdatasets)
library(keras)
library(tfautograph)
tf$version$VERSION
#> [1] "2.6.0"
First, some helpers so we can capture tf$print() output in the R Markdown vignette.
# Redirect tf$print() to a tempfile so its output (normally sent to stderr)
# can be captured and shown in the rendered vignette.
TEMPFILE <- tempfile("tf-print-out", fileext = ".txt")

print_tempfile <- function(clear_after_read = TRUE) {
  if (clear_after_read) on.exit(unlink(TEMPFILE))
  writeLines(readLines(TEMPFILE, warn = FALSE))
}

tf_print <- function(...)
  tf$print(..., output_stream = sprintf("file://%s", TEMPFILE))
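As a quick sanity check (a hypothetical demo, not part of the training code): anything passed to tf_print() lands in the tempfile, and print_tempfile() echoes it back and clears it.
tf_print("hello from tf_print")
print_tempfile()
#> hello from tf_print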
c(c(x_train, y_train), .) %<-% tf$keras$datasets$mnist$load_data()

train_dataset <- list(x_train, y_train) %>%
  tensor_slices_dataset() %>%
  dataset_map(function(x, y) {
    x <- tf$cast(x, tf$float32) / 255  # scale pixels to [0, 1]
    y <- tf$cast(y, tf$int64)
    list(x, y)
  }) %>%
  dataset_take(20000) %>%    # use a 20,000-image subset
  dataset_shuffle(20000) %>% # shuffle buffer covers the whole subset
  dataset_batch(100)
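To spot-check the pipeline (a hypothetical aside; as_iterator() and iter_next() come from reticulate), pull one batch and look at its shapes:
c(x, y) %<-% reticulate::iter_next(reticulate::as_iterator(train_dataset))
x$shape  # expect TensorShape([100, 28, 28]), dtype float32
y$shape  # expect TensorShape([100]), dtype int64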
new_model_and_optimizer <- function() {
  model <- keras_model_sequential() %>%
    layer_reshape(target_shape = c(28 * 28),
                  input_shape = shape(28, 28)) %>%
    layer_dense(100, activation = 'relu') %>%
    layer_dense(100, activation = 'relu') %>%
    layer_dense(10)  # raw logits, no softmax
  model$build()
  optimizer <- tf$keras$optimizers$Adam()
  list(model, optimizer)
}
c(model, optimizer) %<-% new_model_and_optimizer()
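For orientation (a hypothetical aside), the network is a plain MLP, 784 -> 100 -> 100 -> 10, so the dense layers contribute 785 * 100 = 78,500, 101 * 100 = 10,100, and 101 * 10 = 1,010 weights, or 89,610 parameters in total:
summary(model)  # expect "Total params: 89,610"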
# from_logits = TRUE because the model's final layer emits unnormalized logits
compute_loss <- tf$keras$losses$SparseCategoricalCrossentropy(from_logits = TRUE)
# stateful metric: result() reports the running mean over all updates so far
compute_accuracy <- tf$keras$metrics$SparseCategoricalAccuracy()
train_one_step <- function(model, optimizer, x, y) {
  # %as% (from tfautograph) is the R analog of Python's `with ... as tape:`
  with(tf$GradientTape() %as% tape, {
    logits <- model(x)
    loss <- compute_loss(y, logits)
  })
  grads <- tape$gradient(loss, model$trainable_variables)
  optimizer$apply_gradients(
    transpose(list(grads, model$trainable_variables)))
  compute_accuracy(y, logits)  # update the running accuracy
  loss
}
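apply_gradients() expects pairs of (gradient, variable); purrr::transpose() turns the two parallel lists inside out to produce exactly that. A tiny illustration with placeholder strings:
str(transpose(list(list("g1", "g2"), list("v1", "v2"))))
#> List of 2
#>  $ :List of 2
#>   ..$ : chr "g1"
#>   ..$ : chr "v1"
#>  $ :List of 2
#>   ..$ : chr "g2"
#>   ..$ : chr "v2"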
train <- autograph(function(model, optimizer) {
  step <- 0L
  loss <- 0
  for (batch in train_dataset) {  # iterate over a tf Dataset
    c(x, y) %<-% batch
    step %<>% add(1L)
    loss <- train_one_step(model, optimizer, x, y)
    if (compute_accuracy$result() > 0.8) {  # condition on a tensor value
      tf_print("Accuracy over 0.8; breaking early")
      break
    } else if (step %% 10L == 0L)
      tf_print('Step', step, ': loss', loss, '; accuracy', compute_accuracy$result())
  }
  tf_print('Final step', step, ": loss", loss, "; accuracy", compute_accuracy$result())
  list(step, loss)
})
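autograph() is what lets the for loop over a tf Dataset and the tensor-dependent if/break above run both eagerly and, once traced by tf_function(), as graph control flow (tf$while_loop / tf$cond). A minimal sketch of the same idea (a hypothetical example, not part of the training code):
abs_ag <- autograph(function(x) {
  if (x < 0)  # x is a tensor, so this lowers to tf$cond when traced
    x <- -x
  x
})
tf_function(abs_ag)(tf$constant(-3L))
#> tf.Tensor(3, shape=(), dtype=int32)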
# autograph also works in eager mode
c(model, optimizer) %<-% new_model_and_optimizer()
c(step, loss) %<-% train(model, optimizer)
print_tempfile()
#> Step 10 : loss 1.71274185 ; accuracy 0.392
#> Step 20 : loss 1.14219153 ; accuracy 0.5385
#> Step 30 : loss 0.720202565 ; accuracy 0.612666667
#> Step 40 : loss 0.508623421 ; accuracy 0.673
#> Step 50 : loss 0.426777333 ; accuracy 0.7104
#> Step 60 : loss 0.342745066 ; accuracy 0.743166685
#> Step 70 : loss 0.371664643 ; accuracy 0.765714288
#> Step 80 : loss 0.369277507 ; accuracy 0.78075
#> Step 90 : loss 0.324099153 ; accuracy 0.793777764
#> Accuracy over 0.8; breaking early
#> Final step 97 : loss 0.387442589 ; accuracy 0.801030934
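Note that compute_accuracy keeps its state across runs: new_model_and_optimizer() builds a fresh model, but the metric is never re-created, which is why the graph-mode run below already reports an accuracy near 0.77 at step 10. To start the running mean from zero instead, one could reset it first (a hedged aside; reset_states() is the method name in TF 2.6):
compute_accuracy$reset_states()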
c(model, optimizer) %<-% new_model_and_optimizer()
train_on_graph <- tf_function(train)  # trace the autographed function into a graph
c(step, loss) %<-% train_on_graph(model, optimizer)
print_tempfile()
#> Step 10 : loss 1.95382404 ; accuracy 0.766729
#> Step 20 : loss 1.23789883 ; accuracy 0.754358947
#> Step 30 : loss 0.655607045 ; accuracy 0.755118132
#> Step 40 : loss 0.658970356 ; accuracy 0.759197056
#> Step 50 : loss 0.5310058 ; accuracy 0.765646279
#> Step 60 : loss 0.415600359 ; accuracy 0.772420406
#> Step 70 : loss 0.402227402 ; accuracy 0.778922141
#> Step 80 : loss 0.33683449 ; accuracy 0.785932183
#> Step 90 : loss 0.444064975 ; accuracy 0.791711211
#> Step 100 : loss 0.404047877 ; accuracy 0.797563434
#> Accuracy over 0.8; breaking early
#> Final step 106 : loss 0.3598544 ; accuracy 0.800394118