Training and Evaluation

Complete guide to training and evaluation with `fit()` and `evaluate()`.

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

This guide covers training, evaluation, and prediction (inference) models when using built-in APIs for training and validation, such as `Model.fit()`, `Model.evaluate()`, and `Model.predict()`.

If you are interested in leveraging `fit()` while specifying your own training step function, or in writing your own training and evaluation loops from scratch, see the dedicated guides on those topics.

When passing data to the built-in training loops of a model, you should either use NumPy arrays (if your data is small and fits in memory) or `tf.data.Dataset` objects. In the next few paragraphs, we'll use the MNIST dataset as NumPy arrays, in order to demonstrate how to use optimizers, losses, and metrics.

Let's consider the following model (here, we build it with the Functional API, but it could be a Sequential model or a subclassed model as well):

inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)

model = keras.Model(inputs=inputs, outputs=outputs)

Here's what the typical end-to-end workflow looks like: training, validation on a holdout set generated from the original training data, and evaluation on the test data. We'll use MNIST data for this example:

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255

y_train = y_train.astype("float32")
y_test = y_test.astype("float32")

# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

We specify the training configuration (optimizer, loss, metrics):

model.compile(
    optimizer=keras.optimizers.RMSprop(),  # Optimizer
    # Loss function to minimize
    loss=keras.losses.SparseCategoricalCrossentropy(),
    # List of metrics to monitor
    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)

We call `fit()`, which will train the model by slicing the data into "batches" of size `batch_size`, and repeatedly iterating over the entire dataset for a given number of `epochs`:

print("Fit model on training data")


history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)

Fit model on training data


Epoch 1/2
782/782 [==============================] - 2s 2ms/step - loss: 0.3479 -
sparse_categorical_accuracy: 0.9018 - val_loss: 0.2048 - val_sparse_categorical_accuracy: 0.9370
Epoch 2/2
782/782 [==============================] - 1s 2ms/step - loss: 0.1592 -
sparse_categorical_accuracy: 0.9521 - val_loss: 0.1377 - val_sparse_categorical_accuracy: 0.9594

The returned `history` object holds a record of the loss values and metric values during training:

history.history

{'loss': [0.34790968894958496, 0.1592278927564621],
 'sparse_categorical_accuracy': [0.9017800092697144, 0.9521200060844421],
 'val_loss': [0.20476257801055908, 0.13772223889827728],
 'val_sparse_categorical_accuracy': [0.9369999766349792, 0.9593999981880188]}
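Since `history.history` is just a dict of per-epoch lists, you can plot learning curves directly from it. A minimal sketch, assuming matplotlib is available (it is not used elsewhere in this guide):

import matplotlib.pyplot as plt

# Plot training vs. validation loss per epoch.
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epoch")
plt.legend()
plt.show()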

We evaluate the model on the test data via `evaluate()`, and generate predictions on new data via `predict()`:

# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)

# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)


Evaluate on test data


79/79 [==============================] - 0s 1ms/step - loss: 0.1408 -
sparse_categorical_accuracy: 0.9567
test loss, test acc: [0.14082984626293182, 0.9567000269889832]
Generate predictions for 3 samples
1/1 [==============================] - 0s 80ms/step
predictions shape: (3, 10)
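Each row of `predictions` is a probability distribution over the 10 classes (the softmax output). To recover class labels, you can take the argmax per row; a small usage note, not part of the original guide:

import numpy as np

# Index of the highest-probability class for each of the 3 samples.
predicted_classes = np.argmax(predictions, axis=1)
print("predicted classes:", predicted_classes)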

The `compile()` method: specifying a loss, metrics, and an optimizer. To train a model with `fit()`, you need to specify a loss function, an optimizer, and, optionally, some metrics to monitor. You pass these to the model as arguments to the `compile()` method:

model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)

The `metrics` argument should be a list; your model can have any number of metrics. Note that in many cases, the optimizer, loss, and metrics can be specified via string identifiers as a shortcut:

model.compile(
    optimizer="rmsprop",
    loss="sparse_categorical_crossentropy",
    metrics=["sparse_categorical_accuracy"],
)

For later reuse, let's put our model definition and compile step in functions; we will call them several times across different examples in this guide:

def get_uncompiled_model():
    inputs = keras.Input(shape=(784,), name="digits")
    x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
    x = layers.Dense(64, activation="relu", name="dense_2")(x)
    outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model


def get_compiled_model():
    model = get_uncompiled_model()
    model.compile(
        optimizer="rmsprop",
        loss="sparse_categorical_crossentropy",
        metrics=["sparse_categorical_accuracy"],
    )
    return model

In general, you won't have to create your own losses, metrics, or optimizers from scratch, because what you need is likely to be already part of the Keras API:

Optimizers: `SGD()` (with or without momentum), `RMSprop()`, `Adam()`, etc.
Losses: `MeanSquaredError()`, `KLDivergence()`, `CosineSimilarity()`, etc.
Metrics: `AUC()`, `Precision()`, `Recall()`, etc.

If you need to create a custom loss, Keras provides two ways to do so. The first method involves creating a function that accepts inputs `y_true` and `y_pred`. The following example shows a loss function that computes the mean squared error between the real data and the predictions:

def custom_mean_squared_error(y_true, y_pred):
    return tf.math.reduce_mean(tf.square(y_true - y_pred))

model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)

# We need to one-hot encode the labels to use MSE
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)

782/782 [==============================] - 2s 2ms/step - loss: 0.0162

<keras.callbacks.History at 0x159159fd0>

If you need a loss function that takes in parameters besides `y_true` and `y_pred`, you can subclass the `tf.keras.losses.Loss` class and implement the following two methods:

__init__(self): accept parameters to pass during the call of your loss function
call(self, y_true, y_pred): use the targets (y_true) and the model predictions (y_pred) to compute the model's loss

Let's say you want to use mean squared error, but with an added term that de-incentivizes prediction values far from 0.5 (we assume that the categorical targets are one-hot encoded and take values between 0 and 1). Here's how you would do it:

class CustomMSE(keras.losses.Loss):
    def __init__(self, regularization_factor=0.1, name="custom_mse"):
        super().__init__(name=name)
        self.regularization_factor = regularization_factor

    def call(self, y_true, y_pred):
        mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
        reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
        return mse + reg * self.regularization_factor

model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())

y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)


782/782 [==============================] - 2s 2ms/step - loss: 0.0392

<keras.callbacks.History at 0x1599fd650>

If you need a metric that isn't part of the API, you can easily create custom metrics by subclassing the `tf.keras.metrics.Metric` class. You will need to implement four methods:

__init__(self), in which you will create state variables for your metric.
update_state(self, y_true, y_pred, sample_weight=None), which uses the targets y_true and the model predictions y_pred to update the state variables.
result(self), which uses the state variables to compute the final results.
reset_state(self), which reinitializes the state of the metric.

State update and results computation are kept separate (in `update_state()` and `result()`, respectively) because in some cases, the results computation might be very expensive and would only be done periodically.

Here's a simple example showing how to implement a `CategoricalTruePositives` metric that counts how many samples were correctly classified as belonging to a given class:

class CategoricalTruePositives(keras.metrics.Metric):
    def __init__(self, name="categorical_true_positives", **kwargs):
        super(CategoricalTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name="ctp", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
        values = tf.cast(y_true, "int32") == tf.cast(y_pred, "int32")
        values = tf.cast(values, "float32")
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, "float32")
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
        return self.true_positives

    def reset_state(self):
        # The state of the metric will be reset at the start of each epoch.
        self.true_positives.assign(0.0)

model = get_uncompiled_model()
model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)

Epoch 1/3
782/782 [==============================] - 2s 2ms/step - loss: 0.3414 -
categorical_true_positives: 45121.0000
Epoch 2/3
782/782 [==============================] - 2s 2ms/step - loss: 0.1533 -
categorical_true_positives: 47725.0000
Epoch 3/3
782/782 [==============================] - 1s 2ms/step - loss: 0.1120 -
categorical_true_positives: 48333.0000

<keras.callbacks.History at 0x159b4b250>

The overwhelming majority of losses and metrics can be computed from `y_true` and `y_pred`, where `y_pred` is an output of your model -- but not all of them. For instance, a regularization loss may only require the activation of a layer (there are no targets in this case), and this activation may not be a model output. In such cases, you can call `self.add_loss(loss_value)` from inside the `call` method of a custom layer. Losses added in this way get added to the "main" loss during training (the one passed to `compile()`). Here's a simple example that adds activity regularization (note that activity regularization is built into all Keras layers -- this layer is just for the sake of providing a concrete example):

class ActivityRegularizationLayer(layers.Layer):
    def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs) * 0.1)
        return inputs  # Pass-through layer.

inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)

# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)

x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)

model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)

# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)

782/782 [==============================] - 2s 2ms/step - loss: 2.4753

<keras.callbacks.History at 0x159cb87d0>

You can do the same for logging metric values, using `add_metric()`:

class MetricLoggingLayer(layers.Layer):
    def call(self, inputs):
        # The `aggregation` argument defines
        # how to aggregate the per-batch values
        # over each epoch:
        # in this case we simply average them.
        self.add_metric(
            keras.backend.std(inputs), name="std_of_activation", aggregation="mean"
        )
        return inputs  # Pass-through layer.

inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)

# Insert std logging as a layer.
x = MetricLoggingLayer()(x)

x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)

model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)

782/782 [==============================] - 2s 2ms/step - loss: 0.3363 - std_of_activation: 0.9996

<keras.callbacks.History at 0x159e1dbd0>

In the Functional API, you can also call `model.add_loss(loss_tensor)` or `model.add_metric(metric_tensor, name, aggregation)`. Here's a simple example:

inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x2 = layers.Dense(64, activation="relu", name="dense_2")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)

model.add_loss(tf.reduce_sum(x1) * 0.1)
model.add_metric(keras.backend.std(x1), name="std_of_activation", aggregation="mean")

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)

782/782 [==============================] - 2s 2ms/step - loss: 2.5326 - std_of_activation: 0.0021

<keras.callbacks.History at 0x159f9e690>

Note that when you pass losses via `add_loss()`, it becomes possible to call `compile()` without a loss function, since the model already has a loss to minimize. Consider the following `LogisticEndpoint` layer: it takes as inputs targets and logits, and it tracks a crossentropy loss via `add_loss()`. It also tracks classification accuracy via `add_metric()`.

class LogisticEndpoint(keras.layers.Layer):
    def __init__(self, name=None):
        super(LogisticEndpoint, self).__init__(name=name)
        self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
        self.accuracy_fn = keras.metrics.BinaryAccuracy()

    def call(self, targets, logits, sample_weights=None):
        # Compute the training-time loss value and add it
        # to the layer using `self.add_loss()`.
        loss = self.loss_fn(targets, logits, sample_weights)
        self.add_loss(loss)

        # Log accuracy as a metric and add it
        # to the layer using `self.add_metric()`.
        acc = self.accuracy_fn(targets, logits, sample_weights)
        self.add_metric(acc, name="accuracy")

        # Return the inference-time prediction tensor (for `.predict()`).
        return tf.nn.softmax(logits)

You can use it in a model with two inputs (input data and targets), compiled without a `loss` argument, like this:

import numpy as np

inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
# Note the argument order matches the layer's call(targets, logits) signature.
predictions = LogisticEndpoint(name="predictions")(targets, logits)

model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")  # No loss argument!

data = {
    "inputs": np.random.random((3, 3)),
    "targets": np.random.random((3, 10)),
}
model.fit(data)

1/1 [==============================] - 0s 214ms/step - loss: 0.8886 - binary_accuracy: 0.0000e+00

<keras.callbacks.History at 0x15a15fa90>


In the first end-to-end example you saw, we used the `validation_data` argument to pass a tuple of NumPy arrays `(x_val, y_val)` to the model for evaluating a validation loss and validation metrics at the end of each epoch.

Here's another option: the argument `validation_split` allows you to automatically reserve part of your training data for validation. The argument value represents the fraction of the data to be reserved for validation, so it should be set to a number higher than 0 and lower than 1. For instance, `validation_split=0.2` means "use 20% of the data for validation", and `validation_split=0.6` means "use 60% of the data for validation". The way the validation is computed is by taking the last x% samples of the arrays received by the `fit()` call, before any shuffling. Note that you can only use `validation_split` when training with NumPy data:

model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)

625/625 [==============================] - 2s 2ms/step - loss: 0.3593 - sparse_categorical_accuracy: 0.8974 - val_loss: 0.2190 - val_sparse_categorical_accuracy: 0.9318

<keras.callbacks.History at 0x15a223bd0>

So far you've seen how to handle losses, metrics, and optimizers, and how to use the `validation_data` and `validation_split` arguments in `fit()` when your data is passed as NumPy arrays. Let's now look at the case where your data comes in the form of a `tf.data.Dataset` object. The `tf.data` API is a set of utilities in TensorFlow 2.0 for loading and preprocessing data in a way that's fast and scalable. You can pass a `Dataset` instance directly to the methods `fit()`, `evaluate()`, and `predict()`:

model = get_compiled_model()

# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)

# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)

# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))


Epoch 1/3
782/782 [==============================] - 2s 2ms/step - loss: 0.3358 -
sparse_categorical_accuracy: 0.9046
Epoch 2/3
782/782 [==============================] - 2s 2ms/step - loss: 0.1540 -
sparse_categorical_accuracy: 0.9544
Epoch 3/3
782/782 [==============================] - 1s 2ms/step - loss: 0.1109 -
sparse_categorical_accuracy: 0.9663
Evaluate
157/157 [==============================] - 0s 1ms/step - loss: 0.1118 -
sparse_categorical_accuracy: 0.9659

{'loss': 0.11180760711431503,
'sparse_categorical_accuracy': 0.9659000039100647}

Note that the `Dataset` is reset at the end of each epoch, so it can be reused for the next epoch. If you want to run training only on a specific number of batches from this `Dataset`, you can pass the `steps_per_epoch` argument, which specifies how many training steps the model should run using this `Dataset` before moving on to the next epoch. If you do this, the `Dataset` is not reset at the end of each epoch; instead, it just keeps drawing the next batches, and will eventually run out of data (unless it is an infinitely-looping dataset).

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)

Epoch 1/3
100/100 [==============================] - 1s 2ms/step - loss: 0.7515 -
sparse_categorical_accuracy: 0.8031
Epoch 2/3
100/100 [==============================] - 0s 2ms/step - loss: 0.3731 -
sparse_categorical_accuracy: 0.8919
Epoch 3/3
100/100 [==============================] - 0s 2ms/step - loss: 0.3165 -
sparse_categorical_accuracy: 0.9084

<keras.callbacks.History at 0x15a405e90>

You can pass a `Dataset` instance as the `validation_data` argument in `fit()`:

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

model.fit(train_dataset, epochs=1, validation_data=val_dataset)

782/782 [==============================] - 2s 2ms/step - loss: 0.3322 - sparse_categorical_accuracy: 0.9050 - val_loss: 0.1804 - val_sparse_categorical_accuracy: 0.9483

<keras.callbacks.History at 0x15a530510>

At the end of each epoch, the model will iterate over the validation dataset and compute the validation loss and validation metrics. If you want to run validation only on a specific number of batches from this dataset, you can pass the `validation_steps` argument, which specifies how many validation steps the model should run before interrupting validation and moving on to the next epoch:

model = get_compiled_model()

# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

model.fit(
    train_dataset,
    epochs=1,
    # Only run validation using the first 10 batches of the dataset
    # using the `validation_steps` argument
    validation_data=val_dataset,
    validation_steps=10,
)

782/782 [==============================] - 2s 2ms/step - loss: 0.3429 - sparse_categorical_accuracy: 0.9038 - val_loss: 0.2760 - val_sparse_categorical_accuracy: 0.9312

<keras.callbacks.History at 0x1663870d0>

Note that the validation dataset will be reset after each use (so that you will always be evaluating on the same samples from epoch to epoch). The argument `validation_split` (generating a holdout set from the training data) is not supported when training from `Dataset` objects, since this feature requires the ability to index the samples of the dataset, which is not possible in general with the `Dataset` API. A manual split is sketched below.
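If you do need a holdout set when your data comes as a `Dataset`, one workaround (a sketch, not part of the original guide; the batch count of 100 is arbitrary) is to split the `Dataset` yourself with `take()` and `skip()`:

# Carve a validation set out of a batched Dataset by hand,
# since `validation_split` is unavailable here.
full_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(64)
val_dataset = full_dataset.take(100)    # first 100 batches for validation
train_dataset = full_dataset.skip(100)  # remaining batches for training

model = get_compiled_model()
model.fit(train_dataset, epochs=1, validation_data=val_dataset)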

Besides NumPy arrays and TensorFlow `Dataset` objects, it's possible to train a Keras model using Pandas dataframes, or Python generators that yield batches of data and labels. In particular, the `keras.utils.Sequence` class offers a simple interface to build Python data generators that are multiprocessing-aware and can be shuffled. In general, we recommend:

NumPy input data if your data is small and fits in memory
`Dataset` objects if you have large datasets and you need to do distributed training
`Sequence` objects if you have large datasets and you need to do a lot of custom Python-side processing that cannot be done in TensorFlow (e.g. if you rely on external libraries for data loading or preprocessing)

`keras.utils.Sequence` is a utility that you can subclass to obtain a Python generator with two important properties: it works well with multiprocessing, and it can be shuffled (e.g. when passing `shuffle=True` in `fit()`). A `Sequence` must implement two methods: `__getitem__` and `__len__`. The method `__getitem__` should return a complete batch. If you want to modify your dataset between epochs, you may also implement `on_epoch_end`. Here's a quick example:

from skimage.io import imread
from skimage.transform import resize
from tensorflow.keras.utils import Sequence
import numpy as np

# Here, `filenames` is a list of paths to the images,
# and `labels` are the associated labels.

class CIFAR10Sequence(Sequence):
    def __init__(self, filenames, labels, batch_size):
        self.filenames, self.labels = filenames, labels
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.filenames) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_x = self.filenames[idx * self.batch_size : (idx + 1) * self.batch_size]
        batch_y = self.labels[idx * self.batch_size : (idx + 1) * self.batch_size]
        return np.array([
            resize(imread(filename), (200, 200))
            for filename in batch_x]), np.array(batch_y)

sequence = CIFAR10Sequence(filenames, labels, batch_size)
model.fit(sequence, epochs=10)
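Because a `Sequence` works well with multiprocessing, `fit()` in the TF 2.x releases this guide targets can load batches in parallel worker processes; a minimal sketch using the `workers` and `use_multiprocessing` arguments (the worker count here is arbitrary):

# Load and preprocess batches in 4 parallel worker processes.
model.fit(sequence, epochs=10, workers=4, use_multiprocessing=True)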

With the default settings, the weight of a sample is decided by its frequency in the dataset. There are two methods to weight the data, independent of sample frequency: class weights and sample weights.

Class weights are set by passing a dictionary to the `class_weight` argument of `Model.fit()`. This dictionary maps class indices to the weight that should be used for samples belonging to this class. This can be used to balance classes without resampling, or to train a model that gives more importance to a particular class. For instance, if class "0" is half as represented as class "1" in your data, you could use `Model.fit(..., class_weight={0: 1., 1: 0.5})`.

Here's a NumPy example where we use class weights to give more importance to the correct classification of class #5 (which is the digit "5" in the MNIST dataset):

import numpy as np

class_weight = {
    0: 1.0,
    1: 1.0,
    2: 1.0,
    3: 1.0,
    4: 1.0,
    # Set weight "2" for class "5",
    # making this class 2x more important
    5: 2.0,
    6: 1.0,
    7: 1.0,
    8: 1.0,
    9: 1.0,
}

print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)

Fit with class weight


782/782 [==============================] - 2s 2ms/step - loss: 0.3759 -
sparse_categorical_accuracy: 0.8994

<keras.callbacks.History at 0x1664ff2d0>

For fine-grained control, or if you are not building a classifier, you can use sample weights. When training from NumPy data, pass the `sample_weight` argument to `Model.fit()`; when training from `tf.data` or any other sort of iterator, yield `(input_batch, label_batch, sample_weight_batch)` tuples. A "sample weights" array is an array of numbers that specify how much weight each sample in a batch should have in computing the total loss. When the weights used are ones and zeros, the array can be used as a mask for the loss function (entirely discarding the contribution of certain samples to the total loss). Here's the same example using sample weights instead:

sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0

print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)

Fit with sample weight


782/782 [==============================] - 2s 2ms/step - loss: 0.3855 -
sparse_categorical_accuracy: 0.8971

<keras.callbacks.History at 0x166650090>

Here's a matching `Dataset` example:

sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0

# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))

# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

model = get_compiled_model()
model.fit(train_dataset, epochs=1)

782/782 [==============================] - 2s 2ms/step - loss: 0.3739 - sparse_categorical_accuracy: 0.9020

<keras.callbacks.History at 0x1667b0e10>

In the previous examples, we were considering a model with a single input (a tensor of shape (784,)) and a single output (a prediction tensor of shape (10,)). But what about models that have multiple inputs or outputs?

Consider the following model, which has an image input of shape (32, 32, 3) (that's (height, width, channels)) and a time series input of shape (None, 10) (that's (timesteps, features)). Our model will have two outputs computed from the combination of these inputs: a "score" (of shape (1,)) and a probability distribution over five classes (of shape (5,)):

image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")

x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)

x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)

x = layers.concatenate([x1, x2])

score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)

model = keras.Model(
    inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)

keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)

At compilation time, we can specify different losses for different outputs, by passing the loss functions as a list:

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)

If we only passed a single loss function to the model, the same loss function would be applied to every output (which is not appropriate here). Likewise for metrics:

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
    metrics=[
        [
            keras.metrics.MeanAbsolutePercentageError(),
            keras.metrics.MeanAbsoluteError(),
        ],
        [keras.metrics.CategoricalAccuracy()],
    ],
)


Since we gave names to our output layers, we could also specify per-output losses and metrics via a dict (we recommend explicit names and dicts if you have more than 2 outputs):

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={
        "score_output": keras.losses.MeanSquaredError(),
        "class_output": keras.losses.CategoricalCrossentropy(),
    },
    metrics={
        "score_output": [
            keras.metrics.MeanAbsolutePercentageError(),
            keras.metrics.MeanAbsoluteError(),
        ],
        "class_output": [keras.metrics.CategoricalAccuracy()],
    },
)

It's possible to give different weights to different output-specific losses (for instance, we might wish to privilege the "score" loss in our example, by giving it 2x the importance of the class loss), using the `loss_weights` argument:

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={
        "score_output": keras.losses.MeanSquaredError(),
        "class_output": keras.losses.CategoricalCrossentropy(),
    },
    metrics={
        "score_output": [
            keras.metrics.MeanAbsolutePercentageError(),
            keras.metrics.MeanAbsoluteError(),
        ],
        "class_output": [keras.metrics.CategoricalAccuracy()],
    },
    loss_weights={"score_output": 2.0, "class_output": 1.0},
)

You could also choose not to compute a loss for certain outputs, if these outputs are meant for prediction but not for training:

# List loss version
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[None, keras.losses.CategoricalCrossentropy()],
)

# Or dict loss version
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={"class_output": keras.losses.CategoricalCrossentropy()},
)

Passing data to a multi-input or multi-output model in `fit()` works in a similar way as specifying a loss function in compile: you can pass lists of NumPy arrays (with 1:1 mapping to the outputs that received a loss function) or dicts mapping output names to NumPy arrays:

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)

# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))

# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1)

# Alternatively, fit on dicts
model.fit(
    {"img_input": img_data, "ts_input": ts_data},
    {"score_output": score_targets, "class_output": class_targets},
    batch_size=32,
    epochs=1,
)

4/4 [==============================] - 1s 5ms/step - loss: 14.4474 - score_output_loss: 0.8739 - class_output_loss: 13.5735
4/4 [==============================] - 0s 6ms/step - loss: 12.3280 - score_output_loss: 0.6432 - class_output_loss: 11.6848

<keras.callbacks.History at 0x166bb7490>

Here's the `Dataset` use case: similarly to what we did for NumPy arrays, the `Dataset` should return a tuple of dicts.

train_dataset = tf.data.Dataset.from_tensor_slices(
    (
        {"img_input": img_data, "ts_input": ts_data},
        {"score_output": score_targets, "class_output": class_targets},
    )
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

model.fit(train_dataset, epochs=1)

2/2 [==============================] - 0s 8ms/step - loss: 10.9884 - score_output_loss: 0.5419 - class_output_loss: 10.4466

<keras.callbacks.History at 0x1669ce250>

Callbacks in Keras are objects that are called at different points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, etc.). They can be used to implement behaviors such as doing validation at different points during training, checkpointing the model at regular intervals, or changing the learning rate when training seems to be plateauing. Callbacks can be passed as a list to your call to `fit()`:

model = get_compiled_model()

callbacks = [
    keras.callbacks.EarlyStopping(
        # Stop training when `val_loss` is no longer improving
        monitor="val_loss",
        # "no longer improving" being defined as "no better than 1e-2 less"
        min_delta=1e-2,
        # "no longer improving" being further defined as "for at least 2 epochs"
        patience=2,
        verbose=1,
    )
]
model.fit(
    x_train,
    y_train,
    epochs=20,
    batch_size=64,
    callbacks=callbacks,
    validation_split=0.2,
)

Epoch 1/20
625/625 [==============================] - 2s 2ms/step - loss: 0.3692 -
sparse_categorical_accuracy: 0.8946 - val_loss: 0.2295 - val_sparse_categorical_accuracy: 0.9287
Epoch 2/20
625/625 [==============================] - 1s 2ms/step - loss: 0.1683 -
sparse_categorical_accuracy: 0.9498 - val_loss: 0.1777 - val_sparse_categorical_accuracy: 0.9473
Epoch 3/20
625/625 [==============================] - 1s 2ms/step - loss: 0.1225 -
sparse_categorical_accuracy: 0.9633 - val_loss: 0.1517 - val_sparse_categorical_accuracy: 0.9546
Epoch 4/20
625/625 [==============================] - 1s 2ms/step - loss: 0.0968 -
sparse_categorical_accuracy: 0.9701 - val_loss: 0.1403 - val_sparse_categorical_accuracy: 0.9597
Epoch 5/20
625/625 [==============================] - 1s 2ms/step - loss: 0.0811 -
sparse_categorical_accuracy: 0.9754 - val_loss: 0.1394 - val_sparse_categorical_accuracy: 0.9579
Epoch 6/20
625/625 [==============================] - 1s 2ms/step - loss: 0.0674 -
sparse_categorical_accuracy: 0.9802 - val_loss: 0.1564 - val_sparse_categorical_accuracy: 0.9574
Epoch 6: early stopping

<keras.callbacks.History at 0x166c3fe50>

Many built-in callbacks are available:

ModelCheckpoint: periodically save the model.
EarlyStopping: stop training when training is no longer improving the validation metrics.
TensorBoard: periodically write model logs that can be visualized in TensorBoard.
CSVLogger: streams loss and metrics data to a CSV file.
etc.

You can also create your own callback by extending the base class `keras.callbacks.Callback`. A callback has access to its associated model through the class property `self.model`. Here's a simple example saving a list of per-batch loss values during training:

class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs):
        self.per_batch_losses = []

    def on_batch_end(self, batch, logs):
        self.per_batch_losses.append(logs.get("loss"))


When you're training a model on relatively large datasets, it's crucial to save checkpoints of your model at frequent intervals. The easiest way to achieve this is with the `ModelCheckpoint` callback:

model = get_compiled_model()

callbacks = [
    keras.callbacks.ModelCheckpoint(
        # Path where to save the model.
        # The two parameters below mean that we will overwrite
        # the current checkpoint if and only if
        # the `val_loss` score has improved.
        # The saved model name will include the current epoch.
        filepath="mymodel_{epoch}",
        save_best_only=True,  # Only save a model if `val_loss` has improved.
        monitor="val_loss",
        verbose=1,
    )
]
model.fit(
    x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2
)

Epoch 1/2
617/625 [============================>.] - ETA: 0s - loss: 0.3668 - sparse_categorical_accuracy:
0.8954
Epoch 1: val_loss improved from inf to 0.22688, saving model to mymodel_1
INFO:tensorflow:Assets written to: mymodel_1/assets
625/625 [==============================] - 2s 3ms/step - loss: 0.3645 -
sparse_categorical_accuracy: 0.8960 - val_loss: 0.2269 - val_sparse_categorical_accuracy: 0.9332
Epoch 2/2
622/625 [============================>.] - ETA: 0s - loss: 0.1748 - sparse_categorical_accuracy:
0.9480
Epoch 2: val_loss improved from 0.22688 to 0.17561, saving model to mymodel_2
INFO:tensorflow:Assets written to: mymodel_2/assets
625/625 [==============================] - 2s 2ms/step - loss: 0.1750 -
sparse_categorical_accuracy: 0.9480 - val_loss: 0.1756 - val_sparse_categorical_accuracy: 0.9477

<keras.callbacks.History at 0x15a2f1910>

The `ModelCheckpoint` callback can be used to implement fault tolerance: the ability to restart training from the last saved state of the model in case training gets randomly interrupted. Here's a basic example:

import os

# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)


def make_or_restore_model():
    # Either restore the latest model, or create a fresh one
    # if there is no checkpoint available.
    checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
    if checkpoints:
        latest_checkpoint = max(checkpoints, key=os.path.getctime)
        print("Restoring from", latest_checkpoint)
        return keras.models.load_model(latest_checkpoint)
    print("Creating a new model")
    return get_compiled_model()


model = make_or_restore_model()
callbacks = [
    # This callback saves a SavedModel every 100 batches.
    # We include the training loss in the saved model name.
    keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_dir + "/ckpt-loss={loss:.2f}", save_freq=100
    )
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)


Creating a new model


67/1563 [>.............................] - ETA: 2s - loss: 1.1577 -
sparse_categorical_accuracy: 0.6903INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.96
/assets
170/1563 [==>...........................] - ETA: 4s - loss: 0.7616 -
sparse_categorical_accuracy: 0.7950INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.70
/assets
266/1563 [====>.........................] - ETA: 5s - loss: 0.6075 -
sparse_categorical_accuracy: 0.8356INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.58
/assets
367/1563 [======>.......................] - ETA: 5s - loss: 0.5266 -
sparse_categorical_accuracy: 0.8553INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.51
/assets
499/1563 [========>.....................] - ETA: 4s - loss: 0.4711 -
sparse_categorical_accuracy: 0.8692INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.47
/assets
568/1563 [=========>....................] - ETA: 4s - loss: 0.4457 -
sparse_categorical_accuracy: 0.8762INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.44
/assets
671/1563 [===========>..................] - ETA: 4s - loss: 0.4153 -
sparse_categorical_accuracy: 0.8843INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.41
/assets
793/1563 [==============>...............] - ETA: 3s - loss: 0.3883 -
sparse_categorical_accuracy: 0.8910INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.39
/assets
871/1563 [===============>..............] - ETA: 3s - loss: 0.3720 -
sparse_categorical_accuracy: 0.8948INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.37
/assets
970/1563 [=================>............] - ETA: 2s - loss: 0.3554 -
sparse_categorical_accuracy: 0.8993INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.35
/assets
1095/1563 [====================>.........] - ETA: 2s - loss: 0.3369 -
sparse_categorical_accuracy: 0.9045INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.34
/assets
1199/1563 [======================>.......] - ETA: 1s - loss: 0.3227 -
sparse_categorical_accuracy: 0.9080INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.32
/assets
1297/1563 [=======================>......] - ETA: 1s - loss: 0.3138 -
sparse_categorical_accuracy: 0.9102INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.31
/assets
1395/1563 [=========================>....] - ETA: 0s - loss: 0.3073 -
sparse_categorical_accuracy: 0.9121INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.31
/assets
1473/1563 [===========================>..] - ETA: 0s - loss: 0.3010 -
sparse_categorical_accuracy: 0.9140INFO:tensorflow:Assets written to: ./ckpt/ckpt-loss=0.30
/assets
1563/1563 [==============================] - 8s 5ms/step - loss: 0.2943 -
sparse_categorical_accuracy: 0.9159

<keras.callbacks.History at 0x167035e50>

A common pattern when training deep learning models is to gradually reduce the learning rate as training progresses; this is generally known as "learning rate decay". The decay schedule can be static (fixed in advance, as a function of the current epoch or batch index) or dynamic (responding to the current behavior of the model, in particular the validation loss). You can easily use a static learning rate decay schedule by passing a schedule object as the `learning_rate` argument to your optimizer:

initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)

optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)


Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`, `PolynomialDecay`, and `InverseTimeDecay`.

A dynamic learning rate schedule (for instance, decreasing the learning rate when the validation loss is no longer improving) cannot be achieved with these schedule objects, since the optimizer does not have access to validation metrics. However, callbacks do have access to all metrics, including validation metrics. You can thus achieve this pattern by using a callback that modifies the current learning rate on the optimizer; this is even built in as the `ReduceLROnPlateau` callback, sketched below.
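A minimal sketch of using `ReduceLROnPlateau` (not from the original guide; the `factor` and `patience` values here are arbitrary):

callbacks = [
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss",  # watch the validation loss
        factor=0.5,  # halve the learning rate...
        patience=2,  # ...after 2 epochs with no improvement
    )
]
model = get_compiled_model()
model.fit(
    x_train, y_train, epochs=20, batch_size=64,
    validation_split=0.2, callbacks=callbacks,
)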

The best way to keep an eye on your model during training is to use TensorBoard, a browser-based application that you can run locally. It provides live plots of the loss and metrics for training and evaluation, and (optionally) visualizations of the histograms of your layer activations and 3D visualizations of the embedding spaces learned by your `Embedding` layers. If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line:

tensorboard --logdir=/full_path_to_your_logs

The easiest way to use TensorBoard with a Keras model and the `fit()` method is the `TensorBoard` callback. In the simplest case, just specify where you want the callback to write logs, and you're good to go:

keras.callbacks.TensorBoard(
    log_dir="/full_path_to_your_logs",
    histogram_freq=0,  # How often to log histogram visualizations
    embeddings_freq=0,  # How often to log embedding visualizations
    update_freq="epoch",  # How often to write logs (default: once per epoch)
)

<keras.callbacks.TensorBoard at 0x12fa767d0>…
