AI Coding.ipynb - Colab
#Write a Python function that implements the Softmax and Tanh activation functions.
#The function should accept a NumPy array as input and return the activated values
import numpy as np

def activate(x, activation='softmax'):
    # Apply the requested activation to a NumPy array
    if activation == 'softmax':
        exps = np.exp(x - np.max(x))  # shift by the max for numerical stability
        return exps / np.sum(exps)
    elif activation == 'tanh':
        return np.tanh(x)
    else:
        raise ValueError("Unsupported activation function. Use 'softmax' or 'tanh'.")

# Example:
x = np.array([1.0, 2.0, 3.0])
print(activate(x, activation='softmax'))
print(activate(x, activation='tanh'))
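For this input, the softmax result is a probability vector whose entries sum to 1, while tanh maps each element independently into the range (-1, 1).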
Overfitting Prevention
# Output layer
model.add(Dense(num_classes, activation='softmax'))
return model
# Example usage
input_shape = 20 # Example input shape (number of features)
num_classes = 3 # Example number of output classes
Model: "sequential_3"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense_9 (Dense) │ (None, 64) │ 1,344 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_6 (Dropout) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_10 (Dense) │ (None, 64) │ 4,160 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_7 (Dropout) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_11 (Dense) │ (None, 3) │ 195 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 5,699 (22.26 KB)
Trainable params: 5,699 (22.26 KB)
Non-trainable params: 0 (0.00 B)
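The parameter counts follow directly from the layer sizes: the first Dense layer has 20 × 64 + 64 = 1,344 parameters, the second 64 × 64 + 64 = 4,160, and the output layer 64 × 3 + 3 = 195, for a total of 5,699; the Dropout layers add no parameters.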
L2 Regularization: This is added to the Dense layers using the kernel_regularizer parameter. l2(0.01) means that a penalty proportional to the square of the weights is added to the loss function.
Dropout: This is applied using the Dropout layer, which randomly sets a fraction of input units to 0 during training, helping to prevent overfitting. A dropout rate of 0.5 is used, so half of the units are dropped at each training step.
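A minimal sketch of the model builder consistent with the summary above and the l2(0.01)/dropout-0.5 settings just described; the ReLU activations and the Adam/categorical cross-entropy compilation step are assumptions, since they are not visible in the truncated cell:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Input
from keras.regularizers import l2

def create_model(input_shape, num_classes):
    model = Sequential()
    model.add(Input(shape=(input_shape,)))
    # Hidden layers with L2 weight penalties; ReLU is an assumption
    model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.5))  # dropout rate of 0.5, as described above
    model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
    model.add(Dropout(0.5))
    # Output layer
    model.add(Dense(num_classes, activation='softmax'))
    # Optimizer and loss are assumed (Adam + categorical cross-entropy)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# Example usage
model = create_model(input_shape=20, num_classes=3)
model.summary()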
Autoencoder Implementation
#Write a simple autoencoder using Keras. The autoencoder should consist of an encoder that reduces
#the dimensionality of the input and a decoder that reconstructs the input.
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense

# Example usage
input_shape = 20    # Number of input features
encoding_dim = 10   # Dimensionality of the encoded representation

# Encoder: compress the input down to encoding_dim features
input_layer = Input(shape=(input_shape,))
encoded = Dense(encoding_dim, activation='relu')(input_layer)
# Decoder: reconstruct the original input from the encoding
# (activation and loss choices are assumptions; the original cell is truncated)
decoded = Dense(input_shape, activation='sigmoid')(encoded)

# Autoencoder model
autoencoder = Model(input_layer, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()
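The training log below shows 4 batches per epoch; a minimal fit call consistent with that, assuming 128 synthetic samples and a batch size of 32 (the actual training data is not shown in the printout):

# Hypothetical training data: the autoencoder learns to reconstruct its input,
# so the same array is used as both input and target
x_train = np.random.rand(128, input_shape)
autoencoder.fit(x_train, x_train, epochs=50, batch_size=32)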
Model: "functional_20"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ input_layer_4 (InputLayer) │ (None, 20) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_12 (Dense) │ (None, 10) │ 210 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_13 (Dense) │ (None, 20) │ 220 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 430 (1.68 KB)
Trainable params: 430 (1.68 KB)
Non-trainable params: 0 (0.00 B)
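The counts again follow from the layer sizes: the encoder Dense layer has 20 × 10 + 10 = 210 parameters and the decoder 10 × 20 + 20 = 220, for a total of 430.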
Epoch 1/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step - loss: 0.7316
Epoch 2/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7258
Epoch 3/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7217
Epoch 4/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7174
Epoch 5/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7143
Epoch 6/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7107
Epoch 7/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7083
Epoch 8/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.7067
Epoch 9/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7049
Epoch 10/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.7031
Epoch 11/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.7021
Epoch 12/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.7006
Epoch 13/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6996
Epoch 14/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6986
Epoch 15/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6977
Epoch 16/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 0.6966
Epoch 17/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.6957
Epoch 18/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6950
Epoch 19/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6943
Epoch 20/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6938
Epoch 21/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6928
Epoch 22/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6923
Epoch 23/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6918
Epoch 24/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.6915
Epoch 25/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.6908
Epoch 26/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6905
Epoch 27/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6901
Epoch 28/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6896
Epoch 29/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6888
Epoch 30/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6887
Epoch 31/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 0.6879
Epoch 32/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.6874
Epoch 33/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6869
Epoch 34/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.6862
Epoch 35/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6858
Epoch 36/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6853
Epoch 37/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6847
Epoch 38/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6842
Epoch 39/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6832
Epoch 40/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6828
Epoch 41/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6824
Epoch 42/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6817
Epoch 43/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6810
Epoch 44/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6804
Epoch 45/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6799
Epoch 46/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6794
Epoch 47/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6787
Epoch 48/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 0.6784
Epoch 49/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.6775
Epoch 50/50
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.6769
Batch Normalization
#Explain what batch normalization is and how it can be implemented in a Keras model.
#Include a code snippet demonstrating its use
"""Batch normalization is a technique used to improve the training of deep neural networks.
It helps to stabilize and accelerate training by normalizing the inputs of each layer.
The main goals of batch normalization are:
1)Reducing Internal Covariate Shift: By normalizing the input to each layer,
batch normalization helps mitigate the changes in the distribution of layer inputs during training.
2)Improving Gradient Flow: Normalizing inputs can help gradients flow better through the network, allowing for faster convergence.
3)Reducing Sensitivity to Initialization: It makes the network less sensitive to the initialization of weights,
which can lead to better performance.
4)Regularization Effect: It can provide some regularization, potentially reducing the need for other techniques like dropout."""
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Activation
from keras.optimizers import Adam
# Output layer
model.add(Dense(num_classes, activation='softmax'))
return model
# Example usage
input_shape = 20 # Number of input features
num_classes = 3 # Number of output classes
model.fit(x_train, y_train, epochs=50, batch_size=32)
Model: "sequential_4"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense_14 (Dense) │ (None, 64) │ 1,344 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization │ (None, 64) │ 256 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ activation (Activation) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_15 (Dense) │ (None, 64) │ 4,160 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_1 │ (None, 64) │ 256 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ activation_1 (Activation) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_16 (Dense) │ (None, 3) │ 195 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 6,211 (24.26 KB)
Trainable params: 5,955 (23.26 KB)
Non-trainable params: 256 (1.00 KB)
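Each BatchNormalization layer adds 4 × 64 = 256 parameters (gamma, beta, moving mean, moving variance). The moving statistics are not trained, so 2 × 128 = 256 parameters appear as non-trainable, and the trainable count is 5,955 rather than the 6,211 total.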
Epoch 1/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.3153 - loss: 1.2886
Epoch 2/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.3969 - loss: 1.0969
Epoch 3/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.4690 - loss: 1.0479
Epoch 4/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.5073 - loss: 1.0251
Epoch 5/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.5042 - loss: 0.9818
Epoch 6/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.5539 - loss: 0.9582
Epoch 7/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.5578 - loss: 0.9148
Epoch 8/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.5747 - loss: 0.9105
Epoch 9/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6173 - loss: 0.8834
Epoch 10/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.6117 - loss: 0.8953
Epoch 11/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6097 - loss: 0.8608
Epoch 12/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6507 - loss: 0.8380
Epoch 13/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.6676 - loss: 0.8249
Epoch 14/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.6724 - loss: 0.7916
Epoch 15/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6589 - loss: 0.8023
Epoch 16/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6926 - loss: 0.7753
Epoch 17/50
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6629 - loss: 0.7715
Explanation of the Code:
Model Definition: A simple sequential model is defined with two hidden layers.
Batch Normalization: The BatchNormalization layer is added after each dense layer, before the activation function. This normalizes the output of the dense layer.
Activation Function: The ReLU activation is applied after the batch normalization.
Output Layer: The final layer uses a softmax activation function for multi-class classification.
Compilation: The model is compiled using the Adam optimizer and categorical cross-entropy loss.
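A minimal sketch of the model just described, consistent with the summary above (two Dense(64) hidden layers, BatchNormalization before each ReLU activation, a softmax output, Adam with categorical cross-entropy); the function name and the synthetic data are illustrative, and anything not visible in the truncated printout is an assumption:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Activation, Input
from keras.optimizers import Adam

def create_bn_model(input_shape, num_classes):
    model = Sequential()
    model.add(Input(shape=(input_shape,)))
    # Dense -> BatchNormalization -> Activation ordering, as described above
    model.add(Dense(64))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(64))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Output layer
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# Example usage with synthetic data (the real training data is not shown;
# 1,024 samples give the 32 batches per epoch seen in the log above)
model = create_bn_model(input_shape=20, num_classes=3)
x_train = np.random.rand(1024, 20)
y_train = np.eye(3)[np.random.randint(0, 3, size=1024)]  # one-hot labels
model.fit(x_train, y_train, epochs=50, batch_size=32)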