Practice - DL - Ipynb - Colaboratory
import numpy as np
from keras.datasets import mnist
import matplotlib.pyplot as plt
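The cell that loads the data is not shown in this export; a minimal sketch, assuming the standard Keras MNIST loader and the variable names used below (train_lab/test_lab are an assumption):

# Assumed loading cell: MNIST images and labels (label variable names are an assumption)
(train_img, train_lab), (test_img, test_lab) = mnist.load_data()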
print(train_img.shape, test_img.shape)
train_img = train_img/255.0
test_img = test_img/255.0
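The model-building and training cells did not survive the export. Given the 1875-step epochs (60,000 images at the default batch size of 32) and the accuracies below, something like the following dense network was presumably used; the layer sizes are an assumption:

from keras.models import Sequential
from keras.layers import Flatten, Dense

# Assumed model: flatten each 28x28 image and classify with one hidden layer
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation='relu'))      # hidden width is an assumption
model.add(Dense(10, activation='softmax'))    # 10 digit classes
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_img, train_lab, epochs=3)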
Epoch 1/3
1875/1875 [==============================] - 12s 6ms/step - loss: 0.1820 - accuracy: 0.9437
Epoch 2/3
1875/1875 [==============================] - 14s 7ms/step - loss: 0.0814 - accuracy: 0.9745
Epoch 3/3
1875/1875 [==============================] - 12s 6ms/step - loss: 0.0572 - accuracy: 0.9821
<keras.callbacks.History at 0x7fafa5fd49d0>
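The evaluation cell is not shown either; loss_acc presumably came from model.evaluate on the test split:

# Assumed evaluation cell
loss_acc = model.evaluate(test_img, test_lab)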
print(loss_acc[0], loss_acc[1])
0.07816269248723984 0.9775000214576721
Exp 5
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Flatten, Dense, MaxPooling2D, Dropout, Conv2D

from keras.datasets import mnist
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation = 'relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, kernel_size=(3,3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = 'softmax'))
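The compile-and-fit cell for this CNN is missing from the export; a minimal sketch, assuming sparse categorical cross-entropy and the two epochs shown below (the channel axis has to be added to match the Conv2D input_shape):

# Assumed compile-and-fit cell for the CNN
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(np.expand_dims(train_img, -1), train_lab, epochs=2)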
Epoch 1/2
1875/1875 [==============================] - 115s 61ms/step - loss: 0.4081 - accuracy: 0.9020
Epoch 2/2
1875/1875 [==============================] - 116s 62ms/step - loss: 0.1403 - accuracy: 0.9607
<keras.callbacks.History at 0x7fafa7f72c70>
model.save('mymodel.h5')

import keras
noor = keras.models.load_model('mymodel.h5')

x = np.expand_dims(test_img[3], axis = 0)
pred = noor.predict(x)
plt.imshow(test_img[3], cmap = 'gray_r')
print(np.argmax(pred[0]))
1/1 [==============================] - 0s 27ms/step
0
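The next cells query a pretrained ImageNet classifier, but the cell that loads it is not shown. The 138,357,544-parameter summary below and the later decode_predictions call suggest VGG16; a sketch under that assumption:

# Assumed cell: load pretrained VGG16 (matches the parameter count in the summary below)
from keras.applications.vgg16 import VGG16, decode_predictions
model = VGG16(weights='imagenet')
model.summary()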
=================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
_________________________________________________________________
display(img)
import cv2
from google.colab.patches import cv2_imshow

image = cv2.imread('./images.jpg')

cv2_imshow(image)

cv2_imshow(image)

print(image.shape)
print(image)

x = np.expand_dims(image, axis = 0)

print(x.shape)
print(x)

pred = model.predict(x)

print(pred)

print(np.argmax(pred[0]))
235

p = decode_predictions(pred)
print(p)

print(p[0])

print(np.argmax(pred))
235
Exp 8
import numpy as np
samples = {'jupyter has 79 knows moons .', 'Neptune has 14 confirmed moons ! '}
token_index = {}
counter = 0
for sample in samples:
    for considered_word in sample.split():
        if considered_word not in token_index:
            token_index.update({considered_word : counter + 1})
            counter = counter + 1
token_index
{'jupyter': 1,
'has': 2,
'79': 3,
'knows': 4,
'moons': 5,
'.': 6,
'Neptune': 7,
'14': 8,
'confirmed': 9,
'!': 10}
max_length = 6
results = np.zeros(shape = (len(samples), max_length, max(token_index.values())+1))
print(results)
[[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]]
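The cell that fills results (and printed each token's index) did not survive the export; a minimal sketch of the one-hot loop, with the print format an assumption:

# Assumed cell: set a 1 at (sample, position, token index) for each word
for i, sample in enumerate(samples):
    for j, word in enumerate(sample.split()[:max_length]):
        index = token_index.get(word)
        results[i, j, index] = 1.0
        print(index, word)   # exact print format in the original is an assumption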
1 1 jupyter
2 2 has
3 3 79
4 4 knows
5 5 moons
6 6 .
7 7 Neptune
2 2 has
8 8 14
9 9 confirmed
5 5 moons
10 10 !
print(results)
[[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]]
[[0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]]
Exp 2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.datasets import imdb
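The cell that loads IMDB is not shown; the outputs below (50,000 reviews in total, '25000 25000', vectors of length 10,000) suggest the train and test splits were merged into one pool, so a sketch under that assumption (the intermediate names are hypothetical):

# Assumed loading cell: top-10,000-word IMDB reviews, train and test pools merged
(train_data, train_lab), (test_data, test_lab) = imdb.load_data(num_words=10000)
data = np.concatenate((train_data, test_data), axis=0)
lab = np.concatenate((train_lab, test_lab), axis=0)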
[list([1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25
list([1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634,
list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112,
...
list([1, 11, 6, 230, 245, 6401, 9, 6, 1225, 446, 2, 45, 2174, 84, 8322, 4007, 21, 4, 912, 84,
list([1, 1446, 7079, 69, 72, 3305, 13, 610, 930, 8, 12, 582, 23, 5, 16, 484, 685, 54, 349, 11,
list([1, 17, 6, 194, 337, 7, 4, 204, 22, 45, 254, 8, 106, 14, 123, 4, 2, 270, 2, 5, 2, 2, 732,
[1 0 0 ... 0 1 0]
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100,
[1, 591, 202, 14, 31, 6, 717, 10, 10, 2, 2, 5, 4, 360, 7, 4, 177, 5760, 394, 354, 4, 123, 9, 10
218
68
0
25000 25000
print(data)
[list([1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25
list([1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634,
list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112,
...
list([1, 13, 1408, 15, 8, 135, 14, 9, 35, 32, 46, 394, 20, 62, 30, 5093, 21, 45, 184, 78, 4, 1
list([1, 11, 119, 241, 9, 4, 840, 20, 12, 468, 15, 94, 3684, 562, 791, 39, 4, 86, 107, 8, 97,
list([1, 6, 52, 7465, 430, 22, 9, 220, 2594, 8, 28, 2, 519, 3227, 6, 769, 15, 47, 6, 3482, 406
len(data[0])
218
len(data)
50000
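vecotrize is called below but its definition is not shown at this point in the export; a sketch that matches the definition appearing later in Exp 3 (multi-hot encoding each review into a 10,000-dimensional vector):

# Multi-hot encode: one row per review, a 1 at every word index it contains
def vecotrize(seq, dims = 10000):
    results = np.zeros((len(seq), dims))
    for i, seqen in enumerate(seq):
        results[i, seqen] = 1
    return results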
d = vecotrize(data)
# print(d[0])

print(len(d[0]))
10000
labs = np.array(lab).astype("float32")

test_x = d[:10000]
test_y = labs[:10000]
train_x = d[10000:]
train_y = labs[10000:]

model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(10000, )))
model.add(Dropout(0.3, noise_shape = None, seed = None))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2, noise_shape = None, seed = None))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss = 'binary_crossentropy', metrics=['accuracy'])
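The training cell is missing; the 80-step epochs over the 40,000 training reviews suggest a batch size of 500, so a sketch under that assumption:

# Assumed training cell (batch size of 500 gives the 80 steps per epoch seen below)
history = model.fit(train_x, train_y,
                    epochs=3,
                    batch_size=500,
                    validation_data=(test_x, test_y))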
Epoch 1/3
80/80 [==============================] - 3s 33ms/step - loss: 0.0530 - accuracy: 0.9818 - val_l
Epoch 2/3
80/80 [==============================] - 3s 34ms/step - loss: 0.0383 - accuracy: 0.9872 - val_l
Epoch 3/3
80/80 [==============================] - 3s 35ms/step - loss: 0.0327 - accuracy: 0.9892 - val_l
history_dict = history.history
history_dict

loss_val = history_dict['loss']
loss_valid = history_dict['val_loss']
epochs = range(1, len(loss_val)+1)
plt.plot(epochs, loss_val, 'bo', label = 'Training loss')
plt.plot(epochs, loss_valid, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()

acc_val = history_dict['accuracy']
acc_valid = history_dict['val_accuracy']
epochs = range(1, len(acc_val)+1)
plt.plot(epochs, acc_val, 'bo', label = 'Training accuracy')
plt.plot(epochs, acc_valid, 'b', label = 'Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
Exp 3
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.datasets import reuters
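The Reuters loading cell is not shown; a minimal sketch, assuming the standard 10,000-word vocabulary cap and the variable names used below:

# Assumed loading cell: Reuters newswires, 46 topics, top 10,000 words
(train_data, train_lab), (test_data, test_lab) = reuters.load_data(num_words=10000)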
print(train_data[0])
[1, 2, 2, 8, 43, 10, 447, 5, 25, 207, 270, 5, 3095, 111, 16, 369, 186, 90, 67, 7, 89, 5, 19, 10
word_index = reuters.get_word_index()

print(word_index)
{'mdbl': 10996, 'fawc': 16260, 'degussa': 12089, 'woods': 8803, 'hanging': 13796, 'localized':
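reverse_word_index is printed next, but the cell that builds it is missing; it is presumably the usual inversion of word_index:

# Assumed cell: map integer ids back to words
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])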
print(reverse_word_index)
{10996: 'mdbl', 16260: 'fawc', 12089: 'degussa', 8803: 'woods', 13796: 'hanging', 20672: 'local
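The cell that builds decoded_newswire is also missing; a sketch using the usual offset of 3 (indices 0-2 are reserved) and '?' for out-of-vocabulary ids:

# Assumed cell: decode the first training newswire back to text
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])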
decoded_newswire

print(reverse_word_index[5])
said

decoded_newswire
[decoded newswire text for train_data[0], garbled beyond recovery in this export]

def vecotrize(seq, dims = 10000):
    results = np.zeros((len(seq), dims))
    for i, seqen in enumerate(seq):
        # print(seqen)
        results[i, seqen] = 1
        # print(results)
    return results
x_train = vecotrize(train_data)
x_test = vecotrize(test_data)
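one_hot is used below but not defined in the export; a sketch consistent with the 46-way softmax output layer:

# Assumed cell: one-hot encode the 46 topic labels
def one_hot(labels, dims = 46):
    results = np.zeros((len(labels), dims))
    for i, label in enumerate(labels):
        results[i, label] = 1
    return results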
y_train = one_hot(train_lab)
y_test = one_hot(test_lab)
model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(10000, )))
model.add(Dropout(0.3, noise_shape = None, seed = None))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2, noise_shape = None, seed = None))
model.add(Dense(50, activation='relu'))
model.add(Dense(46, activation='softmax'))
model.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics=['accuracy'])

x_val = x_train[:1000]
partial_xtrain = x_train[1000:]
partial_ytrain = y_train[1000:]
y_val = y_train[:1000]
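The training cell is missing; the 16-step epochs over the roughly 8,000 remaining newswires suggest a batch size around 512, so a sketch under that assumption:

# Assumed training cell (batch size of 512 gives the 16 steps per epoch seen below)
history = model.fit(partial_xtrain, partial_ytrain,
                    epochs=3,
                    batch_size=512,
                    validation_data=(x_val, y_val))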
Epoch 1/3
16/16 [==============================] - 2s 65ms/step - loss: 3.3115 - accuracy: 0.3224 - val_l
Epoch 2/3
16/16 [==============================] - 1s 47ms/step - loss: 2.2373 - accuracy: 0.4901 - val_l
Epoch 3/3
16/16 [==============================] - 1s 47ms/step - loss: 1.6899 - accuracy: 0.5992 - val_l
history_dict = history.history
history_dict

acc_val = history_dict['accuracy']
acc_valid = history_dict['val_accuracy']
epochs = range(1, len(acc_val)+1)
plt.plot(epochs, acc_val, 'bo', label = 'Training accuracy')
plt.plot(epochs, acc_valid, 'b', label = 'Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()