AI and ML LAB

Download as pdf or txt
Download as pdf or txt
You are on page 1of 9

In 

[1]:
class Graph:

def __init__(self,adjac_lis):

self.adjac_lis = adjac_lis

def get_neighbours(self,v):

return self.adjac_lis[v]

def h(self,n):

H={'A':1,'B':1, 'C':1,'D':1}

return H[n]

def a_star_algorithm(self,start,stop):

open_lst = set([start])

closed_lst = set([])

dist ={}

dist[start] = 0

prenode ={}

prenode[start] =start

while len(open_lst)>0:

n = None

for v in open_lst:

if n==None or dist[v]+self.h(v)<dist[n]+self.h(n):

n=v;

if n==None:

print("path doesnot exist")

return None

if n==stop:

reconst_path=[]

while prenode[n]!=n:

reconst_path.append(n)

n = prenode[n]

reconst_path.append(start)

reconst_path.reverse()

print("path found:{}".format(reconst_path))

return reconst_path

for (m,weight) in self.get_neighbours(n):

if m not in open_lst and m not in closed_lst:

open_lst.add(m)

prenode[m] = n

dist[m] = dist[n]+weight

else:

if dist[m]>dist[n]+weight:

dist[m] = dist[n]+weight

prenode[m]=n

if m in closed_lst:

closed_lst.remove(m)

open_lst.add(m)

open_lst.remove(n)

closed_lst.add(n)

print("Path doesnot exist")

return None

adjac_lis ={'A':[('B',1),('C',3),('D',7)],'B':[('D',5)],'C':[('D',12)]}

graph1=Graph(adjac_lis)

graph1.a_star_algorithm('A', 'D')

path found:['A', 'B', 'D']

['A', 'B', 'D']


Out[1]:

In [2]:
def recAOStar(n):

global finalPath

print("Expanding Node:",n)

and_nodes = []

or_nodes =[]

if(n in allNodes):

if 'AND' in allNodes[n]:

and_nodes = allNodes[n]['AND']

if 'OR' in allNodes[n]:

or_nodes = allNodes[n]['OR']
if len(and_nodes)==0 and len(or_nodes)==0:

return

solvable = False

marked ={}

while not solvable:

if len(marked)==len(and_nodes)+len(or_nodes):

min_cost_least,min_cost_group_least = least_cost_group(and_nodes,or_node
solvable = True

change_heuristic(n,min_cost_least)

optimal_child_group[n] = min_cost_group_least

continue

min_cost,min_cost_group = least_cost_group(and_nodes,or_nodes,marked)

is_expanded = False

if len(min_cost_group)>1:

if(min_cost_group[0] in allNodes):

is_expanded = True

recAOStar(min_cost_group[0])

if(min_cost_group[1] in allNodes):

is_expanded = True

recAOStar(min_cost_group[1])

else:

if(min_cost_group in allNodes):

is_expanded = True

recAOStar(min_cost_group)

if is_expanded:

min_cost_verify, min_cost_group_verify = least_cost_group(and_nodes, or_


if min_cost_group == min_cost_group_verify:

solvable = True

change_heuristic(n, min_cost_verify)

optimal_child_group[n] = min_cost_group

else:

solvable = True

change_heuristic(n, min_cost)

optimal_child_group[n] = min_cost_group

marked[min_cost_group]=1

return heuristic(n)

def least_cost_group(and_nodes, or_nodes, marked):

node_wise_cost = {}

for node_pair in and_nodes:

if not node_pair[0] + node_pair[1] in marked:

cost = 0

cost = cost + heuristic(node_pair[0]) + heuristic(node_pair[1]) + 2

node_wise_cost[node_pair[0] + node_pair[1]] = cost

for node in or_nodes:

if not node in marked:

cost = 0

cost = cost + heuristic(node) + 1

node_wise_cost[node] = cost

min_cost = 999999

min_cost_group = None

for costKey in node_wise_cost:

if node_wise_cost[costKey] < min_cost:

min_cost = node_wise_cost[costKey]

min_cost_group = costKey

return [min_cost, min_cost_group]

def heuristic(n):

return H_dist[n]

def change_heuristic(n, cost):

H_dist[n] = cost

return

def print_path(node):

print(optimal_child_group[node], end="")

node = optimal_child_group[node]

if len(node) > 1:

if node[0] in optimal_child_group:

print("->", end="")

print_path(node[0])

if node[1] in optimal_child_group:

print("->", end="")

print_path(node[1])

else:

if node in optimal_child_group:

print("->", end="")

print_path(node)

H_dist = {

'A': -1,

'B': 4,

'C': 2,

'D': 3,

'E': 6,

'F': 8,

'G': 2,

'H': 0,

'I': 0,

'J': 0

allNodes = {

'A': {'AND': [('C', 'D')], 'OR': ['B']},

'B': {'OR': ['E', 'F']},

'C': {'OR': ['G'], 'AND': [('H', 'I')]},

'D': {'OR': ['J']}

optimal_child_group = {}

optimal_cost = recAOStar('A')

print('Nodes which gives optimal cost are')

print_path('A')

print('\nOptimal Cost is :: ', optimal_cost)

Expanding Node: A

Expanding Node: B

Expanding Node: C

Expanding Node: D

Nodes which gives optimal cost are


CD->HI->J

Optimal Cost is :: 5

In [4]:
import csv

with open("ws.csv") as f:

csv_file=csv.reader(f)

data=list(csv_file)

s=data[0][:-1]

g=[['?' for i in range(len(s))]for j in range(len(s))]

for i in range(len(s)):

for j in range(len(s)):

g[i][j]='?'

for i in data:

if i[-1]=="Yes":

for j in range(len(s)):

if i[j]!=s[j]:

s[j]='?'

g[j][j]='?'

elif i[-1]=="No":

for j in range(len(s)):

if i[j]!=s[j]:

g[j][j]=s[j]

else:

g[j][j]="?"

print("\nSteps of Candidate Elimination Algorithm",data.index(i)+1)

print(s)

print(g)

gh=[]

for i in g:

for j in i:

if j!='?':

gh.append(i)

break

print("\nFinal specific hypothesis:\n",s)

print("\nFinal general hypothesis:\n",gh)

Steps of Candidate Elimination Algorithm 1

['Sunny', 'Warm', 'Normal', 'Strong', 'Warm', 'Same']

[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?',
'?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'],
['?', '?', '?', '?', '?', '?']]

Steps of Candidate Elimination Algorithm 2

['Sunny', 'Warm', '?', 'Strong', 'Warm', 'Same']

[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?',
'?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'],
['?', '?', '?', '?', '?', '?']]

Steps of Candidate Elimination Algorithm 3

['Sunny', 'Warm', '?', 'Strong', 'Warm', 'Same']

[['Sunny', '?', '?', '?', '?', '?'], ['?', 'Warm', '?', '?', '?', '?'], ['?', '?',
'?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'],
['?', '?', '?', '?', '?', 'Same']]

Steps of Candidate Elimination Algorithm 4

['Sunny', 'Warm', '?', 'Strong', '?', '?']

[['Sunny', '?', '?', '?', '?', '?'], ['?', 'Warm', '?', '?', '?', '?'], ['?', '?',
'?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'],
['?', '?', '?', '?', '?', '?']]

Final specific hypothesis:

['Sunny', 'Warm', '?', 'Strong', '?', '?']

Final general hypothesis:

[['Sunny', '?', '?', '?', '?', '?'], ['?', 'Warm', '?', '?', '?', '?']]

In [5]:
#ID3 ALGORITHM

import pandas as pd

import math

import numpy as np

data = pd.read_csv("3-dataset.csv")

features = [feat for feat in data]

features.remove("answer")

class Node:

def __init__(self):

self.children = []

self.value = ""

self.isLeaf = False

self.pred = ""

def entropy(examples):

pos = 0.0

neg = 0.0

for _, row in examples.iterrows():

if row["answer"] == "yes":

pos += 1

else:

neg += 1

if pos == 0.0 or neg == 0.0:

return 0.0

else:

p = pos / (pos + neg)

n = neg / (pos + neg)

return -(p * math.log(p, 2) + n * math.log(n, 2))

def info_gain(examples, attr):

uniq = np.unique(examples[attr])

#print ("\n",uniq)

gain = entropy(examples)

#print ("\n",gain)

for u in uniq:

subdata = examples[examples[attr] == u]
#print ("\n",subdata)

sub_e = entropy(subdata)

gain -= (float(len(subdata)) / float(len(examples))) * sub_e

#print ("\n",gain)
return gain

def ID3(examples, attrs):

root = Node()

max_gain = 0

max_feat = ""

for feature in attrs:

#print ("\n",examples)

gain = info_gain(examples, feature)

if gain > max_gain:

max_gain = gain

max_feat = feature

root.value = max_feat

#print ("\nMax feature attr",max_feat)

uniq = np.unique(examples[max_feat])

#print ("\n",uniq)

for u in uniq:

#print ("\n",u)

subdata = examples[examples[max_feat] == u]

#print ("\n",subdata)

if entropy(subdata) == 0.0:

newNode = Node()

newNode.isLeaf = True
newNode.value = u

newNode.pred = np.unique(subdata["answer"])

root.children.append(newNode)

else:

dummyNode = Node()

dummyNode.value = u

new_attrs = attrs.copy()

new_attrs.remove(max_feat)

child = ID3(subdata, new_attrs)

dummyNode.children.append(child)

root.children.append(dummyNode)

return root

def printTree(root: Node, depth=0):

for i in range(depth):

print("\t", end="")

print(root.value, end="")

if root.isLeaf:

print(" -> ", root.pred)

print()

for child in root.children:

printTree(child, depth + 1)

root = ID3(data, features)

printTree(root)

outlook

overcast -> ['yes']

rain

wind

strong -> ['no']

weak -> ['yes']

sunny

humidity

high -> ['no']

normal -> ['yes']

In [7]:
#BACKPROPAGATION ALGORITHM

import numpy as np

def sigmoid (x):

return 1/(1 + np.exp(-x))

def sigmoid_derivative(x):

return x * (1 - x)

inputs = np.array([[0,0],[0,1],[1,0],[1,1]])

expected_output = np.array([[0],[1],[1],[0]])

epochs = 10000

lr = 0.1

inputLayerNeurons, hiddenLayerNeurons, outputLayerNeurons = 2,2,1

hidden_weights = np.random.uniform(size=(inputLayerNeurons,hiddenLayerNeurons))

hidden_bias =np.random.uniform(size=(1,hiddenLayerNeurons))

output_weights = np.random.uniform(size=(hiddenLayerNeurons,outputLayerNeurons))

output_bias = np.random.uniform(size=(1,outputLayerNeurons))

print("Initial hidden weights: ",end='')

print(*hidden_weights)

print("Initial hidden biases: ",end='')

print(*hidden_bias)

print("Initial output weights: ",end='')

print(*output_weights)

print("Initial output biases: ",end='')

print(*output_bias)

for _ in range(epochs):

hidden_layer_activation = np.dot(inputs,hidden_weights)

hidden_layer_activation += hidden_bias

hidden_layer_output = sigmoid(hidden_layer_activation)

output_layer_activation = np.dot(hidden_layer_output,output_weights)

output_layer_activation += output_bias

predicted_output = sigmoid(output_layer_activation)

error = expected_output - predicted_output

d_predicted_output = error * sigmoid_derivative(predicted_output)

error_hidden_layer = d_predicted_output.dot(output_weights.T)

d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)

output_weights += hidden_layer_output.T.dot(d_predicted_output) * lr

output_bias += np.sum(d_predicted_output,axis=0,keepdims=True) * lr

hidden_weights += inputs.T.dot(d_hidden_layer) * lr

hidden_bias += np.sum(d_hidden_layer,axis=0,keepdims=True) * lr

print("Final hidden weights: ",end='')

print(*hidden_weights)

print("Final hidden bias: ",end='')

print(*hidden_bias)

print("Final output weights: ",end='')

print(*output_weights)

print("Final output bias: ",end='')

print(*output_bias)

print("\nOutput from neural network after 10,000 epochs: ",end='')

print(*predicted_output)

Initial hidden weights: [0.0007845 0.08723661] [0.25827275 0.21942619]

Initial hidden biases: [0.37147298 0.1673565 ]

Initial output weights: [0.01280043] [0.60209981]

Initial output biases: [0.17766749]

Final hidden weights: [2.27492892 5.05142459] [2.34666797 5.72151308]

Final hidden bias: [-3.35791635 -1.73404479]

Final output weights: [-5.70192429] [5.61712174]

Final output bias: [-2.44035624]

Output from neural network after 10,000 epochs: [0.14322429] [0.825314] [0.82306587]
[0.21951972]

In [8]:
#NAVIE BAYESIAN CLASSIFIER

import pandas as pd

# from sklearn.model_selection import train_test_split

from sklearn.model_selection import train_test_split

#to model the Gaussian Navie Bayes Classifier

from sklearn.naive_bayes import GaussianNB

#to calculate the accuracy score of the model

from sklearn.metrics import accuracy_score

DB = pd.read_csv('tennis1.csv')

print(DB.columns)

len(DB)

DB.head(3)

X = DB.values[:,0:4] #Features

Y = DB.values[:,4] #Target

#split the data into train and test

X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.30,random_state=10)
#implement Gaussian Naive Bayes

clf = GaussianNB()

clf.fit(X_train,Y_train)

Y_pred = clf.predict(X_test)

#accuracy of the GNB

accuracy_score(Y_test,Y_pred,normalize=True)

Index(['Day', 'Temperature', 'Humidity', 'Windy', 'Play'], dtype='object')

1.0
Out[8]:
In [9]:
from sklearn import datasets

from sklearn.cluster import KMeans

from sklearn.model_selection import train_test_split

from sklearn import metrics

iris = datasets.load_iris()

X_train,X_test,Y_train,Y_test = train_test_split(iris.data,iris.target)

model = KMeans(n_clusters=3)

model.fit(X_train,Y_train)

model.score

acc1=metrics.accuracy_score(Y_test,model.predict(X_test))

print(acc1)

from sklearn.mixture import GaussianMixture

model2 = GaussianMixture(n_components=3)

model2.fit(X_train,Y_train)

model2.score

metrics

acc2=metrics.accuracy_score(Y_test,model.predict(X_test))

print(acc2)

0.5526315789473685

0.5526315789473685

In [10]:
from sklearn.model_selection import train_test_split

from sklearn.neighbors import KNeighborsClassifier

from sklearn.metrics import classification_report,confusion_matrix

from sklearn import datasets

iris=datasets.load_iris()

iris_data=iris.data

iris_labels=iris.target

x_train,x_test,y_train,y_test=train_test_split(iris_data,iris_labels,test_size=0.30)
classifier=KNeighborsClassifier(n_neighbors=5)

classifier.fit(x_train,y_train)

y_pred=classifier.predict(x_test)

print('Confusion matrix is as follows')

print(confusion_matrix(y_test,y_pred))

print('Accuracy Matrics')

print(classification_report(y_test,y_pred))

Confusion matrix is as follows

[[16 0 0]

[ 0 14 0]

[ 0 0 15]]

Accuracy Matrics

precision recall f1-score support

0 1.00 1.00 1.00 16

1 1.00 1.00 1.00 14

2 1.00 1.00 1.00 15

accuracy 1.00 45

macro avg 1.00 1.00 1.00 45

weighted avg 1.00 1.00 1.00 45

In [11]:
from math import ceil

import numpy as np

from scipy import linalg

def lowess(x, y, f, iterations):

n = len(x)

r = int(ceil(f * n))

h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]

w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)

w = (1 - w ** 3) ** 3

yest = np.zeros(n)

delta = np.ones(n)

for iteration in range(iterations):

for i in range(n):

weights = delta * w[:, i]

b = np.array([np.sum(weights * y), np.sum(weights * y * x)])

A = np.array([[np.sum(weights), np.sum(weights * x)],[np.sum(weights * x


beta = linalg.solve(A, b)

yest[i] = beta[0] + beta[1] * x[i]

residuals = y - yest

s = np.median(np.abs(residuals))

delta = np.clip(residuals / (6.0 * s), -1, 1)

delta = (1 - delta ** 2) ** 2

return yest

import math

n = 100

x = np.linspace(0, 2 * math.pi, n)

y = np.sin(x) + 0.3 * np.random.randn(n)

f =0.25

iterations=3

yest = lowess(x, y, f, iterations)

import matplotlib.pyplot as plt

plt.plot(x,y,"r.")

plt.plot(x,yest,"b-")

[<matplotlib.lines.Line2D at 0x171f4daeaf0>]
Out[11]:

In [ ]:

You might also like

pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy