# Machine Learning laboratory programs (garbled extraction — many lines and all
# indentation were lost; review notes below mark what is missing).
# Lab 1: A* search algorithm.
# NOTE(review): fragment of aStarAlgo(start_node, stop_node) — the `def` line,
# the open-list selection loop (choosing n minimising g[n] + heuristic(n)), and
# the neighbour-expansion header over get_neighbors(n) were lost in extraction,
# and all indentation is gone.  Code below is kept byte-identical.
# NOTE(review): set(start_node) builds a set of the string's CHARACTERS — this
# only works because node labels here are single characters; {start_node} was
# presumably intended.  TODO confirm against the original lab source.
open_set = set(start_node)
closed_set = set()
g = {}
# parents maps each node to its predecessor on the best known path.
parents = {}
g[start_node] = 0
# The start node is its own parent — used as the loop terminator when
# reconstructing the path below.
parents[start_node] = start_node
n = None
for v in open_set:
n = v # n='A'
pass
# NOTE(review): the two `else:` branches below belonged to
# `if m not in open_set and m not in closed_set:` tests whose headers are
# missing from this fragment.
else:
open_set.add(m)
parents[m] = n
else:
parents[m] = n
# Re-open a node when a cheaper path to it is found.
if m in closed_set:
closed_set.remove(m)
open_set.add(m)
# No candidate node ⇒ open set exhausted without reaching the goal.
if n == None:
return None
# Goal reached: walk the parents chain back to the start, then reverse.
if n == stop_node:
path = []
while parents[n] != n:
path.append(n)
n = parents[n]
path.append(start_node)
path.reverse()
return path
# Move the expanded node from the open set to the closed set.
open_set.remove(n)
closed_set.add(n)
return None
def get_neighbors(v, graph=None):
    """Return the list of (neighbour, weight) pairs for node *v*.

    Parameters
    ----------
    v : hashable
        Node label to look up.
    graph : dict or None
        Adjacency mapping to search.  Defaults to the module-level
        ``Graph_nodes`` dict (the original behaviour); passing an explicit
        mapping makes the helper reusable and testable.

    Returns
    -------
    The adjacency entry for *v*, or ``None`` when *v* is unknown —
    identical to the original ``if v in ... else None`` pair.
    """
    # Original source had lost its indentation (SyntaxError); restored here.
    if graph is None:
        graph = Graph_nodes
    # dict.get returns None for a missing key, matching the original branch.
    return graph.get(v)
def heuristic(n):
    """Heuristic estimate h(n) of remaining cost from node *n* to the goal.

    Fixed table for the 10-node demo graph used by aStarAlgo; the goal 'J'
    has h = 0.  Raises KeyError for an unknown node label.
    """
    H_dist = {
        'A': 10,
        'B': 8,
        'C': 5,
        'D': 7,
        'E': 3,
        'F': 6,
        'G': 5,
        'H': 3,
        'I': 1,
        'J': 0,
    }
    # Original source was missing the closing brace of H_dist and all
    # indentation (SyntaxError); both restored here — values unchanged.
    return H_dist[n]
# NOTE(review): truncated driver — the body and closing brace of the
# Graph_nodes adjacency dict were lost in extraction, so this fragment is not
# runnable as written.
Graph_nodes = {
aStarAlgo('A', 'J')
# Section marker: Lab program 2 (AO* algorithm) begins below.
2
# NOTE(review): AO* (AND-OR graph search) Graph class — heavily garbled.
# Missing from this fragment: the `def __init__(self, graph, heuristicNodeList,
# startNode):` header, the `def getNeighbors(self, v):` header, the
# `def setStatus(...)`, `def getHeuristicNodeValue(...)` and
# `def setHeuristicNodeValue(...)` headers, most of aoStar(), and all
# indentation.  Code lines are kept byte-identical below.
class Graph:
# __init__ body (header lost): store the AND-OR graph, heuristic table and
# start node; parent/status/solutionGraph start empty.
self.graph = graph
self.H=heuristicNodeList
self.start=startNode
self.parent={}
self.status={}
self.solutionGraph={}
def applyAOStar(self):
self.aoStar(self.start, False)
# getNeighbors body (header lost): '' for nodes with no adjacency entry.
return self.graph.get(v,'')
def getStatus(self,v):
# Status defaults to 0 (unexplored); -1 marks a solved node below.
return self.status.get(v,0)
# setStatus body (header lost).
self.status[v]=val
# getHeuristicNodeValue body (header lost): heuristic defaults to 0.
return self.H.get(n,0)
# setHeuristicNodeValue body (header lost).
self.H[n]=value
def printSolution(self):
print("------------------------------------------------------------")
print(self.solutionGraph)
print("------------------------------------------------------------")
# computeMinimumCostChildNodes body (header lost): for each OR-branch of v,
# sum child heuristics + edge weights and keep the minimum-cost child list.
minimumCost=0
costToChildNodeListDict={}
costToChildNodeListDict[minimumCost]=[]
flag=True
for nodeInfoTupleList in self.getNeighbors(v):
cost=0
nodeList=[]
# NOTE(review): the inner `for c, weight in nodeInfoTupleList:` header is
# missing from this fragment.
cost=cost+self.getHeuristicNodeValue(c)+weight
nodeList.append(c)
# First branch seeds the minimum; later branches replace it only if cheaper.
if flag==True:
minimumCost=cost
costToChildNodeListDict[minimumCost]=nodeList
flag=False
else:
if minimumCost>cost:
minimumCost=cost
costToChildNodeListDict[minimumCost]=nodeList
# aoStar body (header and several lines lost): revise v's heuristic, mark
# v solved when every child in the best branch is solved, then recurse —
# backtracking to the parent or expanding unsolved children.
self.setHeuristicNodeValue(v, minimumCost)
self.setStatus(v,len(childNodeList))
# NOTE(review): `Tru` is a typo for `True` (NameError if ever executed).
solved=Tru
self.parent[childNode]=v
if self.getStatus(childNode)!=-1:
if solved==True:
self.setStatus(v,-1)
self.solutionGraph[v]=childNodeList
if v!=self.start:
self.aoStar(self.parent[v], True)
if backTracking==False:
self.aoStar(childNode, False)
# NOTE(review): AO* drivers — the body of graph1, the closing brace of graph2,
# and both `G1 = Graph(graph1, h1, 'A')` / `G2 = Graph(graph2, h2, 'A')`
# constructions were lost in extraction.
# Heuristic values for the first demo graph.
h1 = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J':1, 'T': 3}
graph1 = {
G1.applyAOStar()
G1.printSolution()
h2 = {'A': 1, 'B': 6, 'C': 12, 'D': 10, 'E': 4, 'F': 4, 'G': 5, 'H': 7} # Heuristic values of Nodes
graph2 = {
'A': [[('B', 1), ('C', 1)], [('D', 1)]], # Neighbors of Node 'A', B, C & D with respective weights
'B': [[('G', 1)], [('H', 1)]], # Neighbors are included in a list of lists
'D': [[('E', 1), ('F', 1)]] # Each sublist indicates an "OR" node or "AND" nodes
G2.applyAOStar()
G2.printSolution()
# Section marker: Lab program 3 (Candidate Elimination) begins below.
3
# NOTE(review): Candidate Elimination algorithm — missing from this fragment:
# the initialisation of `general` (presumably a len(specific) x len(specific)
# grid of '?') and the `else:` / negative-example branch header above the
# second inner loop.  All indentation is lost; code kept byte-identical.
import csv
with open("trainingexamples.csv") as f:
csv_file = csv.reader(f)
data = list(csv_file)
# Seed the specific hypothesis with the first training row (minus the label).
specific = data[1][:-1]
for i in data:
# Positive example: generalise `specific` where it disagrees.
if i[-1] == "Yes":
for j in range(len(specific)):
if i[j] != specific[j]:
specific[j] = "?"
general[j][j] = "?"
# (Negative-example branch — its `else:` header is missing here.)
for j in range(len(specific)):
if i[j] != specific[j]:
general[j][j] = specific[j]
else:
general[j][j] = "?"
print("\nStep " + str(data.index(i)+1) + " of Candidate Elimination Algorithm")
print(specific)
print(general)
# Keep only general hypotheses that constrain at least one attribute.
gh = []
for i in general:
for j in i:
if j != '?':
gh.append(i)
break
# Section marker: Lab program 4 (ID3 decision tree) begins here.
4
import pandas as pd
import math
def base_entropy(dataset):
    # NOTE(review): garbled — the line extracting `target` from `dataset`
    # and the log2 entropy expression (truncated at `0 - (` below) are
    # missing, and body indentation is lost.
p=0
n=0
targets = list(set(target))
# Count positive (first label) vs. negative occurrences.
for i in target:
if i == targets[0]:
p=p+1
else:
n=n+1
# Pure set ⇒ entropy 0; perfectly balanced ⇒ entropy 1.
if p == 0 or n == 0:
return 0
elif p == n:
return 1
else:
# NOTE(review): expression truncated mid-line in the source.
entropy = 0 - (
return entropy
# entropy(...) helper (its `def` header and per-value filtering lines are
# missing from this fragment) — same structure as base_entropy.
p=0
n=0
targets = list(set(target))
p=p+1
n=n+1
if p == 0 or n == 0:
return 0
elif p == n:
return 1
else:
# NOTE(review): expression truncated mid-line in the source.
entropy = 0 - (
return entropy
# counts(...) helper (its `def` header and loop headers are missing):
# tallies positives/negatives for one attribute value `i`.
p=0
n=0
targets = list(set(target))
if j == targets[0] and k == i:
p=p+1
n=n+1
return p, n
# Information_Gain(...) fragment (its `def` header and the per-value gain
# accumulation inside the loop are missing; indentation lost).
Distinct = list(set(feature))
Info_Gain = 0
for i in Distinct:
return Info_Gain
# generate_childs(...) fragment (`def` header and counts per child missing).
childs = dict()
for i in distinct:
return childs
# modify_data_set(...) fragment: drops the chosen attribute column from the
# row subset (the `def` header and the row-filtering lines are missing).
size = len(dataset)
del (subdata[subdata.columns[index]])
return subdata
def greatest_information_gain(dataset):
    # NOTE(review): the `for i in range(1, size):` loop header and the
    # `i_g = Information_Gain(...)` / `if max < i_g:` lines are missing;
    # `max` also shadows the builtin.
max = -1
attribute_index = 0
size = len(dataset.columns) - 1
max = i_g
attribute_index = i
return attribute_index
# construct_tree(...) fragment (`def` header, targets extraction and the
# recursive construct_tree call on `sub` are missing).
impure_childs = []
attribute_index = greatest_information_gain(dataset)
tree[dataset.columns[attribute_index]] = childs
for k, v in childs.items():
# v is a (positive, negative) count pair: a zero on either side means the
# child is pure and becomes a leaf; otherwise recurse on it later.
if v[0] == 0:
tree[k] = targets[1]
elif v[1] == 0:
tree[k] = targets[0]
impure_childs.append(k)
for i in impure_childs:
sub = modify_data_set(dataset,attribute_index,
dataset.columns[attribute_index], i)
return tree
def main():
    # NOTE(review): the construct_tree(df, tree) call and result printing
    # were lost in extraction.
df = pd.read_csv("playtennis.csv")
tree = dict()
if __name__ == "__main__":
main()
# Section marker: Lab program 5 (backpropagation neural network) begins here.
5
import numpy as np
# NOTE(review): the definitions of the raw X and y arrays (and of sigmoid)
# were lost in extraction; these lines normalise features column-wise and
# scale the targets into [0, 1].
X = X/np.amax(X,axis=0)
y = y/100
def derivatives_sigmoid(x):
    """Derivative of the sigmoid, expressed via the sigmoid's OUTPUT.

    If s = sigmoid(z) then ds/dz = s * (1 - s); callers therefore pass an
    activation value (scalar or NumPy array — the expression broadcasts
    elementwise), not the pre-activation input.
    """
    # Original source had lost the body's indentation (SyntaxError);
    # restored here — the expression itself is unchanged.
    return x * (1 - x)
# NOTE(review): training loop fragment — `sigmoid` is undefined here (its def
# was lost), `outinp` is used but only `outinp1` is computed (the
# `outinp = outinp1 + bout` line is missing), `d_output` is used before any
# definition (`d_output = EO * outgrad` is missing), and the bias updates for
# bh/bout are absent.  Indentation is lost; code kept byte-identical.
epoch=5000
lr=0.1
# 2 input features, one hidden layer of 3 units, 1 output unit.
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1
# Random-uniform weight and bias initialisation.
wh=np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
bh=np.random.uniform(size=(1,hiddenlayer_neurons))
wout=np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
bout=np.random.uniform(size=(1,output_neurons))
for i in range(epoch):
# Forward pass: hidden layer then output layer.
hinp1=np.dot(X,wh)
hinp=hinp1 + bh
hlayer_act = sigmoid(hinp)
outinp1=np.dot(hlayer_act,wout)
output = sigmoid(outinp)
# Backward pass: output error, then error propagated to the hidden layer.
EO = y-output
outgrad = derivatives_sigmoid(output)
EH = d_output.dot(wout.T)
hiddengrad = derivatives_sigmoid(hlayer_act)
d_hiddenlayer = EH * hiddengrad
# Gradient-descent weight updates scaled by the learning rate.
wout += hlayer_act.T.dot(d_output) *lr
wh += X.T.dot(d_hiddenlayer) *lr
# NOTE(review): Lab 6 (Gaussian Naive Bayes on tennis data) fragment — missing
# from this chunk: `from sklearn.preprocessing import LabelEncoder`, the
# `le_outlook = LabelEncoder()` construction, the train_test_split call that
# produces X_train/y_train, the GaussianNB import, and the final
# predict/accuracy lines.
import pandas as pd
data = pd.read_csv('tennisdata.csv')
# Features = all columns but the last; target = last column.
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Label-encode each categorical column with its own encoder so the
# encoders can later invert their own column's transform.
X.Outlook = le_outlook.fit_transform(X.Outlook)
le_Temperature = LabelEncoder()
X.Temperature = le_Temperature.fit_transform(X.Temperature)
le_Humidity = LabelEncoder()
X.Humidity = le_Humidity.fit_transform(X.Humidity)
le_Windy = LabelEncoder()
X.Windy = le_Windy.fit_transform(X.Windy)
le_PlayTennis = LabelEncoder()
y = le_PlayTennis.fit_transform(y)
classifier = GaussianNB()
classifier.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
# Section marker: Lab program 7 (K-Means vs. EM/GMM clustering on iris).
7
# NOTE(review): fragment — missing: `from sklearn import datasets`,
# `from sklearn.cluster import KMeans`, `from sklearn import preprocessing`,
# `from sklearn.mixture import GaussianMixture`, the model.fit(X) call,
# `plt.subplot(1, 3, 1)`, all scatter calls and the colormap definition.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
iris = datasets.load_iris()
X = pd.DataFrame(iris.data)
X.columns = ['Sepal_Length','Sepal_Width','Petal_Length','Petal_Width']
y = pd.DataFrame(iris.target)
y.columns = ['Targets']
# Three clusters to match the three iris species.
model = KMeans(n_clusters=3)
plt.figure(figsize=(14,7))
plt.title('Real Clusters')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.subplot(1, 3, 2)
plt.title('K-Means Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
# Standardise features before fitting the Gaussian mixture.
scaler = preprocessing.StandardScaler()
scaler.fit(X)
xsa = scaler.transform(X)
# NOTE(review): n_components=40 looks like a typo for 3 (three species), and
# `gmm.fit(xs)` references `xs` while the scaled data is named `xsa` —
# presumably a missing `xs = pd.DataFrame(xsa, ...)` line; confirm against
# the original lab source.
gmm = GaussianMixture(n_components=40)
gmm.fit(xs)
plt.subplot(1, 3, 3)
plt.title('GMM Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
# NOTE(review): this string literal is broken across two source lines
# (SyntaxError as written) — extraction damage, left byte-identical.
print('Observation: The GMM using EM algorithm based clustering matched the true labels
more closely than the Kmeans.')
# Section marker: Lab program 8 (K-Nearest Neighbours on iris).
8
# NOTE(review): fragment — missing: `from sklearn import datasets`,
# `from sklearn.neighbors import KNeighborsClassifier`, the train_test_split
# call producing x_train/x_test/y_train/y_test, and the body of the final
# per-sample printing loop.
from sklearn.model_selection import train_test_split
iris=datasets.load_iris()
# Print the mapping from numeric labels to species names.
for i in range(len(iris.target_names)):
print("Label", i , "-",str(iris.target_names[i]))
classifier = KNeighborsClassifier(n_neighbors=2)
classifier.fit(x_train, y_train)
y_pred=classifier.predict(x_test)
for r in range(0,len(x_test)):
# Section marker: Lab program 9 (locally weighted regression).
9
# NOTE(review): fragment — the `def local_regression(x0, X, Y, tau):` header
# and the weight-matrix computation (`xw = X.T * radial_kernel(...)` or
# similar) are missing, as is the body of draw() (per-point prediction and
# the matplotlib scatter/plot calls) and the `import matplotlib...` line.
import numpy as np
# Augment the query point and every sample with a bias term of 1.
x0 = [1, x0]
X = [[1, i] for i in X]
X = np.asarray(X)
# Weighted normal-equation solve evaluated at the query point x0.
beta = np.linalg.pinv(xw @ X) @ xw @ Y @ x0
return beta
def draw(tau):
plt.show()
# 1000 evenly spaced sample points on [-3, 3] with a noisy-looking target.
X = np.linspace(-3, 3, num=1000)
domain = X
Y = np.log(np.abs(X ** 2 - 1) + .5)
# Decreasing bandwidth tau ⇒ increasingly local (wiggly) fits.
draw(10)
draw(0.1)
draw(0.01)
draw(0.001)