Artificial Intelligence Lab


PROBLEM SOLVING AS A SEARCH

PROGRAM

from easyAI import TwoPlayersGame, Human_Player, AI_Player, Negamax

class GameOfBones(TwoPlayersGame):

    def __init__(self, players):
        self.players = players
        self.pile = 20        # start with 20 bones in the pile
        self.nplayer = 1      # player 1 starts

    def possible_moves(self):
        return ['1', '2', '3']

    def make_move(self, move):
        self.pile -= int(move)

    def win(self):
        return self.pile <= 0

    def is_over(self):
        return self.win()

    def show(self):
        print("%d bones left in the pile" % self.pile)

    def scoring(self):
        return 100 if self.win() else 0   # scoring for the AI search

ai = Negamax(13)   # the AI will think 13 moves in advance
game = GameOfBones([Human_Player(), AI_Player(ai)])
history = game.play()
OUTPUT

20 bones left in the pile
Player 1 what do you play ? 1
Move #1: player 1 plays 1 :
19 bones left in the pile
Move #2: player 2 plays 1 :
18 bones left in the pile
Player 1 what do you play ? 2
Move #3: player 1 plays 2 :
16 bones left in the pile
Move #4: player 2 plays 1 :
15 bones left in the pile
Player 1 what do you play ? 1
Move #5: player 1 plays 1 :
14 bones left in the pile
Move #6: player 2 plays 1 :
13 bones left in the pile
Player 1 what do you play ? 3
Move #7: player 1 plays 3 :
10 bones left in the pile
Move #8: player 2 plays 1 :
9 bones left in the pile
Player 1 what do you play ? 2
Move #9: player 1 plays 2 :
7 bones left in the pile
Move #10: player 2 plays 1 :
6 bones left in the pile
Player 1 what do you play ? 1
Move #11: player 1 plays 1 :
5 bones left in the pile
Move #12: player 2 plays 1 :
4 bones left in the pile
Player 1 what do you play ? 3
Move #13: player 1 plays 3 :
1 bones left in the pile
Move #14: player 2 plays 1 :
0 bones left in the pile
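
A hedged variation on the listing above (not part of the original lab): replace the human player with a second, shallower Negamax AI and watch the deeper search win.

# A minimal sketch, assuming the GameOfBones class defined above.
shallow_ai = Negamax(2)    # weak: looks only 2 moves ahead
deep_ai = Negamax(13)      # strong: looks 13 moves ahead
game = GameOfBones([AI_Player(shallow_ai), AI_Player(deep_ai)])
history = game.play()      # prints the moves, as in the transcript above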
LOCAL SEARCH ALGORITHMS (HILL CLIMBING SEARCH)

PROGRAM

import random
import string

def generate_random_solution(length=13):
    # a random string of printable characters, the same length as the target
    return [random.choice(string.printable) for _ in range(length)]

def evaluate(solution):
    # sum of character-code distances to "Hello, World!" (0 means a perfect match)
    target = list("Hello, World!")
    diff = 0
    for i in range(len(target)):
        diff += abs(ord(solution[i]) - ord(target[i]))
    return diff

def mutate_solution(solution):
    # change one randomly chosen character in place
    index = random.randint(0, len(solution) - 1)
    solution[index] = random.choice(string.printable)

best = generate_random_solution()
best_score = evaluate(best)

while True:
    print('Best score so far', best_score, 'Solution', "".join(best))
    if best_score == 0:
        break
    new_solution = list(best)
    mutate_solution(new_solution)
    score = evaluate(new_solution)
    if score < best_score:
        best = new_solution
        best_score = score
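
Plain hill climbing can stall on unlucky runs; a standard remedy is random restarts. A minimal sketch reusing the helpers above (the step and restart counts are arbitrary assumptions, not from the lab):

# Random-restart hill climbing: rerun the climb from several random starting
# points and keep the best result found across all runs.
def hill_climb(max_steps=100000):
    best = generate_random_solution()
    best_score = evaluate(best)
    for _ in range(max_steps):
        if best_score == 0:
            break
        candidate = list(best)
        mutate_solution(candidate)
        score = evaluate(candidate)
        if score < best_score:
            best, best_score = candidate, score
    return best, best_score

overall_best, overall_score = min((hill_climb() for _ in range(5)), key=lambda r: r[1])
print(overall_score, "".join(overall_best))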

OUTPUT
LOGICAL AGENT

PROGRAM

import speech_recognition as sr

recording = sr.Recognizer()

with sr.Microphone() as source:
    recording.adjust_for_ambient_noise(source)
    print("Please Say something:")
    audio = recording.listen(source)

try:
    print("You said: \n" + recording.recognize_google(audio))
except Exception as e:
    print(e)
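
If no microphone is available, the same Recognizer can transcribe a recorded file instead; a hedged sketch (sample.wav is a placeholder file name, not part of the lab):

# Transcribe a local WAV file instead of live microphone input.
import speech_recognition as sr

r = sr.Recognizer()
with sr.AudioFile('sample.wav') as source:   # hypothetical file name
    audio = r.record(source)                 # read the whole file
print("You said: \n" + r.recognize_google(audio))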

OUTPUT
FORWARD CHAINING AND INFERENCE RULES

PROGRAM

Rules and initial facts (in logical notation):

mammal(A) ==> vertebrate(A).
vertebrate(A) ==> animal(A).
vertebrate(A), flying(A) ==> bird(A).
vertebrate("duck").
flying("duck").
mammal("cat").

Python implementation:

global facts
global is_changed

is_changed = True
facts = [["vertebrate", "duck"], ["flying", "duck"], ["mammal", "cat"]]

def assert_fact(fact):
    # add a new fact to the fact base and flag that another pass is needed
    global facts
    global is_changed
    if fact not in facts:
        facts += [fact]
        is_changed = True

# keep applying the rules until a full pass adds no new fact
while is_changed:
    is_changed = False
    for A1 in facts:
        if A1[0] == "mammal":
            assert_fact(["vertebrate", A1[1]])
        if A1[0] == "vertebrate":
            assert_fact(["animal", A1[1]])
        if A1[0] == "vertebrate" and ["flying", A1[1]] in facts:
            assert_fact(["bird", A1[1]])

print(facts)

OUTPUT

[['vertebrate', 'duck'], ['flying', 'duck'], ['mammal', 'cat'], ['animal', 'duck'], ['bird', 'duck'], ['vertebrate', 'cat'], ['animal', 'cat']]
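
A small follow-up sketch (the helper is hypothetical, not part of the lab listing) for querying the derived fact base once forward chaining has finished:

# Check whether a given fact was derived by the forward-chaining pass above.
def is_fact(predicate, subject):
    return [predicate, subject] in facts

print(is_fact("bird", "duck"))    # True  (derived from vertebrate + flying)
print(is_fact("animal", "cat"))   # True  (mammal -> vertebrate -> animal)
print(is_fact("bird", "cat"))     # False (cats are not asserted as flying)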


AGENT BASED ON PROPOSITIONAL LOGIC

PROGRAM

from kanren import isvar, run, membero
from kanren.core import success, fail, goaleval, condeseq, eq, var
from sympy.ntheory.generate import prime, isprime
import itertools as it

def prime_check(x):
    # if x is an unbound logic variable, enumerate the primes;
    # otherwise succeed only when x is itself prime
    if isvar(x):
        return condeseq([(eq, x, p)] for p in map(prime, it.count(1)))
    else:
        return success if isprime(x) else fail

x = var()
print(set(run(0, x, (membero, x, (12, 14, 15, 19, 20, 21, 22, 23, 29, 30, 41, 44, 52, 62, 65, 85)), (prime_check, x))))
print(run(10, x, prime_check(x)))
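
For a smaller taste of the same relational style, a hedged standalone sketch using only membero:

# Find the values that satisfy two membership goals at once.
from kanren import run, var, membero

z = var()
print(run(0, z, (membero, z, (1, 2, 3)), (membero, z, (2, 3, 4))))  # (2, 3)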

OUTPUT
NAÏVE BAYESIAN CLASSIFIER

PROGRAM

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics

# loading the dataset
msg = pd.read_csv('naivetext.csv', names=['message', 'label'])
print('The dimensions of the dataset', msg.shape)

msg['labelnum'] = msg.label.map({'pos': 1, 'neg': 0})
X = msg.message
y = msg.labelnum
print(X)
print(y)

# splitting the dataset into train and test data
xtrain, xtest, ytrain, ytest = train_test_split(X, y)
print('\n The total number of Training Data :', ytrain.shape)
print('\n The total number of Test Data :', ytest.shape)

# counting the words or tokens in the text documents
count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain)
xtest_dtm = count_vect.transform(xtest)
print('\n The words or Tokens in the text documents \n')
print(count_vect.get_feature_names())
df = pd.DataFrame(xtrain_dtm.toarray(), columns=count_vect.get_feature_names())

# training a Naive Bayes (NB) classifier on the training data
clf = MultinomialNB().fit(xtrain_dtm, ytrain)
predicted = clf.predict(xtest_dtm)

# printing accuracy, confusion matrix, precision and recall
print('\n Accuracy of the classifier is', metrics.accuracy_score(ytest, predicted))
print('\n Confusion matrix')
print(metrics.confusion_matrix(ytest, predicted))
print('\n The value of Precision', metrics.precision_score(ytest, predicted))
print('\n The value of Recall', metrics.recall_score(ytest, predicted))
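
Based on the read_csv call (names=['message','label'], so no header row) and the messages and label numbers printed below, the first rows of naivetext.csv would look like this reconstructed excerpt:

I love this sandwich,pos
This is an amazing place,pos
I feel very good about these beers,pos
This is my best work,pos
What an awesome view,pos
I do not like this restaurant,neg

and so on for the remaining rows.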


OUTPUT

The dimensions of the dataset (18, 2)

0     I love this sandwich
1     This is an amazing place
2     I feel very good about these beers
3     This is my best work
4     What an awesome view
5     I do not like this restaurant
6     I am tired of this stuff
7     I can't deal with this
8     He is my sworn enemy
9     My boss is horrible
10    This is an awesome place
11    I do not like the taste of this juice
12    I love to dance
13    I am sick and tired of this place
14    What a great holiday
15    That is a bad locality to stay
16    We will have good fun tomorrow
17    I went to my enemy's house today
Name: message, dtype: object

0     1
1     1
2     1
3     1
4     1
5     0
6     0
7     0
8     0
9     0
10    1
11    0
12    1
13    0
14    1
15    0
16    1
17    0
Name: labelnum, dtype: int64

The total number of Training Data : (13,)
The total number of Test Data : (5,)

The words or Tokens in the text documents

['about', 'am', 'an', 'awesome', 'bad', 'beers', 'best', 'boss', 'can', 'deal', 'enemy', 'feel', 'fun', 'good', 'great', 'have', 'he', 'holiday', 'horrible', 'house', 'is', 'locality', 'love', 'my', 'of', 'place', 'sandwich', 'stay', 'stuff', 'sworn', 'that', 'these', 'this', 'tired', 'to', 'today', 'tomorrow', 'very', 'view', 'we', 'went', 'what', 'will', 'with', 'work']

Accuracy of the classifier is 0.6

Confusion matrix
[[2 1]
 [1 1]]

The value of Precision 0.5
The value of Recall 0.5


BAYESIAN NETWORK

PROGRAM

import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('7-dataset.csv')
heartDisease = heartDisease.replace('?', np.nan)

print('Sample instances from the dataset are given below')
print(heartDisease.head())
print('\n Attributes and datatypes')
print(heartDisease.dtypes)

model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'chol')])

print('\n Learning CPD using Maximum likelihood estimators')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

print('\n Inferencing with Bayesian Network:')
HeartDiseasetest_infer = VariableElimination(model)

print('\n 1. Probability of HeartDisease given evidence= restecg')
q1 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'restecg': 1})
print(q1)

print('\n 2. Probability of HeartDisease given evidence= cp')
q2 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'cp': 2})
print(q2)
OUTPUT

Sample instances from the dataset are given below

   age  sex  cp  trestbps  chol  ...  oldpeak  slope  ca  thal  heartdisease
0   63    1   1       145   233  ...      2.3      3   0     6             0
1   67    1   4       160   286  ...      1.5      2   3     3             2
2   67    1   4       120   229  ...      2.6      2   2     7             1
3   37    1   3       130   250  ...      3.5      3   0     3             0
4   41    0   2       130   204  ...      1.4      1   0     3             0

[5 rows x 14 columns]

Attributes and datatypes
age              int64
sex              int64
cp               int64
trestbps         int64
chol             int64
fbs              int64
restecg          int64
thalach          int64
exang            int64
oldpeak        float64
slope            int64
ca              object
thal            object
heartdisease     int64
dtype: object

Learning CPD using Maximum likelihood estimators

Inferencing with Bayesian Network:

1. Probability of HeartDisease given evidence= restecg
+-----------------+---------------------+
| heartdisease    |   phi(heartdisease) |
+=================+=====================+
| heartdisease(0) |              0.1012 |
+-----------------+---------------------+
| heartdisease(1) |              0.0000 |
+-----------------+---------------------+
| heartdisease(2) |              0.2392 |
+-----------------+---------------------+
| heartdisease(3) |              0.2015 |
+-----------------+---------------------+
| heartdisease(4) |              0.4581 |
+-----------------+---------------------+

2. Probability of HeartDisease given evidence= cp
+-----------------+---------------------+
| heartdisease    |   phi(heartdisease) |
+=================+=====================+
| heartdisease(0) |              0.3610 |
+-----------------+---------------------+
| heartdisease(1) |              0.2159 |
+-----------------+---------------------+
| heartdisease(2) |              0.1373 |
+-----------------+---------------------+
| heartdisease(3) |              0.1537 |
+-----------------+---------------------+
| heartdisease(4) |              0.1321 |
+-----------------+---------------------+
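
VariableElimination also accepts joint evidence; a hedged follow-up sketch using the same inference object (the evidence values are illustrative, drawn from the columns above):

# Query the posterior of heartdisease under two pieces of evidence at once.
q3 = HeartDiseasetest_infer.query(variables=['heartdisease'],
                                  evidence={'cp': 2, 'sex': 0})
print(q3)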
EM ALGORITHM TO CLUSTER A SET OF DATA

PROGRAM

from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import sklearn.metrics as metrics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

names = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Class']
dataset = pd.read_csv("Iris.csv")

X = dataset.iloc[:, :-1]
label = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
y = [label[c] for c in dataset.iloc[:, -1]]

plt.figure(figsize=(8, 5))
colormap = np.array(['red', 'lime', 'black'])

# GMM plot
gmm = GaussianMixture(n_components=3, random_state=0).fit(X)
y_cluster_gmm = gmm.predict(X)
plt.title('GMM Classification')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y_cluster_gmm])
plt.show()

print('The accuracy score of EM: ', metrics.accuracy_score(y, y_cluster_gmm))
print('The Confusion matrix of EM:\n ', metrics.confusion_matrix(y, y_cluster_gmm))
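
One caveat: GMM cluster indices are arbitrary, so accuracy_score against the true labels is only meaningful when the clusters happen to line up with the label encoding. A permutation-invariant measure such as the adjusted Rand index is a safer check; a minimal sketch reusing y and y_cluster_gmm from above:

from sklearn.metrics import adjusted_rand_score

# Permutation-invariant agreement between the true labels and the GMM clusters:
# 1.0 means identical partitions regardless of how the clusters are numbered.
print('Adjusted Rand index of EM:', adjusted_rand_score(y, y_cluster_gmm))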

OUTPUT
k-NEAREST NEIGHBOUR ALGORITHM

PROGRAM

import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# generate four well-separated Gaussian blobs
X, y_true = make_blobs(n_samples=400, centers=4, cluster_std=0.60, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=20)
plt.show()

# cluster the points with k-means and plot the assignments and centroids
kmeans = KMeans(n_clusters=4)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=20, cmap='summer')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='blue', s=100, alpha=0.9)
plt.show()
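
Despite the section title, the listing above demonstrates k-means clustering. For completeness, a hedged k-nearest-neighbour classification sketch on the same blobs data (not part of the original lab; the neighbour count is an arbitrary choice):

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Hold out a test split of the blobs and classify with 5 nearest neighbours.
Xtr, Xte, ytr, yte = train_test_split(X, y_true, random_state=0)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(Xtr, ytr)
print('k-NN test accuracy:', knn.score(Xte, yte))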
OUTPUT
DECISION TREE - PRUNED AND UNPRUNED TREES

PROGRAM

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import tree
import matplotlib.pyplot as plt

df = pd.read_csv('diabetes.csv')
df.head()

X = df.drop(columns=['Outcome'])
y = df['Outcome']
x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y)

# unpruned tree
clf = tree.DecisionTreeClassifier(random_state=0)
clf.fit(x_train, y_train)
y_train_pred = clf.predict(x_train)
y_test_pred = clf.predict(x_test)

plt.figure(figsize=(8, 8))
features = X.columns          # feature names only (Outcome excluded)
classes = ['No Diabetes', 'Diabetes']
tree.plot_tree(clf, feature_names=features, class_names=classes, filled=True)
plt.show()

# pruned tree (cost-complexity pruning with alpha = 0.020)
clf_ = tree.DecisionTreeClassifier(random_state=0, ccp_alpha=0.020)
clf_.fit(x_train, y_train)
y_train_pred = clf_.predict(x_train)
y_test_pred = clf_.predict(x_test)

plt.figure(figsize=(8, 8))
tree.plot_tree(clf_, feature_names=features, class_names=classes, filled=True)
plt.show()
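
The pruning strength ccp_alpha = 0.020 is fixed by hand here; scikit-learn can enumerate the candidate alphas instead. A hedged sketch reusing x_train and y_train from above:

# Enumerate the effective alphas along the cost-complexity pruning path;
# each value is a candidate for ccp_alpha and can be compared by cross-validation.
path = clf.cost_complexity_pruning_path(x_train, y_train)
print(path.ccp_alphas)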
OUTPUT
NEURAL NETWORK USING BACKPROPAGATION ALGORITHM

PROGRAM

import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X / np.amax(X, axis=0)       # normalise the inputs column-wise

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def derivatives_sigmoid(x):
    # derivative of the sigmoid, expressed in terms of its output
    return x * (1 - x)

epoch = 5000                     # number of training iterations
lr = 0.1                         # learning rate
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

# random initial weights and biases
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward pass
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # backward pass (note: this listing updates only the weights, not the biases)
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr

print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n", output)


OUTPUT

Input:
[[0.66666667 1.        ]
 [0.33333333 0.55555556]
 [1.         0.66666667]]
Actual Output:
[[92.]
 [86.]
 [89.]]
Predicted Output:
[[0.88473854]
 [0.86335851]
 [0.88398985]]
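
The predicted values sit near 0.88 while the targets are raw marks: a sigmoid output unit can never exceed 1, so the network cannot match 92. A common, hedged fix (an adjustment to the listing above, not part of the original lab) is to scale the targets before training:

y = y / 100    # scale targets into the sigmoid's (0, 1) range: 92 -> 0.92, etc.
# ... run the same training loop as above ...
# print("Predicted Output: \n", output * 100)   # map predictions back to marks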
SUPPORT VECTOR CLASSIFICATION FOR LINEAR KERNEL

PROGRAM

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn import metrics
import matplotlib.pyplot as plt

# load dataset
cancer_data = datasets.load_breast_cancer()
print(cancer_data.data[5])
print(cancer_data.data.shape)

# target set
print(cancer_data.target)

X_train, X_test, y_train, y_test = train_test_split(cancer_data.data, cancer_data.target,
                                                    test_size=0.4, random_state=109)

# create a classifier with a linear kernel
cls = svm.SVC(kernel="linear")

# train the model
cls.fit(X_train, y_train)

# predict the response
pred = cls.predict(X_test)

# accuracy
print("accuracy:", metrics.accuracy_score(y_test, y_pred=pred))

# precision score
print("precision:", metrics.precision_score(y_test, y_pred=pred))

# recall score
print("recall:", metrics.recall_score(y_test, y_pred=pred))

print(metrics.classification_report(y_test, y_pred=pred))

# loading the digits dataset
letters = datasets.load_digits()

# generating the classifier
clf = svm.SVC(gamma=0.001, C=100)

# training the classifier on all but the last 10 digits
X, y = letters.data[:-10], letters.target[:-10]
clf.fit(X, y)

# predicting the held-out last 10 digits
print(clf.predict(letters.data[-10:]))

plt.imshow(letters.images[6], interpolation='nearest')
plt.show()
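
For comparison with the linear kernel, a hedged sketch that trains an RBF-kernel classifier on the same breast-cancer split (the hyperparameters are illustrative, not tuned):

# Train an RBF-kernel SVM on the same split and report its accuracy.
cls_rbf = svm.SVC(kernel="rbf", gamma="scale")
cls_rbf.fit(X_train, y_train)
pred_rbf = cls_rbf.predict(X_test)
print("rbf accuracy:", metrics.accuracy_score(y_test, pred_rbf))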
OUTPUT
IMPLEMENT LOGISTIC REGRESSION FOR DIABETES PREDICTION

PROGRAM

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import seaborn as sn
import matplotlib.pyplot as plt

data = pd.read_csv('lg1.csv')
print(data.columns)
print(data.head())
print("dimension of diabetes data: {}".format(data.shape))
data.info()

X = data[['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'Pedigree', 'Age']]
y = data['Outcome']
#X = data[['gmat', 'gpa', 'work_experience']]
#y = data['admitted']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

logistic_regression = LogisticRegression()
logistic_regression.fit(X_train, y_train)
y_pred = logistic_regression.predict(X_test)

# confusion matrix shown as a heatmap
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)

print('Accuracy: ', metrics.accuracy_score(y_test, y_pred))
plt.show()
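
A hedged follow-up using the same y_test and y_pred: the classification report summarises per-class precision, recall and F1 alongside the accuracy printed above.

# Per-class precision, recall and F1 for the logistic regression predictions.
print(metrics.classification_report(y_test, y_pred))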
OUTPUT
