Artificial Intelligence Lab
PROGRAM
# Only this fragment of the game program survives in the listing;
# a full sketch follows below.
self.players = players    # store the two players on the game object
history = game.play()     # play the match and collect the move history
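Judging from the output below ("20 bones left in the pile"), this program is a version of the classic Game of Bones: two players alternately remove 1 to 3 bones from a pile of 20, and whoever takes the last bone loses. A minimal self-contained sketch under those assumptions (the class layout and player strategies here are illustrative, not the original code):

import random

class GameOfBones:
    def __init__(self, players, pile=20):
        self.players = players    # two strategies: functions pile -> bones to take
        self.pile = pile

    def play(self):
        history = []
        turn = 0
        while self.pile > 0:
            move = self.players[turn % 2](self.pile)
            move = max(1, min(3, move, self.pile))   # a legal move takes 1-3 bones
            self.pile -= move
            history.append((turn % 2 + 1, move))
            print("%d bones left in the pile" % self.pile)
            turn += 1
        # losing condition assumed: taking the last bone loses
        print("Player %d took the last bone and loses" % ((turn - 1) % 2 + 1))
        return history

random_player = lambda pile: random.randint(1, 3)
optimal_player = lambda pile: (pile - 1) % 4 or 1   # leave the opponent 1 mod 4 bones
game = GameOfBones([random_player, optimal_player])
history = game.play()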
OUTPUT
20 bones left in the pile
PROGRAM
import random
import string

# The target string is not shown in the original listing; "Hello, World!"
# (13 characters, matching length=13) is assumed here.
target = "Hello, World!"

def generate_random_solution(length=13):
    # start from a random string of printable characters
    return [random.choice(string.printable) for _ in range(length)]

def evaluate(solution):
    # score = summed character distance to the target (0 means a perfect match)
    diff = 0
    for i in range(len(target)):
        s = solution[i]
        t = target[i]
        diff += abs(ord(s) - ord(t))
    return diff

def mutate_solution(solution):
    # change one randomly chosen position to a random printable character
    index = random.randint(0, len(solution) - 1)
    solution[index] = random.choice(string.printable)

best = generate_random_solution()
best_score = evaluate(best)

while True:
    print('Best score so far:', best_score, 'Solution:', ''.join(best))
    if best_score == 0:
        break
    new_solution = list(best)
    mutate_solution(new_solution)
    score = evaluate(new_solution)
    if score < best_score:
        best = new_solution
        best_score = score
OUTPUT
LOGICAL AGENT
PROGRAM
import speech_recognition as sr

recording = sr.Recognizer()
with sr.Microphone() as source:
    # calibrate to the background noise, then capture one utterance
    recording.adjust_for_ambient_noise(source)
    audio = recording.listen(source)

try:
    # the try body is missing from the original listing; Google's free
    # recognizer is the usual call at this point
    print("You said: " + recording.recognize_google(audio))
except Exception as e:
    print(e)
OUTPUT
FORWARD CHAINING AND INFERENCE RULES
PROGRAM
vertebrate("duck").
flying("duck").
mammal("cat").
global facts
global is_changed

is_changed = True
facts = [["vertebrate","duck"],["flying","duck"],["mammal","cat"]]

def assert_fact(fact):
    # add a fact only if it is new, so the loop can reach a fixed point
    global facts
    global is_changed
    if fact not in facts:
        facts += [fact]
        is_changed = True

while is_changed:
    is_changed = False
    for A1 in facts:
        if A1[0] == "mammal":
            assert_fact(["vertebrate", A1[1]])
        if A1[0] == "vertebrate":
            assert_fact(["animal", A1[1]])
        if A1[0] == "vertebrate" and ["flying", A1[1]] in facts:
            assert_fact(["bird", A1[1]])

print(facts)
OUTPUT
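No output is reproduced in this listing. With the duplicate check in assert_fact, the loop converges, and the final facts list contains, in some order, the three starting facts plus ["animal","duck"], ["bird","duck"], ["vertebrate","cat"] and ["animal","cat"].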
PROGRAM
import itertools as it
# logpy (now maintained as the 'kanren' package) plus sympy for primality
from logpy import isvar, run, membero
from logpy.core import success, fail, condeseq, eq, var
from sympy.ntheory.generate import prime, isprime

def prime_check(x):
    if isvar(x):
        # unbound variable: lazily enumerate the primes
        return condeseq([(eq, x, p)] for p in map(prime, it.count(1)))
    else:
        # bound value: succeed only if it is prime
        return success if isprime(x) else fail

x = var()
print(set(run(0, x, (membero, x, (12, 14, 15, 19, 20, 21, 22, 23, 29, 30,
                                  41, 44, 52, 62, 65, 85)), (prime_check, x))))
print(run(10, x, prime_check(x)))
OUTPUT
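No output is reproduced here; with logpy and sympy installed, the first query returns the primes contained in the tuple, {19, 23, 29, 41}, and the second returns the first ten primes, (2, 3, 5, 7, 11, 13, 17, 19, 23, 29).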
NAÏVE BAYESIAN CLASSIFIER
PROGRAM
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics

msg = pd.read_csv('naivetext.csv', names=['message','label'])
msg['labelnum'] = msg.label.map({'pos': 1, 'neg': 0})
X = msg.message
y = msg.labelnum
print(X)
print(y)

xtrain, xtest, ytrain, ytest = train_test_split(X, y)

count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain)
xtest_dtm = count_vect.transform(xtest)
print(count_vect.get_feature_names())   # get_feature_names_out() in newer scikit-learn

df = pd.DataFrame(xtrain_dtm.toarray(), columns=count_vect.get_feature_names())

clf = MultinomialNB().fit(xtrain_dtm, ytrain)
predicted = clf.predict(xtest_dtm)
print(metrics.confusion_matrix(ytest, predicted))
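A common extension of this program (not in the original listing) also reports accuracy, precision and recall from the same metrics module:

print('Accuracy:', metrics.accuracy_score(ytest, predicted))
print('Precision:', metrics.precision_score(ytest, predicted))
print('Recall:', metrics.recall_score(ytest, predicted))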
(print(X) listing, truncated in this copy)
8   He is my sworn enemy
9   My boss is horrible
12  I love to dance
(print(y) listing, labels 1-17)
1   1
2   1
3   1
4   1
5   0
6   0
7   0
8   0
9   0
10  1
11  0
12  1
13  0
14  1
15  0
16  1
17  0
['about', 'am', 'an', 'awesome', 'bad', 'beers', 'best', 'boss', 'can', 'deal', 'enemy',
'feel', 'fun', 'good', 'great', 'have', 'he', 'holiday', 'horrible', 'house', 'is', 'locality',
'love', 'my', 'of', 'place', 'sandwich', 'stay', 'stuff', 'sworn', 'that', 'these', 'this',
'tired', 'to', 'today', 'tomorrow', 'very', 'view', 'we', 'went', 'what', 'will', 'with',
'work']
Confusion matrix
[[2 1]
[1 1]]
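Reading the matrix: rows are the true labels (neg, pos) and columns the predictions, so two negative and one positive test messages were classified correctly and two were misclassified, i.e. 3 of the 5 test messages (60%) are right.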
PROGRAM
import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel   # called BayesianNetwork in recent pgmpy
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('7-dataset.csv')
heartDisease = heartDisease.replace('?', np.nan)
print(heartDisease.head())
print(heartDisease.dtypes)

model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'chol')])
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

HeartDiseasetest_infer = VariableElimination(model)

# q1 is printed but never defined in the original listing; evidence
# restecg=1 is assumed, matching the first output table below.
print('1. Probability of HeartDisease given evidence= restecg')
q1 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'restecg': 1})
print(q1)

print('2. Probability of HeartDisease given evidence= cp')
q2 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'cp': 2})
print(q2)
OUTPUT
(heartDisease.head() rows omitted in this copy) [5 rows x 14 columns]
age int64
sex int64
cp int64
trestbps int64
chol int64
fbs int64
restecg int64
thalach int64
exang int64
oldpeak float64
slope int64
ca object
thal object
heartdisease int64
dtype: object
1. Probability of HeartDisease given evidence= restecg
+-----------------+---------------------+
| heartdisease    |   phi(heartdisease) |
+=================+=====================+
| heartdisease(0) |              0.1012 |
+-----------------+---------------------+
| heartdisease(1) |              0.0000 |
+-----------------+---------------------+
| heartdisease(2) |              0.2392 |
+-----------------+---------------------+
| heartdisease(3) |              0.2015 |
+-----------------+---------------------+
| heartdisease(4) |              0.4581 |
+-----------------+---------------------+
2. Probability of HeartDisease given evidence= cp
+-----------------+---------------------+
| heartdisease    |   phi(heartdisease) |
+=================+=====================+
| heartdisease(0) |              0.3610 |
+-----------------+---------------------+
| heartdisease(1) |              0.2159 |
+-----------------+---------------------+
| heartdisease(2) |              0.1373 |
+-----------------+---------------------+
| heartdisease(3) |              0.1537 |
+-----------------+---------------------+
| heartdisease(4) |              0.1321 |
+-----------------+---------------------+
EM ALGORITHM TO CLUSTER A SET OF DATA
PROGRAM
import sklearn.metrics as metrics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture

# Iris.csv is assumed to hold the four measurements followed by the species name.
dataset = pd.read_csv("Iris.csv")
X = dataset.iloc[:, :-1]
# y is needed for the accuracy score below but missing from the original listing.
label_map = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
y = dataset.iloc[:, -1].map(label_map)

plt.figure(figsize=(8,5))
colormap = np.array(['red', 'lime', 'black'])

# GMM PLOT
gmm = GaussianMixture(n_components=3, random_state=0).fit(X)
y_cluster_gmm = gmm.predict(X)
plt.title('GMM Classification')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y_cluster_gmm])
plt.show()

print('The accuracy score of EM: ', metrics.accuracy_score(y, y_cluster_gmm))
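One caveat, not addressed in the original program: GMM cluster indices are arbitrary, so this accuracy score is only meaningful if the cluster numbering happens to line up with the class encoding. A sketch of the usual fix, remapping clusters to classes with the Hungarian algorithm before scoring:

from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y, y_cluster_gmm)      # classes x clusters
rows, cols = linear_sum_assignment(-cm)      # best class <-> cluster matching
mapping = dict(zip(cols, rows))
y_mapped = [mapping[c] for c in y_cluster_gmm]
print('Accuracy after remapping:', metrics.accuracy_score(y, y_mapped))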
OUTPUT
k-NEAREST NEIGHBOUR ALGORITHM
PROGRAM
# Note: despite the section title, this listing clusters with KMeans.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs   # assumed source of X; not shown in the original

X, y_true = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=50)       # raw data
plt.show()

kmeans = KMeans(n_clusters=4)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
plt.show()
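n_clusters=4 is taken as given here; when the cluster count is unknown, a common heuristic (not part of the original program) is to plot the inertia over a range of k and look for the elbow:

inertias = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 10)]
plt.plot(range(1, 10), inertias, marker='o')
plt.xlabel('k')
plt.ylabel('inertia')
plt.show()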
OUTPUT
DECISION TREE- PRUNED AND UNPRUNED TREE
PROGRAM
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.model_selection import train_test_split

df = pd.read_csv('diabetes.csv')
df.head()

X = df.drop(columns=['Outcome'])
y = df['Outcome']
x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y)

# Unpruned tree
clf = tree.DecisionTreeClassifier(random_state=0)
clf.fit(x_train, y_train)
y_train_pred = clf.predict(x_train)
y_test_pred = clf.predict(x_test)

plt.figure(figsize=(8,8))
features = X.columns                     # must exclude the target column
classes = ['No diabetes', 'Diabetes']    # display labels; assumed, not in the original
tree.plot_tree(clf, feature_names=features, class_names=classes, filled=True)
plt.show()

# Pruned tree (cost-complexity pruning)
clf_ = tree.DecisionTreeClassifier(random_state=0, ccp_alpha=0.020)
clf_.fit(x_train, y_train)
y_train_pred = clf_.predict(x_train)
y_test_pred = clf_.predict(x_test)

plt.figure(figsize=(8,8))
tree.plot_tree(clf_, feature_names=features, class_names=classes, filled=True)
plt.show()
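The pruning strength ccp_alpha=0.020 is used as given; one way to choose it (a sketch, not from the original program) is scikit-learn's cost-complexity pruning path, which lists the effective alphas at which the unpruned tree would lose a node:

path = clf.cost_complexity_pruning_path(x_train, y_train)
print(path.ccp_alphas)
print(path.impurities)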
OUTPUT
NEURAL NETWORK USING BACKPROPAGATION ALGORITHM
PROGRAM
import numpy as np

# The training data is not shown in the original listing; the values below
# are recovered from the normalized "Input" in the OUTPUT section.
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)

X = X/np.amax(X, axis=0)    # normalize features column-wise
y_scaled = y/100            # scale targets into the sigmoid's (0, 1) range

def sigmoid(x):
    return 1/(1 + np.exp(-x))

def derivatives_sigmoid(x):
    return x * (1 - x)

epoch = 5000
lr = 0.1
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward pass
    hinp1 = np.dot(X, wh)
    hinp = hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1 = np.dot(hlayer_act, wout)
    outinp = outinp1 + bout
    output = sigmoid(outinp)
    # backpropagation of the error
    EO = y_scaled - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    # (bias updates for bh/bout are omitted, as in the original listing)

print("Input:\n" + str(X))
print("Actual Output:\n" + str(y))
print("Predicted Output:\n" + str(output))
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[92.]
[86.]
[89.]]
Predicted Output:
[[0.88473854]
[0.86335851]
[0.88398985]]
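Note the scale difference: the network is trained on targets scaled into (0, 1), so the predicted values 0.885, 0.863 and 0.884 correspond to roughly 88.5, 86.3 and 88.4 against the actual 92, 86 and 89.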
SUPPORT VECTOR CLASSIFICATION FOR LINEAR KERNEL
PROGRAM
from sklearn import datasets, svm, metrics
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

#load dataset
cancer_data = datasets.load_breast_cancer()
print(cancer_data.data[5])
print(cancer_data.data.shape)
#target set
print(cancer_data.target)

# The train/test split is missing from the original listing; these
# parameters are assumptions.
X_train, X_test, y_train, y_test = train_test_split(
    cancer_data.data, cancer_data.target, test_size=0.3, random_state=109)

#create a classifier
cls = svm.SVC(kernel="linear")
cls.fit(X_train, y_train)
pred = cls.predict(X_test)

#accuracy
print("accuracy:", metrics.accuracy_score(y_test, y_pred=pred))
#precision score
print("precision:", metrics.precision_score(y_test, y_pred=pred))
#recall score
print("recall:", metrics.recall_score(y_test, y_pred=pred))
print(metrics.classification_report(y_test, y_pred=pred))

# Second demo: SVM on the handwritten digits dataset. The classifier and
# training slice are missing from the original; common choices are assumed.
letters = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100)
X, y = letters.data[:-10], letters.target[:-10]   # hold out the last 10 images
clf.fit(X, y)
print(clf.predict(letters.data[:-10]))
plt.imshow(letters.images[6], interpolation='nearest')
plt.show()
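The digits demo above predicts on its own training slice; to check the ten held-out images instead (a small addition, not in the original):

print('Predicted:', clf.predict(letters.data[-10:]))
print('Actual:   ', letters.target[-10:])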
OUTPUT
IMPLEMENT LOGISTIC REGRESSION FOR DIABETES PREDICTION
PROGRAM
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

data = pd.read_csv('lg1.csv')
print(data.columns)
print(data.head())
data.info()

X = data[['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
          'Insulin', 'Pedigree', 'Age']]
y = data['Outcome']
#X = data[['gmat', 'gpa','work_experience']]
#y = data['admitted']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=0)

logistic_regression = LogisticRegression()
logistic_regression.fit(X_train, y_train)
y_pred = logistic_regression.predict(X_test)

# build the matrix before plotting; the original passed the function itself
cm = confusion_matrix(y_test, y_pred)
sn.heatmap(cm, annot=True)
plt.show()
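The program plots only the confusion matrix; a one-line addition (not in the original) reports the overall test accuracy as well:

from sklearn import metrics
print('Accuracy:', metrics.accuracy_score(y_test, y_pred))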
OUTPUT