AI Lab manual-1
# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, '5') # function calling
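The bfs function and the graph and visited structures used by this driver code do not appear above. A minimal sketch of the usual queue-based breadth-first search over an adjacency-list dictionary is given below; the contents of graph are an assumption, and in the full program these definitions would precede the driver code.

from collections import deque

# Adjacency list (assumed); node '5' is the start node used by the driver code.
graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}
visited = []          # order in which nodes are visited

def bfs(visited, graph, node):
    visited.append(node)
    queue = deque([node])
    while queue:
        m = queue.popleft()           # take the oldest discovered node
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)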
dict[flatten] = 0
return self.get_paths(dict)

results = []
pos_0 = node.index(0)
for move in moves[pos_0]:
    new_node = list(node)
    new_node[move], new_node[pos_0] = new_node[pos_0], new_node[move]
    results.append(tuple(new_node))
return results

ob = Solution()
matrix = [
    [3, 1, 2],
    [4, 7, 5],
    [6, 8, 0]
]
print(ob.solve(matrix))
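Only fragments of the Solution class survive above: the tail of solve() and the body of the successor-generating method. A minimal sketch of a complete breadth-first 8-puzzle solver consistent with those fragments and with the driver code follows; the helper names get_paths and find_next and the goal state (0, 1, 2, 3, 4, 5, 6, 7, 8) are assumptions, and in the full program the class definition precedes the driver code.

class Solution:
    def solve(self, board):
        # Flatten the 3x3 board into a tuple so states can be used as dict keys.
        flatten = []
        for row in board:
            flatten += row
        flatten = tuple(flatten)
        dict = {}              # maps each reached state to its BFS depth (name kept from the fragment)
        dict[flatten] = 0
        if flatten == (0, 1, 2, 3, 4, 5, 6, 7, 8):
            return 0
        return self.get_paths(dict)

    def get_paths(self, dict):
        cnt = 0
        while True:
            # All states first reached at the current depth.
            current_nodes = [x for x in dict if dict[x] == cnt]
            if len(current_nodes) == 0:
                return -1      # goal is unreachable
            for node in current_nodes:
                for move in self.find_next(node):
                    if move not in dict:
                        dict[move] = cnt + 1
                    if move == (0, 1, 2, 3, 4, 5, 6, 7, 8):
                        return cnt + 1
            cnt += 1

    def find_next(self, node):
        # Indices the blank (0) can move to from each position of the flat board.
        moves = {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 5],
            3: [0, 4, 6],
            4: [1, 3, 5, 7],
            5: [2, 4, 8],
            6: [3, 7],
            7: [4, 6, 8],
            8: [5, 7],
        }
        results = []
        pos_0 = node.index(0)
        for move in moves[pos_0]:
            new_node = list(node)
            new_node[move], new_node[pos_0] = new_node[pos_0], new_node[move]
            results.append(tuple(new_node))
        return results

For the matrix in the driver code this should print 4, the minimum number of moves needed to reach the ordered goal state.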
4. Write a program to implement the N-Queens problem using Python.
N = 4

def printSolution(board):
    for i in range(N):
        for j in range(N):
            print(board[i][j], end=' ')
        print()

def isSafe(board, row, col):
    # Check this row on the left side.
    for i in range(col):
        if board[row][i] == 1:
            return False
    # Check the upper diagonal on the left side.
    for i, j in zip(range(row, -1, -1), range(col, -1, -1)):
        if board[i][j] == 1:
            return False
    # Check the lower diagonal on the left side.
    for i, j in zip(range(row, N, 1), range(col, -1, -1)):
        if board[i][j] == 1:
            return False
    return True

def solveNQUtil(board, col):
    # All queens are placed.
    if col >= N:
        return True
    # Try placing a queen in each row of this column.
    for i in range(N):
        if isSafe(board, i, col):
            board[i][col] = 1
            if solveNQUtil(board, col + 1) == True:
                return True
            board[i][col] = 0   # backtrack
    return False
def solveNQ():
    # Build an empty N x N board and place queens column by column.
    board = [[0 for _ in range(N)] for _ in range(N)]
    if solveNQUtil(board, 0) == False:
        print("Solution does not exist")
        return False
    printSolution(board)
    return True

solveNQ()
# Fragment of the minimax with alpha-beta pruning exercise (the maximizing and
# minimizing branches); a fuller sketch follows below.
#     best = MIN
#     for i in range(0, 2):
# else:
#     best = MAX
#     for i in range(0, 2):
# if __name__ == "__main__":
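The stray best = MIN / best = MAX loops above are all that remains of the minimax with alpha-beta pruning exercise. A minimal sketch of that program is given below, assuming the usual two-child game tree of depth 3; the MAX/MIN sentinels and the leaf values list are assumptions.

MAX, MIN = 1000, -1000

def minimax(depth, nodeIndex, maximizingPlayer, values, alpha, beta):
    # Leaves sit at depth 3 in the assumed game tree.
    if depth == 3:
        return values[nodeIndex]
    if maximizingPlayer:
        best = MIN
        for i in range(0, 2):
            val = minimax(depth + 1, nodeIndex * 2 + i, False, values, alpha, beta)
            best = max(best, val)
            alpha = max(alpha, best)
            if beta <= alpha:      # prune the remaining children
                break
        return best
    else:
        best = MAX
        for i in range(0, 2):
            val = minimax(depth + 1, nodeIndex * 2 + i, True, values, alpha, beta)
            best = min(best, val)
            beta = min(beta, best)
            if beta <= alpha:      # prune the remaining children
                break
        return best

if __name__ == "__main__":
    values = [3, 5, 6, 9, 1, 2, 0, -1]   # assumed leaf values
    print("The optimal value is:", minimax(0, 0, True, values, MIN, MAX))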
Output:
Chance of Canary
X is Shrimps
Color Is 1.Green 2.Yellow
Select Option 2
yes it is Canary And Color Is Yellow
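The dialogue above appears to be sample output from the forward-chaining (rule-based) exercise, whose listing is not included. A minimal sketch of a program that produces a similar interaction is given below; the fact, animal, and colour lists and all prompt strings are assumptions.

# Assumed knowledge base: croaking / eating flies points to a frog (green),
# eating shrimps / singing points to a canary (yellow).
facts = ["Croaks", "Eats Flies", "Shrimps", "Sings"]
animals = ["Frog", "Canary"]
colors = ["Green", "Yellow"]

def forward_chain():
    print("X is 1.Croaks 2.Eats Flies 3.Shrimps 4.Sings")
    x = int(input("Select One: "))
    if x in (1, 2):
        print("Chance of Frog")
    elif x in (3, 4):
        print("Chance of Canary")
    else:
        print("Invalid option")
        return
    print("X is", facts[x - 1])
    print("Color Is 1.Green 2.Yellow")
    k = int(input("Select Option: "))
    if k == 1 and x in (1, 2):
        print("yes it is", animals[0], "And Color Is", colors[0])
    elif k == 2 and x in (3, 4):
        print("yes it is", animals[1], "And Color Is", colors[1])
    else:
        print("Invalid knowledge base entry")

forward_chain()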
8. Write a program to implement the KNN algorithm to classify the Iris dataset. Print both correct and wrong predictions.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
data = pd.read_csv('iris_csv.csv')
data.head()
X = data.iloc[:,:-1]
X.head()
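# The steps between loading the features and computing the accuracy are missing
# above. The completion below is a sketch: it assumes the same split and scaling
# used in the SVM program later in this manual (test_size=0.2, random_state=42)
# and k = 5 neighbours.
y = data.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
# Print both correct and wrong predictions, as the exercise asks.
for actual, predicted in zip(np.array(y_test), y_pred):
    result = "Correct" if actual == predicted else "Wrong"
    print(result, "- actual:", actual, "predicted:", predicted)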
acc = accuracy_score(y_test,y_pred)
print("Accuracy",acc)
Output:
Accuracy 1.0
9. Train a linear regression model on a random data sample and plot the graph.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
X = 2 * np.random.rand(100, 1)              # 100 random inputs in [0, 2)
y = 4 + 3 * X + np.random.randn(100, 1)     # targets: y = 4 + 3x plus Gaussian noise
model = LinearRegression()
model.fit(X, y)
X_new = np.array([[0], [2]])                # two endpoints for drawing the fitted line
y_pred = model.predict(X_new)
plt.scatter(X, y, color='blue')
plt.plot(X_new, y_pred, color='red')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Linear Regression')
plt.show()
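Since the data were generated as y = 4 + 3x + noise, the fitted parameters can be checked against the generating values; an optional two-line addition:

# Should print values close to 4 (intercept) and 3 (slope).
print("Intercept:", model.intercept_)
print("Slope:", model.coef_)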
10. Implement the naïve Bayesian classifier for a sample training data set stored as a .csv file. Compute the accuracy of the classifier, considering a few test data sets.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
data = pd.read_csv('iris_csv.csv')
data.head()
X = data.iloc[:,:-1]
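# The rest of this program is missing above. The completion below is a sketch,
# assuming the same split and scaling as the SVM program that follows
# (test_size=0.2, random_state=42).
y = data.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
nb = GaussianNB()
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)
print("Accuracy", accuracy_score(y_test, y_pred))
print("Confusion matrix")
print(confusion_matrix(y_test, y_pred))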
Output:
Accuracy 1.0
Confusion matrix
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
11. Demonstrate the working of the SVM classifier for a suitable data set (e.g., the Iris dataset).
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.datasets import load_iris
data = pd.read_csv('iris_csv.csv')
data.head()
X = data.iloc[:,:-1]
y = data.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
svm_classifier = SVC(kernel='linear', random_state=42)
svm_classifier.fit(X_train, y_train)
y_pred = svm_classifier.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
Accuracy: 0.9666666666666667
Confusion Matrix:
[[10 0 0]
[ 0 8 1]
[ 0 0 11]]