AI For DS
2110030359
B Chihnita Reddy
BFS:
import collections

# BFS algorithm
def bfs(graph, root):
    visited, queue = set([root]), collections.deque([root])
    while queue:
        # dequeue a vertex and enqueue its unvisited neighbours
        vertex = queue.popleft()
        print(vertex, end=" ")
        for neighbour in graph[vertex]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

if __name__ == '__main__':
    graph = {0: [1, 2], 1: [2], 2: [3], 3: [1, 2]}
    print("Following is Breadth First Traversal: ")
    bfs(graph, 0)
OUTPUT:
Following is Breadth First Traversal: 0 1 2 3
DFS:
def dfs(graph, start):
    visited = set()
    stack = [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            stack.extend(graph[vertex] - visited)
    return visited

# sample graph as adjacency sets (assumed values; vertices A-F as in the output)
graph = {'A': {'B', 'C'}, 'B': {'A', 'D', 'E'}, 'C': {'A', 'F'},
         'D': {'B'}, 'E': {'B', 'F'}, 'F': {'C', 'E'}}
start_vertex = 'A'
print(dfs(graph, start_vertex))
OUTPUT:
{'E', 'B', 'A', 'D', 'C', 'F'}
DLS:
def dls(graph, start, goal, depth):
    if start == goal:
        return [start]
    if depth == 0:
        return []
    for neighbor in graph[start]:
        path = dls(graph, neighbor, goal, depth - 1)
        if path:
            return [start] + path
    return []

# sample graph as adjacency lists (assumed values; vertices A-F as in later sections)
graph = {'A': ['B', 'C'], 'B': ['D', 'E'], 'C': ['F'],
         'D': [], 'E': ['F'], 'F': []}
start_vertex = 'A'
goal_vertex = 'F'
max_depth = 3
path = dls(graph, start_vertex, goal_vertex, max_depth)
print(path)
A*:
import heapq

def a_star(graph, start, goal, heuristic):
    # priority queue of (f = g + h, vertex, path so far)
    queue = [(heuristic(start, goal), start, [start])]
    visited = set()
    while queue:
        (cost, current, path) = heapq.heappop(queue)
        if current in visited:
            continue
        visited.add(current)
        if current == goal:
            return path
        g = cost - heuristic(current, goal)
        for neighbor, weight in graph[current]:
            f_score = g + weight + heuristic(neighbor, goal)
            heapq.heappush(queue, (f_score, neighbor, path + [neighbor]))
    return []

# sample weighted graph as (neighbor, cost) lists (assumed edge weights)
graph = {'A': [('B', 1), ('C', 4)], 'B': [('D', 3), ('E', 1)],
         'C': [('F', 5)], 'D': [], 'E': [('F', 1)], 'F': []}
start_vertex = 'A'
goal_vertex = 'F'
heuristic = lambda x, y: abs(ord(x) - ord(y))
print(a_star(graph, start_vertex, goal_vertex, heuristic))
OUTPUT:
['A', 'B', 'E', 'F']
IDDFS:
def iddfs(graph, start, goal, max_depth):
    # repeat depth-limited search with growing depth limits
    for depth in range(max_depth + 1):
        path = dls(graph, start, goal, depth)
        if path:
            return path
    return []

# same sample graph as in the DLS section (assumed values)
graph = {'A': ['B', 'C'], 'B': ['D', 'E'], 'C': ['F'], 'D': [], 'E': ['F'], 'F': []}
start_vertex = 'A'
goal_vertex = 'F'
max_depth = 5
path = iddfs(graph, start_vertex, goal_vertex, max_depth)
print(path)
OUTPUT:
['A', 'C', 'F']
HILL CLIMBING:
# hill climbing search of a one-dimensional objective function
from numpy import asarray
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed
# objective function
def objective(x):
    return x[0]**2.0
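The record stops after the objective function, so the search loop and the driver are missing. The sketch below is one way to complete it, continuing from the imports and objective() above; the function name hillclimbing and the values used for the seed, bounds, iteration count and step size are assumptions, not recovered from the original.

# hill climbing local search (sketch; the settings below are assumed)
def hillclimbing(objective, bounds, n_iterations, step_size):
    # generate an initial point inside the bounds
    solution = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # evaluate the initial point
    solution_eval = objective(solution)
    # run the hill climb
    for i in range(n_iterations):
        # take a step
        candidate = solution + randn(len(bounds)) * step_size
        # evaluate candidate point
        candidate_eval = objective(candidate)
        # keep the candidate if it is at least as good
        if candidate_eval <= solution_eval:
            solution, solution_eval = candidate, candidate_eval
            # report progress
            print('>%d f(%s) = %.5f' % (i, solution, solution_eval))
    return [solution, solution_eval]

# seed the pseudorandom number generator (assumed value)
seed(5)
# search interval, iteration count and step size (assumed values)
bounds = asarray([[-5.0, 5.0]])
n_iterations = 1000
step_size = 0.1
# perform the hill climbing search
best, score = hillclimbing(objective, bounds, n_iterations, step_size)
print('Done!')
print('f(%s) = %f' % (best, score))

Accepting candidates with candidate_eval <= solution_eval allows sideways moves, which helps the search cross flat regions of the objective.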
OUTPUT:
SA:
# simulated annealing search of a one-dimensional objective function
from numpy import asarray
from numpy import exp
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed

# objective function
def objective(x):
    return x[0]**2.0

# simulated annealing algorithm
def simulated_annealing(objective, bounds, n_iterations, step_size, temp):
    # generate an initial point
    best = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # evaluate the initial point
    best_eval = objective(best)
    # current working solution
    curr, curr_eval = best, best_eval
    # run the algorithm
    for i in range(n_iterations):
        # take a step
        candidate = curr + randn(len(bounds)) * step_size
        # evaluate candidate point
        candidate_eval = objective(candidate)
        # check for new best solution
        if candidate_eval < best_eval:
            # store new best point
            best, best_eval = candidate, candidate_eval
            # report progress
            print('>%d f(%s) = %.5f' % (i, best, best_eval))
        # difference between candidate and current point evaluation
        diff = candidate_eval - curr_eval
        # calculate temperature for current epoch
        t = temp / float(i + 1)
        # calculate metropolis acceptance criterion
        metropolis = exp(-diff / t)
        # check if we should keep the new point
        if diff < 0 or rand() < metropolis:
            # store the new current point
            curr, curr_eval = candidate, candidate_eval
    return [best, best_eval]
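The driver code that calls simulated_annealing is not present in the record; a minimal sketch is given below, with assumed settings (the seed, bounds, iteration count, step size and initial temperature are illustrative values, not taken from the original).

# seed the pseudorandom number generator (assumed value)
seed(1)
# search interval for the input (assumed)
bounds = asarray([[-5.0, 5.0]])
# total iterations (assumed)
n_iterations = 1000
# maximum step size (assumed)
step_size = 0.1
# initial temperature (assumed)
temp = 10
# perform the simulated annealing search
best, score = simulated_annealing(objective, bounds, n_iterations, step_size, temp)
print('Done!')
print('f(%s) = %f' % (best, score))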
OUTPUT:
GENETIC ALGO:
# genetic algorithm search of the one max optimization problem
from numpy.random import randint
from numpy.random import rand

# tournament selection
def selection(pop, scores, k=3):
    # first random selection
    selection_ix = randint(len(pop))
    for ix in randint(0, len(pop), k-1):
        # check if better (e.g. perform a tournament)
        if scores[ix] < scores[selection_ix]:
            selection_ix = ix
    return pop[selection_ix]

# mutation operator
def mutation(bitstring, r_mut):
    for i in range(len(bitstring)):
        # check for a mutation
        if rand() < r_mut:
            # flip the bit
            bitstring[i] = 1 - bitstring[i]

# genetic algorithm
def genetic_algorithm(objective, n_bits, n_iter, n_pop, r_cross, r_mut):
    # initial population of random bitstrings
    pop = [randint(0, 2, n_bits).tolist() for _ in range(n_pop)]
    # keep track of best solution
    best, best_eval = 0, objective(pop[0])
    # enumerate generations
    for gen in range(n_iter):
        # evaluate all candidates in the population
        scores = [objective(c) for c in pop]
        # check for new best solution
        for i in range(n_pop):
            if scores[i] < best_eval:
                best, best_eval = pop[i], scores[i]
                print(">%d, new best f(%s) = %.3f" % (gen, pop[i], scores[i]))
        # select parents
        selected = [selection(pop, scores) for _ in range(n_pop)]
        # create the next generation
        children = list()
        for i in range(0, n_pop, 2):
            # get selected parents in pairs and apply crossover and mutation
            p1, p2 = selected[i], selected[i+1]
            for c in crossover(p1, p2, r_cross):
                mutation(c, r_mut)
                children.append(c)
        # replace population
        pop = children
    return [best, best_eval]
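The fitness function, the crossover operator and the driver code are not in the record above, although genetic_algorithm relies on them. The sketch below supplies one possible version; the onemax name and every hyperparameter value (n_iter, n_bits, n_pop, r_cross, r_mut) are assumptions, not recovered from the original.

# objective function: one max, negated so that lower scores are better
def onemax(x):
    return -sum(x)

# crossover two parents to create two children
def crossover(p1, p2, r_cross):
    # children are copies of parents by default
    c1, c2 = p1.copy(), p2.copy()
    # check for recombination
    if rand() < r_cross:
        # select crossover point that is not on the end of the string
        pt = randint(1, len(p1)-2)
        # perform crossover
        c1 = p1[:pt] + p2[pt:]
        c2 = p2[:pt] + p1[pt:]
    return [c1, c2]

# total generations (assumed)
n_iter = 100
# bits per candidate solution (assumed)
n_bits = 20
# population size (assumed)
n_pop = 100
# crossover rate (assumed)
r_cross = 0.9
# mutation rate
r_mut = 1.0 / float(n_bits)
# perform the genetic algorithm search
best, score = genetic_algorithm(onemax, n_bits, n_iter, n_pop, r_cross, r_mut)
print('Done!')
print('f(%s) = %f' % (best, score))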
OUTPUT: