
AIDS LABS

2110030359
B Chihnita Reddy

This document provides code examples for several search and optimization algorithms: breadth-first search (BFS), depth-first search (DFS), depth-limited search (DLS), A* search, iterative deepening depth-first search (IDDFS), hill climbing, simulated annealing, and a genetic algorithm. For each algorithm, the implementation is given along with example output on a sample graph or objective function.

BFS:
import collections

# BFS algorithm
def bfs(graph, root):
    visited, queue = set(), collections.deque([root])
    visited.add(root)

    while queue:
        # Dequeue a vertex from the queue
        vertex = queue.popleft()
        print(str(vertex) + " ", end="")

        # If not visited, mark it as visited and enqueue it
        for neighbour in graph[vertex]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

if __name__ == '__main__':
    graph = {0: [1, 2], 1: [2], 2: [3], 3: [1, 2]}
    print("Following is Breadth First Traversal: ")
    bfs(graph, 0)

OUTPUT:
Following is Breadth First Traversal: 0 1 2 3
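A minimal usage sketch, assuming the graph defined above: running bfs from a different root only prints the vertices reachable from it (vertex 0 is not reachable from 2 in this directed graph).

print("BFS from vertex 2: ", end="")
bfs(graph, 2)  # expected to print: 2 3 1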

DFS:
def dfs(graph, start):
    visited = set()
    stack = [start]

    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            # push only the neighbours that have not been visited yet
            stack.extend(graph[vertex] - visited)

    return visited

graph = {
    'A': {'B', 'C'},
    'B': {'A', 'D', 'E'},
    'C': {'A', 'F'},
    'D': {'B'},
    'E': {'B', 'F'},
    'F': {'C', 'E'}
}

start_vertex = 'A'

visited = dfs(graph, start_vertex)
print(visited)

OUTPUT:
{'E', 'B', 'A', 'D', 'C', 'F'}
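Note that dfs returns a set, so the printed order above is arbitrary and can differ between runs. A minimal sketch of a variant that also records the order in which vertices are first visited, assuming the same adjacency-set graph:

def dfs_order(graph, start):
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)  # record the first-visit order
            stack.extend(graph[vertex] - visited)
    return order

print(dfs_order(graph, 'A'))  # e.g. ['A', 'C', 'F', 'E', 'B', 'D'] (order may vary)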

DLS:
def dls(graph, start, goal, depth):
    if start == goal:
        return [start]
    if depth == 0:
        return []

    for neighbor in graph[start]:
        path = dls(graph, neighbor, goal, depth - 1)
        if path:
            return [start] + path

    return []

graph = {
    'A': {'B', 'C'},
    'B': {'A', 'D', 'E'},
    'C': {'A', 'F'},
    'D': {'B'},
    'E': {'B', 'F'},
    'F': {'C', 'E'}
}

start_vertex = 'A'
goal_vertex = 'F'
max_depth = 3

path = dls(graph, start_vertex, goal_vertex, max_depth)
print(path)

OUTPUT:
['A', 'C', 'F']
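Because the neighbour sets are unordered, the path returned at this depth limit can vary between runs: both ['A', 'C', 'F'] and ['A', 'B', 'E', 'F'] stay within depth 3. A quick check of this, assuming the variables above:

# either route fits within the depth limit; which one is found depends on set iteration order
print(path in (['A', 'C', 'F'], ['A', 'B', 'E', 'F']))  # True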
A* SEARCH:
import heapq

def astar(graph, start, goal, heuristic):
    queue = [(0, start)]
    visited = set()
    parent = {}
    g_score = {start: 0}
    f_score = {start: heuristic(start, goal)}

    while queue:
        (cost, current) = heapq.heappop(queue)

        if current == goal:
            # reconstruct the path by walking back through the parent links
            path = [current]
            while current in parent:
                current = parent[current]
                path.append(current)
            path.reverse()
            return path

        if current in visited:
            continue
        visited.add(current)

        for neighbor in graph[current]:
            if neighbor in visited:
                continue

            tentative_g_score = g_score[current] + graph[current][neighbor]

            if neighbor not in g_score or tentative_g_score < g_score[neighbor]:
                parent[neighbor] = current
                g_score[neighbor] = tentative_g_score
                f_score[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heapq.heappush(queue, (f_score[neighbor], neighbor))

    return []

graph = {
    'A': {'B': 2, 'C': 3},
    'B': {'A': 2, 'D': 4, 'E': 1},
    'C': {'A': 3, 'F': 7},
    'D': {'B': 4},
    'E': {'B': 1, 'F': 5},
    'F': {'C': 7, 'E': 5}
}

start_vertex = 'A'
goal_vertex = 'F'
heuristic = lambda x, y: abs(ord(x) - ord(y))

path = astar(graph, start_vertex, goal_vertex, heuristic)
print(path)

OUTPUT:
['A', 'B', 'E', 'F']
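As a quick sanity check, assuming the weighted graph above, the total cost of the returned path can be summed directly from the edge weights:

# A-B (2) + B-E (1) + E-F (5) = 8
cost = sum(graph[a][b] for a, b in zip(path, path[1:]))
print(cost)  # 8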

IDDFS (ITERATIVE DEEPENING DEPTH-FIRST SEARCH):
def iddfs(graph, start, goal, max_depth):
    # run depth-limited search with progressively larger depth limits
    for depth in range(max_depth):
        path = dls(graph, start, goal, depth)
        if path:
            return path
    return []

def dls(graph, start, goal, depth):
    if start == goal:
        return [start]
    if depth == 0:
        return []

    for neighbor in graph[start]:
        path = dls(graph, neighbor, goal, depth - 1)
        if path:
            return [start] + path

    return []

graph = {
    'A': {'B', 'C'},
    'B': {'A', 'D', 'E'},
    'C': {'A', 'F'},
    'D': {'B'},
    'E': {'B', 'F'},
    'F': {'C', 'E'}
}

start_vertex = 'A'
goal_vertex = 'F'
max_depth = 5

path = iddfs(graph, start_vertex, goal_vertex, max_depth)
print(path)
OUTPUT:
['A', 'C', 'F']
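To see what iddfs does internally, the depth-limited search can be printed at each increasing limit, assuming the graph and variables above; for this graph the first non-empty path appears at depth 2:

# iterative deepening: try depth limits 0, 1, 2, ... until a path is found
for d in range(max_depth):
    print(d, dls(graph, start_vertex, goal_vertex, d))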

HILL CLIMBING:
# hill climbing search of a one-dimensional objective function
from numpy import asarray
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed

# objective function
def objective(x):
    return x[0]**2.0

# hill climbing local search algorithm
def hillclimbing(objective, bounds, n_iterations, step_size):
    # generate an initial point
    solution = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # evaluate the initial point
    solution_eval = objective(solution)
    # run the hill climb
    for i in range(n_iterations):
        # take a step
        candidate = solution + randn(len(bounds)) * step_size
        # evaluate candidate point
        candidate_eval = objective(candidate)
        # check if we should keep the new point
        if candidate_eval <= solution_eval:
            # store the new point
            solution, solution_eval = candidate, candidate_eval
            # report progress
            print('>%d f(%s) = %.5f' % (i, solution, solution_eval))
    return [solution, solution_eval]

# seed the pseudorandom number generator
seed(5)
# define range for input
bounds = asarray([[-5.0, 5.0]])
# define the total iterations
n_iterations = 1000
# define the maximum step size
step_size = 0.1
# perform the hill climbing search
best, score = hillclimbing(objective, bounds, n_iterations, step_size)
print('Done!')
print('f(%s) = %f' % (best, score))
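Since the objective x**2 over [-5, 5] has its minimum at x = 0, and the acceptance test only ever keeps equal-or-better points, a simple way to gauge the final result is its distance from that optimum (a minimal sketch, assuming the run above has finished):

# distance of the best point found from the known optimum at x = 0
print('distance from optimum: %.5f' % abs(best[0]))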

OUTPUT:

SA:
# simulated annealing search of a one-dimensional objective function
from numpy import asarray
from numpy import exp
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed

# objective function
def objective(x):
    return x[0]**2.0

# simulated annealing algorithm
def simulated_annealing(objective, bounds, n_iterations, step_size, temp):
    # generate an initial point
    best = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # evaluate the initial point
    best_eval = objective(best)
    # current working solution
    curr, curr_eval = best, best_eval
    # run the algorithm
    for i in range(n_iterations):
        # take a step
        candidate = curr + randn(len(bounds)) * step_size
        # evaluate candidate point
        candidate_eval = objective(candidate)
        # check for new best solution
        if candidate_eval < best_eval:
            # store new best point
            best, best_eval = candidate, candidate_eval
            # report progress
            print('>%d f(%s) = %.5f' % (i, best, best_eval))
        # difference between candidate and current point evaluation
        diff = candidate_eval - curr_eval
        # calculate temperature for current epoch
        t = temp / float(i + 1)
        # calculate metropolis acceptance criterion
        metropolis = exp(-diff / t)
        # check if we should keep the new point
        if diff < 0 or rand() < metropolis:
            # store the new current point
            curr, curr_eval = candidate, candidate_eval
    return [best, best_eval]

# seed the pseudorandom number generator
seed(1)
# define range for input
bounds = asarray([[-5.0, 5.0]])
# define the total iterations
n_iterations = 1000
# define the maximum step size
step_size = 0.1
# initial temperature
temp = 10
# perform the simulated annealing search
best, score = simulated_annealing(objective, bounds, n_iterations, step_size, temp)
print('Done!')
print('f(%s) = %f' % (best, score))
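For intuition, the acceptance test above is the Metropolis criterion: a worse candidate (diff > 0) is accepted with probability exp(-diff / t), which shrinks as the temperature decays. A minimal numeric illustration (the values are made up, not taken from a run):

from numpy import exp

diff = 0.5                    # hypothetical worsening of the objective
for i in (0, 9, 99):          # early, mid and late iterations
    t = 10 / float(i + 1)     # same temperature schedule as above (temp = 10)
    print(i, exp(-diff / t))  # acceptance probability drops as t falls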

OUTPUT:

GENETIC ALGO:
# genetic algorithm search of the one max optimization problem
from numpy.random import randint
from numpy.random import rand

# objective function
def onemax(x):
    return -sum(x)

# tournament selection
def selection(pop, scores, k=3):
    # first random selection
    selection_ix = randint(len(pop))
    for ix in randint(0, len(pop), k-1):
        # check if better (e.g. perform a tournament)
        if scores[ix] < scores[selection_ix]:
            selection_ix = ix
    return pop[selection_ix]

# crossover two parents to create two children
def crossover(p1, p2, r_cross):
    # children are copies of parents by default
    c1, c2 = p1.copy(), p2.copy()
    # check for recombination
    if rand() < r_cross:
        # select crossover point that is not on the end of the string
        pt = randint(1, len(p1)-2)
        # perform crossover
        c1 = p1[:pt] + p2[pt:]
        c2 = p2[:pt] + p1[pt:]
    return [c1, c2]

# mutation operator
def mutation(bitstring, r_mut):
    for i in range(len(bitstring)):
        # check for a mutation
        if rand() < r_mut:
            # flip the bit
            bitstring[i] = 1 - bitstring[i]

# genetic algorithm
def genetic_algorithm(objective, n_bits, n_iter, n_pop, r_cross, r_mut):
    # initial population of random bitstrings
    pop = [randint(0, 2, n_bits).tolist() for _ in range(n_pop)]
    # keep track of best solution
    best, best_eval = 0, objective(pop[0])
    # enumerate generations
    for gen in range(n_iter):
        # evaluate all candidates in the population
        scores = [objective(c) for c in pop]
        # check for new best solution
        for i in range(n_pop):
            if scores[i] < best_eval:
                best, best_eval = pop[i], scores[i]
                print(">%d, new best f(%s) = %.3f" % (gen, pop[i], scores[i]))
        # select parents
        selected = [selection(pop, scores) for _ in range(n_pop)]
        # create the next generation
        children = list()
        for i in range(0, n_pop, 2):
            # get selected parents in pairs
            p1, p2 = selected[i], selected[i+1]
            # crossover and mutation
            for c in crossover(p1, p2, r_cross):
                # mutation
                mutation(c, r_mut)
                # store for next generation
                children.append(c)
        # replace population
        pop = children
    return [best, best_eval]

# define the total iterations
n_iter = 100
# bits
n_bits = 20
# define the population size
n_pop = 100
# crossover rate
r_cross = 0.9
# mutation rate
r_mut = 1.0 / float(n_bits)
# perform the genetic algorithm search
best, score = genetic_algorithm(onemax, n_bits, n_iter, n_pop, r_cross, r_mut)
print('Done!')
print('f(%s) = %f' % (best, score))
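Because onemax returns -sum(x), the search is framed as minimization and the best attainable score for n_bits = 20 is -20 (an all-ones bitstring). A quick check of the objective on its own:

print(onemax([1] * 20))  # -20, the best (lowest) possible score
print(onemax([0] * 20))  # 0, the worst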

OUTPUT:
