DAA File

LIST OF EXPERIMENTS

1. Sort a given set of elements using the Quicksort method and determine the time
required to sort the elements.

2. Repeat the experiment for different values of n, the number of elements in the list to
be sorted, and plot a graph of the time taken versus n.

3. The elements can be read from a file or can be generated using the random number
generator.

4. Implement a parallelized Merge Sort algorithm to sort a given set of elements and
determine the time required to sort the elements.

5. Repeat the experiment for different values of n, the number of elements in the list to
be sorted, and plot a graph of the time taken versus n.

6. The elements can be read from a file or can be generated using the random number
generator.

7. Obtain the Topological ordering of vertices in a given digraph.

8. Compute the transitive closure of a given directed graph using Warshall's
algorithm.

9. Implement 0/1 Knapsack problem using Dynamic Programming.

10. From a given vertex in a weighted connected graph, find shortest paths to other
vertices using Dijkstra's algorithm.

11. Find Minimum Cost Spanning Tree of a given undirected graph using Kruskal's
algorithm.

12. Print all the nodes reachable from a given starting node in a digraph using BFS
method.

13. Check whether a given graph is connected or not using DFS method.

14. Find Minimum Cost Spanning Tree of a given undirected graph using Prim’s
algorithm.
1. Sort a given set of elements using the Quicksort method and determine the
time required to sort the elements.

import time

def quicksort(arr):
    # Recursive quicksort using the middle element as the pivot
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = quicksort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return sorted_arr, time_taken

# Example usage:
arr = [3, 6, 8, 10, 1, 2, 1]
sorted_arr, time_taken = sort_and_measure_time(arr)
print(f"Sorted Array: {sorted_arr}")
print(f"Time Taken: {time_taken} seconds")
2. Repeat the experiment for different values of n, the number of elements in the
list to be sorted, and plot a graph of the time taken versus n.

import time
import numpy as np
import matplotlib.pyplot as plt

def quicksort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = quicksort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return time_taken

def run_experiment(sizes):
    times = []
    for size in sizes:
        arr = np.random.randint(0, 10000, size)  # Generate a random array of the given size
        time_taken = sort_and_measure_time(arr)
        times.append(time_taken)
    return times

# Define the sizes of the arrays to test
sizes = [100, 500, 1000, 2000, 5000, 10000, 20000, 50000]
times = run_experiment(sizes)

# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(sizes, times, marker='o', linestyle='-', color='b')
plt.xlabel('Number of Elements (n)')
plt.ylabel('Time Taken (seconds)')
plt.title('Time Taken vs Number of Elements for Quicksort')
plt.grid(True)
plt.show()
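
A single measurement per size is noisy: other processes and the garbage collector can skew any one run. One possible refinement is to average several trials per size (the trials count here is arbitrary):

def average_time(size, trials=5):
    # Average the time over several random arrays of the same size
    total = 0.0
    for _ in range(trials):
        arr = np.random.randint(0, 10000, size)
        total += sort_and_measure_time(arr)
    return total / trials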
3. The elements can be read from a file or can be generated using the random
number generator.

import time
import random
import matplotlib.pyplot as plt

def quicksort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = quicksort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return sorted_arr, time_taken

def read_elements_from_file(filename):
    with open(filename, 'r') as file:
        elements = [int(line.strip()) for line in file]
    return elements

def generate_random_elements(n):
    return [random.randint(0, 1000000) for _ in range(n)]

# Choose mode: 'file' or 'random'
mode = 'random'  # Change to 'file' if reading from a file
filename = 'elements.txt'  # File containing elements if mode is 'file'

test_sizes = [10, 100, 1000, 10000, 100000]
results = []

for n in test_sizes:
    if mode == 'file':
        arr = read_elements_from_file(filename)[:n]
    else:
        arr = generate_random_elements(n)

    _, time_taken = sort_and_measure_time(arr)
    results.append((n, time_taken))

# Plot the results
sizes, times = zip(*results)
plt.figure(figsize=(10, 6))
plt.plot(sizes, times, marker='o')
plt.title('Quicksort Time Complexity')
plt.xlabel('Number of Elements (n)')
plt.ylabel('Time Taken (seconds)')
plt.grid(True)
plt.xscale('log')  # Log scale for better visualization
plt.yscale('log')
plt.show()
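
The 'file' mode assumes elements.txt holds one integer per line. A minimal sketch for creating such a file (the count and value range simply mirror the random generator above):

import random

with open('elements.txt', 'w') as f:
    for _ in range(100000):
        f.write(f"{random.randint(0, 1000000)}\n")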
4. Implement a parallelized Merge Sort algorithm to sort a given set of elements
and determine the time required to sort the elements.

import time
import random
from concurrent.futures import ProcessPoolExecutor
import matplotlib.pyplot as plt

def merge_sort(arr):
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    return merge(left, right)

def merge(left, right):
    # Merge two sorted lists into one sorted list
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result

def parallel_merge_sort(arr):
    # Sort the two halves in separate processes, then merge the results
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    with ProcessPoolExecutor() as executor:
        left_future = executor.submit(merge_sort, arr[:mid])
        right_future = executor.submit(merge_sort, arr[mid:])
        left = left_future.result()
        right = right_future.result()
    return merge(left, right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = parallel_merge_sort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return sorted_arr, time_taken

def generate_random_elements(n):
    return [random.randint(0, 1000000) for _ in range(n)]

# The __main__ guard is required because ProcessPoolExecutor re-imports this
# module in its worker processes on platforms that spawn rather than fork
if __name__ == '__main__':
    # Experiment with different values of n
    test_sizes = [10, 100, 1000, 10000, 100000]
    results = []

    for n in test_sizes:
        arr = generate_random_elements(n)
        _, time_taken = sort_and_measure_time(arr)
        results.append((n, time_taken))

    # Plot the results
    sizes, times = zip(*results)
    plt.figure(figsize=(10, 6))
    plt.plot(sizes, times, marker='o')
    plt.title('Parallelized Merge Sort Time Complexity')
    plt.xlabel('Number of Elements (n)')
    plt.ylabel('Time Taken (seconds)')
    plt.grid(True)
    plt.xscale('log')  # Log scale for better visualization
    plt.yscale('log')
    plt.show()
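
For small n, spawning worker processes and pickling the list halves usually costs more than the sort itself, which is why the measured curve often favors the sequential version at the low end. A common refinement is a hybrid that falls back to the sequential sort below a size threshold (the threshold value here is illustrative, not tuned):

def hybrid_merge_sort(arr, threshold=50000):
    # Use plain merge sort when the input is too small for
    # process overhead to pay off
    if len(arr) <= threshold:
        return merge_sort(arr)
    return parallel_merge_sort(arr)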
5. Repeat the experiment for different values of n, the number of elements in the
list to be sorted, and plot a graph of the time taken versus n.

import time
import random
from concurrent.futures import ProcessPoolExecutor
import matplotlib.pyplot as plt

def merge_sort(arr):
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    return merge(left, right)

def merge(left, right):
    # Merge two sorted lists into one sorted list
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result

def parallel_merge_sort(arr):
    # Sort the two halves in separate processes, then merge the results
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    with ProcessPoolExecutor() as executor:
        left_future = executor.submit(merge_sort, arr[:mid])
        right_future = executor.submit(merge_sort, arr[mid:])
        left = left_future.result()
        right = right_future.result()
    return merge(left, right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = parallel_merge_sort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return sorted_arr, time_taken

def generate_random_elements(n):
    return [random.randint(0, 1000000) for _ in range(n)]

# The __main__ guard is required because ProcessPoolExecutor re-imports this
# module in its worker processes on platforms that spawn rather than fork
if __name__ == '__main__':
    # Experiment with different values of n
    test_sizes = [10, 100, 1000, 10000, 100000]
    results = []

    for n in test_sizes:
        arr = generate_random_elements(n)
        _, time_taken = sort_and_measure_time(arr)
        results.append((n, time_taken))

    # Plot the results
    sizes, times = zip(*results)
    plt.figure(figsize=(10, 6))
    plt.plot(sizes, times, marker='o')
    plt.title('Parallelized Merge Sort Time Complexity')
    plt.xlabel('Number of Elements (n)')
    plt.ylabel('Time Taken (seconds)')
    plt.grid(True)
    plt.xscale('log')  # Log scale for better visualization
    plt.yscale('log')
    plt.show()
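
To make the comparison explicit, one possible extension plots the sequential and parallel sorts on the same axes (reusing merge_sort, parallel_merge_sort, and generate_random_elements from above):

if __name__ == '__main__':
    sizes = [10, 100, 1000, 10000, 100000]
    seq_times, par_times = [], []
    for n in sizes:
        arr = generate_random_elements(n)
        # Time the sequential sort, then the parallel sort, on the same input
        t0 = time.time()
        merge_sort(arr)
        seq_times.append(time.time() - t0)
        t0 = time.time()
        parallel_merge_sort(arr)
        par_times.append(time.time() - t0)
    plt.plot(sizes, seq_times, marker='o', label='Sequential')
    plt.plot(sizes, par_times, marker='s', label='Parallel')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend()
    plt.show()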
6. The elements can be read from a file or can be generated using the random
number generator.

import time
import random
from concurrent.futures import ProcessPoolExecutor
import matplotlib.pyplot as plt

def merge_sort(arr):
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    return merge(left, right)

def merge(left, right):
    # Merge two sorted lists into one sorted list
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result

def parallel_merge_sort(arr):
    # Sort the two halves in separate processes, then merge the results
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    with ProcessPoolExecutor() as executor:
        left_future = executor.submit(merge_sort, arr[:mid])
        right_future = executor.submit(merge_sort, arr[mid:])
        left = left_future.result()
        right = right_future.result()
    return merge(left, right)

def sort_and_measure_time(arr):
    start_time = time.time()
    sorted_arr = parallel_merge_sort(arr)
    end_time = time.time()
    time_taken = end_time - start_time
    return sorted_arr, time_taken

def read_elements_from_file(filename, n):
    with open(filename, 'r') as file:
        elements = [int(line.strip()) for line in file]
    return elements[:n]

def generate_random_elements(n):
    return [random.randint(0, 1000000) for _ in range(n)]

# The __main__ guard is required because ProcessPoolExecutor re-imports this
# module in its worker processes on platforms that spawn rather than fork
if __name__ == '__main__':
    # Choose mode: 'file' or 'random'
    mode = 'random'  # Change to 'file' if reading from a file
    filename = 'elements.txt'  # File containing elements if mode is 'file'

    test_sizes = [10, 100, 1000, 10000, 100000]
    results = []

    for n in test_sizes:
        if mode == 'file':
            arr = read_elements_from_file(filename, n)
        else:
            arr = generate_random_elements(n)

        _, time_taken = sort_and_measure_time(arr)
        results.append((n, time_taken))

    # Plot the results
    sizes, times = zip(*results)
    plt.figure(figsize=(10, 6))
    plt.plot(sizes, times, marker='o')
    plt.title('Parallelized Merge Sort Time Complexity')
    plt.xlabel('Number of Elements (n)')
    plt.ylabel('Time Taken (seconds)')
    plt.grid(True)
    plt.xscale('log')  # Log scale for better visualization
    plt.yscale('log')
    plt.show()
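
One caveat with the 'file' mode: if elements.txt holds fewer than n values, the slice silently returns a shorter list and the plot's x-axis no longer matches the real input sizes. A guarded variant of the reader (the error message is just illustrative):

def read_elements_from_file(filename, n):
    with open(filename, 'r') as file:
        elements = [int(line.strip()) for line in file]
    if len(elements) < n:
        raise ValueError(f"{filename} holds only {len(elements)} values, but {n} were requested")
    return elements[:n]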
7. Obtain the Topological ordering of vertices in a given digraph.

from collections import defaultdict, deque

def topological_sort(graph):
    visited = set()
    stack = deque()

    def dfs(v):
        visited.add(v)
        for neighbor in graph[v]:
            if neighbor not in visited:
                dfs(neighbor)
        stack.appendleft(v)  # Prepend v once all its descendants are placed

    # Iterate over a snapshot of the keys: dfs touches graph[neighbor],
    # which inserts new keys into the defaultdict for sink vertices,
    # and a dict must not grow while being iterated directly
    for vertex in list(graph):
        if vertex not in visited:
            dfs(vertex)

    return list(stack)

# Example usage
graph = defaultdict(list)

# Add edges
edges = [
    ('A', 'C'),
    ('B', 'C'),
    ('C', 'E'),
    ('B', 'D'),
    ('E', 'F'),
    ('D', 'F')
]

for u, v in edges:
    graph[u].append(v)

# Perform topological sort
topological_order = topological_sort(graph)
print("Topological Order:", topological_order)
8. Compute the transitive closure of a given directed graph using Warshall's
algorithm.

def warshall_algorithm(graph):
    n = len(graph)
    closure = [[0] * n for _ in range(n)]

    # Start from the adjacency matrix itself
    for i in range(n):
        for j in range(n):
            closure[i][j] = graph[i][j]

    # Allow k as an intermediate vertex on paths from i to j
    for k in range(n):
        for i in range(n):
            for j in range(n):
                closure[i][j] = closure[i][j] or (closure[i][k] and closure[k][j])

    return closure

# Example usage
# Adjacency matrix representation of the graph
# 0: no edge, 1: edge present
graph = [
    [1, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 1],
    [0, 0, 0, 1]
]

transitive_closure = warshall_algorithm(graph)

print("Transitive Closure:")
for row in transitive_closure:
    print(row)
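
For the matrix above, every vertex reaches all later vertices along the chain 0 -> 1 -> 2 -> 3, so the run should print:

Transitive Closure:
[1, 1, 1, 1]
[0, 1, 1, 1]
[0, 0, 1, 1]
[0, 0, 0, 1]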
9. Implement 0/1 Knapsack problem using Dynamic Programming.

def knapsack(weights, values, capacity):
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]

    # Build the dp table bottom-up: dp[i][w] is the best value using the
    # first i items with capacity w
    for i in range(1, n + 1):
        for w in range(capacity + 1):
            if weights[i - 1] <= w:
                dp[i][w] = max(dp[i - 1][w], dp[i - 1][w - weights[i - 1]] + values[i - 1])
            else:
                dp[i][w] = dp[i - 1][w]

    return dp[n][capacity]

# Example usage
weights = [1, 2, 3, 8, 7, 4]
values = [20, 5, 10, 40, 15, 25]
capacity = 10

max_value = knapsack(weights, values, capacity)
print(f"Maximum value in Knapsack: {max_value}")
10. From a given vertex in a weighted connected graph, find shortest paths to
other vertices using Dijkstra's algorithm.

import heapq

def dijkstra(graph, start):
    # Initialize distances and priority queue
    distances = {vertex: float('infinity') for vertex in graph}
    distances[start] = 0
    priority_queue = [(0, start)]

    while priority_queue:
        current_distance, current_vertex = heapq.heappop(priority_queue)

        # If a node is popped with a distance larger than the recorded
        # distance, it is a stale queue entry; skip it
        if current_distance > distances[current_vertex]:
            continue

        # Check the neighbors
        for neighbor, weight in graph[current_vertex]:
            distance = current_distance + weight

            # Only consider this new path if it's better
            if distance < distances[neighbor]:
                distances[neighbor] = distance
                heapq.heappush(priority_queue, (distance, neighbor))

    return distances

# Example usage
graph = {
    'A': [('B', 1), ('C', 4)],
    'B': [('A', 1), ('C', 2), ('D', 5)],
    'C': [('A', 4), ('B', 2), ('D', 1)],
    'D': [('B', 5), ('C', 1)]
}

start_vertex = 'A'
shortest_paths = dijkstra(graph, start_vertex)

print("Shortest paths from vertex", start_vertex)
for vertex, distance in shortest_paths.items():
    print(f"Distance to {vertex}: {distance}")
11. Find Minimum Cost Spanning Tree of a given undirected graph using
Kruskal's algorithm.

class UnionFind:
    def __init__(self, size):
        self.parent = list(range(size))
        self.rank = [0] * size

    def find(self, u):
        if self.parent[u] != u:
            self.parent[u] = self.find(self.parent[u])  # Path compression
        return self.parent[u]

    def union(self, u, v):
        root_u = self.find(u)
        root_v = self.find(v)

        if root_u != root_v:
            # Union by rank
            if self.rank[root_u] > self.rank[root_v]:
                self.parent[root_v] = root_u
            elif self.rank[root_u] < self.rank[root_v]:
                self.parent[root_u] = root_v
            else:
                self.parent[root_v] = root_u
                self.rank[root_u] += 1

def kruskal(num_vertices, edges):
    # Initialize Union-Find
    union_find = UnionFind(num_vertices)

    # Sort edges based on their weight
    edges.sort(key=lambda x: x[2])

    mst = []
    total_cost = 0

    for u, v, weight in edges:
        # Take the edge only if it joins two different components
        if union_find.find(u) != union_find.find(v):
            union_find.union(u, v)
            mst.append((u, v, weight))
            total_cost += weight

    return mst, total_cost

# Example usage
num_vertices = 4
edges = [
    (0, 1, 10),
    (0, 2, 6),
    (0, 3, 5),
    (1, 3, 15),
    (2, 3, 4)
]

mst, total_cost = kruskal(num_vertices, edges)

print("Edges in the Minimum Spanning Tree:")
for u, v, weight in mst:
    print(f"{u} - {v}: {weight}")

print(f"Total cost of the Minimum Spanning Tree: {total_cost}")


12. Print all the nodes reachable from a given starting node in a digraph using
BFS method.

from collections import deque, defaultdict

def bfs_reachable_nodes(graph, start):
    visited = set()
    queue = deque([start])
    reachable_nodes = []

    while queue:
        current_node = queue.popleft()

        if current_node not in visited:
            visited.add(current_node)
            reachable_nodes.append(current_node)

            # Add all unvisited neighbors to the queue
            for neighbor in graph[current_node]:
                if neighbor not in visited:
                    queue.append(neighbor)

    return reachable_nodes

# Example usage
graph = defaultdict(list)
# Add edges to the graph
edges = [
    ('A', 'B'),
    ('A', 'C'),
    ('B', 'D'),
    ('C', 'D'),
    ('D', 'E'),
    ('E', 'F')
]

for u, v in edges:
    graph[u].append(v)

start_node = 'A'
reachable_nodes = bfs_reachable_nodes(graph, start_node)

print("Nodes reachable from node", start_node, "are:")
print(reachable_nodes)
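
Because BFS expands nodes in order of their hop count from the start, the same traversal can report how many edges away each reachable node is. A small variant (the dist name is just illustrative):

def bfs_with_distances(graph, start):
    # dist maps each reachable node to its edge distance from start
    dist = {start: 0}
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v in graph[u]:
            if v not in dist:
                dist[v] = dist[u] + 1
                queue.append(v)
    return dist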
13. Check whether a given graph is connected or not using DFS method.

from collections import defaultdict

def dfs(graph, node, visited):
    # Mark the current node as visited
    visited.add(node)

    # Recursively visit all unvisited neighbors
    for neighbor in graph[node]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)

def is_connected(graph):
    # Get all nodes in the graph
    nodes = list(graph.keys())

    if not nodes:
        return True  # An empty graph is considered connected

    # Start DFS from the first node
    start_node = nodes[0]
    visited = set()
    dfs(graph, start_node, visited)

    # Check if all nodes were visited
    return len(visited) == len(nodes)

# Example usage
graph = defaultdict(list)
# Add edges to the graph
edges = [
    ('A', 'B'),
    ('A', 'C'),
    ('B', 'D'),
    ('C', 'D'),
    ('E', 'F')
]

for u, v in edges:
    graph[u].append(v)
    graph[v].append(u)  # For undirected graph

is_connected_graph = is_connected(graph)
print("The graph is connected." if is_connected_graph else "The graph is not connected.")
14. Find Minimum Cost Spanning Tree of a given undirected graph using Prim’s
algorithm.

import heapq
from collections import defaultdict

def prim(graph, start):
    # Priority queue to select the minimum weight edge
    priority_queue = []

    # Dictionary to store the minimum cost to reach each vertex
    min_cost = {vertex: float('inf') for vertex in graph}
    min_cost[start] = 0

    # Dictionary mapping each vertex to the MST vertex that reaches it cheapest
    parent = {}

    # List to store the MST edges
    mst_edges = []

    # Track vertices included in the MST
    in_mst = set()

    # Add the starting vertex to the priority queue
    heapq.heappush(priority_queue, (0, start))

    while priority_queue:
        current_cost, u = heapq.heappop(priority_queue)

        # If u is already in the MST, skip it
        if u in in_mst:
            continue

        # Include u in the MST
        in_mst.add(u)

        # Add the edge to the MST if it's not the starting vertex
        if current_cost > 0:
            mst_edges.append((parent[u], u, current_cost))

        # Explore all neighbors of u
        for v, weight in graph[u]:
            if v not in in_mst and weight < min_cost[v]:
                min_cost[v] = weight
                parent[v] = u
                heapq.heappush(priority_queue, (weight, v))

    return mst_edges, sum(weight for _, _, weight in mst_edges)

# Example usage
graph = defaultdict(list)
# Add edges to the graph (undirected graph)
edges = [
    ('A', 'B', 1),
    ('A', 'C', 3),
    ('B', 'C', 3),
    ('B', 'D', 6),
    ('C', 'D', 4),
    ('D', 'E', 2),
    ('C', 'E', 5)
]

for u, v, weight in edges:
    graph[u].append((v, weight))
    graph[v].append((u, weight))  # For undirected graph

start_vertex = 'A'
mst_edges, total_cost = prim(graph, start_vertex)

print("Edges in the Minimum Spanning Tree:")
for u, v, weight in mst_edges:
    print(f"{u} - {v}: {weight}")

print(f"Total cost of the Minimum Spanning Tree: {total_cost}")
