SC Practicals 1 To 10 Black Book
PRACTICAL 1A
Aim: Design a simple linear neural network model.
n = int(input("Enter the number of inputs: "))

inputs = []
print("Enter the input values:")
for i in range(0, n):
    ele = float(input())
    inputs.append(ele)
print(inputs)

weights = []
print("Enter the weight values:")
for i in range(0, n):
    ele = float(input())
    weights.append(ele)
print(weights)

# Net input of a linear neuron: Yin = sum of input*weight products
Yin = []
for i in range(0, n):
    Yin.append(inputs[i] * weights[i])
print("Net input Yin =", round(sum(Yin), 3))
OUTPUT
PRACTICAL 1B
Aim: Calculate the output of neural net using both binary and bipolar sigmoidal function.
import numpy as np

def binary_sigmoid(x):
    # Binary sigmoid: output in (0, 1)
    return 1 / (1 + np.exp(-x))

def bipolar_sigmoid(x):
    # Bipolar sigmoid: output in (-1, 1)
    return (2 / (1 + np.exp(-x))) - 1
# Example usage
if __name__ == "__main__":
    # Define inputs, weights, and biases
    inputs = np.array([0.5, -0.2, 0.1])
    weights = np.array([
        [0.4, 0.3, 0.5],
        [-0.3, 0.8, -0.6]
    ]).T  # Transpose to match dimensions
    biases = np.array([0.1, -0.1])
    # Net input for each neuron, then apply both activation functions
    net = np.dot(inputs, weights) + biases
    print("Binary sigmoid output:", binary_sigmoid(net))
    print("Bipolar sigmoid output:", bipolar_sigmoid(net))
PRACTICAL 2A
Aim: Generate AND/NOT function using McCulloch-Pitts neural net.
# McCulloch-Pitts net for the AND-NOT function
w1 = 1
w2 = 1
num_ip = int(input("Enter the number of input pairs: "))
print("For the", num_ip, "inputs calculate the net input using yin = x1w1 + x2w2")
x1 = []
x2 = []
for j in range(0, num_ip):
    ele1 = int(input("x1 = "))
    ele2 = int(input("x2 = "))
    x1.append(ele1)
    x2.append(ele2)
print("x1 = ", x1)
print("x2 = ", x2)

# Weighted inputs for each pattern
n = [x * w1 for x in x1]
m = [x * w2 for x in x2]

Yin = []
for i in range(0, num_ip):
    Yin.append(n[i] + m[i])
print("Yin = ", Yin)

# Assume w1 is excitatory and w2 is inhibitory (w2 = -1)
Yin = []
for i in range(0, num_ip):
    Yin.append(n[i] - m[i])
print("After assuming one weight as excitatory and the other as inhibitory Yin = ", Yin)

# Apply the threshold (theta = 1) to get the AND-NOT output
Y = []
for i in range(0, num_ip):
    Y.append(1 if Yin[i] >= 1 else 0)
print("Y = ", Y)
PRACTICAL 2B
Aim: Generate XOR function using McCulloch-Pitts neural net.
import math
import numpy
import random
INPUT_NODES = 2
OUTPUT_NODES = 1
HIDDEN_NODES = 2
MAX_ITERATIONS = 130000
LEARNING_RATE = .2
print("Neural Network Program")
class network:
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.total_nodes = input_nodes + hidden_nodes + output_nodes
        self.learning_rate = learning_rate
        # node values, expected values, thresholds, and the weight matrix
        self.values = numpy.zeros(self.total_nodes)
        self.expectedValues = numpy.zeros(self.total_nodes)
        self.thresholds = numpy.zeros(self.total_nodes)
        self.weights = numpy.zeros((self.total_nodes, self.total_nodes))
        # initialise thresholds and weights randomly
        random.seed(10000)
        for i in range(self.input_nodes, self.total_nodes):
            self.thresholds[i] = random.random() / random.random()
            for j in range(i + 1, self.total_nodes):
                self.weights[i][j] = random.random() * 2
    def process(self):
        # sum weighted input nodes for each hidden node, compare threshold, apply sigmoid
        for i in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
            W_i = 0.0
            for j in range(self.input_nodes):
                W_i += self.weights[j][i] * self.values[j]
            W_i -= self.thresholds[i]
            self.values[i] = 1 / (1 + math.exp(-W_i))
        # same forward step for the output node(s), fed by the hidden nodes
        for i in range(self.input_nodes + self.hidden_nodes, self.total_nodes):
            W_i = 0.0
            for j in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
                W_i += self.weights[j][i] * self.values[j]
            W_i -= self.thresholds[i]
            self.values[i] = 1 / (1 + math.exp(-W_i))
    def processErrors(self):
        sumOfSquaredErrors = 0.0
        for i in range(self.input_nodes + self.hidden_nodes, self.total_nodes):
            error = self.expectedValues[i] - self.values[i]
            sumOfSquaredErrors += math.pow(error, 2)
            outputErrorGradient = self.values[i] * (1 - self.values[i]) * error
            # update hidden-to-output weights and the output threshold
            for j in range(self.input_nodes, self.input_nodes + self.hidden_nodes):
                self.weights[j][i] += self.learning_rate * self.values[j] * outputErrorGradient
            self.thresholds[i] -= self.learning_rate * outputErrorGradient
        return sumOfSquaredErrors
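The listing stops here in the source; the hidden-layer updates and the driver loop are missing. A minimal sketch of how the network could be trained on the XOR truth table, assuming the class above and a hypothetical helper setUpXOR that loads one pattern:

def setUpXOR(net, pattern):
    # hypothetical helper: load one (x1, x2, target) row into the net
    x1, x2, target = pattern
    net.values[0] = x1
    net.values[1] = x2
    net.expectedValues[net.total_nodes - 1] = target

net = network(INPUT_NODES, HIDDEN_NODES, OUTPUT_NODES, LEARNING_RATE)
patterns = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]
for iteration in range(MAX_ITERATIONS):
    setUpXOR(net, patterns[iteration % 4])
    net.process()
    net.processErrors()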
PRACTICAL 3A
Aim: Write a program to implement Hebb’s rule.
def main():
    w = float(input("Consider a single neuron perceptron with a single i/p: "))
    d = float(input("Enter the learning coefficient: "))
    x = float(input("Enter the input value: "))    # input value
    t = float(input("Enter the target output: "))  # target output
    for i in range(10):
        net = x * w                                # net input of the neuron
        a = 1 if net >= 0 else 0                   # step activation
        div = d * (t - a)                          # change in weight
        w = w + div
        print(f"Iteration: {i+1}, Output: {a}, Change in weight: {div}, Adjusted weight: {w}")

main()
OUTPUT
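Strictly, the update above is the perceptron-style error correction; Hebb's rule proper strengthens a weight in proportion to the product of input and output, delta_w = eta * x * y. A minimal sketch of the classical Hebbian update, with assumed sample values:

eta = 0.5                        # learning rate (assumed)
x, w = 1.0, 0.2                  # sample input and initial weight (assumed)
for i in range(5):
    y = 1 if x * w >= 0 else 0   # step activation
    w += eta * x * y             # Hebbian update: delta_w = eta * x * y
    print(f"Iteration {i+1}: w = {w}")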
PRACTICAL 3B
Aim: Write a program to implement Delta rule.
def main():
    inputs = []
    weights = []
    # Initialize weights and read the input vector
    for i in range(3):
        weight = float(input(f"Initialize weight vector {i}: "))
        weights.append(weight)
    for i in range(3):
        value = float(input(f"Enter input value {i}: "))
        inputs.append(value)
    # Get desired output
    desired_output = float(input("Enter the desired output: "))
    # Perceptron training loop
    while True:
        # Calculate net input (simplified for this example)
        net_input = sum(w * x for w, x in zip(weights, inputs))
        # Calculate output (simplified for this example)
        output = 1 if net_input >= 0 else 0
        # Calculate error
        delta = desired_output - output
        if delta == 0:
            print("\nOutput is correct")
            break
        # Adjust weights based on error (delta rule)
        for i in range(3):
            weights[i] = weights[i] + delta * inputs[i]
        print(f"\nValue of delta is: {delta}")
        print("Weights have been adjusted")

if __name__ == "__main__":
    main()
OUTPUT
PRACTICAL 4A
Aim: Write a program for the Backpropagation algorithm.
import math
def main():
    # Initial setup
    coeff = 0.1  # learning coefficient
    s = [{'val': 0, 'out': 0, 'wo': 0, 'wi': 0, 'top': 0} for _ in range(3)]
    i = 0
    while i != 3:
        if i == 0:
            # initial output-layer and input-layer weights
            s[i]['wo'] = -1.0
            s[i]['wi'] = -0.3
        else:
            # carry the adjusted weights forward to the next sample
            s[i]['wo'] = s[i-1]['wo']
            s[i]['wi'] = s[i-1]['wi']
        i += 1
    print("VALUE\tTarget\tActual\two\twi")
    for i in range(3):
        print(f"{s[i]['val']}\t{s[i]['top']}\t{s[i]['out']}\t{s[i]['wo']}\t{s[i]['wi']}")

main()
OUTPUT
PRACTICAL 4B
Aim: Write a program for error Backpropagation algorithm.
import math
def main():
    c = float(input("Enter the learning coefficient of network c: "))
    w10, b10 = map(float, input("Enter the input weights/base of first network: ").split())
    w20, b20 = map(float, input("Enter the input weights/base of second network: ").split())
OUTPUT
PRACTICAL 5A
Aim: Write a program for Hopfield Network.
import math

class Neuron:
    def __init__(self, weights=None):
        if weights is None:
            weights = [0] * 4
        self.weightv = weights
        self.activation = 0

    def act(self, pattern):
        # net input of this neuron for the given pattern
        return sum(w * x for w, x in zip(self.weightv, pattern))

class Network:
    def __init__(self, a, b, c, d):
        self.nrn = [Neuron(a), Neuron(b), Neuron(c), Neuron(d)]
        self.output = [0] * 4

    def activation(self, pattern):
        # compute each neuron's activation and threshold it at zero
        for i in range(4):
            self.nrn[i].activation = self.nrn[i].act(pattern)
            print(f"activation is {self.nrn[i].activation}")
            self.output[i] = 1 if self.nrn[i].activation >= 0 else 0
            print(f"output value is {self.output[i]}")

def main():
    patrn1 = [1, 0, 1, 0]
    wt1 = [0, -3, 3, -3]
    wt2 = [-3, 0, -3, 3]
    wt3 = [3, -3, 0, -3]
    wt4 = [-3, 3, -3, 0]
    h1 = Network(wt1, wt2, wt3, wt4)
    # Present a pattern to the network and get the activations of the neurons
    h1.activation(patrn1)
    for i in range(4):
        if h1.output[i] == patrn1[i]:
            print(f"\n pattern= {patrn1[i]} output = {h1.output[i]} component matches")
        else:
            print(f"\n pattern= {patrn1[i]} output = {h1.output[i]} discrepancy occurred")
    print("\n\n")
    patrn2 = [0, 1, 0, 1]
    h1.activation(patrn2)
    for i in range(4):
        if h1.output[i] == patrn2[i]:
            print(f"\n pattern= {patrn2[i]} output = {h1.output[i]} component matches")
        else:
            print(f"\n pattern= {patrn2[i]} output = {h1.output[i]} discrepancy occurred")

main()
OUTPUT
nrn[3].weightv[0] is -3
nrn[3].weightv[1] is 3
nrn[3].weightv[2] is -3
nrn[3].weightv[3] is 0
activation is -6
output value is 0
pattern= 1 output = 1 component matches
pattern= 0 output = 0 component matches
pattern= 1 output = 1 component matches
pattern= 0 output = 0 component matches
nrn[0].weightv[0] is 0
nrn[0].weightv[1] is -3
nrn[0].weightv[2] is 3
nrn[0].weightv[3] is -3
activation is 3
output value is 1
nrn[2].weightv[0] is 3
nrn[2].weightv[1] is -3
nrn[2].weightv[2] is 0
nrn[2].weightv[3] is -3
activation is -6
output value is 0
nrn[3].weightv[0] is -3
nrn[3].weightv[1] is 3
nrn[3].weightv[2] is -3
nrn[3].weightv[3] is 0
activation is 3
output value is 1
pattern= 0 output = 0 component matches
pattern= 1 output = 1 component matches
pattern= 0 output = 0 component matches
pattern= 1 output = 1 component matches
nrn[0].weightv[1] is -3
nrn[0].weightv[2] is 3
nrn[0].weightv[3] is -3
activation is -6
PRACTICAL 5B
Aim: Write a program for a Radial Basis Function (RBF) network.
import numpy as np
from scipy.linalg import norm, pinv
import matplotlib.pyplot as plt
class RBF:
    def __init__(self, indim, numCenters, outdim):
        self.indim = indim
        self.outdim = outdim
        self.numCenters = numCenters
        self.centers = [np.random.uniform(-1, 1, indim) for i in range(numCenters)]
        self.beta = 8
        self.W = np.random.random((self.numCenters, self.outdim))

    def _basisfunc(self, c, d):
        # Gaussian radial basis function
        return np.exp(-self.beta * norm(c - d) ** 2)

    def _calcAct(self, X):
        # calculate activations of RBFs
        G = np.zeros((X.shape[0], self.numCenters), dtype=float)
        for ci, c in enumerate(self.centers):
            for xi, x in enumerate(X):
                G[xi, ci] = self._basisfunc(c, x)
        return G

    def train(self, X, Y):
        # choose random centers from the training data
        rnd_idx = np.random.permutation(X.shape[0])[:self.numCenters]
        self.centers = [X[i, :] for i in rnd_idx]
        print("centers", self.centers)
        # calculate activations of RBFs
        G = self._calcAct(X)
        print(G)
        # calculate output weights via the pseudoinverse
        self.W = np.dot(pinv(G), Y)

    def test(self, X):
        G = self._calcAct(X)
        Y = np.dot(G, self.W)
        return Y

if __name__ == '__main__':
    n = 100
    x = np.mgrid[-1:1:complex(0, n)].reshape(n, 1)
    # set y and add random noise
    y = np.sin(3 * (x + 0.5) ** 3 - 1)
    # y += np.random.normal(0, 0.1, y.shape)
    # rbf regression
    rbf = RBF(1, 10, 1)
    rbf.train(x, y)
    z = rbf.test(x)
    # plot the data and the learned model
    plt.plot(x, y, 'k-')
    plt.plot(x, z, 'r-', linewidth=2)
    # plot rbfs
    plt.plot([c[0] for c in rbf.centers], np.zeros(rbf.numCenters), 'gs')
    for c in rbf.centers:
        # RF prediction lines
        cx = np.arange(c[0] - 0.7, c[0] + 0.7, 0.01)
        cy = [rbf._basisfunc(np.array([cx_]), c) for cx_ in cx]
        plt.plot(cx, cy, '-', color='gray', linewidth=0.2)
    plt.xlim(-1.2, 1.2)
    plt.show()
OUTPUT
[
[0.0229, 0.0001, 0.8114, 0.0000, 0.9710, 0.0000, 0.0434, 0.0183, 1.0000, 0.6736],
[0.0286, 0.0001, 0.8522, 0.0000, 0.9870, 0.0000, 0.0529, 0.0229, 0.9967, 0.7214],
[0.0353, 0.0002, 0.8891, 0.0000, 0.9967, 0.0000, 0.0642, 0.0286, 0.9870, 0.7676],
[0.0434, 0.0003, 0.9216, 0.0000, 1.0000, 0.0000, 0.0773, 0.0353, 0.9710, 0.8114],
[0.0529, 0.0004, 0.9491, 0.0000, 0.9967, 0.0000, 0.0925, 0.0434, 0.9491, 0.8522],]
PRACTICAL 6A
Aim: Kohonen Self-Organizing Map.
import numpy as np
import matplotlib.pyplot as plt
from minisom import MiniSom
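Only the imports survive in this listing. A minimal sketch of training a Kohonen SOM with MiniSom, assuming a small random 2-D dataset and a 6x6 map (the data values are illustrative):

data = np.random.rand(100, 2)                     # 100 random 2-D samples (assumed)
som = MiniSom(6, 6, 2, sigma=1.0, learning_rate=0.5)
som.random_weights_init(data)
som.train_random(data, 1000)                      # 1000 training iterations
plt.pcolor(som.distance_map().T, cmap='bone_r')   # U-matrix of the trained map
plt.colorbar()
plt.show()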
PRACTICAL 6B
Aim: Adaptive Resonance Theory (ART1).
import numpy as np

class ART1:
    def __init__(self, rho=0.5, n_clusters=2, step=2):
        self.rho = rho                  # vigilance parameter
        self.n_clusters = n_clusters
        self.step = step
        self.weights_21 = None          # top-down (expectation) weights
        self.weights_12 = None          # bottom-up (recognition) weights

    def train(self, X):
        n_samples, n_features = X.shape
        if self.weights_21 is None:
            self.weights_21 = np.ones((n_features, self.n_clusters))
        if self.weights_12 is None:
            scaler = self.step / (self.step + n_features - 1)
            self.weights_12 = scaler * self.weights_21.T
        weight_21 = self.weights_21
        weight_12 = self.weights_12
        classes = np.zeros(n_samples)
        # Training loop
        for i, p in enumerate(X):
            disabled_neurons = []
            reset = True
            while reset:
                output1 = p
                input2 = np.dot(weight_12, output1.T)
                input2[disabled_neurons] = -np.inf
                winner_index = int(input2.argmax())
                # vigilance test: compare the winner's expectation with the input
                expectation = weight_21[:, winner_index]
                output1 = np.logical_and(p, expectation).astype(int)
                reset_value = np.dot(output1.T, output1) / np.dot(p.T, p)
                reset = reset_value < self.rho
                if reset and len(disabled_neurons) < self.n_clusters - 1:
                    # winner fails the vigilance test: search another cluster
                    disabled_neurons.append(winner_index)
                else:
                    # resonance (or no clusters left): update the winner
                    reset = False
                    weight_21[:, winner_index] = output1
                    weight_12[winner_index, :] = (self.step * output1 /
                        (self.step + np.dot(output1.T, output1) - 1))
                    classes[i] = winner_index
        return classes

# Example usage:
if __name__ == "__main__":
    data = np.array([
        [0, 1, 0],
        [1, 0, 0],
        [1, 1, 0],
    ])
    art = ART1(rho=0.5, n_clusters=2)
    print(art.train(data))
OUTPUT
PRACTICAL 7A
Aim: Write a program for Linear separation.
import numpy as np
import matplotlib.pyplot as plt

# two sample points to separate (assumed data)
points = [(1, 2), (4, 5)]
fig, ax = plt.subplots()

def dist4line1(x, y):
    # signed distance of (x, y) from the candidate line y = slope * x
    distance = y - slope * x
    return distance, np.sign(distance)

# try several random candidate lines through the origin
for _ in range(10):
    slope = np.random.uniform(-3, 3)
    X = np.linspace(0, 6, 10)
    Y = slope * X
    results = []
    for point in points:
        results.append(dist4line1(*point))
    # print(slope, results)
    if (results[0][1] != results[1][1]):
        ax.plot(X, Y, "g-")   # points on opposite sides: line separates them
    else:
        ax.plot(X, Y, "r-")   # points on the same side: no separation
plt.show()
OUTPUT
PRACTICAL 7B
Aim: Write a program for a Hopfield network model for associative memory.
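No listing survives for this practical. A minimal sketch of a Hopfield network used as an associative memory, assuming bipolar patterns and Hebbian weight construction:

import numpy as np

def train_hopfield(patterns):
    # Hebbian outer-product rule with a zero diagonal
    n = patterns.shape[1]
    W = np.zeros((n, n))
    for p in patterns:
        W += np.outer(p, p)
    np.fill_diagonal(W, 0)
    return W

def recall(W, pattern, steps=5):
    # synchronous updates until the pattern settles
    for _ in range(steps):
        pattern = np.where(np.dot(W, pattern) >= 0, 1, -1)
    return pattern

patterns = np.array([[1, -1, 1, -1], [-1, 1, -1, 1]])  # stored memories (assumed)
W = train_hopfield(patterns)
noisy = np.array([1, 1, 1, -1])                        # corrupted version of pattern 1
print("Recalled:", recall(W, noisy))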
OUTPUT
PRACTICAL 8A
Aim: Membership and Identity Operators: in, not in.
def overlapping(list1, list2):
    c = 0
    d = 0
    for i in list1:
        c += 1
    for i in list2:
        d += 1
    for i in range(0, c):
        for j in range(0, d):
            if (list1[i] == list2[j]):
                return 1
    return 0

list1 = [1, 2, 3, 4, 5]
list2 = [6, 7, 8, 9]
if (overlapping(list1, list2)):
    print("overlapping")
else:
    print("not overlapping")
OUTPUT
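The same check can be written directly with the in membership operator that this practical is about; a one-line equivalent of overlapping() using the same two lists:

print("overlapping" if any(x in list2 for x in list1) else "not overlapping")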
PRACTICAL 8B
Aim: Membership and Identity Operators: is, is not.
x = 5
if (type(x) is int):
    print("true")
else:
    print("false")
OUTPUT
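Note that is and is not compare object identity rather than value equality; a short sketch contrasting the two, with assumed list values:

a = [1, 2, 3]
b = [1, 2, 3]
c = a
print(a == b)      # True: same value
print(a is b)      # False: distinct objects
print(a is c)      # True: same object
print(a is not b)  # True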
PRACTICAL 9A
Aim: Find ratios using fuzzy logic.
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
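Only the imports survive here. A minimal sketch of the usual fuzzywuzzy ratio calls, with illustrative strings:

s1, s2 = "geeks for geeks", "geeks geeks"
print(fuzz.ratio(s1, s2))             # simple ratio
print(fuzz.partial_ratio(s1, s2))     # best partial match
print(fuzz.token_sort_ratio(s1, s2))  # word-order-insensitive ratio
choices = ["geeks for geeks", "geek for geek", "g. for geeks"]
print(process.extractOne("geeks", choices))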
PRACTICAL 9B
Aim: Solve the tipping problem using fuzzy logic.
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

quality = ctrl.Antecedent(np.arange(0, 11, 1), 'quality')
service = ctrl.Antecedent(np.arange(0, 11, 1), 'service')
tip = ctrl.Consequent(np.arange(0, 26, 1), 'tip')

quality['poor'] = fuzz.trimf(quality.universe, [0, 0, 5])
quality['average'] = fuzz.trimf(quality.universe, [0, 5, 10])
quality['good'] = fuzz.trimf(quality.universe, [5, 10, 10])
service['poor'] = fuzz.trimf(service.universe, [0, 0, 5])
service['average'] = fuzz.trimf(service.universe, [0, 5, 10])
service['good'] = fuzz.trimf(service.universe, [5, 10, 10])
# membership points must lie inside the 0-25 tip universe
tip['less'] = fuzz.trimf(tip.universe, [0, 0, 13])
tip['some'] = fuzz.trimf(tip.universe, [0, 13, 25])
tip['much'] = fuzz.trimf(tip.universe, [13, 25, 25])

rule1 = ctrl.Rule(quality['poor'] | service['poor'], tip['less'])
rule2 = ctrl.Rule(service['average'], tip['some'])
rule3 = ctrl.Rule(service['good'] | quality['good'], tip['much'])

tipping_ctrl = ctrl.ControlSystem([rule1, rule2, rule3])
tipping = ctrl.ControlSystemSimulation(tipping_ctrl)
tipping.input['quality'] = float(input("Enter food quality (0-10): "))
tipping.input['service'] = float(input("Enter service rating (0-10): "))
tipping.compute()
print("Recommended tip:", tipping.output['tip'])
quality.view()
service.view()
tip.view()
OUTPUT
PRACTICAL 10A
Aim: Implementation of a simple genetic algorithm.
import random

POPULATION_SIZE = 100
GENES = '''abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890, .-;:_!"#%&/()=?@${[]}'''
TARGET = "I love GeeksforGeeks"

class Individual(object):
    '''
    Class representing individual in population
    '''
    def __init__(self, chromosome):
        self.chromosome = chromosome
        self.fitness = self.cal_fitness()

    @classmethod
    def mutated_genes(cls):
        '''
        create random genes for mutation
        '''
        return random.choice(GENES)

    @classmethod
    def create_gnome(cls):
        '''
        create chromosome or string of genes
        '''
        gnome_len = len(TARGET)
        return [cls.mutated_genes() for _ in range(gnome_len)]

    def mate(self, par2):
        '''
        perform crossover and mutation to produce a child
        '''
        child_chromosome = []
        for gp1, gp2 in zip(self.chromosome, par2.chromosome):
            # random probability
            prob = random.random()
            if prob < 0.45:        # gene from parent 1
                child_chromosome.append(gp1)
            elif prob < 0.90:      # gene from parent 2
                child_chromosome.append(gp2)
            else:                  # mutation
                child_chromosome.append(self.mutated_genes())
        return Individual(child_chromosome)

    def cal_fitness(self):
        '''
        fitness = number of characters differing from TARGET
        '''
        return sum(gs != gt for gs, gt in zip(self.chromosome, TARGET))

# Driver code
def main():
    global POPULATION_SIZE
    generation = 1
    found = False
    population = []
    # create initial population
    for _ in range(POPULATION_SIZE):
        gnome = Individual.create_gnome()
        population.append(Individual(gnome))
    while not found:
        # sort in increasing order of fitness (0 means TARGET reached)
        population = sorted(population, key=lambda x: x.fitness)
        if population[0].fitness <= 0:
            found = True
            break
        # elitism: the fittest 10% survive; mate the top 50% for the rest
        new_generation = population[:int(0.10 * POPULATION_SIZE)]
        for _ in range(int(0.90 * POPULATION_SIZE)):
            parent1 = random.choice(population[:50])
            parent2 = random.choice(population[:50])
            new_generation.append(parent1.mate(parent2))
        population = new_generation
        print("Generation: {}\tString: {}\tFitness: {}".format(
            generation, "".join(population[0].chromosome), population[0].fitness))
        generation += 1
    print("Generation: {}\tString: {}\tFitness: {}".format(
        generation, "".join(population[0].chromosome), population[0].fitness))

if __name__ == '__main__':
    main()
PRACTICAL 10B
Aim: Create two classes, City and Fitness, for a genetic algorithm.
import numpy as np
import random

class City:
    """
    Represents a city with x and y coordinates.
    """
    def __init__(self, x: float, y: float):
        self.x = x
        self.y = y

    def distance_to(self, other: "City") -> float:
        # Euclidean distance to another city
        return np.sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)

class Fitness:
    """
    Calculates and stores the fitness of a route in a genetic algorithm.
    """
    def __init__(self, route: list):
        self.route = route
        self.distance = 0
        self.fitness = 0.0

    def calculate_distance(self) -> float:
        """
        Calculates the total distance of the route.
        """
        if self.distance == 0:
            total_distance = 0
            for i in range(len(self.route)):
                from_city = self.route[i]
                to_city = self.route[(i + 1) % len(self.route)]  # Loop back to the start
                total_distance += from_city.distance_to(to_city)
            self.distance = total_distance
        return self.distance
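A short usage sketch for these two classes, with illustrative coordinates:

route = [City(0, 0), City(3, 4), City(6, 0)]
fitness = Fitness(route)
print("Route distance:", fitness.calculate_distance())  # 5 + 5 + 6 = 16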