
Lab Manual

CL461-Artificial Intelligence Lab

Group Members
18F-0120 Abdul Rehman

18F-0144 Waqas Tahir

18F-0411 Ahmad Nadeem

Section 6E

Spring 2021

Department of Computer Science

FAST – National University of Computer & Emerging Sciences

Chiniot-Faisalabad Campus

Contents
LAB1 (Basic Graph Implementation)
LAB2 (BFS, DFS Traversal)
LAB3 (Uniform Cost Search)
LAB-04 [Iterative Deepening Search & Greedy Best First Search, A* Algorithm]
    Task 1: Iterative Deepening
    Task 2: Greedy BFS
    Task 3: A* Search
LAB-05 [Admissibility and Consistency]
LAB-06 [Hill Climbing Algorithm]
LAB-07 [MIN-MAX Game Playing & Alpha-Beta Game Playing]
    Task 1: Min-Max Algorithm
    Task 2: Alpha-Beta Pruning
LAB-08 [AC-3 Algorithm CSP]
LAB-10 [K-Means Clustering]
LAB-11 [Simple Perceptron Model]
    Task 1
    Task 2
    Task 3
LAB-12 [Gradient Descent Optimization]
LAB1 (Basic Graph Implementation)

Code:
class graph:
    def __init__(self, gdict=None):
        if gdict is None:
            gdict = {}
        self.gdict = gdict

    def edges(self):
        return self.findedges()

    def findedges(self):
        edgename = []
        for vrtx in self.gdict:
            for nxtvrtx in self.gdict[vrtx]:
                if {nxtvrtx, vrtx} not in edgename:
                    edgename.append({vrtx, nxtvrtx})
        return edgename

    def getVertices(self):
        return self.gdict.keys()

    ########## WRITE YOUR CODE HERE ###############

    def addNode(self, node):
        self.gdict[node] = []

    def addEdge(self, fromNode, toNode):
        self.gdict[fromNode].append(toNode)

g = graph()
g.addNode("a")
g.addNode("b")
g.addNode("c")
g.addNode("d")
g.addNode("e")

g.addEdge("a", "b")
g.addEdge("a", "c")
g.addEdge("b", "a")
g.addEdge("b", "d")
g.addEdge("c", "a")
g.addEdge("c", "d")
g.addEdge("d", "b")
g.addEdge("d", "c")
g.addEdge("d", "e")
g.addEdge("e", "d")

print(g.getVertices())
print(g.edges())

Output:
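The output screenshot did not survive extraction. Tracing the driver above by hand (a sketch, assuming Python 3.7+ insertion-ordered dictionaries; the element order inside each printed set may vary between runs), the two prints should show roughly:

dict_keys(['a', 'b', 'c', 'd', 'e'])
[{'a', 'b'}, {'a', 'c'}, {'b', 'd'}, {'c', 'd'}, {'d', 'e'}]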

LAB2 (BFS, DFS Traversal)
Depth First Search (AI)
Code:
# Graph dictionary using an adjacency list
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

visited = set()  # set to keep track of visited nodes

# DFS algorithm using recursive calls
# Parameters: visited - nodes that have already been visited
#             graph   - graph variable (a dictionary in this case)
#             node    - the node to visit next
def dfs(visited, graph, node):
    if node not in visited:
        print(node)
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

# Driver code to run the DFS function
dfs(visited, graph, 'A')
print(visited)
Output:
A
B
D
E
F
C

{'B', 'C', 'D', 'A', 'F', 'E'}

Breadth First Search (AI)
graph = {'A': ['B', 'C', 'E'],
         'B': ['A', 'D', 'E'],
         'C': ['A', 'F', 'G'],
         'D': ['B'],
         'E': ['A', 'B', 'D'],
         'F': ['C'],
         'G': ['C']}

# BFS algorithm
def breadth_first_search(visited, graph, node):
    visited.clear()
    queue = []
    queue.append(node)
    # iterating over the queue while appending to it makes it behave as a FIFO frontier
    for x in queue:
        print(x)
        visited.add(x)
        for neigh in graph[x]:
            if neigh not in visited and neigh not in queue:
                queue.append(neigh)

breadth_first_search(visited, graph, "A")
print("Visited Set: ", visited)

Output:
A
B
C
E
D
F
G
Visited Set: {'C', 'B', 'D', 'G', 'A', 'F', 'E'}

Romania Map:
romania_map = {
    'Arad': ['Sibiu', 'Zerind', 'Timisoara'],
    'Zerind': ['Arad', 'Oradea'],
    'Oradea': ['Zerind', 'Sibiu'],
    'Sibiu': ['Arad', 'Oradea', 'Fagaras', 'Rimnicu'],
    'Timisoara': ['Arad', 'Lugoj'],
    'Lugoj': ['Timisoara', 'Mehadia'],
    'Mehadia': ['Lugoj', 'Drobeta'],
    'Drobeta': ['Mehadia', 'Craiova'],
    'Craiova': ['Drobeta', 'Rimnicu', 'Pitesti'],
    'Rimnicu': ['Sibiu', 'Craiova', 'Pitesti'],
    'Fagaras': ['Sibiu', 'Bucharest'],
    'Pitesti': ['Rimnicu', 'Craiova', 'Bucharest'],
    'Bucharest': ['Fagaras', 'Pitesti', 'Giurgiu', 'Urziceni'],
    'Giurgiu': ['Bucharest'],
    'Urziceni': ['Bucharest', 'Vaslui', 'Hirsova'],
    'Hirsova': ['Urziceni', 'Eforie'],
    'Eforie': ['Hirsova'],
    'Vaslui': ['Iasi', 'Urziceni'],
    'Iasi': ['Vaslui', 'Neamt'],
    'Neamt': ['Iasi']
}

Romania Map Depth First Search:

Starting with Arad:

visited.clear()
dfs(visited, romania_map, 'Arad')

Romania Map Breadth First Search:

Starting with Arad:

visited.clear()
breadth_first_search(visited, romania_map, 'Arad')
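The pages that followed held output screenshots which did not survive extraction. Tracing the code by hand (a sketch, assuming Python 3.7+ insertion-ordered dictionaries; the actual programs print one city per line), the traversals should visit:

DFS from Arad:
Arad, Sibiu, Oradea, Zerind, Fagaras, Bucharest, Pitesti, Rimnicu, Craiova, Drobeta, Mehadia, Lugoj, Timisoara, Giurgiu, Urziceni, Vaslui, Iasi, Neamt, Hirsova, Eforie

BFS from Arad:
Arad, Sibiu, Zerind, Timisoara, Oradea, Fagaras, Rimnicu, Lugoj, Bucharest, Craiova, Pitesti, Mehadia, Giurgiu, Urziceni, Drobeta, Vaslui, Hirsova, Iasi, Eforie, Neamt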

LAB3 (Uniform Cost Search)
Code:
import heapq

class PriorityQueue:
    # Constructor to initialize the queue
    def __init__(self):
        self._queue = []
        self._index = 0

    def showQueue(self):
        for x in self._queue:
            print(x)

    # Insert a new element into the queue
    def insert(self, item, priority):
        heapq.heappush(self._queue, (priority, self._index, item))
        self._index += 1

    # Remove the lowest-priority element from the queue
    def remove(self):
        return heapq.heappop(self._queue)[-1]

    # Check if the queue is empty
    def is_empty(self):
        return len(self._queue) == 0

class Node:

    def __init__(self, key):
        self.key, self.successors, self.weight_successors = key, [], {}

    # return the key
    def getKey(self):
        return self.key

    # return the successors of the node
    def getSuccessors(self):
        return self.successors

    ######### WRITE CODE HERE ############

    # add a successor, passing the node and the edge weight
    def addSuccessor(self, node, weight):
        # adds only if the successor does not already exist
        if node.getKey() not in self.successors:
            self.successors.append(node.getKey())
            self.weight_successors[node.getKey()] = weight

    # returns the weights of the successors
    def getWeightSuccessors(self):
        return self.weight_successors

# class that represents a graph
class Graph:

    def __init__(self):
        self.nodes = {}  # key: key of node, value: instance of Node

    # adds a node to the graph, given a key
    def addNode(self, key_node):
        if key_node in self.nodes:  # checks if the key already exists
            print('Error: key %s already exists!!' % key_node)
        else:
            node = Node(key_node)  # creates an instance of Node
            self.nodes[key_node] = node  # stores the node as an object

    # connects two nodes
    def connect(self, key_source, key_destination, weight):
        # checks if both keys exist in the graph (i.e. the nodes dictionary)
        if key_source in self.nodes and key_destination in self.nodes:
            if key_source != key_destination:  # checks if the keys are different
                if weight > 0:  # checks if the weight is positive
                    # connects the nodes
                    self.nodes[key_source].addSuccessor(self.nodes[key_destination], weight)
                else:
                    print('Error: negative weight!!')
            else:
                print('Error: same keys!!')
        else:
            print('Error: key does not exist!!')

    # returns the weight of an edge
    def getWeightEdge(self, key_source, key_successor):
        if key_source in self.nodes and key_successor in self.nodes:  # checks if both keys exist
            if key_source != key_successor:  # checks if the keys are different
                weight_successors = self.nodes[key_source].getWeightSuccessors()
                if key_successor in weight_successors:  # checks if key_successor is a successor
                    return weight_successors[key_successor]  # returns the weight
                else:
                    print('Error: successor does not exist!!')
            else:
                print('Error: same keys!!')
        else:
            print('Error: key does not exist!!')

    # returns the keys of all successors of a node
    def getSuccessors(self, key_node):
        ######### WRITE CODE HERE ##########
        if key_node in self.nodes:
            return self.nodes[key_node].getSuccessors()
        else:
            print('Error: key does not exist!!')

    # returns all nodes
    def getNodes(self):
        return self.nodes

import time

def run(graph, key_node_start, key_node_goal, verbose=False, time_sleep=0):
    if key_node_start not in graph.getNodes() or key_node_goal not in graph.getNodes():
        print('Error: key_node_start \'%s\' or key_node_goal \'%s\' does not exist!!' % (key_node_start, key_node_goal))
    else:
        pQueue = PriorityQueue()

        for x in graph.getSuccessors(key_node_start):
            weight = graph.getWeightEdge(key_node_start, x)
            pQueue.insert([x, weight], weight)

        while not pQueue.is_empty():
            current_node = pQueue.remove()
            key_current_node = current_node[0]
            cost_node = current_node[1]

            if key_current_node == key_node_goal:
                print('Reached Goal! Cost: ' + str(cost_node))
                break

            if verbose:
                print('Expands node \'%s\' with cumulative cost %s ...' % (key_current_node, cost_node))
                time.sleep(time_sleep)

            for x in graph.getSuccessors(key_current_node):
                weight = graph.getWeightEdge(key_current_node, x)
                pQueue.insert([x, cost_node + weight], cost_node + weight)

if __name__ == "__main__":
    graph = Graph()

    graph.addNode('a')
    graph.addNode('b')
    graph.addNode('c')
    graph.addNode('d')
    graph.addNode('e')
    graph.addNode('f')
    graph.addNode('G')
    graph.addNode('h')
    graph.addNode('p')
    graph.addNode('q')
    graph.addNode('r')
    graph.addNode('S')

    graph.connect('S', 'd', 3)
    graph.connect('S', 'e', 9)
    graph.connect('S', 'p', 1)
    graph.connect('b', 'a', 2)
    graph.connect('c', 'a', 2)
    graph.connect('d', 'b', 1)
    graph.connect('d', 'c', 8)
    graph.connect('d', 'e', 2)
    graph.connect('e', 'h', 8)
    graph.connect('e', 'r', 2)
    graph.connect('f', 'c', 3)
    graph.connect('f', 'G', 2)
    graph.connect('h', 'p', 4)
    graph.connect('h', 'q', 4)
    graph.connect('p', 'q', 15)
    graph.connect('r', 'f', 1)

    run(graph=graph, key_node_start='S', key_node_goal='G', verbose=True, time_sleep=2)
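The output screenshot is missing here. Tracing the search by hand (a sketch, not a captured run), the verbose trace should look like:

Expands node 'p' with cumulative cost 1 ...
Expands node 'd' with cumulative cost 3 ...
Expands node 'b' with cumulative cost 4 ...
Expands node 'e' with cumulative cost 5 ...
Expands node 'a' with cumulative cost 6 ...
Expands node 'r' with cumulative cost 7 ...
Expands node 'f' with cumulative cost 8 ...
Expands node 'e' with cumulative cost 9 ...
Reached Goal! Cost: 10

The cheapest path is S -> d -> e -> r -> f -> G with cost 3 + 2 + 2 + 1 + 2 = 10; note that 'e' is expanded twice because this implementation does not track visited nodes.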

LAB-04 [Iterative Deepening Search & Greedy Best First Search, A* Algorithm]
Task 1: Iterative Deepening
tree = {'S': [['A', 1], ['B', 5], ['C', 8]],
        'A': [['D', 3], ['E', 7], ['G', 9]],
        'B': [['G', 4]],
        'C': [['G', 5]]
        }

def ItrDeepening(start_node, goal_node, curr_node=None, level=0):
    # print and expand the current node only while the depth budget lasts
    if level > 0:
        print(curr_node, end=" ")
        if curr_node in tree:
            for child in tree[curr_node]:
                if ItrDeepening(start_node, goal_node, child[0], level - 1) == True:
                    return True

    # initialise curr_node the first time the function is called
    if not curr_node:
        curr_node = start_node
        print(curr_node, end="")

    # report success to the caller if the goal is found
    if curr_node == goal_node:
        print(goal_node, end=" ")
        return True

    # back at the start node: begin the next, deeper iteration
    if curr_node == start_node:
        print("")
        return ItrDeepening(start_node, goal_node, start_node, level + 1)

    # depth budget exhausted: print the frontier node and backtrack
    if level == 0:
        print(curr_node, end=" ")
        return None
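No driver is included for this task. Assuming the tree above, a minimal call (a hypothetical usage, not part of the original lab sheet) would be:

ItrDeepening('S', 'G')

which should print one deepening pass per line, roughly:

S
S A B C
S A D E G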

Task 2: Greedy BFS

heuristics = {'S': 8, 'A': 8, 'B': 4, 'C': 3, 'D': 10000, 'E': 10000, 'G': 0}  # dictionary of heuristic values

import heapq

class PriorityQueue:
    def __init__(self):
        self._queue = []
        self._index = 0

    def insert(self, item, priority):
        heapq.heappush(self._queue, (priority, self._index, item))
        self._index += 1

    def remove(self):
        return heapq.heappop(self._queue)[-1]

    def is_empty(self):
        return len(self._queue) == 0

def GreedyBestFirstSearch(Start, Goal, heuristics, tree):
    queue = PriorityQueue()
    visited_nodes = []

    queue.insert(Start, heuristics[Start])

    while not queue.is_empty():
        node = queue.remove()
        visited_nodes.append(node)

        if node == Goal:
            return visited_nodes

        # expand by heuristic value only; leaf nodes have no entry in tree
        for neigh in tree.get(node, []):
            queue.insert(neigh[0], heuristics[neigh[0]])

    return visited_nodes
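A quick check, reusing the tree from Task 1 and the heuristics above (a sketch of the expected behaviour, not a captured run):

print(GreedyBestFirstSearch('S', 'G', heuristics, tree))
# greedy search follows the lowest heuristic at each step: ['S', 'C', 'G']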

Task 3: A* Search
def ASearch(Start, Goal, heuristics, tree):
    queue = PriorityQueue()
    visited_nodes = {}

    # each queue item is [parent, child, priority], where priority is f = g + h
    queue.insert([Start, Start, heuristics[Start]], heuristics[Start])

    while not queue.is_empty():
        parent, child, priority = queue.remove()
        visited_nodes[child] = parent

        if child == Goal:
            return visited_nodes

        for neigh in tree.get(child, []):
            # recover g(child) by subtracting h(child), then add the edge cost and h(neighbour)
            distance = (priority - heuristics[child]) + neigh[1] + heuristics[neigh[0]]
            queue.insert([child, neigh[0], distance], distance)

    return visited_nodes

def printPath(Start, Goal, visited):
    if Goal == Start:
        print(Goal, end=" ")
        return None

    printPath(Start, visited[Goal], visited)
    print(Goal, end=" ")
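Reusing the same tree and heuristics, a minimal driver (hypothetical, not part of the original lab sheet) would be:

visited = ASearch('S', 'G', heuristics, tree)
printPath('S', 'G', visited)
# expected path: S B G (cost 5 + 4 = 9, cheaper than the greedy route S C G with cost 8 + 5 = 13)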

LAB-05 [Admissibility and Consistency]
Code:
import heapq

def aStarAlgo(start_node, stop_node):
    open_set = {start_node}
    closed_set = set()
    g = {}  # store distance from starting node
    parents = {}  # parents contains an adjacency map of all nodes

    # distance of starting node from itself is zero
    g[start_node] = 0
    # start_node is the root node, i.e. it has no parent nodes,
    # so start_node is set as its own parent node
    parents[start_node] = start_node

    while len(open_set) > 0:
        n = None

        # node with lowest f() is found
        for v in open_set:
            if n == None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v

        if n == stop_node or Graph_nodes[n] == None:
            pass
        else:
            for (m, weight) in get_neighbors(n):
                # nodes 'm' not in the open or closed set are added to open,
                # and n is set as their parent
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight

                # for each node m, compare its distance from start, i.e. g(m),
                # to the distance from start through node n
                else:
                    if g[m] > g[n] + weight:
                        # update g(m)
                        g[m] = g[n] + weight
                        # change parent of m to n
                        parents[m] = n

                        # if m is in the closed set, remove it and add it to open
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)

        if n == None:
            print('Path does not exist!')
            return None

        # if the current node is the stop_node,
        # then we begin reconstructing the path from it to the start_node
        if n == stop_node:
            path = []

            while parents[n] != n:
                path.append(n)
                n = parents[n]

            path.append(start_node)
            path.reverse()

            print('Path found: {}'.format(path))
            return path

        # remove n from the open list, and add it to the closed list,
        # because all of its neighbours were inspected
        open_set.remove(n)
        closed_set.add(n)

    print('Path does not exist!')
    return None

# define a function to return the neighbours, with their distances,
# of the passed node
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None

# for simplicity we'll consider the heuristic distances as given;
# this function returns the heuristic distance for a node
def heuristic(n):
    H_dist = {
        'A': 11,
        'B': 6,
        'C': 99,
        'D': 1,
        'E': 7,
        'G': 0,
    }
    return H_dist[n]

class PriorityQueue:

    def __init__(self):
        self._queue = []
        self._index = 0

    def insert(self, item, priority):
        heapq.heappush(self._queue, (priority, self._index, item))
        self._index += 1

    def remove(self):
        return heapq.heappop(self._queue)[-1]

    def is_empty(self):
        return len(self._queue) == 0

def check_admissibility():

    def find_cost(start_node, goal_node):
        if len(get_neighbors(start_node)) == 0:
            return 99999
        pQueue = PriorityQueue()
        for neighbor in get_neighbors(start_node):
            pQueue.insert([neighbor[0], neighbor[1]], neighbor[1])

        while not pQueue.is_empty():
            current_node = pQueue.remove()
            key_current_node = current_node[0]
            cost_node = current_node[1]
            if key_current_node == goal_node:
                return cost_node

            for neighbor in get_neighbors(key_current_node):
                pQueue.insert([neighbor[0], cost_node + neighbor[1]], cost_node + neighbor[1])

    # admissible means h(n) never overestimates the true cost to the goal
    for node in Graph_nodes:
        if find_cost(node, 'G') < heuristic(node):
            return False
    return True

def check_consistency():
    # consistent means h(n) <= cost(n, n') + h(n') for every node n and successor n'
    for node in Graph_nodes:
        for neighbor in get_neighbors(node):
            if heuristic(node) > heuristic(neighbor[0]) + neighbor[1]:
                return False
    return True

# Describe your graph here
Graph_nodes = {
    'A': [('B', 2), ('E', 3)],
    'B': [('C', 1), ('G', 9)],
    'C': [],
    'E': [('D', 6)],
    'D': [('G', 1)],
}

aStarAlgo('A', 'G')

if check_consistency() == True:
    print("Graph is consistent")
else:
    print("Graph is not consistent")

if check_admissibility() == True:
    print("Graph is Admissible")
else:
    print("Graph is not Admissible")

Output:
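The screenshot is gone; worked by hand against the graph above (a sketch, not a captured run), the program should print:

Path found: ['A', 'E', 'D', 'G']
Graph is not consistent
Graph is not Admissible

h(A) = 11 exceeds both the true cost from A to G (10, via E-D) and c(A, B) + h(B) = 8, so this heuristic is neither admissible nor consistent.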

LAB-06 [Hill Climbing Algorithm]
Code:
import random

def randomSolution(tsp):
    cities = list(range(len(tsp)))
    solution = []
    ########### WRITE CODE HERE ##########
    # keep drawing random city indices until every city appears exactly once
    while len(solution) != len(tsp):
        temp = random.randint(0, len(tsp) - 1)
        if cities[temp] in solution:
            temp = temp % (len(tsp) - 1) + 1
            if cities[temp] not in solution:
                solution.append(cities[temp])
        else:
            solution.append(cities[temp])

    return solution

def routeLength(tsp, solution):
    routeLength = 0
    ###### WRITE CODE HERE ##########
    # sum the distances along the tour, then close the cycle back to the start
    initial_city = solution[0]
    for i in solution:
        routeLength += tsp[initial_city][i]
        initial_city = i
    routeLength += tsp[solution[0]][solution[-1]]
    return routeLength

def getNeighbours(solution):
    neighbours = []
    ####### WRITE CODE HERE #########
    # every neighbour is the current tour with one pair of cities swapped
    for i in range(len(solution)):
        for j in range(len(solution)):
            if i != j:
                new_sol = solution[:]
                new_sol[i], new_sol[j] = new_sol[j], new_sol[i]
                if new_sol not in neighbours:
                    neighbours.append(new_sol)
    return neighbours

def getBestNeighbour(tsp, neighbours):
    bestRouteLength = routeLength(tsp, neighbours[0])
    bestNeighbour = neighbours[0]
    ######## WRITE CODE HERE #########
    # loop through the cost of every neighbour and pick the one with minimum cost
    for i in neighbours:
        temp = routeLength(tsp, i)
        if temp < bestRouteLength:
            bestRouteLength = temp
            bestNeighbour = i
    return bestNeighbour, bestRouteLength

def hillClimbing(tsp):
    currentSolution = randomSolution(tsp)
    currentRouteLength = routeLength(tsp, currentSolution)
    neighbours = getNeighbours(currentSolution)
    bestNeighbour, bestNeighbourRouteLength = getBestNeighbour(tsp, neighbours)

    ########### WRITE CODE HERE #############
    # keep moving to the best neighbour while it improves on the current tour
    while bestNeighbourRouteLength < currentRouteLength:
        currentSolution = bestNeighbour
        currentRouteLength = bestNeighbourRouteLength
        neighbours = getNeighbours(currentSolution)
        bestNeighbour, bestNeighbourRouteLength = getBestNeighbour(tsp, neighbours)

    return currentSolution, currentRouteLength, neighbours, bestNeighbour, bestNeighbourRouteLength

def problemGenerator(nCities):
    tsp = []
    for i in range(nCities):
        distances = []
        for j in range(nCities):
            if j == i:
                distances.append(0)
            elif j < i:
                distances.append(tsp[j][i])
            else:
                distances.append(random.randint(10, 1000))
        tsp.append(distances)
    return tsp
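No driver is included for this lab; a minimal, hypothetical run over a randomly generated instance could look like:

# generate a random 8-city symmetric distance matrix and climb from a random tour
tsp = problemGenerator(8)
result = hillClimbing(tsp)
print("Best tour found:", result[0])
print("Tour length:", result[1])

Because hill climbing stops at the first local optimum, different runs over the same matrix can return different tours and lengths.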

LAB-07 [MIN-MAX Game Playing & Alpha-Beta Game Playing]

Task 1: Min-Max Algorithm


def fun_Minmax(cd, node, maxt, scr, td):
    if cd == td:
        return scr[node]
    if maxt:
        return max(fun_Minmax(cd + 1, node * 2, False, scr, td),
                   fun_Minmax(cd + 1, node * 2 + 1, False, scr, td))
    else:
        return min(fun_Minmax(cd + 1, node * 2, True, scr, td),
                   fun_Minmax(cd + 1, node * 2 + 1, True, scr, td))

scr = []  # list for leaf nodes

x = int(input("Enter total number of leaf nodes="))
for i in range(x):
    y = int(input("Enter Leaf Value: "))
    scr.append(y)

import math
print(len(scr))

td = math.log(len(scr), 2)  # tree depth; the number of leaves must be a power of 2
print(td)

cd = int(input("Enter Current Depth Value: "))
nodev = int(input("Enter node value: "))
maxt = True

print("The answer is: ", end="")
answer = fun_Minmax(cd, nodev, maxt, scr, td)
print(answer)

Task 2: Alpha-Beta Pruning


def fun_ABMinmax(cd, node, maxt, scr, td, alpha=-100000, beta=100000):
    if cd == td:
        print(scr[node], " ", end="")
        return scr[node]
    if maxt:
        L = fun_ABMinmax(cd + 1, node * 2, False, scr, td, alpha, beta)
        if L > alpha:
            alpha = L
        if alpha < beta:
            R = fun_ABMinmax(cd + 1, node * 2 + 1, False, scr, td, alpha, beta)
            return max(L, R)
        return alpha  # right subtree pruned
    else:
        L = fun_ABMinmax(cd + 1, node * 2, True, scr, td, alpha, beta)
        if L < beta:
            beta = L
        if alpha < beta:
            R = fun_ABMinmax(cd + 1, node * 2 + 1, True, scr, td, alpha, beta)
            return min(L, R)
        return beta  # right subtree pruned
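Task 2 has no driver of its own. Assuming the scr list and depth td collected in Task 1 are still in scope, a minimal call (hypothetical, not part of the original lab sheet) would be:

# run alpha-beta from the root; the leaf values printed along the way
# show which leaves were actually evaluated (the rest were pruned)
answer = fun_ABMinmax(0, 0, True, scr, td)
print("\nThe answer is:", answer)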

LAB-08 [AC-3 Algorithm CSP]
Code:
# Dictionary of domains for all variables
domains = {
    'A': [1, 2, 3],
    'B': [1, 2, 3],
    'C': [1, 2, 3]
}

constraints = {
    ('A', 'B'): lambda a, b: a > b,
    ('B', 'A'): lambda b, a: b < a,
    ('B', 'C'): lambda b, c: b == c,
    ('C', 'B'): lambda c, b: c == b,
}

def revise(x, y):
    ############### WRITE CODE HERE #################
    revised = False
    deleted = []

    ####### REMOVE EVERY VALUE OF x's DOMAIN THAT SATISFIES THE CONSTRAINT FOR NO VALUE OF y #######
    for i in domains[x]:
        temp = 0
        for j in domains[y]:
            if constraints[(x, y)](i, j) == False:
                temp += 1

        # i fails against every value in y's domain, so it can be removed
        if temp == len(domains[y]):
            deleted.append(i)
            revised = True

    for a in deleted:
        domains[x].remove(a)
    return revised

def ac33(arcs):
    queue = []
    # Add all the arcs to a queue.
    for x in arcs:
        queue.append(x)

    # Repeat until the queue is empty
    while len(queue) != 0:
        # Take the first arc off the queue (dequeue)
        row = queue.pop(0)
        # Make x arc-consistent with y
        revised = revise(row[0], row[1])
        # If the x domain has changed
        if revised:
            x = row[0]
            # Add all arcs of the form (k, x) to the queue (enqueue)
            neighbors = [neighbor for neighbor in arcs if (x == neighbor[1])]
            queue = queue + neighbors

arcs = [('A', 'B'), ('B', 'A'), ('B', 'C'), ('C', 'B')]
# arcs = [('A', 'B'), ('B', 'A')]
ac33(arcs)

print(domains) # {'A': [2, 3], 'C': [1, 2], 'B': [1, 2]}

Output:
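With Python 3.7+ insertion-ordered dictionaries the final print should read {'A': [2, 3], 'B': [1, 2], 'C': [1, 2]}, matching the comment above up to key order. These domains follow directly from the constraints: A > B eliminates A = 1, B < A eliminates B = 3, and B == C then restricts C to B's surviving values {1, 2}.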

LAB-10 [K-Means Clustering]
Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from sklearn.preprocessing import StandardScaler
%matplotlib inline

df = pd.read_csv('cluster_validation_data.txt', sep=",", header=None)

# normalize data
X = df.values
sc = StandardScaler()
sc.fit(X)
X = sc.transform(X)

def kmeans(X, k=3, max_iterations=100):
    X = np.array(X)
    # pick k distinct random rows of X as the initial centroids
    centroid = []
    for i in range(0, k):
        while True:
            rand = random.randint(0, len(X) - 1)
            if rand not in centroid:
                centroid.append(rand)
                break

    for i in range(0, k):
        centroid[i] = X[centroid[i]].copy()  # copy, so centroid updates do not overwrite the data row

    for itr in range(0, max_iterations):
        # assignment step: attach each point to its nearest centroid
        cluster = []
        for i in range(0, len(X)):
            distances = []
            for centre in centroid:
                distances.append(math.sqrt(sum([(a - b) ** 2 for a, b in zip(centre, X[i])])))
            cluster.append(distances.index(min(distances)))

        # update step: recompute each centroid as the mean of its cluster
        sums = []
        for i in range(k):
            sums.append([0, 0, 0])  # [sum of x, sum of y, point count]

        for i in range(len(X)):
            sums[cluster[i]][0] += X[i][0]
            sums[cluster[i]][1] += X[i][1]
            sums[cluster[i]][2] += 1

        for i in range(k):
            centroid[i][0] = sums[i][0] / sums[i][2]
            centroid[i][1] = sums[i][1] / sums[i][2]

    P = np.array(cluster)
    return P

P = kmeans(X)
assert len(df) == len(P)
# denormalize data
X = sc.inverse_transform(X)
plt.figure(figsize=(15, 10))
plt.scatter(X[:, 0], X[:, 1], c=P)
plt.show()

Output: a scatter plot of the data points, coloured by their assigned cluster.

LAB-11 [Simple Perceptron Model]
import numpy as np

operator = 'or'
attributes = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

if operator == 'and':
    labels = np.array([0, 0, 0, 1])
    # labels = np.array([-1, -1, -1, 1])
elif operator == 'or':
    labels = np.array([0, 1, 1, 1])
elif operator == 'xor':
    labels = np.array([0, 1, 1, 0])
elif operator == 'nand':
    labels = np.array([1, 1, 1, 0])

# Initializing the weights (parameters)
w = [0.9, 0.9]

bias = w[0]
# declaring hyperparameters
alpha = 0.005
epochs = 1000
threshold = 0.5

Task 1
for i in range(0, epochs):
    print("epoch", i + 1)
    for j in range(len(attributes)):
        actual = labels[j]
        weighted_sum = attributes[j][0] * w[0] + attributes[j][1] * w[1]
        if weighted_sum > threshold:
            predicted = 1
        else:
            predicted = 0
        delta = actual - predicted

        for k in range(0, 2):
            w[k] = w[k] + delta * alpha

        print(attributes[j][0], " ", operator, attributes[j][1],
              "->actual: ", actual, "predicted :", predicted, "Weights", w[0], " - ", w[1])
    print("----------------------")

Task 2:
for x in range(0, epochs):
    print("Epochs ", x + 1)
    for j in range(0, len(attributes)):
        actual = labels[j]
        yin = (attributes[j][0] * w[0]) + (attributes[j][1] * w[1]) + bias
        # yin = (attributes[j][0] * w[0]) + (attributes[j][1] * w[1])

        delta = actual - yin
        bias = bias + (alpha * delta)

        for k in range(0, 2):
            w[k] = w[k] + (alpha * delta * attributes[j][k])

        print(attributes[j][0], attributes[j][1],
              "->actual: ", actual, " yin: ", yin, " weights: ", w[0], " ", w[1])

Task 3:
The perceptron rule performs better than the delta rule here because the perceptron applies an activation (threshold) check during training, which the delta rule lacks; as a result the perceptron's error rate is nearly 0%, while the delta rule's is about 25%.

LAB-12 [Gradient Descent Optimization]
Code:
import numpy as np
import pandas as pd
from sklearn.datasets import make_regression
import pylab
from scipy import stats

df = pd.read_csv('USA_Housing.csv', sep=",")
X = df.values

def gradient_descent(alpha, X, ep=0.0001, max_iter=10000):
    converged = False
    iter = 0
    MSE = []

    ###### WRITE CODE HERE ########

    # 1. Initialize thetas according to the number of features in the dataset
    theta = [0.1, 2.5, 2.0, 4.5, 4.0, 3.0]

    # 2. Calculate the total cost over all instances
    while not converged:
        SE = 0
        for row in range(0, 5000):
            # prediction for this row under the current thetas
            cost = theta[0] + X[row][0] * theta[1] + X[row][1] * theta[2] \
                   + X[row][2] * theta[3] + X[row][3] * theta[4] + X[row][4] * theta[5]
            error = X[row][5] - cost
            print("cost: ", cost)
            print("err: ", error)

            # 3. Compute the gradient (d/d_theta j(theta)); for the squared error it is -error * x_j
            # 4. Apply the gradient descent update: theta_j := theta_j + alpha * error * x_j
            # 5. Retain the previous values of the thetas by assignment to a new list
            gradients = [
                theta[0] + (alpha * error),
                theta[1] + (alpha * error * X[row][0]),
                theta[2] + (alpha * error * X[row][1]),
                theta[3] + (alpha * error * X[row][2]),
                theta[4] + (alpha * error * X[row][3]),
                theta[5] + (alpha * error * X[row][4])
            ]
            for x in range(6):
                theta[x] = gradients[x]

            SE += error * error

        # 6. Compute the mean squared error for the updated thetas
        MSE.append(SE / 5000)

        # 7. Calculate the error difference; flag convergence when it is small enough
        if iter != 0:
            error_diff = MSE[iter] - MSE[iter - 1]
            if abs(error_diff) <= ep:
                converged = True

        # 8. Update the thetas and increment the iteration counter
        theta = gradients.copy()
        iter += 1

        # 9. Stop the loop once the maximum number of iterations is reached
        if iter > max_iter:
            converged = True

    return theta  # updated thetas returned

x, y = make_regression(n_samples=100, n_features=1, n_informative=1,
                       random_state=0, noise=35)
print('x.shape = %s y.shape = %s' % (x.shape, y.shape))

alpha = 0.01  # learning rate
ep = 0.01  # convergence criteria

# call gradient descent and get the fitted thetas
theta = gradient_descent(0.00000000001, X, ep=0.0001, max_iter=100)
print('theta0 =', theta[0])
print('theta1 =', theta[1])
print('theta2 =', theta[2])
print('theta3 =', theta[3])
print('theta4 =', theta[4])

# check with scipy linear regression
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x[:, 0], y)
print('intercept = ', intercept)
print('Slope = ', slope)

# plot
y_predict = theta[0] + theta[1] * x

pylab.plot(x, y, 'o')
pylab.plot(x, y_predict, 'k-')
pylab.show()
print("Done!")

Output: the learned theta values, the scipy linregress comparison, and the regression plot.
