#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 24 19:58:18 2021
@author: jaydevraval
"""
#Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as pyplot
# Load the training set; the path placeholder must be filled in before running.
trainingData = pd.read_csv(r"")
# Min-max normalize every column to [0, 1].
# BUG FIX: the original lost the `for i in ...` column-loop header, leaving
# `i` undefined (NameError) and the loop body dedented to top level.
# NOTE(review): this also rescales the label column — confirm that is intended.
for i in trainingData.columns:
    minimum = min(trainingData[i].values)
    maximum = max(trainingData[i].values)
    normalizeList = []
    for j in trainingData[i]:
        # Guard against a constant column (max == min) to avoid ZeroDivisionError.
        if maximum == minimum:
            normalizeList.append(0.0)
        else:
            normalizeList.append((j - minimum) / (maximum - minimum))
    trainingData[i] = normalizeList
# Work on a plain ndarray from here on for positional row/column access.
trainingData = trainingData.to_numpy()
# Layout convention: every column but the last is a feature, the last is the label.
trainingsampleSize, nColumns = trainingData.shape
featureSize = nColumns - 1
epoch = 500
# Per-epoch mean squared error and per-sample predictions, filled during training.
mse = np.zeros(epoch)
y_prediction_training = np.zeros(trainingsampleSize)
# Weights start at zero. `bias` is declared here but never trained or used
# in the prediction sum below.
w = np.zeros(featureSize)
bias = 0
#Training Data
# Simple perceptron-style training with a step-decayed learning rate.
for e in range(epoch):
    # BUG FIX: report the configured epoch count instead of a hard-coded 500.
    print("Epoch {}/{}".format(e, epoch))
    # Learning-rate schedule. BUG FIX: the original set no eta for e >= 300
    # and silently reused the 0.05 carried over from the previous iteration;
    # the explicit else keeps that effective schedule but makes it visible.
    if e < 100:
        eta = 0.5
    elif e < 300:
        eta = 0.05
    else:
        eta = 0.05
    err = []
    for i in range(trainingsampleSize):
        x = trainingData[i][:-1]
        y = trainingData[i][-1]
        # Weighted sum of the features. `bias` is intentionally left out to
        # match the original formulation (it is 0 and never updated).
        summation = 0
        for p in range(featureSize):
            summation = summation + (w[p] * x[p])
        # NOTE(review): np.sign(0) is 0, so a sample exactly on the boundary
        # predicts neither class — confirm labels are in {-1, +1}.
        y_pred = np.sign(summation)
        # Update every feature weight toward the true label.
        for p in range(featureSize):
            w[p] = w[p] + eta * (y - y_pred) * x[p]
        err.append(pow(y - y_pred, 2))
        y_prediction_training[i] = y_pred
    # Mean squared error over the epoch.
    mse[e] = sum(err) / len(err)
#Testing
#Preparing Testing Dataset
# Load test set 1; the path placeholder must be filled in before running.
testingData_1 = pd.read_csv(r"")
testingData_1 = testingData_1.drop('date', axis=1)
# Min-max normalize every column, mirroring the training preprocessing.
# BUG FIX: the original lost the `for i in ...` column-loop header, so `i`
# was whatever a previous loop left behind and only one column was touched.
for i in testingData_1.columns:
    minimum = min(testingData_1[i].values)
    maximum = max(testingData_1[i].values)
    normalizeList = []
    for j in testingData_1[i]:
        # Guard against a constant column (max == min) to avoid ZeroDivisionError.
        if maximum == minimum:
            normalizeList.append(0.0)
        else:
            normalizeList.append((j - minimum) / (maximum - minimum))
    testingData_1[i] = normalizeList
#Converting to Ndarray
testingData = testingData_1.to_numpy()
# Evaluate the trained weights on the first test set.
testingsampleSize_1, width = testingData.shape
featureSize = width - 1
y_prediction_1 = np.zeros(testingsampleSize_1)
for n in range(testingsampleSize_1):
    x = testingData[n][:-1]
    y = testingData[n][-1]
    # Accumulate the dot product in the same left-to-right order as training.
    summation = 0
    for weight, feature in zip(w, x):
        summation += weight * feature
    y_pred = np.sign(summation)
    y_prediction_1[n] = y_pred
# BUG FIX: `testingData_2` was referenced here but never loaded (NameError).
# Load and prepare it the same way as testingData_1; the path placeholder
# must be filled in before running.
testingData_2 = pd.read_csv(r"")
testingData_2 = testingData_2.drop('date', axis=1)
# Min-max normalize every column (column loop restored, as above).
for i in testingData_2.columns:
    minimum = min(testingData_2[i].values)
    maximum = max(testingData_2[i].values)
    normalizeList = []
    for j in testingData_2[i]:
        # Guard against a constant column (max == min) to avoid ZeroDivisionError.
        if maximum == minimum:
            normalizeList.append(0.0)
        else:
            normalizeList.append((j - minimum) / (maximum - minimum))
    testingData_2[i] = normalizeList
#Converting to Ndarray
testingData = testingData_2.to_numpy()
# Evaluate the trained weights on the second test set.
testingsampleSize_2, width = testingData.shape
featureSize = width - 1
y_prediction_2 = np.zeros(testingsampleSize_2)
for n in range(testingsampleSize_2):
    x = testingData[n][:-1]
    y = testingData[n][-1]
    # Accumulate the dot product in the same left-to-right order as training.
    summation = 0
    for weight, feature in zip(w, x):
        summation += weight * feature
    y_pred = np.sign(summation)
    y_prediction_2[n] = y_pred
print("============================")
#Misclassification in Training
err = 0
for i in range(trainingsampleSize):
    y = trainingData[i][-1]
    y_pred = y_prediction_training[i]
    if abs(y - y_pred) != 0:
        err += 1
# BUG FIX: the count was computed but never reported.
print("Training misclassifications: {}/{}".format(err, trainingsampleSize))
print("============================")
#Misclassification in Test Set 1
err = 0
# BUG FIX: at this point `testingData` holds the SECOND test set (it was
# overwritten after test-1 prediction), so the original compared test-1
# predictions against test-2 labels. Read the labels from testingData_1.
labels_1 = testingData_1.to_numpy()
for i in range(testingsampleSize_1):
    y = labels_1[i][-1]
    y_pred = y_prediction_1[i]
    if abs(y - y_pred) != 0:
        err += 1
# BUG FIX: the count was computed but never reported.
print("Test set 1 misclassifications: {}/{}".format(err, testingsampleSize_1))
print("============================")
#Misclassification in Test Set 2
# `testingData` still holds test set 2's ndarray here, so this comparison
# uses the correct labels.
err = 0
for i in range(testingsampleSize_2):
    y = testingData[i][-1]
    y_pred = y_prediction_2[i]
    if abs(y - y_pred) != 0:
        err += 1
# BUG FIX: the count was computed but never reported.
print("Test set 2 misclassifications: {}/{}".format(err, testingsampleSize_2))
print("============================")