Practical File
Deep Learning
Submitted by:
Harmanjeet Singh
B.Tech CSE (7th Semester)
1803448
Submitted to:
Ms. Mamta
PRACTICAL 1
AIM: To create a basic neural network and analyze its performance.
>> [x,t] = wine_dataset;        % load wine inputs x and one-hot class targets t
>> size(x)
>> size(t)
>> net = patternnet(10);        % pattern-recognition network, 10 hidden neurons
>> view(net)
>> [net,tr] = train(net,x,t);   % tr records the data division and training progress
>> nntraintool                  % open the training-tool GUI
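The AIM also asks for a performance analysis; a minimal follow-up sketch on the network trained above (an addition, not part of the original listing), where vec2ind converts one-hot columns to class indices:
>> y = net(x);                               % outputs for all samples
>> perf = perform(net, t, y)                 % cross-entropy performance
>> accuracy = sum(vec2ind(y) == vec2ind(t)) / size(t,2)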
PRACTICAL 2
AIM: To plot the confusion matrix and check the network for overfitting.
>> net = patternnet(10);
>> view(net)
>> [net,tr] = train(net,x,t);
>> nntraintool
>> nntraintool('close')
>> plotperform(tr)                  % training/validation/test error curves
>> testX = x(:,tr.testInd);         % held-out test samples
>> testT = t(:,tr.testInd);
>> testY = net(testX);
>> testIndices = vec2ind(testY)     % predicted class of each test sample
>> plotconfusion(testT,testY)
>> [c,cm] = confusion(testT,testY)  % c = misclassification fraction, cm = count matrix
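To check for overfitting, training and test performance can be compared; a minimal added sketch using the variables above:
>> trainX = x(:,tr.trainInd);  trainT = t(:,tr.trainInd);
>> trainPerf = perform(net, trainT, net(trainX))
>> testPerf = perform(net, testT, testY)
% a test error much larger than the training error indicates overfitting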
OUTPUT:
PRACTICAL 3
AIM: To visualize a neural network.
>> [x,t]=wine_dataset;
>> size(x)
>> size(t)
>> net=patternnet(10);
>> view(net)
OUTPUT:
PRACTICAL 4
AIM: Demo of object detection with a pre-trained RetinaNet model in Keras.
# excerpts from a KangarooDataset class (see the sketch after this listing)
from numpy import asarray
from matplotlib import pyplot

# ... tail of KangarooDataset.load_mask(): record each object's class id,
# then return the masks together with the class-id array
        class_ids.append(self.class_names.index('kangaroo'))
        return masks, asarray(class_ids, dtype='int32')

    # load an image reference (the path of the image file)
    def image_reference(self, image_id):
        info = self.image_info[image_id]
        return info['path']

# train set
train_set = KangarooDataset()
train_set.load_dataset('kangaroo', is_train=True)
train_set.prepare()
# load an image
image_id = 0
image = train_set.load_image(image_id)
print(image.shape)
# load image mask
mask, class_ids = train_set.load_mask(image_id)
print(mask.shape)
# plot image
pyplot.imshow(image)
# plot mask (first object's mask, semi-transparent over the image)
pyplot.imshow(mask[:, :, 0], cmap='gray', alpha=0.5)
pyplot.show()
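These two methods follow the matterport Mask R-CNN Dataset API rather than RetinaNet; a minimal sketch of the surrounding class, assuming the mrcnn package (the load_dataset body is an abridged assumption based on that library's kangaroo example):
from os import listdir
from mrcnn.utils import Dataset

class KangarooDataset(Dataset):
    # register the single object class and every image/annotation pair
    def load_dataset(self, dataset_dir, is_train=True):
        self.add_class("dataset", 1, "kangaroo")
        images_dir = dataset_dir + '/images/'
        annots_dir = dataset_dir + '/annots/'
        for filename in listdir(images_dir):
            image_id = filename[:-4]   # e.g. '00001.jpg' -> '00001'
            self.add_image('dataset', image_id=image_id,
                           path=images_dir + filename,
                           annotation=annots_dir + image_id + '.xml')
    # load_mask() and image_reference() as shown in the listing above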
OUTPUT:
PRACTICAL 5
AIM: Building a movie recommender system with k-nearest-neighbour collaborative filtering.
# ... tail of find_similar_movies() (the full function is sketched below)
    return neighbour_ids

movie_titles = dict(zip(movies['movieId'], movies['title']))
movie_id = 3
similar_ids = find_similar_movies(movie_id, X, k=10)
movie_title = movie_titles[movie_id]
print(f"Since you watched {movie_title}")
for i in similar_ids:
    print(movie_titles[i])
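The body of find_similar_movies is not shown above; a minimal sketch, assuming a scipy sparse movie-by-user ratings matrix X and movie_mapper / movie_inv_mapper dictionaries between movieId and row index (names assumed, following the usual MovieLens k-NN recipe):
from sklearn.neighbors import NearestNeighbors

def find_similar_movies(movie_id, X, k, metric='cosine'):
    # row of X holding this movie's rating vector
    movie_ind = movie_mapper[movie_id]
    movie_vec = X[movie_ind]
    # k+1 neighbours because the nearest neighbour is the movie itself
    kNN = NearestNeighbors(n_neighbors=k + 1, metric=metric)
    kNN.fit(X)
    neighbour = kNN.kneighbors(movie_vec, return_distance=False)
    # drop index 0 (the movie itself) and map row indices back to movieIds
    neighbour_ids = [movie_inv_mapper[n] for n in neighbour[0][1:]]
    return neighbour_ids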
OUTPUT:
Number of ratings: 100836
Striptease (1996)
Twister (1996)
Bio-Dome (1996)
Sabrina (1995)
PRACTICAL 6
AIM: Implementing a neural network with back-propagation from scratch (Iris dataset).
import numpy as np
import matplotlib.pyplot as plt
import h5py
nn_backprop.py:
import numpy as np
import random
import math
import sys

# helper functions
def loadFile(df):
    # load a comma-delimited text file into a float32 numpy matrix
    resultList = []
    f = open(df, 'r')
    for line in f:
        line = line.rstrip('\n')
        sVals = line.split(',')
        fVals = list(map(float, sVals))
        resultList.append(fVals)
    f.close()
    return np.asarray(resultList, dtype=np.float32)
# end loadFile
def showVector(v, dec):
    fmt = "%." + str(dec) + "f"  # e.g. %.4f
    for i in range(len(v)):
        x = v[i]
        if x >= 0.0: print(' ', end='')
        print(fmt % x + '  ', end='')
    print('')
# end showVector

def showMatrix(m, dec):
    fmt = "%." + str(dec) + "f"
    for i in range(len(m)):
        for j in range(len(m[i])):
            x = m[i,j]
            if x >= 0.0: print(' ', end='')
            print(fmt % x + '  ', end='')
        print('')
# end showMatrix

def showMatrixPartial(m, numRows, dec, indices):
    # print the first numRows rows, an ellipsis, then the last row
    fmt = "%." + str(dec) + "f"
    lastRow = len(m) - 1
    width = len(str(lastRow))
    for i in range(numRows):
        if indices == True:
            print("[", end='')
            print(str(i).rjust(width), end='')
            print("] ", end='')
        for j in range(len(m[i])):
            x = m[i,j]
            if x >= 0.0: print(' ', end='')
            print(fmt % x + '  ', end='')
        print('')
    print(" . . . ")
    if indices == True:
        print("[", end='')
        print(str(lastRow).rjust(width), end='')
        print("] ", end='')
    for j in range(len(m[lastRow])):
        x = m[lastRow,j]
        if x >= 0.0: print(' ', end='')
        print(fmt % x + '  ', end='')
    print('')
# end showMatrixPartial

class NeuralNetwork:
    def __init__(self, numInput, numHidden, numOutput, seed):
        self.ni = numInput
        self.nh = numHidden
        self.no = numOutput
        # node values, weights and biases (float32 numpy arrays)
        self.iNodes = np.zeros(self.ni, dtype=np.float32)
        self.hNodes = np.zeros(self.nh, dtype=np.float32)
        self.oNodes = np.zeros(self.no, dtype=np.float32)
        self.ihWeights = np.zeros((self.ni, self.nh), dtype=np.float32)
        self.hBiases = np.zeros(self.nh, dtype=np.float32)
        self.hoWeights = np.zeros((self.nh, self.no), dtype=np.float32)
        self.oBiases = np.zeros(self.no, dtype=np.float32)
        self.rnd = random.Random(seed)  # for reproducible weight initialization
        self.initializeWeights()

    def setWeights(self, weights):
        # copy a flat weight vector into the weight/bias matrices
        idx = 0
        for i in range(self.ni):
            for j in range(self.nh):
                self.ihWeights[i,j] = weights[idx]
                idx += 1
        for j in range(self.nh):
            self.hBiases[j] = weights[idx]
            idx += 1
        for j in range(self.nh):
            for k in range(self.no):
                self.hoWeights[j,k] = weights[idx]
                idx += 1
        for k in range(self.no):
            self.oBiases[k] = weights[idx]
            idx += 1

    def getWeights(self):
        # return all weights and biases as a flat vector
        tw = self.totalWeights(self.ni, self.nh, self.no)
        result = np.zeros(tw, dtype=np.float32)
        idx = 0
        for i in range(self.ni):
            for j in range(self.nh):
                result[idx] = self.ihWeights[i,j]
                idx += 1
        for j in range(self.nh):
            result[idx] = self.hBiases[j]
            idx += 1
        for j in range(self.nh):
            for k in range(self.no):
                result[idx] = self.hoWeights[j,k]
                idx += 1
        for k in range(self.no):
            result[idx] = self.oBiases[k]
            idx += 1
        return result
    def initializeWeights(self):
        # small random weights in [-0.01, 0.01]
        numWts = self.totalWeights(self.ni, self.nh, self.no)
        lo = -0.01; hi = 0.01
        wts = np.float32([(hi - lo) * self.rnd.random() + lo
                          for _ in range(numWts)])
        self.setWeights(wts)
    def computeOutputs(self, xValues):
        hSums = np.zeros(self.nh, dtype=np.float32)
        oSums = np.zeros(self.no, dtype=np.float32)
        for i in range(self.ni):
            self.iNodes[i] = xValues[i]
        # input -> hidden, tanh activation
        for j in range(self.nh):
            for i in range(self.ni):
                hSums[j] += self.iNodes[i] * self.ihWeights[i,j]
        for j in range(self.nh):
            hSums[j] += self.hBiases[j]
        for j in range(self.nh):
            self.hNodes[j] = self.hypertan(hSums[j])
        # hidden -> output, softmax activation
        for k in range(self.no):
            for j in range(self.nh):
                oSums[k] += self.hNodes[j] * self.hoWeights[j,k]
        for k in range(self.no):
            oSums[k] += self.oBiases[k]
        softOut = self.softmax(oSums)
        for k in range(self.no):
            self.oNodes[k] = softOut[k]
        result = np.zeros(self.no, dtype=np.float32)
        for k in range(self.no):
            result[k] = self.oNodes[k]
        return result
    def train(self, trainData, maxEpochs, learnRate):
        # gradient arrays and local signals for back-propagation
        hoGrads = np.zeros((self.nh, self.no), dtype=np.float32)
        obGrads = np.zeros(self.no, dtype=np.float32)
        ihGrads = np.zeros((self.ni, self.nh), dtype=np.float32)
        hbGrads = np.zeros(self.nh, dtype=np.float32)
        oSignals = np.zeros(self.no, dtype=np.float32)
        hSignals = np.zeros(self.nh, dtype=np.float32)

        epoch = 0
        x_values = np.zeros(self.ni, dtype=np.float32)
        t_values = np.zeros(self.no, dtype=np.float32)
        numTrainItems = len(trainData)
        indices = np.arange(numTrainItems)

        while epoch < maxEpochs:
            self.rnd.shuffle(indices)  # visit training items in random order
            for ii in range(numTrainItems):
                idx = indices[ii]
                for j in range(self.ni):
                    x_values[j] = trainData[idx, j]
                for j in range(self.no):
                    t_values[j] = trainData[idx, j + self.ni]
                self.computeOutputs(x_values)  # results stored internally

                # output node signals (softmax derivative x error)
                for k in range(self.no):
                    derivative = (1 - self.oNodes[k]) * self.oNodes[k]
                    oSignals[k] = derivative * (self.oNodes[k] - t_values[k])
                # hidden-to-output weight and output bias gradients
                for j in range(self.nh):
                    for k in range(self.no):
                        hoGrads[j,k] = oSignals[k] * self.hNodes[j]
                for k in range(self.no):
                    obGrads[k] = oSignals[k]
                # hidden node signals (tanh derivative)
                for j in range(self.nh):
                    sum = 0.0
                    for k in range(self.no):
                        sum += oSignals[k] * self.hoWeights[j,k]
                    derivative = (1 - self.hNodes[j]) * (1 + self.hNodes[j])
                    hSignals[j] = derivative * sum
                # input-to-hidden weight and hidden bias gradients
                for i in range(self.ni):
                    for j in range(self.nh):
                        ihGrads[i,j] = hSignals[j] * self.iNodes[i]
                for j in range(self.nh):
                    hbGrads[j] = hSignals[j]

                # update weights and biases (gradient descent step)
                for i in range(self.ni):
                    for j in range(self.nh):
                        delta = -1.0 * learnRate * ihGrads[i,j]
                        self.ihWeights[i, j] += delta
                for j in range(self.nh):
                    delta = -1.0 * learnRate * hbGrads[j]
                    self.hBiases[j] += delta
                for j in range(self.nh):
                    for k in range(self.no):
                        delta = -1.0 * learnRate * hoGrads[j,k]
                        self.hoWeights[j, k] += delta
                for k in range(self.no):
                    delta = -1.0 * learnRate * obGrads[k]
                    self.oBiases[k] += delta

            epoch += 1
            if epoch % 10 == 0:
                mse = self.meanSquaredError(trainData)
                print("epoch = " + str(epoch) + "  ms error = %0.4f " % mse)
        # end while

        result = self.getWeights()
        return result
    # end train
    def accuracy(self, tdata):  # train or test data matrix
        num_correct = 0; num_wrong = 0
        x_values = np.zeros(self.ni, dtype=np.float32)
        t_values = np.zeros(self.no, dtype=np.float32)
        for i in range(len(tdata)):
            for j in range(self.ni):  # peel off input values from curr data row
                x_values[j] = tdata[i,j]
            for j in range(self.no):  # peel off target values from curr data row
                t_values[j] = tdata[i, j + self.ni]
            y_values = self.computeOutputs(x_values)
            max_index = np.argmax(y_values)
            if abs(t_values[max_index] - 1.0) < 1.0e-5:
                num_correct += 1
            else:
                num_wrong += 1
        return (num_correct * 1.0) / (num_correct + num_wrong)

    def meanSquaredError(self, tdata):
        sumSquaredError = 0.0
        x_values = np.zeros(self.ni, dtype=np.float32)
        t_values = np.zeros(self.no, dtype=np.float32)
        for ii in range(len(tdata)):
            for jj in range(self.ni):  # peel off input values from curr data row
                x_values[jj] = tdata[ii, jj]
            for jj in range(self.no):  # peel off target values from curr data row
                t_values[jj] = tdata[ii, jj + self.ni]
            y_values = self.computeOutputs(x_values)
            for j in range(self.no):
                err = t_values[j] - y_values[j]
                sumSquaredError += err * err
        return sumSquaredError / len(tdata)
    @staticmethod
    def hypertan(x):
        # tanh clamped to avoid math range errors for extreme sums
        if x < -20.0:
            return -1.0
        elif x > 20.0:
            return 1.0
        else:
            return math.tanh(x)

    @staticmethod
    def softmax(oSums):
        # numerically stable softmax: shift by the max before exponentiating
        result = np.zeros(len(oSums), dtype=np.float32)
        m = max(oSums)
        divisor = 0.0
        for k in range(len(oSums)):
            divisor += math.exp(oSums[k] - m)
        for k in range(len(result)):
            result[k] = math.exp(oSums[k] - m) / divisor
        return result

    @staticmethod
    def totalWeights(nInput, nHidden, nOutput):
        tw = (nInput * nHidden) + (nHidden * nOutput) + nHidden + nOutput
        return tw
# end class NeuralNetwork
def main():
    print("\nBegin NN back-propagation demo \n")
    pv = sys.version
    npv = np.version.version
    print("Using Python version " + str(pv) +
          "\n and numpy version " + str(npv))

    numInput = 4
    numHidden = 5
    numOutput = 3
    print("\nCreating a %d-%d-%d neural network " %
          (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed=3)

    trainDataPath = "irisTrainData.txt"
    trainDataMatrix = loadFile(trainDataPath)
    print("\nTraining data: ")
    showMatrixPartial(trainDataMatrix, 4, 1, True)
    testDataPath = "irisTestData.txt"
    testDataMatrix = loadFile(testDataPath)

    maxEpochs = 50
    learnRate = 0.05
    print("\nStarting training")
    nn.train(trainDataMatrix, maxEpochs, learnRate)
    print("Training complete")

    accTrain = nn.accuracy(trainDataMatrix)
    accTest = nn.accuracy(testDataMatrix)
    print("\nAccuracy on train data = %0.4f " % accTrain)
    print("Accuracy on test data  = %0.4f " % accTest)

if __name__ == "__main__":
    main()
# end script
PRACTICAL 7
AIM: Text classification and word vectors.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
#
# Create a sample set of documents
#
docs = np.array(['Mirabai has won a silver medal in weight lifting in Tokyo olympics 2021',
                 'Sindhu has won a bronze medal in badminton in Tokyo olympics',
                 'Indian hockey team is in top four team in Tokyo olympics 2021 after 40 years'])
#
# Fit the bag-of-words model
#
bag = vectorizer.fit_transform(docs)
#
# Get the unique words / tokens found in all the documents;
# these tokens are the features
#
print(vectorizer.get_feature_names())
#
# Associate an index with each unique word
#
print(vectorizer.vocabulary_)
#
# Print the numerical feature vectors
#
print(bag.toarray())
OUTPUT:
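The AIM also mentions word vectors, which bag-of-words counts do not provide; a minimal added sketch that trains word vectors on the same docs array, assuming gensim (an assumption, not used elsewhere in this file):
from gensim.models import Word2Vec

tokenized = [doc.lower().split() for doc in docs]
model = Word2Vec(sentences=tokenized, vector_size=50,
                 window=3, min_count=1, epochs=100)
print(model.wv['olympics'])            # the 50-dimensional vector for one token
print(model.wv.most_similar('medal'))  # nearest tokens by cosine similarity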
PRACTICAL 8
AIM: Fully convolutional networks.
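A minimal sketch of the idea, assuming TensorFlow/Keras with illustrative layer sizes: a fully convolutional network has no Dense layers, so global average pooling lets it classify inputs of any spatial size.
import tensorflow as tf
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Input(shape=(None, None, 3)),               # any image size
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(10, 1),                              # 1x1 conv replaces the Dense head
    layers.GlobalAveragePooling2D(),                   # one score per class
    layers.Softmax(),
])
model.summary()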
OUTPUT:
PRACTICAL 9
AIM: ConvNets for classification and localization.
import cv2
import os
import numpy as np
from random import shuffle
from tqdm import tqdm

TRAIN_DIR = 'E:/dataset/Cats_vs_Dogs/train'
TEST_DIR = 'E:/dataset/Cats_vs_Dogs/test1'
IMG_SIZE = 50
LR = 1e-3
MODEL_NAME = 'dogsvscats-{}-{}.model'.format(LR, '6conv-basic')

def label_img(img):
    # one-hot label from the file name, e.g. 'cat.0.jpg' -> [1, 0]
    word_label = img.split('.')[-3]
    if word_label == 'cat': return [1, 0]
    elif word_label == 'dog': return [0, 1]

def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data

def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split('.')[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
OUTPUT:
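The listing above only prepares the data; a minimal sketch of the '6conv-basic' model that MODEL_NAME refers to, assuming tflearn (an assumption, following the common Cats-vs-Dogs tutorial this code matches; the filter counts are illustrative):
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
for n_filters in (32, 64, 128, 64, 32, 64):            # six conv/pool blocks
    convnet = conv_2d(convnet, n_filters, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')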
PRACTICAL 10
AIM: Character-level language model.
gpu = gpuDevice();
fprintf('Using a %s GPU.\n', gpu.Name)
sizeOfDouble = 8; % each double-precision number needs 8 bytes of storage
sizes = power(2, 14:28); % array sizes from 16 KB to 256 MB
sendTimes = inf(size(sizes));
gatherTimes = inf(size(sizes));
for ii=1:numel(sizes)
    numElements = sizes(ii)/sizeOfDouble;
    hostData = randi([0 9], numElements, 1);
    gpuData = randi([0 9], numElements, 1, 'gpuArray');
    % Time sending to GPU
    sendFcn = @() gpuArray(hostData);
    sendTimes(ii) = gputimeit(sendFcn);
    % Time gathering back from GPU
    gatherFcn = @() gather(gpuData);
    gatherTimes(ii) = gputimeit(gatherFcn);
end
sendBandwidth = (sizes./sendTimes)/1e9;
[maxSendBandwidth,maxSendIdx] = max(sendBandwidth);
fprintf('Achieved peak send speed of %g GB/s\n', maxSendBandwidth)
gatherBandwidth = (sizes./gatherTimes)/1e9;
[maxGatherBandwidth,maxGatherIdx] = max(gatherBandwidth);
fprintf('Achieved peak gather speed of %g GB/s\n', maxGatherBandwidth)
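A minimal added sketch to visualize both bandwidth curves (semilogx, since the sizes grow by powers of two):
% plot send and gather bandwidth against array size
semilogx(sizes, sendBandwidth, 'b.-', sizes, gatherBandwidth, 'r.-')
legend('Send to GPU', 'Gather from GPU', 'Location', 'NorthWest')
xlabel('Array size (bytes)')
ylabel('Transfer speed (GB/s)')
title('Data transfer bandwidth')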
OUTPUT: